text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Wrapper around 'git rev-list'."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlgit_revlist
#
# Public Functions:
# commits
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def commits(repo, *commits):
    """Return list of strings of commit SHA1s reachable from each in 'commits'.

    :repo: a callable supporting git commands, e.g. repo("status")
    :*commits: string names of commits to traverse
    :returns: list of string commit SHA1s

    """
    rev_list_output = repo('rev-list', *commits)
    return rev_list_output.splitlines()
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
{
"content_hash": "726d90a721a64fe00cdc7a05f65394f7",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 38.11363636363637,
"alnum_prop": 0.5241502683363148,
"repo_name": "bloomberg/phabricator-tools",
"id": "8a7b3616030d9e3a19ddc0155e637c364a5ca49b",
"size": "1677",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "py/phl/phlgit_revlist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "342"
},
{
"name": "Dockerfile",
"bytes": "3828"
},
{
"name": "HTML",
"bytes": "471"
},
{
"name": "Puppet",
"bytes": "4016"
},
{
"name": "Python",
"bytes": "1069073"
},
{
"name": "Ruby",
"bytes": "1945"
},
{
"name": "Shell",
"bytes": "135331"
}
],
"symlink_target": ""
}
|
# Debug flag.  NOTE(review): not read anywhere in this file — presumably
# consumed elsewhere in the library; confirm before changing.
DEBUG_MODE = True

# Ordered List of Coin Symbol Dictionaries
# One dict per supported chain/network; list order is the preferred
# ("best" first) display order.  See REQUIRED_FIELDS below for the meaning
# of each key.
COIN_SYMBOL_ODICT_LIST = [
    {
        'coin_symbol': 'btc',
        'display_name': 'Bitcoin',
        'display_shortname': 'BTC',
        'blockcypher_code': 'btc',
        'blockcypher_network': 'main',
        'currency_abbrev': 'BTC',
        'pow': 'sha',
        'example_address': '16Fg2yjwrbtC6fZp61EV9mNVKmwCzGasw5',
        'address_first_char_list': ('1', '3'),
    },
    {
        'coin_symbol': 'btc-testnet',
        'display_name': 'Bitcoin Testnet',
        'display_shortname': 'BTC Testnet',
        'blockcypher_code': 'btc',
        'blockcypher_network': 'test3',
        'currency_abbrev': 'BTC',
        'pow': 'sha',
        'example_address': '2N1rjhumXA3ephUQTDMfGhufxGQPZuZUTMk',
        'address_first_char_list': ('m', 'n', '2'),
    },
    {
        'coin_symbol': 'ltc',
        'display_name': 'Litecoin',
        'display_shortname': 'LTC',
        'blockcypher_code': 'ltc',
        'blockcypher_network': 'main',
        'currency_abbrev': 'LTC',
        'pow': 'scrypt',
        'example_address': 'LcFFkbRUrr8j7TMi8oXUnfR4GPsgcXDepo',
        'address_first_char_list': ('U', '3'),
    },
    {
        'coin_symbol': 'doge',
        'display_name': 'Dogecoin',
        'display_shortname': 'DOGE',
        'blockcypher_code': 'doge',
        'blockcypher_network': 'main',
        'currency_abbrev': 'DOGE',
        'pow': 'scrypt',
        'example_address': 'D7Y55r6Yoc1G8EECxkQ6SuSjTgGJJ7M6yD',
        'address_first_char_list': ('D', '9', 'A'),
    },
    {
        'coin_symbol': 'uro',
        'display_name': 'Uro',
        'display_shortname': 'URO',
        'blockcypher_code': 'uro',
        'blockcypher_network': 'main',
        'currency_abbrev': 'URO',
        'pow': 'sha',
        'example_address': 'Uhf1LGdgmWe33hB9VVtubyzq1GduUAtaAJ',
        'address_first_char_list': ('U', ),  # TODO: more?
    },
    {
        'coin_symbol': 'bcy',
        'display_name': 'BlockCypher Testnet',
        'display_shortname': 'BC Testnet',
        'blockcypher_code': 'bcy',
        'blockcypher_network': 'test',
        'currency_abbrev': 'BCY',
        'pow': 'sha',
        'example_address': 'CFr99841LyMkyX5ZTGepY58rjXJhyNGXHf',
        'address_first_char_list': ('B', 'C', 'D'),
    },
]
# All fields below are required in every COIN_SYMBOL_ODICT_LIST entry.
REQUIRED_FIELDS = (
    'coin_symbol',  # a made-up unique symbol for this library's use only
    'display_name',  # what it commonly looks like
    'display_shortname',  # abbreviated display_name (for when space is tight)
    'blockcypher_code',  # blockcypher's unique ID (for their URLs)
    'blockcypher_network',  # the blockcypher network (main/test)
    'currency_abbrev',  # what the unit of currency looks like when abbreviated
    'pow',  # the proof of work algorithm (sha/scrypt)
    'example_address',  # an example address
)

ELIGIBLE_POW_ENTRIES = {'sha', 'scrypt'}

# Sanity-check the data at import time: each entry must use a known
# proof-of-work algorithm and carry every required field.
for entry in COIN_SYMBOL_ODICT_LIST:
    assert entry['pow'] in ELIGIBLE_POW_ENTRIES, entry['pow']
    for field_name in REQUIRED_FIELDS:
        assert field_name in entry

COIN_SYMBOL_LIST = [x['coin_symbol'] for x in COIN_SYMBOL_ODICT_LIST]
SHA_COINS = [x['coin_symbol'] for x in COIN_SYMBOL_ODICT_LIST if x['pow'] == 'sha']
SCRYPT_COINS = [x['coin_symbol'] for x in COIN_SYMBOL_ODICT_LIST if x['pow'] == 'scrypt']
# For django-style choice lists (with "best" order preserved).
COIN_CHOICES = []
for coin_symbol_dict in COIN_SYMBOL_ODICT_LIST:
    COIN_CHOICES.append(
        (coin_symbol_dict['coin_symbol'], coin_symbol_dict['display_name']))

# Mappings (similar to above but easier retrieval for when order doesn't
# matter).  Copy each dict before popping: the previous code popped
# 'coin_symbol' from the dicts *inside* COIN_SYMBOL_ODICT_LIST, destructively
# removing that key from the shared list entries as a side effect.
COIN_SYMBOL_MAPPINGS = {}
for coin_symbol_dict in COIN_SYMBOL_ODICT_LIST:
    mapping_entry = dict(coin_symbol_dict)
    coin_symbol = mapping_entry.pop('coin_symbol')
    COIN_SYMBOL_MAPPINGS[coin_symbol] = mapping_entry
|
{
"content_hash": "0fa13778a74c18900ed27806e4033fca",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 92,
"avg_line_length": 39.26605504587156,
"alnum_prop": 0.561214953271028,
"repo_name": "wizardofozzie/blockcypher-python",
"id": "d1e20a463f1bc7c765ad59bcb48cb24717c757af",
"size": "4280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blockcypher/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30427"
}
],
"symlink_target": ""
}
|
"""SQLite parser plugin for Google Chrome cookies database files."""
from __future__ import unicode_literals
from dfdatetime import webkit_time as dfdatetime_webkit_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
# Register the cookie plugins.
from plaso.parsers import cookie_plugins # pylint: disable=unused-import
from plaso.parsers import sqlite
from plaso.parsers.cookie_plugins import manager as cookie_plugins_manager
from plaso.parsers.sqlite_plugins import interface
class ChromeCookieEventData(events.EventData):
  """Chrome Cookie event data.

  Attributes:
    cookie_name (str): name of the cookie.
    data (str): value of the cookie.
    host (str): hostname of host that set the cookie value.
    httponly (bool): True if the cookie cannot be accessed through client
        side script.
    path (str): path where the cookie got set.
    persistent (bool): True if the cookie is persistent.
    secure (bool): True if the cookie should only be transmitted over a
        secure channel.
    url (str): URL or path where the cookie got set.
  """

  DATA_TYPE = 'chrome:cookie:entry'

  def __init__(self):
    """Initializes event data."""
    super(ChromeCookieEventData, self).__init__(data_type=self.DATA_TYPE)
    # All attributes start out unset; ParseCookieRow fills them in per row.
    for attribute_name in (
        'cookie_name', 'data', 'host', 'httponly', 'path', 'persistent',
        'secure', 'url'):
      setattr(self, attribute_name, None)
class BaseChromeCookiePlugin(interface.SQLitePlugin):
  """Shared base for the Google Chrome cookies SQLite parser plugins."""

  # Point to few sources for URL information.
  URLS = [
      'http://src.chromium.org/svn/trunk/src/net/cookies/',
      ('http://www.dfinews.com/articles/2012/02/'
       'google-analytics-cookies-and-forensic-implications')]

  # Google Analytics __utmz variable translation.
  # Taken from:
  # http://www.dfinews.com/sites/dfinews.com/files/u739/Tab2Cookies020312.jpg
  GA_UTMZ_TRANSLATION = {
      'utmcsr': 'Last source used to access.',
      'utmccn': 'Ad campaign information.',
      'utmcmd': 'Last type of visit.',
      'utmctr': 'Keywords used to find site.',
      'utmcct': 'Path to the page of referring link.'}

  def __init__(self):
    """Initializes a plugin."""
    super(BaseChromeCookiePlugin, self).__init__()
    self._cookie_plugins = (
        cookie_plugins_manager.CookiePluginsManager.GetPlugins())

  def ParseCookieRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a cookie row.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      query (str): query that created the row.
      row (sqlite3.Row): row resulting from the query.
    """
    query_hash = hash(query)

    cookie_name = self._GetRowValue(query_hash, row, 'name')
    cookie_data = self._GetRowValue(query_hash, row, 'value')

    # Strip a leading '.' from domain cookies so the hostname is usable
    # in a URL.
    hostname = self._GetRowValue(query_hash, row, 'host_key')
    if hostname.startswith('.'):
      hostname = hostname[1:]

    httponly = self._GetRowValue(query_hash, row, 'httponly')
    path = self._GetRowValue(query_hash, row, 'path')
    persistent = self._GetRowValue(query_hash, row, 'persistent')
    secure = self._GetRowValue(query_hash, row, 'secure')

    scheme = 'https' if secure else 'http'
    url = '{0:s}://{1:s}{2:s}'.format(scheme, hostname, path)

    event_data = ChromeCookieEventData()
    event_data.cookie_name = cookie_name
    event_data.data = cookie_data
    event_data.host = hostname
    event_data.httponly = bool(httponly)
    event_data.path = path
    event_data.persistent = bool(persistent)
    event_data.query = query
    event_data.secure = bool(secure)
    event_data.url = url

    # Produce one event per timestamp column.  The expiration event is only
    # produced when the timestamp is set (zero/empty is skipped).
    timestamp_columns = (
        ('creation_utc', definitions.TIME_DESCRIPTION_CREATION, False),
        ('last_access_utc', definitions.TIME_DESCRIPTION_LAST_ACCESS, False),
        ('expires_utc', definitions.TIME_DESCRIPTION_EXPIRATION, True))

    for column_name, timestamp_description, skip_if_empty in timestamp_columns:
      timestamp = self._GetRowValue(query_hash, row, column_name)
      if skip_if_empty and not timestamp:
        continue
      date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    # Hand the cookie value to any cookie plugin registered for this
    # cookie name (e.g. Google Analytics plugins).
    matching_plugins = [
        plugin for plugin in self._cookie_plugins
        if plugin.COOKIE_NAME == cookie_name]
    for plugin in matching_plugins:
      try:
        plugin.UpdateChainAndProcess(
            parser_mediator, cookie_data=cookie_data, cookie_name=cookie_name,
            url=url)
      except Exception as exception:  # pylint: disable=broad-except
        parser_mediator.ProduceExtractionWarning(
            'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(
                plugin.NAME, exception))
class Chrome17CookiePlugin(BaseChromeCookiePlugin):
  """SQLite parser plugin for Google Chrome 17 - 65 cookies database files."""

  NAME = 'chrome_17_cookies'
  DATA_FORMAT = 'Google Chrome 17 - 65 cookies SQLite database file'

  # Tables and the columns each must contain for this plugin to claim the
  # database file.
  REQUIRED_STRUCTURE = {
      'cookies': frozenset([
          'creation_utc', 'host_key', 'name', 'value', 'path', 'expires_utc',
          'secure', 'httponly', 'last_access_utc', 'has_expires',
          'persistent']),
      'meta': frozenset([])}

  # Query to run against the database and the callback that handles each
  # resulting row.
  QUERIES = [
      (('SELECT creation_utc, host_key, name, value, path, expires_utc, '
        'secure, httponly, last_access_utc, has_expires, persistent '
        'FROM cookies'), 'ParseCookieRow')]

  # Known database schemas; used to identify the exact file format version.
  SCHEMAS = [{
      'cookies': (
          'CREATE TABLE cookies (creation_utc INTEGER NOT NULL UNIQUE PRIMARY '
          'KEY, host_key TEXT NOT NULL, name TEXT NOT NULL, value TEXT NOT '
          'NULL, path TEXT NOT NULL, expires_utc INTEGER NOT NULL, secure '
          'INTEGER NOT NULL, httponly INTEGER NOT NULL, last_access_utc '
          'INTEGER NOT NULL, has_expires INTEGER DEFAULT 1, persistent '
          'INTEGER DEFAULT 1)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
          'value LONGVARCHAR)')}]
class Chrome66CookiePlugin(BaseChromeCookiePlugin):
  """SQLite parser plugin for Google Chrome 66+ cookies database files."""

  NAME = 'chrome_66_cookies'
  DATA_FORMAT = 'Google Chrome 66 and later cookies SQLite database file'

  # Tables and the columns each must contain for this plugin to claim the
  # database file.  Chrome 66 renamed secure/httponly/persistent to
  # is_secure/is_httponly/is_persistent.
  REQUIRED_STRUCTURE = {
      'cookies': frozenset([
          'creation_utc', 'host_key', 'name', 'value', 'path', 'expires_utc',
          'is_secure', 'is_httponly', 'last_access_utc', 'has_expires',
          'is_persistent']),
      'meta': frozenset([])}

  # Query to run against the database and the callback that handles each
  # resulting row.  The is_* columns are aliased to the Chrome 17 - 65 names
  # so BaseChromeCookiePlugin.ParseCookieRow can handle both formats.
  QUERIES = [
      (('SELECT creation_utc, host_key, name, value, path, expires_utc, '
        'is_secure AS secure, is_httponly AS httponly, last_access_utc, '
        'has_expires, is_persistent AS persistent '
        'FROM cookies'), 'ParseCookieRow')]

  # Known database schemas; used to identify the exact file format version.
  SCHEMAS = [{
      'cookies': (
          'CREATE TABLE cookies (creation_utc INTEGER NOT NULL, host_key TEXT '
          'NOT NULL, name TEXT NOT NULL, value TEXT NOT NULL, path TEXT NOT '
          'NULL, expires_utc INTEGER NOT NULL, is_secure INTEGER NOT NULL, '
          'is_httponly INTEGER NOT NULL, last_access_utc INTEGER NOT NULL, '
          'has_expires INTEGER NOT NULL DEFAULT 1, is_persistent INTEGER NOT '
          'NULL DEFAULT 1, priority INTEGER NOT NULL DEFAULT 1, '
          'encrypted_value BLOB DEFAULT \'\', firstpartyonly INTEGER NOT NULL '
          'DEFAULT 0, UNIQUE (host_key, name, path))'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
          'value LONGVARCHAR)')}]
# Register both Chrome cookie plugins with the SQLite parser so plaso can
# select the appropriate one based on the database structure.
sqlite.SQLiteParser.RegisterPlugins([
    Chrome17CookiePlugin, Chrome66CookiePlugin])
|
{
"content_hash": "fad4ca4b49ab44838d48c14f5a99acd4",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 37.774647887323944,
"alnum_prop": 0.6692766592095452,
"repo_name": "rgayon/plaso",
"id": "0583f433be4aa055bc5e39d93ade44336a1605d2",
"size": "8070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/sqlite_plugins/chrome_cookies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "415"
},
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "PowerShell",
"bytes": "17771"
},
{
"name": "Python",
"bytes": "4803191"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "46225"
}
],
"symlink_target": ""
}
|
import os
import pytest
from .common import * # NOQA
# Whether the project created by the setup() fixture is deleted on teardown.
RANCHER_CLEANUP_PROJECT = os.environ.get("RANCHER_CLEANUP_PROJECT", "True")

# Shared state populated by the module-scoped setup() fixture below.
namespace = {"p_client": None, "ns": None, "cluster": None,
             "project": None, "testclient_pods": [], "workload": None}

# Seconds to wait for DNS changes to propagate before validating.
# Cast to int: os.environ.get() returns a *string* when the variable is set,
# and time.sleep() rejects strings — previously this raised TypeError
# whenever RANCHER_DNS_RESOLUTION_SECONDS was set in the environment.
DNS_RESOLUTION_DEFAULT_SECONDS = \
    int(os.environ.get("RANCHER_DNS_RESOLUTION_SECONDS", 30))

# When True, skip the ping-based DNS record tests (used while testing
# Rancher upgrades).  NOTE(review): 'ast' is not imported in this file;
# presumably re-exported by 'from .common import *' — confirm.
SKIP_PING_CHECK_TEST = \
    ast.literal_eval(os.environ.get('RANCHER_SKIP_PING_CHECK_TEST', "False"))
if_skip_ping_check_test = pytest.mark.skipif(
    SKIP_PING_CHECK_TEST,
    reason='This test is only for testing upgrading Rancher')
def create_and_validate_wl(name, con, scale, type, p_client=None, ns=None):
    """Deploy a workload, wait for its pods and validate it; return it."""
    p_client = namespace["p_client"] if p_client is None else p_client
    ns = namespace["ns"] if ns is None else ns

    created_workload = p_client.create_workload(
        name=name, containers=con, namespaceId=ns.id, scale=scale)
    wait_for_pods_in_workload(p_client, created_workload, scale)
    validate_workload(p_client, created_workload, type, ns.id, pod_count=scale)
    return created_workload
def validate_service_discovery(workload, scale,
                               p_client=None, ns=None, testclient_pods=None):
    """Check the workload's service DNS name resolves to all its pod IPs."""
    p_client = namespace["p_client"] if p_client is None else p_client
    ns = namespace["ns"] if ns is None else ns
    if testclient_pods is None:
        testclient_pods = namespace["testclient_pods"]

    pods = p_client.list_pod(workloadId=workload["id"]).data
    assert len(pods) == scale
    expected_ips = [pod["status"]["podIp"] for pod in pods]

    host = '{0}.{1}.svc.cluster.local'.format(workload.name, ns.id)
    for client_pod in testclient_pods:
        validate_dns_entry(client_pod, host, expected_ips)
def update_and_validate_workload(workload, con, scale, p_client=None, ns=None):
    """Update a workload's containers/scale and validate the rollout."""
    p_client = namespace["p_client"] if p_client is None else p_client
    ns = namespace["ns"] if ns is None else ns

    p_client.update(workload, containers=con, scale=scale)
    workload = wait_for_wl_to_active(p_client, workload)
    wait_for_pod_images(p_client, workload, ns.name, con[0]["image"], scale)
    wait_for_pods_in_workload(p_client, workload, scale)
    validate_workload(p_client, workload, "deployment", ns.name, scale)
    validate_workload_image(p_client, workload, con[0]["image"], ns)
def validate_dns_record_for_workload(workload, scale, record,
                                     p_client=None, testclient_pods=None):
    """Check the DNS record resolves to all pod IPs of the workload."""
    p_client = namespace["p_client"] if p_client is None else p_client
    if testclient_pods is None:
        testclient_pods = namespace["testclient_pods"]

    pods = p_client.list_pod(workloadId=workload["id"]).data
    assert len(pods) == scale
    expected_ips = [pod["status"]["podIp"] for pod in pods]

    for client_pod in testclient_pods:
        validate_dns_record(client_pod, record, expected_ips)
def test_service_discovery_when_workload_scale_up():
    """Service DNS keeps resolving to all pods after a scale-up."""
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    wl_name = random_test_name("test-sd-up")

    # deploy a two-pod workload and confirm service discovery works
    pod_count = 2
    workload = create_and_validate_wl(wl_name, containers, pod_count,
                                      "deployment")
    validate_service_discovery(workload, pod_count)

    # scale up to three pods and re-check after DNS propagation
    pod_count = 3
    update_and_validate_workload(workload, containers, pod_count)
    time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
    validate_service_discovery(workload, pod_count)
def test_service_discovery_when_workload_scale_down():
    """Service DNS keeps resolving to the remaining pods after a scale-down."""
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    wl_name = random_test_name("test-sd-dw")

    # deploy a three-pod workload and confirm service discovery works
    pod_count = 3
    workload = create_and_validate_wl(wl_name, containers, pod_count,
                                      "deployment")
    validate_service_discovery(workload, pod_count)

    # scale down to two pods and re-check after DNS propagation
    pod_count = 2
    update_and_validate_workload(workload, containers, pod_count)
    time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
    validate_service_discovery(workload, pod_count)
def test_service_discovery_when_workload_upgrade():
    """Service DNS keeps working across two successive image upgrades."""
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    wl_name = random_test_name("test-sd-upgrade")
    pod_count = 2

    # deploy a workload and confirm service discovery works
    workload = create_and_validate_wl(wl_name, containers, pod_count,
                                      "deployment")
    validate_service_discovery(workload, pod_count)

    # upgrade to a different image, then back again, validating DNS after
    # each rollout
    for image in ("nginx", TEST_IMAGE):
        containers = [{"name": "test1", "image": image}]
        update_and_validate_workload(workload, containers, pod_count)
        time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
        validate_service_discovery(workload, pod_count)
def test_dns_record_type_workload_when_workload_scale_up():
    """Workload-targeted DNS record follows the pods through a scale-up."""
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    wl_name = random_test_name("test-dns-up")

    # deploy a two-pod workload
    pod_count = 2
    workload = create_and_validate_wl(wl_name, containers, pod_count,
                                      "deployment")

    record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
              "name": random_test_name("record"), "namespaceId": ns.id}
    create_dns_record(record, p_client)
    # the record should resolve to the workload's pod IPs
    validate_dns_record_for_workload(workload, pod_count, record)

    # scale up to three pods and validate the record again
    pod_count = 3
    update_and_validate_workload(workload, containers, pod_count)
    time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
    validate_dns_record_for_workload(workload, pod_count, record)
def test_dns_record_type_workload_when_workload_scale_down():
    """Workload-targeted DNS record follows the pods through a scale-down."""
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    wl_name = random_test_name("test-dns-dw")

    # deploy a three-pod workload
    pod_count = 3
    workload = create_and_validate_wl(wl_name, containers, pod_count,
                                      "deployment")

    record = {"type": "dnsRecord",
              "targetWorkloadIds": [workload["id"]],
              "name": random_test_name("record"),
              "namespaceId": ns.id}
    create_dns_record(record, p_client)
    # the record should resolve to the workload's pod IPs
    validate_dns_record_for_workload(workload, pod_count, record)

    # scale down to two pods and validate the record again
    pod_count = 2
    update_and_validate_workload(workload, containers, pod_count)
    time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
    validate_dns_record_for_workload(workload, pod_count, record)
def test_dns_record_type_workload_when_workload_upgrade():
    """Workload-targeted DNS record keeps resolving across image upgrades."""
    p_client = namespace["p_client"]
    ns = namespace["ns"]
    wl_name = random_test_name("test-dns-upgrade")
    pod_count = 2

    # deploy a workload
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    workload = create_and_validate_wl(wl_name, containers, pod_count,
                                      "deployment")

    record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
              "name": random_test_name("record"), "namespaceId": ns.id}
    create_dns_record(record, p_client)
    # the record should resolve to the workload's pod IPs
    validate_dns_record_for_workload(workload, pod_count, record)

    # upgrade to a different image, then back again, validating the record
    # after each rollout
    for image in ("nginx", TEST_IMAGE):
        containers = [{"name": "test1", "image": image}]
        update_and_validate_workload(workload, containers, pod_count)
        time.sleep(DNS_RESOLUTION_DEFAULT_SECONDS)
        validate_dns_record_for_workload(workload, pod_count, record)
@if_skip_ping_check_test
def test_dns_record_type_external_ip():
    """DNS record pointing at one external IP resolves to that IP."""
    ns = namespace["ns"]
    record = {"type": "dnsRecord", "ipAddresses": ["8.8.8.8"],
              "name": random_test_name("record"), "namespaceId": ns.id}
    create_and_validate_dns_record(record, record["ipAddresses"])
@if_skip_ping_check_test
def test_dns_record_type_multiple_external_ips():
    """DNS record pointing at several external IPs resolves to all of them."""
    ns = namespace["ns"]
    record = {"type": "dnsRecord", "ipAddresses": ["8.8.8.8", "8.8.4.4"],
              "name": random_test_name("record"), "namespaceId": ns.id}
    create_and_validate_dns_record(record, record["ipAddresses"])
@if_skip_ping_check_test
def test_dns_record_type_hostname():
    """DNS record pointing at a hostname resolves to that hostname."""
    ns = namespace["ns"]
    record = {"type": "dnsRecord", "hostname": "google.com",
              "name": random_test_name("record"), "namespaceId": ns.id}
    create_and_validate_dns_record(record, [record["hostname"]])
@if_skip_ping_check_test
def test_dns_record_type_alias():
    """DNS record aliasing another record resolves to the target's hostname."""
    ns = namespace["ns"]
    first_record = {"type": "dnsRecord", "hostname": "google.com",
                    "name": random_test_name("record"), "namespaceId": ns.id}
    target_record = create_dns_record(first_record)

    alias_record = {"type": "dnsRecord",
                    "targetDnsRecordIds": [target_record["id"]],
                    "name": random_test_name("record"),
                    "namespaceId": ns.id}
    create_and_validate_dns_record(alias_record, [first_record["hostname"]])
def test_dns_record_type_workload():
    """DNS record targeting a workload resolves to its pod IPs."""
    ns = namespace["ns"]
    workload = namespace["workload"]
    p_client = namespace["p_client"]

    record = {"type": "dnsRecord", "targetWorkloadIds": [workload["id"]],
              "name": random_test_name("record"), "namespaceId": ns.id}
    pods = p_client.list_pod(workloadId=workload["id"]).data
    expected_ips = [pod["status"]["podIp"] for pod in pods]
    create_and_validate_dns_record(record, expected_ips)
def test_dns_record_type_multiple_workloads():
    """DNS record targeting two workloads resolves to the union of pod IPs."""
    ns = namespace["ns"]
    workload = namespace["workload"]
    p_client = namespace["p_client"]

    # create a second single-pod workload in the same namespace
    wlname = random_test_name("default")
    containers = [{"name": "test1", "image": TEST_IMAGE}]
    additional_workload = p_client.create_workload(
        name=wlname, containers=containers, namespaceId=ns.id, scale=1)
    wait_for_wl_to_active(p_client, additional_workload)
    awl_pods = wait_for_pods_in_workload(p_client, additional_workload, 1)
    wait_for_pod_to_running(p_client, awl_pods[0])

    record = {"type": "dnsRecord",
              "targetWorkloadIds": [workload["id"], additional_workload["id"]],
              "name": random_test_name("record"),
              "namespaceId": ns.id}

    # expected IPs are the pod IPs of both workloads combined
    expected_ips = [
        pod["status"]["podIp"]
        for wl in (workload, additional_workload)
        for pod in p_client.list_pod(workloadId=wl["id"]).data]
    create_and_validate_dns_record(record, expected_ips)
def test_dns_record_type_selector():
    """DNS record using a label selector resolves to the matching pod IPs."""
    ns = namespace["ns"]
    workload = namespace["workload"]
    p_client = namespace["p_client"]

    selector = workload["labels"]["workload.user.cattle.io/workloadselector"]
    record = {"type": "dnsRecord",
              "selector":
                  {"workload.user.cattle.io/workloadselector": selector},
              "name": random_test_name("record"), "namespaceId": ns.id}

    pods = p_client.list_pod(workloadId=workload["id"]).data
    expected_ips = [pod["status"]["podIp"] for pod in pods]
    create_and_validate_dns_record(record, expected_ips)
def create_and_validate_dns_record(record, expected, p_client=None,
                                   testclient_pods=None):
    """Create a DNS record and validate it from every test-client pod."""
    if testclient_pods is None:
        testclient_pods = namespace["testclient_pods"]
    create_dns_record(record, p_client)
    assert len(testclient_pods) > 0
    for client_pod in testclient_pods:
        validate_dns_record(client_pod, record, expected)
def create_dns_record(record, p_client=None):
    """Create a DNS record and wait until it reaches the 'active' state."""
    p_client = namespace["p_client"] if p_client is None else p_client
    created_record = p_client.create_dns_record(record)
    wait_for_condition(
        p_client, created_record,
        lambda x: x.state == "active",
        lambda x: 'State is: ' + x.state)
    return created_record
@pytest.fixture(scope='module', autouse="True")
def setup(request):
    """Module-scoped fixture: creates a project, two namespaces, the shared
    target workload and the two DNS test-client pods used by every test.

    NOTE(review): autouse="True" is a (truthy) string; pytest expects a bool
    — it works, but was probably intended as autouse=True.
    """
    client, cluster = get_admin_client_and_cluster()
    create_kubeconfig(cluster)
    p, ns = create_project_and_ns(ADMIN_TOKEN, cluster, "testsd")
    p_client = get_project_client_for_token(p, ADMIN_TOKEN)
    c_client = get_cluster_client_for_token(cluster, ADMIN_TOKEN)
    # second namespace in the same project, used for the cross-namespace
    # DNS test client below
    new_ns = create_ns(c_client, cluster, p)
    namespace["p_client"] = p_client
    namespace["ns"] = ns
    namespace["cluster"] = cluster
    namespace["project"] = p
    # shared target workload (2 pods) in the primary namespace; its first
    # running pod doubles as a DNS test client
    wlname = random_test_name("default")
    con = [{"name": "test1",
            "image": TEST_IMAGE}]
    workload = p_client.create_workload(name=wlname,
                                        containers=con,
                                        namespaceId=ns.id,
                                        scale=2)
    wait_for_wl_to_active(p_client, workload)
    namespace["workload"] = workload
    pods = wait_for_pods_in_workload(p_client, workload, 2)
    pod = wait_for_pod_to_running(p_client, pods[0])
    namespace["testclient_pods"].append(pod)
    # single-pod workload in the second namespace: a test client in another
    # namespace to exercise cross-namespace DNS resolution
    workload = p_client.create_workload(name=wlname,
                                        containers=con,
                                        namespaceId=new_ns.id,
                                        scale=1)
    wait_for_wl_to_active(p_client, workload)
    pods = wait_for_pods_in_workload(p_client, workload, 1)
    pod = wait_for_pod_to_running(p_client, pods[0])
    namespace["testclient_pods"].append(pod)
    assert len(namespace["testclient_pods"]) == 2

    def fin():
        # Teardown: deleting the project removes everything created above.
        client = get_admin_client()
        client.delete(namespace["project"])

    if RANCHER_CLEANUP_PROJECT == "True":
        request.addfinalizer(fin)
|
{
"content_hash": "f30bc48ead92419063a81ffc8a7e01c3",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 79,
"avg_line_length": 33.779620853080566,
"alnum_prop": 0.622728867064188,
"repo_name": "sabiodelhielo/rancher-validation",
"id": "07b93e5982da3bb084d3ffdb6bdb7e92e6cbb989",
"size": "14255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/v3_api/test_service_discovery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20577"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
}
|
from jinja2 import utils
import frappe
from frappe import _
from frappe.core.utils import html2text
from frappe.utils import sanitize_html
from frappe.utils.global_search import web_search
def get_context(context):
    """Build the /search page context from the ?q= query parameter."""
    context.no_cache = 1
    raw_query = frappe.form_dict.q
    if not raw_query:
        context.title = _("Search")
        return
    # sanitize then escape the user-supplied query before using it
    query = str(utils.escape(sanitize_html(raw_query)))
    context.title = _("Search Results for")
    context.query = query
    context.route = "/search"
    context.update(
        get_search_results(query, frappe.utils.sanitize_html(frappe.form_dict.scope)))
@frappe.whitelist(allow_guest=True)
def get_search_results(text, scope=None, start=0, as_html=False):
    """Run a website search and decorate each hit with a highlighted preview.

    Args:
        text: the (already sanitized) search string.
        scope: optional route prefix restricting the search.
        start: pagination offset passed through to web_search.
        as_html: when truthy, render the results via the search_result
            template instead of returning the raw list.

    Returns:
        frappe._dict with 'results' and, when another page exists, 'has_more'.
    """
    # Fetch one extra row (21) so we can detect whether another page exists.
    results = web_search(text, scope, start, limit=21)
    out = frappe._dict()
    if len(results) == 21:
        out.has_more = 1
        results = results[:20]
    for d in results:
        try:
            d.content = html2text(d.content)
            # Find the first case-insensitive occurrence of the query;
            # raises ValueError (caught below) when it is not present.
            index = d.content.lower().index(text.lower())
            # Wrap the matched span in <mark> tags for highlighting.
            d.content = (
                d.content[:index]
                + "<mark>"
                + d.content[index:][: len(text)]
                + "</mark>"
                + d.content[index + len(text) :]
            )
            # Build a preview window around the match: up to 40 characters
            # of leading context, with "..." markers when truncated.  The
            # magic offsets (+87 / +47) presumably account for the 13
            # characters added by the <mark></mark> tags plus trailing
            # context — TODO confirm before changing.
            if index < 40:
                start = 0
                prefix = ""
            else:
                start = index - 40
                prefix = "..."
            suffix = ""
            if (index + len(text) + 47) < len(d.content):
                suffix = "..."
            d.preview = prefix + d.content[start : start + len(text) + 87] + suffix
        except Exception:
            # Query not found in the content (or content not parseable):
            # fall back to a plain 97-character snippet.
            d.preview = html2text(d.content)[:97] + "..."
    out.results = results
    if as_html:
        out.results = frappe.render_template("templates/includes/search_result.html", out)
    return out
|
{
"content_hash": "3537a68025479cf25361d8e22ee34d41",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 95,
"avg_line_length": 24.238095238095237,
"alnum_prop": 0.6542239685658153,
"repo_name": "yashodhank/frappe",
"id": "d8a939cb157d2b065af794d4f922e72ac7f97b15",
"size": "1527",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/www/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "320627"
},
{
"name": "GCC Machine Description",
"bytes": "2474"
},
{
"name": "HTML",
"bytes": "179539"
},
{
"name": "JavaScript",
"bytes": "1099003"
},
{
"name": "Python",
"bytes": "1430023"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
import xml.etree.ElementTree as ET
import timeHelper
# XML template for a WeChat text reply; the {PLACEHOLDER} tokens are
# substituted by msgHandler.responseXML().
RESPONSE_TEXT_TEMPLATE = '''
<xml>
<ToUserName><![CDATA[{TO_USER}]]></ToUserName>
<FromUserName><![CDATA[{FROM_USER}]]></FromUserName>
<CreateTime>{TIME_STEMP}</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[{RESPONSE_CONTENT}]]></Content>
</xml>
'''
class msgHandler:
    """Dispatch an incoming WeChat XML message to the matching handler and
    build the XML reply text.  (Python 2 code.)"""

    def __init__(self, data):
        # data: raw XML request body received from the WeChat server.
        self.data = data
        self.dict = self._xmlToDict(self.data)
        # 'event' messages (e.g. subscribe) vs. plain text messages.
        if self.dict['MsgType'] == 'event':
            self.worker = eventHandler(self.dict['FromUserName'],self.dict['Event'])
        else:
            self.worker = txtmsgHandler(self.dict['FromUserName'],self.dict['Content'])

    def response(self):
        # Build the reply XML from the worker's response text.
        responseDict = self.responseDict()
        text = self.responseXML(responseDict)
        return text

    def _xmlToDict(self, xmlText):
        # Flatten the single-level XML document into a tag -> text dict.
        xmlDict = {}
        itemlist = ET.fromstring(xmlText)
        for child in itemlist:
            xmlDict[child.tag] = child.text
        print xmlDict  # debug output
        return xmlDict

    def responseXML(self, dataDict):
        # Substitute each {KEY} placeholder in the template with its value;
        # an empty dict yields an empty reply.
        if dataDict:
            text = RESPONSE_TEXT_TEMPLATE
            for key, value in dataDict.items():
                parameter = '{%s}' % key
                text = text.replace(parameter, value)
            print text  # debug output
        else:
            text = ''
        return text

    def responseDict(self):
        # Assemble template parameters; any failure (e.g. the worker produced
        # no response attribute) is swallowed and an empty dict is returned,
        # which responseXML() turns into an empty reply.
        responseDict = {}
        try:
            responseDict['RESPONSE_CONTENT'] = self.worker.response.encode('UTF-8')
            responseDict['TO_USER'] = self.dict['FromUserName']
            responseDict['FROM_USER'] = self.dict['ToUserName']
            responseDict['TIME_STEMP'] = str(timeHelper.unixTimeStamp())
        except:
            pass
        return responseDict
class eventHandler:
    """Handle WeChat 'event' messages; currently only 'subscribe'."""

    # Welcome text (Chinese) sent to users who subscribe.
    MSG_WELCOME = u'欢迎您关注我,想了解我,就请发送“帮助”或“?”'

    def __init__(self, user, event):
        # Only the subscribe event produces a response; other events leave
        # 'response' unset (msgHandler.responseDict() tolerates this).
        if event == 'subscribe':
            self.response = self.MSG_WELCOME
class txtmsgHandler:
    """Handle plain text messages: store them, or return the last stored one.

    Any text except the help/query commands is recorded in the per-user
    sqlite store; the query command ('cx'/'查询') retrieves the most recent
    entry.  (Python 2 code.)
    """

    # Help text (Chinese) returned for the help command and by default.
    MSG_HELP = u'''我会记录您对我说的除“查询”和“帮助”以外的所有的话,“查询”会得到您最后一次对我说的话
'''
    # Randomly chosen confirmations (Chinese) after a successful store.
    MSG_SUCCESS = [u'存储完成', u'我存好了,随时来查哦',u'搞定,收工']

    def __init__(self, user, reqMsg):
        self.req = reqMsg.lower()
        self.db = simpledb(user)
        # Default to the help text; _handle_req may overwrite it.
        self.response = self.MSG_HELP
        self._handle_req()

    def _handle_req(self):
        # Dispatch: help commands keep the default response; the query
        # command looks up the last entry; everything else is recorded.
        if self.req in ['help', '帮助', '?', u'?']: return
        if self.req in ['cx', '查询']: return self.chaxun()
        else: return self.jilu()

    def chaxun(self):
        # 'chaxun' = query: fetch the last stored message and when it was said.
        timepoint, content = self.db.chaxun()
        self.response = u'您在{}说过:{}'.format(timepoint, content)
        pass

    def jilu(self):
        # 'jilu' = record: store the message and confirm.
        self.db.jilu(self.req)
        self.response = self._get_success_response()

    def _get_success_response(self):
        # Pick one of the canned success messages at random.
        import random
        return self.MSG_SUCCESS[random.randint(0,len(self.MSG_SUCCESS)-1)]
import os
import sqlite3
import timeHelper
class simpledb:
    """Tiny per-user SQLite store for 'yulu' (saying) records.

    Each user gets a file under ./DB/<dbName>.db holding a single table
    with (autoincrement id, unix timestamp, text content) rows.
    """

    def __init__(self, dbName):
        dbPath = os.path.join(os.getcwd(), 'DB')
        if not os.path.isdir(dbPath):
            os.mkdir(dbPath)
        name = os.path.join(dbPath, dbName + '.db')
        print(name)  # debug trace of the resolved DB path
        createNeeded = False
        if not os.path.isfile(name):
            print("First Time Store, Create DB")
            createNeeded = True
        self.conn = sqlite3.connect(name)
        self.c = self.conn.cursor()
        if createNeeded:
            self._create_db()

    def __del__(self):
        # Originally meant to let the DB file be deleted by hand, but a
        # sqlite quirk kept the file open even after close(), so the idea
        # was dropped; the close() remains. (Translated from the original
        # Chinese comment.)
        self.conn.close()

    def _create_db(self):
        """Create the 'yulu' table and its timestamp index."""
        self.c.execute('''CREATE TABLE yulu
        (id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, timestamp INTEGER, content text)''')
        self.c.execute('''CREATE INDEX dateIndex ON yulu(timestamp)''')
        self.conn.commit()

    def jilu(self, content):
        """Store one saying with the current unix timestamp."""
        timestamp = timeHelper.unixTimeStamp()
        # BUG FIX: the old string-formatted SQL broke on any quote in the
        # content and was open to SQL injection; use a parameterized query.
        self.c.execute('INSERT INTO yulu (timestamp, content) VALUES (?, ?)',
                       (timestamp, content))
        self.conn.commit()

    def chaxun(self):
        """Return (readable time, content) of the newest saying, or ('-', '-')."""
        self.c.execute('''SELECT * from yulu ORDER BY id DESC LIMIT 1''')
        try:
            id, timestamp, content = self.c.fetchone()
            timepoint = timeHelper.timestamp2datetime(timestamp)
        except Exception:
            # Empty table: fetchone() returns None and the unpack fails.
            # (Narrowed from a bare 'except:'.)
            timepoint = '-'
            content = '-'
        return timepoint, content
|
{
"content_hash": "fd2199093e4c46eeda7a9a135fe2af2b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 109,
"avg_line_length": 31.73381294964029,
"alnum_prop": 0.5771933801858989,
"repo_name": "hizhangqi/simpleweixinservicer",
"id": "45c783bcacc66c8ca943b1ca44cef8584b45cff6",
"size": "4744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "funcIf4weixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8344"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Attach one ``OneToOneField`` per metric to the three counter models.

    Rewritten data-driven: every operation is a ``migrations.AddField`` that
    differs only in (model_name, field name, related_name), so the triples
    are tabulated once and the operations list is generated from them, in
    the original order.
    """

    dependencies = [
        ('myapp', '0006_auto_20150629_1628'),
    ]

    # (model_name, field_name, related_name) for every counter field.
    _COUNTER_FIELDS = [
        ('buildingcounter', 'ape_kwh', '14'),
        ('buildingcounter', 'co2_lt_m2', '13'),
        ('buildingcounter', 'co2_tn', '11'),
        ('buildingcounter', 'co2_tn_m2', '12'),
        ('buildingcounter', 'cosf', '10'),
        ('buildingcounter', 'euro_forecast', '18'),
        ('buildingcounter', 'euro_m2_electricity', '15'),
        ('buildingcounter', 'euro_m2_liquidfuel', '16'),
        ('buildingcounter', 'euro_m2_monthly', '17'),
        ('buildingcounter', 'kw', '9'),
        ('buildingcounter', 'kwh', '1'),
        ('buildingcounter', 'kwh_m2', '2'),
        ('buildingcounter', 'kwh_m2_cooling', '4'),
        ('buildingcounter', 'kwh_m2_heating', '5'),
        ('buildingcounter', 'kwh_m2_lighting', '3'),
        ('buildingcounter', 'kwh_m2_usagehours', '8'),
        ('buildingcounter', 'kwh_m2_user', '7'),
        ('buildingcounter', 'lt_m2', '6'),
        ('electricvehiclecounter', 'available_charging_points', '36'),
        ('electricvehiclecounter', 'co2_tn', '37'),
        ('electricvehiclecounter', 'co2_tn_user', '38'),
        ('electricvehiclecounter', 'euro_forecast', '41'),
        ('electricvehiclecounter', 'euro_m2_monthly', '40'),
        ('electricvehiclecounter', 'euro_user', '39'),
        ('electricvehiclecounter', 'kwh', '33'),
        ('electricvehiclecounter', 'kwh_user', '34'),
        ('electricvehiclecounter', 'total_charging_points', '35'),
        ('streetlightingcounter', 'ape_kwh', '29'),
        ('streetlightingcounter', 'co2_lt_m2', '28'),
        ('streetlightingcounter', 'co2_tn', '26'),
        ('streetlightingcounter', 'co2_tn_km', '27'),
        ('streetlightingcounter', 'cosf', '24'),
        ('streetlightingcounter', 'euro_forecast', '32'),
        ('streetlightingcounter', 'euro_line', '30'),
        ('streetlightingcounter', 'euro_monthly', '31'),
        ('streetlightingcounter', 'kw', '23'),
        ('streetlightingcounter', 'kwh', '19'),
        ('streetlightingcounter', 'kwh_km', '22'),
        ('streetlightingcounter', 'kwh_light', '21'),
        ('streetlightingcounter', 'kwh_line', '20'),
        ('streetlightingcounter', 'operating_lights_percentage', '25'),
    ]

    # The outermost iterable of a class-body comprehension is evaluated in
    # class scope, so _COUNTER_FIELDS is visible here.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name=field_name,
            field=models.OneToOneField(
                default=0, to='myapp.Value', related_name=related_name),
            preserve_default=False,
        )
        for (model_name, field_name, related_name) in _COUNTER_FIELDS
    ]
|
{
"content_hash": "a3e25cee098c0905a855fe1f6827d785",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 87,
"avg_line_length": 38.95752895752896,
"alnum_prop": 0.5646184340931616,
"repo_name": "mpetyx/energagement",
"id": "0d86dca1e5dc79a662fcb16a40161961ab7a4260",
"size": "10114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "energagement/myapp/migrations/0007_auto_20150629_1643.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22435"
},
{
"name": "HTML",
"bytes": "234304"
},
{
"name": "JavaScript",
"bytes": "58711"
},
{
"name": "Python",
"bytes": "82307"
}
],
"symlink_target": ""
}
|
from testsuite_generator import Tool
|
{
"content_hash": "529b7e9094bacb60a7e3e80275c966bb",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 36,
"avg_line_length": 36,
"alnum_prop": 0.8888888888888888,
"repo_name": "xcgspring/XSTAF",
"id": "1e4be0d28f0836d9883dd5dd85ae7558b6d81d18",
"size": "36",
"binary": false,
"copies": "1",
"ref": "refs/heads/ver0.1",
"path": "XSTAF/tools/testsuite_generator/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7227"
},
{
"name": "Python",
"bytes": "980326"
}
],
"symlink_target": ""
}
|
import os
import imghdr
from collections import namedtuple
from io import BytesIO
import numpy as np
from .. import io, img_as_ubyte
from ..transform import resize
from ..color import color_dict
from ..io.util import file_or_url_context, is_url
import six
from six.moves.urllib_parse import urlparse
from six.moves.urllib import request
# Re-exported alias — NOTE(review): presumably kept for callers/tests that
# patch or use this module's `urlopen`; confirm before removing.
urlopen = request.urlopen

# Convert colors from `skimage.color` to uint8 and allow access through
# dict or a named tuple.
color_dict = dict((name, tuple(int(255 * c + 0.5) for c in rgb))
                  for name, rgb in six.iteritems(color_dict))
colors = namedtuple('colors', color_dict.keys())(**color_dict)
def open(path):
    """Return Picture object from the given image path.

    NOTE: shadows the builtin ``open`` within this module's namespace;
    this is the novice-facing entry point.
    """
    return Picture(path)
def _verify_picture_index(index):
"""Raise error if picture index is not a 2D index/slice."""
if not (isinstance(index, tuple) and len(index) == 2):
raise IndexError("Expected 2D index but got {0!r}".format(index))
if all(isinstance(i, int) for i in index):
return index
# In case we need to fix the array index, convert tuple to list.
index = list(index)
for i, dim_slice in enumerate(index):
# If either index is a slice, ensure index object returns 2D array.
if isinstance(dim_slice, int):
index[i] = dim_slice = slice(dim_slice, dim_slice + 1)
return tuple(index)
def rgb_transpose(array):
    """Return RGB array with first 2 axes transposed."""
    # Swap rows/columns while leaving the color-channel axis in place.
    return np.transpose(array, axes=(1, 0, 2))
def array_to_xy_origin(image):
    """Return view of image transformed from array to Cartesian origin.

    Flips the row axis (so y grows upward) and then swaps the first two
    axes, keeping the color channels last.
    """
    flipped = image[::-1]
    return np.transpose(flipped, (1, 0, 2))
def xy_to_array_origin(image):
    """Return view of image transformed from Cartesian to array origin.

    Inverse of ``array_to_xy_origin``: flips the y axis and swaps the
    first two axes back, keeping the color channels last.
    """
    flipped = image[:, ::-1]
    return np.transpose(flipped, (1, 0, 2))
class Pixel(object):
    """A single pixel in a Picture.

    Attributes
    ----------
    pic : Picture
        The Picture object that this pixel references.
    array : array_like
        Byte array with raw image data (RGB).
    x : int
        Horizontal coordinate of this pixel (left = 0).
    y : int
        Vertical coordinate of this pixel (bottom = 0).
    rgb : tuple
        RGB tuple with red, green, and blue components (0-255)
    alpha : int
        Transparency component (0-255), 255 (opaque) by default
    """

    def __init__(self, pic, array, x, y, rgb, alpha=255):
        # NOTE: `array` is accepted for API compatibility but never stored;
        # writes go through `pic.xy_array` in `_setpixel`.
        self._picture = pic
        self._x = x
        self._y = y
        self._red = self._validate(rgb[0])
        self._green = self._validate(rgb[1])
        self._blue = self._validate(rgb[2])
        self._alpha = self._validate(alpha)

    @property
    def x(self):
        """Horizontal location of this pixel in the parent image(left = 0)."""
        return self._x

    @property
    def y(self):
        """Vertical location of this pixel in the parent image (bottom = 0)."""
        return self._y

    @property
    def red(self):
        """The red component of the pixel (0-255)."""
        return self._red

    @red.setter
    def red(self, value):
        self._red = self._validate(value)
        self._setpixel()

    @property
    def green(self):
        """The green component of the pixel (0-255)."""
        return self._green

    @green.setter
    def green(self, value):
        self._green = self._validate(value)
        self._setpixel()

    @property
    def blue(self):
        """The blue component of the pixel (0-255)."""
        return self._blue

    @blue.setter
    def blue(self, value):
        self._blue = self._validate(value)
        self._setpixel()

    @property
    def alpha(self):
        """The transparency component of the pixel (0-255)."""
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        self._alpha = self._validate(value)
        self._setpixel()

    @property
    def rgb(self):
        """The RGB color components of the pixel (3 values 0-255)."""
        return (self.red, self.green, self.blue)

    @rgb.setter
    def rgb(self, value):
        # A 4-tuple is delegated to the rgba setter; a 3-tuple resets
        # alpha to fully opaque.
        if len(value) == 4:
            self.rgba = value
        else:
            self._red, self._green, self._blue \
                = (self._validate(v) for v in value)
            self._alpha = 255
            self._setpixel()

    @property
    def rgba(self):
        """The RGB color and transparency components of the pixel
        (4 values 0-255).
        """
        return (self.red, self.green, self.blue, self.alpha)

    @rgba.setter
    def rgba(self, value):
        self._red, self._green, self._blue, self._alpha \
            = (self._validate(v) for v in value)
        self._setpixel()

    def _validate(self, value):
        """Verifies that the pixel value is in [0, 255]."""
        try:
            value = int(value)
            if (value < 0) or (value > 255):
                raise ValueError()
        except ValueError:
            msg = "Expected an integer between 0 and 255, but got {0} instead!"
            raise ValueError(msg.format(value))
        return value

    def _setpixel(self):
        # Write the full RGBA value back into the parent picture's
        # bottom-left-origin array and mark the picture as modified.
        self._picture.xy_array[self._x, self._y] = self.rgba
        self._picture._array_modified()

    def __eq__(self, other):
        # BUG FIX: previously fell through and returned None for non-Pixel
        # operands; NotImplemented lets Python try the reflected comparison
        # and fall back to identity, so `pixel == other` is a proper bool.
        if isinstance(other, Pixel):
            return self.rgba == other.rgba
        return NotImplemented

    def __ne__(self, other):
        # Explicit __ne__ keeps `!=` consistent with __eq__ on Python 2,
        # which this file still supports (six is imported at module level).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        args = self.red, self.green, self.blue, self.alpha
        return "Pixel(red={0}, green={1}, blue={2}, alpha={3})".format(*args)
class Picture(object):
    """A 2-D picture made up of pixels.

    Attributes
    ----------
    path : str
        Path to an image file to load / URL of an image
    array : array
        Raw RGB or RGBA image data [0-255], with origin at top-left.
    xy_array : array
        Raw RGB or RGBA image data [0-255], with origin at bottom-left.

    Examples
    --------
    Load an image from a file

    >>> from skimage import novice
    >>> from skimage import data
    >>> picture = novice.open(data.data_dir + '/chelsea.png')

    Load an image from a URL. URL must start with http(s):// or ftp(s)://

    >>> picture = novice.open('http://scikit-image.org/_static/img/logo.png')

    Create a blank 100 pixel wide, 200 pixel tall white image

    >>> pic = Picture.from_size((100, 200), color=(255, 255, 255))

    Use numpy to make an RGB byte array (shape is height x width x 3)

    >>> import numpy as np
    >>> data = np.zeros(shape=(200, 100, 3), dtype=np.uint8)
    >>> data[:, :, 0] = 255  # Set red component to maximum
    >>> pic = Picture(array=data)

    Get the bottom-left pixel

    >>> pic[0, 0]
    Pixel(red=255, green=0, blue=0, alpha=255)

    Get the top row of the picture

    >>> pic[:, pic.height-1]
    Picture(100 x 1)

    Set the bottom-left pixel to black

    >>> pic[0, 0] = (0, 0, 0)

    Set the top row to red

    >>> pic[:, pic.height-1] = (255, 0, 0)
    """

    def __init__(self, path=None, array=None, xy_array=None):
        self._modified = False
        self.scale = 1
        self._path = None
        self._format = None
        n_args = len([a for a in [path, array, xy_array] if a is not None])
        if n_args != 1:
            msg = "Must provide a single keyword arg (path, array, xy_array)."
            # BUG FIX: the ValueError was instantiated but never raised, so
            # invalid calls crashed later with a confusing AttributeError.
            raise ValueError(msg)
        elif path is not None:
            if not is_url(path):
                path = os.path.abspath(path)
            self._path = path
            with file_or_url_context(path) as context:
                self.array = img_as_ubyte(io.imread(context))
                self._format = imghdr.what(context)
        elif array is not None:
            self.array = array
        elif xy_array is not None:
            self.xy_array = xy_array
        # Force RGBA internally (use max alpha)
        if self.array.shape[-1] == 3:
            self.array = np.insert(self.array, 3, values=255, axis=2)

    @staticmethod
    def from_size(size, color='black'):
        """Return a Picture of the specified size and a uniform color.

        Parameters
        ----------
        size : tuple
            Width and height of the picture in pixels.
        color : tuple or str
            RGB or RGBA tuple with the fill color for the picture [0-255] or
            a valid key in `color_dict`.
        """
        if isinstance(color, six.string_types):
            color = color_dict[color]
        rgb_size = tuple(size) + (len(color),)
        color = np.array(color, dtype=np.uint8)
        array = np.ones(rgb_size, dtype=np.uint8) * color
        # Force RGBA internally (use max alpha)
        if array.shape[-1] == 3:
            array = np.insert(array, 3, values=255, axis=2)
        return Picture(array=array)

    @property
    def array(self):
        """Image data stored as numpy array."""
        return self._array

    @array.setter
    def array(self, array):
        # Keep both origin conventions in sync.
        self._array = array
        self._xy_array = array_to_xy_origin(array)

    @property
    def xy_array(self):
        """Image data stored as numpy array with origin at the bottom-left."""
        return self._xy_array

    @xy_array.setter
    def xy_array(self, array):
        self._xy_array = array
        self._array = xy_to_array_origin(array)

    def save(self, path):
        """Saves the picture to the given path.

        Parameters
        ----------
        path : str
            Path (with file extension) where the picture is saved.
        """
        io.imsave(path, self._rescale(self.array))
        self._modified = False
        self._path = os.path.abspath(path)
        self._format = imghdr.what(path)

    @property
    def path(self):
        """The path to the picture."""
        return self._path

    @property
    def modified(self):
        """True if the picture has changed."""
        return self._modified

    def _array_modified(self):
        # Any in-place pixel write invalidates the on-disk path.
        self._modified = True
        self._path = None

    @property
    def format(self):
        """The image format of the picture."""
        return self._format

    @property
    def size(self):
        """The size (width, height) of the picture."""
        return self.xy_array.shape[:2]

    @size.setter
    def size(self, value):
        # Don't resize if no change in size
        if (value[0] != self.width) or (value[1] != self.height):
            # skimage dimensions are flipped: y, x
            new_size = (int(value[1]), int(value[0]))
            new_array = resize(self.array, new_size, order=0,
                               preserve_range=True)
            self.array = new_array.astype(np.uint8)
            self._array_modified()

    @property
    def width(self):
        """The width of the picture."""
        return self.size[0]

    @width.setter
    def width(self, value):
        self.size = (value, self.height)

    @property
    def height(self):
        """The height of the picture."""
        return self.size[1]

    @height.setter
    def height(self, value):
        self.size = (self.width, value)

    def _repr_png_(self):
        # IPython rich display hook.
        return io.Image(self._rescale(self.array))._repr_png_()

    def show(self):
        """Display the image."""
        io.imshow(self._rescale(self.array))
        io.show()

    def _makepixel(self, x, y):
        """Create a Pixel object for a given x, y location."""
        rgb = self.xy_array[x, y]
        return Pixel(self, self.array, x, y, rgb)

    def _rescale(self, array):
        """Rescale image according to scale factor."""
        if self.scale == 1:
            return array
        new_size = (self.height * self.scale, self.width * self.scale)
        return img_as_ubyte(resize(array, new_size, order=0))

    def _get_channel(self, channel):
        """Return a specific dimension out of the raw image data slice."""
        return self._array[:, :, channel]

    def _set_channel(self, channel, value):
        """Set a specific dimension in the raw image data slice."""
        self._array[:, :, channel] = value

    @property
    def red(self):
        """The red component of the pixel (0-255)."""
        return self._get_channel(0).ravel()

    @red.setter
    def red(self, value):
        self._set_channel(0, value)

    @property
    def green(self):
        """The green component of the pixel (0-255)."""
        return self._get_channel(1).ravel()

    @green.setter
    def green(self, value):
        self._set_channel(1, value)

    @property
    def blue(self):
        """The blue component of the pixel (0-255)."""
        return self._get_channel(2).ravel()

    @blue.setter
    def blue(self, value):
        self._set_channel(2, value)

    @property
    def alpha(self):
        """The transparency component of the pixel (0-255)."""
        return self._get_channel(3).ravel()

    @alpha.setter
    def alpha(self, value):
        self._set_channel(3, value)

    @property
    def rgb(self):
        """The RGB color components of the pixel (3 values 0-255)."""
        return self.xy_array[:, :, :3]

    @rgb.setter
    def rgb(self, value):
        self.xy_array[:, :, :3] = value

    @property
    def rgba(self):
        """The RGBA color components of the pixel (4 values 0-255)."""
        return self.xy_array

    @rgba.setter
    def rgba(self, value):
        self.xy_array[:] = value

    def __iter__(self):
        """Iterates over all pixels in the image."""
        for x in range(self.width):
            for y in range(self.height):
                yield self._makepixel(x, y)

    def __getitem__(self, xy_index):
        """Return `Picture`s for slices and `Pixel`s for indexes."""
        xy_index = _verify_picture_index(xy_index)
        if all(isinstance(index, int) for index in xy_index):
            return self._makepixel(*xy_index)
        else:
            return Picture(xy_array=self.xy_array[xy_index])

    def __setitem__(self, xy_index, value):
        xy_index = _verify_picture_index(xy_index)
        if isinstance(value, tuple):
            self[xy_index].rgb = value
        elif isinstance(value, Picture):
            self.xy_array[xy_index] = value.xy_array
        else:
            raise TypeError("Invalid value type")
        self._array_modified()

    def __eq__(self, other):
        # NOTE(review): raising (rather than returning NotImplemented) makes
        # comparison against non-Pictures an error; kept as-is since callers
        # may rely on it.
        if not isinstance(other, Picture):
            raise NotImplementedError()
        return np.all(self.array == other.array)

    def __repr__(self):
        return "Picture({0} x {1})".format(*self.size)
if __name__ == '__main__':
    # Run the docstring examples above as a self-test.
    import doctest
    doctest.testmod()
|
{
"content_hash": "dfb11aff96b03a2104d9e57add11c7f6",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 79,
"avg_line_length": 29.11066398390342,
"alnum_prop": 0.5739563173901023,
"repo_name": "warmspringwinds/scikit-image",
"id": "24c2ff0ea63c8bd7df6fa9229317bf8ea630c29d",
"size": "14468",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "skimage/novice/_novice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "76670"
},
{
"name": "Makefile",
"bytes": "449"
},
{
"name": "Python",
"bytes": "2152461"
}
],
"symlink_target": ""
}
|
"""Decorador auxiliar
Debe instalarse 'graphviz' en el sistema para que funcione.
Ubuntu: sudo apt-get install graphviz
Mac: brew install graphviz
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import with_statement
import os
import sys
import vcr
from functools import wraps
from pycallgraph import PyCallGraph
from pycallgraph import Config
from pycallgraph import GlobbingFilter
from pycallgraph.output import GraphvizOutput
# módulo de ejemplo que se quiere analizar
import pydatajson
SAMPLES_DIR = os.path.join("tests", "samples")
TEMP_DIR = os.path.join("tests", "temp")
PROFILING_DIR = os.path.join("tests", "profiling")

# Ensure the profiling output directory exists.  (Replaced a conditional
# expression used purely for its side effect — an anti-pattern.)
if not os.path.exists(PROFILING_DIR):
    os.makedirs(PROFILING_DIR)

# Record HTTP interactions once and replay them on subsequent runs.
# ("cassetes" spelling kept: it is the on-disk path already in use.)
VCR = vcr.VCR(path_transformer=vcr.VCR.ensure_suffix('.yaml'),
              cassette_library_dir=os.path.join(
                  "tests", "cassetes", "profiling"),
              record_mode='once')
def profile(profiling_result_path):
    """Decorator factory: run the wrapped function under pycallgraph.

    The call graph of each invocation is rendered with Graphviz to
    ``profiling_result_path``.
    """
    def fn_decorator(fn):
        """Wrap ``fn`` with call-graph collection."""
        @wraps(fn)
        def fn_decorated(*args, **kwargs):
            output = GraphvizOutput()
            output.output_file = profiling_result_path
            with PyCallGraph(output=output, config=None):
                fn(*args, **kwargs)
        return fn_decorated
    return fn_decorator
@VCR.use_cassette()
@profile("tests/profiling/profiling_test.png")
def main():
    """Profile the function that saves a catalog to Excel."""
    # lightweight example (kept for quick local runs):
    # original_catalog = pydatajson.DataJson(
    #     os.path.join(SAMPLES_DIR, "catalogo_justicia.json"))

    # large example: fetch a real catalog, keep only the first
    # `datasets_cant` datasets to bound the run time.
    datasets_cant = 200
    original_catalog = pydatajson.DataJson(
        "http://infra.datos.gob.ar/catalog/sspm/data.json")
    original_catalog["dataset"] = original_catalog["dataset"][:datasets_cant]
    tmp_xlsx = os.path.join(TEMP_DIR, "xlsx_catalog.xlsx")
    original_catalog.to_xlsx(tmp_xlsx)


if __name__ == '__main__':
    main()
|
{
"content_hash": "827556f2cf29a16257504c5fe1034e4e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 28.657894736842106,
"alnum_prop": 0.6758494031221304,
"repo_name": "datosgobar/pydatajson",
"id": "844e4ea26c85b5247afe6d073dcce27b6d3c43b9",
"size": "2232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/profiling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2777"
},
{
"name": "Python",
"bytes": "534331"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
}
|
from settings import *
import sys
if '%d' in MEDIA_URL:
    # Interpolate the media version number into the URL (cache busting).
    MEDIA_URL = MEDIA_URL % MEDIA_VERSION
if '%s' in ADMIN_MEDIA_PREFIX:
    # Serve the admin media from under MEDIA_URL.
    ADMIN_MEDIA_PREFIX = ADMIN_MEDIA_PREFIX % MEDIA_URL
TEMPLATE_DEBUG = DEBUG
MANAGERS = ADMINS

# You can override Django's or some apps' locales with these folders:
if os.path.exists(os.path.join(COMMON_DIR, 'locale_overrides_common')):
    INSTALLED_APPS += ('locale_overrides_common',)
if os.path.exists(os.path.join(PROJECT_DIR, 'locale_overrides')):
    INSTALLED_APPS += ('locale_overrides',)

# Add admin interface media files if necessary
if 'django.contrib.admin' in INSTALLED_APPS:
    INSTALLED_APPS += ('django_aep_export.admin_media',)

# Always add Django templates (exported from zip)
INSTALLED_APPS += (
    'django_aep_export.django_templates',
)

# Convert all COMBINE_MEDIA to lists (tuples from settings would break the
# in-place '!START!' marker insertion performed by add_app_media below).
for key, value in COMBINE_MEDIA.items():
    if not isinstance(value, list):
        COMBINE_MEDIA[key] = list(value)
# Add start markers, so apps can insert JS/CSS at correct position
def add_app_media(combine, *appmedia):
    """Insert app media files at the '!START!' marker of a combined bundle.

    No-op on production servers; the marker is created on first use so
    later insertions land at the same position.
    """
    if on_production_server:
        return
    entries = COMBINE_MEDIA.setdefault(combine, [])
    if '!START!' not in entries:
        entries.insert(0, '!START!')
    marker = entries.index('!START!')
    entries[marker:marker] = appmedia
def add_uncombined_app_media(app):
    """Register every .css/.js file under the app's media folder directly.

    Each file becomes its own one-entry combine target named
    '<short app name>/<relative path>'.
    """
    if on_production_server:
        return
    module = __import__(app, {}, {}, [''])
    media_root = os.path.join(os.path.dirname(module.__file__), 'media')
    short_name = app.rsplit('.', 1)[-1]
    for dirpath, dirnames, filenames in os.walk(media_root):
        for fname in filenames:
            if not fname.endswith(('.css', '.js')):
                continue
            rel = os.path.join(dirpath, fname)[len(media_root):]
            rel = rel.replace(os.sep, '/').lstrip('/')
            target = '%s/%s' % (short_name, rel)
            add_app_media(target, target)
if have_appserver or on_production_server:
    # Skip the (slow) import-hygiene check entirely when serving.
    check_app_imports = None
else:
    def check_app_imports(app):
        """Warn if importing ``app`` pulls submodules in via __init__.py."""
        # NOTE(review): on Python 3, .keys() is a live view that would
        # mutate during the import below; wrap in list() if ever ported.
        before = sys.modules.keys()
        __import__(app, {}, {}, [''])
        after = sys.modules.keys()
        # Submodules of `app` that appeared as a side effect of the import.
        added = [key[len(app)+1:] for key in after if key not in before and
                 key.startswith(app + '.') and key[len(app)+1:]]
        if added:
            import logging
            logging.warn('The app "%(app)s" contains imports in '
                         'its __init__.py (at least %(added)s). This can cause '
                         'strange bugs due to recursive imports! You should '
                         'either do the import lazily (within functions) or '
                         'ignore the app settings/urlsauto with '
                         'IGNORE_APP_SETTINGS and IGNORE_APP_URLSAUTO in '
                         'your settings.py.'
                         % {'app': app, 'added': ', '.join(added)})
# Import app-specific settings
_globals = globals()


class _Module(object):
    """Attribute-access proxy over this module's global namespace."""
    def __setattr__(self, key, value):
        _globals[key] = value

    def __getattribute__(self, key):
        return _globals[key]


# App settings modules can read/mutate our globals through this object.
settings = _Module()
for app in INSTALLED_APPS:
    # This is an optimization. Django's apps don't have special settings.
    # Also, allow for ignoring some apps' settings.
    if app.startswith('django.') or app.endswith('.*') or \
            app == 'appenginepatcher' or app in IGNORE_APP_SETTINGS:
        continue
    try:
        # First we check if __init__.py doesn't import anything
        if check_app_imports:
            check_app_imports(app)
        __import__(app + '.settings', {}, {}, [''])
    except ImportError:
        # Apps without a settings module are simply skipped.
        pass

# Remove start markers
for key, value in COMBINE_MEDIA.items():
    if '!START!' in value:
        value.remove('!START!')

try:
    from settings_overrides import *
except ImportError:
    # Local overrides are optional.
    pass
|
{
"content_hash": "8d194f5d3ded0a245c5be417224a5960",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 35.583333333333336,
"alnum_prop": 0.5987509758001561,
"repo_name": "lstoll/tetherme",
"id": "ae2271402277f6636c542ea77b17db70ae3fd875",
"size": "3867",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "common/appenginepatch/ragendja/settings_post.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "134752"
},
{
"name": "Python",
"bytes": "149901"
}
],
"symlink_target": ""
}
|
from hpnnet import nips2011_dbn
from hyperopt.pyll.stochastic import sample
from functools import partial
#import numpy as np
import hyperopt
#from hyperopt import pyll
from hyperopt.fmin import fmin_pass_expr_memo_ctrl
from hpnnet.skdata_learning_algo import eval_fn
from skdata.larochelle_etal_2007.view import RectanglesVectorXV
def test_preproc_space():
    """Smoke-test a short hyperopt search over the nips2011_dbn space."""
    rectangles_eval_fn = partial(eval_fn,
                                 protocol_cls=RectanglesVectorXV)
    # NOTE(review): presumably marks the eval fn so fmin passes
    # (expr, memo, ctrl) instead of a sampled point; the return value is
    # ignored because the function is annotated in place — confirm against
    # the hyperopt docs.
    fmin_pass_expr_memo_ctrl(rectangles_eval_fn)
    trials = hyperopt.Trials()
    space = nips2011_dbn.preproc_space()
    # Ten random evaluations are enough for a smoke test.
    hyperopt.fmin(
        rectangles_eval_fn,
        space=space,
        max_evals=10,
        algo=hyperopt.rand.suggest,
        trials=trials,
    )
|
{
"content_hash": "bf84f4b5035ade70d5ee6bc3157daebe",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 23.548387096774192,
"alnum_prop": 0.7178082191780822,
"repo_name": "hyperopt/hyperopt-nnet",
"id": "2230a47f2e161229370d54bff93ea9a0011fa99d",
"size": "730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hpnnet/tests/test_nips2011_dbn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "80791"
}
],
"symlink_target": ""
}
|
from invisibleroads_macros.log import get_log
from threading import Thread, Event
from .exceptions import ConnectionError, TimeoutError
L = get_log(__name__)
class HeartbeatThread(Thread):
    """Daemon thread that periodically invokes a heartbeat callback.

    The loop alternates between a relaxed and a hurried pace; ``hurry``
    and ``relax`` switch pace, ``halt`` stops the loop.  Timeouts from the
    callback are ignored; a connection error ends the thread quietly.
    """
    daemon = True

    def __init__(
            self, send_heartbeat,
            relax_interval_in_seconds,
            hurry_interval_in_seconds):
        super(HeartbeatThread, self).__init__()
        self._send_heartbeat = send_heartbeat
        self._relax_interval_in_seconds = relax_interval_in_seconds
        self._hurry_interval_in_seconds = hurry_interval_in_seconds
        self._adrenaline = Event()  # set -> use the hurried interval
        self._rest = Event()        # pulsed to interrupt the current wait
        self._halt = Event()        # set -> leave the run loop

    def run(self):
        """Send heartbeats until halted; stop quietly on connection loss."""
        try:
            while not self._halt.is_set():
                try:
                    self._send_heartbeat()
                except TimeoutError:
                    pass
                wait_seconds = (
                    self._hurry_interval_in_seconds
                    if self._adrenaline.is_set()
                    else self._relax_interval_in_seconds)
                self._rest.wait(wait_seconds)
        except ConnectionError:
            L.debug('[heartbeat connection error]')

    def relax(self):
        """Switch back to the relaxed interval."""
        self._adrenaline.clear()

    def hurry(self):
        """Switch to the hurried interval and interrupt the current wait."""
        self._adrenaline.set()
        self._rest.set()
        self._rest.clear()

    @property
    def hurried(self):
        """True while the hurried interval is in effect."""
        return self._adrenaline.is_set()

    def halt(self):
        """Ask the thread to exit, waking it if it is waiting."""
        self._rest.set()
        self._halt.set()
|
{
"content_hash": "401112709ba6567e57a389b327ce23c6",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 73,
"avg_line_length": 27.87272727272727,
"alnum_prop": 0.5616438356164384,
"repo_name": "purduerov/XX-Core",
"id": "a039e73b1a0f9e5aac95e113f2cd6f62b21e6f92",
"size": "1533",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "tests/testsides/socketIO-client/socketIO_client/heartbeats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2366"
},
{
"name": "CSS",
"bytes": "24789"
},
{
"name": "Go",
"bytes": "22577"
},
{
"name": "HTML",
"bytes": "3240"
},
{
"name": "JavaScript",
"bytes": "103794"
},
{
"name": "Makefile",
"bytes": "374"
},
{
"name": "Python",
"bytes": "228753"
},
{
"name": "Shell",
"bytes": "1761"
}
],
"symlink_target": ""
}
|
from ..exceptions import InvalidChoiceError
from .base import MarathonObject
class MarathonContainer(MarathonObject):
    """Marathon container description.

    See https://mesosphere.github.io/marathon/docs/native-docker.html

    :param docker: docker field (e.g., {"image": "mygroup/myimage"})
    :type docker: :class:`marathon.models.container.MarathonDockerContainer` or dict
    :param str type: container type (must be one of `TYPES`)
    :param volumes: volumes to mount into the container
    :type volumes: list[:class:`marathon.models.container.MarathonContainerVolume`] or list[dict]

    :raises InvalidChoiceError: if `type` is not in `TYPES`
    """

    TYPES = ['DOCKER']
    """Valid container types"""

    def __init__(self, docker=None, type='DOCKER', volumes=None):
        # `type` shadows the builtin, but the name mirrors the Marathon
        # JSON field and is part of the public API, so it is kept.
        if type not in self.TYPES:  # idiom fix: was `not type in`
            raise InvalidChoiceError('type', type, self.TYPES)
        self.type = type
        # Accept either an already-built MarathonDockerContainer or a raw
        # dict (as decoded from Marathon API JSON).
        self.docker = docker if isinstance(docker, MarathonDockerContainer) \
            else MarathonDockerContainer().from_json(docker)
        self.volumes = [
            v if isinstance(v, MarathonContainerVolume)
            else MarathonContainerVolume().from_json(v)
            for v in (volumes or [])
        ]
class MarathonDockerContainer(MarathonObject):
    """Docker options.

    See https://mesosphere.github.io/marathon/docs/native-docker.html

    :param str image: docker image
    :param str network: networking mode (one of `NETWORK_MODES`)
    :param port_mappings:
    :type port_mappings: list[:class:`marathon.models.container.MarathonContainerPortMapping`] or list[dict]
    :param list[dict] parameters: additional docker command-line parameters
    :param bool privileged: run container in privileged mode
    :param bool force_pull_image: Force a docker pull before launching

    :raises InvalidChoiceError: if `network` is truthy and not in `NETWORK_MODES`
    """

    NETWORK_MODES = ['BRIDGE', 'HOST']
    """Valid network modes"""

    def __init__(self, image=None, network='HOST', port_mappings=None,
                 parameters=None, privileged=None, force_pull_image=None,
                 **kwargs):
        # `**kwargs` deliberately swallows unknown fields coming back from
        # the Marathon API so deserialization stays forward-compatible.
        self.image = image
        if network:
            if network not in self.NETWORK_MODES:  # idiom fix: was `not network in`
                raise InvalidChoiceError('network', network, self.NETWORK_MODES)
            self.network = network
        # NOTE(review): when `network` is falsy the attribute is never set —
        # confirm downstream serialization tolerates a missing attribute.
        self.port_mappings = [
            pm if isinstance(pm, MarathonContainerPortMapping)
            else MarathonContainerPortMapping().from_json(pm)
            for pm in (port_mappings or [])
        ]
        self.parameters = parameters or []
        self.privileged = privileged or False
        self.force_pull_image = force_pull_image or False
class MarathonContainerPortMapping(MarathonObject):
    """Container port mapping.

    See https://mesosphere.github.io/marathon/docs/native-docker.html

    :param int container_port: port inside the container
    :param int host_port: port on the host (0 lets Marathon pick one)
    :param int service_port: globally unique service port assigned by Marathon
    :param str protocol: one of `PROTOCOLS`

    :raises InvalidChoiceError: if `protocol` is not in `PROTOCOLS`
    """

    PROTOCOLS = ['tcp', 'udp']
    """Valid protocols"""

    def __init__(self, container_port=None, host_port=0, service_port=None,
                 protocol='tcp'):
        self.container_port = container_port
        self.host_port = host_port
        self.service_port = service_port
        if protocol not in self.PROTOCOLS:  # idiom fix: was `not protocol in`
            raise InvalidChoiceError('protocol', protocol, self.PROTOCOLS)
        self.protocol = protocol
class MarathonContainerVolume(MarathonObject):
    """Volume options.

    See https://mesosphere.github.io/marathon/docs/native-docker.html

    :param str container_path: container path
    :param str host_path: host path
    :param str mode: one of ['RO', 'RW']

    :raises InvalidChoiceError: if `mode` is not in `MODES`
    """

    MODES = ['RO', 'RW']

    def __init__(self, container_path=None, host_path=None, mode='RW'):
        self.container_path = container_path
        self.host_path = host_path
        if mode not in self.MODES:  # idiom fix: was `not mode in`
            raise InvalidChoiceError('mode', mode, self.MODES)
        self.mode = mode
|
{
"content_hash": "cc6c7466f2065c0d12cb423f75e76854",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 112,
"avg_line_length": 34.80769230769231,
"alnum_prop": 0.6599447513812154,
"repo_name": "fengyehong/marathon-python",
"id": "3e88558fc269f76221a8b8198620dfe09dad6800",
"size": "3620",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "marathon/models/container.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "1293"
},
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "80169"
},
{
"name": "Shell",
"bytes": "1231"
}
],
"symlink_target": ""
}
|
import os
from bokeh.core.properties import Int, String
from bokeh.embed import file_html
from bokeh.models import Tool
from bokeh.resources import CDN
from bokeh.util.compiler import JavaScript
# Load the client-side implementation that lives next to this module;
# the JS source is handed to Bokeh as the tool's browser-side code.
fn = __file__
fn = os.path.join(os.path.dirname(fn), "export_tool.js")
with open(fn) as f:
    JS_CODE = f.read()
class ExportTool(Tool):
    """Custom Bokeh tool whose browser side is implemented in export_tool.js."""

    __implementation__ = JavaScript(JS_CODE)

    # Bumped by the client side to request an export (watched via on_change).
    event = Int(default=0)
    # Receives the exported standalone HTML document.
    content = String()

    def register_plot(self, plot):
        """Wire this tool so that changes to `event` export `plot` as HTML."""
        def export_callback(attr, old, new):
            # really, export the doc as JSON
            # Reset first so observers see a fresh value even when the same
            # document is exported twice in a row.
            self.content = None
            html = file_html(plot, CDN, "Task Stream")
            self.content = html
        self.on_change("event", export_callback)
|
{
"content_hash": "c454a8c91ff7a9d4c43c4f57d5c2c41f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 56,
"avg_line_length": 26,
"alnum_prop": 0.6510989010989011,
"repo_name": "blaze/distributed",
"id": "d93d21b881b59af35623189b8722adddc9f738f1",
"size": "728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "distributed/dashboard/export_tool.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "511624"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
}
|
from google.cloud import osconfig_v1alpha
def sample_delete_os_policy_assignment():
    """Delete an OS policy assignment and block until the LRO completes.

    Generated usage sample for the synchronous OS Config zonal client;
    `name_value` is a placeholder for a real assignment resource name.
    """
    # Create a client
    client = osconfig_v1alpha.OsConfigZonalServiceClient()

    # Initialize request argument(s)
    request = osconfig_v1alpha.DeleteOSPolicyAssignmentRequest(
        name="name_value",
    )

    # Make the request
    operation = client.delete_os_policy_assignment(request=request)

    print("Waiting for operation to complete...")

    # result() blocks until the long-running operation finishes.
    response = operation.result()

    # Handle the response
    print(response)
# [END osconfig_v1alpha_generated_OsConfigZonalService_DeleteOSPolicyAssignment_sync]
|
{
"content_hash": "a95a2be4eefc9198af793621d7f38316",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 85,
"avg_line_length": 26.695652173913043,
"alnum_prop": 0.7328990228013029,
"repo_name": "googleapis/python-os-config",
"id": "7d2ab42d088be84f47789de35469b2311aa10414",
"size": "2036",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/osconfig_v1alpha_generated_os_config_zonal_service_delete_os_policy_assignment_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1810720"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
}
|
"""Classes for exporting data from AFF4 to the rest of the world.
Exporters defined here convert various complex RDFValues to simple RDFValues
(without repeated fields, without recursive field definitions) that can
easily be written to a relational database or just to a set of files.
"""
import hashlib
import json
import stat
import time
import logging
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import type_info
from grr.lib import utils
from grr.lib.aff4_objects import filestore
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import export_pb2
class Error(Exception):
  """Errors generated by export converters."""


class NoConverterFound(Error):
  """Raised when no converter is found for particular value."""


class ExportOptions(rdf_structs.RDFProtoStruct):
  """Settings (e.g. hash/content export flags) consumed by converters."""
  protobuf = export_pb2.ExportOptions


class ExportedMetadata(rdf_structs.RDFProtoStruct):
  """Per-value context (client, session id, timestamps) attached to exports."""
  protobuf = export_pb2.ExportedMetadata

  # `payload` is accepted for interface compatibility but not used here.
  def __init__(self, initializer=None, age=None, payload=None, **kwarg):
    super(ExportedMetadata, self).__init__(initializer=initializer,
                                           age=age, **kwarg)
    # Stamp creation time unless the initializer already carried one.
    if not self.timestamp:
      self.timestamp = rdfvalue.RDFDatetime().Now()


# The classes below are thin export-friendly mirrors of the protobufs in
# export_pb2: each one just binds its protobuf descriptor.

class ExportedClient(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedClient


class ExportedFile(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedFile


class ExportedRegistryKey(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedRegistryKey


class ExportedProcess(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedProcess


class ExportedNetworkConnection(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedNetworkConnection


class ExportedDNSClientConfiguration(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedDNSClientConfiguration


class ExportedOpenFile(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedOpenFile


class ExportedNetworkInterface(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedNetworkInterface


class ExportedFileStoreHash(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedFileStoreHash


class ExportedAnomaly(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedAnomaly


class ExportedCheckResult(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedCheckResult


class ExportedMatch(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedMatch


class ExportedBytes(rdf_structs.RDFProtoStruct):
  protobuf = export_pb2.ExportedBytes
class ExportConverter(object):
  """Base ExportConverter class.

  ExportConverters are used to convert RDFValues to export-friendly RDFValues.
  "Export-friendly" means 2 things:
    * Flat structure
    * No repeated fields (i.e. lists)

  In order to use ExportConverters, users have to use ConvertValues.
  These methods will look up all the available ExportConverters descendants
  and will choose the ones that have input_rdf_type attribute equal to the
  type of the values being converted. It's ok to have multiple converters with
  the same input_rdf_type value. They will be applied sequentially and their
  cumulative results will be returned.
  """

  # Registers every subclass in ExportConverter.classes for lookup.
  __metaclass__ = registry.MetaclassRegistry

  # Type of values that this converter accepts (class name as a string).
  input_rdf_type = None

  # Cache used for GetConvertersByValue() lookups, keyed by value class name.
  converters_cache = {}

  def __init__(self, options=None):
    """Constructor.

    Args:
      options: ExportOptions value, which contains settings that may or
               may not affect this converter's behavior.
    """
    super(ExportConverter, self).__init__()
    self.options = options or ExportOptions()

  def Convert(self, metadata, value, token=None):
    """Converts given RDFValue to other RDFValues.

    Metadata object is provided by the caller. It contains basic information
    about where the value is coming from (i.e. client_urn, session_id, etc)
    as well as timestamps corresponding to when data was generated and
    exported.

    ExportConverter should use the metadata when constructing export-friendly
    RDFValues.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      value: RDFValue to be converted.
      token: Security token.

    Yields:
      Resulting RDFValues. Empty list is a valid result and means that
      conversion wasn't possible. Resulting RDFValues may be of different
      types.
    """
    raise NotImplementedError()

  def BatchConvert(self, metadata_value_pairs, token=None):
    """Converts a batch of RDFValues at once.

    This is a default non-optimized dumb implementation. Subclasses are
    supposed to have their own optimized implementations.

    Metadata object is provided by the caller. It contains basic information
    about where the value is coming from (i.e. client_urn, session_id, etc)
    as well as timestamps corresponding to when data was generated and
    exported.

    ExportConverter should use the metadata when constructing export-friendly
    RDFValues.

    Args:
      metadata_value_pairs: a list or a generator of tuples (metadata, value),
                            where metadata is ExportedMetadata to be used for
                            conversion and value is an RDFValue to be converted.
      token: Security token.

    Yields:
      Resulting RDFValues. Empty list is a valid result and means that
      conversion wasn't possible. Resulting RDFValues may be of different
      types.
    """
    for metadata, value in metadata_value_pairs:
      for result in self.Convert(metadata, value, token):
        yield result

  @staticmethod
  def GetConvertersByValue(value):
    """Returns all converters that take given value as an input value."""
    try:
      return ExportConverter.converters_cache[value.__class__.__name__]
    except KeyError:
      # Scan the metaclass registry once per value type, then memoize.
      results = [cls for cls in ExportConverter.classes.itervalues()
                 if cls.input_rdf_type == value.__class__.__name__]
      if not results:
        # No specialized converter: fall back to the generic flattener.
        results = [DataAgnosticExportConverter]
      ExportConverter.converters_cache[value.__class__.__name__] = results
      return results
class DataAgnosticExportConverter(ExportConverter):
  """Export converter that yields flattened versions of passed values.

  NOTE: DataAgnosticExportConverter discards complex types: repeated
  fields and nested messages. Only the primitive types (including enums)
  are preserved.
  """

  # Cache used for generated classes, keyed by the generated class name.
  classes_cache = {}

  def ExportedClassNameForValue(self, value):
    """Returns the name of the auto-generated flat class for `value`."""
    return utils.SmartStr("AutoExported" + value.__class__.__name__)

  def MakeFlatRDFClass(self, value):
    """Generates flattened RDFValue class definition for the given value."""

    def Flatten(self, metadata, value_to_flatten):
      # NOTE: `self` here is an instance of the generated class, not of
      # the converter; this function becomes a method of output_class.
      if metadata:
        self.metadata = metadata

      for desc in value_to_flatten.type_infos:
        if desc.name == "metadata":
          continue
        # Only copy fields the flat class actually declared (primitives)
        # and that are set on the source value.
        if hasattr(self, desc.name) and value_to_flatten.HasField(desc.name):
          setattr(self, desc.name, getattr(value_to_flatten, desc.name))

    output_class = type(self.ExportedClassNameForValue(value),
                        (rdf_structs.RDFProtoStruct,),
                        dict(Flatten=Flatten))

    # Metadata is always the first field of exported data.
    output_class.AddDescriptor(rdf_structs.ProtoEmbedded(
        name="metadata", field_number=1,
        nested=ExportedMetadata))

    for number, desc in sorted(value.type_infos_by_field_number.items()):
      # Name 'metadata' is reserved to store ExportedMetadata value.
      if desc.name == "metadata":
        logging.debug("Ignoring 'metadata' field in %s.",
                      value.__class__.__name__)
        continue

      # Copy descriptors for primitive values as-is, just make sure their
      # field number is correct.
      if isinstance(desc, (type_info.ProtoBinary,
                           type_info.ProtoString,
                           type_info.ProtoUnsignedInteger,
                           type_info.ProtoEnum)):
        # Incrementing field number by 1, as 1 is always occupied by metadata.
        output_class.AddDescriptor(desc.Copy(field_number=number + 1))

        if (isinstance(desc, type_info.ProtoEnum) and
            not isinstance(desc, type_info.ProtoBoolean)):
          # Attach the enum container to the class for easy reference:
          setattr(output_class, desc.enum_name, desc.enum_container)

    return output_class

  def Convert(self, metadata, value, token=None):
    """Flattens `value` into an instance of its auto-generated flat class."""
    class_name = self.ExportedClassNameForValue(value)
    try:
      class_obj = DataAgnosticExportConverter.classes_cache[class_name]
    except KeyError:
      # Generate the flat class on first sight of this value type.
      class_obj = self.MakeFlatRDFClass(value)
      DataAgnosticExportConverter.classes_cache[class_name] = class_obj

    result_obj = class_obj()
    result_obj.Flatten(metadata, value)
    yield result_obj

  def BatchConvert(self, metadata_value_pairs, token=None):
    for metadata, value in metadata_value_pairs:
      for result in self.Convert(metadata, value, token=token):
        yield result
class StatEntryToExportedFileConverter(ExportConverter):
  """Converts StatEntry to ExportedFile."""

  input_rdf_type = "StatEntry"

  # Cap on how many bytes of file content get inlined into an export.
  MAX_CONTENT_SIZE = 1024 * 64

  def __init__(self, *args, **kwargs):
    super(StatEntryToExportedFileConverter, self).__init__(*args, **kwargs)
    # If either of these are true we need to open the file to get more
    # information
    self.open_file_for_read = (self.options.export_files_hashes or
                               self.options.export_files_contents)

  @staticmethod
  def ParseSignedData(signed_data, result):
    """Parses signed certificate data and updates result rdfvalue."""
    # Intentionally a no-op in this version.

  @staticmethod
  def ParseFileHash(hash_obj, result):
    """Parses Hash rdfvalue into ExportedFile's fields."""
    if hash_obj.HasField("md5"):
      result.hash_md5 = str(hash_obj.md5)

    if hash_obj.HasField("sha1"):
      result.hash_sha1 = str(hash_obj.sha1)

    if hash_obj.HasField("sha256"):
      result.hash_sha256 = str(hash_obj.sha256)

    if hash_obj.HasField("pecoff_md5"):
      result.pecoff_hash_md5 = str(hash_obj.pecoff_md5)

    if hash_obj.HasField("pecoff_sha1"):
      result.pecoff_hash_sha1 = str(hash_obj.pecoff_sha1)

    if hash_obj.HasField("signed_data"):
      # Only the first signed_data entry is parsed.
      StatEntryToExportedFileConverter.ParseSignedData(
          hash_obj.signed_data[0], result)

  def Convert(self, metadata, stat_entry, token=None):
    """Converts StatEntry to ExportedFile.

    Does nothing if StatEntry corresponds to a registry entry and not to a file.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      stat_entry: StatEntry to be converted.
      token: Security token.

    Returns:
      List or generator with resulting RDFValues. Empty list if StatEntry
      corresponds to a registry entry and not to a file.
    """
    return self.BatchConvert([(metadata, stat_entry)], token=token)

  def _RemoveRegistryKeys(self, metadata_value_pairs):
    """Filter out registry keys to operate on files."""
    filtered_pairs = []
    for metadata, stat_entry in metadata_value_pairs:
      # Ignore registry keys.
      if stat_entry.pathspec.pathtype != rdf_paths.PathSpec.PathType.REGISTRY:
        filtered_pairs.append((metadata, stat_entry))

    return filtered_pairs

  def _OpenFilesForRead(self, metadata_value_pairs, token):
    """Open files all at once if necessary."""
    # NOTE(review): returns None when open_file_for_read is False; callers
    # only index the result under the same flag, so this is consistent.
    if self.open_file_for_read:
      aff4_paths = [result.aff4path for _, result in metadata_value_pairs]
      fds = aff4.FACTORY.MultiOpen(aff4_paths, mode="r", token=token)
      fds_dict = dict([(fd.urn, fd) for fd in fds])
      return fds_dict

  def _ExportHash(self, aff4_object, result):
    """Add hashes from aff4_object to result."""
    if self.options.export_files_hashes:
      hash_obj = aff4_object.Get(aff4_object.Schema.HASH)
      if hash_obj:
        self.ParseFileHash(hash_obj, result)

  def _ExportFileContent(self, aff4_object, result):
    """Add file content from aff4_object to result."""
    if self.options.export_files_contents:
      try:
        # Content is truncated to MAX_CONTENT_SIZE; the sha256 below is of
        # the truncated content, not the full file.
        result.content = aff4_object.Read(self.MAX_CONTENT_SIZE)
        result.content_sha256 = hashlib.sha256(result.content).hexdigest()
      except (IOError, AttributeError) as e:
        logging.warning("Can't read content of %s: %s",
                        result.aff4path, e)

  def _CreateExportedFile(self, metadata, stat_entry):
    """Builds the flat ExportedFile from a StatEntry (no file I/O)."""
    return ExportedFile(metadata=metadata, urn=stat_entry.aff4path,
                        basename=stat_entry.pathspec.Basename(),
                        st_mode=stat_entry.st_mode, st_ino=stat_entry.st_ino,
                        st_dev=stat_entry.st_dev, st_nlink=stat_entry.st_nlink,
                        st_uid=stat_entry.st_uid, st_gid=stat_entry.st_gid,
                        st_size=stat_entry.st_size,
                        st_atime=stat_entry.st_atime,
                        st_mtime=stat_entry.st_mtime,
                        st_ctime=stat_entry.st_ctime,
                        st_blocks=stat_entry.st_blocks,
                        st_blksize=stat_entry.st_blksize,
                        st_rdev=stat_entry.st_rdev, symlink=stat_entry.symlink)

  def BatchConvert(self, metadata_value_pairs, token=None):
    """Converts a batch of StatEntry value to ExportedFile values at once.

    Args:
      metadata_value_pairs: a list or a generator of tuples (metadata, value),
                            where metadata is ExportedMetadata to be used for
                            conversion and value is a StatEntry to be converted.
      token: Security token.

    Yields:
      Resulting ExportedFile values. Empty list is a valid result and means that
      conversion wasn't possible.
    """
    filtered_pairs = self._RemoveRegistryKeys(metadata_value_pairs)
    fds_dict = self._OpenFilesForRead(filtered_pairs, token=token)

    for metadata, stat_entry in filtered_pairs:
      result = self._CreateExportedFile(metadata, stat_entry)

      if self.open_file_for_read:
        try:
          aff4_object = fds_dict[stat_entry.aff4path]
          self._ExportHash(aff4_object, result)
          self._ExportFileContent(aff4_object, result)
        except KeyError:
          # The file couldn't be opened; export the stat data alone.
          pass

      yield result
class StatEntryToExportedRegistryKeyConverter(ExportConverter):
  """Converts StatEntry to ExportedRegistryKey."""

  input_rdf_type = "StatEntry"

  def Convert(self, metadata, stat_entry, token=None):
    """Converts StatEntry to ExportedRegistryKey.

    Does nothing if StatEntry corresponds to a file and not a registry entry.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      stat_entry: StatEntry to be converted.
      token: Security token.

    Returns:
      List or generator with resulting RDFValues. Empty list if StatEntry
      corresponds to a file and not to a registry entry.
    """
    if stat_entry.pathspec.pathtype != rdf_paths.PathSpec.PathType.REGISTRY:
      return []

    result = ExportedRegistryKey(metadata=metadata,
                                 urn=stat_entry.aff4path,
                                 last_modified=stat_entry.st_mtime)

    if (stat_entry.HasField("registry_type") and
        stat_entry.HasField("registry_data")):

      result.type = stat_entry.registry_type

      try:
        data = str(stat_entry.registry_data.GetValue())
      except UnicodeEncodeError:
        # If we can't represent this as a string...
        # let's just get the byte representation *shrug*
        # BUG FIX: this used to read `stat.registry_data` — the stdlib
        # `stat` module imported above — which raised AttributeError and
        # made this fallback path dead. It must read from `stat_entry`.
        data = stat_entry.registry_data.GetValue()
        # Get the byte representation of the string
        data = unicode(data).encode("utf-16be")

      result.data = data

    return [result]
class NetworkConnectionToExportedNetworkConnectionConverter(ExportConverter):
  """Converts NetworkConnection to ExportedNetworkConnection."""

  input_rdf_type = "NetworkConnection"

  def Convert(self, metadata, conn, token=None):
    """Converts NetworkConnection to ExportedNetworkConnection."""
    # The export value is a one-to-one copy of these connection fields.
    copied_fields = ("family", "type", "local_address", "remote_address",
                     "state", "pid", "ctime")
    field_values = {name: getattr(conn, name) for name in copied_fields}
    return [ExportedNetworkConnection(metadata=metadata, **field_values)]
class ProcessToExportedProcessConverter(ExportConverter):
  """Converts Process to ExportedProcess."""

  input_rdf_type = "Process"

  def Convert(self, metadata, process, token=None):
    """Converts Process to ExportedProcess."""
    # Field-for-field copy; note that cmdline is joined into a single
    # space-separated string, and the source attributes RSS_size / VMS_size
    # map onto the lower-case rss_size / vms_size export fields.
    result = ExportedProcess(metadata=metadata,
                             pid=process.pid,
                             ppid=process.ppid,
                             name=process.name,
                             exe=process.exe,
                             cmdline=" ".join(process.cmdline),
                             ctime=process.ctime,
                             real_uid=process.real_uid,
                             effective_uid=process.effective_uid,
                             saved_uid=process.saved_uid,
                             real_gid=process.real_gid,
                             effective_gid=process.effective_gid,
                             saved_gid=process.saved_gid,
                             username=process.username,
                             terminal=process.terminal,
                             status=process.status,
                             nice=process.nice,
                             cwd=process.cwd,
                             num_threads=process.num_threads,
                             user_cpu_time=process.user_cpu_time,
                             system_cpu_time=process.system_cpu_time,
                             cpu_percent=process.cpu_percent,
                             rss_size=process.RSS_size,
                             vms_size=process.VMS_size,
                             memory_percent=process.memory_percent)
    return [result]
class ProcessToExportedNetworkConnectionConverter(ExportConverter):
  """Converts Process to ExportedNetworkConnection."""

  input_rdf_type = "Process"

  def Convert(self, metadata, process, token=None):
    """Converts Process to ExportedNetworkConnection."""
    # Delegate to the per-connection converter for every connection the
    # process owns, reusing our options.
    delegate = NetworkConnectionToExportedNetworkConnectionConverter(
        options=self.options)
    pairs = [(metadata, conn) for conn in process.connections]
    return delegate.BatchConvert(pairs, token=token)
class ProcessToExportedOpenFileConverter(ExportConverter):
  """Converts Process to ExportedOpenFile."""

  input_rdf_type = "Process"

  def Convert(self, metadata, process, token=None):
    """Yields one ExportedOpenFile per path the process has open."""
    pid = process.pid
    for open_path in process.open_files:
      yield ExportedOpenFile(metadata=metadata, pid=pid, path=open_path)
class InterfaceToExportedNetworkInterfaceConverter(ExportConverter):
  """Converts Interface to ExportedNetworkInterface."""

  input_rdf_type = "Interface"

  def Convert(self, metadata, interface, token=None):
    """Converts Interface to ExportedNetworkInterfaces.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      interface: Interface value whose addresses get bucketed by family.
      token: Security token (unused).

    Yields:
      A single ExportedNetworkInterface with space-separated address lists.

    Raises:
      ValueError: if an address has an unrecognized family.
    """
    ip4_addresses = []
    ip6_addresses = []
    for addr in interface.addresses:
      if addr.address_type == addr.Family.INET:
        ip4_addresses.append(addr.human_readable_address)
      elif addr.address_type == addr.Family.INET6:
        ip6_addresses.append(addr.human_readable_address)
      else:
        # BUG FIX: the message used to be passed logging-style as a second
        # positional argument ("...%s", value), which left the placeholder
        # unformatted in the raised exception; format it explicitly.
        raise ValueError("Invalid address type: %s" % addr.address_type)

    result = ExportedNetworkInterface(
        metadata=metadata,
        ifname=interface.ifname,
        ip4_addresses=" ".join(ip4_addresses),
        ip6_addresses=" ".join(ip6_addresses))

    if interface.mac_address:
      result.mac_address = interface.mac_address.human_readable_address

    yield result
class DNSClientConfigurationToExportedDNSClientConfiguration(ExportConverter):
  """Converts DNSClientConfiguration to ExportedDNSClientConfiguration."""

  input_rdf_type = "DNSClientConfiguration"

  def Convert(self, metadata, config, token=None):
    """Converts DNSClientConfiguration to ExportedDNSClientConfiguration."""
    # Repeated fields are collapsed into space-separated strings so the
    # export stays flat.
    servers = " ".join(config.dns_server)
    suffixes = " ".join(config.dns_suffix)
    yield ExportedDNSClientConfiguration(metadata=metadata,
                                         dns_servers=servers,
                                         dns_suffixes=suffixes)
class ClientSummaryToExportedNetworkInterfaceConverter(
    InterfaceToExportedNetworkInterfaceConverter):
  """Converts ClientSummary to ExportedNetworkInterfaces."""

  input_rdf_type = "ClientSummary"

  def Convert(self, metadata, client_summary, token=None):
    """Converts ClientSummary to ExportedNetworkInterfaces."""
    for interface in client_summary.interfaces:
      # The parent converter yields exactly one value per interface;
      # .next() (Python 2 generator protocol) pulls that single value out.
      yield super(
          ClientSummaryToExportedNetworkInterfaceConverter, self).Convert(
              metadata, interface, token=token).next()
class ClientSummaryToExportedClientConverter(ExportConverter):
  """Emits one bare ExportedClient row per ClientSummary seen."""

  input_rdf_type = "ClientSummary"

  def Convert(self, metadata, unused_client_summary, token=None):
    # Only the metadata matters here: the summary itself contributes no
    # extra exportable fields.
    exported = ExportedClient(metadata=metadata)
    return [exported]
class BufferReferenceToExportedMatchConverter(ExportConverter):
  """Export converter for BufferReference instances."""

  input_rdf_type = "BufferReference"

  def Convert(self, metadata, buffer_reference, token=None):
    """Converts a grep/find match into a single ExportedMatch."""
    # Resolve the match's pathspec against the client so the export
    # carries the AFF4 URN of the file the match was found in.
    yield ExportedMatch(metadata=metadata,
                        offset=buffer_reference.offset,
                        length=buffer_reference.length,
                        data=buffer_reference.data,
                        urn=aff4.AFF4Object.VFSGRRClient.PathspecToURN(
                            buffer_reference.pathspec,
                            metadata.client_urn))
class FileFinderResultConverter(StatEntryToExportedFileConverter):
  """Export converter for FileFinderResult instances."""

  input_rdf_type = "FileFinderResult"

  def __init__(self, *args, **kwargs):
    super(FileFinderResultConverter, self).__init__(*args, **kwargs)
    # We only need to open the file if we're going to export the contents, we
    # already have the hash in the FileFinderResult
    self.open_file_for_read = self.options.export_files_contents

  def _SeparateTypes(self, metadata_value_pairs):
    """Separate files, registry keys, grep matches."""
    registry_pairs = []
    file_pairs = []
    match_pairs = []
    for metadata, result in metadata_value_pairs:
      if (result.stat_entry.pathspec.pathtype ==
          rdf_paths.PathSpec.PathType.REGISTRY):
        registry_pairs.append((metadata, result.stat_entry))
      else:
        file_pairs.append((metadata, result))

      # Matches are collected regardless of whether the result is a file
      # or a registry key.
      match_pairs.extend([(metadata, match) for match in result.matches])

    return registry_pairs, file_pairs, match_pairs

  def BatchConvert(self, metadata_value_pairs, token=None):
    """Convert FileFinder results.

    Args:
      metadata_value_pairs: array of ExportedMetadata and rdfvalue tuples.
      token: ACLToken

    Yields:
      ExportedFile, ExportedRegistryKey, or ExportedMatch

    FileFinderResult objects have 3 types of results that need to be handled
    separately. Files, registry keys, and grep matches. The file results are
    similar to statentry exports, and share some code, but different because we
    already have the hash available without having to go back to the database to
    retrieve it from the aff4 object.
    """
    registry_pairs, file_pairs, match_pairs = self._SeparateTypes(
        metadata_value_pairs)

    # Export files first
    fds_dict = self._OpenFilesForRead(
        [(metadata, val.stat_entry) for metadata, val in file_pairs],
        token=token)
    for metadata, ff_result in file_pairs:
      result = self._CreateExportedFile(metadata, ff_result.stat_entry)

      # FileFinderResult has hashes in "hash_entry" attribute which is not
      # passed to ConvertValuesWithMetadata call. We have to process these
      # explicitly here.
      self.ParseFileHash(ff_result.hash_entry, result)

      if self.options.export_files_contents:
        try:
          aff4_object = fds_dict[ff_result.stat_entry.aff4path]
          self._ExportFileContent(aff4_object, result)
        except KeyError:
          logging.warn("Couldn't open %s for export",
                       ff_result.stat_entry.aff4path)
      yield result

    # Now export the registry keys
    for result in ConvertValuesWithMetadata(registry_pairs, token=token,
                                            options=self.options):
      yield result

    # Now export the grep matches.
    for result in ConvertValuesWithMetadata(match_pairs, token=token,
                                            options=self.options):
      yield result

  def Convert(self, metadata, result, token=None):
    """Single-result conversion is just a batch of one."""
    return self.BatchConvert([(metadata, result)], token=token)
class RDFURNConverter(ExportConverter):
  """Follows RDFURN and converts its target object into a set of RDFValues.

  If urn points to a RDFValueCollection, RDFURNConverter goes through the
  collection and converts every value there. If urn points to an object
  with "STAT" attribute, it converts just that attribute.
  """

  input_rdf_type = "RDFURN"

  def Convert(self, metadata, stat_entry, token=None):
    return self.BatchConvert([(metadata, stat_entry)], token=token)

  def BatchConvert(self, metadata_value_pairs, token=None):
    # Keep only genuine RDFURN values; other types are silently dropped.
    urn_metadata_pairs = []
    for metadata, value in metadata_value_pairs:
      if isinstance(value, rdfvalue.RDFURN):
        urn_metadata_pairs.append((value, metadata))

    # NOTE(review): duplicate URNs collapse here — only the last metadata
    # for a given URN survives. Confirm callers never rely on duplicates.
    urns_dict = dict(urn_metadata_pairs)
    fds = aff4.FACTORY.MultiOpen(urns_dict.iterkeys(), mode="r", token=token)

    batch = []
    for fd in fds:
      batch.append((urns_dict[fd.urn], fd))

    try:
      return ConvertValuesWithMetadata(batch, token=token)
    except NoConverterFound as e:
      logging.debug(e)

    return []
class RDFValueCollectionConverter(ExportConverter):
  """Converts every element of an RDFValueCollection, in batches."""

  input_rdf_type = "RDFValueCollection"

  # Number of collection elements converted per ConvertValues call.
  BATCH_SIZE = 1000

  def Convert(self, metadata, collection, token=None):
    if not collection:
      return
    # ConvertValues is a module-level helper defined elsewhere in this
    # module; the same metadata is applied to every element.
    for batch in utils.Grouper(collection, self.BATCH_SIZE):
      converted_batch = ConvertValues(metadata, batch, token=token,
                                      options=self.options)
      for v in converted_batch:
        yield v
class VFSFileToExportedFileConverter(ExportConverter):
  """Converts a VFSFile AFF4 object into an ExportedFile."""

  input_rdf_type = "VFSFile"

  def Convert(self, metadata, vfs_file, token=None):
    """Flattens the object's STAT (and HASH, if present) into ExportedFile."""
    stat_entry = vfs_file.Get(vfs_file.Schema.STAT)
    if not stat_entry:
      # Without a STAT attribute there is nothing exportable.
      return []

    result = ExportedFile(metadata=metadata,
                          urn=stat_entry.aff4path,
                          basename=stat_entry.pathspec.Basename(),
                          st_mode=stat_entry.st_mode,
                          st_ino=stat_entry.st_ino,
                          st_dev=stat_entry.st_dev,
                          st_nlink=stat_entry.st_nlink,
                          st_uid=stat_entry.st_uid,
                          st_gid=stat_entry.st_gid,
                          st_size=stat_entry.st_size,
                          st_atime=stat_entry.st_atime,
                          st_mtime=stat_entry.st_mtime,
                          st_ctime=stat_entry.st_ctime,
                          st_blocks=stat_entry.st_blocks,
                          st_blksize=stat_entry.st_blksize,
                          st_rdev=stat_entry.st_rdev,
                          symlink=stat_entry.symlink)

    hash_obj = vfs_file.Get(vfs_file.Schema.HASH)
    if hash_obj:
      StatEntryToExportedFileConverter.ParseFileHash(hash_obj, result)

    return [result]
class RDFBytesToExportedBytesConverter(ExportConverter):
  """Wraps an RDFBytes value into a flat ExportedBytes record."""

  input_rdf_type = "RDFBytes"

  def Convert(self, metadata, data, token=None):
    # Note: length is len(data) (the RDF value's own length), not the
    # length of the serialized representation stored in `data`.
    serialized = data.SerializeToString()
    exported = ExportedBytes(metadata=metadata,
                             data=serialized,
                             length=len(data))
    return [exported]
class GrrMessageConverter(ExportConverter):
"""Converts GrrMessage's payload into a set of RDFValues.
GrrMessageConverter converts given GrrMessages to a set of exportable
RDFValues. It looks at the payload of every message and applies necessary
converters to produce the resulting RDFValues.
Usually, when a value is converted via one of the ExportConverter classes,
metadata (ExportedMetadata object describing the client, session id, etc)
are provided by the caller. But when converting GrrMessages, the caller can't
provide any reasonable metadata. In order to understand where the messages
are coming from, one actually has to inspect the messages source and this
is done by GrrMessageConverter and not by the caller.
Although ExportedMetadata should still be provided for the conversion to
happen, only "source_urn" and value will be used. All other metadata will be
fetched from the client object pointed to by GrrMessage.source.
"""
input_rdf_type = "GrrMessage"
  def __init__(self, *args, **kw):
    super(GrrMessageConverter, self).__init__(*args, **kw)
    # client_urn -> ExportedMetadata cache, so repeated messages from the
    # same client don't require re-fetching the client's metadata.
    self.cached_metadata = {}
  def Convert(self, metadata, grr_message, token=None):
    """Converts GrrMessage into a set of RDFValues.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      grr_message: GrrMessage to be converted.
      token: Security token.

    Returns:
      List or generator with resulting RDFValues.
    """
    # A single message is just a batch of one; BatchConvert does the
    # per-client metadata resolution.
    return self.BatchConvert([(metadata, grr_message)], token=token)
def BatchConvert(self, metadata_value_pairs, token=None):
"""Converts a batch of GrrMessages into a set of RDFValues at once.
Args:
metadata_value_pairs: a list or a generator of tuples (metadata, value),
where metadata is ExportedMetadata to be used for
conversion and value is a GrrMessage to be
converted.
token: Security token.
Returns:
Resulting RDFValues. Empty list is a valid result and means that
conversion wasn't possible.
"""
# Group messages by source (i.e. by client urn).
msg_dict = {}
for metadata, msg in metadata_value_pairs:
msg_dict.setdefault(msg.source, []).append((metadata, msg))
metadata_objects = []
metadata_to_fetch = []
# Open the clients we don't have metadata for and fetch metadata.
for client_urn in msg_dict.iterkeys():
try:
metadata_objects.append(self.cached_metadata[client_urn])
except KeyError:
metadata_to_fetch.append(client_urn)
if metadata_to_fetch:
client_fds = aff4.FACTORY.MultiOpen(metadata_to_fetch, mode="r",
token=token)
fetched_metadata = [GetMetadata(client_fd, token=token)
for client_fd in client_fds]
for metadata in fetched_metadata:
self.cached_metadata[metadata.client_urn] = metadata
metadata_objects.extend(fetched_metadata)
data_by_type = {}
for metadata in metadata_objects:
try:
for original_metadata, message in msg_dict[metadata.client_urn]:
# Get source_urn and annotations from the original metadata
# provided and original_timestamp from the payload age.
new_metadata = ExportedMetadata(metadata)
new_metadata.source_urn = original_metadata.source_urn
new_metadata.annotations = original_metadata.annotations
new_metadata.original_timestamp = message.payload.age
cls_name = message.payload.__class__.__name__
# Create a dict of values for conversion keyed by type, so we can
# apply the right converters to the right object types
if cls_name not in data_by_type:
converters_classes = ExportConverter.GetConvertersByValue(
message.payload)
data_by_type[cls_name] = {
"converters": [cls(self.options) for cls in converters_classes],
"batch_data": [(new_metadata, message.payload)]}
else:
data_by_type[cls_name]["batch_data"].append(
(new_metadata, message.payload))
except KeyError:
pass
# Run all converters against all objects of the relevant type
converted_batch = []
for dataset in data_by_type.values():
for converter in dataset["converters"]:
converted_batch.extend(converter.BatchConvert(dataset["batch_data"],
token=token))
return converted_batch
class FileStoreHashConverter(ExportConverter):
  """Converts FileStoreHash values into ExportedFileStoreHash values."""
  input_rdf_type = "FileStoreHash"
  def Convert(self, metadata, stat_entry, token=None):
    """Convert a single FileStoreHash."""
    return self.BatchConvert([(metadata, stat_entry)], token=token)
  def BatchConvert(self, metadata_value_pairs, token=None):
    """Convert batch of FileStoreHashs."""
    hash_urns = [urn for metadata, urn in metadata_value_pairs]
    # Remember which metadata belongs to which hash urn, so every per-client
    # hit below can be annotated with the right metadata.
    metadata_by_urn = dict([(urn, metadata)
                            for metadata, urn in metadata_value_pairs])
    converted = []
    for hash_urn, client_files in filestore.HashFileStore.GetClientsForHashes(
        hash_urns, token=token):
      for hit in client_files:
        hit_metadata = ExportedMetadata(metadata_by_urn[hash_urn])
        # The leading components of the hit urn identify the client.
        hit_metadata.client_urn = rdfvalue.RDFURN(hit).Split(2)[0]
        converted.append(ExportedFileStoreHash(
            metadata=hit_metadata,
            hash=hash_urn.hash_value,
            fingerprint_type=hash_urn.fingerprint_type,
            hash_type=hash_urn.hash_type,
            target_urn=hit))
    return converted
class CheckResultConverter(ExportConverter):
  """Converts CheckResult values into ExportedCheckResult values."""
  input_rdf_type = "CheckResult"
  def Convert(self, metadata, checkresult, token=None):
    """Converts a single CheckResult.
    Args:
      metadata: ExportedMetadata to be used for conversion.
      checkresult: CheckResult to be converted.
      token: Security token.
    Yields:
      One ExportedCheckResult per anomaly, or a single anomaly-less
      ExportedCheckResult when the check reported no anomalies.
    """
    if not checkresult.HasField("anomaly"):
      yield ExportedCheckResult(
          metadata=metadata,
          check_id=checkresult.check_id)
      return
    for anomaly in checkresult.anomaly:
      exported_anomaly = ExportedAnomaly(
          type=anomaly.type,
          severity=anomaly.severity,
          confidence=anomaly.confidence)
      # The remaining anomaly fields are optional; copy them only when set.
      if anomaly.symptom:
        exported_anomaly.symptom = anomaly.symptom
      if anomaly.explanation:
        exported_anomaly.explanation = anomaly.explanation
      if anomaly.generated_by:
        exported_anomaly.generated_by = anomaly.generated_by
      if anomaly.anomaly_reference_id:
        exported_anomaly.anomaly_reference_id.Extend(
            anomaly.anomaly_reference_id)
      if anomaly.finding:
        exported_anomaly.finding.Extend(anomaly.finding)
      yield ExportedCheckResult(
          metadata=metadata,
          check_id=checkresult.check_id,
          anomaly=exported_anomaly)
class RekallResponseConverter(ExportConverter):
  """Export converter for RekallResponse objects."""
  __abstract = True # pylint: disable=g-bad-name
  input_rdf_type = "RekallResponse"
  def HandleMessage(self, message):
    """Handles a single Rekall message.
    Args:
      message: Rekall message.
    Yields:
      Converted objects suitable for export.
    """
    raise NotImplementedError()
  def HandleMessages(self, parsed_messages):
    """Handle all parsed messages.
    Args:
      parsed_messages: List of Rekall messages (every Rekall message is a list
                       itself with message type being the first element).
    Yields:
      Converted objects suitable for export.
    """
    for message in parsed_messages:
      # We do not decode lexicon-based responses. If there's non empty
      # lexicon in the message, we ignore the whole response altogether.
      if message[0] == "l" and message[1]:
        # TODO(user): messages like these should bubble up and end up
        # somewhere in the hunt log UI.
        logging.warn("Non-empty lexicon found. Client %s is too old.",
                     self.rekall_response.client_urn)
        # Early return in a generator: the whole response is dropped.
        return
      # "s" (section) and "t" (table header) messages update the current
      # context; subclasses consult self.context_dict when converting rows.
      if message[0] in ["s", "t"]:
        self.context_dict[message[0]] = message[1]
      for result in self.HandleMessage(message):
        yield result
  def Convert(self, metadata, rekall_response, token=None):
    """Convert a single RekallResponse."""
    # NOTE(review): per-conversion state is stored on self, so a single
    # converter instance is not safe for concurrent Convert calls - confirm.
    # Original RekallResponse object.
    self.rekall_response = rekall_response
    # Metadata used for constructing exported values.
    self.metadata = metadata
    if rekall_response.HasField("json_context_messages"):
      parsed_context_messages = json.loads(
          rekall_response.json_context_messages)
    else:
      parsed_context_messages = []
    # Dictionary with current context. Context is defined by
    # context-specific messages. For example, message "t" that defines
    # a table header is a context-specific message. Section message "s"
    # is context-specific too. By inspecting context_dict it's possible
    # to understand what table and section current message belongs to.
    self.context_dict = dict(parsed_context_messages)
    parsed_messages = json.loads(rekall_response.json_messages)
    for result in self.HandleMessages(parsed_messages):
      yield result
def RekallStringRenderer(x):
  """Function used to render Rekall 'str' objects.

  Rekall encodes strings either under the "str" key or, for data that is not
  plain text, under the "b64" key. Prefer the plain form and fall back to the
  base64 one.
  """
  if "str" in x:
    return x["str"]
  return x["b64"]
def RekallEProcessRenderer(x):
  """Function used to render Rekall '_EPROCESS' objects as "name (pid)"."""
  cybox = x["Cybox"]
  return "%s (%s)" % (cybox["Name"], cybox["PID"])
class DynamicRekallResponseConverter(RekallResponseConverter):
  """Export converter for RekallResponse objects."""
  # Cache of dynamically generated output classes, keyed by
  # "RekallExport_<plugin>" name. Class-level, shared across instances.
  OUTPUT_CLASSES = {}
  # Maps a Rekall type name (matched against an object's "mro" entries) to a
  # callable that renders such an object as a string.
  OBJECT_RENDERERS = {
      "_EPROCESS": RekallEProcessRenderer,
      "Address": lambda x: utils.FormatAsHexString(x["value"]),
      "AddressSpace": lambda x: x["name"],
      "BaseObject": lambda x: "@%s" % utils.FormatAsHexString(x["offset"]),
      "Enumeration": lambda x: "%s (%s)" % (x["enum"], x["value"]),
      "Instruction": lambda x: utils.SmartStr(x["value"]),
      "Literal": lambda x: utils.SmartStr(x["value"]),
      "NativeType": lambda x: utils.SmartStr(x["value"]),
      "NoneObject": lambda x: "-",
      "Pointer": lambda x: utils.FormatAsHexString(x["target"], 14),
      "PaddedAddress": lambda x: utils.FormatAsHexString(x["value"], 14),
      "str": RekallStringRenderer,
      "Struct": lambda x: utils.FormatAsHexString(x["offset"]),
      "UnixTimeStamp": lambda x: utils.FormatAsTimestamp(x["epoch"])
  }
  def _RenderObject(self, obj):
    """Renders a single object - i.e. a table cell."""
    if not hasattr(obj, "iteritems"):
      # Maybe we have to deal with legacy strings, ecnoded as lists with first
      # element being "+" for base64 strings and "*" for unicode strings -
      # check it.
      if isinstance(obj, list) and len(obj) == 2 and obj[0] in ["*", "+"]:
        return utils.SmartStr(obj[1])
      return utils.SmartStr(obj)
    if "string_value" in obj:
      return obj["string_value"]
    if "mro" in obj:
      obj_mro = obj["mro"]
      if isinstance(obj_mro, basestring):
        obj_mro = obj_mro.split(":")
      # Walk the MRO from the most specific type and use the first renderer
      # that knows how to format this kind of object.
      for mro_type in obj_mro:
        if mro_type in self.OBJECT_RENDERERS:
          return self.OBJECT_RENDERERS[mro_type](obj)
    return utils.SmartStr(obj)
  def _GenerateOutputClass(self, class_name, tables):
    """Generates output class with a given name for a given set of tables."""
    output_class = type(utils.SmartStr(class_name),
                        (rdf_structs.RDFProtoStruct,),
                        {})
    if not tables:
      raise RuntimeError("Can't generate output class without Rekall table "
                         "definition.")
    # Proto field numbers are assigned sequentially: metadata, section_name,
    # text, then one string field per unique table column.
    field_number = 1
    output_class.AddDescriptor(rdf_structs.ProtoEmbedded(
        name="metadata", field_number=field_number,
        nested=ExportedMetadata))
    field_number += 1
    output_class.AddDescriptor(
        rdf_structs.ProtoString(name="section_name",
                                field_number=field_number))
    field_number += 1
    output_class.AddDescriptor(
        rdf_structs.ProtoString(name="text",
                                field_number=field_number))
    # All the tables are merged into one. This is done so that if plugin
    # outputs multiple tables, we get all possible columns in the output
    # RDFValue.
    used_names = set()
    for table in tables:
      for column_header in table:
        column_name = None
        try:
          # Prefer the canonical name ("cname") when the header provides one.
          column_name = column_header["cname"]
        except KeyError:
          pass
        if not column_name:
          column_name = column_header["name"]
        if not column_name:
          raise RuntimeError("Can't determine column name in table header.")
        # Columns repeated across merged tables share a single field.
        if column_name in used_names:
          continue
        field_number += 1
        used_names.add(column_name)
        output_class.AddDescriptor(
            rdf_structs.ProtoString(name=column_name,
                                    field_number=field_number))
    return output_class
  def _GetOutputClass(self, plugin_name, tables):
    # Returns the cached output class for this plugin, generating (and
    # caching) it on first use.
    output_class_name = "RekallExport_" + plugin_name
    try:
      return DynamicRekallResponseConverter.OUTPUT_CLASSES[output_class_name]
    except KeyError:
      output_class = self._GenerateOutputClass(output_class_name, tables)
      DynamicRekallResponseConverter.OUTPUT_CLASSES[
          output_class_name] = output_class
      return output_class
  def _HandleTableRow(self, metadata, context_dict, message, output_class):
    """Handles a single row in one of the tables in RekallResponse."""
    attrs = {}
    for key, value in message[1].iteritems():
      # Only copy cells that have a matching field on the generated class.
      if hasattr(output_class, key):
        # ProtoString expects a unicode object, so let's convert
        # everything to unicode strings.
        attrs[key] = utils.SmartUnicode(self._RenderObject(value))
    result = output_class(**attrs)
    result.metadata = metadata
    try:
      result.section_name = self._RenderObject(context_dict["s"]["name"])
    except KeyError:
      # No current section in the context - leave section_name unset.
      pass
    return result
  def HandleMessages(self, parsed_messages):
    """Handles all messages in a Rekall response."""
    # A table header carried over in the context counts as a table too.
    if "t" in self.context_dict:
      tables = [self.context_dict["t"]]
    else:
      tables = []
    # First scan all the messages and find all table definitions there.
    for message in parsed_messages:
      # We do not decode lexicon-based responses. If there's non empty
      # lexicon in the message, we ignore the whole response altogether.
      if message[0] == "l" and message[1]:
        # TODO(user): messages like these should bubble up and end up
        # somewhere in the hunt log UI.
        logging.warn("Non-empty lexicon found. Client %s is too old.",
                     self.rekall_response.client_urn)
        break
      if message[0] == "t":
        tables.append(message[1])
    # Generate output class based on all table definitions.
    output_class = self._GetOutputClass(self.rekall_response.plugin, tables)
    # Fill generated output class instances with values from every row.
    for message in parsed_messages:
      if message[0] in ["s", "t"]:
        self.context_dict[message[0]] = message[1]
      if message[0] == "r":
        yield self._HandleTableRow(self.metadata, self.context_dict, message,
                                   output_class)
class ExportedRekallProcess(rdf_structs.RDFProtoStruct):
  """RDF wrapper around the ExportedRekallProcess protobuf."""
  protobuf = export_pb2.ExportedRekallProcess
class RekallResponseToExportedRekallProcessConverter(RekallResponseConverter):
  """Converts free-form RekallResponse to ExportedRekallProcess."""
  @staticmethod
  def HandleTableRow(metadata, message):
    """Converts one table row into an ExportedRekallProcess, if possible.

    Returns None when any expected key is missing from the row.
    """
    row = message[1]
    try:
      cybox = row["_EPROCESS"]["Cybox"]
      result = ExportedRekallProcess(
          pid=cybox["PID"],
          parent_pid=cybox["Parent_PID"],
          name=cybox["Name"],
          creation_time=cybox["Creation_Time"]["epoch"] * 1000000)
      if metadata:
        result.metadata = metadata
      image_info = cybox["Image_Info"]
      # Copy optional fields only when they are real strings.
      for attribute, key in (("commandline", "Command_Line"),
                             ("fullpath", "Path"),
                             ("trusted_fullpath", "TrustedPath")):
        value = image_info[key]
        if isinstance(value, basestring):
          setattr(result, attribute, value)
      return result
    except KeyError:
      return
  def HandleMessage(self, message):
    """Handles a single Rekall message, yielding the converted row if any."""
    if message[0] != "r":
      return
    converted = self.HandleTableRow(self.metadata, message)
    if converted:
      yield converted
class ExportedRekallWindowsLoadedModule(rdf_structs.RDFProtoStruct):
  """RDF wrapper around the ExportedRekallWindowsLoadedModule protobuf."""
  protobuf = export_pb2.ExportedRekallWindowsLoadedModule
class RekallResponseToExportedRekallWindowsLoadedModuleConverter(
    RekallResponseConverter):
  """Converts suitable RekallResponses to ExportedRekallWindowsLoadedModules."""
  # NOTE(review): redundant - already inherited from RekallResponseConverter.
  input_rdf_type = "RekallResponse"
  @staticmethod
  def HandleTableRow(metadata, message):
    """Handles a table row, converting it if possile."""
    row = message[1]
    try:
      result = ExportedRekallWindowsLoadedModule(
          metadata=metadata,
          fullpath=row["mapped_filename"],
          address=row["base_address"],
          is_in_load_list=row["in_load"],
          is_in_init_list=row["in_init"],
          is_in_mem_list=row["in_mem"])
      # The *_path values are copied only when they are real strings;
      # non-string values are silently skipped.
      in_load_fullpath = row["in_load_path"]
      if isinstance(in_load_fullpath, basestring):
        result.in_load_fullpath = in_load_fullpath
      in_init_fullpath = row["in_init_path"]
      if isinstance(in_init_fullpath, basestring):
        result.in_init_fullpath = in_init_fullpath
      in_mem_fullpath = row["in_mem_path"]
      if isinstance(in_mem_fullpath, basestring):
        result.in_mem_fullpath = in_mem_fullpath
    except KeyError:
      # Any missing column discards the whole row, including a partially
      # built result.
      return
    # Attach process information when the same row also describes a process.
    process = RekallResponseToExportedRekallProcessConverter.HandleTableRow(
        None, message)
    if process:
      result.process = process
    return result
  def HandleMessage(self, message):
    """Handles a single Rekall message."""
    if message[0] == "r":
      result = self.HandleTableRow(self.metadata, message)
      if result:
        yield result
def GetMetadata(client, token=None):
  """Builds ExportedMetadata object for a given client id.
  Args:
    client: RDFURN of a client or VFSGRRClient object itself.
    token: Security token.
  Returns:
    ExportedMetadata object with metadata of the client.
  """
  if isinstance(client, rdfvalue.RDFURN):
    client_fd = aff4.FACTORY.Open(client, mode="r", token=token)
  else:
    client_fd = client
  schema = client_fd.Schema
  def _UnicodeAttribute(attribute):
    # Every string attribute is exported as unicode, defaulting to u"".
    return utils.SmartUnicode(client_fd.Get(attribute, u""))
  metadata = ExportedMetadata()
  metadata.client_urn = client_fd.urn
  metadata.client_age = client_fd.urn.age
  metadata.hostname = _UnicodeAttribute(schema.HOSTNAME)
  metadata.os = _UnicodeAttribute(schema.SYSTEM)
  metadata.uname = _UnicodeAttribute(schema.UNAME)
  metadata.os_release = _UnicodeAttribute(schema.OS_RELEASE)
  metadata.os_version = _UnicodeAttribute(schema.OS_VERSION)
  metadata.usernames = _UnicodeAttribute(schema.USERNAMES)
  metadata.mac_address = _UnicodeAttribute(schema.MAC_ADDRESS)
  client_info = client_fd.Get(schema.CLIENT_INFO)
  if client_info is not None:
    metadata.labels = u",".join(client_info.labels)
  return metadata
def ConvertValuesWithMetadata(metadata_value_pairs, token=None, options=None):
  """Converts a set of RDFValues into a set of export-friendly RDFValues.
  Args:
    metadata_value_pairs: Tuples of (metadata, rdf_value), where metadata is
                          an instance of ExportedMetadata and rdf_value is
                          an RDFValue subclass instance to be exported.
    token: Security token.
    options: rdfvalue.ExportOptions instance that will be passed to
             ExportConverters.
  Yields:
    Converted values. Converted values may be of different types.
  Raises:
    NoConverterFound: in case no suitable converters were found for a value in
                      metadata_value_pairs. This error is only raised after
                      all values in metadata_value_pairs are attempted to be
                      converted. If there are multiple value types that could
                      not be converted because of the lack of corresponding
                      converters, only the last one will be specified in the
                      exception message.
  """
  no_converter_found_error = None
  # Values of the same class are converted together as one batch.
  grouped_pairs = utils.GroupBy(metadata_value_pairs,
                                lambda pair: pair[1].__class__.__name__)
  for _, pairs_group in grouped_pairs.iteritems():
    _, first_value = pairs_group[0]
    converter_classes = ExportConverter.GetConvertersByValue(first_value)
    if not converter_classes:
      # Remember the failure but keep converting the remaining groups.
      no_converter_found_error = "No converters found for value: %s" % str(
          first_value)
      continue
    for converter in [cls(options) for cls in converter_classes]:
      for result in converter.BatchConvert(pairs_group, token=token):
        yield result
  if no_converter_found_error is not None:
    raise NoConverterFound(no_converter_found_error)
def ConvertValues(default_metadata, values, token=None, options=None):
  """Converts a set of RDFValues into a set of export-friendly RDFValues.
  Args:
    default_metadata: export.ExportedMetadata instance with basic
                      information about where the values come from.
                      This metadata will be passed to exporters.
    values: Values to convert. They should be of the same type.
    token: Security token.
    options: rdfvalue.ExportOptions instance that will be passed to
             ExportConverters.
  Returns:
    Converted values. Converted values may be of different types
    (unlike the source values which are all of the same type). This is due to
    the fact that multiple ExportConverters may be applied to the same value
    thus generating multiple converted values of different types.
  Raises:
    NoConverterFound: in case no suitable converters were found for the values.
  """
  # Attach the same metadata to every value - per-value metadata is not
  # available at this level.
  pairs = [(default_metadata, value) for value in values]
  return ConvertValuesWithMetadata(pairs, token=token, options=options)
|
{
"content_hash": "6294c57a8cdddce0bc030d35c1de6c1d",
"timestamp": "",
"source": "github",
"line_count": 1450,
"max_line_length": 80,
"avg_line_length": 35.296551724137935,
"alnum_prop": 0.6593005080109418,
"repo_name": "ahojjati/grr",
"id": "a7c5ef8dfcd336e1be68cb05a225dd7f95d478aa",
"size": "51202",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/export.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "276081"
},
{
"name": "CMake",
"bytes": "3044"
},
{
"name": "CSS",
"bytes": "12677"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "71683"
},
{
"name": "JavaScript",
"bytes": "228300"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "198203"
},
{
"name": "Python",
"bytes": "5181684"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43112"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
}
|
"""Tests that step_data can accept multiple specs at once."""
from recipe_engine import post_process
from recipe_engine.recipe_api import composite_step
PYTHON_VERSION_COMPATIBILITY = 'PY2+3'
DEPS = [
]
# note that the frames from composite_step are omitted in the stack during
# training.
@composite_step
def my_function(): # pragma: no cover
  """Raise a TypeError so the expectation machinery has an exception to see."""
  raise TypeError("BAD DOGE")
def RunSteps(api):
  """Trigger the uncaught exception that the tests below expect."""
  my_function()
def GenTests(api):
  """Expect the TypeError and check the rendered failure reason text."""
  yield api.test(
      'basic',
      api.expect_exception('TypeError'),
      api.post_process(post_process.ResultReason,
                       "Uncaught Exception: TypeError('BAD DOGE')"))
|
{
"content_hash": "3e55a7d49a53af3eca23a15d5301eb69",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 23.296296296296298,
"alnum_prop": 0.6931637519872814,
"repo_name": "luci/recipes-py",
"id": "6896997a40bf8c8b3afc5d3a820fa124c8f9cdb4",
"size": "803",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "recipes/engine_tests/expect_exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "900422"
},
{
"name": "Shell",
"bytes": "5746"
}
],
"symlink_target": ""
}
|
class ainput: # "input" is already a builtin name, hence "advanced input".
    """Small helper that wraps console input behind a simpler interface."""

    def __init__(self, msg=""):
        """Create the helper; a non-empty msg triggers an immediate prompt."""
        self.data = ""  # Holds the most recent user answer.
        if msg != "":
            self.ask(msg)

    def ask(self, msg, req=0):
        """Show the prompt and store the user's answer in self.data.

        When req is 1 the question is repeated until a non-empty answer is
        given (a lone space still counts as an answer - validation is basic).
        """
        if req == 0:
            prompt = msg
        else:
            prompt = msg + " (Required)"
        self.data = raw_input(prompt)
        if req == 1 and self.data == "":
            self.ask(msg, req)

    def getString(self, default=""):
        """Return the stored input, substituting (and storing) default when empty."""
        if self.data == "":
            self.data = default
        return self.data

    def getInteger(self):
        """Return the stored input converted to an int."""
        return int(self.data)

    def getNumber(self):
        """Return the stored input converted to a float."""
        return float(self.data)
|
{
"content_hash": "4b1cfb07a943f3a41a74b1a5425d081f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 127,
"avg_line_length": 38.90625,
"alnum_prop": 0.6160642570281124,
"repo_name": "kwminnick/kwmqueues",
"id": "770a10787bfbba22f97ca0027e0b6d83e8e264eb",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/ainput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26194"
}
],
"symlink_target": ""
}
|
""" RSyncBackup - Wrapper to perform backup management using rsync.
--------------------------------------------------------------------
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The RSyncBackup class in this module can be used to perform automated
backups using the rsync program. Be very careful how you use this code!
It DOES delete files!
"""
__version__ = "1.3"
import time, commands, re
import logging
import os, os.path
class RSyncBackup:
	""" RSyncBackup is used to automate the control of the rsync utility to perform backups.
	It also has a few other features, including:
	- Record when the last backup was taken, and whether it's time to run another one
	- Delete archives once a specified number exist
	"""
	def __init__ (self, lastRunFile="/var/state/backupLastRun.lrf", rsync="/usr/bin/rsync", testRun=0):
		""" Creates an object that can perform backups using Rsync.
		lastRunFile - A file to record when the backup was last performed.
		rsync - Specify the location of the rsync binary
		testRun - Set to 1 to log out what will be done, rather than doing it.
		"""
		self.log = logging.getLogger ("RSyncBackup")
		self.lastRunFile = lastRunFile
		self.rsync = rsync
		self.testRun = testRun
		if (testRun):
			self.log.info ("TestRun detected - no file operations will be performed.")
		# Recorded now and persisted by finish(), so the *start* time of the
		# backup is what ends up in the last run file.
		self.backupStarted = time.gmtime()
	def timeToBackup (self, backupInterval=24*60):
		""" Parse the lastRunFile and determine whether the interval specified has
		ellapsed.
		backupInterval - Time interval in minutes between backups
		"""
		try:
			lrf = open (self.lastRunFile, 'r')
			oldTime = lrf.read()
			lrf.close()
		except Exception, e:
			# A missing or unreadable last run file means a backup is due now.
			self.log.warn ("Exception occured reading the last run file %s. Error: %s" % (self.lastRunFile, str (e)))
			self.backupStarted = time.gmtime()
			return 1
		try:
			lastTime = time.mktime (time.strptime (oldTime))
			nowTime = time.mktime (time.gmtime())
			self.log.debug ("lastTime: %s nowTime: %s" % (str (lastTime), str (nowTime)))
			# backupInterval is in minutes; the comparison is done in seconds.
			if (lastTime + backupInterval * 60 < nowTime):
				self.log.info ("Time to perform backup.")
				self.backupStarted = time.gmtime()
				return 1
			else:
				self.log.info ("Not yet time to backup.")
				return 0
		except Exception, e:
			# An unparsable timestamp also counts as "backup due".
			self.log.warn ("Exception occured parsing last run file %s. Error: %s" % (self.lastRunFile, str (e)))
			self.backupStarted = time.gmtime()
			return 1
	def backup (self, source, destination, archive = None, excludeList = None, includeList = None,):
		""" Perform a backup using rsync.
		source - The source directory who's contents should be backed up.
		destination - The directory that the backup should go into
		archive - (Optional) The directory that previous versions of the files should
		be copied to.
		excludeList - (Optional) A list of paths that should be excluded from the backup.
		Returns true if successful, false if an error occurs.
		"""
		if (self.testRun):
			self.log.info ("Starting test run of backup.")
		else:
			self.log.info ("Starting backup.")
		dateTime = time.strftime ("%d%m%Y-%H%M%S")
		if (archive is not None):
			self.log.debug ("Determining archive directory.")
			# Archives are laid out as archive/yyyy/dd/ddmmyyyy-hhmmss
			# (slices of the ddmmyyyy-hhmmss stamp built above).
			thisArchive = os.path.join (archive, dateTime[4:8], dateTime[2:4], dateTime)
			self.log.debug ("Archive destination is: %s" % thisArchive)
		else:
			thisArchive = None
		# Build the rsync command line piece by piece.
		cmnd = "%s --archive" % self.rsync
		if (thisArchive is not None):
			# --backup moves replaced/deleted files into the archive directory.
			cmnd = "%s --backup --backup-dir=%s" % (cmnd, thisArchive)
		cmnd = "%s --delete" % cmnd
		if (excludeList is not None):
			for exclude in excludeList:
				cmnd = '%s --exclude="%s"' % (cmnd, exclude)
		if (includeList is not None):
			for include in includeList:
				cmnd = '%s --include="%s"' % (cmnd, include)
		cmnd = "%s '%s' '%s'" % (cmnd, source, destination)
		self.log.debug ("Running command: %s" % cmnd)
		if (self.testRun):
			self.log.warn ("TestRun - would execute command: " + cmnd)
		else:
			# NOTE(review): the command is run through a shell; source and
			# destination are only single-quoted, not fully escaped.
			result = commands.getstatusoutput (cmnd)
			if (result[0] != 0):
				self.log.error ("Error running rsync: %s" % result[1])
				return 0
			else:
				self.log.info ("Rsync backup successful.")
				self.log.debug ("Rsync output: %s" % result[1])
		return 1
	def trimArchives (self, archiveDir, filter=None, entriesToKeep=10, removeParentIfEmpty=1):
		""" Delete old archives - WARNING: This deletes files, be careful with it!
		archiveDir - The directory containing the archives
		filter - (Optional) Regular expression used to determine which parts of an
		archive should be deleted. Use with caution!
		entriesToKeep - (Optional) The number of entries in the archive to leave,
		defaults to 10
		removeParentIfEmpty - (Optional) By default if an archive directory is empty it
		will be removed, set to 0 to disable.
		"""
		if (filter is None):
			# Default to triming the total number of archives.
			# Filter on archiveDir/yyyy/dd/ddmmyyyy-hhmmss
			filterStr = os.path.join (re.escape (archiveDir), '[0-9]{4,4}', '[0-9]{2,2}', '[0-9]{8,8}-[0-9]{6,6}$')
			self.log.debug ("Using trim filter of: %s" % filterStr)
			nameFilter = re.compile (filterStr)
		else:
			nameFilter = re.compile (filter)
		walker = pathWalker (nameFilter)
		matchingPaths = walker.walk(archiveDir)
		# Sort the paths so that the oldest archives are first
		matchingPaths.sort()
		# Trim the paths down to just the ones we care about
		pathsToTrim = matchingPaths [0:-1 * entriesToKeep]
		pathKiller = pathRemover ()
		for pathToRemove in pathsToTrim:
			if (self.testRun):
				self.log.warn ("TestRun - would remove path: " + pathToRemove)
			else:
				if (os.path.isdir (pathToRemove)):
					self.log.info ("Recursively deleting all files in %s" % pathToRemove)
					pathKiller.walk (pathToRemove)
					self.log.info ("Removing directory %s" % pathToRemove)
					os.rmdir (pathToRemove)
				else:
					self.log.error ("Removing file %s" % pathToRemove)
					os.remove (pathToRemove)
				if (removeParentIfEmpty):
					# Walk upwards removing now-empty parent directories.
					self.log.debug ("Checking to see whether parent directories are empty")
					lastParent = pathToRemove
					looking = 1
					while (looking):
						parent = os.path.split (lastParent)[0]
						if (lastParent == parent):
							# os.path.split no longer changes the path: root reached.
							self.log.info ("Reach top of hierachy when looking for parents to delete.")
							looking = 0
						else:
							if (len (os.listdir (parent)) == 0):
								if (self.testRun):
									self.log.warn ("TestRun - would remove path: " + parent)
								else:
									self.log.info ("Found that parent directory %s is empty - deleting." % parent)
									os.rmdir (parent)
								# We are going to carry on looking, so note this as the last parent
								lastParent = parent
							else:
								self.log.debug ("Parent directory is not empty - preserving.")
								looking = 0
	def finish (self):
		""" Write out the time the backup started to the last run file (if one used). """
		if (self.testRun):
			self.log.debug ("TestRun - not writing out last run file.")
			return
		if (self.lastRunFile is None):
			self.log.debug ("Last Run File not given, skipping writing.")
			return
		try:
			self.log.debug ("Time backup was started: %s" % time.asctime (self.backupStarted))
			lrf = open (self.lastRunFile, 'w')
			lrf.write (time.asctime (self.backupStarted))
			lrf.close()
			self.log.info ("Last Run File updated succesfully")
		except Exception, e:
			self.log.error ("Exception occured writing the last run file %s. Error: %s" % (self.lastRunFile, str (e)))
			raise Exception ("Error writing to last run file!")
class pathWalker:
	"""Collects every path under a directory whose full name matches a regex."""
	def __init__ (self, regex):
		self.regex = regex
		self.foundPaths = []
	def walk (self, path):
		"""Walk the tree rooted at path and return the accumulated matches."""
		os.path.walk (path, self.walking, None)
		return self.foundPaths
	def walking (self, arg, dirname, names):
		"""os.path.walk visitor: record entries whose joined path matches."""
		for entry in names:
			candidate = os.path.join (dirname, entry)
			if (self.regex.search (candidate)):
				self.foundPaths.append (candidate)
class pathRemover:
	"""Recursively deletes the contents of a directory tree. Handle with care!"""
	def __init__ (self):
		self.dirsToRemove = []
		self.log = logging.getLogger ("RSyncBackup.pathRemover")
	def walk (self, path):
		"""Delete every file below path, then the (now empty) directories."""
		os.path.walk (path, self.walking, None)
		# Directories were recorded top-down; remove them bottom-up so each
		# one is already empty by the time rmdir reaches it.
		self.dirsToRemove.reverse()
		for emptyDir in self.dirsToRemove:
			os.rmdir (emptyDir)
		self.dirsToRemove = []
	def walking (self, arg, dirname, names):
		"""os.path.walk visitor: delete files immediately, queue directories."""
		for entry in names:
			fullPath = os.path.join (dirname, entry)
			if (os.path.isdir (fullPath)):
				self.dirsToRemove.append (fullPath)
			else:
				os.remove (fullPath)
|
{
"content_hash": "ccd35b57342e1aff25790ebb35a9043b",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 110,
"avg_line_length": 39.98393574297189,
"alnum_prop": 0.6856167135395741,
"repo_name": "mick-t/RSyncBackup",
"id": "f81af9266043cecbabebf1fe0526d8060d652bee",
"size": "9956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/RSyncBackup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10555"
}
],
"symlink_target": ""
}
|
import logging
from common import http
from galaxy import wrapper
from bootstrap import settings
from console.taskgroup import helper
from console.service import decorator as s_decorator
from console.taskgroup import decorator as t_decorator
from django.db import transaction
from django.views.decorators.csrf import csrf_exempt
# Module-level logger shared by the view functions below.
LOG = logging.getLogger("console")
# service group 0
# Byte counts below this threshold (1 GiB) are rendered in megabytes.
SHOW_G_BYTES_LIMIT = 1024 * 1024 * 1024
def str_pretty(total_bytes):
    """Format a byte count as a whole number of megabytes or gigabytes.

    BUGFIX: uses floor division (//) instead of /. Under Python 2 integer
    division the result is identical; under Python 3 the original '/' would
    produce values like "1.0M" instead of "1M".

    :param total_bytes: size in bytes (int)
    :returns: e.g. "512M" below 1 GiB, otherwise e.g. "3G"
    """
    if total_bytes < SHOW_G_BYTES_LIMIT:
        return "%sM"%(total_bytes//(1024*1024))
    return "%sG"%(total_bytes//(1024*1024*1024))
@t_decorator.task_group_id_required
def update_task_group(request):
    """Update a task group. Placeholder endpoint: not yet implemented (no-op)."""
    pass
def get_task_status(request):
    """Return the task list plus RUNNING/DEPLOYING/ERROR counters.

    Tasks are looked up either by job 'id' or by 'agent' host (GET params);
    the galaxy 'master' address is mandatory.
    """
    builder = http.ResponseBuilder()
    job_id = request.GET.get('id',None)
    agent_host = request.GET.get('agent',None)
    master_addr = request.GET.get('master',None)
    if not master_addr:
        return builder.error('master is required').build_json()
    galaxy = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)
    tasklist = []
    if job_id:
        ok, tasklist = galaxy.list_task_by_job_id(job_id)
        if not ok:
            return builder.error("fail to get task list").build_json()
    if agent_host:
        # NOTE: when both 'id' and 'agent' are supplied, the agent lookup wins.
        ok, tasklist = galaxy.list_task_by_host(agent_host)
        if not ok:
            return builder.error("fail to get task list").build_json()
    # Pretty-print sizes and tally tasks per status of interest.
    statics = {"RUNNING":0,"DEPLOYING":0,"ERROR":0}
    for task in tasklist:
        task['mem_used'] = str_pretty(task['mem_used'])
        task['mem_limit'] = str_pretty(task['mem_limit'])
        task['cpu_used'] = "%0.2f"%(task['cpu_limit'] * task['cpu_used'])
        if task['status'] in statics:
            statics[task['status']] += 1
    return builder.ok(data={'needInit':False,'taskList':tasklist,'statics':statics}).build_json()
def get_job_sched_history(request):
    """Return the scheduling history (task list) of a job.

    Both 'id' (job id) and 'master' (galaxy master address) GET parameters
    are required.
    """
    builder = http.ResponseBuilder()
    id = request.GET.get('id',None)
    master_addr = request.GET.get('master',None)
    if not master_addr:
        return builder.error('master is required').build_json()
    galaxy = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)
    if not id :
        return builder.error("id is required").build_json()
    status,tasklist = galaxy.job_history(id)
    # BUGFIX: the status flag was silently ignored; on a failed lookup the
    # loop below would crash or report garbage. Mirror get_task_status().
    if not status:
        return builder.error("fail to get task list").build_json()
    for task in tasklist:
        task['mem_used'] = str_pretty(task['mem_used'])
        task['mem_limit'] = str_pretty(task['mem_limit'])
        task['cpu_used'] ="%0.2f"%(task['cpu_limit'] * task['cpu_used'])
    return builder.ok(data={'needInit':False,'taskList':tasklist}).build_json()
|
{
"content_hash": "edac79121c982d308ac9e84e217908c2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 97,
"avg_line_length": 36.63380281690141,
"alnum_prop": 0.6478277585544021,
"repo_name": "leoYY/galaxy",
"id": "436d155ba60a7dbfd801c3d0d4256a2ba926d135",
"size": "2835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "console/backend/src/console/taskgroup/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "C++",
"bytes": "254517"
},
{
"name": "CSS",
"bytes": "60830"
},
{
"name": "Go",
"bytes": "2257"
},
{
"name": "HTML",
"bytes": "27661"
},
{
"name": "JavaScript",
"bytes": "45749"
},
{
"name": "Makefile",
"bytes": "2810"
},
{
"name": "Protocol Buffer",
"bytes": "5607"
},
{
"name": "Python",
"bytes": "87029"
},
{
"name": "Shell",
"bytes": "8770"
},
{
"name": "Smarty",
"bytes": "948"
}
],
"symlink_target": ""
}
|
import math, sys
def cross_track_distance(d_13, theta_13, theta_12):
    """Calculate distance from great circle path (1 -> 2) to point (3).

    Adapted from http://www.movable-type.co.uk/scripts/latlong.html

    This implementation does not produce a great-circle distance but a mere
    straight-line (through the earth) distance. We don't need anything more
    complicated for our purposes of comparison.

    d_13:     Distance from origin to third point (any distance measure)
    theta_13: Initial bearing from origin to third point (degrees)
    theta_12: Initial bearing from origin to destination (degrees)
    """
    bearing_delta_rad = math.radians(theta_13 - theta_12)
    return d_13 * math.sin(bearing_delta_rad)
if __name__ == "__main__":
    # Smoke-test with arbitrary sample values. Using print() with a single
    # parenthesized argument behaves identically under Python 2 (where this
    # file's original 'print' statement syntax lived) and Python 3.
    print(cross_track_distance(34, 237, 187))
|
{
"content_hash": "5a8c92f7236bedede295f74298f78a60",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 38.3,
"alnum_prop": 0.6997389033942559,
"repo_name": "mauzeh/formation-flight",
"id": "f7f2580df28ff22c0b3fe69e1019736f306d236f",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/cross_track_distance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "209702"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib import admin
# Root URLconf: everything falls through to the polls app; the Django admin
# UI is mounted under /admin/.
urlpatterns = [
    url(r'^', include('polls.urls')),
    url(r'^admin/', admin.site.urls),
]
|
{
"content_hash": "fc56282417e066d1306cd040000556bb",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.6764705882352942,
"repo_name": "megancoyle/polling_app",
"id": "a8025fe0570765c4579572470ed252741d1c3bb1",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "933"
},
{
"name": "HTML",
"bytes": "3676"
},
{
"name": "Procfile",
"bytes": "39"
},
{
"name": "Python",
"bytes": "16816"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the FormulaIndex model
    # (an inverted-index entry keyed by term) and drops the now-unused
    # 'formula' and 'name' columns from Formula.

    dependencies = [
        ('meas_models', '0012_formula_formula'),
    ]

    operations = [
        migrations.CreateModel(
            name='FormulaIndex',
            fields=[
                ('indexkey', models.CharField(max_length=64, primary_key=True, serialize=False, verbose_name='index key')),
                ('docsids', models.CharField(blank=True, max_length=9192, null=True)),
                ('df', models.PositiveIntegerField(blank=True, default=1, verbose_name='frequency')),
            ],
        ),
        migrations.RemoveField(
            model_name='formula',
            name='formula',
        ),
        migrations.RemoveField(
            model_name='formula',
            name='name',
        ),
    ]
|
{
"content_hash": "92dc1f2764d50823106df11abb840e86",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 123,
"avg_line_length": 29.586206896551722,
"alnum_prop": 0.5617715617715617,
"repo_name": "deka108/meas_deka",
"id": "eb5078c79ed702f357bc69ebdd00796104a7b896",
"size": "931",
"binary": false,
"copies": "2",
"ref": "refs/heads/release-deka",
"path": "meas_models/migrations/0013_auto_20170116_2251.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120893"
},
{
"name": "HTML",
"bytes": "500260"
},
{
"name": "JavaScript",
"bytes": "1112443"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "350763"
}
],
"symlink_target": ""
}
|
"""Module containing nginx installation functions."""
from perfkitbenchmarker import errors
# Repo file content for the upstream nginx yum repository.
RHEL_REPO = ('[nginx]\n'
             'name=nginx repo\n'
             'baseurl=https://nginx.org/packages/rhel/$releasever/$basearch/\n'
             'gpgcheck=0\n'
             'enabled=1')


def YumInstall(vm):
    """Installs nginx on the VM."""
    # Register the upstream nginx repository before installing.
    vm.RemoteCommand(
        "echo '%s' | sudo tee /etc/yum.repos.d/nginx.repo" % RHEL_REPO)
    try:
        vm.InstallPackages('nginx')
    except errors.VmUtil.SshConnectionError:
        # Amazon Linux does not have a releasever configured.
        patch_repo_cmd = ('sudo sed -i -e "s/\\$releasever/6/" '
                          '/etc/yum.repos.d/nginx.repo')
        vm.RemoteCommand(patch_repo_cmd)
        vm.InstallPackages('nginx')
def AptInstall(vm):
    """Installs nginx on the VM (Debian-family: distro package is enough)."""
    vm.InstallPackages('nginx')
|
{
"content_hash": "f7fdee6f62cae39e419965b4db266d2c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6153846153846154,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "75c278713662b3a3cdef8e3c6bcb71260a9eb5ac",
"size": "1430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/linux_packages/nginx.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
}
|
import django_filters
from django.contrib.auth import get_user_model
from .models import Task, Sprint
User = get_user_model()
class NullFilter(django_filters.BooleanFilter):
    """Filter on a field set as null or not."""

    def filter(self, qs, value):
        # No value supplied: leave the queryset untouched.
        if value is None:
            return qs
        # NOTE(review): relies on the `name` attribute of older django-filter
        # releases; newer versions renamed it to `field_name` — verify against
        # the pinned django-filter version.
        lookup = {"{}__isnull".format(self.name): value}
        return qs.filter(**lookup)
class SprintFilter(django_filters.FilterSet):
    """Filter sprints by an inclusive end-date window (end_min/end_max).

    NOTE: django-filter renamed ``lookup_type`` to ``lookup_expr``; this
    class already uses the new keyword (see page 103 of the book / PDF).
    """
    end_min = django_filters.DateFilter(name='end', lookup_expr='gte')
    end_max = django_filters.DateFilter(name='end', lookup_expr='lte')

    class Meta:
        model = Sprint
        fields = ('end_min', 'end_max',)
class TaskFilter(django_filters.FilterSet):
    """Filter tasks by sprint, status, assignee, or backlog membership."""
    # 'backlog=true' selects tasks with no sprint (sprint IS NULL).
    backlog = NullFilter(name='sprint')

    class Meta:
        model = Task
        fields = ('sprint', 'status', 'assigned', 'backlog', )

    def __init__(self, *args, **kwargs):
        """Make the 'assigned' filter match on username instead of primary key."""
        super().__init__(*args, **kwargs)
        self.filters['assigned'].extra.update(
            {'to_field_name': User.USERNAME_FIELD}
        )
|
{
"content_hash": "d40021d09bba3e93cd4ddb30eeb08850",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 26.127659574468087,
"alnum_prop": 0.6148208469055375,
"repo_name": "mezklador/lightweight-django",
"id": "5b897c79940bc5dee0ae93cb25db56e52b98ce7e",
"size": "1228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "04/scrum/board/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "177618"
},
{
"name": "HTML",
"bytes": "32617"
},
{
"name": "JavaScript",
"bytes": "31535"
},
{
"name": "Python",
"bytes": "82936"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
}
|
# User-facing message templates (str.format placeholders) for the
# Multidimension Supplemental Tools.
INPUT_DATASET_DOES_NOT_RESOLVE_TO_FILENAME = \
    "Input OPeNDAP Dataset: URL {} does not resolve to a filename."
INPUT_DATASET_URL_MALFORMED = \
    "Input OPeNDAP Dataset: URL {} is malformed or references inaccessible " \
    "data."
INPUT_DATASET_GENERIC_ERROR = \
    "Input OPeNDAP Dataset: URL {}: {}."
INPUT_FILE_DOES_NOT_EXIST = \
    "Input File or URL String: Dataset {} does not exist or is not supported."
INPUT_FILE_GENERIC_ERROR = \
    "Input File: URL {}: {}."
VARIABLES_DO_NOT_EXIST = \
    "Variables: {} do not exist in the {}."
NONE_OF_VARIABLES_EXISTS = \
    "Variables: No valid fields specified."
VARIABLES_MUST_SHARE_DIMENSIONS = \
    "Variables: All spatial variables must share the same spatial dimensions."
OUTPUT_FILE_EXTENSION_MUST_BE_NC = \
    "Output netCDF File: Output file extension must be .nc."
DIMENSION_NOT_PRESENT = \
    "Dimensions: Dimension {} not present in selected variable(s)."
MULTIDIMENSIONAL_DIMENSIONS_NOT_SUPPORTED = \
    "Dimensions: Dimension {} depends on multiple dimensions. This is not supported."
INVALID_DATE_TIME = \
    "Dimensions: Invalid date/time value."
SKIPPING_SPATIAL_DIMENSION = \
    "Dimensions: Spatial coordinate variables may not be used as " \
    "dimensions. Select spatial coordinates using the Extent parameter."
OPENDAP_TO_NETCDF_HISTORY = \
    "Created {} by ArcGIS OPeNDAP to NetCDF tool using the following " \
    "OPeNDAP URL: {}"
# BUGFIX: corrected typo "folowing" -> "following" in this user-facing message.
TEMPORAL_AGGREGATION_HISTORY = \
    "Created {} by ArcGIS using the following command: {}"
|
{
"content_hash": "1b4f66620f18c4191f3a5c29dc07f312",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 40.026315789473685,
"alnum_prop": 0.702827087442472,
"repo_name": "pshowalter/solutions-geoprocessing-toolbox",
"id": "6b7e7c0382260b6ca3cf3868d244869e64f98406",
"size": "1546",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "military_aspects_of_weather/scripts/MultidimensionSupplementalTools/MultidimensionSupplementalTools/Scripts/mds/messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6218"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "220035"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "807424"
}
],
"symlink_target": ""
}
|
"""Tests for Google Life Sciences Run Pipeline operator """
import unittest
from unittest import mock
from airflow.providers.google.cloud.operators.life_sciences import LifeSciencesRunPipelineOperator
# Minimal pipeline request body accepted by the Life Sciences API.
TEST_BODY = {"pipeline": {"actions": [{}], "resources": {}, "environment": {}, "timeout": '3.5s'}}
# Canned operation resource returned by the mocked hook.
TEST_OPERATION = {
    "name": 'operation-name',
    "metadata": {"@type": 'anytype'},
    "done": True,
    "response": "response",
}
TEST_PROJECT_ID = "life-science-project-id"
TEST_LOCATION = 'test-location'
class TestLifeSciencesRunPipelineOperator(unittest.TestCase):
    """Unit tests for LifeSciencesRunPipelineOperator.execute with a mocked hook."""

    @mock.patch("airflow.providers.google.cloud.operators.life_sciences.LifeSciencesHook")
    def test_executes(self, mock_hook):
        # The operator should forward the hook's run_pipeline result verbatim.
        hook = mock_hook.return_value
        hook.run_pipeline.return_value = TEST_OPERATION
        operator = LifeSciencesRunPipelineOperator(
            task_id='task-id', body=TEST_BODY, location=TEST_LOCATION, project_id=TEST_PROJECT_ID
        )
        self.assertEqual(operator.execute(None), TEST_OPERATION)

    @mock.patch("airflow.providers.google.cloud.operators.life_sciences.LifeSciencesHook")
    def test_executes_without_project_id(self, mock_hook):
        # project_id is optional; execution must still succeed without it.
        hook = mock_hook.return_value
        hook.run_pipeline.return_value = TEST_OPERATION
        operator = LifeSciencesRunPipelineOperator(
            task_id='task-id',
            body=TEST_BODY,
            location=TEST_LOCATION,
        )
        self.assertEqual(operator.execute(None), TEST_OPERATION)
|
{
"content_hash": "c5e077c8f477647f25bbe870df395f83",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 98,
"avg_line_length": 38.53658536585366,
"alnum_prop": 0.6879746835443038,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "cc08e13ff8ff850f07379c3de3a0bd275c708da6",
"size": "2367",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/operators/test_life_sciences.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
}
|
"""Qt dock window."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from collections import defaultdict
from functools import partial
import logging
from .qt import (
QApplication, QWidget, QDockWidget, QHBoxLayout, QVBoxLayout, QPushButton, QLabel, QCheckBox,
QMenu, QToolBar, QStatusBar, QMainWindow, QMessageBox, Qt, QPoint, QSize, _load_font,
_wait, prompt, show_box, screenshot as make_screenshot)
from .state import GUIState, _gui_state_path, _get_default_state_path
from .actions import Actions, Snippets
from phylib.utils import emit, connect
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# GUI utils
# -----------------------------------------------------------------------------
def _try_get_matplotlib_canvas(view):
    """Get the Qt widget from a matplotlib figure."""
    try:
        from matplotlib.pyplot import Figure
        from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg

        # Wrap, in order of precedence: the figure itself, then a `.figure`
        # attribute, then a `.canvas.figure` attribute.
        if isinstance(view, Figure):
            return FigureCanvasQTAgg(view)
        owned_figure = getattr(view, 'figure', None)
        if isinstance(owned_figure, Figure):
            return FigureCanvasQTAgg(owned_figure)
        canvas_figure = getattr(getattr(view, 'canvas', None), 'figure', None)
        if isinstance(canvas_figure, Figure):
            return FigureCanvasQTAgg(canvas_figure)
    except ImportError as e:  # pragma: no cover
        logger.warning("Import error: %s", e)
    # Not a matplotlib object: hand the view back unchanged.
    return view
def _try_get_opengl_canvas(view):
    """Convert from QOpenGLWindow to QOpenGLWidget."""
    from phy.plot.base import BaseCanvas
    # The canvas is either the view itself or its `.canvas` attribute.
    canvas = view if isinstance(view, BaseCanvas) else getattr(view, 'canvas', None)
    if isinstance(canvas, BaseCanvas):
        return QWidget.createWindowContainer(canvas)
    return view
def _widget_position(widget):  # pragma: no cover
    """Return the global (screen) position of a widget's top-left corner."""
    top_left = widget.geometry().topLeft()
    return widget.parentWidget().mapToGlobal(top_left)
# -----------------------------------------------------------------------------
# Dock widget
# -----------------------------------------------------------------------------
DOCK_TITLE_STYLESHEET = '''
* {
padding: 0;
margin: 0;
border: 0;
background: #232426;
color: white;
}
QPushButton {
padding: 4px;
margin: 0 1px;
}
QCheckBox {
padding: 2px 4px;
margin: 0 1px;
}
QLabel {
padding: 3px;
}
QPushButton:hover, QCheckBox:hover {
background: #323438;
}
QPushButton:pressed {
background: #53575e;
}
QPushButton:checked {
background: #6c717a;
}
'''
DOCK_STATUS_STYLESHEET = '''
* {
padding: 0;
margin: 0;
border: 0;
background: black;
color: white;
}
QLabel {
padding: 3px;
}
'''
class DockWidget(QDockWidget):
    """A dock widget with a custom title bar.

    The title bar has a status text at the middle, and a group of buttons on the right.
    By default, the buttons on the right are screenshot and close. New buttons can be added
    in this group, from right to left.

    """

    # If True, a confirmation prompt is shown before the view is closed.
    confirm_before_close_view = False
    # Status text longer than this is elided in the middle by set_status().
    max_status_length = 64

    def __init__(self, *args, widget=None, **kwargs):
        """Create the dock around `widget`; other args go to QDockWidget."""
        super(DockWidget, self).__init__(*args, **kwargs)

        # Load the font awesome font.
        self._font = _load_font('fa-solid-900.ttf')
        # Mapping {name: QWidget} of the title-bar buttons/checkboxes.
        self._dock_widgets = {}
        # The wrapped Qt widget (the actual view canvas).
        self._widget = widget

    def closeEvent(self, e):
        """Qt slot when the window is closed."""
        emit('close_dock_widget', self)
        super(DockWidget, self).closeEvent(e)

    def add_button(
            self, callback=None, text=None, icon=None, checkable=False,
            checked=False, event=None, name=None):
        """Add a button to the dock title bar, to the right.

        Parameters
        ----------

        callback : function
            Callback function when the button is clicked.
        text : str
            Text of the button.
        icon : str
            Fontawesome icon of the button specified as a unicode string with 4 hexadecimal
            characters.
        checkable : boolean
            Whether the button is checkable.
        checked : boolean
            Whether the checkable button is initially checked.
        event : str
            Name of the event that is externally raised when the status of the button is changed.
            This is used to synchronize the button's checked status when the value changes
            via another mean than clicking on the button.
        name : str
            Name of the button.

        """
        if callback is None:
            # Decorator form: return a partial that receives the callback.
            return partial(
                self.add_button, text=text, icon=icon, name=name,
                checkable=checkable, checked=checked, event=event)
        # Derive a unique name from the callback or the button text.
        name = name or getattr(callback, '__name__', None) or text
        assert name
        # An icon is given as the 4 hex digits of a Font Awesome code point.
        button = QPushButton(chr(int(icon, 16)) if icon else text)
        if self._font:
            button.setFont(self._font)
        button.setCheckable(checkable)
        if checkable:
            button.setChecked(checked)
        button.setToolTip(name)

        if callback:
            @button.clicked.connect
            def on_clicked(state):
                return callback(state)

        # Change the state of the button when this event is called.
        # NOTE(review): `self.view` appears to be attached externally by the
        # GUI when the dock is created — confirm against the caller.
        if event:
            @connect(event=event, sender=self.view)
            def on_state_changed(sender, checked):
                button.setChecked(checked)

        assert name not in self._dock_widgets
        self._dock_widgets[name] = button
        # Buttons are laid out right-to-left (see _create_title_bar).
        self._buttons_layout.addWidget(button, 1)
        return button

    def add_checkbox(self, callback=None, text=None, checked=False, name=None):
        """Add a checkbox to the dock title bar, to the right.

        Parameters
        ----------

        callback : function
            Callback function when the checkbox is clicked.
        text : str
            Text of the checkbox.
        checked : boolean
            Whether the checkbox is initially checked.
        name : str
            Name of the button.

        """
        if callback is None:
            # Decorator form, as in add_button().
            return partial(self.add_checkbox, text=text, checked=checked, name=name)
        name = name or getattr(callback, '__name__', None) or text
        assert name
        checkbox = QCheckBox(text)
        # Put the label on the left of the checkbox (RightToLeft == 2).
        checkbox.setLayoutDirection(2)
        checkbox.setToolTip(name)
        if checked:
            checkbox.setCheckState(Qt.Checked if checked else Qt.Unchecked)

        if callback:
            @checkbox.stateChanged.connect
            def on_state_changed(state):
                return callback(state == Qt.Checked)

        assert name not in self._dock_widgets
        self._dock_widgets[name] = checkbox
        self._buttons_layout.addWidget(checkbox, 1)
        return checkbox

    def get_widget(self, name):
        """Get a dock title bar widget by its name."""
        return self._dock_widgets[name]

    @property
    def status(self):
        """Current status text of the title bar."""
        return self._status.text()

    def set_status(self, text):
        """Set the status text of the widget."""
        n = self.max_status_length
        if len(text) >= n:
            # Elide the middle of over-long status strings.
            text = text[:n // 2] + ' ... ' + text[-n // 2:]
        self._status.setText(text)

    def _default_buttons(self):
        """Create the default buttons on the right."""
        # Only show the close button if the dock widget is closable.
        # (DockWidgetClosable is bit 0 of the features flags.)
        if int(self.features()) % 2 == 1:
            # Close button.
            @self.add_button(name='close', text='✕')
            def on_close(e):  # pragma: no cover
                if not self.confirm_before_close_view or show_box(
                        prompt(
                            "Close %s?" % self.windowTitle(),
                            buttons=['yes', 'no'], title='Close?')) == 'yes':
                    self.close()

        # Screenshot button.
        @self.add_button(name='screenshot', icon='f030')
        def on_screenshot(e):  # pragma: no cover
            # Prefer the view's own screenshot method when it has one.
            if hasattr(self.view, 'screenshot'):
                self.view.screenshot()
            else:
                make_screenshot(self.view)

        # View menu button.
        @self.add_button(name='view_menu', icon='f0c9')
        def on_view_menu(e):  # pragma: no cover
            # Display the view menu just below the button.
            button = self._dock_widgets['view_menu']
            x = _widget_position(button).x()
            y = _widget_position(self._widget).y()
            self._menu.exec(QPoint(x, y))

    def _create_menu(self):
        """Create the contextual menu for this view."""
        self._menu = QMenu("%s menu" % self.objectName(), self)

    def _create_title_bar(self):
        """Create the title bar."""
        self._title_bar = QWidget(self)

        self._layout = QHBoxLayout(self._title_bar)
        self._layout.setContentsMargins(0, 0, 0, 0)
        self._layout.setSpacing(0)
        self._title_bar.setStyleSheet(DOCK_TITLE_STYLESHEET)

        # Left part of the bar.
        # ---------------------

        # Widget name.
        label = QLabel(self.windowTitle())
        self._layout.addWidget(label)

        # Space.
        # ------
        self._layout.addStretch(1)

        # Buttons on the right.
        # ---------------------
        self._buttons = QWidget(self._title_bar)
        self._buttons_layout = QHBoxLayout(self._buttons)
        # Lay the buttons out right-to-left (RightToLeft == 1).
        self._buttons_layout.setDirection(1)
        self._buttons_layout.setContentsMargins(0, 0, 0, 0)
        self._buttons_layout.setSpacing(1)
        self._buttons.setLayout(self._buttons_layout)

        # Add the default buttons.
        self._default_buttons()

        # Layout margin.
        self._layout.addWidget(self._buttons)
        self._title_bar.setLayout(self._layout)
        self.setTitleBarWidget(self._title_bar)

    def _create_status_bar(self):
        """Wrap the view widget and its one-line status label in a container."""
        # Dock has requested widget and status bar.
        widget_container = QWidget(self)
        widget_layout = QVBoxLayout(widget_container)
        widget_layout.setContentsMargins(0, 0, 0, 0)
        widget_layout.setSpacing(0)
        widget_layout.addWidget(self._widget, 100)

        # Widget status text.
        self._status = QLabel('')
        self._status.setMaximumHeight(30)
        self._status.setStyleSheet(DOCK_STATUS_STYLESHEET)
        widget_layout.addWidget(self._status, 1)

        widget_container.setLayout(widget_layout)
        self.setWidget(widget_container)
def _create_dock_widget(widget, name, closable=True, floatable=True):
    """Create a dock widget wrapping any Qt widget."""
    dock = DockWidget(widget=widget)
    dock.setObjectName(name)
    dock.setWindowTitle(name)

    # Build the feature flags from the requested options.
    features = QDockWidget.DockWidgetMovable
    if closable:
        features = features | QDockWidget.DockWidgetClosable
    if floatable:
        features = features | QDockWidget.DockWidgetFloatable
    dock.setFeatures(features)

    # The dock may be attached to any side of the main window.
    dock.setAllowedAreas(
        Qt.LeftDockWidgetArea |
        Qt.RightDockWidgetArea |
        Qt.TopDockWidgetArea |
        Qt.BottomDockWidgetArea
    )

    # Finalize the dock internals: contextual menu, title bar, status bar.
    dock._create_menu()
    dock._create_title_bar()
    dock._create_status_bar()
    return dock
def _get_dock_position(position):
    """Map a position name ('left'/'right'/'top'/'bottom') to the Qt dock
    area constant; None or empty defaults to the right area."""
    areas = {
        'left': Qt.LeftDockWidgetArea,
        'right': Qt.RightDockWidgetArea,
        'top': Qt.TopDockWidgetArea,
        'bottom': Qt.BottomDockWidgetArea,
    }
    return areas[position or 'right']
def _prompt_save():  # pragma: no cover
    """Show a prompt asking the user whether he wants to save or not.

    Output is 'save', 'cancel', or 'close'

    """
    box = prompt(
        "Do you want to save your changes before quitting?",
        buttons=['save', 'cancel', 'close'], title='Save')
    return show_box(box)
def _remove_duplicates(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
# -----------------------------------------------------------------------------
# GUI main window
# -----------------------------------------------------------------------------
class GUI(QMainWindow):
"""A Qt main window containing docking widgets. This class derives from `QMainWindow`.
Constructor
-----------
position : 2-tuple
Coordinates of the GUI window on the screen, in pixels.
size : 2-tuple
Requested size of the GUI window, in pixels.
name : str
Name of the GUI window, set in the title bar.
subtitle : str
Subtitle of the GUI window, set in the title bar after the name.
view_creator : dict
Map view classnames to functions that take no arguments and return a new view instance
of that class.
view_count : dict
Map view classnames to integers specifying the number of views to create for every
view class.
default_views : list-like
List of view names to create by default (overriden by `view_count` if not empty).
config_dir : str or Path
User configuration directory used to load/save the GUI state
enable_threading : boolean
Whether to enable threading in views or not (used in `ManualClusteringView`).
Events
------
close(gui)
show(gui)
close_view(view, gui)
"""
default_shortcuts = {
'enable_snippet_mode': ':',
'save': 'ctrl+s',
'about': '?',
'show_all_shortcuts': 'h',
'exit': 'ctrl+q',
}
default_snippets = {}
has_save_action = True
def __init__(
self, position=None, size=None, name=None, subtitle=None, view_creator=None,
view_count=None, default_views=None, config_dir=None, enable_threading=True, **kwargs):
# HACK to ensure that closeEvent is called only twice (seems like a
# Qt bug).
self._enable_threading = enable_threading
self._closed = False
if not QApplication.instance(): # pragma: no cover
raise RuntimeError("A Qt application must be created.")
super(GUI, self).__init__()
self.setDockOptions(
QMainWindow.AllowTabbedDocks | QMainWindow.AllowNestedDocks)
self.setAnimated(False)
logger.debug("Creating GUI.")
self._set_name(name, str(subtitle or ''))
position = position or (200, 200)
size = size or (800, 600)
self._set_pos_size(position, size)
# Registered functions.
self._registered = {}
# List of attached Actions instances.
self.actions = []
# Mapping {name: menuBar}.
self._menus = {}
ds = self.default_shortcuts
self.file_actions = Actions(self, name='File', menu='&File', default_shortcuts=ds)
self.view_actions = Actions(self, name='View', menu='&View', default_shortcuts=ds)
self.help_actions = Actions(self, name='Help', menu='&Help', default_shortcuts=ds)
# Views,
self._views = []
self._view_class_indices = defaultdict(int) # Dictionary {view_name: next_usable_index}
# Create the GUI state.
state_path = _gui_state_path(self.name, config_dir=config_dir)
default_state_path = kwargs.pop('default_state_path', _get_default_state_path(self))
self.state = GUIState(state_path, default_state_path=default_state_path, **kwargs)
# View creator: dictionary {view_class: function_that_adds_view}
self.default_views = default_views or ()
self.view_creator = view_creator or {}
# View count: take the requested one, or the GUI state one.
self._requested_view_count = (
view_count if view_count is not None else self.state.get('view_count', {}))
# If there is still no view count, use a default one.
self._requested_view_count = self._requested_view_count or {
view_name: 1 for view_name in default_views or ()}
# Status bar.
self._lock_status = False
self._status_bar = QStatusBar(self)
self.setStatusBar(self._status_bar)
# Toolbar.
self._toolbar = QToolBar('Toolbar', self)
self._toolbar.setObjectName('Toolbar')
self._toolbar.setIconSize(QSize(24, 24))
self._toolbar.hide()
self.addToolBar(self._toolbar)
# Create and attach snippets.
self.snippets = Snippets(self)
@connect(sender=self)
def on_show(sender):
logger.debug("Load the geometry state.")
gs = self.state.get('geometry_state', None)
self.restore_geometry_state(gs)
def _set_name(self, name, subtitle):
"""Set the GUI name."""
if name is None:
name = self.__class__.__name__
title = name if not subtitle else name + ' - ' + subtitle
self.setWindowTitle(title)
self.setObjectName(name)
# Set the name in the GUI.
self.name = name
def _set_pos_size(self, position, size):
"""Set the position and size of the GUI."""
if position is not None:
self.move(position[0], position[1])
if size is not None:
self.resize(QSize(size[0], size[1]))
def set_default_actions(self):
"""Create the default actions (file, views, help...)."""
# File menu.
if self.has_save_action:
@self.file_actions.add(icon='f0c7', toolbar=True)
def save():
emit('request_save', self)
@self.file_actions.add
def exit():
"""Close the GUI."""
self.close()
# Add "Add view" action.
for view_name in sorted(self.view_creator.keys()):
self.view_actions.add(
partial(self.create_and_add_view, view_name),
name='Add %s' % view_name,
docstring="Add %s" % view_name,
show_shortcut=False)
self.view_actions.separator()
# Help menu.
@self.help_actions.add(shortcut=('HelpContents', 'h'))
def show_all_shortcuts():
"""Show the shortcuts of all actions."""
for actions in self.actions:
actions.show_shortcuts()
@self.help_actions.add(shortcut='?')
def about(): # pragma: no cover
"""Display an about dialog."""
from phy import __version_git__
msg = "phy {} v{}".format(self.name, __version_git__)
try:
from phylib import __version__
msg += "\nphylib v{}".format(__version__)
except ImportError:
pass
QMessageBox.about(self, "About", msg)
# Events
# -------------------------------------------------------------------------
def closeEvent(self, e):
"""Qt slot when the window is closed."""
if self._closed:
return
_wait(250)
res = emit('close', self)
# Discard the close event if False is returned by one of the callback
# functions.
if False in res: # pragma: no cover
e.ignore()
return
super(GUI, self).closeEvent(e)
self._closed = True
# Save the state to disk when closing the GUI.
logger.debug("Save the geometry state.")
gs = self.save_geometry_state()
self.state['geometry_state'] = gs
self.state['view_count'] = self.view_count
self.state.save()
def show(self):
"""Show the window."""
super(GUI, self).show()
emit('show', self)
# Views
# -------------------------------------------------------------------------
@property
def views(self):
"""Return the list of views in the GUI."""
# NOTE: need to do a copy because the list will be modified when iterating through
# views for closing them.
return self._views.copy()
@property
def view_count(self):
"""Return the number of views of every type, as a dictionary mapping view class names
to an integer."""
vc = defaultdict(int)
for v in self.views:
vc[v.__class__.__name__] += 1
return dict(vc)
def list_views(self, *classes):
"""Return the list of views which are instances of one or several classes."""
s = set(classes)
return [
view for view in self._views
if s.intersection({view.__class__, view.__class__.__name__})]
def get_view(self, cls, index=0):
"""Return a view from a given class. If there are multiple views of the same class,
specify the view index (0 by default)."""
views = self.list_views(cls)
if index <= len(views) - 1:
return views[index]
def _set_view_name(self, view):
"""Set a unique name for a view: view class name, followed by the view index."""
assert view not in self._views
# Get all views of the same class.
cls = view.__class__
basename = cls.__name__
views = self.list_views(view.__class__)
if not views:
# If the view is the first of its class, just use the base name.
name = basename
else:
# index is the next usable index for the view's class.
index = self._view_class_indices.get(cls, 0)
assert index >= 1
name = '%s (%d)' % (basename, index)
view.name = name
return name
def create_and_add_view(self, view_name):
"""Create a view and add it to the GUI."""
assert isinstance(view_name, str)
fn = self.view_creator.get(view_name, None)
if fn is None:
return
# Create the view with the view creation function.
view = fn()
if view is None: # pragma: no cover
logger.warning("Could not create view %s.", view_name)
return
# Attach the view to the GUI if it has an attach(gui) method,
# otherwise add the view.
if hasattr(view, 'attach'):
view.attach(self)
else:
self.add_view(view)
return view
def create_views(self):
"""Create and add as many views as specified in view_count."""
self.view_actions.separator()
# Keep the order of self.default_views.
view_names = [vn for vn in self.default_views if vn in self._requested_view_count]
# We add the views in the requested view count, but not in the default views.
view_names.extend([
vn for vn in self._requested_view_count.keys() if vn not in self.default_views])
# Remove duplicates in view names.
view_names = _remove_duplicates(view_names)
# We add the view in the order they appear in the default views.
for view_name in view_names:
n_views = self._requested_view_count[view_name]
if n_views <= 0:
continue
assert n_views >= 1
for i in range(n_views):
self.create_and_add_view(view_name)
    def add_view(self, view, position=None, closable=True, floatable=True, floating=None):
        """Add a dock widget to the main window.
        Parameters
        ----------
        view : View
        position : str
            Relative position where to add the view (left, right, top, bottom).
        closable : boolean
            Whether the view can be closed by the user.
        floatable : boolean
            Whether the view can be detached from the main GUI.
        floating : boolean
            Whether the view should be added in floating mode or not.

        Returns
        -------
        dock : the created dock widget wrapping the view.
        """
        logger.debug("Add view %s to GUI.", view.__class__.__name__)
        # Compute the unique name *before* registering the view, then bump
        # the per-class index used to name subsequent views of this class.
        name = self._set_view_name(view)
        self._views.append(view)
        self._view_class_indices[view.__class__] += 1
        # Get the Qt canvas for matplotlib/OpenGL views.
        widget = _try_get_matplotlib_canvas(view)
        widget = _try_get_opengl_canvas(widget)
        dock = _create_dock_widget(widget, name, closable=closable, floatable=floatable)
        self.addDockWidget(_get_dock_position(position), dock, Qt.Horizontal)
        if floating is not None:
            dock.setFloating(floating)
        # Cross-link the view and its dock widget for later lookups.
        dock.view = view
        view.dock = dock
        # Emit the close_view event when the dock widget is closed.
        @connect(sender=dock)
        def on_close_dock_widget(sender):
            # Deregister the view before notifying listeners.
            self._views.remove(view)
            emit('close_view', view, self)
        dock.show()
        logger.log(5, "Add %s to GUI.", name)
        return dock
# Menu bar
# -------------------------------------------------------------------------
def get_menu(self, name, insert_before=None):
"""Get or create a menu."""
if name not in self._menus:
menu = QMenu(name)
if not insert_before:
self.menuBar().addMenu(menu)
else:
self.menuBar().insertMenu(self.get_menu(insert_before).menuAction(), menu)
self._menus[name] = menu
return self._menus[name]
def get_submenu(self, menu, name):
"""Get or create a submenu."""
if name not in self._menus:
self._menus[name] = self.get_menu(menu).addMenu(name)
return self._menus[name]
def remove_menu(self, name):
"""Remove a menu."""
if name in self._menus:
menu = self._menus[name]
menu.clear()
menu.setVisible(False)
self.menuBar().removeAction(menu.menuAction())
    def dialog(self, message):
        """Return a message box displaying `message`.

        The box is created but not shown; the caller decides how to run it.
        """
        box = QMessageBox(self)
        box.setText(message)
        return box
# Status bar
# -------------------------------------------------------------------------
    @property
    def status_message(self):
        """The message in the status bar, can be set by the user."""
        # Coerce to a plain Python string before returning.
        return str(self._status_bar.currentMessage())
    @status_message.setter
    def status_message(self, value):
        # Updates are silently ignored while the status bar is locked
        # (see lock_status()).
        if self._lock_status:
            return
        self._status_bar.showMessage(str(value))
    def lock_status(self):
        """Lock the status bar: subsequent message updates are ignored."""
        self._lock_status = True
    def unlock_status(self):
        """Unlock the status bar: message updates are applied again."""
        self._lock_status = False
# State
# -------------------------------------------------------------------------
def save_geometry_state(self):
"""Return picklable geometry and state of the window and docks.
This function can be called in `on_close()`.
"""
return {
'geometry': self.saveGeometry(),
'state': self.saveState(),
}
def restore_geometry_state(self, gs):
"""Restore the position of the main window and the docks.
The GUI widgets need to be recreated first.
This function can be called in `on_show()`.
"""
if not gs:
return
if gs.get('geometry', None):
self.restoreGeometry((gs['geometry']))
if gs.get('state', None):
self.restoreState((gs['state']))
|
{
"content_hash": "d30986e79cfbeab0203a8c1efd9feb38",
"timestamp": "",
"source": "github",
"line_count": 826,
"max_line_length": 99,
"avg_line_length": 33.246973365617436,
"alnum_prop": 0.5612846842910203,
"repo_name": "kwikteam/phy",
"id": "947aac5ef043730916d4c150d30dd6ac7e650dfa",
"size": "27489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phy/gui/gui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "32"
},
{
"name": "CSS",
"bytes": "1171"
},
{
"name": "GLSL",
"bytes": "6782"
},
{
"name": "JavaScript",
"bytes": "9051"
},
{
"name": "Makefile",
"bytes": "499"
},
{
"name": "Python",
"bytes": "547713"
},
{
"name": "Shell",
"bytes": "218"
}
],
"symlink_target": ""
}
|
# Human-readable description of this test figure.
desc = 'Noise with a color bar'
# Perceptual hash of the expected rendered figure; the previous value is
# left commented out.
# phash = 'f54b0ba50bb10bf4'
phash = 'fd5a0bb503f50354'
def plot():
    """Create the demo figure: clipped Gaussian noise with a vertical colorbar."""
    import numpy as np
    from matplotlib import pyplot as plt

    # Make plot with vertical (default) colorbar.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Fixed seed so the rendered image (and therefore its hash) is stable.
    np.random.seed(123)
    data = np.clip(np.random.randn(250, 250), -1, 1)
    cax = ax.imshow(data, interpolation='nearest')
    ax.set_title('Gaussian noise with vertical colorbar')
    # Pin the tick locations so the custom tick labels below line up.
    cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
    cbar.ax.set_yticklabels(['< -1', '0', '> 1'])
    return fig
|
{
"content_hash": "060a167b3a3245c361ede538d3c46d50",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 26.74074074074074,
"alnum_prop": 0.6551246537396122,
"repo_name": "dougnd/matplotlib2tikz",
"id": "200672ba7b076f251a9a2c7bb61871cfcc50f941",
"size": "748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/testfunctions/noise.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "542"
},
{
"name": "Python",
"bytes": "125199"
}
],
"symlink_target": ""
}
|
"""
This script changes all the queues within a VPN affecting the reject-msg-to-sender-on-discard setting.
"""
import sys
sys.path.append("/opt/pipeline")
import logging
import libsolace.settingsloader as settings
from libsolace.SolaceAPI import SolaceAPI
from libsolace.SolaceXMLBuilder import SolaceXMLBuilder
from libsolace.SolaceCommandQueue import SolaceCommandQueue
from optparse import OptionParser
import pprint
# Run against the real appliance by default (no debug/dry-run mode).
settings.debugmode = False
# Rebound in the __main__ block with the queue names fetched from the appliance.
queues = []
def generateXMLForManagingRejectMsgToSenderOnDiscard(vpn_name=None, queues=None, reject_msg_to_sender_on_discard=False):
    """Build the SEMP commands toggling reject-msg-to-sender-on-discard.

    :param vpn_name: string name of vpn
    :param queues: list of queue names to manipulate
    :param reject_msg_to_sender_on_discard: True to enable the setting,
        False to disable it
    :return: a SolaceCommandQueue holding one command per queue
    """
    # commandQueue is used to stack and validate solace commands
    commands = SolaceCommandQueue()
    try:
        for queue in queues:
            # Example of the XML produced for the "disable" case:
            '''
            <rpc xmlns="http://www.solacesystems.com/semp/topic_routing/6_0">
                <message-spool>
                    <vpn-name>dev_domainevent</vpn-name>
                    <queue>
                        <name>unibet.TestStatusChangedEvents.customertest</name>
                        <no>
                            <reject-msg-to-sender-on-discard/>
                        </no>
                    </queue>
                </message-spool>
            </rpc>
            '''
            prefix = "Enabling" if reject_msg_to_sender_on_discard else "Disabling"
            cmd = SolaceXMLBuilder("%s reject-msg-to-sender-on-discard for queue: %s" % (prefix, queue))
            cmd.message_spool.vpn_name = vpn_name
            cmd.message_spool.queue.name = queue
            # Accessing the attribute path on the builder is what creates the
            # XML nodes; the "no" element wraps the disable form.
            if reject_msg_to_sender_on_discard:
                cmd.message_spool.queue.reject_msg_to_sender_on_discard
            else:
                cmd.message_spool.queue.no.reject_msg_to_sender_on_discard
            commands.enqueue(cmd)
    # `as` form works on both python 2.6+ and python 3 (was `except Exception, e`).
    except Exception as e:
        print("Error %s" % e)
        print("Returning the plan")
    return commands
if __name__ == '__main__':
    """ parse opts, read site.xml, start provisioning vpns. """
    usage = ''
    parser = OptionParser(usage=usage)
    parser.add_option("-e", "--env", "--environment", action="store", type="string", dest="env",
                      help="environment to run job in eg:[ dev | ci1 | si1 | qa1 | pt1 | prod ]")
    parser.add_option("-V", "--vpn", action="store", type="string", dest="vpn_name",
                      help="literal name of vpn, eg: pt1_domainevent")
    parser.add_option("-t", "--testmode", action="store_true", dest="testmode",
                      default=False, help="only test configuration and exit")
    parser.add_option("-r", "--reject_msg_to_sender_on_discard", action="store_true", dest="reject_msg_to_sender_on_discard",
                      default=False, help="set to enable reject-msg-to-sender-on-discard")
    (options, args) = parser.parse_args()
    # Both the environment and the vpn name are mandatory.
    if not options.env:
        parser.print_help()
        sys.exit()
    if not options.vpn_name:
        parser.print_help()
        sys.exit()
    if options.testmode:
        logging.info("TEST MODE ACTIVE!!!")
    settings.env = options.env.lower()
    logging.info("Connecting to appliance in %s, testmode:%s" % (settings.env, options.testmode))
    connection = SolaceAPI(settings.env, testmode=options.testmode)
    # Every queue in the given vpn is affected.
    queues = connection.list_queues(options.vpn_name)
    commands = generateXMLForManagingRejectMsgToSenderOnDiscard(vpn_name=options.vpn_name, queues=queues,
                                                                reject_msg_to_sender_on_discard=options.reject_msg_to_sender_on_discard)
    print("The following queues will be manipulated in %s environment! " % settings.env)
    pprint.pprint(queues)
    # Interactive confirmation before sending anything to the appliance.
    s = raw_input('Do you want to continue? N/y? ')
    if s.lower() == 'y':
        for cmd in commands.commands:
            connection.rpc(str(cmd))
    else:
        print("chickening out...")
|
{
"content_hash": "86fad7b2fb1a61e101ad7b5ab94e688a",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 136,
"avg_line_length": 38.31428571428572,
"alnum_prop": 0.609992542878449,
"repo_name": "unixunion/python-libsolace",
"id": "5f405dcc287b5062934221cde6ea9520cb5ed6b8",
"size": "4046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/solace-modify-all-queues-vpn.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "8090"
},
{
"name": "Python",
"bytes": "400777"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ssl_info
short_description: Gather info of ESXi host system about SSL
description:
- This module can be used to gather information of the SSL thumbprint information for a host.
version_added: '2.9'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- SSL thumbprint information about all ESXi host system in the given cluster will be reported.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname.
- SSL thumbprint information of this ESXi host system will be reported.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster
vmware_host_ssl_info:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_host_ssl_info
- name: Get SSL Thumbprint info about "{{ esxi_hostname }}"
vmware_host_ssl_info:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: '{{ esxi_hostname }}'
register: ssl_info
- set_fact:
ssl_thumbprint: "{{ ssl_info['host_ssl_info'][esxi_hostname]['ssl_thumbprints'][0] }}"
- debug:
msg: "{{ ssl_thumbprint }}"
- name: Add ESXi Host to vCenter
vmware_host:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
cluster_name: '{{ cluster_name }}'
esxi_hostname: '{{ esxi_hostname }}'
esxi_username: '{{ esxi_username }}'
esxi_password: '{{ esxi_password }}'
esxi_ssl_thumbprint: '{{ ssl_thumbprint }}'
state: present
'''
RETURN = r'''
host_ssl_info:
description:
- dict with hostname as key and dict with SSL thumbprint related info
returned: info
type: dict
sample:
{
"10.76.33.215": {
"owner_tag": "",
"principal": "vpxuser",
"ssl_thumbprints": [
"E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE",
"F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01"
]
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VMwareHostSslManager(PyVmomi):
    """Gathers SSL thumbprint information for a set of ESXi hosts."""

    def __init__(self, module):
        super(VMwareHostSslManager, self).__init__(module)
        cluster = self.params.get('cluster_name', None)
        host_name = self.params.get('esxi_hostname', None)
        # Either a whole cluster's hosts or a single named host.
        self.hosts = self.get_all_host_objs(cluster_name=cluster, esxi_host_name=host_name)
        self.hosts_info = {}

    def gather_ssl_info(self):
        """Collect SSL info for every host and exit the module with the results."""
        for host in self.hosts:
            info = {
                'principal': '',
                'owner_tag': '',
                'ssl_thumbprints': [],
            }
            ssl_info = host.config.sslThumbprintInfo
            # Hosts without SSL thumbprint info keep the empty defaults.
            if ssl_info:
                info['principal'] = ssl_info.principal
                info['owner_tag'] = ssl_info.ownerTag
                info['ssl_thumbprints'] = list(ssl_info.sslThumbprints)
            self.hosts_info[host.name] = info
        self.module.exit_json(changed=False, host_ssl_info=self.hosts_info)
def main():
    """Module entry point: parse arguments and gather SSL thumbprint info."""
    spec = vmware_argument_spec()
    spec.update(dict(
        cluster_name=dict(type='str'),
        esxi_hostname=dict(type='str'),
    ))
    # One of cluster_name/esxi_hostname must be supplied; check mode is a
    # no-op since this module only reads data.
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ],
        supports_check_mode=True,
    )
    manager = VMwareHostSslManager(module)
    manager.gather_ssl_info()
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
|
{
"content_hash": "47c715232abd2f89e92eeef5b27bbbfb",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 109,
"avg_line_length": 31.153846153846153,
"alnum_prop": 0.621773288439955,
"repo_name": "thaim/ansible",
"id": "b76414a0fe413fa9266876d578c8e2f9cc6cdc8f",
"size": "4654",
"binary": false,
"copies": "20",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vmware/vmware_host_ssl_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from rest_auth.models import TokenModel
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes ``User`` objects with hyperlinked relations."""

    class Meta:
        model = User
        fields = ('url', 'username', 'email', 'groups')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes ``Group`` objects with hyperlinked relations."""

    class Meta:
        model = Group
        fields = ('url', 'name')
class AuthUserSerializer(serializers.ModelSerializer):
    """Compact user representation embedded in the auth token payload."""

    class Meta:
        model = User
        fields = ('username', 'is_superuser')
class TokenSerializer(serializers.ModelSerializer):
    """Serializes an auth token together with its nested user."""

    # Nested read of the token's owner (see AuthUserSerializer).
    user = AuthUserSerializer()

    class Meta:
        model = TokenModel
        fields = ('key', 'user')
|
{
"content_hash": "4ca5f7cbd75ba995af548938cbf05b63",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 62,
"avg_line_length": 30.041666666666668,
"alnum_prop": 0.6907073509015257,
"repo_name": "willy-claes/django-react",
"id": "5a6b812e03e31747828b954ed47360bded88efb2",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "431"
},
{
"name": "JavaScript",
"bytes": "17372"
},
{
"name": "Python",
"bytes": "7569"
}
],
"symlink_target": ""
}
|
import numpy
import unittest
from annoy import AnnoyIndex
class MultithreadedBuildTest(unittest.TestCase):
    """Verify that index building succeeds with different worker counts."""

    def _test_building_with_threads(self, n_jobs):
        # Build a 31-tree euclidean index over random data using n_jobs workers.
        n_items, n_dims = 10000, 10
        n_trees = 31
        index = AnnoyIndex(n_dims, 'euclidean')
        for item in range(n_items):
            index.add_item(item, numpy.random.normal(size=n_dims))
        self.assertTrue(index.build(n_trees, n_jobs=n_jobs))
        self.assertEqual(n_trees, index.get_n_trees())

    def test_one_thread(self):
        self._test_building_with_threads(1)

    def test_two_threads(self):
        self._test_building_with_threads(2)

    def test_four_threads(self):
        self._test_building_with_threads(4)

    def test_eight_threads(self):
        self._test_building_with_threads(8)
|
{
"content_hash": "05428adb0d0b77aa120275d3e1f4b0e3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 56,
"avg_line_length": 28.807692307692307,
"alnum_prop": 0.6328437917222964,
"repo_name": "spotify/annoy",
"id": "0fc9f24f7ec7d74735b68da81a2117d5718a4587",
"size": "749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/multithreaded_build_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5791"
},
{
"name": "C++",
"bytes": "80693"
},
{
"name": "CMake",
"bytes": "579"
},
{
"name": "Go",
"bytes": "7567"
},
{
"name": "Lua",
"bytes": "18178"
},
{
"name": "Python",
"bytes": "60108"
},
{
"name": "SWIG",
"bytes": "1795"
}
],
"symlink_target": ""
}
|
from puck.backend import echo_backend
from puck.parser import parse_requirement
def test_parse_req():
    """A pinned requirement parses into its component fields."""
    result = parse_requirement('foo==1.2.3', None, backend=echo_backend)
    assert result == {
        'name': 'foo',
        'pinned_version': '1.2.3',
        'latest_version': 'foo',
        'source': None,
    }
def test_parse_with_spaces():
    """Whitespace around the ``==`` operator is tolerated."""
    result = parse_requirement('foo == 1.2.3', None, backend=echo_backend)
    assert result == {
        'name': 'foo',
        'pinned_version': '1.2.3',
        'latest_version': 'foo',
        'source': None,
    }
def test_parse_invalid_req():
    """Non-pinned specifiers (e.g. ``<``) are rejected with None."""
    result = parse_requirement('foo<1.2.3', None, backend=echo_backend)
    assert result is None
|
{
"content_hash": "85fc15490780e016d90f155d93bded87",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 41,
"avg_line_length": 20.473684210526315,
"alnum_prop": 0.5308483290488432,
"repo_name": "NativeInstruments/puck",
"id": "03e00f994436b612a899fbb4a7f2f38994e07ce9",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puck/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "398"
},
{
"name": "Python",
"bytes": "11244"
}
],
"symlink_target": ""
}
|
import contextlib
import hashlib
import os
import re
import tarfile
import urlparse
from anvil import colorizer
from anvil import downloader as down
from anvil import importer
from anvil import log
from anvil import shell as sh
from anvil import utils
LOG = log.getLogger(__name__)
# Extensions that tarfile knows how to work with
TAR_EXTS = ['.tgz', '.gzip', '.gz', '.bz2', '.tar']
# Used to attempt to produce a name for images (to see if we already have it)
# And to use as the final name...
# Reverse sorted so that .tar.gz replaces before .tar (and so on)
NAME_CLEANUPS = [
    '.tar.gz',
    '.img.gz',
    '.qcow2',
    '.img',
] + TAR_EXTS
NAME_CLEANUPS.sort()
NAME_CLEANUPS.reverse()
# Used to match various file names with what could be a kernel image
KERNEL_CHECKS = [
    re.compile(r"(.*)vmlinuz(.*)$", re.I),
    re.compile(r'(.*?)aki-tty/image$', re.I),
]
# Used to match various file names with what could be a root image
ROOT_CHECKS = [
    re.compile(r"(.*)img$", re.I),
    re.compile(r"(.*)qcow2$", re.I),
    re.compile(r'(.*?)aki-tty/image$', re.I),
]
# Used to match various file names with what could be a ram disk image
RAMDISK_CHECKS = [
    re.compile(r"(.*)-initrd$", re.I),
    re.compile(r"initrd[-]?(.*)$", re.I),
    re.compile(r"(.*)initramfs(.*)$", re.I),
    re.compile(r'(.*?)ari-tty/image$', re.I),
]
# Skip files that match these patterns (e.g. hidden dot-files)
SKIP_CHECKS = [
    re.compile(r"^[.]", re.I),
]
# File extensions we will skip over (typically of content hashes)
BAD_EXTENSIONS = ['md5', 'sha', 'sfv']
def _hash_it(content, hash_algo='md5'):
hasher = hashlib.new(hash_algo)
hasher.update(content)
digest = hasher.hexdigest()
return digest
class Unpacker(object):
    """Finds and extracts the kernel/ramdisk/root pieces of a disk image.

    Input can be a tarball (see TAR_EXTS), a raw/qcow2 image file, or a
    directory that already contains the unpacked pieces.
    """

    def _get_tar_file_members(self, arc_fn):
        # Return the names of the regular-file members of tar archive arc_fn.
        LOG.info("Finding what exists in %s.", colorizer.quote(arc_fn))
        files = []
        with contextlib.closing(tarfile.open(arc_fn, 'r')) as tfh:
            for tmemb in tfh.getmembers():
                if not tmemb.isfile():
                    continue
                files.append(tmemb.name)
        return files

    def _pat_checker(self, fn, patterns):
        # True if fn matches one of the patterns and is not a checksum
        # side-file (see BAD_EXTENSIONS: md5/sha/sfv).
        (_root_fn, fn_ext) = os.path.splitext(fn)
        if utils.has_any(fn_ext.lower(), *BAD_EXTENSIONS):
            return False
        for pat in patterns:
            if pat.search(fn):
                return True
        return False

    def _find_pieces(self, files, files_location):
        """Match files against the patterns in KERNEL_CHECKS,
        RAMDISK_CHECKS, and ROOT_CHECKS to determine which files
        contain which image parts.
        """
        kernel_fn = None
        ramdisk_fn = None
        img_fn = None
        utils.log_iterable(files, logger=LOG,
                           header="Looking at %s files from %s to find the kernel/ramdisk/root images" % (len(files), colorizer.quote(files_location)))
        # A file could match several categories; kernel is checked first,
        # then ramdisk, then root. The last match in each category wins.
        for fn in files:
            if self._pat_checker(fn, KERNEL_CHECKS):
                kernel_fn = fn
                LOG.debug("Found kernel: %r" % (fn))
            elif self._pat_checker(fn, RAMDISK_CHECKS):
                ramdisk_fn = fn
                LOG.debug("Found ram disk: %r" % (fn))
            elif self._pat_checker(fn, ROOT_CHECKS):
                img_fn = fn
                LOG.debug("Found root image: %r" % (fn))
            else:
                LOG.debug("Unknown member %r - skipping" % (fn))
        return (img_fn, ramdisk_fn, kernel_fn)

    def _unpack_tar_member(self, tarhandle, member, output_location):
        # Stream one tar member out to output_location; returns what
        # sh.pipe_in_out returns.
        LOG.info("Extracting %s to %s.", colorizer.quote(member.name), colorizer.quote(output_location))
        with contextlib.closing(tarhandle.extractfile(member)) as mfh:
            with open(output_location, "wb") as ofh:
                return sh.pipe_in_out(mfh, ofh)

    def _describe(self, root_fn, ramdisk_fn, kernel_fn):
        """Make an "info" dict that describes the path, disk format, and
        container format of each component of an image.
        """
        info = dict()
        if kernel_fn:
            info['kernel'] = {
                'file_name': kernel_fn,
                'disk_format': 'aki',
                'container_format': 'aki',
            }
        if ramdisk_fn:
            info['ramdisk'] = {
                'file_name': ramdisk_fn,
                'disk_format': 'ari',
                'container_format': 'ari',
            }
        info['file_name'] = root_fn
        info['disk_format'] = 'ami'
        info['container_format'] = 'ami'
        return info

    def _filter_files(self, files):
        # Drop files matching SKIP_CHECKS (hidden/dot files).
        filtered = []
        for fn in files:
            if self._pat_checker(fn, SKIP_CHECKS):
                pass
            else:
                filtered.append(fn)
        return filtered

    def _unpack_tar(self, file_name, file_location, tmp_dir):
        """Extract the root/ramdisk/kernel members of a tarball into tmp_dir
        and return the describing info dict. Raises IOError when no root
        image member can be identified.
        """
        (root_name, _) = os.path.splitext(file_name)
        tar_members = self._filter_files(self._get_tar_file_members(file_location))
        (root_img_fn, ramdisk_fn, kernel_fn) = self._find_pieces(tar_members, file_location)
        if not root_img_fn:
            msg = "Tar file %r has no root image member" % (file_name)
            raise IOError(msg)
        kernel_real_fn = None
        root_real_fn = None
        ramdisk_real_fn = None
        self._log_pieces_found('archive', root_img_fn, ramdisk_fn, kernel_fn)
        extract_dir = sh.mkdir(sh.joinpths(tmp_dir, root_name))
        with contextlib.closing(tarfile.open(file_location, 'r')) as tfh:
            for m in tfh.getmembers():
                # Only the identified pieces are extracted; everything else
                # in the archive is ignored.
                if m.name == root_img_fn:
                    root_real_fn = sh.joinpths(extract_dir, sh.basename(root_img_fn))
                    self._unpack_tar_member(tfh, m, root_real_fn)
                elif ramdisk_fn and m.name == ramdisk_fn:
                    ramdisk_real_fn = sh.joinpths(extract_dir, sh.basename(ramdisk_fn))
                    self._unpack_tar_member(tfh, m, ramdisk_real_fn)
                elif kernel_fn and m.name == kernel_fn:
                    kernel_real_fn = sh.joinpths(extract_dir, sh.basename(kernel_fn))
                    self._unpack_tar_member(tfh, m, kernel_real_fn)
        return self._describe(root_real_fn, ramdisk_real_fn, kernel_real_fn)

    def _log_pieces_found(self, src_type, root_fn, ramdisk_fn, kernel_fn):
        # Purely informational: summarize which image pieces were identified.
        pieces = []
        if root_fn:
            pieces.append("%s (root image)" % (colorizer.quote(root_fn)))
        if ramdisk_fn:
            pieces.append("%s (ramdisk image)" % (colorizer.quote(ramdisk_fn)))
        if kernel_fn:
            pieces.append("%s (kernel image)" % (colorizer.quote(kernel_fn)))
        if pieces:
            utils.log_iterable(pieces, logger=LOG,
                               header="Found %s images from a %s" % (len(pieces), src_type))

    def _unpack_dir(self, dir_path):
        """Pick through a directory to figure out which files are which
        image pieces, and create a dict that describes them.

        Raises IOError when no root image can be identified.
        """
        potential_files = set()
        for fn in self._filter_files(sh.listdir(dir_path)):
            full_fn = sh.joinpths(dir_path, fn)
            if sh.isfile(full_fn):
                potential_files.add(sh.canon_path(full_fn))
        (root_fn, ramdisk_fn, kernel_fn) = self._find_pieces(potential_files, dir_path)
        if not root_fn:
            msg = "Directory %r has no root image member" % (dir_path)
            raise IOError(msg)
        self._log_pieces_found('directory', root_fn, ramdisk_fn, kernel_fn)
        return self._describe(root_fn, ramdisk_fn, kernel_fn)

    def unpack(self, file_name, file_location, tmp_dir):
        """Dispatch on the input type: directory, tarball, or raw/qcow2 file.

        Returns the describing info dict; raises IOError for unknown types.
        """
        if sh.isdir(file_location):
            return self._unpack_dir(file_location)
        elif sh.isfile(file_location):
            (_, fn_ext) = os.path.splitext(file_name)
            fn_ext = fn_ext.lower()
            if fn_ext in TAR_EXTS:
                return self._unpack_tar(file_name, file_location, tmp_dir)
            elif fn_ext in ['.img', '.qcow2']:
                # Plain image files need no extraction, just a description.
                info = dict()
                info['file_name'] = file_location
                if fn_ext == '.img':
                    info['disk_format'] = 'raw'
                else:
                    info['disk_format'] = 'qcow2'
                info['container_format'] = 'bare'
                return info
        msg = "Currently we do not know how to unpack %r" % (file_location)
        raise IOError(msg)
class Registry(object):
    """Name-based lookup of the images that already exist in glance."""

    def __init__(self, client):
        """:param client: a glance client exposing ``images.list()``."""
        self.client = client

    def _extract_names(self):
        """Return a dict mapping existing image names to their ids."""
        names = dict()
        images = self.client.images.list()
        for image in images:
            names[image.name] = image.id
        return names

    def __contains__(self, name):
        """True if an image with this name already exists in glance."""
        # Direct membership test instead of the redundant if/else form.
        return name in self._extract_names()
class Image(object):
    """One image url: fetched (or found locally), unpacked, cached, and
    registered in glance together with its kernel/ramdisk pieces.
    """

    def __init__(self, client, url, is_public, cache_dir):
        self.client = client
        self.registry = Registry(client)
        self.url = url
        self.parsed_url = urlparse.urlparse(url)
        self.is_public = is_public
        self.cache_dir = cache_dir

    def _check_name(self, name):
        # Fail early rather than create a duplicate glance entry.
        LOG.info("Checking if image %s already exists already in glance.", colorizer.quote(name))
        if name in self.registry:
            raise IOError("Image named %s already exists." % (name))

    def _register(self, image_name, location):
        """Upload the kernel/ramdisk pieces (if present) and then the root
        image described by `location` (the Unpacker info dict); return the
        glance id of the root image.
        """
        # Upload the kernel, if we have one
        kernel = location.pop('kernel', None)
        kernel_id = ''
        if kernel:
            kernel_image_name = "%s-vmlinuz" % (image_name)
            self._check_name(kernel_image_name)
            LOG.info('Adding kernel %s to glance.', colorizer.quote(kernel_image_name))
            LOG.info("Please wait installing...")
            args = {
                'container_format': kernel['container_format'],
                'disk_format': kernel['disk_format'],
                'name': kernel_image_name,
                'is_public': self.is_public,
            }
            # NOTE(review): files are opened in text mode ('r'); that is fine
            # on python 2 but would corrupt binary data on python 3 — confirm
            # before porting.
            with open(kernel['file_name'], 'r') as fh:
                resource = self.client.images.create(data=fh, **args)
                kernel_id = resource.id
        # Upload the ramdisk, if we have one
        initrd = location.pop('ramdisk', None)
        initrd_id = ''
        if initrd:
            ram_image_name = "%s-initrd" % (image_name)
            self._check_name(ram_image_name)
            LOG.info('Adding ramdisk %s to glance.', colorizer.quote(ram_image_name))
            LOG.info("Please wait installing...")
            args = {
                'container_format': initrd['container_format'],
                'disk_format': initrd['disk_format'],
                'name': ram_image_name,
                'is_public': self.is_public,
            }
            with open(initrd['file_name'], 'r') as fh:
                resource = self.client.images.create(data=fh, **args)
                initrd_id = resource.id
        # Upload the root, we must have one...
        LOG.info('Adding image %s to glance.', colorizer.quote(image_name))
        self._check_name(image_name)
        args = {
            'name': image_name,
            'container_format': location['container_format'],
            'disk_format': location['disk_format'],
            'is_public': self.is_public,
            'properties': {},
        }
        # Link the root image to its kernel/ramdisk via glance properties.
        if kernel_id or initrd_id:
            if kernel_id:
                args['properties']['kernel_id'] = kernel_id
            if initrd_id:
                args['properties']['ramdisk_id'] = initrd_id
        LOG.info("Please wait installing...")
        with open(location['file_name'], 'r') as fh:
            resource = self.client.images.create(data=fh, **args)
            img_id = resource.id
        return img_id

    def _generate_img_name(self, url_fn):
        # Strip known archive/image suffixes (NAME_CLEANUPS) from the file
        # name to produce the glance image name.
        name = url_fn
        for look_for in NAME_CLEANUPS:
            name = name.replace(look_for, '')
        return name

    def _extract_url_fn(self):
        # Last component of the url path (may be empty).
        return sh.basename(self.parsed_url.path)

    def _is_url_local(self):
        # Local when the path exists on disk or the url has no scheme/host.
        return (sh.exists(self.url) or (self.parsed_url.scheme == '' and self.parsed_url.netloc == ''))

    def _cached_paths(self):
        """Return (cache_dir_path, details_path), both keyed on the url hash."""
        digest = _hash_it(self.url)
        path = sh.joinpths(self.cache_dir, digest)
        details_path = sh.joinpths(self.cache_dir, digest + ".details")
        return (path, details_path)

    def _validate_cache(self, cache_path, details_path):
        """True if the cache entry exists and every file it references does too."""
        for path in [cache_path, details_path]:
            if not sh.exists(path):
                return False
        check_files = []
        try:
            unpack_info = utils.load_yaml_text(sh.load_file(details_path))
            check_files.append(unpack_info['file_name'])
            if 'kernel' in unpack_info:
                check_files.append(unpack_info['kernel']['file_name'])
            if 'ramdisk' in unpack_info:
                check_files.append(unpack_info['ramdisk']['file_name'])
        except Exception:
            # Unreadable or malformed details file -> cache is unusable.
            return False
        for path in check_files:
            if not sh.isfile(path):
                return False
        return True

    def install(self):
        """Fetch (or reuse the cached copy of), unpack, and register the image.

        Returns (image_name, glance_image_id); raises IOError when no file
        name can be derived from the url.
        """
        url_fn = self._extract_url_fn()
        if not url_fn:
            raise IOError("Can not determine file name from url: %r" % (self.url))
        (cache_path, details_path) = self._cached_paths()
        use_cached = self._validate_cache(cache_path, details_path)
        if use_cached:
            LOG.info("Found valid cached image + metadata at: %s", colorizer.quote(cache_path))
            unpack_info = utils.load_yaml_text(sh.load_file(details_path))
        else:
            sh.mkdir(cache_path)
            if not self._is_url_local():
                (fetched_fn, bytes_down) = down.UrlLibDownloader(self.url,
                                                                 sh.joinpths(cache_path, url_fn)).download()
                LOG.debug("For url %s we downloaded %s bytes to %s", self.url, bytes_down, fetched_fn)
            else:
                fetched_fn = self.url
            unpack_info = Unpacker().unpack(url_fn, fetched_fn, cache_path)
            # Persist the unpack description so later runs can reuse the cache.
            sh.write_file(details_path, utils.prettify_yaml(unpack_info))
        tgt_image_name = self._generate_img_name(url_fn)
        img_id = self._register(tgt_image_name, unpack_info)
        return (tgt_image_name, img_id)
class UploadService(object):
    """Uploads a batch of image urls into glance (kernel/ramdisk aware)."""

    def __init__(self, glance, keystone, cache_dir='/usr/share/anvil/glance/cache', is_public=True):
        self.glance_params = glance
        self.keystone_params = keystone
        self.cache_dir = cache_dir
        self.is_public = is_public

    def _get_token(self, kclient_v2):
        """Fetch an admin auth token from keystone for the glance client."""
        LOG.info("Getting your keystone token so that image uploads may proceed.")
        k_params = self.keystone_params
        client = kclient_v2.Client(username=k_params['admin_user'],
                                   password=k_params['admin_password'],
                                   tenant_name=k_params['admin_tenant'],
                                   auth_url=k_params['endpoints']['public']['uri'])
        return client.auth_token

    def install(self, urls):
        """Download + unpack + upload each url; return how many succeeded.

        Failures (import, client setup, or per-url) are logged, not raised.
        """
        am_installed = 0
        try:
            # Done at a function level since this module may be used
            # before these libraries actually exist...
            gclient_v1 = importer.import_module('glanceclient.v1.client')
            gexceptions = importer.import_module('glanceclient.common.exceptions')
            kclient_v2 = importer.import_module('keystoneclient.v2_0.client')
            kexceptions = importer.import_module('keystoneclient.exceptions')
        except RuntimeError as e:
            LOG.exception("Failed at importing required client modules: %s", e)
            return am_installed
        if urls:
            try:
                # Ensure all services ok
                for params in [self.glance_params, self.keystone_params]:
                    utils.wait_for_url(params['endpoints']['public']['uri'])
                g_params = self.glance_params
                client = gclient_v1.Client(endpoint=g_params['endpoints']['public']['uri'],
                                           token=self._get_token(kclient_v2))
            except (RuntimeError, gexceptions.ClientException,
                    kexceptions.ClientException, IOError) as e:
                LOG.exception('Failed fetching needed clients for image calls due to: %s', e)
                return am_installed
            utils.log_iterable(urls, logger=LOG,
                               header="Attempting to download+extract+upload %s images" % len(urls))
            for url in urls:
                # One bad url does not abort the rest of the batch.
                try:
                    img_handle = Image(client, url,
                                       is_public=self.is_public,
                                       cache_dir=self.cache_dir)
                    (name, img_id) = img_handle.install()
                    LOG.info("Installed image named %s with image id %s.", colorizer.quote(name), colorizer.quote(img_id))
                    am_installed += 1
                except (IOError,
                        tarfile.TarError,
                        gexceptions.ClientException,
                        kexceptions.ClientException) as e:
                    LOG.exception('Installing %r failed due to: %s', url, e)
        return am_installed
def get_shared_params(ip, api_port=9292, protocol='http', reg_port=9191, **kwargs):
    """Build the glance endpoint/parameter mapping shared by other components.

    :param ip: host/ip used for all endpoints
    :param api_port: glance API port (default 9292)
    :param protocol: endpoint scheme, e.g. ``http``
    :param reg_port: glance registry port (default 9191)
    :returns: dict with ``service_host`` and ``endpoints``
        (``admin``/``registry``/``internal``/``public``)
    """
    def _endpoint(port):
        # One endpoint descriptor: uri plus its components.
        return {
            'uri': utils.make_url(protocol, ip, port),
            'port': port,
            'host': ip,
            'protocol': protocol,
        }

    params = {
        'service_host': ip,
        'endpoints': {
            'admin': _endpoint(api_port),
            'registry': _endpoint(reg_port),
        },
    }
    # internal/public mirror admin (shallow copies, as before).
    params['endpoints']['internal'] = dict(params['endpoints']['admin'])
    params['endpoints']['public'] = dict(params['endpoints']['admin'])
    return params
|
{
"content_hash": "33931e1feced2fb03b5ef6af1e657732",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 138,
"avg_line_length": 39.07036247334755,
"alnum_prop": 0.5544640908098668,
"repo_name": "pombredanne/anvil",
"id": "02dc593540ed4161c0335a65beef3a7c7ce2dfc9",
"size": "19001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anvil/components/helpers/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import json
import logging
from typing import Any
import requests
from zerver.lib.cache import cache_with_key
from zerver.lib.outgoing_http import OutgoingSession
logger = logging.getLogger(__name__)
class GithubSession(OutgoingSession):
    """Outgoing HTTP session preconfigured for GitHub API calls."""

    def __init__(self, **kwargs: Any) -> None:
        # Short 5s timeout: these lookups happen during request handling.
        super().__init__(role="github", timeout=5, **kwargs)
def get_latest_github_release_version_for_repo(repo: str) -> str:
    """Return the tag name of the latest release of a zulip/* repo, or ""."""
    api_url = f"https://api.github.com/repos/zulip/{repo}/releases/latest"
    try:
        payload = GithubSession().get(api_url).json()
        return payload["tag_name"]
    except (requests.RequestException, json.JSONDecodeError, KeyError):
        # Any network/parse/shape failure degrades to the empty string.
        logger.exception(
            "Unable to fetch the latest release version from GitHub %s", api_url, stack_info=True
        )
        return ""
def verify_release_download_link(link: str) -> bool:
    """HEAD-check that a download link resolves without an error status."""
    try:
        response = GithubSession().head(link)
        response.raise_for_status()
    except requests.RequestException:
        logger.error("App download link is broken %s", link)
        return False
    return True
# Maps a platform slug to the installer file-name pattern published with each
# desktop release; "{version}" is substituted with the release version.
PLATFORM_TO_SETUP_FILE = {
    "linux": "Zulip-{version}-x86_64.AppImage",
    "mac": "Zulip-{version}-x64.dmg",
    "mac-arm64": "Zulip-{version}-arm64.dmg",
    "windows": "Zulip-Web-Setup-{version}.exe",
}
class InvalidPlatform(Exception):
    """Raised when a download link is requested for an unknown platform."""

    pass
@cache_with_key(lambda platform: f"download_link:{platform}", timeout=60 * 30)
def get_latest_github_release_download_link_for_platform(platform: str) -> str:
    """Return a verified download URL for the latest desktop release.

    Falls back to the generic releases page when the version cannot be
    determined or the direct link does not resolve.

    :raises InvalidPlatform: if ``platform`` is not a known slug.
    """
    if platform not in PLATFORM_TO_SETUP_FILE:
        raise InvalidPlatform()

    version = get_latest_github_release_version_for_repo("zulip-desktop")
    if version:
        # Release tags may carry a leading "v"/"V"; strip it for file names.
        if version.startswith(("v", "V")):
            version = version[1:]
        setup_file = PLATFORM_TO_SETUP_FILE[platform].format(version=version)
        link = f"https://desktop-download.zulip.com/v{version}/{setup_file}"
        if verify_release_download_link(link):
            return link
    return "https://github.com/zulip/zulip-desktop/releases/latest"
|
{
"content_hash": "27942533b8606a8450ba469d8fa9145c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 97,
"avg_line_length": 32.523809523809526,
"alnum_prop": 0.6715470961444607,
"repo_name": "andersk/zulip",
"id": "08397c3ff30b8846baf7f407e52703a59997430c",
"size": "2049",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "zerver/lib/github.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "490256"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "749848"
},
{
"name": "Handlebars",
"bytes": "377098"
},
{
"name": "JavaScript",
"bytes": "4006373"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10168530"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284837"
}
],
"symlink_target": ""
}
|
from .Factor import Factor, factor_product, factor_divide, State
from .FactorSet import FactorSet, factorset_product, factorset_divide
from .CPD import TabularCPD
from .JointProbabilityDistribution import JointProbabilityDistribution
# Public API of the factors subpackage (re-exported names).
__all__ = ['Factor',
           'State',
           'factor_product',
           'factor_divide',
           'TabularCPD',
           'JointProbabilityDistribution',
           'FactorSet',
           'factorset_product',
           'factorset_divide']
|
{
"content_hash": "63fae86a36539f20254b8513798a61db",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 34.857142857142854,
"alnum_prop": 0.6495901639344263,
"repo_name": "liquidmetal/pgmpy",
"id": "39e63b935dde2aa83228c0ef17b6112166796588",
"size": "488",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "pgmpy/factors/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "897788"
},
{
"name": "Shell",
"bytes": "1022"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from typing import Optional
from typing import List
from libcloud.common.base import ConnectionUserAndKey, BaseDriver
from libcloud.container.types import ContainerState
__all__ = [
"Container",
"ContainerImage",
"ContainerCluster",
"ClusterLocation",
"ContainerDriver",
]
class Container(object):
    """A deployed container instance tied to the driver that manages it.

    Lifecycle operations (start/stop/restart/destroy) are thin proxies to
    the owning :class:`.ContainerDriver`.
    """

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        image,  # type: ContainerImage
        state,  # type: ContainerState
        ip_addresses,  # type: List[str]
        driver,  # type: ContainerDriver
        extra=None,  # type: dict
    ):
        """
        :param id: Container id; coerced to ``str`` (falsy ids become None).
        :param name: The name of the container (``str``).
        :param image: Image this container was deployed using
            (:class:`.ContainerImage`).
        :param state: Container state, e.g. running
            (:class:`libcloud.container.types.ContainerState`).
        :param ip_addresses: IP addresses for this container
            (``list`` of ``str``).
        :param driver: Owning :class:`.ContainerDriver` instance.
        :param extra: (optional) Driver-specific attributes (``dict``).
        """
        self.id = str(id) if id else None
        self.name = name
        self.image = image
        self.state = state
        self.ip_addresses = ip_addresses
        self.driver = driver
        self.extra = extra or {}

    def start(self):
        # type: () -> Container
        """Ask the driver to start this container."""
        return self.driver.start_container(container=self)

    def stop(self):
        # type: () -> Container
        """Ask the driver to stop this container."""
        return self.driver.stop_container(container=self)

    def restart(self):
        # type: () -> Container
        """Ask the driver to restart this container."""
        return self.driver.restart_container(container=self)

    def destroy(self):
        # type: () -> bool
        """Ask the driver to destroy this container."""
        return self.driver.destroy_container(container=self)

    def __repr__(self):
        # NOTE: historical format -- there is no space before "state".
        return "<Container: id=%s, name=%s,state=%s, provider=%s ...>" % (
            self.id,
            self.name,
            self.state,
            self.driver.name,
        )
class ContainerImage(object):
    """A container image that containers can be deployed from."""

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        path,  # type: str
        version,  # type: str
        driver,  # type: ContainerDriver
        extra=None,  # type: dict
    ):
        """
        :param id: Container Image id; coerced to ``str`` (falsy ids become
            None).
        :param name: The name of the image (``str``).
        :param path: The path to the image (``str``).
        :param version: The version of the image (``str``).
        :param driver: Owning :class:`.ContainerDriver` instance.
        :param extra: (optional) Driver-specific attributes (``dict``).
        """
        self.id = str(id) if id else None
        self.name = name
        self.path = path
        self.version = version
        self.driver = driver
        self.extra = extra or {}

    def deploy(self, name, parameters, cluster=None, start=True):
        # type: (str, str, Optional[ContainerCluster], bool) -> Container
        """Deploy a container from this image via the owning driver."""
        return self.driver.deploy_container(
            name=name, image=self, parameters=parameters, cluster=cluster, start=start
        )

    def __repr__(self):
        return "<ContainerImage: id=%s, name=%s, path=%s ...>" % (
            self.id,
            self.name,
            self.path,
        )
class ContainerCluster(object):
    """A cluster grouping for containers."""

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        driver,  # type: ContainerDriver
        extra=None,  # type: dict
    ):
        """
        :param id: Cluster id; coerced to ``str`` (falsy ids become None).
        :param name: The name of the cluster (``str``).
        :param driver: Owning :class:`.ContainerDriver` instance.
        :param extra: (optional) Driver-specific attributes (``dict``).
        """
        self.id = str(id) if id else None
        self.name = name
        self.driver = driver
        self.extra = extra or {}

    def list_containers(self):
        # type: () -> List[Container]
        """List the containers deployed into this cluster."""
        return self.driver.list_containers(cluster=self)

    def destroy(self):
        # type: () -> bool
        """Ask the driver to destroy this cluster."""
        return self.driver.destroy_cluster(cluster=self)

    def __repr__(self):
        return "<ContainerCluster: id=%s, name=%s, provider=%s ...>" % (
            self.id,
            self.name,
            self.driver.name,
        )
class ClusterLocation(object):
    """A physical location where clusters can live.

    Plain value object: id, name, country plus the owning driver.
    """

    def __init__(
        self,
        id,  # type: str
        name,  # type: str
        country,  # type: str
        driver,  # type: ContainerDriver
    ):
        """
        :param id: Location ID; always coerced to ``str``.
        :param name: Location name (``str``).
        :param country: Location country (``str``).
        :param driver: Driver this location belongs to
            (:class:`.ContainerDriver`).
        """
        self.id = str(id)
        self.name = name
        self.country = country
        self.driver = driver

    def __repr__(self):
        return ("<ClusterLocation: id=%s, name=%s, country=%s, driver=%s>") % (
            self.id,
            self.name,
            self.country,
            self.driver.name,
        )
class ContainerDriver(BaseDriver):
    """
    A base ContainerDriver class to derive from

    This class is always subclassed by a specific driver.
    """

    connectionCls = ConnectionUserAndKey
    name = None
    website = None

    # Whether the driver supports containers being deployed into clusters
    supports_clusters = False

    def __init__(self, key, secret=None, secure=True, host=None, port=None, **kwargs):
        """
        :param key: API key or username to used (required)
        :type key: ``str``

        :param secret: Secret password to be used (required)
        :type secret: ``str``

        :param secure: Whether to use HTTPS or HTTP. Note: Some providers
                only support HTTPS, and it is on by default.
        :type secure: ``bool``

        :param host: Override hostname used for connections.
        :type host: ``str``

        :param port: Override port used for connections.
        :type port: ``int``

        :return: ``None``
        """
        super(ContainerDriver, self).__init__(
            key=key, secret=secret, secure=secure, host=host, port=port, **kwargs
        )

    def install_image(self, path):
        # type: (str) -> ContainerImage
        """
        Install a container image from a remote path.

        :param path: Path to the container image
        :type  path: ``str``

        :rtype: :class:`.ContainerImage`
        """
        raise NotImplementedError("install_image not implemented for this driver")

    def list_images(self):
        # type: () -> List[ContainerImage]
        """
        List the installed container images

        :rtype: ``list`` of :class:`.ContainerImage`
        """
        raise NotImplementedError("list_images not implemented for this driver")

    def list_containers(
        self,
        image=None,  # type: Optional[ContainerImage]
        cluster=None,  # type: Optional[ContainerCluster]
    ):
        # type: (...) -> List[Container]
        """
        List the deployed container images

        :param image: Filter to containers with a certain image
        :type  image: :class:`.ContainerImage`

        :param cluster: Filter to containers in a cluster
        :type  cluster: :class:`.ContainerCluster`

        :rtype: ``list`` of :class:`.Container`
        """
        raise NotImplementedError("list_containers not implemented for this driver")

    def deploy_container(
        self,
        name,  # type: str
        image,  # type: ContainerImage
        cluster=None,  # type: Optional[ContainerCluster]
        parameters=None,  # type: Optional[str]
        start=True,  # type: bool
    ):
        # type: (...) -> Container
        """
        Deploy an installed container image

        :param name: The name of the new container
        :type  name: ``str``

        :param image: The container image to deploy
        :type  image: :class:`.ContainerImage`

        :param cluster: The cluster to deploy to, None is default
        :type  cluster: :class:`.ContainerCluster`

        :param parameters: Container Image parameters
        :type  parameters: ``str``

        :param start: Start the container on deployment
        :type  start: ``bool``

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("deploy_container not implemented for this driver")

    def get_container(self, id):
        # type: (str) -> Container
        """
        Get a container by ID

        :param id: The ID of the container to get
        :type  id: ``str``

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("get_container not implemented for this driver")

    def start_container(self, container):
        # type: (Container) -> Container
        """
        Start a deployed container

        :param container: The container to start
        :type  container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("start_container not implemented for this driver")

    def stop_container(self, container):
        # type: (Container) -> Container
        """
        Stop a deployed container

        :param container: The container to stop
        :type  container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("stop_container not implemented for this driver")

    def restart_container(self, container):
        # type: (Container) -> Container
        """
        Restart a deployed container

        :param container: The container to restart
        :type  container: :class:`.Container`

        :rtype: :class:`.Container`
        """
        raise NotImplementedError("restart_container not implemented for this driver")

    def destroy_container(self, container):
        # type: (Container) -> bool
        """
        Destroy a deployed container

        :param container: The container to destroy
        :type  container: :class:`.Container`

        :rtype: ``bool``
        """
        raise NotImplementedError("destroy_container not implemented for this driver")

    def list_locations(self):
        # type: () -> List[ClusterLocation]
        """
        Get a list of potential locations to deploy clusters into

        :rtype: ``list`` of :class:`.ClusterLocation`
        """
        raise NotImplementedError("list_locations not implemented for this driver")

    def create_cluster(self, name, location=None):
        # type: (str, Optional[ClusterLocation]) -> ContainerCluster
        """
        Create a container cluster

        :param name: The name of the cluster
        :type  name: ``str``

        :param location: The location to create the cluster in
        :type  location: :class:`.ClusterLocation`

        :rtype: :class:`.ContainerCluster`
        """
        raise NotImplementedError("create_cluster not implemented for this driver")

    def destroy_cluster(self, cluster):
        # type: (ContainerCluster) -> bool
        """
        Delete a cluster

        :param cluster: The cluster to destroy
        :type  cluster: :class:`.ContainerCluster`

        :return: ``True`` if the destroy was successful, otherwise ``False``.
        :rtype: ``bool``
        """
        raise NotImplementedError("destroy_cluster not implemented for this driver")

    def list_clusters(self, location=None):
        # type: (Optional[ClusterLocation]) -> List[ContainerCluster]
        """
        Get a list of clusters, optionally filtered by location

        :param location: The location to search in
        :type  location: :class:`.ClusterLocation`

        :rtype: ``list`` of :class:`.ContainerCluster`
        """
        raise NotImplementedError("list_clusters not implemented for this driver")

    def get_cluster(self, id):
        # type: (str) -> ContainerCluster
        """
        Get a cluster by ID

        :param id: The ID of the cluster to get
        :type  id: ``str``

        :rtype: :class:`.ContainerCluster`
        """
        # BUG FIX: this previously raised "list_clusters not implemented..."
        # (copy-paste from list_clusters), which misreported the missing method.
        raise NotImplementedError("get_cluster not implemented for this driver")
|
{
"content_hash": "8689f2681f3fd8c83e60f06865774b24",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 86,
"avg_line_length": 28.428260869565218,
"alnum_prop": 0.5726848665596085,
"repo_name": "mistio/libcloud",
"id": "38c53d33c629fe832b6845635d4562bf8522c493",
"size": "13859",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/container/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
}
|
from style import *
class Agent(object):
    """A named agent; printing it renders the name in red."""

    def __init__(self, name):
        # Display name used wherever the agent is printed.
        self.name = name

    def __str__(self):
        # Colour codes come from the shared Style palette (style module).
        return Style.RED + self.name + Style.END
|
{
"content_hash": "bbcf811c32c06c65eae704a6bdca90ff",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 48,
"avg_line_length": 23.857142857142858,
"alnum_prop": 0.5868263473053892,
"repo_name": "thomvangessel/inquisitive-action-models",
"id": "7816d5629ec588bc92ce217c94f3ce2191e50ecb",
"size": "213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iamlq/agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21836"
}
],
"symlink_target": ""
}
|
from api.lib.testutils import BaseTestCase
import api.companies.unittest
class TestDeleteCompany(BaseTestCase):
    """Exercise the DELETE /companies/&lt;name&gt; endpoint."""

    def test_delete_company(self):
        # Issue the delete and confirm the API acknowledges the removal.
        response = self.app.delete(
            '/companies/deerwalk',
            headers={'content-type': 'application/json'})
        self.assertEqual(response.status_code, 200)
        assert "deerwalk" in response.data
# Allow running this test module directly via the project's unittest runner.
if __name__ == "__main__":
    api.companies.unittest.main()
|
{
"content_hash": "1df2c529a01134d91eb2da2717ca687d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 35.416666666666664,
"alnum_prop": 0.6541176470588236,
"repo_name": "girisagar46/flask_restipy",
"id": "33b672e0bae7d943c61e0cb15701baf0e0908297",
"size": "425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/companies/unittest/delete_company_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4695"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
# Load the raw int32 maze dump produced by the build and display it.
maze = np.fromfile("../build/test.dat", dtype=np.int32)
grid = maze.reshape(1001, 1001)  # dump is a 1001x1001 grid -- TODO confirm against writer

plt.figure()
plt.imshow(grid)
plt.show()
|
{
"content_hash": "da46045ba89c5a37e082c72852a77182",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 55,
"avg_line_length": 22.75,
"alnum_prop": 0.7362637362637363,
"repo_name": "thomasgt/labyrinth",
"id": "78e0d14ea12aa9d558d6b1956e4eb0871a571bfb",
"size": "182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6975"
},
{
"name": "CMake",
"bytes": "169"
},
{
"name": "Python",
"bytes": "182"
}
],
"symlink_target": ""
}
|
import json
def index():
    """Echo request metadata (env, cookies, vars, routing info) as JSON.

    NOTE(review): Python 2 only -- uses the `unicode`/`long` builtins and the
    `encoding` kwarg of json.dumps; `request`/`response` are globals injected
    by the web2py framework.
    """
    envObject = request.env
    envJSON = {}
    for key in envObject:
        value = envObject[ key ]
        # Copy through only values whose type json.dumps can serialize.
        if isinstance( value, dict ) or \
            isinstance( value, list ) or isinstance( value, tuple ) or \
            isinstance( value, str ) or isinstance( value, unicode ) or \
            isinstance( value, int ) or isinstance( value, long ) or isinstance( value, float ) or \
            value is None or value is True or value is False:
            envJSON[ key ] = value
        else:
            # Keep the key visible without breaking serialization.
            envJSON[ key ] = 'Value not JSON-serializable'
    data = {
        'env' : envJSON,
        'cookies' : request.cookies,
        'vars' : request.vars,
        'get_vars' : request.get_vars,
        'post_vars' : request.post_vars,
        'folder' : request.folder,
        'application' : request.application,
        'controller' : request.controller,
        'function' : request.function,
        'args' : request.args,
        'extension' : request.extension,
        'now' : str( request.now )
    }
    dataStr = json.dumps( data, encoding = 'utf-8', indent = 2, sort_keys = True )
    response.headers['Content-Type'] = 'application/json'
    return dataStr
|
{
"content_hash": "20958ab77f7d9e978438259be613cf5c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 93,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.6606463878326996,
"repo_name": "uwdata/termite-data-server",
"id": "a90d1b740c876e419b23233a82cd256f14caa526",
"size": "1075",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "echo_src/controllers/default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "717883"
},
{
"name": "Java",
"bytes": "4515"
},
{
"name": "JavaScript",
"bytes": "2239861"
},
{
"name": "Perl",
"bytes": "2402"
},
{
"name": "Python",
"bytes": "6125625"
},
{
"name": "Shell",
"bytes": "104651"
}
],
"symlink_target": ""
}
|
import logging
import click
from flask.cli import with_appcontext
from scout.server.extensions import store
LOG = logging.getLogger(__name__)
@click.command("panels", short_help="Display gene panels")
@click.option("-i", "--institute", help="institute id")
@with_appcontext
def panels(institute):
    """Show all gene panels in the database"""
    LOG.info("Running scout view panels")
    adapter = store

    gene_panels = list(adapter.gene_panels(institute_id=institute, include_hidden=True))
    if not gene_panels:
        LOG.info("No panels found")
        raise click.Abort()

    # Tab-separated header followed by one row per panel.
    click.echo("#panel_name\tversion\tnr_genes\thidden\tdate")
    for panel in gene_panels:
        click.echo(
            "{0}\t{1}\t{2}\t{3}\t{4}".format(
                panel["panel_name"],
                str(panel["version"]),
                len(panel["genes"]),
                panel.get("hidden", False),
                str(panel["date"].strftime("%Y-%m-%d")),
            )
        )
|
{
"content_hash": "8f9c94d220ac30ee9d28e6fbacdc8ccc",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 91,
"avg_line_length": 28.97222222222222,
"alnum_prop": 0.5915627996164909,
"repo_name": "Clinical-Genomics/scout",
"id": "c7ccad7814f7dbcef74b90854757b4c18da61528",
"size": "1043",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scout/commands/view/panels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
}
|
"""
Copyright [2011] [y42sora]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'y42sora'
import base64
import urllib
import time
import random
import hmac
import binascii
import cgi
class OAuthConsumer(object):
    """Holds an OAuth consumer key/secret pair."""

    # Defaults before construction; instances shadow these.
    key = None
    secret = None

    def __init__(self, key, secret):
        """
        :param key: consumer key string
        :param secret: consumer secret string
        """
        self.key = key
        self.secret = secret
class OauthToken(object):
    """Holds an OAuth token key/secret pair (callback left unset)."""

    # Defaults before construction; instances shadow key/secret.
    key = None
    secret = None
    callback = None

    def __init__(self, key, secret):
        """
        :param key: token key string
        :param secret: token secret string
        """
        self.key = key
        self.secret = secret
|
{
"content_hash": "df358f843efe1c34787ec8810a30d8d9",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 75,
"avg_line_length": 24.458333333333332,
"alnum_prop": 0.6448040885860307,
"repo_name": "y42sora/Twitter4Py3",
"id": "1f9cf967748da974fe6168f9cb4dd9a9b48e0c0c",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Oauth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4335"
}
],
"symlink_target": ""
}
|
import falcon
import os
from sqlalchemy.exc import SQLAlchemyError
from db import session
import model
import util
class EvalCode(object):
    """Falcon resource: submitted code plus its on-disk evaluation artifacts."""

    def _file_or_error(self, fn):
        # Return the file's contents, or a (Czech) "file does not exist"
        # message instead of raising.
        if not os.path.isfile(fn):
            return "Soubor %s neexistuje." % (fn)
        with open(fn, "r") as f:
            return f.read()

    def on_get(self, req, resp, id):
        """GET handler: return code + run logs for evaluation ``id``.

        Only logged-in organisers may access this endpoint.
        """
        try:
            user = req.context['user']

            # Access restricted to logged-in organisers.
            if (not user.is_logged_in()) or (not user.is_org()):
                resp.status = falcon.HTTP_400
                return

            code = session.query(model.SubmittedCode).\
                filter(model.SubmittedCode.evaluation == id).first()

            if not code:
                req.context['result'] = {
                    'errors': [{'id': 5, 'title': "Code not found in db"}]
                }
                return

            evaluation = session.query(model.Evaluation).get(code.evaluation)
            if not evaluation:
                req.context['result'] = {
                    'errors': [
                        {'id': 5, 'title': "Evaluation not found in db"}
                    ]
                }
                return

            # Artifacts live under data/exec/module_<id>/user_<id>/.
            eval_dir = os.path.join(
                'data',
                'exec',
                'module_' + str(evaluation.module),
                'user_' + str(evaluation.user))

            CODE_PATH = os.path.join(eval_dir, 'box', 'run')
            STDOUT_PATH = os.path.join(eval_dir, 'stdout')
            STDERR_PATH = os.path.join(eval_dir, 'stderr')
            MERGE_STDOUT = os.path.join(eval_dir, 'merge.stdout')
            CHECK_STDOUT = os.path.join(eval_dir, 'check.stdout')
            SOURCE_PATH = os.path.join(eval_dir, util.programming.SOURCE_FILE)

            lines = []
            if os.path.isfile(SOURCE_PATH):
                with open(SOURCE_PATH, 'r') as s:
                    lines = s.read().split('\n')

            # The source file's first two lines record which evaluation the
            # artifacts belong to; a mismatch means they were overwritten.
            if len(lines) >= 2 and (lines[0] != "evaluation" or
                                    lines[1] != str(id)):
                req.context['result'] = {
                    'evalCode': {
                        'id': evaluation.id,
                        'code': code.code,
                        'merged': ('Další záznamy o vyhodnocení už nejsou k '
                                   'dispozici, byly nahrazeny novým opravením '
                                   'nebo spuštěním.'),
                    }
                }
                return

            req.context['result'] = {
                'evalCode': {
                    'id': evaluation.id,
                    'code': code.code,
                    'merged': self._file_or_error(CODE_PATH),
                    'stdout': self._file_or_error(STDOUT_PATH),
                    'stderr': self._file_or_error(STDERR_PATH),
                    'merge_stdout': self._file_or_error(MERGE_STDOUT),
                    'check_stdout': self._file_or_error(CHECK_STDOUT),
                }
            }

        except SQLAlchemyError:
            session.rollback()
            raise
        finally:
            session.close()
|
{
"content_hash": "a5a0a2a8a9b0fb7f97bb36b14a59f0ac",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 34.34065934065934,
"alnum_prop": 0.448,
"repo_name": "fi-ksi/web-backend",
"id": "3f74c659e36304822013bc8cd239f7cb846ce81a",
"size": "3135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "endpoint/admin/evalCode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1669"
},
{
"name": "Python",
"bytes": "386983"
},
{
"name": "Shell",
"bytes": "2561"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the shared template for the small asteroid 01 static object."""
    obj = Static()
    obj.template = "object/static/space/asteroid/shared_asteroid_small_01.iff"
    obj.attribute_template_id = -1
    obj.stfName("obj_n","unknown_object")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
|
{
"content_hash": "d481f3b6513bbc72757942212a069a57",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6938110749185668,
"repo_name": "obi-two/Rebelion",
"id": "0022dd7d2dbef31d3deb4528dd9f0371655186c6",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/static/space/asteroid/shared_asteroid_small_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
""" Sahana Eden Module Automated Tests - INV002 Receive Items
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
import unittest
from tests.web2unittest import SeleniumUnitTest
from selenium.common.exceptions import NoSuchElementException
from s3 import s3_debug
from helper import InvTestFunctions
class ReceiveItem(InvTestFunctions):
    """
        Inventory Test - Receive Workflow (Receive items)

        @Case: INV002
        @param items: This test receives a specific item from another party.
        This test assume that regression/inv-mngt has been added to prepop
        - e.g. via demo/IFRC_Train

        @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
        @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
    """

    # -------------------------------------------------------------------------
    @staticmethod
    def _blanket_items():
        """Item data shared by both INV002 scenarios: 3 pieces of Blankets."""
        return [
                 [
                  ("item_id",
                   "Blankets",
                   "supply_widget",
                  ),
                  ("item_pack_id",
                   "Piece",
                   "option",
                  ),
                  ("quantity",
                   "3",
                  ),
                 ]
                ]

    # -------------------------------------------------------------------------
    def _run_receive_workflow(self, user, recv_data, item_data):
        """Create a receive shipment, add the items, then receive it.

        Shared body of the INV002 scenarios; previously duplicated in each
        test method.
        """
        # Create the receive shipment
        result = self.receive(user, recv_data)
        recv_id = self.recv_get_id(result)

        # Add items to the shipment
        item_list = []
        for data in item_data:
            result = self.track_recv_item(user, recv_id, data)
            text = "%s %s" % (data[2][1], data[0][1])
            item_list.append({"text": text,
                              "record": result["after"].records[0]
                              })

        # Receive the shipment
        self.recv_shipment(user, recv_id, item_list)

    # -------------------------------------------------------------------------
    def test_inv002a_receive_items(self):
        """ Tests for Receive Workflow """
        user = "admin"
        recv_data = [("send_ref",
                      "WB_TEST_000002a",
                      ),
                     ("purchase_ref",
                      "PO_TEST_000002a",
                      ),
                     ("site_id",
                      "Same Warehouse (Warehouse)",
                      "option",
                      ),
                     ("type",
                      "Internal Shipment",
                      "option",
                      ),
                     ("from_site_id",
                      "Ainaro Warehouse (Warehouse)",
                      "option",
                      ),
                     ]
        self._run_receive_workflow(user, recv_data, self._blanket_items())

    def test_inv002b_receive_items(self):
        """ Tests for Receive Workflow """
        user = "admin"
        recv_data = [("send_ref",
                      "WB_TEST_000002b",
                      ),
                     ("purchase_ref",
                      "PO_TEST_000002b",
                      ),
                     ("site_id",
                      "Same Warehouse (Warehouse)",
                      "option",
                      ),
                     ("type",
                      "Internal Shipment",
                      "option",
                      ),
                     ("from_site_id",
                      "Timor-Leste Red Cross Society (CVTL) National Warehouse (Warehouse)",
                      "option",
                      ),
                     ]
        self._run_receive_workflow(user, recv_data, self._blanket_items())
|
{
"content_hash": "31ab8fb679fbf9f95b9e28fb195e48e5",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 110,
"avg_line_length": 36.751633986928105,
"alnum_prop": 0.46505424150809177,
"repo_name": "flavour/rgims_as_diff",
"id": "fc6ebd49c07cbcf9d87e34110e31929be6dd88e9",
"size": "5623",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "modules/tests/inv/receive_item.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1079144"
},
{
"name": "JavaScript",
"bytes": "15122049"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "23100673"
},
{
"name": "Racket",
"bytes": "166"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "XSLT",
"bytes": "1307376"
}
],
"symlink_target": ""
}
|
import functools
import struct
from datetime import datetime, timezone

from .exceptions import AMQPError
def rethrow_as(expected_cls, to_throw):
    """Build a decorator converting ``expected_cls`` errors into ``to_throw``.

    :param expected_cls: exception class (or tuple) to intercept
    :param to_throw: exception *instance* raised in its place; the original
        error is chained as ``__cause__`` via ``raise ... from ...``.

    NOTE: ``to_throw`` is a single shared instance, so every failure of the
    decorated function raises that same object.
    """
    def decorator(f):
        # BUG FIX: functools.wraps preserves the wrapped reader's
        # name/docstring, which were previously clobbered by the wrapper.
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except expected_cls as e:
                raise to_throw from e
        return wrapper
    return decorator
###########################################################
# Deserialisation
###########################################################
# Each public read_* function delegates to a private _read_* helper (defined
# elsewhere in this module) that returns a tuple whose element [0] is the
# decoded value; rethrow_as converts low-level errors into AMQPError.


@rethrow_as(struct.error, AMQPError('failed to read an octet'))
def read_octet(stream):
    """Read an octet from ``stream``; struct errors surface as AMQPError."""
    return _read_octet(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read a short'))
def read_short(stream):
    """Read a signed short from ``stream``."""
    return _read_short(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read an unsigned short'))
def read_unsigned_short(stream):
    """Read an unsigned short from ``stream``."""
    return _read_unsigned_short(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read a long'))
def read_long(stream):
    """Read a signed long from ``stream``."""
    return _read_long(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read an unsigned long'))
def read_unsigned_long(stream):
    """Read an unsigned long from ``stream``."""
    return _read_unsigned_long(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read a long long'))
def read_long_long(stream):
    """Read a signed long long from ``stream``."""
    return _read_long_long(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read an unsigned long long'))
def read_unsigned_long_long(stream):
    """Read an unsigned long long from ``stream``."""
    return _read_unsigned_long_long(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read a short string'))
def read_short_string(stream):
    """Read a short string from ``stream``."""
    return _read_short_string(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read a long string'))
def read_long_string(stream):
    """Read a long string from ``stream``."""
    return _read_long_string(stream)[0]


# Table/array parsing can also fail with KeyError (unknown value-type tag),
# hence the second rethrow_as layer.
@rethrow_as(KeyError, AMQPError('failed to read a table'))
@rethrow_as(struct.error, AMQPError('failed to read a table'))
def read_table(stream):
    """Read a field table from ``stream``."""
    return _read_table(stream)[0]


@rethrow_as(KeyError, AMQPError('failed to read an array'))
@rethrow_as(struct.error, AMQPError('failed to read an array'))
def read_array(stream):
    """Read a field array from ``stream``."""
    return _read_array(stream)[0]


@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_bool(stream):
    """Read a boolean from ``stream``."""
    return _read_bool(stream)[0]
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_bools(byte, number_of_bools):
bits = "{0:b}".format(byte)
bits = "0" * (number_of_bools - len(bits)) + bits
return (b == "1" for b in reversed(bits))
@rethrow_as(struct.error, AMQPError('failed to read a boolean'))
def read_timestamp(stream):
return _read_timestamp(stream)[0]
def qpid_rabbit_mq_table():
    """Return a fresh mapping from field-table type code (one byte) to the
    _read_* primitive that decodes that value.

    The codes follow the RabbitMQ/Qpid field-table convention rather than
    the bare AMQP 0-9-1 spec, hence the TODOs below.
    """
    # TODO: fix amqp 0.9.1 compatibility
    # TODO: Add missing types
    TABLE_VALUE_PARSERS = {
        b't': _read_bool,
        b'b': _read_signed_byte,
        b's': _read_short,
        b'I': _read_long,
        b'l': _read_long_long,
        b'f': _read_float,
        b'S': _read_long_string,
        b'A': _read_array,
        b'V': _read_void,
        b'x': _read_byte_array,
        b'F': _read_table,
        b'T': _read_timestamp
    }
    return TABLE_VALUE_PARSERS
def _read_table(stream):
    """Read an AMQP field table: a 4-byte byte-length prefix followed by
    (short-string key, type code, value) triples.

    Returns (dict, total_bytes_consumed). Raises KeyError on an unknown
    type code (translated to AMQPError by the read_table wrapper).
    """
    TABLE_VALUE_PARSERS = qpid_rabbit_mq_table()
    table = {}
    # `consumed` tracks every byte read so far (including the length prefix
    # itself) so we know when the declared table length is exhausted.
    table_length, initial_long_size = _read_unsigned_long(stream)
    consumed = initial_long_size
    while consumed < table_length + initial_long_size:
        key, x = _read_short_string(stream)
        consumed += x
        value_type_code = stream.read(1)
        consumed += 1
        value, x = TABLE_VALUE_PARSERS[value_type_code](stream)
        consumed += x
        table[key] = value
    return table, consumed
def _read_short_string(stream):
    """Read an AMQP short string: 1-byte length prefix + UTF-8 payload.

    Returns (string, total_bytes_consumed).
    Raises AMQPError if the stream ends before the declared length --
    previously a truncated read here was silently decoded as a shorter
    string, unlike the long-string path below.
    """
    str_length, x = _read_octet(stream)
    buffer = stream.read(str_length)
    if len(buffer) != str_length:
        raise AMQPError("Short string had incorrect length")
    return buffer.decode('utf-8'), x + str_length
def _read_long_string(stream):
    """Read an AMQP long string: 4-byte length prefix + UTF-8 payload.

    Returns (string, total_bytes_consumed).
    Raises AMQPError if the stream ends before the declared length.
    """
    str_length, x = _read_unsigned_long(stream)
    buffer = stream.read(str_length)
    if len(buffer) != str_length:
        raise AMQPError("Long string had incorrect length")
    return buffer.decode('utf-8'), x + str_length
# Low-level primitives. Every _read_* function returns a pair of
# (decoded value, number of bytes consumed from the stream); all multi-byte
# integers are network (big-endian) byte order per the '!' struct prefix.
def _read_octet(stream):
    x, = struct.unpack('!B', stream.read(1))
    return x, 1
def _read_signed_byte(stream):
    x, = struct.unpack_from('!b', stream.read(1))
    return x, 1
def _read_bool(stream):
    x, = struct.unpack('!?', stream.read(1))
    return x, 1
def _read_short(stream):
    x, = struct.unpack('!h', stream.read(2))
    return x, 2
def _read_unsigned_short(stream):
    x, = struct.unpack('!H', stream.read(2))
    return x, 2
def _read_long(stream):
    x, = struct.unpack('!l', stream.read(4))
    return x, 4
def _read_unsigned_long(stream):
    x, = struct.unpack('!L', stream.read(4))
    return x, 4
def _read_long_long(stream):
    x, = struct.unpack('!q', stream.read(8))
    return x, 8
def _read_unsigned_long_long(stream):
    x, = struct.unpack('!Q', stream.read(8))
    return x, 8
def _read_float(stream):
    x, = struct.unpack('!f', stream.read(4))
    return x, 4
def _read_timestamp(stream):
    x, = struct.unpack('!Q', stream.read(8))
    # The wire value is treated as milliseconds since the epoch (hence the
    # * 1e-3) and converted to a timezone-aware UTC datetime -- unlike the
    # naive-local result datetime.utcfromtimestamp would give. This mirrors
    # the * 1e3 in pack_timestamp below.
    return datetime.fromtimestamp(x * 1e-3, timezone.utc), 8
def _read_array(stream):
    """Read an AMQP field array: a 4-byte byte-length prefix followed by
    (type code, value) pairs. Returns (list, total_bytes_consumed)."""
    TABLE_VALUE_PARSERS = qpid_rabbit_mq_table()
    field_array = []
    # The standard says only long, but unsigned long seems sensible
    array_length, initial_long_size = _read_unsigned_long(stream)
    consumed = initial_long_size
    while consumed < array_length + initial_long_size:
        value_type_code = stream.read(1)
        consumed += 1
        value, x = TABLE_VALUE_PARSERS[value_type_code](stream)
        consumed += x
        field_array.append(value)
    return field_array, consumed
def _read_void(stream):
    # 'V' carries no payload at all.
    return None, 0
def _read_byte_array(stream):
    # 4-byte length prefix followed by raw bytes.
    byte_array_length, x = _read_unsigned_long(stream)
    return stream.read(byte_array_length), byte_array_length + x
###########################################################
# Serialisation
###########################################################
def pack_short_string(string):
    # Short string: 1-byte length prefix + UTF-8 bytes
    # (inverse of _read_short_string; length measured in encoded bytes).
    buffer = string.encode('utf-8')
    return pack_octet(len(buffer)) + buffer
def pack_long_string(string):
    # Long string: 4-byte length prefix + UTF-8 bytes
    # (inverse of _read_long_string).
    buffer = string.encode('utf-8')
    return pack_unsigned_long(len(buffer)) + buffer
def pack_field_value(value):
    """Serialise a Python value as an AMQP field-table value: a one-byte
    type code followed by the encoded payload.

    The type codes mirror the ones accepted by qpid_rabbit_mq_table().
    Raises NotImplementedError for unsupported types and for integers
    wider than 64 bits.
    """
    if value is None:
        return b'V'
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(value, bool):
        return b't' + pack_bool(value)
    if isinstance(value, dict):
        return b'F' + pack_table(value)
    if isinstance(value, list):
        return b'A' + pack_array(value)
    if isinstance(value, bytes):
        return b'x' + pack_byte_array(value)
    if isinstance(value, str):
        return b'S' + pack_long_string(value)
    if isinstance(value, datetime):
        return b'T' + pack_timestamp(value)
    if isinstance(value, int):
        if value.bit_length() < 8:
            return b'b' + pack_signed_byte(value)
        if value.bit_length() < 32:
            return b'I' + pack_long(value)
        # Previously ints needing 32-63 bits fell through to the bare
        # NotImplementedError below, even though the table grammar has a
        # 64-bit code ('l', decoded by _read_long_long).
        if value.bit_length() < 64:
            return b'l' + pack_long_long(value)
    if isinstance(value, float):
        return b'f' + pack_float(value)
    raise NotImplementedError()
def pack_table(d):
    """Serialise dict *d* as an AMQP field table: concatenated
    (short-string key, field value) pairs behind a 4-byte byte-length
    prefix (inverse of _read_table)."""
    buffer = b''
    for key, value in d.items():
        buffer += pack_short_string(key)
        # todo: more values
        buffer += pack_field_value(value)
    return pack_unsigned_long(len(buffer)) + buffer
# Low-level pack_* primitives: exact inverses of the _read_* functions
# above; all multi-byte integers are network (big-endian) byte order.
def pack_octet(number):
    return struct.pack('!B', number)
def pack_signed_byte(number):
    return struct.pack('!b', number)
def pack_unsigned_byte(number):
    return struct.pack('!B', number)
def pack_short(number):
    return struct.pack('!h', number)
def pack_unsigned_short(number):
    return struct.pack('!H', number)
def pack_long(number):
    return struct.pack('!l', number)
def pack_unsigned_long(number):
    return struct.pack('!L', number)
def pack_long_long(number):
    return struct.pack('!q', number)
def pack_unsigned_long_long(number):
    return struct.pack('!Q', number)
def pack_float(number):
    return struct.pack('!f', number)
def pack_bool(b):
    return struct.pack('!?', b)
def pack_timestamp(timeval):
    # Encoded as milliseconds since the epoch (mirrors the * 1e-3 in
    # _read_timestamp); fractional milliseconds are truncated by int().
    number = int(timeval.timestamp() * 1e3)
    return struct.pack('!Q', number)
def pack_byte_array(value):
    # 4-byte length prefix followed by the raw bytes.
    buffer = pack_unsigned_long(len(value))
    buffer += value
    return buffer
def pack_array(items):
    # Field array: concatenated field values behind a 4-byte byte-length
    # prefix (inverse of _read_array).
    buffer = b''
    for value in items:
        buffer += pack_field_value(value)
    return pack_unsigned_long(len(buffer)) + buffer
def pack_bools(*bs):
    """Pack up to eight boolean flags into a single octet, least
    significant bit first: bit i of the result holds bs[i]."""
    bitmask = 0
    for position, flag in enumerate(bs):
        if flag:
            bitmask |= 1 << position
    return pack_octet(bitmask)
|
{
"content_hash": "cb33b7d5a9ac42da633dfc8a39f52a55",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 98,
"avg_line_length": 24.582386363636363,
"alnum_prop": 0.6229053507454062,
"repo_name": "socketpair/asynqp",
"id": "694a369ff088a8a0fd70baa0a97f7fee2cb7dea0",
"size": "8653",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/asynqp/serialisation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "181316"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.shortcuts import render, redirect, get_object_or_404
from servo.models.rules import *
def get_data(request):
    # Placeholder endpoint -- not implemented yet.
    pass
def list_rules(request):
    """Render the list of all rules.

    The template context is the locals() snapshot at the return statement
    (title, object_list, request), matching this module's pattern.
    """
    title = _('Rules')
    object_list = Rule.objects.all()
    return render(request, "rules/list_rules.html", locals())
def edit_rule(request, pk=None):
    """Create (pk is None) or update an existing rule.

    On POST the rule's conditions and actions are rebuilt wholesale from
    the parallel condition-key/condition-value and action-key/action-value
    POST lists; existing rows are deleted and recreated.
    """
    title = _('Rules')
    object_list = Rule.objects.all()
    if pk:
        rule = get_object_or_404(Rule, pk=pk)
    if request.method == 'POST':
        # NOTE(review): re-fetches the rule instead of reusing the
        # get_object_or_404 result above; .get() raises DoesNotExist (500)
        # rather than returning a 404 here -- confirm intended.
        if pk:
            rule = Rule.objects.get(pk=pk)
        else:
            rule = Rule()
        # NOTE(review): no form validation; description may be None.
        rule.description = request.POST.get('description')
        #rule.match = request.POST.get('description')
        rule.save()
        # Replace-all strategy: drop every existing condition/action, then
        # recreate them from the submitted key/value lists, which are kept
        # in lockstep by index.
        rule.condition_set.all().delete()
        rule.action_set.all().delete()
        keys = request.POST.getlist('condition-key')
        values = request.POST.getlist('condition-value')
        for k, v in enumerate(keys):
            # v is the key string, k its index into the values list.
            cond = Condition(rule=rule)
            cond.key = v
            cond.value = values[k]
            cond.save()
        keys = request.POST.getlist('action-key')
        values = request.POST.getlist('action-value')
        for k, v in enumerate(keys):
            action = Action(rule=rule)
            action.key = v
            action.value = values[k]
            action.save()
    # Context is the locals() snapshot (title, object_list, rule, ...).
    return render(request, "rules/form.html", locals())
def view_rule(request, pk):
    # Placeholder -- not implemented yet.
    pass
def delete_rule(request, pk):
    """Confirm (GET) and perform (POST) deletion of a rule."""
    action = request.path
    title = _('Delete rule')
    rule = get_object_or_404(Rule, pk=pk)
    if request.method == 'POST':
        rule.delete()
        # NOTE(review): messages.error is used for a *successful* deletion,
        # possibly deliberately for red highlighting -- confirm.
        messages.error(request, _('Rule deleted'))
        return redirect(list_rules)
    return render(request, "generic/delete.html", locals())
|
{
"content_hash": "341046e3656ee68856ccece6b7202b28",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 64,
"avg_line_length": 25.2,
"alnum_prop": 0.5962962962962963,
"repo_name": "filipp/Servo",
"id": "ec193a897b806b51af3b8dd227debce94473b5e1",
"size": "3268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "servo/views/rules.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "114750"
},
{
"name": "HTML",
"bytes": "493143"
},
{
"name": "JavaScript",
"bytes": "430810"
},
{
"name": "Makefile",
"bytes": "297"
},
{
"name": "Python",
"bytes": "998166"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from awips.dataaccess import DataAccessLayer as DAL
from dynamicserialize.dstypes.com.raytheon.uf.common.dataquery.requests import RequestConstraint
from awips.test.dafTests import baseDafTestCase
from awips.test.dafTests import params
import unittest
#
# Test DAF support for bufrua data
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/19/16 4795 mapeters Initial Creation.
# 04/11/16 5548 tgurney Cleanup
# 04/18/16 5548 tgurney More cleanup
# 06/09/16 5587 bsteffen Add getIdentifierValues tests
# 06/13/16 5574 tgurney Add advanced query tests
# 06/30/16 5725 tgurney Add test for NOT IN
# 12/07/16 5981 tgurney Parameterize
# 12/15/16 5981 tgurney Add envelope test
#
#
class BufrUaTestCase(baseDafTestCase.DafTestCase):
    """Test DAF support for bufrua data.

    Locations/times are filtered by reportType 2020 (mandatory-level
    upper-air reports); constraint tests exercise every supported
    RequestConstraint operator against the reportType identifier.
    """
    datatype = "bufrua"
    # Station comes from the shared test parameter module.
    location = params.STATION_ID
    def testGetAvailableParameters(self):
        req = DAL.newDataRequest(self.datatype)
        self.runParametersTest(req)
    def testGetAvailableLocations(self):
        req = DAL.newDataRequest(self.datatype)
        req.addIdentifier("reportType", "2020")
        self.runLocationsTest(req)
    def testGetAvailableTimes(self):
        req = DAL.newDataRequest(self.datatype)
        req.setLocationNames(self.location)
        req.addIdentifier("reportType", "2020")
        self.runTimesTest(req)
    def testGetGeometryData(self):
        """Fetch geometry records and print a sample of each level type."""
        req = DAL.newDataRequest(self.datatype)
        req.setLocationNames(self.location)
        req.addIdentifier("reportType", "2020")
        req.setParameters("sfcPressure", "staName", "rptType", "tdMan")
        print("Testing getGeometryData()")
        geomData = DAL.getGeometryData(req)
        self.assertIsNotNone(geomData)
        print("Number of geometry records: " + str(len(geomData)))
        print("Sample geometry data:")
        for record in geomData[:self.sampleDataLimit]:
            print("level=", record.getLevel(), end="")
            # One dimensional parameters are reported on the 0.0UNKNOWN level.
            # 2D parameters are reported on MB levels from pressure.
            if record.getLevel() == "0.0UNKNOWN":
                print(" sfcPressure=" + record.getString("sfcPressure") + record.getUnit("sfcPressure"), end="")
                print(" staName=" + record.getString("staName"), end="")
                print(" rptType=" + record.getString("rptType") + record.getUnit("rptType"), end="")
            else:
                print(" tdMan=" + str(record.getNumber("tdMan")) + record.getUnit("tdMan"), end="")
            print(" geometry=", record.getGeometry())
        print("getGeometryData() complete\n\n")
    def testGetGeometryDataWithEnvelope(self):
        # Every returned geometry must fall inside the requested envelope.
        req = DAL.newDataRequest(self.datatype)
        req.setParameters("staName", "rptType")
        req.setEnvelope(params.ENVELOPE)
        data = self.runGeometryDataTest(req)
        for item in data:
            self.assertTrue(params.ENVELOPE.contains(item.getGeometry()))
    def testGetIdentifierValues(self):
        req = DAL.newDataRequest(self.datatype)
        optionalIds = set(DAL.getOptionalIdentifiers(req))
        self.runGetIdValuesTest(optionalIds)
    def testGetInvalidIdentifierValuesThrowsException(self):
        self.runInvalidIdValuesTest()
    def testGetNonexistentIdentifierValuesThrowsException(self):
        self.runNonexistentIdValuesTest()
    def _runConstraintTest(self, key, operator, value):
        """Build a request constrained by (key, operator, value) and run
        the shared geometry-data test, returning the records."""
        req = DAL.newDataRequest(self.datatype)
        constraint = RequestConstraint.new(operator, value)
        req.addIdentifier(key, constraint)
        # As an identifier it is "reportType" but as a parameter it is
        # "rptType"... this is weird...
        req.setParameters("staName", "rptType")
        return self.runGeometryDataTest(req)
    def testGetDataWithEqualsString(self):
        geometryData = self._runConstraintTest('reportType', '=', '2022')
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')
    def testGetDataWithEqualsUnicode(self):
        geometryData = self._runConstraintTest('reportType', '=', u'2022')
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')
    def testGetDataWithEqualsInt(self):
        geometryData = self._runConstraintTest('reportType', '=', 2022)
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')
    # NOTE(review): identical to testGetDataWithEqualsInt -- presumably a
    # leftover from a Python 2 `long` literal; confirm before removing.
    def testGetDataWithEqualsLong(self):
        geometryData = self._runConstraintTest('reportType', '=', 2022)
        for record in geometryData:
            self.assertEqual(record.getString('rptType'), '2022')
    # No float test because no float identifiers are available
    def testGetDataWithEqualsNone(self):
        geometryData = self._runConstraintTest('reportType', '=', None)
        for record in geometryData:
            self.assertEqual(record.getType('rptType'), 'NULL')
    def testGetDataWithNotEquals(self):
        geometryData = self._runConstraintTest('reportType', '!=', 2022)
        for record in geometryData:
            self.assertNotEqual(record.getString('rptType'), '2022')
    def testGetDataWithNotEqualsNone(self):
        geometryData = self._runConstraintTest('reportType', '!=', None)
        for record in geometryData:
            self.assertNotEqual(record.getType('rptType'), 'NULL')
    def testGetDataWithGreaterThan(self):
        geometryData = self._runConstraintTest('reportType', '>', 2022)
        for record in geometryData:
            self.assertGreater(record.getString('rptType'), '2022')
    def testGetDataWithLessThan(self):
        geometryData = self._runConstraintTest('reportType', '<', 2022)
        for record in geometryData:
            self.assertLess(record.getString('rptType'), '2022')
    def testGetDataWithGreaterThanEquals(self):
        geometryData = self._runConstraintTest('reportType', '>=', 2022)
        for record in geometryData:
            self.assertGreaterEqual(record.getString('rptType'), '2022')
    def testGetDataWithLessThanEquals(self):
        geometryData = self._runConstraintTest('reportType', '<=', 2022)
        for record in geometryData:
            self.assertLessEqual(record.getString('rptType'), '2022')
    def testGetDataWithInTuple(self):
        collection = ('2022', '2032')
        geometryData = self._runConstraintTest('reportType', 'in', collection)
        for record in geometryData:
            self.assertIn(record.getString('rptType'), collection)
    def testGetDataWithInList(self):
        collection = ['2022', '2032']
        geometryData = self._runConstraintTest('reportType', 'in', collection)
        for record in geometryData:
            self.assertIn(record.getString('rptType'), collection)
    def testGetDataWithInGenerator(self):
        collection = ('2022', '2032')
        generator = (item for item in collection)
        geometryData = self._runConstraintTest('reportType', 'in', generator)
        for record in geometryData:
            self.assertIn(record.getString('rptType'), collection)
    def testGetDataWithNotInList(self):
        collection = ('2022', '2032')
        geometryData = self._runConstraintTest('reportType', 'not in', collection)
        for record in geometryData:
            self.assertNotIn(record.getString('rptType'), collection)
    def testGetDataWithInvalidConstraintTypeThrowsException(self):
        with self.assertRaises(ValueError):
            self._runConstraintTest('reportType', 'junk', '2022')
    def testGetDataWithInvalidConstraintValueThrowsException(self):
        with self.assertRaises(TypeError):
            self._runConstraintTest('reportType', '=', {})
    # NOTE(review): the two tests below constrain on 'rptType' while every
    # other test uses the 'reportType' identifier -- confirm intended (the
    # expected exception fires before the key is used, so they still pass).
    def testGetDataWithEmptyInConstraintThrowsException(self):
        with self.assertRaises(ValueError):
            self._runConstraintTest('rptType', 'in', [])
    def testGetDataWithNestedInConstraintThrowsException(self):
        collection = ('2022', '2032', ())
        with self.assertRaises(TypeError):
            self._runConstraintTest('rptType', 'in', collection)
|
{
"content_hash": "21b6658af07ddf08f2de54566d002394",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 112,
"avg_line_length": 42.0547263681592,
"alnum_prop": 0.6488820537087424,
"repo_name": "mjames-upc/python-awips",
"id": "5c3f6ffae453b669aea4faa5a7e1ec5ab995fac5",
"size": "8453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awips/test/dafTests/testBufrUa.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "27192"
},
{
"name": "Python",
"bytes": "714011"
}
],
"symlink_target": ""
}
|
from os.path import exists

from setuptools import setup


def _read_requirements(path='requirements.txt'):
    """Return the list of requirement strings read from *path*."""
    # Context manager closes the handle deterministically (the original
    # left open() results to the garbage collector).
    with open(path) as f:
        return f.read().strip().split('\n')


def _read_long_description(path='README.rst'):
    """Return the README contents, or '' when it is absent."""
    if not exists(path):
        return ''
    with open(path) as f:
        return f.read()


setup(name='castra',
      version='0.1.2',
      description='On-disk partitioned store',
      url='http://github.com/Blosc/Castra/',
      maintainer='Matthew Rocklin',
      maintainer_email='mrocklin@gmail.com',
      license='BSD',
      keywords='',
      packages=['castra'],
      package_data={'castra': ['tests/*.py']},
      install_requires=_read_requirements(),
      long_description=_read_long_description(),
      zip_safe=False)
|
{
"content_hash": "e77c909f588bab847d97623984749ee6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 81,
"avg_line_length": 35.294117647058826,
"alnum_prop": 0.6016666666666667,
"repo_name": "mrocklin/castra",
"id": "d0c97b6114c2b688cba4749bd5d0ca0433f201cc",
"size": "623",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24175"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the enhanced imperial RSS droid-interface ship component.

    Generated-template boilerplate: hand edits belong strictly between the
    MODIFICATIONS markers below.
    """
    component = Tangible()
    component.template = "object/tangible/ship/components/droid_interface/shared_ddi_rss_enhanced_imperial_1.iff"
    component.attribute_template_id = 8
    component.stfName("space/space_item","ddi_rss_enhanced_imperial_1_n")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return component
|
{
"content_hash": "38514ede8ab7dc5e243ca85a12f5d744",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 107,
"avg_line_length": 27.923076923076923,
"alnum_prop": 0.71900826446281,
"repo_name": "obi-two/Rebelion",
"id": "822277147d39aed9c3d78a479680c491311ea00c",
"size": "508",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/ship/components/droid_interface/shared_ddi_rss_enhanced_imperial_1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Sphinx configuration for the django-payments documentation build.

Derived from the stock sphinx-quickstart template; commented-out settings
are kept at their documented defaults.
"""
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-payments'
copyright = u'2010-2013, Mirumee Software'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-paymentsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'django-payments.tex', u'django-payments Documentation',
   u'Mirumee Software', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "754f3717ae59a682fe1900330eb57da1",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 31.711764705882352,
"alnum_prop": 0.714153218326841,
"repo_name": "imakin/pysar-payments",
"id": "90151137f0c3eb977b67ca23f496fe497fb4e291",
"size": "6323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "899"
},
{
"name": "JavaScript",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "168133"
}
],
"symlink_target": ""
}
|
"""This code example gets all line item creative associations (LICA) for a given
line item id.
To create LICAs, run create_licas.py."""
__author__ = ('Nicholas Chen',
              'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set the id of the line item to get LICAs by.
# (Placeholder: replace with a real line item id before running.)
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client, line_item_id):
  """Fetch and print every LICA attached to *line_item_id*, one page at a
  time, then print the total number of results found.
  """
  # Initialize appropriate service.
  lica_service = client.GetService(
      'LineItemCreativeAssociationService', version='v201411')
  # Create statement object to only select LICAs for the given line item id.
  values = [{
      'key': 'lineItemId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': line_item_id
      }
  }]
  query = 'WHERE lineItemId = :lineItemId'
  statement = dfp.FilterStatement(query, values)
  while True:
    # Get LICAs by statement.
    response = lica_service.getLineItemCreativeAssociationsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for lica in response['results']:
        print ('LICA with line item id \'%s\', creative id \'%s\', and status '
               '\'%s\' was found.' % (lica['lineItemId'], lica['creativeId'],
                                      lica['status']))
      # Advance to the next page.
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break
  # Parenthesized print: the original bare `print '...'` statement is a
  # SyntaxError under Python 3; `print(single_expression)` behaves
  # identically under Python 2.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object.
  # NOTE: replace the LINE_ITEM_ID placeholder above before running.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, LINE_ITEM_ID)
|
{
"content_hash": "9f55334105169ac6fd6456354c4ddc2f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 30.653846153846153,
"alnum_prop": 0.6386449184441656,
"repo_name": "coxmediagroup/googleads-python-lib",
"id": "130e8c3b0a350c9eadb671432335e6248d4720cb",
"size": "2212",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201411/line_item_creative_association_service/get_licas_by_statement.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "HTML",
"bytes": "8336"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2535137"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from functools import reduce
import math
import operator
import random
from flax.component import Breakable, IPhysics, Empty
import flax.entity as e
from flax.entity import (
Entity, CaveWall, Floor, Tree, Grass, CutGrass, Salamango, Armor,
Potion, StairsDown, StairsUp,
KadathGate
)
from flax.geometry import Blob, Direction, Point, Rectangle, Size, Span
from flax.map import Map
from flax.noise import discrete_perlin_noise_factory
def random_normal_int(mu, sigma):
    """Return a normally-distributed random integer, given a mean and standard
    deviation.  The result is clamped to the inclusive range
    ceil(µ - 2σ) .. floor(µ + 2σ).

    NOTE(review): an earlier docstring promised µ ± 3σ with ~0.3% clamping,
    but the code below clamps at 2σ (a gaussian lands outside ±2σ about
    4.6% of the time) -- confirm which was intended.
    """
    # NOTE(review): int(x + 0.5) misrounds negative values (rounds toward
    # +infinity); round() may have been intended -- confirm.
    ret = int(random.gauss(mu, sigma) + 0.5)
    # Clamp to ±2σ.
    lb = int(math.ceil(mu - 2 * sigma))
    ub = int(math.floor(mu + 2 * sigma))
    if ret < lb:
        return lb
    elif ret > ub:
        return ub
    else:
        return ret
def random_normal_range(lb, ub):
    """Return a normally-distributed random integer, given an upper bound and
    lower bound.  Like `random_normal_int`, but explicitly specifying the
    limits.  Return values will be clustered around the midpoint.
    """
    # σ is chosen so lb and ub sit 2σ either side of the mean, matching the
    # 2σ clamp in random_normal_int.
    # NOTE(review): the original comment claimed the bounds were 6σ (±3σ)
    # apart, but (ub - lb) / 4 makes them 4σ apart -- confirm intent.
    mu = (lb + ub) / 2
    sigma = (ub - lb) / 4
    ret = int(random.gauss(mu, sigma) + 0.5)
    if ret < lb:
        return lb
    elif ret > ub:
        return ub
    else:
        return ret
class MapCanvas:
    """Mutable scratch surface for map generation: three point-keyed grids
    (architecture, item stacks, creatures) plus a running set of walkable
    points, converted into a real Map by to_map().
    """
    def __init__(self, size):
        self.rect = size.to_rect(Point.origin())
        # TODO i think using types instead of entities /most of the time/ is
        # more trouble than it's worth
        self._arch_grid = {
            point: CaveWall for point in self.rect.iter_points()}
        self._item_grid = {point: [] for point in self.rect.iter_points()}
        self._creature_grid = {
            point: None for point in self.rect.iter_points()}
        # Points whose architecture is passable; kept in sync by clear()
        # and set_architecture() so generators can sample open floor.
        self.floor_spaces = set()
    def clear(self, entity_type):
        """Flood the entire canvas with a single architecture type."""
        for point in self.rect.iter_points():
            self._arch_grid[point] = entity_type
        if entity_type.components.get(IPhysics) is Empty:
            self.floor_spaces = set(self.rect.iter_points())
        else:
            self.floor_spaces = set()
    def set_architecture(self, point, entity_type):
        """Place architecture (an entity type or a pre-built entity)."""
        self._arch_grid[point] = entity_type
        # TODO this is a little hacky, but it's unclear how this /should/ work
        # before there are other kinds of physics
        if isinstance(entity_type, Entity):
            entity_type = entity_type.type
        if entity_type.components.get(IPhysics) is Empty:
            self.floor_spaces.add(point)
        else:
            self.floor_spaces.discard(point)
    def add_item(self, point, entity_type):
        # Items stack: a point may hold several items.
        self._item_grid[point].append(entity_type)
    def set_creature(self, point, entity_type):
        # At most one creature per point; silently overwrites.
        # assert entity_type.layer is Layer.creature
        self._creature_grid[point] = entity_type
    def maybe_create(self, type_or_thing):
        """Instantiate *type_or_thing* if it's a type; pass entities through."""
        if isinstance(type_or_thing, Entity):
            return type_or_thing
        else:
            return type_or_thing()
    def to_map(self):
        """Materialise the canvas into a Map, instantiating stored types."""
        map = Map(self.rect.size)
        maybe_create = self.maybe_create
        for point in self.rect.iter_points():
            map.place(maybe_create(self._arch_grid[point]), point)
            for item_type in self._item_grid[point]:
                map.place(maybe_create(item_type), point)
            if self._creature_grid[point]:
                map.place(maybe_create(self._creature_grid[point]), point)
        return map
class Room:
    """A room, which has not yet been drawn.
    """
    def __init__(self, rect):
        self.rect = rect
    @classmethod
    def randomize(cls, region, *, minimum_size=Size(5, 5)):
        """Place a room randomly in a region, randomizing its size and position.
        """
        # TODO need to guarantee the region is big enough
        # NOTE(review): random_normal_range clamps to its bounds, so size
        # cannot exceed the region unless minimum_size itself does -- in
        # that case randint below gets a negative upper bound and raises.
        size = Size(
            random_normal_range(minimum_size.width, region.width),
            random_normal_range(minimum_size.height, region.height),
        )
        left = region.left + random.randint(0, region.width - size.width)
        top = region.top + random.randint(0, region.height - size.height)
        rect = Rectangle(Point(left, top), size)
        return cls(rect)
    def draw_to_canvas(self, canvas):
        """Carve the room into *canvas*: Floor interior, Wall border."""
        assert self.rect in canvas.rect
        for point in self.rect.iter_points():
            canvas.set_architecture(point, e.Floor)
        for point, _ in self.rect.iter_border():
            canvas.set_architecture(point, e.Wall)
class Fractor:
    """The agent noun form of 'fractal'.  An object that generates maps in a
    particular style.
    This is a base class, containing some generally-useful functionality; the
    interesting differentiation happens in subclasses.
    """
    def __init__(self, map_size, region=None):
        # `region` restricts generation to a sub-rectangle of the canvas;
        # by default the whole canvas is used.
        self.map_canvas = MapCanvas(map_size)
        if region is None:
            self.region = self.map_canvas.rect
        else:
            self.region = region
    def generate_map(self, up=None, down=None):
        """The method you probably want to call.  Does some stuff, then spits
        out a map.  `up`/`down`, when given, are portal destinations for a
        StairsUp/StairsDown placed on random open floor.
        """
        self.generate()
        self.place_stuff()
        # TODO putting this here doesn't seem right, given that the first floor
        # explicitly needs to put the down portal in a specific area
        # TODO also not really sure how this works for multiple connections, or
        # special kinds of portals, or whatever. that's, like, half about the
        # particular kind of map. i'm starting to think that a map design
        # itself may need to be an object/function.
        if up:
            self.place_portal(StairsUp, up)
        if down:
            self.place_portal(StairsDown, down)
        return self.map_canvas.to_map()
    def generate(self):
        """Implement in subclasses. Ought to do something to the canvas."""
        raise NotImplementedError
    # Utility methods follow
    def generate_room(self, region):
        """Carve a randomly sized/positioned room into *region*."""
        # TODO lol not even using room_size
        room = Room.randomize(region)
        room.draw_to_canvas(self.map_canvas)
    def place_stuff(self):
        """Scatter a fixed grab-bag of creatures and items on open floor."""
        # TODO this probably varies by room style too, but we don't have a huge
        # variety yet of stuff to generate yet, so.
        assert self.map_canvas.floor_spaces, \
            "can't place player with no open spaces"
        points = random.sample(list(self.map_canvas.floor_spaces), 10)
        self.map_canvas.set_creature(points[0], Salamango)
        self.map_canvas.add_item(points[1], Armor)
        self.map_canvas.add_item(points[2], Potion)
        self.map_canvas.add_item(points[3], Potion)
        self.map_canvas.add_item(points[4], e.Gem)
        self.map_canvas.add_item(points[5], e.Crate)
    def place_portal(self, portal_type, destination):
        """Drop a portal of *portal_type* leading to *destination* on a
        random open floor tile."""
        # Imported here to avoid a circular import at module load time --
        # TODO confirm; flax.component is already imported at the top.
        from flax.component import Portal
        portal = portal_type(Portal(destination=destination))
        # TODO not guaranteed
        assert self.map_canvas.floor_spaces, \
            "can't place portal with no open spaces"
        point = random.choice(list(self.map_canvas.floor_spaces))
        self.map_canvas.set_architecture(point, portal)
# TODO this is better, but still not great. rooms need to be guaranteed
# to not touch each other, for one. also has some biases towards big rooms
# still (need a left-leaning distribution for room size?) and it's easy to end
# up with an obvious grid
# TODO also lol needs hallways
class BinaryPartitionFractor(Fractor):
    """Generates a map by recursively splitting the region in two (binary
    space partitioning) and drawing one room into each resulting piece."""
    # TODO should probably accept a (minimum) room size instead, and derive
    # minimum partition size from that
    def __init__(self, *args, minimum_size):
        # minimum_size: the smallest Size a partition may be reduced to.
        super().__init__(*args)
        self.minimum_size = minimum_size
    def generate(self):
        """Partition the region and drop one randomized room per partition."""
        regions = self.maximally_partition()
        for region in regions:
            self.generate_room(region)
    def maximally_partition(self):
        """Split the region, largest-piece-first, until `wanted` partitions
        exist or nothing can be split further; return the partitions."""
        # TODO this should preserve the tree somehow, so a hallway can be drawn
        # along the edges
        regions = [self.region]
        # TODO configurable? with fewer, could draw bigger interesting things
        # in the big spaces
        wanted = 7
        while regions and len(regions) < wanted:
            region = regions.pop(0)
            new_regions = self.partition(region)
            regions.extend(new_regions)
            # Keep sorted by area, descending, so the largest region is
            # always the next one split.
            regions.sort(key=lambda r: r.size.area, reverse=True)
        return regions
    def partition(self, region):
        """Split `region` once along whichever axis has more slack relative
        to the minimum size; returns one (unsplittable) or two sub-regions."""
        # Partition whichever direction has more available space
        rel_height = region.height / self.minimum_size.height
        rel_width = region.width / self.minimum_size.width
        if rel_height < 2 and rel_width < 2:
            # Can't partition at all
            return [region]
        if rel_height > rel_width:
            return self.partition_horizontal(region)
        else:
            return self.partition_vertical(region)
    def partition_horizontal(self, region):
        """Split `region` into a top and bottom piece at a random row."""
        # We're looking for the far edge of the top partition, so subtract 1
        # to allow it on the border of the minimum size
        min_height = self.minimum_size.height
        top = region.top + min_height - 1
        bottom = region.bottom - min_height
        assert top <= bottom
        # NOTE(review): randint is inclusive, so midpoint may be bottom + 1;
        # presumably this balances the -1 above -- confirm against
        # Rectangle.replace semantics.
        midpoint = random.randint(top, bottom + 1)
        return [
            region.replace(bottom=midpoint),
            region.replace(top=midpoint + 1),
        ]
    def partition_vertical(self, region):
        """Split `region` into a left and right piece at a random column."""
        # Exactly the same as above
        min_width = self.minimum_size.width
        left = region.left + min_width - 1
        right = region.right - min_width
        assert left <= right
        midpoint = random.randint(left, right + 1)
        return [
            region.replace(right=midpoint),
            region.replace(left=midpoint + 1),
        ]
class PerlinFractor(Fractor):
    """Generates an outdoor map from Perlin noise: grass/trees by noise
    height, a meandering river with bridges, and dirt paths joining the
    noise field's local minima."""
    def _a_star(self, start, goals, costs):
        """A* search from `start` towards the nearest point in `goals`,
        where `costs` maps each point to its traversal cost.

        NOTE(review): appears unused by the visible code; if no goal is
        reachable the trailing walk returns a path to the last expanded
        node -- confirm before relying on it.
        """
        assert goals
        # TODO need to figure out which points should join to which! need a...
        # minimum number of paths? some kind of spanning tree that's
        # minimal...
        # TODO technically there might only be one local minima
        seen = set()
        pending = [start]  # TODO actually a sorted set heap thing
        paths = {}
        def estimate_cost(start, goal):
            # Chebyshev distance scaled by the cheaper endpoint's cost.
            dx, dy = goal - start
            dx = abs(dx)
            dy = abs(dy)
            return max(dx, dy) * min(costs[start], costs[goal])
        g_score = {start: 0}
        f_score = {start: min(estimate_cost(start, goal) for goal in goals)}
        while pending:
            # Cheapest estimated-total node first (linear sort stands in for
            # a priority queue; see TODO above).
            pending.sort(key=f_score.__getitem__)
            current = pending.pop(0)
            if current in goals:
                # CONSTRUCT PATH HERE
                break
            seen.add(current)
            for npt in current.neighbors:
                if npt not in self.region or npt in seen:
                    continue
                tentative_score = g_score[current] + costs[npt]
                if npt not in pending or tentative_score < g_score[npt]:
                    paths[npt] = current
                    g_score[npt] = tentative_score
                    f_score[npt] = tentative_score + min(
                        estimate_cost(npt, goal) for goal in goals)
                    pending.append(npt)
        # Walk the parent links back to the start and reverse.
        final_path = []
        while current in paths:
            final_path.append(current)
            current = paths[current]
        final_path.reverse()
        return final_path
    def _generate_river(self, noise):
        """Draw a vertical river of varying center/width; returns Blobs for
        (left bank, river, right bank)."""
        # TODO seriously starting to feel like i need a Feature type for these
        # things? like, passing `noise` around is a really weird way to go
        # about this. what would the state even look like though?
        '''
        # TODO i think this needs another flooding algorithm, which probably
        # means it needs to be a lot simpler and faster...
        noise_factory = discrete_perlin_noise_factory(
            *self.region.size, resolution=2, octaves=1)
        noise = {
            point: abs(noise_factory(*point) - 0.5) * 2
            for point in self.region.iter_points()
        }
        for point, n in noise.items():
            if n < 0.2:
                self.map_canvas.set_architecture(point, e.Water)
        return
        '''
        # Build some Blob internals representing the two halves of the river.
        left_side = {}
        right_side = {}
        river = {}
        # Noise drives how the river's centerline drifts and how wide it is
        # at each row.
        center_factory = discrete_perlin_noise_factory(
            self.region.height, resolution=3)
        width_factory = discrete_perlin_noise_factory(
            self.region.height, resolution=6, octaves=2)
        center = random_normal_int(
            self.region.center().x, self.region.width / 4 / 3)
        for y in self.region.range_height():
            center += (center_factory(y) - 0.5) * 3
            width = width_factory(y) * 2 + 5
            x0 = int(center - width / 2)
            x1 = int(x0 + width + 0.5)
            for x in range(x0, x1 + 1):
                self.map_canvas.set_architecture(Point(x, y), e.Water)
            # One horizontal Span per row for each of the three blobs.
            left_side[y] = (Span(self.region.left, x0 - 1),)
            right_side[y] = (Span(x1 + 1, self.region.right),)
            river[y] = (Span(x0, x1),)
        return Blob(left_side), Blob(river), Blob(right_side)
    def generate(self):
        """Main terrain pass: vegetation, river, bridges, paths, cave wall."""
        # This noise is interpreted roughly as the inverse of "frequently
        # travelled" -- low values are walked often (and are thus short grass),
        # high values are left alone (and thus are trees).
        noise_factory = discrete_perlin_noise_factory(
            *self.region.size, resolution=6)
        noise = {
            point: noise_factory(*point)
            for point in self.region.iter_points()
        }
        local_minima = set()
        for point, n in noise.items():
            # We want to ensure that each "walkable region" is connected.
            # First step is to collect all local minima -- any walkable tile is
            # guaranteed to be conneted to one.
            if all(noise[npt] >= n for npt in point.neighbors if npt in noise):
                local_minima.add(point)
            if n < 0.3:
                arch = CutGrass
            elif n < 0.6:
                arch = Grass
            else:
                arch = Tree
            self.map_canvas.set_architecture(point, arch)
        left_bank, river_blob, right_bank = self._generate_river(noise)
        # Decide where bridges should go. They can only cross where there's
        # walkable space on both sides, so find all such areas.
        # TODO maybe a nicer api for testing walkability here
        # TODO this doesn't detect a walkable area on one side that has no
        # walkable area on the other side, and tbh i'm not sure what to do in
        # such a case anyway. could forcibly punch a path through the trees, i
        # suppose? that's what i'll have to do anyway, right?
        # TODO this will break if i ever add a loop in the river, but tbh i
        # have no idea how to draw bridges in that case
        # Scan the river rows, grouping consecutive rows where both banks are
        # walkable (not Tree) into (start, end) blocks.
        new_block = True
        start = None
        end = None
        blocks = []
        for y, (span,) in river_blob.spans.items():
            if self.map_canvas._arch_grid[Point(span.start - 1, y)] is not Tree and \
                    self.map_canvas._arch_grid[Point(span.end + 1, y)] is not Tree:
                if new_block:
                    start = y
                    end = y
                    new_block = False
                else:
                    end = y
            else:
                if not new_block:
                    blocks.append((start, end))
                    new_block = True
        if not new_block:
            blocks.append((start, end))
        # One bridge per crossable block, at a roughly-normal row within it.
        for start, end in blocks:
            y = random_normal_range(start, end)
            span = river_blob.spans[y][0]
            # The bridge ends become path endpoints so paths reach the river.
            local_minima.add(Point(span.start - 1, y))
            local_minima.add(Point(span.end + 1, y))
            for x in span:
                self.map_canvas.set_architecture(Point(x, y), e.Bridge)
        # Consider all local minima along the edges, as well.
        for x in self.region.range_width():
            for y in (self.region.top, self.region.bottom):
                point = Point(x, y)
                n = noise[point]
                if (n < noise.get(Point(x - 1, y), 1) and
                        n < noise.get(Point(x + 1, y), 1)):
                    local_minima.add(point)
        for y in self.region.range_height():
            for x in (self.region.left, self.region.right):
                point = Point(x, y)
                n = noise[point]
                if (n < noise.get(Point(x, y - 1), 1) and
                        n < noise.get(Point(x, y + 1), 1)):
                    local_minima.add(point)
        # Mark the minima themselves as dirt (except those in the water).
        for point in local_minima:
            if point not in river_blob:
                self.map_canvas.set_architecture(point, e.Dirt)
        # Connect the minima on each bank with dirt paths.
        for blob in (left_bank, right_bank):
            paths = self.flood_valleys(blob, local_minima, noise)
            for path_point in paths:
                self.map_canvas.set_architecture(path_point, e.Dirt)
        # Whoops time for another step: generating a surrounding cave wall.
        for edge in Direction.orthogonal:
            width = self.region.edge_length(edge)
            wall_noise = discrete_perlin_noise_factory(width, resolution=6)
            for n in self.region.edge_span(edge):
                # Wall thickness of 1..4 tiles, varying along the edge.
                offset = int(wall_noise(n) * 4 + 1)
                for m in range(offset):
                    point = self.region.edge_point(edge, n, m)
                    self.map_canvas.set_architecture(point, e.CaveWall)
    def flood_valleys(self, region, goals, depthmap):
        """Return the set of points forming lowest-cost paths that connect
        all `goals` within `region` (a Blob), using `depthmap` as terrain
        height."""
        # We want to connect all the minima with a forest path.
        # Let's flood the forest. The algorithm is as follows:
        # - All the local minima are initally full of water, forming a set of
        # distinct puddles.
        # - Raise the water level. Each newly-flooded tile must touch at least
        # one other flooded tile; it becomes part of that puddle, and remembers
        # the tile that flooded it.
        # - Whenever a tile touches two or more puddles, they merge into one
        # large puddle. That tile is part of the forest path. For each
        # puddle, walk back along the chain of flooded tiles to the original
        # minima; these tiles are also part of the forest path.
        # When only one puddle remains, we're done, and all the minima are
        # joined by a path along the lowest route.
        flooded = {}
        puddle_map = {}
        path_from_puddle = defaultdict(dict)
        paths = set()
        # Seed one puddle per goal that actually lies in this region.
        for puddle, point in enumerate(goals):
            if point not in region:
                continue
            flooded[point] = puddle
            puddle_map[puddle] = puddle
        # Flood strictly from lowest to highest remaining point.
        flood_order = sorted(
            frozenset(region.iter_points()) - flooded.keys(),
            key=depthmap.__getitem__)
        for point in flood_order:
            # Group any flooded neighbors by the puddle they're in.
            # puddle => [neighboring points...]
            adjacent_puddles = defaultdict(list)
            for npt in point.neighbors:
                if npt not in flooded:
                    continue
                puddle = puddle_map[flooded[npt]]
                adjacent_puddles[puddle].append(npt)
            # Every point is either a local minimum OR adjacent to a point
            # lower than itself, by the very definition of "local minimum".
            # Thus there must be at least one adjacent puddle.
            # TODO not so true any more... maybe should determine local minima
            # automatically here...
            if not adjacent_puddles:
                continue
            assert adjacent_puddles
            # Remember how to get from adjacent puddles to this point.
            # Only store the lowest adjacent point.
            for puddle, points in adjacent_puddles.items():
                path_from_puddle[point][puddle] = min(
                    points, key=depthmap.__getitem__)
            flooded[point] = this_puddle = min(adjacent_puddles)
            if len(adjacent_puddles) > 1:
                # Draw the path from both puddles' starting points to here
                paths.add(point)
                for puddle in adjacent_puddles:
                    path_point = point
                    while path_point:
                        paths.add(path_point)
                        # Follow the lowest recorded step back towards this
                        # puddle's seed point.
                        next_point = None
                        cand_paths = path_from_puddle[path_point]
                        for cand_puddle, cand_point in cand_paths.items():
                            if puddle_map[cand_puddle] == puddle and (
                                    next_point is None or
                                    depthmap[cand_point] < depthmap[next_point]
                            ):
                                next_point = cand_point
                        path_point = next_point
                # This point connects two puddles; merge them. Have to update
                # the whole mapping, in case some other puddle is already
                # mapped to one we're about to remap.
                for from_puddle, to_puddle in puddle_map.items():
                    if {from_puddle, to_puddle} & adjacent_puddles.keys():
                        puddle_map[from_puddle] = this_puddle
                # If there's only one puddle left, we're done!
                if len(frozenset(puddle_map.values())) == 1:
                    break
        return paths
    def place_stuff(self):
        """Default stuff, plus one Key somewhere on the open floor."""
        super().place_stuff()
        assert self.map_canvas.floor_spaces, \
            "can't place player with no open spaces"
        floor = self.map_canvas.floor_spaces
        points = random.sample(list(floor), 1)
        self.map_canvas.add_item(points[0], e.Key)
def generate_caves(map_canvas, region, wall_tile, force_walls=(), force_floors=()):
    """Uses cellular automata to generate a cave system.

    Idea from: http://www.roguebasin.com/index.php?title=Cellular_Automata_Method_for_Generating_Random_Cave-Like_Levels
    """
    # Caller-pinned tiles: True means wall, False means floor.  These seed
    # every generation (though points inside `region` are still recomputed).
    pinned = {}
    for wall_point in force_walls:
        pinned[wall_point] = True
    for floor_point in force_floors:
        pinned[floor_point] = False
    # Seed roughly 40% of the region with walls, then apply the pins.
    grid = {point: random.random() < 0.40 for point in region.iter_points()}
    grid.update(pinned)
    for _generation in range(5):
        successor = pinned.copy()
        for point in region.iter_points():
            # Count this cell plus its eight neighbors; anything outside the
            # grid counts as a wall.
            wall_count = grid[point] + sum(
                grid.get(neighbor, True) for neighbor in point.neighbors)
            # The 4-5 rule: the next gen is a wall if either:
            # - the current gen is a wall and 4+ neighbors are walls;
            # - the current gen is a space and 5+ neighbors are walls.
            successor[point] = wall_count >= 5
        grid = successor
    # TODO need to connect any remaining areas here
    # TODO maybe i should LET this become a lot of small disjoint caves, so it
    # acts like a bunch of rooms. then connect them with doors + hallways!
    for point in region.iter_points():
        tile = wall_tile if grid[point] else e.CaveFloor
        map_canvas.set_architecture(point, tile)
# TODO it would be slick to have a wizard menu with commands like "regenerate
# this entire level"
class RuinFractor(Fractor):
    """A cave system surrounding a single gated room that has been partially
    ruined (broken walls, rubble near the gate)."""
    # TODO should really really let this wrap something else
    def generate(self):
        self.map_canvas.clear(Floor)
        # So what I want here is to have a cave system with a room in the
        # middle, then decay the room.
        # Some constraints:
        # - the room must have a wall where the entrance could go, which faces
        # empty space
        # - a wall near the entrance must be destroyed
        # - the player must start in a part of the cave connected to the
        # destroyed entrance
        # - none of the decay applied to the room may block off any of its
        # interesting features
        # TODO it would be nice if i could really write all this without ever
        # having to hardcode a specific direction, so the logic could always be
        # rotated freely
        side = random.choice([Direction.left, Direction.right])
        # TODO assert region is big enough
        room_size = Size(
            random_normal_range(9, int(self.region.width * 0.4)),
            random_normal_range(9, int(self.region.height * 0.4)),
        )
        # Roughly centered, with a little random jitter.
        room_position = self.region.center() - room_size // 2
        room_position += Point(
            random_normal_int(0, self.region.width * 0.1),
            random_normal_int(0, self.region.height * 0.1),
        )
        room_rect = Rectangle(room_position, room_size)
        self.room_region = room_rect
        room = Room(room_rect)
        # Everything outside the room becomes cave.
        cave_area = (
            Blob.from_rectangle(self.region)
            - Blob.from_rectangle(room_rect)
        )
        self.cave_region = cave_area
        # Pin the map border as wall, and pin floor along the gate-facing
        # side of the room so the caves stay connected to the entrance.
        walls = [point for (point, _) in self.region.iter_border()]
        floors = []
        for point, edge in room_rect.iter_border():
            if edge is side or edge.adjacent_to(side):
                floors.append(point)
                floors.append(point + side)
        generate_caves(
            self.map_canvas, cave_area, CaveWall,
            force_walls=walls, force_floors=floors,
        )
        room.draw_to_canvas(self.map_canvas)
        # OK, now draw a gate in the middle of the side wall
        if side is Direction.left:
            x = room_rect.left
        else:
            x = room_rect.right
        mid_y = room_rect.top + room_rect.height // 2
        # Gate is 3 tiles tall on odd heights, 4 on even.
        if room_rect.height % 2 == 1:
            min_y = mid_y - 1
            max_y = mid_y + 1
        else:
            min_y = mid_y - 2
            max_y = mid_y + 1
        for y in range(min_y, max_y + 1):
            self.map_canvas.set_architecture(Point(x, y), KadathGate)
        # Beat up the border of the room near the gate
        y = random.choice(
            tuple(range(room_rect.top, min_y))
            + tuple(range(max_y + 1, room_rect.bottom))
        )
        # Scatter breakable rubble in a 5x5 area, more ruined nearer the
        # center.
        for dx in range(-2, 3):
            for dy in range(-2, 3):
                point = Point(x + dx, y + dy)
                # TODO i think what i may want is to have the cave be a
                # "Feature", where i can check whether it has already claimed a
                # tile, or draw it later, or whatever.
                if self.map_canvas._arch_grid[point] is not CaveWall:
                    distance = abs(dx) + abs(dy)
                    ruination = random_normal_range(0, 0.2) + distance * 0.2
                    self.map_canvas.set_architecture(
                        point, e.Rubble(Breakable(ruination)))
        # And apply some light ruination to the inside of the room
        border = list(room_rect.iter_border())
        # TODO don't do this infinitely; give up after x tries
        # NOTE(review): loops forever if no border tile faces CaveWall.
        while True:
            point, edge = random.choice(border)
            if self.map_canvas._arch_grid[point + edge] is CaveWall:
                break
        # Punch a small cave-wall intrusion through the chosen border tile.
        self.map_canvas.set_architecture(point, CaveWall)
        self.map_canvas.set_architecture(point - edge, CaveWall)
        # TODO this would be neater if it were a slightly more random pattern
        for direction in (
                Direction.up, Direction.down, Direction.left, Direction.right):
            self.map_canvas.set_architecture(
                point - edge + direction, CaveWall)
    def place_stuff(self):
        """Place the exit ladder and some items in the cave part of the map."""
        assert self.map_canvas.floor_spaces, \
            "can't place player with no open spaces"
        cave_floor = frozenset(self.cave_region.iter_points())
        cave_floor &= self.map_canvas.floor_spaces
        points = random.sample(list(cave_floor), 5)
        from flax.component import Portal
        # TODO this should exit. also confirm. should be part of the ladder
        # entity? also, world doesn't place you here. maybe the map itself
        # should know this?
        # TODO lol this is such a stupid hack
        ladder = e.Ladder(Portal(destination='__exit__'))
        self.map_canvas.set_architecture(points[0], ladder)
        self.map_canvas.add_item(points[1], e.Gem)
        self.map_canvas.add_item(points[2], e.Crate)
    def place_portal(self, portal_type, destination):
        """Down stairs go in the room's center, flanked by pillars; anything
        else falls back to the default placement."""
        from flax.component import Portal
        if portal_type is e.StairsDown:
            # Add the down stairs to the room, surrounded by some pillars
            room_center = self.room_region.center()
            self.map_canvas.set_architecture(
                room_center,
                portal_type(Portal(destination=destination)),
            )
            for direction in (
                    Direction.up_right, Direction.down_right,
                    Direction.up_left, Direction.down_left
            ):
                self.map_canvas.set_architecture(room_center + direction, e.Pillar)
        else:
            super().place_portal(portal_type, destination)
class RuinedHallFractor(Fractor):
    """A wide hallway through the middle of a carved area, with rooms above
    and below; one room is locked and holds the prize."""

    def generate(self):
        """Carve the area, run the hallway, slice the remainder into rooms,
        add doors (one locked), and remember the areas for placement."""
        self.map_canvas.clear(CaveWall)
        # First create a bunch of hallways and rooms.
        # For now, just carve a big area, run a hallway through the middle, and
        # divide either side into rooms.
        area = Room.randomize(self.region, minimum_size=self.region.size // 2)
        area.draw_to_canvas(self.map_canvas)
        center = area.rect.center()
        # A five-tile-tall hallway spanning the full width of the area.
        hallway = Rectangle(
            origin=Point(area.rect.left, center.y - 2),
            size=Size(area.rect.width, 5))
        Room(hallway).draw_to_canvas(self.map_canvas)
        top_space = area.rect.replace(bottom=hallway.top)
        bottom_space = area.rect.replace(top=hallway.bottom)
        rooms = []
        for orig_space in (top_space, bottom_space):
            space = orig_space
            # This includes walls!
            minimum_width = 7
            # Note that the rooms overlap where they touch, so we subtract one
            # from both the total width and the minimum width, in effect
            # ignoring all the walls on one side
            maximum_rooms = (space.width - 1) // (minimum_width - 1)
            # The maximum number of rooms that will fit also affects how much
            # wiggle room we're willing to have. For example, if at most 3 rooms
            # will fit, then generating 2 rooms is also reasonable. But if 10
            # rooms will fit, generating 2 rooms is a bit silly. We'll
            # arbitrarily use a sixth of the maximum as the minimum. (Plus 1,
            # to avoid rounding down to zero.)
            minimum_rooms = maximum_rooms // 6 + 1
            num_rooms = random_normal_range(minimum_rooms, maximum_rooms)
            # TODO normal distribution doesn't have good results here. think
            # more about how people use rooms -- often many of similar size,
            # with some exceptions. also different shapes, bathrooms or
            # closets nestled together, etc.
            while num_rooms > 1:
                # Now we want to divide a given amount of space into n chunks,
                # where the size of each chunk is normally-distributed. I have
                # no idea how to do this in any strict mathematical sense, so
                # instead we'll just carve out one room at a time and hope for
                # the best.
                min_width = minimum_width
                avg_width = (space.width - 1) // num_rooms + 1
                max_width = space.width - (minimum_width - 1) * (num_rooms - 1)
                room_width = random_normal_int(
                    avg_width,
                    min(max_width - avg_width, avg_width - min_width) // 3)
                room = space.replace(right=space.left + room_width - 1)
                rooms.append(room)
                space = space.replace(left=room.right)
                num_rooms -= 1
            # Whatever's left over becomes the final room on this side.
            rooms.append(space)
        for rect in rooms:
            Room(rect).draw_to_canvas(self.map_canvas)
        from flax.component import Lockable
        # Add some doors for funsies.  Exactly one room gets a locked door.
        locked_room = random.choice(rooms)
        for rect in rooms:
            x = random.randrange(rect.left + 1, rect.right - 1)
            # The door goes on whichever wall faces the hallway.
            if rect.top > hallway.top:
                side = Direction.down
            else:
                side = Direction.up
            point = rect.edge_point(side.opposite, x, 0)
            door = e.Door(Lockable(locked=rect is locked_room))
            self.map_canvas.set_architecture(point, door)
        # Remember the distinct areas so place_stuff/place_portal can target
        # them individually.
        self.hallway_area = Blob.from_rectangle(hallway)
        self.locked_area = Blob.from_rectangle(locked_room)
        self.rooms_area = reduce(
            operator.add,
            (Blob.from_rectangle(rect) for rect in rooms
             if rect is not locked_room))

    def place_stuff(self):
        """Creatures and loot in the unlocked rooms; the Crown in the locked
        one."""
        # TODO having to override this per room is becoming increasingly
        # tedious and awkward and copy-pastey.
        assert self.map_canvas.floor_spaces, \
            "can't place player with no open spaces"
        floor_spaces = self.map_canvas.floor_spaces
        room_floors = floor_spaces & frozenset(self.rooms_area.iter_points())
        lock_floors = floor_spaces & frozenset(self.locked_area.iter_points())
        points = random.sample(list(room_floors), 8)
        self.map_canvas.set_creature(points[0], Salamango)
        self.map_canvas.set_creature(points[1], Salamango)
        self.map_canvas.set_creature(points[2], Salamango)
        self.map_canvas.add_item(points[3], e.Armor)
        self.map_canvas.add_item(points[4], e.Potion)
        self.map_canvas.add_item(points[5], e.Potion)
        self.map_canvas.add_item(points[6], e.Gem)
        self.map_canvas.add_item(points[7], e.Crate)
        points = random.sample(list(lock_floors), 1)
        self.map_canvas.add_item(points[0], e.Crown)

    def place_portal(self, portal_type, destination):
        """Down stairs go in an unlocked room; anything else in the hallway."""
        # TODO and this part is even worse yes
        from flax.component import Portal
        portal = portal_type(Portal(destination=destination))
        # TODO not guaranteed
        assert self.map_canvas.floor_spaces, \
            "can't place portal with no open spaces"
        floor_spaces = self.map_canvas.floor_spaces
        room_floors = floor_spaces & frozenset(self.rooms_area.iter_points())
        hall_floors = floor_spaces & frozenset(self.hallway_area.iter_points())
        if portal_type is e.StairsDown:
            # Down stairs go in an unlocked room
            point = random.choice(list(room_floors))
        else:
            # Up stairs go in the hallway
            point = random.choice(list(hall_floors))
        self.map_canvas.set_architecture(point, portal)
class MapLayout:
    """Knows how to generate a specific style of map from some set of
    parameters.  Abstract; subclasses supply `generate_map`."""

    def generate_map(self):
        """Subclasses must override this to build and return a map."""
        raise NotImplementedError
|
{
"content_hash": "92e8a3d708b4491781921a14d04d89da",
"timestamp": "",
"source": "github",
"line_count": 894,
"max_line_length": 120,
"avg_line_length": 39.75167785234899,
"alnum_prop": 0.5864426810737802,
"repo_name": "eevee/flax",
"id": "c18e48ac9b3156731c8d2a3b65f885a73ccbfc8c",
"size": "35545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flax/fractor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149956"
}
],
"symlink_target": ""
}
|
"""
@author Joelmir Ribacki 2015
"""
from lxml import etree
def incorporate_imports(tags, location, imports):
    '''
    Collect tags from the xsd file at `location` to incorporate into the
    base xsd file, following nested imports recursively.

    :param tags: list that receives the elements to graft onto the base xsd
    :param location: path of the xsd file to read
    :param imports: list of schemaLocation values already processed; used to
        avoid importing the same (or a circularly-imported) file twice
    '''
    with open(location) as xsd_file:
        xsd_doc = etree.XML(xsd_file.read())
    for import_tag in xsd_doc.iter():
        # Skip comments / processing instructions (their .tag is not a
        # string) and the root <schema> element itself.
        # (The original compared type() against the STRING
        # 'lxml.etree._Element', which is always False, so nothing was ever
        # collected.)
        if not isinstance(import_tag.tag, str) or 'schema' in import_tag.tag:
            continue
        if 'import' in import_tag.tag:
            # Verify the file name; only follow each imported file once.
            if import_tag.attrib['schemaLocation'] not in imports:
                imports.append(import_tag.attrib['schemaLocation'])
                # Recurse into the imported file, appending its tags too.
                # (The original called this with an undefined name and the
                # wrong arguments, and appended its None return value.)
                incorporate_imports(
                    tags, import_tag.attrib['schemaLocation'], imports)
        else:
            tags.append(import_tag)
def validate(xml_filename, xsd_filename):
    '''
    Validate an XML file against a schema that may import other schemas.

    xml_filename: File to validate
    xsd_filename: File with the schema rules

    :returns: the parsed XML document on success
    :raises lxml.etree.DocumentInvalid: if the document does not validate
    '''
    # List of schemaLocations already incorporated
    imports = []
    # Open and parse both files (closing the handles promptly).
    with open(xml_filename) as xml_file:
        xml_doc = etree.XML(xml_file.read())
    with open(xsd_filename) as xsd_file:
        xsd_doc = etree.XML(xsd_file.read())
    # List of tags to append onto the base xsd document
    tags_import = []
    # Walk the base schema's import tags.  Comments/processing instructions
    # have a non-string .tag, so filter on that rather than comparing
    # type() against a string (which is always False).
    for import_tag in xsd_doc.iter():
        if not isinstance(import_tag.tag, str) or 'import' not in import_tag.tag:
            continue
        imports.append(import_tag.attrib['schemaLocation'])
        # Collect tags (recursively) from the imported file.
        incorporate_imports(
            tags_import, import_tag.attrib['schemaLocation'], imports)
    # Graft the collected tags onto the base schema document.
    for tag in tags_import:
        xsd_doc.append(tag)
    # Create a schema object and validate.
    xmlschema = etree.XMLSchema(xsd_doc)
    xmlschema.assertValid(xml_doc)
    return xml_doc
|
{
"content_hash": "d5388669ab0a3226a971fb8beaebaa91",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 116,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6173684210526316,
"repo_name": "joelmir/validate-xml-from-multiple-xsd-schema",
"id": "4863dc83e6412dd52d484a35ac53b87c11faac05",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1942"
}
],
"symlink_target": ""
}
|
import logging
import os
from okonomiyaki.versions import EnpkgVersion
from simplesat.constraints import PrettyPackageStringParser, Requirement
from simplesat.dependency_solver import DependencySolver
from simplesat.errors import NoPackageFound, SatisfiabilityError
from simplesat.pool import Pool
from simplesat.repository import Repository
from simplesat.request import Request
from fusesoc.core import Core
from fusesoc.librarymanager import LibraryManager
logger = logging.getLogger(__name__)
class DependencyError(Exception):
    """Raised when a core's dependencies cannot be resolved.

    :param value: name of the core whose dependencies failed to resolve
    :param msg: optional human-readable detail (e.g. the solver's explanation)
    """

    def __init__(self, value, msg=""):
        # Pass `value` along so Exception.args (and pickling) behave normally.
        super().__init__(value)
        self.value = value
        self.msg = msg

    def __str__(self):
        return repr(self.value)
class CoreDB:
    """In-memory database of known cores with a SAT-based dependency solver.

    Cores are registered with :meth:`add` and resolved with :meth:`solve` /
    :meth:`find`.  Solver results are cached until the database changes.
    """

    def __init__(self):
        # name (str of VLNV) -> {"core": Core, "library": Library}
        self._cores = {}
        # (top_core, flags hash, only_matching_vlnv) -> list of Cores
        self._solver_cache = {}

    # simplesat doesn't allow ':', '-' or leading '_'
    def _package_name(self, vlnv):
        """Mangle a VLNV into a simplesat-safe package name."""
        _name = f"{vlnv.vendor}_{vlnv.library}_{vlnv.name}".lstrip("_")
        return _name.replace("-", "__")

    def _package_version(self, vlnv):
        """Render a VLNV's version as 'version-revision'."""
        return f"{vlnv.version}-{vlnv.revision}"

    def _parse_depend(self, depends):
        """Render dependency VLNVs as a comma-separated constraint list."""
        # FIXME: Handle conflicts
        deps = []
        _s = "{} {} {}"
        for d in depends:
            for simple in d.simpleVLNVs():
                deps.append(
                    _s.format(
                        self._package_name(simple),
                        simple.relation,
                        self._package_version(simple),
                    )
                )
        return ", ".join(deps)

    def _parse_virtual(self, virtuals):
        """Render virtual-core VLNVs as a comma-separated package list."""
        package_names = []
        for virtual in virtuals:
            for simple in virtual.simpleVLNVs():
                package_names.append(self._package_name(simple))
        return ", ".join(package_names)

    def add(self, core, library):
        """Register `core` (found in `library`), replacing any core with the
        same name."""
        # Any cached solver result may now be stale.
        self._solver_cache_invalidate_all()
        name = str(core.name)
        logger.debug("Adding core " + name)
        if name in self._cores:
            _s = "Replacing {} in {} with the version found in {}"
            logger.debug(
                _s.format(name, self._cores[name]["core"].core_root, core.core_root)
            )
        self._cores[name] = {"core": core, "library": library}

    def find(self, vlnv=None):
        """Return the core matching `vlnv`, or all known cores if omitted."""
        if vlnv:
            # The solver returns dependencies first; the match itself is last.
            found = self._solve(vlnv, only_matching_vlnv=True)[-1]
        else:
            found = [core["core"] for core in self._cores.values()]
        return found

    def _solver_cache_lookup(self, key):
        # NOTE(review): returns False on a miss; callers rely on truthiness,
        # which assumes a cached solution list is never empty.
        if key in self._solver_cache:
            return self._solver_cache[key]
        return False

    def _solver_cache_store(self, key, value):
        self._solver_cache[key] = value

    def _solver_cache_invalidate(self, key):
        if key in self._solver_cache:
            del self._solver_cache[key]

    def _solver_cache_invalidate_all(self):
        self._solver_cache = {}

    def _hash_flags_dict(self, flags):
        """Hash the flags dict.

        Python's mutable sequences, like dict, are not generally hashable. For
        the dict we're using for the flags, we can simply implement hashing
        ourselves without the need to worry about nested dicts.
        """
        h = 0
        for pair in sorted(flags.items()):
            h ^= hash(pair)
        return h

    def solve(self, top_core, flags):
        """Resolve `top_core`'s dependency tree under `flags`."""
        return self._solve(top_core, flags)

    def _solve(self, top_core, flags=None, only_matching_vlnv=False):
        """Run the SAT solver and return a list of Cores in dependency order.

        :param top_core: VLNV to resolve
        :param flags: dict of flags used when evaluating dependencies
        :param only_matching_vlnv: if True, only match `top_core` itself (or
            cores providing it virtually), without building a dependency tree
        :raises DependencyError: if no solution exists or no package matches
        """
        # Avoid the mutable-default-argument pitfall of the old `flags={}`.
        if flags is None:
            flags = {}

        def eq_vln(this, that):
            # Compare vendor/library/name, ignoring version.
            return (
                this.vendor == that.vendor
                and this.library == that.library
                and this.name == that.name
            )

        # Try to return a cached result
        solver_cache_key = (top_core, self._hash_flags_dict(flags), only_matching_vlnv)
        cached_solution = self._solver_cache_lookup(solver_cache_key)
        if cached_solution:
            return cached_solution

        repo = Repository()
        _flags = flags.copy()
        cores = [x["core"] for x in self._cores.values()]
        for core in cores:
            if only_matching_vlnv:
                # Skip cores that neither match the requested VLNV nor
                # provide it virtually.
                if not any(
                    [eq_vln(core.name, top_core)]
                    + [
                        eq_vln(virtual_vlnv, top_core)
                        for virtual_vlnv in core.get_virtuals()
                    ]
                ):
                    continue

            # Build a "pretty" package string in a format expected by
            # PrettyPackageStringParser()
            package_str = "{} {}-{}".format(
                self._package_name(core.name), core.name.version, core.name.revision
            )
            _virtuals = core.get_virtuals()
            if _virtuals:
                _s = "; provides ( {} )"
                package_str += _s.format(self._parse_virtual(_virtuals))
            # Add dependencies only if we want to build the whole dependency
            # tree.
            if not only_matching_vlnv:
                _flags["is_toplevel"] = core.name == top_core
                _depends = core.get_depends(_flags)
                if _depends:
                    _s = "; depends ( {} )"
                    package_str += _s.format(self._parse_depend(_depends))

            parser = PrettyPackageStringParser(EnpkgVersion.from_string)
            package = parser.parse_to_package(package_str)
            package.core = core
            repo.add_package(package)

        request = Request()
        _top_dep = "{} {} {}".format(
            self._package_name(top_core),
            top_core.relation,
            self._package_version(top_core),
        )
        request.install(Requirement._from_string(_top_dep))

        installed_repository = Repository()
        pool = Pool([repo])
        pool.add_repository(installed_repository)
        solver = DependencySolver(pool, [repo], installed_repository)

        try:
            transaction = solver.solve(request)
        except SatisfiabilityError as e:
            # Chain so the simplesat traceback isn't lost.
            raise DependencyError(
                top_core.name, msg=e.unsat.to_string(pool)
            ) from e
        except NoPackageFound as e:
            raise DependencyError(top_core.name) from e

        # Record each core's direct dependencies, resolved through virtual
        # package names.
        # NOTE(review): only done when there is more than one operation;
        # presumably a single operation has no dependencies to map -- confirm.
        objdict = {}
        if len(transaction.operations) > 1:
            for op in transaction.operations:
                for p in op.package.provides:
                    objdict[p[0]] = str(op.package.core.name)
                op.package.core.direct_deps = [
                    objdict[n[0]] for n in op.package.install_requires
                ]

        result = [op.package.core for op in transaction.operations]

        # Cache the solution for further lookups
        self._solver_cache_store(solver_cache_key, result)

        return result
class CoreManager:
    def __init__(self, config):
        """:param config: fusesoc Config providing cache_root and
        library_root."""
        self.config = config
        self.db = CoreDB()
        self._lm = LibraryManager(config.library_root)
def find_cores(self, library):
found_cores = []
path = os.path.expanduser(library.location)
exclude = {".git"}
if os.path.isdir(path) == False:
raise OSError(path + " is not a directory")
logger.debug("Checking for cores in " + path)
for root, dirs, files in os.walk(path, followlinks=True):
if "FUSESOC_IGNORE" in files:
del dirs[:]
continue
dirs[:] = [directory for directory in dirs if directory not in exclude]
for f in files:
if f.endswith(".core"):
core_file = os.path.join(root, f)
try:
if self._detect_capi_version(core_file) != 2:
# Skip core files which are not in CAPI2 format.
logger.error(
"Core file {} is in CAPI1 format, which is not supported "
"any more since FuseSoC 2.0. The core file is ignored. "
"Please migrate your cores to the CAPI2 file format, or "
"use FuseSoC 1.x as stop-gap.".format(core_file)
)
continue
core = Core(
core_file,
self.config.cache_root,
)
found_cores.append(core)
except SyntaxError as e:
w = "Parse error. Ignoring file " + core_file + ": " + e.msg
logger.warning(w)
except ImportError as e:
w = 'Failed to register "{}" due to unknown provider: {}'
logger.warning(w.format(core_file, str(e)))
return found_cores
def _detect_capi_version(self, core_file) -> int:
"""Detect the CAPI version in a .core file
Returns:
Version of the core file (1 or 2)
"""
with open(core_file) as f:
l = f.readline().split()
if l:
first_line = l[0]
else:
first_line = ""
if first_line == "CAPI=1":
return 1
elif first_line == "CAPI=2:":
return 2
else:
error_msg = (
"The first line of the core file {} must be "
' "CAPI=1" or "CAPI=2:".'.format(core_file)
)
error_msg += ' The first line of this core file is "{}".'.format(
first_line
)
if first_line == "CAPI=2":
error_msg += " Just add a colon on the end!"
logger.warning(error_msg)
raise ValueError(
"Unable to determine CAPI version from core file {}.".format(
core_file
)
)
def _load_cores(self, library):
found_cores = self.find_cores(library)
for core in found_cores:
self.db.add(core, library)
def add_library(self, library):
""" Register a library """
abspath = os.path.abspath(os.path.expanduser(library.location))
_library = self._lm.get_library(abspath, "location")
if _library:
_s = "Not adding library {} ({}). Library {} already registered for this location"
logger.warning(_s.format(library.name, abspath, _library.name))
return
self._load_cores(library)
self._lm.add_library(library)
def get_libraries(self):
""" Get all registered libraries """
return self._lm.get_libraries()
def get_depends(self, core, flags):
"""Get an ordered list of all dependencies of a core
All direct and indirect dependencies are resolved into a dependency
tree, the tree is flattened, and an ordered list of dependencies is
created.
The first element in the list is a leaf dependency, the last element
is the core at the root of the dependency tree.
"""
logger.debug(
"Calculating dependencies for {}{} with flags {}".format(
core.relation, str(core), str(flags)
)
)
resolved_core = self.db.find(core)
deps = self.db.solve(resolved_core.name, flags)
logger.debug(" Resolved core to {}".format(str(resolved_core.name)))
logger.debug(" with dependencies " + ", ".join([str(c.name) for c in deps]))
return deps
def get_cores(self):
""" Get a dict with all cores, indexed by the core name """
return {str(x.name): x for x in self.db.find()}
def get_core(self, name):
""" Get a core with a given name """
c = self.db.find(name)
c.name.relation = "=="
return c
def get_generators(self):
""" Get a dict with all registered generators, indexed by name """
generators = {}
for core in self.db.find():
if hasattr(core, "get_generators"):
_generators = core.get_generators()
if _generators:
generators[str(core.name)] = _generators
return generators
|
{
"content_hash": "25acdca2ca0655399e744d1e0a2a6be8",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 94,
"avg_line_length": 35.87683284457478,
"alnum_prop": 0.5279548798430603,
"repo_name": "lowRISC/fusesoc",
"id": "fc6f299a680b335b666ae47a11f893fd73f297ed",
"size": "12376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fusesoc/coremanager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "170342"
},
{
"name": "Shell",
"bytes": "739"
},
{
"name": "SystemVerilog",
"bytes": "2485"
},
{
"name": "Tcl",
"bytes": "176"
},
{
"name": "Verilog",
"bytes": "1141"
}
],
"symlink_target": ""
}
|
def split_and_join(line):
    """Collapse runs of whitespace in *line* into single hyphens."""
    return "-".join(line.split())
if __name__ == '__main__':
    # Read one line from stdin, hyphenate it, and print the result.
    print(split_and_join(input()))
|
{
"content_hash": "993e48dcea5406fc7205a7a24c207158",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 33,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.5693069306930693,
"repo_name": "avtomato/HackerRank",
"id": "e0cb14bb0d4cbb6873cc0ab46a7295389d561b81",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/_03_Strings/_02_String_Split_and_Join/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42781"
},
{
"name": "Shell",
"bytes": "1075"
}
],
"symlink_target": ""
}
|
from pybluetooth import HCIThread
from pybluetooth.address import *
from scapy.layers.bluetooth import *
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
def _create_patched_hci_thread():
    """Build an HCIThread whose send_cmd records packets instead of sending.

    Recorded packets are available on the returned thread's sent_packets list.
    """
    thread = HCIThread(MagicMock())
    thread.sent_packets = []

    def record_cmd(scapy_packet, *args, **kwargs):
        thread.sent_packets.append(scapy_packet)

    thread.send_cmd = record_cmd
    return thread
def test_hci_thread_cmd_le_create_connection():
    """cmd_le_create_connection must emit an LE Create Connection command
    with the random address type and the zero-padded peer address."""
    hci = _create_patched_hci_thread()
    target = Address("c9:ea:a5:b8:c8:1", AddressType.random)
    hci.cmd_le_create_connection(target)
    packet = hci.sent_packets[0]
    assert packet.getlayer(HCI_Cmd_LE_Create_Connection)
    assert packet.patype == 0x01  # Random
    assert packet.paddr == "c9:ea:a5:b8:c8:01"
|
{
"content_hash": "f71b6bfd1dbb9d763caf70f438b5c90a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 55,
"avg_line_length": 26.275862068965516,
"alnum_prop": 0.6811023622047244,
"repo_name": "pebble/pybluetooth",
"id": "8e0d8ebfe86ba0b906aabdd0e135be52a63e28d2",
"size": "762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hcithread.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44095"
}
],
"symlink_target": ""
}
|
import os
import socket
import sys
sys.path.insert(0, os.getcwd())
from dynamite.message import Message
def main():
    """Connect to the local dynamite server and send one serialized Message.

    The socket is shut down and closed in all cases; shutdown errors
    (e.g. the connection never succeeded) are deliberately ignored.
    """
    s = None
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("127.0.0.1", 8888))
        message = Message()
        # sendall() keeps writing until the whole payload is transmitted;
        # plain send() may deliver only part of the buffer.
        s.sendall(message.to_bytes())
    finally:
        if s is not None:
            try:
                s.shutdown(socket.SHUT_RDWR)
                s.close()
            except socket.error:
                pass


if __name__ == '__main__':
    main()
|
{
"content_hash": "a47c16f54ea3a54032e4a3f13d289a2a",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 19.51851851851852,
"alnum_prop": 0.523719165085389,
"repo_name": "svisser/dynamite",
"id": "d2f7105347ddc44e21d8f562cf9e84aee324311b",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4827"
}
],
"symlink_target": ""
}
|
import sqlite3 as sql
import pandas as pd
from DataGeneration.MapLocation import MapLocation
class DatabaseHandler:
    """Wrapper around the SQLite database that stores addresses, transit
    stops, and the measured routes (distance/time) between them.
    """
    def __init__(self, db_file_name='db.sqlite3', full=True):
        # full=False skips connecting and creating tables; the caller is then
        # responsible for providing self.conn before use.
        if full:
            self.conn = sql.connect(db_file_name)
            self.initialize_db()
    def initialize_db(self):
        """Create the addresses, stops, and routes tables if missing."""
        self._add_addresses_table()
        self._add_stops_table()
        self._add_routes_table()
        self.conn.commit()
    def _add_addresses_table(self):
        # Address points that routes are measured from.
        c = self.conn.cursor()
        c.execute("""
                  CREATE TABLE IF NOT EXISTS addresses
                  (id INTEGER PRIMARY KEY,
                  latitude real NOT NULL,
                  longitude real NOT NULL)
                  """)
        c.close()
    def _add_stops_table(self):
        # Transit stops; stop_id/stop_name come from the transit feed.
        c = self.conn.cursor()
        c.execute("""
                  CREATE TABLE IF NOT EXISTS stops
                  (id INTEGER PRIMARY KEY,
                  stop_id INTEGER NOT NULL,
                  stop_name text NOT NULL,
                  latitude real NOT NULL,
                  longitude real NOT NULL)
                  """)
        c.close()
    def _add_routes_table(self):
        # One row per measured (address, stop) walking route.
        c = self.conn.cursor()
        c.execute("""
                  CREATE TABLE IF NOT EXISTS routes
                  (id INTEGER PRIMARY KEY,
                  address_id INTEGER NOT NULL,
                  stop_id INTEGER NOT NULL,
                  distance INTEGER NOT NULL,
                  time INTEGER NOT NULL,
                  FOREIGN KEY(address_id) REFERENCES addresses(id),
                  FOREIGN KEY(stop_id) REFERENCES stops(id))
                  """)
        c.close()
    def add_addresses_from_file(self, file_name):
        """Bulk-load address rows from a CSV file into the addresses table."""
        df = pd.read_csv(file_name)
        df.to_sql('addresses', self.conn, if_exists='append', index=False)
    def add_stops_from_file(self, file_name):
        """Bulk-load stop rows from a CSV file into the stops table."""
        df = pd.read_csv(file_name)
        # to_sql matches on column names, so this column order is harmless.
        df = df[["stop_id", "stop_name", "longitude", "latitude"]]
        df.to_sql('stops', self.conn, if_exists='append', index=False)
    def add_address(self, location):
        """Insert one address row; location.id of 0 means let SQLite assign it.

        Raises:
            TypeError: if location lacks a latitude or longitude attribute.
        """
        if not hasattr(location, 'latitude'):
            raise TypeError('location must have latitude property')
        if not hasattr(location, 'longitude'):
            raise TypeError('location must have longitude property')
        c = self.conn.cursor()
        if location.id != 0:
            c.execute("INSERT INTO addresses (id, latitude, longitude) "
                      "VALUES (?, ?, ?)",
                      (location.id, location.latitude, location.longitude))
        else:
            # id == 0 is treated as "unset": SQLite picks the primary key.
            c.execute("INSERT INTO addresses (latitude, longitude) "
                      "VALUES (?, ?)", (location.latitude, location.longitude))
        self.conn.commit()
        c.close()
    def add_stop(self, location):
        """Insert one stop row; location.id of 0 means let SQLite assign it.

        NOTE(review): the stops table created by _add_stops_table declares
        stop_id and stop_name NOT NULL, but this INSERT supplies neither
        column, so it will fail against that schema — verify intended usage.

        Raises:
            TypeError: if location lacks a latitude or longitude attribute.
        """
        if not hasattr(location, 'latitude'):
            raise TypeError('location must have latitude property')
        if not hasattr(location, 'longitude'):
            raise TypeError('location must have longitude property')
        c = self.conn.cursor()
        if location.id != 0:
            c.execute("INSERT INTO stops (id, latitude, longitude) "
                      "VALUES (?, ?, ?)",
                      (location.id, location.latitude, location.longitude))
        else:
            c.execute("INSERT INTO stops (latitude, longitude) "
                      "VALUES (?, ?)",
                      (location.latitude, location.longitude))
        self.conn.commit()
        c.close()
    def add_route(self, address, stop, distance, time):
        """Record the distance/time between an address id and a stop id."""
        c = self.conn.cursor()
        c.execute("INSERT INTO routes "
                  "(address_id, stop_id, distance, time) "
                  "VALUES (?, ?, ?, ?)",
                  (address, stop, distance, time))
        self.conn.commit()
        c.close()
    # Information Retrieval
    def get_address_generator(self, verbose=False):
        """Yield a MapLocation for every address that has no route row yet."""
        c = self.conn.cursor()
        c.execute("SELECT "
                  "addresses.latitude, addresses.longitude, addresses.id "
                  "FROM addresses LEFT JOIN routes "
                  "ON routes.address_id = addresses.id "
                  "WHERE routes.id IS NULL")
        if verbose:
            print("fetching all addresses without routes...")
        rows = c.fetchall()
        c.close()
        if verbose:
            print("fetched {} addresses".format(len(rows)))
        for row in rows:
            yield MapLocation(latitude=row[0], longitude=row[1], id=row[2])
    def get_all_stops(self):
        """Return every stop as a MapLocation."""
        c = self.conn.cursor()
        c.execute("SELECT * from stops")
        rows = c.fetchall()
        c.close()
        # Column order per _add_stops_table: id, stop_id, stop_name, lat, lon.
        return [MapLocation(latitude=row[3], longitude=row[4], id=row[0])
                for row in rows]
    def output_routes(self, file_path, closest_stops_only=False):
        """Write the collected routes to a CSV file.

        Args:
            file_path (str): the file path to save the .csv output
            closest_stops_only (bool): If true, only save the stops that are the
                closest (by the distance column) to each address. If there are
                multiple stops per address that have the same distance, both are
                returned.

        Returns:
            None — DataFrame.to_csv(path) writes the file and returns None.
            The CSV contains address_latitude, address_longitude,
            stop_latitude, stop_longitude, distance and time for each route
            collected by the DataGenerator; with closest_stops_only it is
            restricted to routes whose distance equals the per-address
            minimum (ties produce multiple rows for one address).
        """
        if closest_stops_only:
            return self.routes_dataframe_closest_stops().to_csv(file_path)
        else:
            return self.routes_dataframe().to_csv(file_path)
    def routes_dataframe(self):
        """
        Returns:
            all routes along with the stop and address latitudes and longitudes
            as a pandas DataFrame.
        """
        return pd.read_sql_query(
            "SELECT "
            "addresses.latitude AS address_latitude,"
            "addresses.longitude AS address_longitude,"
            "stops.latitude AS stop_latitude,"
            "stops.longitude AS stop_longitude,"
            "routes.distance AS distance,"
            "routes.time AS time "
            "FROM routes "
            "LEFT JOIN addresses ON routes.address_id = addresses.id "
            "LEFT JOIN stops ON routes.stop_id = stops.id",
            self.conn)
    def routes_dataframe_closest_stops(self):
        """
        Group all routes by address and keep only the rows whose distance
        equals that address's minimum distance; ties yield multiple rows per
        address. Note: the result is not explicitly sorted.
        """
        df = self.routes_dataframe()
        # Per-address minimum distance.
        df_grouped = df.groupby(['address_latitude', 'address_longitude']).\
            agg({'distance': 'min'})
        df_grouped = df_grouped.reset_index()
        df_grouped = df_grouped.rename(columns={'distance':'distance_min'})
        df = pd.merge(df, df_grouped, how='left',
                      on=['address_latitude', 'address_longitude'])
        # Keep only routes matching that minimum.
        df = df[df['distance'] == df['distance_min']]
        return df[['address_latitude', 'address_longitude',
                   'stop_latitude', 'stop_longitude',
                   'distance', 'time']].reset_index()
|
{
"content_hash": "5e1001c490b68cdf1775c88d6ee4a7da",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 80,
"avg_line_length": 39.44387755102041,
"alnum_prop": 0.5496054844134006,
"repo_name": "skorasaurus/RTAHeatMap",
"id": "f5cf79ed3b42972b11c8eac8cc46844522479734",
"size": "7778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "DataGeneration/DatabaseHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "75023"
}
],
"symlink_target": ""
}
|
'''
Created on 2016年2月12日
@author: root
'''
import xlstool
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
tempdir=parentdir+'/../../'
dirpath=tempdir.replace('\\','/')
sys.path.insert(0,dirpath)
from tool import SQLTool,config
import uuid
def loadteacher(path):
    """Load teacher records from the Excel workbook at *path*.

    Reads the sheet named u'教师' (teachers), assigns each teacher a fresh
    UUID, then inserts one row per teacher into the teacher table and a
    matching login row into the user table.

    :param path: path to the .xls workbook
    :returns: result of the final insert call (falsy if an insert failed)
    """
    data = xlstool.getdata(path, u'教师')
    teachdata = []
    logindata = []
    for row in data:
        temp = list(row)
        teacherid = uuid.uuid1()
        # Row layout: school id, name, phone, offer, job title, login name.
        # '123456' is stored as the default plain-text teacher password.
        teachdata.append((temp[0], teacherid, temp[1], temp[2], temp[3], temp[4], '123456'))
        # The login row stores a fixed default password hash for temp[5].
        logindata.append((temp[5], 'e10adc3949ba59abbe56e057f20f883e', teacherid))
    sqlhelp = SQLTool.getObject()
    sqlhelp.connectdb()
    result = sqlhelp.inserttableinfo_byparams(config.Config.teachertable, ['schoolId', 'teacherId', 'teacherName', 'teacherPhone', 'offer', 'jobTitle', 'password'], teachdata)
    if result:
        # Only create login rows when the teacher rows were inserted.
        result = sqlhelp.inserttableinfo_byparams(config.Config.usertable, ['username', 'password', 'userId'], logindata)
    return result


if __name__ == '__main__':
    loadteacher('/root/github/Behavior_culture/Back_ground/xlsdeal/xls_opertate/student.xls')
|
{
"content_hash": "365c80bb74aebf1b98d4153e2dc641ed",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 167,
"avg_line_length": 28.21951219512195,
"alnum_prop": 0.6784788245462403,
"repo_name": "sherwel/Behavior_culture",
"id": "0bc1065441542a6cef3c95945c440cb8d00fbc7d",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Back_ground/xlsdeal/xls_opertate/teacher_operate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "111658"
},
{
"name": "HTML",
"bytes": "258116"
},
{
"name": "JavaScript",
"bytes": "1560904"
},
{
"name": "Python",
"bytes": "86292"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: replaces UserProfile.points_progress with
    UserProfile.yesterday_points (both integer, default 0).
    """
    def forwards(self, orm):
        # Deleting field 'UserProfile.points_progress'
        db.delete_column('accounts_userprofile', 'points_progress')
        # Adding field 'UserProfile.yesterday_points'
        db.add_column('accounts_userprofile', 'yesterday_points', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
    def backwards(self, orm):
        # Adding field 'UserProfile.points_progress'
        db.add_column('accounts_userprofile', 'points_progress', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Deleting field 'UserProfile.yesterday_points'
        db.delete_column('accounts_userprofile', 'yesterday_points')
    # Frozen model snapshot used by South to build the `orm` object above.
    # Generated data — do not hand-edit beyond regenerating the migration.
    models = {
        'accounts.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'api_token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
            'has_skin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_connection_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'last_played_server': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'players'", 'null': 'True', 'to': "orm['race.Server']"}),
            'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'points_history': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}),
            'registration_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'skin_body_color': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
            'skin_body_color_raw': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'skin_feet_color': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
            'skin_feet_color_raw': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'skin_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
            'yesterday_points': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'race.map': {
            'Meta': {'object_name': 'Map'},
            'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'crc': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
            'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'grenade_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'has_deathtiles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_image': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'has_unhookables': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'heart_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'map_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'map_type': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['race.MapType']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'shield_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'race.maptype': {
            'Meta': {'object_name': 'MapType'},
            'displayed_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '20', 'db_index': 'True'})
        },
        'race.server': {
            'Meta': {'object_name': 'Server'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'anonymous_players': ('picklefield.fields.PickledObjectField', [], {}),
            'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'last_connection_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'maintained_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'maintained_servers'", 'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'played_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['race.Map']", 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['accounts']
|
{
"content_hash": "c10dcaf98478e8d5bcbd7ae99276030d",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 182,
"avg_line_length": 74.58677685950413,
"alnum_prop": 0.5585595567867035,
"repo_name": "SushiTee/teerace",
"id": "d3fe94f09b131580fb8a209995cb5f3e5525d2c8",
"size": "9043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teerace/accounts/south_migrations/0003_auto__del_field_userprofile_points_progress__add_field_userprofile_yes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "40169"
},
{
"name": "HTML",
"bytes": "95280"
},
{
"name": "JavaScript",
"bytes": "17213"
},
{
"name": "Python",
"bytes": "409303"
}
],
"symlink_target": ""
}
|
import calendar
import reversion
import base64
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.sites.models import Site
from django.shortcuts import render, get_object_or_404, redirect
from django.utils.translation import ugettext as _
from django.core.mail import send_mail
from django.http import HttpResponse
from django.conf import settings
from members.models import User
from reversion.models import Revision
from .models import Project
from .forms import ProjectForm, RestrictedProjectForm, SearchProjectForm
@login_required
@reversion.create_revision()
def add_project(request):
    """Create a project from POSTed form data and e-mail every team member a
    confirmation link; renders the add form on GET or on invalid input.

    Note: render() is fed locals(), so local variable names here ARE the
    template context keys.
    """
    data = request.POST if request.POST else None
    files = request.FILES if request.FILES else None
    form = ProjectForm(data=data, files=files, user=request.user)
    if form.is_valid():
        form.save()
        project = form.instance
        name = project.name
        domain = Site.objects.get().domain
        for participant in project.team.all():
            # Token format: base64("<project_pk>_<participant_pk>"), decoded
            # again in confirm_participation.
            # NOTE(review): b64encode on a str only works on Python 2 —
            # confirm this code base still targets Python 2.
            link = "https://{}/projects/confirm/{}/".format(domain, base64.b64encode("{}_{}".format(project.pk, participant.pk)))
            send_mail(u"Потвърждаване на участие в проект",
                      u"Отидете да този линк, за да потвърдите участието си в проект {} посетете {}".format(name, link),
                      settings.EMAIL_HOST_USER,
                      [participant.email])
        return redirect('members:user-projects')
    return render(request, 'projects/add.html', locals())
@reversion.create_revision()
def edit_project(request, project_id=None):
    """Let the project owner edit a project that is still 'unrevised' or
    'returned'; anyone else (or any other status) is redirected away.
    """
    project = get_object_or_404(Project, id=project_id)
    if request.user == project.user and (project.status == 'unrevised'
                                         or project.status == 'returned'):
        data = request.POST if request.POST else None
        files = request.FILES if request.FILES else None
        form = ProjectForm(data=data, user=request.user, files=files, instance=project)
        if form.is_valid():
            form.save()
            return redirect('members:user-projects')
        # locals() feeds the template context, so variable names matter here.
        return render(request, 'projects/edit.html', locals())
    else:
        return redirect('members:user-projects')
@permission_required('projects.change_project', login_url="members:user-projects")
def edit_status(request, project_id=None):
    """Let privileged users change a project's status.

    Renders the status form on GET or invalid input; on success persists the
    form and redirects to the user's project list. locals() feeds the
    template context, so the local names `project` and `form` matter.
    """
    project = get_object_or_404(Project, id=project_id)
    data = request.POST if request.POST else None
    form = RestrictedProjectForm(data=data, instance=project)
    if form.is_valid():
        # Save through the form (not project.save()) so the validated form
        # data — including any many-to-many fields — is actually persisted.
        form.save()
        return redirect('members:user-projects')
    return render(request, 'projects/edit_status.html', locals())
def projects_archive(request):
    """Render the archive page: projects bucketed by status, plus optional
    search results from the GET query.
    """
    # Each of these locals is exposed to the template via locals().
    unrevised = Project.objects.filter(status='unrevised')
    returned = Project.objects.filter(status='returned')
    pending = Project.objects.filter(status='pending')
    approved = Project.objects.filter(status='approved')
    rejected = Project.objects.filter(status='rejected')
    form = SearchProjectForm(request.GET if request.GET else None)
    if form.is_valid():
        projects = form.search()
        if len(projects) == 0:
            # "No results found." message for the template.
            error = u"Няма намерени резултати."
    return render(request, 'projects/archive.html', locals())
def show_project(request, project_id):
    """Display a single project; flags whether older revisions exist."""
    project_show = get_object_or_404(Project, id=project_id)
    if len(reversion.get_for_object(project_show)) > 1:
        # Only defined when history exists; the template detects its
        # presence through the locals() context.
        old_versions = True
    return render(request, 'projects/show_project.html', locals())
def projects_year_month(request, year, month):
    """List projects created in the given year and month."""
    projects = Project.objects.filter(created_at__year=year,
                                      created_at__month=month)
    # Localized month name for the page heading.
    month_name = _(calendar.month_name[int(month)])
    return render(request, 'projects/show_month_year.html', locals())
def show_project_versions(request, project_id):
    """Show every stored revision of a project, resolving the raw user/team
    ids stored in each frozen version back to User objects for display.
    """
    project = get_object_or_404(Project, id=project_id)
    version_history = [ver for ver in reversion.get_for_object(project)]
    for ver in version_history:
        # Attach display data directly to the Version objects the template
        # iterates over.
        ver.created_at = Revision.objects.get(id=ver.id).date_created
        ver.team = [User.objects.get(id=mem) for mem in ver.field_dict['team']]
        ver.flp = User.objects.get(id=ver.field_dict['flp'])
        ver.user = User.objects.get(id=ver.field_dict['user'])
    return render(request, 'projects/previous_project_versions.html', locals())
@login_required
def confirm_participation(request, confirmation):
    """Mark a user as a confirmed participant of a project.

    *confirmation* is base64("<project_id>_<participant_id>") as generated
    in add_project's invitation e-mails.
    """
    project_id, participant_id = base64.b64decode(confirmation).split('_')
    # get_object_or_404 yields a proper 404 for stale/invalid tokens instead
    # of an IndexError, matching how the other views load projects.
    project = get_object_or_404(Project, id=project_id)
    project.participating.add(participant_id)
    return render(request, 'projects/confirm.html', locals())
@login_required
def remove_file(request, project_id, file_id):
    """Detach a file from one of the requesting user's own projects.

    Filtering on user=request.user means other users' projects 404.
    Returns an empty 200 response (called via AJAX, presumably — confirm).
    """
    project = get_object_or_404(Project, id=project_id, user=request.user)
    project.files.remove(file_id)
    return HttpResponse()
def projects_by_creator(request, searched_creator):
    """Archive page filtered to projects created by *searched_creator*."""
    projects = Project.objects.filter(user=searched_creator)
    return render(request, 'projects/archive.html', locals())
def projects_by_date_range(request, start_date, end_date):
    """Archive page filtered to projects created between the given dates."""
    projects = Project.objects.filter(created_at__range=(start_date, end_date))
    return render(request, 'projects/archive.html', locals())
def projects_by_name(request, searched_name):
    """Archive page filtered to projects with the exact given name."""
    projects = Project.objects.filter(name=searched_name)
    return render(request, 'projects/archive.html', locals())
def projects_by_status(request, searched_status):
    """Archive page filtered to projects with the given status."""
    projects = Project.objects.filter(status=searched_status)
    return render(request, 'projects/archive.html', locals())
|
{
"content_hash": "c1d9b69ff62fb3fa6c68543954a6ea46",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 129,
"avg_line_length": 38.80555555555556,
"alnum_prop": 0.6957766642806013,
"repo_name": "Hackfmi/Diaphanum",
"id": "f83ded9251b927c16ac2485dde7353459d7420b2",
"size": "5711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4870"
},
{
"name": "JavaScript",
"bytes": "72598"
},
{
"name": "Python",
"bytes": "272926"
}
],
"symlink_target": ""
}
|
"""
Like the old south.modelsparser, but using introspection where possible
rather than direct inspection of models.py.
"""
import datetime
import re
import decimal
from south.utils import get_attribute, auto_through
from django.db import models
from django.db.models.base import ModelBase, Model
from django.db.models.fields import NOT_PROVIDED
from django.conf import settings
from django.utils.functional import Promise
from django.contrib.contenttypes import generic
from django.utils.datastructures import SortedDict
from django.utils import datetime_safe
NOISY = False
# Gives information about how to introspect certain fields.
# This is a list of triples; the first item is a list of fields it applies to,
# (note that isinstance is used, so superclasses are perfectly valid here)
# the second is a list of positional argument descriptors, and the third
# is a list of keyword argument descriptors.
# Descriptors are of the form:
# [attrname, options]
# Where attrname is the attribute on the field to get the value from, and options
# is an optional dict.
#
# The introspector uses the combination of all matching entries, in order.
# Rule table consumed by matching_details(); see the format description in
# the comment block above. Entries are matched with isinstance, so later,
# more specific entries refine the generic models.Field entry.
introspection_details = [
    (
        (models.Field, ),
        [],
        {
            "null": ["null", {"default": False}],
            "blank": ["blank", {"default": False, "ignore_if":"primary_key"}],
            "primary_key": ["primary_key", {"default": False}],
            "max_length": ["max_length", {"default": None}],
            "unique": ["_unique", {"default": False}],
            "db_index": ["db_index", {"default": False}],
            "default": ["default", {"default": NOT_PROVIDED, "ignore_dynamics": True}],
            "db_column": ["db_column", {"default": None}],
            "db_tablespace": ["db_tablespace", {"default": settings.DEFAULT_INDEX_TABLESPACE}],
        },
    ),
    (
        (models.ForeignKey, models.OneToOneField),
        [],
        {
            "to": ["rel.to", {}],
            "to_field": ["rel.field_name", {"default_attr": "rel.to._meta.pk.name"}],
            "related_name": ["rel.related_name", {"default": None}],
            "db_index": ["db_index", {"default": True}],
        },
    ),
    (
        (models.ManyToManyField,),
        [],
        {
            "to": ["rel.to", {}],
            "symmetrical": ["rel.symmetrical", {"default": True}],
            "related_name": ["rel.related_name", {"default": None}],
            "db_table": ["db_table", {"default": None}],
            # TODO: Kind of ugly to add this one-time-only option
            "through": ["rel.through", {"ignore_if_auto_through": True}],
        },
    ),
    (
        (models.DateField, models.TimeField),
        [],
        {
            "auto_now": ["auto_now", {"default": False}],
            "auto_now_add": ["auto_now_add", {"default": False}],
        },
    ),
    (
        (models.DecimalField, ),
        [],
        {
            "max_digits": ["max_digits", {"default": None}],
            "decimal_places": ["decimal_places", {"default": None}],
        },
    ),
    (
        (models.BooleanField, ),
        [],
        {
            "default": ["default", {"default": NOT_PROVIDED, "converter": bool}],
            "blank": ["blank", {"default": True, "ignore_if":"primary_key"}],
        },
    ),
    (
        (models.FilePathField, ),
        [],
        {
            "path": ["path", {"default": ''}],
            "match": ["match", {"default": None}],
            "recursive": ["recursive", {"default": False}],
        },
    ),
    (
        (generic.GenericRelation, ),
        [],
        {
            "to": ["rel.to", {}],
            "symmetrical": ["rel.symmetrical", {"default": True}],
            "object_id_field": ["object_id_field_name", {"default": "object_id"}],
            "content_type_field": ["content_type_field_name", {"default": "content_type"}],
            "blank": ["blank", {"default": True}],
        },
    ),
]
# Regexes of allowed field full paths.
# Raw strings avoid the invalid-escape-sequence warning that "\." triggers
# in ordinary string literals on modern Python; the regexes are unchanged.
allowed_fields = [
    r"^django\.db",
    r"^django\.contrib\.contenttypes\.generic",
    r"^django\.contrib\.localflavor",
]
# Regexes of ignored fields (custom fields which look like fields, but have no column behind them).
# Raw strings avoid the invalid-escape-sequence warning that "\." triggers
# in ordinary string literals on modern Python; the regexes are unchanged.
ignored_fields = [
    r"^django\.contrib\.contenttypes\.generic\.GenericRelation",
    r"^django\.contrib\.contenttypes\.generic\.GenericForeignKey",
]
# Similar, but for Meta, so just the inner level (kwds).
# Maps Meta option name -> [attribute path, options] as described above.
meta_details = {
    "db_table": ["db_table", {"default_attr_concat": ["%s_%s", "app_label", "module_name"]}],
    "db_tablespace": ["db_tablespace", {"default": settings.DEFAULT_TABLESPACE}],
    "unique_together": ["unique_together", {"default": []}],
    "ordering": ["ordering", {"default": []}],
    "proxy": ["proxy", {"default": False, "ignore_missing": True}],
}
# Python 2.4 compatibility: the builtin any() only exists from 2.5 onwards.
# NOTE: this deliberately shadows the builtin on newer interpreters; unlike
# the builtin it returns the reduce result (a bool here, since it folds with
# `or` over booleans starting from False).
any = lambda x: reduce(lambda y, z: y or z, x, False)
def add_introspection_rules(rules=(), patterns=()):
    """
    Allows you to add some introspection rules at runtime, e.g. for 3rd party apps.

    :param rules: sequence of (classes, args, kwargs) triples, appended to
        introspection_details.
    :param patterns: sequence of regex strings, appended to the
        allowed_fields whitelist.
    """
    # Immutable tuple defaults avoid the shared-mutable-default-argument
    # pitfall; tuples still satisfy the isinstance checks below.
    assert isinstance(rules, (list, tuple))
    assert isinstance(patterns, (list, tuple))
    allowed_fields.extend(patterns)
    introspection_details.extend(rules)
def add_ignored_fields(patterns):
    """
    Allows you to add some ignore field patterns.

    Each pattern is a regex string appended to the module-level
    ignored_fields list.
    """
    assert isinstance(patterns, (list, tuple))
    for pattern in patterns:
        ignored_fields.append(pattern)
def can_ignore(field):
    """
    Returns True if we know for certain that we can ignore this field, False
    otherwise.
    """
    full_name = "%s.%s" % (field.__class__.__module__, field.__class__.__name__)
    # A field is ignorable when its full dotted path matches any ignore regex.
    for pattern in ignored_fields:
        if re.match(pattern, full_name) is not None:
            return True
    return False
def can_introspect(field):
    """
    Returns True if we are allowed to introspect this field, False otherwise.
    ('allowed' means 'in core'. Custom fields can declare they are introspectable
    by the default South rules by adding the attribute _south_introspects = True.)
    """
    # Fields can opt in explicitly, bypassing the path whitelist.
    if getattr(field, "_south_introspects", False):
        return True
    # Otherwise the field's full dotted path must match the whitelist.
    full_name = "%s.%s" % (field.__class__.__module__, field.__class__.__name__)
    for pattern in allowed_fields:
        if re.match(pattern, full_name) is not None:
            return True
    return False
def matching_details(field):
    """
    Returns the union of all matching entries in introspection_details for the field.
    """
    combined_args = []
    combined_kwargs = {}
    for classes, args, kwargs in introspection_details:
        # A rule applies when the field is an instance of any listed class.
        if isinstance(field, tuple(classes)):
            combined_args.extend(args)
            combined_kwargs.update(kwargs)
    return combined_args, combined_kwargs
class IsDefault(Exception):
    """
    Raised when a field attribute holds its default value, signalling that it
    can be omitted from the frozen definition.
    """
def get_value(field, descriptor):
    """
    Gets an attribute value from a Field instance and formats it.

    descriptor is an [attrname, options] pair, the inner-level format used in
    introspection_details and meta_details. Returns a string of Python source
    (repr-style) suitable for a frozen definition, or raises IsDefault when
    the value should be omitted (it equals its default, or one of the ignore
    rules in options fires).
    """
    attrname, options = descriptor
    # If the options say it's not a attribute name but a real value, use that.
    if options.get('is_value', False):
        value = attrname
    else:
        try:
            value = get_attribute(field, attrname)
        except AttributeError:
            # ignore_missing: a missing attribute is treated as "use default".
            if options.get("ignore_missing", False):
                raise IsDefault
            else:
                raise
    # Lazy-eval functions get eval'd.
    if isinstance(value, Promise):
        value = unicode(value)
    # If the value is the same as the default, omit it for clarity
    if "default" in options and value == options['default']:
        raise IsDefault
    # If there's an ignore_if, use it
    if "ignore_if" in options:
        if get_attribute(field, options['ignore_if']):
            raise IsDefault
    # If there's an ignore_if_auto_through which is True, use it
    if options.get("ignore_if_auto_through", False):
        if auto_through(field):
            raise IsDefault
    # Some default values need to be gotten from an attribute too.
    if "default_attr" in options:
        default_value = get_attribute(field, options['default_attr'])
        if value == default_value:
            raise IsDefault
    # Some are made from a formatting string and several attrs (e.g. db_table)
    if "default_attr_concat" in options:
        # NOTE: 'format' shadows the builtin here.
        format, attrs = options['default_attr_concat'][0], options['default_attr_concat'][1:]
        default_value = format % tuple(map(lambda x: get_attribute(field, x), attrs))
        if value == default_value:
            raise IsDefault
    # Callables get called.
    if callable(value) and not isinstance(value, ModelBase):
        # Datetime.datetime.now is special, as we can access it from the eval
        # context (and because it changes all the time; people will file bugs otherwise).
        if value == datetime.datetime.now:
            return "datetime.datetime.now"
        if value == datetime.datetime.utcnow:
            return "datetime.datetime.utcnow"
        if value == datetime.date.today:
            return "datetime.date.today"
        # All other callables get called.
        value = value()
    # Models get their own special repr()
    if isinstance(value, ModelBase):
        # If it's a proxy model, follow it back to its non-proxy parent
        if getattr(value._meta, "proxy", False):
            value = value._meta.proxy_for_model
        return "orm['%s.%s']" % (value._meta.app_label, value._meta.object_name)
    # As do model instances
    if isinstance(value, Model):
        if options.get("ignore_dynamics", False):
            raise IsDefault
        return "orm['%s.%s'].objects.get(pk=%r)" % (value.__class__._meta.app_label, value.__class__._meta.object_name, value.pk)
    # Make sure Decimal is converted down into a string
    if isinstance(value, decimal.Decimal):
        value = str(value)
    # datetime_safe has an improper repr value
    if isinstance(value, datetime_safe.datetime):
        value = datetime.datetime(*value.utctimetuple()[:7])
    if isinstance(value, datetime_safe.date):
        value = datetime.date(*value.timetuple()[:3])
    # Now, apply the converter func if there is one
    if "converter" in options:
        value = options['converter'](value)
    # Return the final value
    return repr(value)
def introspector(field):
    """
    Given a field, introspects its definition triple.

    Returns (args, kwargs), each containing formatted value strings; entries
    whose value matches its default are simply left out.
    """
    positional_descriptors, keyword_descriptors = matching_details(field)
    args = []
    kwargs = {}
    # For each argument, use the descriptor to get the real value.
    for descriptor in positional_descriptors:
        try:
            args.append(get_value(field, descriptor))
        except IsDefault:
            pass
    for keyword, descriptor in keyword_descriptors.items():
        try:
            kwargs[keyword] = get_value(field, descriptor)
        except IsDefault:
            pass
    return args, kwargs
def get_model_fields(model, m2m=False):
    """
    Given a model class, returns a dict of {field_name: field_triple} defs.

    A field triple is (full field class path, args, kwargs); fields that
    cannot be introspected are stored as None. Pass m2m=True to also include
    local many-to-many fields.
    """
    field_defs = SortedDict()
    inherited_fields = {}
    # Go through all bases (that are themselves models, but not Model)
    for base in model.__bases__:
        if base != models.Model and issubclass(base, models.Model):
            if not base._meta.abstract:
                # Looks like we need their fields, Ma.
                inherited_fields.update(get_model_fields(base))
    # NOTE(review): inherited_fields is collected above but never read below --
    # looks like dead code; confirm before removing.
    # Now, go through all the fields and try to get their definition
    source = model._meta.local_fields[:]
    if m2m:
        source += model._meta.local_many_to_many
    for field in source:
        # Can we ignore it completely?
        if can_ignore(field):
            continue
        # Does it define a south_field_triple method?
        if hasattr(field, "south_field_triple"):
            if NOISY:
                print " ( Nativing field: %s" % field.name
            field_defs[field.name] = field.south_field_triple()
        # Can we introspect it?
        elif can_introspect(field):
            # Get the full field class path.
            field_class = field.__class__.__module__ + "." + field.__class__.__name__
            # Run this field through the introspector
            args, kwargs = introspector(field)
            # Workaround for Django bug #13987
            if model._meta.pk.column == field.column and 'primary_key' not in kwargs:
                kwargs['primary_key'] = True
            # That's our definition!
            field_defs[field.name] = (field_class, args, kwargs)
        # Shucks, no definition!
        else:
            if NOISY:
                print " ( Nodefing field: %s" % field.name
            field_defs[field.name] = None
    # If they've used the horrific hack that is order_with_respect_to, deal with
    # it.
    if model._meta.order_with_respect_to:
        field_defs['_order'] = ("django.db.models.fields.IntegerField", [], {"default": "0"})
    return field_defs
def get_model_meta(model):
    """
    Given a model class, will return the dict representing the Meta class.
    """
    # Get the introspected attributes
    meta_def = {}
    for keyword, descriptor in meta_details.items():
        try:
            meta_def[keyword] = get_value(model._meta, descriptor)
        except IsDefault:
            pass
    # Also, add on any non-abstract model base classes.
    # This is called _ormbases as the _bases variable was previously used
    # for a list of full class paths to bases, so we can't conflict.
    for base in model.__bases__:
        if base == models.Model or not issubclass(base, models.Model):
            continue
        if base._meta.abstract:
            continue
        # OK, that matches our terms.
        meta_def.setdefault('_ormbases', []).append(
            "%s.%s" % (base._meta.app_label, base._meta.object_name))
    return meta_def
# Now, load the built-in South introspection plugins
import south.introspection_plugins
|
{
"content_hash": "a42350629e4ba3ba76257f78a0676963",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 129,
"avg_line_length": 36.11082474226804,
"alnum_prop": 0.5934622796374277,
"repo_name": "mcr/ietfdb",
"id": "0f7fd467fcb4ea19cbee08ead0e92518467ddd7d",
"size": "14011",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "south/modelsinspector.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "239198"
},
{
"name": "JavaScript",
"bytes": "450755"
},
{
"name": "Perl",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "10286676"
},
{
"name": "Ruby",
"bytes": "3468"
},
{
"name": "Shell",
"bytes": "39950"
},
{
"name": "TeX",
"bytes": "23944"
}
],
"symlink_target": ""
}
|
import os
import sys

# Make the package importable when this script is run from tests/.
sys.path.insert(0, os.path.abspath(".."))

import time

import RPi.GPIO as GPIO

from automatedbrewery.AlarmControl import AlarmController

print("Turning the alarm off and on every 5 seconds")
alarm_controller = AlarmController()
time.sleep(5)

try:
    while True:
        alarm_controller.alarm = 1
        time.sleep(5)
        alarm_controller.alarm = 0
        time.sleep(5)
except KeyboardInterrupt:
    # On Ctrl-C, silence the alarm and release the GPIO pins before exiting.
    alarm_controller.alarm = 0
    GPIO.cleanup()
    print("\nEnding alarm test")
|
{
"content_hash": "f9ba4c8083b4438a2b1e027b9b719897",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 57,
"avg_line_length": 18.88,
"alnum_prop": 0.6716101694915254,
"repo_name": "Bobstin/AutomatedBrewery",
"id": "b3e27ece6c73ab822ffa34e96f18b9a7f625d4c5",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/AlarmTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "101704"
},
{
"name": "Makefile",
"bytes": "23892"
},
{
"name": "Python",
"bytes": "292153"
},
{
"name": "QMake",
"bytes": "415"
}
],
"symlink_target": ""
}
|
"""Tests for transformer-based text encoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.modeling import activations
from official.nlp.modeling import layers
from official.nlp.modeling.networks import encoder_scaffold
# Test class that wraps a standard transformer layer. If this layer is called
# at any point, the list passed to the config object will be filled with a
# boolean 'True'. We register this class as a Keras serializable so we can
# test serialization below.
@tf.keras.utils.register_keras_serializable(package="TestOnly")
class ValidatedTransformerLayer(layers.Transformer):
  """Transformer layer that records its invocations in a shared list."""

  def __init__(self, call_list, **kwargs):
    super(ValidatedTransformerLayer, self).__init__(**kwargs)
    # Shared mutable list; the owning test inspects it after building/calling.
    self.list = call_list

  def call(self, inputs):
    # Record the invocation so tests can assert this layer was actually used.
    self.list.append(True)
    return super(ValidatedTransformerLayer, self).call(inputs)

  def get_config(self):
    config = super(ValidatedTransformerLayer, self).get_config()
    # The shared list isn't serializable state; emit a fresh empty one.
    config["call_list"] = []
    return config
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class EncoderScaffoldLayerClassTest(keras_parameterized.TestCase):
  """Tests EncoderScaffold built from config dicts (hidden_cls as a class)."""

  def tearDown(self):
    super(EncoderScaffoldLayerClassTest, self).tearDown()
    # Reset the dtype policy so float16 tests don't leak into later tests.
    tf.keras.mixed_precision.experimental.set_policy("float32")

  @parameterized.named_parameters(
      dict(testcase_name="only_final_output", return_all_layer_outputs=False),
      dict(testcase_name="all_layer_outputs", return_all_layer_outputs=True))
  def test_network_creation(self, return_all_layer_outputs):
    """Builds the network symbolically and checks output shapes and dtypes."""
    hidden_size = 32
    sequence_length = 21
    num_hidden_instances = 3
    embedding_cfg = {
        "vocab_size": 100,
        "type_vocab_size": 16,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }

    call_list = []
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "call_list":
            call_list
    }
    # Create a small EncoderScaffold for testing.
    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=num_hidden_instances,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cls=ValidatedTransformerLayer,
        hidden_cfg=hidden_cfg,
        embedding_cfg=embedding_cfg,
        return_all_layer_outputs=return_all_layer_outputs)
    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    output_data, pooled = test_network([word_ids, mask, type_ids])

    if return_all_layer_outputs:
      self.assertIsInstance(output_data, list)
      self.assertLen(output_data, num_hidden_instances)
      data = output_data[-1]
    else:
      data = output_data
    self.assertIsInstance(test_network.hidden_layers, list)
    self.assertLen(test_network.hidden_layers, num_hidden_instances)
    self.assertIsInstance(test_network.pooler_layer, tf.keras.layers.Dense)

    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())

    # The default output dtype is float32.
    self.assertAllEqual(tf.float32, data.dtype)
    self.assertAllEqual(tf.float32, pooled.dtype)

    # If call_list[0] exists and is True, the passed layer class was
    # instantiated from the given config properly.
    self.assertNotEmpty(call_list)
    self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")

  def test_network_creation_with_float16_dtype(self):
    """Checks output dtypes under the mixed_float16 policy."""
    tf.keras.mixed_precision.experimental.set_policy("mixed_float16")
    hidden_size = 32
    sequence_length = 21
    embedding_cfg = {
        "vocab_size": 100,
        "type_vocab_size": 16,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }
    # Create a small EncoderScaffold for testing.
    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cfg=hidden_cfg,
        embedding_cfg=embedding_cfg)

    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])

    expected_data_shape = [None, sequence_length, hidden_size]
    expected_pooled_shape = [None, hidden_size]
    self.assertAllEqual(expected_data_shape, data.shape.as_list())
    self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list())

    # If float_dtype is set to float16, the data output is float32 (from a layer
    # norm) and pool output should be float16.
    self.assertAllEqual(tf.float32, data.dtype)
    self.assertAllEqual(tf.float16, pooled.dtype)

  def test_network_invocation(self):
    """Builds a Keras model around the network and runs predict() on it."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    embedding_cfg = {
        "vocab_size": vocab_size,
        "type_vocab_size": num_types,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }
    # Create a small EncoderScaffold for testing.
    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cfg=hidden_cfg,
        embedding_cfg=embedding_cfg)

    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])

    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])

    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    _ = model.predict([word_id_data, mask_data, type_id_data])

    # Creates a EncoderScaffold with max_sequence_length != sequence_length
    num_types = 7
    embedding_cfg = {
        "vocab_size": vocab_size,
        "type_vocab_size": num_types,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length * 2,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }
    # Create a small EncoderScaffold for testing.
    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cfg=hidden_cfg,
        embedding_cfg=embedding_cfg)
    # NOTE(review): this Model is built from the *first* network's output
    # tensors (data/pooled above), so the max_seq_length variant constructed
    # just above is never actually invoked -- confirm intent.
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])
    _ = model.predict([word_id_data, mask_data, type_id_data])

  def test_serialize_deserialize(self):
    """Round-trips the network through get_config()/from_config()."""
    # Create a network object that sets all of its config options.
    hidden_size = 32
    sequence_length = 21
    embedding_cfg = {
        "vocab_size": 100,
        "type_vocab_size": 16,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }
    # Create a small EncoderScaffold for testing.
    network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cfg=hidden_cfg,
        embedding_cfg=embedding_cfg)

    # Create another network object from the first object's config.
    new_network = encoder_scaffold.EncoderScaffold.from_config(
        network.get_config())

    # Validate that the config can be forced to JSON.
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(network.get_config(), new_network.get_config())
@keras_parameterized.run_all_keras_modes
class EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase):
  """Tests EncoderScaffold with a custom (2-input) embedding network."""

  def test_network_invocation(self):
    """Builds the scaffold around a custom embedding network and runs it."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57

    # Build an embedding network to swap in for the default network. This one
    # will have 2 inputs (mask and word_ids) instead of 3, and won't use
    # positional embeddings.
    word_ids = tf.keras.layers.Input(
        shape=(sequence_length,), dtype=tf.int32, name="input_word_ids")
    mask = tf.keras.layers.Input(
        shape=(sequence_length,), dtype=tf.int32, name="input_mask")
    embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=vocab_size,
        embedding_width=hidden_size,
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
        name="word_embeddings")
    word_embeddings = embedding_layer(word_ids)
    attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])
    network = tf.keras.Model([word_ids, mask],
                             [word_embeddings, attention_mask])

    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }

    # Create a small EncoderScaffold for testing.
    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cfg=hidden_cfg,
        embedding_cls=network,
        embedding_data=embedding_layer.embeddings)

    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask])

    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask], [data, pooled])

    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    _ = model.predict([word_id_data, mask_data])

    # Test that we can get the embedding data that we passed to the object. This
    # is necessary to support standard language model training.
    self.assertIs(embedding_layer.embeddings,
                  test_network.get_embedding_table())

  def test_serialize_deserialize(self):
    """Round-trips the custom-embedding scaffold and compares predictions."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57

    # Build an embedding network to swap in for the default network. This one
    # will have 2 inputs (mask and word_ids) instead of 3, and won't use
    # positional embeddings.
    word_ids = tf.keras.layers.Input(
        shape=(sequence_length,), dtype=tf.int32, name="input_word_ids")
    mask = tf.keras.layers.Input(
        shape=(sequence_length,), dtype=tf.int32, name="input_mask")
    embedding_layer = layers.OnDeviceEmbedding(
        vocab_size=vocab_size,
        embedding_width=hidden_size,
        initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02),
        name="word_embeddings")
    word_embeddings = embedding_layer(word_ids)
    attention_mask = layers.SelfAttentionMask()([word_embeddings, mask])
    network = tf.keras.Model([word_ids, mask],
                             [word_embeddings, attention_mask])

    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
    }

    # Create a small EncoderScaffold for testing.
    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cfg=hidden_cfg,
        embedding_cls=network,
        embedding_data=embedding_layer.embeddings)

    # Create another network object from the first object's config.
    new_network = encoder_scaffold.EncoderScaffold.from_config(
        test_network.get_config())

    # Validate that the config can be forced to JSON.
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(test_network.get_config(), new_network.get_config())

    # Create a model based off of the old and new networks:
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)

    data, pooled = new_network([word_ids, mask])
    new_model = tf.keras.Model([word_ids, mask], [data, pooled])

    data, pooled = test_network([word_ids, mask])
    model = tf.keras.Model([word_ids, mask], [data, pooled])

    # Copy the weights between models.
    new_model.set_weights(model.get_weights())

    # Invoke the models.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    data, cls = model.predict([word_id_data, mask_data])
    new_data, new_cls = new_model.predict([word_id_data, mask_data])

    # The output should be equal.
    self.assertAllEqual(data, new_data)
    self.assertAllEqual(cls, new_cls)

    # We should not be able to get a reference to the embedding data.
    with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"):
      new_network.get_embedding_table()
@keras_parameterized.run_all_keras_modes
class EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase):
  """Tests EncoderScaffold given a pre-instantiated hidden layer object."""

  def test_network_invocation(self):
    """Passes an instantiated layer as hidden_cls and verifies it is used."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    embedding_cfg = {
        "vocab_size": vocab_size,
        "type_vocab_size": num_types,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }

    call_list = []
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "call_list":
            call_list
    }
    # Create a small EncoderScaffold for testing. This time, we pass an already-
    # instantiated layer object.
    xformer = ValidatedTransformerLayer(**hidden_cfg)

    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cls=xformer,
        embedding_cfg=embedding_cfg)

    # Create the inputs (note that the first dimension is implicit).
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    data, pooled = test_network([word_ids, mask, type_ids])

    # Create a model based off of this network:
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])

    # Invoke the model. We can't validate the output data here (the model is too
    # complex) but this will catch structural runtime errors.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    _ = model.predict([word_id_data, mask_data, type_id_data])

    # If call_list[0] exists and is True, the passed layer class was
    # called as part of the graph creation.
    self.assertNotEmpty(call_list)
    self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.")

  def test_serialize_deserialize(self):
    """Round-trips the instance-layer scaffold and compares predictions."""
    hidden_size = 32
    sequence_length = 21
    vocab_size = 57
    num_types = 7
    embedding_cfg = {
        "vocab_size": vocab_size,
        "type_vocab_size": num_types,
        "hidden_size": hidden_size,
        "seq_length": sequence_length,
        "max_seq_length": sequence_length,
        "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "dropout_rate": 0.1,
    }

    call_list = []
    hidden_cfg = {
        "num_attention_heads":
            2,
        "intermediate_size":
            3072,
        "intermediate_activation":
            activations.gelu,
        "dropout_rate":
            0.1,
        "attention_dropout_rate":
            0.1,
        "kernel_initializer":
            tf.keras.initializers.TruncatedNormal(stddev=0.02),
        "call_list":
            call_list
    }
    # Create a small EncoderScaffold for testing. This time, we pass an already-
    # instantiated layer object.
    xformer = ValidatedTransformerLayer(**hidden_cfg)

    test_network = encoder_scaffold.EncoderScaffold(
        num_hidden_instances=3,
        pooled_output_dim=hidden_size,
        pooler_layer_initializer=tf.keras.initializers.TruncatedNormal(
            stddev=0.02),
        hidden_cls=xformer,
        embedding_cfg=embedding_cfg)

    # Create another network object from the first object's config.
    new_network = encoder_scaffold.EncoderScaffold.from_config(
        test_network.get_config())

    # Validate that the config can be forced to JSON.
    _ = new_network.to_json()

    # If the serialization was successful, the new config should match the old.
    self.assertAllEqual(test_network.get_config(), new_network.get_config())

    # Create a model based off of the old and new networks:
    word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)
    type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32)

    data, pooled = new_network([word_ids, mask, type_ids])
    new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])

    data, pooled = test_network([word_ids, mask, type_ids])
    model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled])

    # Copy the weights between models.
    new_model.set_weights(model.get_weights())

    # Invoke the models.
    batch_size = 3
    word_id_data = np.random.randint(
        vocab_size, size=(batch_size, sequence_length))
    mask_data = np.random.randint(2, size=(batch_size, sequence_length))
    type_id_data = np.random.randint(
        num_types, size=(batch_size, sequence_length))
    data, cls = model.predict([word_id_data, mask_data, type_id_data])
    new_data, new_cls = new_model.predict(
        [word_id_data, mask_data, type_id_data])

    # The output should be equal.
    self.assertAllEqual(data, new_data)
    self.assertAllEqual(cls, new_cls)
if __name__ == "__main__":
  # Run all test cases when the module is executed directly.
  tf.test.main()
|
{
"content_hash": "cd752d9c566f48236c07bb1b2e33f6e3",
"timestamp": "",
"source": "github",
"line_count": 632,
"max_line_length": 101,
"avg_line_length": 36.76898734177215,
"alnum_prop": 0.6490231517342284,
"repo_name": "tombstone/models",
"id": "664bccd08e11720918e0060458dc934350d2d594",
"size": "23927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/nlp/modeling/networks/encoder_scaffold_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
smuggle_url,
try_get,
unsmuggle_url,
ExtractorError,
)
class LimelightBaseIE(InfoExtractor):
_PLAYLIST_SERVICE_URL = 'http://production-ps.lvp.llnw.net/r/PlaylistService/%s/%s/%s'
    @classmethod
    def _extract_urls(cls, webpage, source_url):
        """Return url_result entries for every Limelight embed in *webpage*.

        Recognizes three embedding styles: LimelightPlayer.doLoad* JS calls,
        <object> Flash embeds carrying a flashVars media/channel(List) id, and
        LimelightPlayerUtil.embed() JS calls. source_url is smuggled into each
        result URL (presumably consumed via unsmuggle_url by the concrete
        extractors -- confirm against their _real_extract).
        """
        # Maps the camel-case kind from the JS API to the URL scheme suffix.
        lm = {
            'Media': 'media',
            'Channel': 'channel',
            'ChannelList': 'channel_list',
        }

        def smuggle(url):
            return smuggle_url(url, {'source_url': source_url})

        entries = []
        for kind, video_id in re.findall(
                r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})',
                webpage):
            entries.append(cls.url_result(
                smuggle('limelight:%s:%s' % (lm[kind], video_id)),
                'Limelight%s' % kind, video_id))
        for mobj in re.finditer(
                # As per [1] class attribute should be exactly equal to
                # LimelightEmbeddedPlayerFlash but numerous examples seen
                # that don't exactly match it (e.g. [2]).
                # 1. http://support.3playmedia.com/hc/en-us/articles/227732408-Limelight-Embedding-the-Captions-Plugin-with-the-Limelight-Player-on-Your-Webpage
                # 2. http://www.sedona.com/FacilitatorTraining2017
                r'''(?sx)
                    <object[^>]+class=(["\'])(?:(?!\1).)*\bLimelightEmbeddedPlayerFlash\b(?:(?!\1).)*\1[^>]*>.*?
                        <param[^>]+
                            name=(["\'])flashVars\2[^>]+
                            value=(["\'])(?:(?!\3).)*(?P<kind>media|channel(?:List)?)Id=(?P<id>[a-z0-9]{32})
                ''', webpage):
            kind, video_id = mobj.group('kind'), mobj.group('id')
            entries.append(cls.url_result(
                smuggle('limelight:%s:%s' % (kind, video_id)),
                'Limelight%s' % kind.capitalize(), video_id))
        # http://support.3playmedia.com/hc/en-us/articles/115009517327-Limelight-Embedding-the-Audio-Description-Plugin-with-the-Limelight-Player-on-Your-Web-Page)
        for video_id in re.findall(
                r'(?s)LimelightPlayerUtil\.embed\s*\(\s*{.*?\bmediaId["\']\s*:\s*["\'](?P<id>[a-z0-9]{32})',
                webpage):
            entries.append(cls.url_result(
                smuggle('limelight:media:%s' % video_id),
                LimelightMediaIE.ie_key(), video_id))
        return entries
def _call_playlist_service(self, item_id, method, fatal=True, referer=None):
headers = {}
if referer:
headers['Referer'] = referer
try:
return self._download_json(
self._PLAYLIST_SERVICE_URL % (self._PLAYLIST_SERVICE_PATH, item_id, method),
item_id, 'Downloading PlaylistService %s JSON' % method,
fatal=fatal, headers=headers)
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
error = self._parse_json(e.cause.read().decode(), item_id)['detail']['contentAccessPermission']
if error == 'CountryDisabled':
self.raise_geo_restricted()
raise ExtractorError(error, expected=True)
raise
def _extract(self, item_id, pc_method, mobile_method, referer=None):
pc = self._call_playlist_service(item_id, pc_method, referer=referer)
mobile = self._call_playlist_service(
item_id, mobile_method, fatal=False, referer=referer)
return pc, mobile
def _extract_info(self, pc, mobile, i, referer):
get_item = lambda x, y: try_get(x, lambda x: x[y][i], dict) or {}
pc_item = get_item(pc, 'playlistItems')
mobile_item = get_item(mobile, 'mediaList')
video_id = pc_item.get('mediaId') or mobile_item['mediaId']
title = pc_item.get('title') or mobile_item['title']
formats = []
urls = []
for stream in pc_item.get('streams', []):
stream_url = stream.get('url')
if not stream_url or stream.get('drmProtected') or stream_url in urls:
continue
urls.append(stream_url)
ext = determine_ext(stream_url)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
stream_url, video_id, f4m_id='hds', fatal=False))
else:
fmt = {
'url': stream_url,
'abr': float_or_none(stream.get('audioBitRate')),
'fps': float_or_none(stream.get('videoFrameRate')),
'ext': ext,
}
width = int_or_none(stream.get('videoWidthInPixels'))
height = int_or_none(stream.get('videoHeightInPixels'))
vbr = float_or_none(stream.get('videoBitRate'))
if width or height or vbr:
fmt.update({
'width': width,
'height': height,
'vbr': vbr,
})
else:
fmt['vcodec'] = 'none'
rtmp = re.search(r'^(?P<url>rtmpe?://(?P<host>[^/]+)/(?P<app>.+))/(?P<playpath>mp[34]:.+)$', stream_url)
if rtmp:
format_id = 'rtmp'
if stream.get('videoBitRate'):
format_id += '-%d' % int_or_none(stream['videoBitRate'])
http_format_id = format_id.replace('rtmp', 'http')
CDN_HOSTS = (
('delvenetworks.com', 'cpl.delvenetworks.com'),
('video.llnw.net', 's2.content.video.llnw.net'),
)
for cdn_host, http_host in CDN_HOSTS:
if cdn_host not in rtmp.group('host').lower():
continue
http_url = 'http://%s/%s' % (http_host, rtmp.group('playpath')[4:])
urls.append(http_url)
if self._is_valid_url(http_url, video_id, http_format_id):
http_fmt = fmt.copy()
http_fmt.update({
'url': http_url,
'format_id': http_format_id,
})
formats.append(http_fmt)
break
fmt.update({
'url': rtmp.group('url'),
'play_path': rtmp.group('playpath'),
'app': rtmp.group('app'),
'ext': 'flv',
'format_id': format_id,
})
formats.append(fmt)
for mobile_url in mobile_item.get('mobileUrls', []):
media_url = mobile_url.get('mobileUrl')
format_id = mobile_url.get('targetMediaPlatform')
if not media_url or format_id in ('Widevine', 'SmoothStreaming') or media_url in urls:
continue
urls.append(media_url)
ext = determine_ext(media_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
media_url, video_id, 'mp4', 'm3u8_native',
m3u8_id=format_id, fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
stream_url, video_id, f4m_id=format_id, fatal=False))
else:
formats.append({
'url': media_url,
'format_id': format_id,
'preference': -1,
'ext': ext,
})
self._sort_formats(formats)
subtitles = {}
for flag in mobile_item.get('flags'):
if flag == 'ClosedCaptions':
closed_captions = self._call_playlist_service(
video_id, 'getClosedCaptionsDetailsByMediaId',
False, referer) or []
for cc in closed_captions:
cc_url = cc.get('webvttFileUrl')
if not cc_url:
continue
lang = cc.get('languageCode') or self._search_regex(r'/[a-z]{2}\.vtt', cc_url, 'lang', default='en')
subtitles.setdefault(lang, []).append({
'url': cc_url,
})
break
get_meta = lambda x: pc_item.get(x) or mobile_item.get(x)
return {
'id': video_id,
'title': title,
'description': get_meta('description'),
'formats': formats,
'duration': float_or_none(get_meta('durationInMilliseconds'), 1000),
'thumbnail': get_meta('previewImageUrl') or get_meta('thumbnailImageUrl'),
'subtitles': subtitles,
}
class LimelightMediaIE(LimelightBaseIE):
    """Extractor for a single Limelight media item, addressed either by a
    limelight:media: pseudo-URL or by a player URL carrying mediaId."""
    IE_NAME = 'limelight'
    _VALID_URL = r'''(?x)
                (?:
                    limelight:media:|
                    https?://
                        (?:
                            link\.videoplatform\.limelight\.com/media/|
                            assets\.delvenetworks\.com/player/loader\.swf
                        )
                        \?.*?\bmediaId=
                )
                (?P<id>[a-z0-9]{32})
            '''
    _TESTS = [{
        'url': 'http://link.videoplatform.limelight.com/media/?mediaId=3ffd040b522b4485b6d84effc750cd86',
        'info_dict': {
            'id': '3ffd040b522b4485b6d84effc750cd86',
            'ext': 'mp4',
            'title': 'HaP and the HB Prince Trailer',
            'description': 'md5:8005b944181778e313d95c1237ddb640',
            'thumbnail': r're:^https?://.*\.jpeg$',
            'duration': 144.23,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # video with subtitles
        'url': 'limelight:media:a3e00274d4564ec4a9b29b9466432335',
        'md5': '2fa3bad9ac321e23860ca23bc2c69e3d',
        'info_dict': {
            'id': 'a3e00274d4564ec4a9b29b9466432335',
            'ext': 'mp4',
            'title': '3Play Media Overview Video',
            'thumbnail': r're:^https?://.*\.jpeg$',
            'duration': 78.101,
            # TODO: extract all languages that were accessible via API
            # 'subtitles': 'mincount:9',
            'subtitles': 'mincount:1',
        },
    }, {
        'url': 'https://assets.delvenetworks.com/player/loader.swf?mediaId=8018a574f08d416e95ceaccae4ba0452',
        'only_matching': True,
    }]
    _PLAYLIST_SERVICE_PATH = 'media'
    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        video_id = self._match_id(url)
        # URL of the embedding page (if any), smuggled by _extract_urls;
        # passed down as the Referer for playlist service requests.
        source_url = smuggled_data.get('source_url')
        self._initialize_geo_bypass({
            'countries': smuggled_data.get('geo_countries'),
        })
        pc, mobile = self._extract(
            video_id, 'getPlaylistByMediaId',
            'getMobilePlaylistByMediaId', source_url)
        # A media playlist has exactly one item, so extract index 0.
        return self._extract_info(pc, mobile, 0, source_url)
class LimelightChannelIE(LimelightBaseIE):
    """Extractor for a Limelight channel: a playlist of media items that is
    returned as one playlist_result."""
    IE_NAME = 'limelight:channel'
    _VALID_URL = r'''(?x)
                (?:
                    limelight:channel:|
                    https?://
                        (?:
                            link\.videoplatform\.limelight\.com/media/|
                            assets\.delvenetworks\.com/player/loader\.swf
                        )
                        \?.*?\bchannelId=
                )
                (?P<id>[a-z0-9]{32})
            '''
    _TESTS = [{
        'url': 'http://link.videoplatform.limelight.com/media/?channelId=ab6a524c379342f9b23642917020c082',
        'info_dict': {
            'id': 'ab6a524c379342f9b23642917020c082',
            'title': 'Javascript Sample Code',
            'description': 'Javascript Sample Code - http://www.delvenetworks.com/sample-code/playerCode-demo.html',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://assets.delvenetworks.com/player/loader.swf?channelId=ab6a524c379342f9b23642917020c082',
        'only_matching': True,
    }]
    _PLAYLIST_SERVICE_PATH = 'channel'
    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        channel_id = self._match_id(url)
        # Embedding page URL (if any), used as Referer downstream.
        source_url = smuggled_data.get('source_url')
        pc, mobile = self._extract(
            channel_id, 'getPlaylistByChannelId',
            'getMobilePlaylistWithNItemsByChannelId?begin=0&count=-1',
            source_url)
        # One entry per item of the PC playlist, mirrored in the mobile one.
        entries = [
            self._extract_info(pc, mobile, i, source_url)
            for i in range(len(pc['playlistItems']))]
        return self.playlist_result(
            entries, channel_id, pc.get('title'), mobile.get('description'))
class LimelightChannelListIE(LimelightBaseIE):
    """Extractor for a Limelight channel list: a collection of channels,
    each delegated to LimelightChannelIE."""
    IE_NAME = 'limelight:channel_list'
    _VALID_URL = r'''(?x)
                (?:
                    limelight:channel_list:|
                    https?://
                        (?:
                            link\.videoplatform\.limelight\.com/media/|
                            assets\.delvenetworks\.com/player/loader\.swf
                        )
                        \?.*?\bchannelListId=
                )
                (?P<id>[a-z0-9]{32})
            '''
    _TESTS = [{
        'url': 'http://link.videoplatform.limelight.com/media/?channelListId=301b117890c4465c8179ede21fd92e2b',
        'info_dict': {
            'id': '301b117890c4465c8179ede21fd92e2b',
            'title': 'Website - Hero Player',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://assets.delvenetworks.com/player/loader.swf?channelListId=301b117890c4465c8179ede21fd92e2b',
        'only_matching': True,
    }]
    _PLAYLIST_SERVICE_PATH = 'channel_list'
    def _real_extract(self, url):
        channel_list_id = self._match_id(url)
        channel_list = self._call_playlist_service(
            channel_list_id, 'getMobileChannelListById')
        # Each contained channel becomes a limelight:channel url_result.
        entries = [
            self.url_result('limelight:channel:%s' % channel['id'], 'LimelightChannel')
            for channel in channel_list['channelList']]
        return self.playlist_result(
            entries, channel_list_id, channel_list['title'])
|
{
"content_hash": "e32d0026afcbc64a0db74840b39bde72",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 163,
"avg_line_length": 41.661064425770306,
"alnum_prop": 0.48961204867881397,
"repo_name": "yasoob/youtube-dl-GUI",
"id": "39f74d2822bc7296df8a5c16e5edfce3298e82ab",
"size": "14889",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/limelight.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "1335226"
}
],
"symlink_target": ""
}
|
import os
from datetime import datetime, timedelta
from re import sub
import praw
from pytz import UTC
from icfwp import app, PUNCTUATION, STRIPPABLE_PUNCTUATION
from markov.model import TransitionTable
USERAGENT = "Writingprompts title scraper by /u/reostra"
def now():
    """Return the current UTC time as a datetime.

    NOTE(review): datetime.replace() returns a NEW datetime and leaves the
    original untouched, so the line below is a no-op and the result is in
    fact naive. main() repeats the same discarded-replace pattern on the
    submission timestamps, so both stay naive and the cutoff comparison
    still works -- if either site is made timezone-aware, both must be
    fixed together.
    """
    result = datetime.utcnow()
    result.replace(tzinfo=UTC)
    return result
def expand_punctuation(word):
    """Separate one leading and one trailing punctuation mark from word.

    Returns the pieces joined by single spaces, e.g. '"hello!' becomes
    '" hello !' (assuming both characters are in PUNCTUATION).
    """
    word = word.strip()
    pieces = []
    first = word[0]
    if first in PUNCTUATION:
        pieces.append(first)
        word = word[1:]
    if not word:
        # the whole token was a single punctuation character
        return " ".join(pieces)
    last = word[-1]
    if last in PUNCTUATION:
        pieces.append(word[:-1])
        pieces.append(last)
    else:
        pieces.append(word)
    return " ".join(pieces)
def sanitize(title):
    """Clean a submission title for markov training.

    Drops [WP]-style tags, strips quote-like punctuation outright, and
    space-separates the remaining punctuation from each word.
    """
    # Remove all the [WP], [TT], etc tags
    title = sub("\[.*\]", "", title)
    # Quotes and similar characters make for terrible markovs; drop them.
    for punct in STRIPPABLE_PUNCTUATION:
        title = title.replace(punct, "")
    return " ".join(expand_punctuation(word) for word in title.split())
def main(days=1):
count = 0
# Load the brain, if it's there
markov_filename = app.config['MARKOV_STORAGE']
if os.path.isfile(markov_filename):
tt = TransitionTable.from_filename(markov_filename)
else:
tt = TransitionTable()
cutoff = now() - timedelta(days=days)
reddit = praw.Reddit(user_agent=USERAGENT)
hq = reddit.get_subreddit('writingprompts')
submissions = hq.get_new(limit=None)
for submission in submissions:
created = datetime.utcfromtimestamp(submission.created_utc)
created.replace(tzinfo=UTC)
if created < cutoff:
break
tt.add_from_string(sanitize(submission.title))
# Save
tt.persist(markov_filename)
print "Populated %d entries over %d days" % (count, days)
if __name__ == '__main__':
main(days=1)
|
{
"content_hash": "8f0db512e3f4135f4501a5d721a61d1d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 68,
"avg_line_length": 25.07894736842105,
"alnum_prop": 0.64900314795383,
"repo_name": "atiaxi/itcamefromwritingprompts",
"id": "8f4fe434e2b88352d64aab506641768c88ead9de",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icfwp/populator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4825"
},
{
"name": "Python",
"bytes": "8553"
}
],
"symlink_target": ""
}
|
from typing import Dict, Union
from merakicommons.cache import lazy_property
from merakicommons.container import searchable
from ...data import Region, Platform
from ..common import CoreData, CassiopeiaGhost, provide_default_region, ghost_load_on
from ...dto.staticdata import realm as dto
##############
# Data Types #
##############
class RealmData(CoreData):
    # Core-layer wrapper around the realm DTO. _renamed maps the short
    # field names returned by the realm endpoint (e.g. "css", "dd") to the
    # descriptive attribute names used by the core API (e.g. "cssVersion").
    _dto_type = dto.RealmDto
    _renamed = {"lg": "legacyMode", "dd": "latestDataDragon", "l": "language", "n": "latestVersions",
                "profileiconmax": "maxProfileIconId", "v": "version", "css": "cssVersion"}
##############
# Core Types #
##############
@searchable({})
class Realms(CassiopeiaGhost):
    """Ghost-loaded realm (Data Dragon status/configuration) data for a
    region."""
    _data_types = {RealmData}
    @provide_default_region
    def __init__(self, region: Union[Region, str] = None):
        kwargs = {"region": region}
        super().__init__(**kwargs)
    def __get_query__(self):
        return {"region": self.region, "platform": self.platform}
    @lazy_property
    def region(self) -> Region:
        """The region for this realm."""
        return Region(self._data[RealmData].region)
    @lazy_property
    def platform(self) -> Platform:
        """The platform for this realm."""
        return self.region.platform
    @lazy_property
    def locale(self) -> str:
        # fixed annotation: the stored locale value is not a Platform
        """The locale for this realm."""
        return self._data[RealmData].locale
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def version(self) -> str:
        """Value of the realm's ``version`` field."""
        return self._data[RealmData].version
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def language(self) -> str:
        # fixed: this property was defined twice with identical bodies;
        # the duplicate (which shadowed this one) has been removed
        """Default language for this realm."""
        return self._data[RealmData].language
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def latest_versions(self) -> Dict[str, str]:
        """Latest changed version for each data type listed."""
        return self._data[RealmData].latestVersions
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def legacy_mode(self) -> str:
        """Value of the realm's ``legacyMode`` field."""
        return self._data[RealmData].legacyMode
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def latest_data_dragon(self) -> str:
        """Latest Data Dragon version for this realm."""
        return self._data[RealmData].latestDataDragon
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def max_profile_icon_id(self) -> int:
        """Value of the realm's ``profileiconmax`` field."""
        return self._data[RealmData].maxProfileIconId
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def store(self) -> str:
        """Value of the realm's ``store`` field."""
        return self._data[RealmData].store
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def cdn(self) -> str:
        """The realm's CDN base URL."""
        return self._data[RealmData].cdn
    @CassiopeiaGhost.property(RealmData)
    @ghost_load_on
    def css_version(self) -> str:
        """Value of the realm's ``css`` field."""
        # fixed: RealmData._renamed maps "css" -> "cssVersion", so the
        # stored attribute is cssVersion; css_version never existed and
        # this accessor could not return the data
        return self._data[RealmData].cssVersion
|
{
"content_hash": "77d2192c6f6c5762aab9d9dd07a907c3",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 101,
"avg_line_length": 28.221153846153847,
"alnum_prop": 0.6432708688245315,
"repo_name": "10se1ucgo/cassiopeia",
"id": "54f8ee9922f2435218297d37b638a549656ef6ab",
"size": "2935",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cassiopeia/core/staticdata/realm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "560840"
}
],
"symlink_target": ""
}
|
"""This is the main executable of Maraschino. It parses the command line arguments, does init and calls the start function of Maraschino."""
import sys
import os
# Check if frozen by py2exe
def check_frozen():
    """Return True when running as a py2exe-frozen executable."""
    # py2exe marks a frozen build by setting the 'frozen' attribute on sys.
    is_frozen = hasattr(sys, 'frozen')
    return is_frozen
def get_rundir():
    """Return the path Maraschino runs from.

    For a frozen (py2exe) build this is the absolute executable path;
    otherwise it is this file's path with the trailing 'Maraschino.py'
    (13 characters) stripped off.
    """
    if check_frozen():
        exe_path = unicode(sys.executable, sys.getfilesystemencoding())
        return os.path.abspath(exe_path)
    return os.path.abspath(__file__)[:-13]
# Set the rundir
rundir = get_rundir()
# Include paths: make the run directory and the bundled ./lib packages
# importable ahead of site-packages.
sys.path.insert(0, rundir)
sys.path.insert(0, os.path.join(rundir, 'lib'))
# Create Flask instance
from flask import Flask
app = Flask(__name__)
# If frozen, we need define static and template paths, since Flask's
# defaults are derived from the package location, not the executable's.
if check_frozen():
    app.root_path = rundir
    app.static_path = '/static'
    app.add_url_rule(
        app.static_path + '/<path:filename>',
        endpoint='static',
        view_func=app.send_static_file
    )
    from jinja2 import FileSystemLoader
    app.jinja_loader = FileSystemLoader(os.path.join(rundir, 'templates'))
def import_modules():
    """All modules that are available in Maraschino are at this point imported.

    NOTE(review): these appear to be side-effect imports that wire each
    module into the Flask app on import -- confirm against the modules
    themselves before reordering or removing any.
    """
    import modules.applications
    import modules.controls
    import modules.couchpotato
    import modules.currently_playing
    import modules.diskspace
    import modules.headphones
    import modules.index
    import modules.library
    import modules.log
    import modules.nzbget
    import modules.recently_added
    import modules.remote
    import modules.sabnzbd
    import modules.script_launcher
    import modules.search
    import modules.sickbeard
    import modules.trakt
    import modules.traktplus
    import modules.transmission
    import modules.updater
    import modules.utorrent
    import modules.weather
    import modules.xbmc_notify
    import modules.process_check
    import modules.nzbdrone
    import mobile
    import modules.ipcamera
@app.teardown_request
def shutdown_session(exception=None):
    """This function is called as soon as a session is shutdown and makes sure, that the db session is also removed."""
    # NOTE(review): imported locally, presumably because maraschino.database
    # is only usable after maraschino.initialize() has run -- confirm.
    from maraschino.database import db_session
    db_session.remove()
import maraschino
def main():
    """Main function that is called at the startup of Maraschino.

    Parses command line options, copies them onto the maraschino module
    globals, initializes the application and starts the web server.
    """
    from optparse import OptionParser
    p = OptionParser()
    # define command line options
    p.add_option('-p', '--port',
                 dest='port',
                 default=None,
                 help="Force webinterface to listen on this port")
    p.add_option('-d', '--daemon',
                 dest='daemon',
                 action='store_true',
                 help='Run as a daemon')
    p.add_option('--pidfile',
                 dest='pidfile',
                 help='Create a pid file (only relevant when running as a daemon)')
    p.add_option('--log',
                 dest='log',
                 help='Create a log file at a desired location')
    p.add_option('-v', '--verbose',
                 dest='verbose',
                 action='store_true',
                 help='Silence the logger')
    p.add_option('--develop',
                 action="store_true",
                 dest='develop',
                 help="Start instance of development server")
    p.add_option('--database',
                 dest='database',
                 help='Custom database file location')
    p.add_option('--webroot',
                 dest='webroot',
                 help='Web root for Maraschino')
    p.add_option('--host',
                 dest='host',
                 help='Web host for Maraschino')
    p.add_option('--kiosk',
                 dest='kiosk',
                 action='store_true',
                 help='Disable settings in the UI')
    p.add_option('--datadir',
                 dest='datadir',
                 help='Write program data to custom location')
    p.add_option('--noupdate',
                 action="store_true",
                 dest='noupdate',
                 help='Disable the internal updater')
    # parse command line for defined options
    options, args = p.parse_args()
    if options.datadir:
        data_dir = options.datadir
    else:
        data_dir = rundir
    if options.daemon:
        # daemonizing detaches from the console, so verbose output is off
        maraschino.DAEMON = True
        maraschino.VERBOSE = False
    if options.pidfile:
        maraschino.PIDFILE = options.pidfile
        maraschino.VERBOSE = False
    if options.port:
        PORT = int(options.port)
    else:
        # default web interface port
        PORT = 7000
    if options.log:
        maraschino.LOG_FILE = options.log
    if options.verbose:
        maraschino.VERBOSE = True
    if options.develop:
        maraschino.DEVELOPMENT = True
    if options.database:
        DATABASE = options.database
    else:
        DATABASE = os.path.join(data_dir, 'maraschino.db')
    if options.webroot:
        maraschino.WEBROOT = options.webroot
    if options.host:
        maraschino.HOST = options.host
    if options.kiosk:
        maraschino.KIOSK = True
    if options.noupdate:
        maraschino.UPDATER = False
    # Publish the resolved settings as maraschino module globals before
    # initialization, which reads them.
    maraschino.RUNDIR = rundir
    maraschino.DATA_DIR = data_dir
    maraschino.FULL_PATH = os.path.join(rundir, 'Maraschino.py')
    maraschino.ARGS = sys.argv[1:]
    maraschino.PORT = PORT
    maraschino.DATABASE = DATABASE
    maraschino.initialize()
    # Daemonize before importing modules so module state lives in the child.
    if maraschino.PIDFILE or maraschino.DAEMON:
        maraschino.daemonize()
    import_modules()
    maraschino.init_updater()
    maraschino.start()
|
{
"content_hash": "2f1b5ac50bd65efb1bce0130e99c23b0",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 140,
"avg_line_length": 27.651515151515152,
"alnum_prop": 0.6160730593607306,
"repo_name": "robweber/maraschino",
"id": "77fa1ebe40fd8d525800a15f0c16ac9dac56bc42",
"size": "5521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Maraschino.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1449"
},
{
"name": "CSS",
"bytes": "133035"
},
{
"name": "HTML",
"bytes": "253779"
},
{
"name": "JavaScript",
"bytes": "141891"
},
{
"name": "Python",
"bytes": "5472574"
},
{
"name": "Shell",
"bytes": "4083"
}
],
"symlink_target": ""
}
|
"""
Import sample data for recommendation engine
"""
import predictionio
import argparse
import random
ITEM_ACTIONS_DELIMITER = "|"
RATE_ACTIONS_DELIMITER = "\t"
SEED = 3
GENRES = ["unknown", "action", "adventure", "animation", "children's", "comedy","crime", "documentary", "drama",
"fantasy", "film-noir", "horror", "musical", "mystery", "romance", "sci-fi", "thriller", "war", "western"]
def import_events(client, items_file, ratings_file):
    """Send item $set events plus rate/buy events to the event server.

    client: a predictionio.EventClient
    items_file: '|'-delimited item records (field 0 = id, fields 5+ =
        genre flags) -- presumably MovieLens u.item format, TODO confirm
    ratings_file: tab-delimited (user, item, rating, ...) records
    """
    random.seed(SEED)
    f = open(items_file, 'r')
    print "Importing items..."
    items = 0
    for line in f:
        data = line.rstrip('\r\n').split(ITEM_ACTIONS_DELIMITER)
        id = data[0]
        # Fields 5+ are 0/1 genre flags mapped onto the GENRES names.
        genres_str = data[5:]
        genres_bool = [bool(int(g)) for g in genres_str]
        genres = [g for b, g in zip(genres_bool, GENRES) if b]
        client.create_event(
            event="$set",
            entity_type="item",
            entity_id=id,
            properties= { "categories" : genres }
        )
        items += 1
    print "%s items are imported." % items
    f.close()
    f = open(ratings_file, 'r')
    print "Importing ratings..."
    # NOTE(review): this counter includes the randomly generated "buy"
    # events as well, not just "rate" events.
    ratings = 0
    for line in f:
        data = line.rstrip('\r\n').split(RATE_ACTIONS_DELIMITER)
        # For demonstration purpose, randomly mix in some buy events
        if random.randint(0, 1) == 1:
            client.create_event(
                event="rate",
                entity_type="user",
                entity_id=data[0],
                target_entity_type="item",
                target_entity_id=data[1],
                properties= { "rating" : float(data[2]) }
            )
        else:
            client.create_event(
                event="buy",
                entity_type="user",
                entity_id=data[0],
                target_entity_type="item",
                target_entity_id=data[1]
            )
        ratings += 1
    f.close()
    print "%s ratings are imported." % ratings
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Import sample data for recommendation engine")
    # NOTE(review): the default access key is misspelled ('invald');
    # presumably an intentionally-invalid placeholder so users must pass
    # a real key -- confirm before "fixing" the spelling.
    parser.add_argument('--access_key', default='invald_access_key')
    parser.add_argument('--url', default="http://localhost:7070")
    parser.add_argument('--ratings_file')
    parser.add_argument('--items_file')
    args = parser.parse_args()
    print args
    client = predictionio.EventClient(
        access_key=args.access_key,
        url=args.url,
        threads=5,
        qsize=500)
    import_events(client, args.items_file, args.ratings_file)
|
{
"content_hash": "5b1e1adc3aa493aeb659fc9d44d2b448",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 116,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.6204793028322441,
"repo_name": "pferrel/PredictionIO",
"id": "643f4dbb5422df2afb81c56c8d6572fd3afd9275",
"size": "3080",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "examples/scala-parallel-recommendation/filter-by-category/data/import_eventserver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "6930"
},
{
"name": "Java",
"bytes": "8118"
},
{
"name": "Scala",
"bytes": "833854"
},
{
"name": "Shell",
"bytes": "90694"
}
],
"symlink_target": ""
}
|
from Root import application
import unittest
class test1(unittest.TestCase):
    # NOTE(review): test_one and test_three expect application.foo() == 1
    # while test_two expects 2 -- they cannot all pass. Given this file
    # lives under src_examples for a grading system, this looks like a
    # deliberately partially-failing fixture; do not "fix" the values.
    def test_one(self):
        self.assertEqual(application.foo(),1)
    def test_two(self):
        self.assertEqual(application.foo(),2)
    def test_three(self):
        self.assertEqual(application.foo(),1)
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "0c1c462f3ecb8fce884be3b362f6e343",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 39,
"avg_line_length": 27.818181818181817,
"alnum_prop": 0.7222222222222222,
"repo_name": "codeboardio/kali",
"id": "ae909bcfff066a87b37b1b20bd8d84025bdf982f",
"size": "306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/src_examples/python/py_one_file_severalTests/Root/test/test1.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "11421"
},
{
"name": "Java",
"bytes": "30103"
},
{
"name": "JavaScript",
"bytes": "93944"
},
{
"name": "Python",
"bytes": "15727"
},
{
"name": "Shell",
"bytes": "1242"
}
],
"symlink_target": ""
}
|
from __future__ import division
from collections import deque
from copy import deepcopy
import colorsys
import warnings
import time
import math
import random
import tween
from envelope import ADSREnvelope, EnvelopeSegment
# from scene import SceneManager
from birdfish.colors import DIYC_DIM
from birdfish.output.base import DefaultNetwork
from birdfish.log_setup import logger
class PhysicalDevice(object):
    """
    This item represents an element that provides channel data to a network.
    """
    def __init__(self, start_channel=1, *args, **kwargs):
        """
        start_channel: The first channel this device occupies in a
        network.
        The channels dictionary contains a mapping of channel numbers to object
        attributes.
        """
        # signal intensity is the value set by the note on velocity -
        # does not reflect current brightness
        self.channels = {}
        self.intensity = 0
        self.channels[start_channel] = 'intensity'
        # optional 256-entry lookup table of byte values; None disables it
        self.gamma = None
        self.start_channel = start_channel
    def update_channels(self):
        """Apply dimming or other adjustments before channel output."""
        if self.gamma:
            # TODO for now gamma is as an 8 bit lookup
            # NOTE(review): this rewrites self.intensity in place, so gamma
            # would compound if update_channels ran twice without intensity
            # being re-set in between -- confirm callers reset every frame.
            dmx_val = int(self.intensity * 255)
            val = self.gamma[dmx_val]
            self.intensity = val / 255
    def set_intensity(self, intensity):
        # TODO note this setter may be superfluos
        self.intensity = intensity
    def update_data(self, data):
        """
        This method is called by the network containing this item in order to
        retrieve the current channel values.
        Data is an array of data (ie DMX) that should be updated
        with this light's channels
        """
        self.update_channels()
        for channel, value in self.channels.items():
            try:
                # targeted optimization: direct __dict__ access skips the
                # full attribute lookup machinery
                val = self.__dict__[value]
            except KeyError:
                # fixed: dict access raises KeyError, not AttributeError,
                # so the getattr fallback (needed for class-level attributes
                # such as properties) was previously unreachable
                val = getattr(self, value)
            # TODO current design flaw/limitation
            # using byte arrays instead of float arrays for base
            # data structure - means forcing to bytes here instead
            # of output network level - practically this is OK as all
            # networks are using 1 byte max per channel
            # Here the channel values are highest value wins
            data[channel - 1] = max(data[channel - 1], int(val * 255))
class RGBDevice(PhysicalDevice):
    """A PhysicalDevice occupying three consecutive channels for red,
    green and blue."""
    def __init__(self, *args, **kwargs):
        super(RGBDevice, self).__init__(*args, **kwargs)
        self.red = 0
        self.green = 0
        self.blue = 0
        # Map the three color attributes onto consecutive channels,
        # starting at (and replacing) the parent's intensity slot.
        for offset, color in enumerate(('red', 'green', 'blue')):
            self.channels[self.start_channel + offset] = color
        self.gamma = DIYC_DIM
    def update_channels(self):
        # RGB is recomputed once per cycle here rather than on every
        # hue update.
        super(RGBDevice, self).update_channels()
class BaseLightElement(object):
    """
    This class handles trigger events, and is updated with the show timeline.
    """
    # TODO need to factor the ADSR related parts out of this class
    def __init__(self,
            name="unamed_LightElements",
            bell_mode=False,
            simple=False,
            trigger_toggle=False,
            *args, **kwargs):
        """
        name is used for retrieving elements from the show
        bell_mode is a boolean that determines how the element proceeds through
        the Attack-Decay-Sustain-Release envelope in response to triggers. When
        bell_mode is True, the envelope continues through the end of release on
        the "on" trigger alone. When bell_mode is False the ADSR envelope halts
        at sustain, until an off trigger event.
        simple is an attribute, which will disable the update process, allowing
        the elements attributes to be set directly. This can be useful for
        situations where a parent object manages all the attribute changes for
        child elements.
        trigger_toggle determines the way on and off triggers are handled. When
        True, only 'on' trigger events are responded to, and they toggle the
        element on and off. This can be useful if the device only supports
        momentary push buttons.
        Effects contain an array of :class:`~birdfish.effects.BaseEffect`
        objects.
        """
        self.trigger_intensity = 0.0
        self.bell_mode = bell_mode
        self.name = name
        self.adsr_envelope = ADSREnvelope(**kwargs)
        # a simple element has values set externally and does not update
        self.simple = simple
        self.trigger_state = 0
        # timecode of the last show update applied; -1 means never updated
        self.last_update = -1
        self.trigger_toggle = trigger_toggle
        self.effects = []
        self.pre_update_effects = []
        self._intensity = 0
    def bell_reset(self):
        # TODO is this method still needed?
        # Re-arms a bell-mode element by simulating an off trigger.
        self._off_trigger()
    @property
    def update_active(self):
        """
        This property is the API for any class to determine whether updates
        should continue to be passed to this element.
        """
        return self.trigger_intensity
    def update(self, show):
        """
        The update method is called once per iteration of the main show loop.
        """
        if (self.simple or not (self.update_active)):
            # light is inactive or in sustain mode
            return self.intensity
        if self.bell_mode and self.adsr_envelope.segments[0].index == 1:
            # bell mode ignores trigger off - simulate trigger off once
            # sustain levels are reached
            self.bell_reset()
            return
        if self.adsr_envelope.advancing:
            # scale the triggered intensity by the envelope's current value
            intensity_scale = self.adsr_envelope.update(show.time_delta)
            self.set_intensity(self.trigger_intensity * intensity_scale)
        elif self.trigger_intensity:
            # envelope has finished advancing: clear the trigger state
            logger.debug(self.name)
            logger.debug('not advancing, intensity: {}'.format(self.intensity))
            self.trigger_intensity = 0.0
            self.intensity = max(0, self.intensity)
            logger.debug('not advancing, intensity: {}'.format(self.intensity))
            logger.debug('not advancing, trigger intensity: {}'.format(
                self.trigger_intensity))
            # only turn off effects here so they can continue to effect
            # releases
            [x.trigger(0) for x in self.effects]
        # moved dmx update to show update, to accomodate effects
        # self.dmx_update(show.universes[self.universe].dmx)
        # if self.last_used_intensity != self.intensity:
        # print int(self.intensity)
        for effect in self.effects:
            effect.update(show, [self])
        self.device.set_intensity(self.intensity)
        return self.intensity
    def set_intensity(self, intensity):
        # mostly to be overridden by subclasses
        self._intensity = intensity
        # mirror onto the output device when one is attached
        if hasattr(self, 'device'):
            self.device.intensity = intensity
    def get_intensity(self):
        return self._intensity
    intensity = property(get_intensity, set_intensity)
    def _on_trigger(self, intensity, **kwargs):
        # hook for subclasses; called after an "on" trigger is processed
        pass
    def _off_trigger(self):
        self.trigger_state = 0
        logger.debug("%s: trigger off" % self.name)
        self.adsr_envelope.trigger(state=0)
        # note can not set trigger_intensity to 0 here
    def trigger(self, intensity, **kwargs):
        # @@ need toggle mode implementation here
        if self.simple:
            # simple elements just take the value directly
            self.set_intensity(intensity)
            return
        if intensity > 0 and self.trigger_state == 0:
            # fresh "on" trigger
            if self.bell_mode:
                self.bell_reset()
            self.trigger_state = 1
            [x.trigger(intensity) for x in self.effects]
            self.trigger_intensity = intensity
            logger.debug("%s: trigger on @ %s" % (self.name, intensity))
            self.set_intensity(0.0)  # reset light on trigger
            self.adsr_envelope.trigger(state=1)
            self._on_trigger(intensity, **kwargs)
        elif intensity == 0 and (self.trigger_state and not self.trigger_toggle
                and not self.bell_mode):
            # normal "off" trigger
            self._off_trigger()
        elif intensity and self.trigger_state and self.trigger_toggle:
            # toggle mode: an "on" trigger while active turns the element off
            self._off_trigger()
        elif intensity > self.intensity and self.trigger_state == 1:
            # a greater trigger intensity has occured - override
            self.trigger_intensity = intensity
            logger.debug("%s: override trigger on @ %s" %
                         (self.name, intensity))
            self.intensity = 0.0  # reset light on trigger
            # reset the envelope with a forced on trigger
            self.adsr_envelope.trigger(state=1, force=True)
        # else redundant trigger
    def off(self):
        """convenience for off, synonym for trigger(0)"""
        self.trigger(0)
class LightElement(BaseLightElement, PhysicalDevice):
    """
    A basic light combining trigger/envelope behavior with an output
    device that carries the channel data.
    """
    def __init__(self, device_element=None, *args, **kwargs):
        BaseLightElement.__init__(self, *args, **kwargs)
        # Use the supplied device, or create a single-channel default one.
        self.device = device_element if device_element else PhysicalDevice(*args, **kwargs)
class RGBLight(LightElement):
    """A LightElement backed by an RGBDevice; color is held as hue and
    saturation and converted to RGB for output."""
    def __init__(self, device_element=None, *args, **kwargs):
        BaseLightElement.__init__(self, *args, **kwargs)
        if not device_element:
            self.device = RGBDevice(*args, **kwargs)
        else:
            self.device = device_element
        self._hue = 0.0
        self._saturation = 0
        # when True, scale color so its brightest component reaches full
        self.normalize = False
    def update(self, show):
        return_value = super(RGBLight, self).update(show)
        # TODO - this funciton needed when tweening hue - but can't be used
        # tweening RGB directly
        self.update_rgb()
        return return_value
    def update_rgb(self):
        """Recompute the device's red/green/blue from current HSV state."""
        hue = self._hue
        saturation = self._saturation
        if 'intensity' in self.device.channels.values():
            # if the fixture has its own intensity slider - always calc RGB
            # values at full intensity
            intensity = 1.0
        else:
            intensity = self.intensity
        # this funct takes all 0-1 values
        r, g, b = colorsys.hsv_to_rgb(hue, saturation, intensity)
        if self.normalize and any((r, g, b)):
            maxval = max((r, g, b))
            # fixed: was `adj = maxval / 1`, a no-op division; normalizing
            # the brightest component to full scale requires the reciprocal
            adj = 1 / maxval
            r, g, b = colorsys.hsv_to_rgb(hue, saturation, intensity * adj)
        self.red = self.device.red = r
        self.green = self.device.green = g
        self.blue = self.device.blue = b
    # TODO need R, G, B setters - and an update hue mirror
    #
    def _get_hue(self):
        # TODO need to update with function in case r,g,b were updated other
        # than through hue
        return self._hue
    def _set_hue(self, hue):
        self._hue = hue
    def _get_saturation(self):
        return self._saturation
    def _set_saturation(self, saturation):
        self._saturation = saturation
    # TODO concept of intensity should be converted to raw RGB for base RGB
    # light no assumption of 4th channel
    # @@ need to address the attribute of intensity in the context of RGB
    def update_hue(self):
        """
        updates hue property from RGB values, RGB is always updated when hue
        changed
        """
        adjusted_rgb = [x * self.intensity for x in [
            self.red, self.green, self.blue]]
        h, s, v = colorsys.rgb_to_hsv(*tuple(adjusted_rgb))
        self._hue = h
        self._saturation = s
    hue = property(_get_hue, _set_hue)
    saturation = property(_get_saturation, _set_saturation)
class LightGroup(BaseLightElement):
    # TODO why base light element, and not light element?
    """
    A collection of light Elements triggered in collectively in some form
    """
    def __init__(self, *args, **kwargs):
        super(LightGroup, self).__init__(*args, **kwargs)
        self.elements = kwargs.get('elements', [])
        self.name = kwargs.get("name", "lightgroup")
        # TODO need min and max intensity - at a more baseclass level
        self.max_intensity = 1.0
    def trigger(self, sig_intensity, **kwargs):
        """Fan a trigger out to every member element and effect.

        A non-zero signal is clamped to max_intensity; zero clears the
        trigger state and sends 0.0 to all members.
        """
        if sig_intensity:
            intensity = min(self.max_intensity, sig_intensity)
            self.trigger_state = 1
        else:
            self.trigger_state = 0
            intensity = 0.0
        self.trigger_intensity = intensity
        # NOTE(review): self.effects is assumed to come from
        # BaseLightElement -- confirm.
        [l.trigger(intensity) for l in self.elements]
        [x.trigger(intensity) for x in self.effects]
    def set_intensity(self, intensity):
        """Scale each member's own intensity by `intensity`."""
        # the group element always has a pseudo-intensity of 1
        [e.set_intensity(e.intensity * intensity) for e in self.elements]
    @property
    def update_active(self):
        # the group still needs updates while any member does
        return any([e.update_active for e in self.elements])
    def update(self, show):
        """Propagate a show update to members not yet updated this frame."""
        if self.trigger_state or self.update_active:
            for element in self.elements:
                if element.last_update != show.timecode:
                    # avoide updated sub elements twice if they are also in the
                    # main show list of elements
                    element.update(show)
                    element.last_update = show.timecode
                    # TODO - setting trigger_intensity here messes up chases
                    # but without it need a better way to remove spent spawn
                    # self.trigger_intensity = self.update_active
    # TODO could have hue, saturation and other basic property passthrough?
class Chase(LightGroup):
    """A light group whose trigger sweeps a moving 'center' position from
    start_pos toward end_pos over the member elements.

    Motion is delegated to an EnvelopeSegment tweened by move_tween;
    speed_mode selects whether `speed` is a total duration ('duration')
    or a rate in positions per second ('speed').
    NOTE(review): several attributes used below (_on_trigger,
    trigger_toggle, bell_mode, effects) are assumed to come from the
    base classes -- confirm.
    """
    def __init__(self,
            # group=None,
            **kwargs):
        super(Chase, self).__init__(**kwargs)
        # self.group = group
        self.anti_alias = False
        self.center_position = 0  # current head of the chase
        self.moveto = None  # requested destination position
        self.current_moveto = None  # destination the envelope is running to
        self.speed_mode = 'duration'  # or 'speed' of units per second
        self.speed = kwargs.get('speed', 1)
        self.move_envelope = None
        self.move_tween = kwargs.get('move_tween', tween.LINEAR)
        self.start_pos = kwargs.get('start_pos', 0)
        self.end_pos = kwargs.get('end_pos', 10)
        self.moveto = self.end_pos
        self.last_center = None  # center at the previous render
        self.moving = False
        # off mode may be all, follow, reverse
        self.off_mode = "all"
        self.continuation_mode = None  # None, 'loop' or 'pong'
        self.move_complete = False
        self.sweep = True  # light everything behind the head
        self.width = 1  # head width when not sweeping
    def _off_trigger(self):
        """Handle release: clear trigger state and apply off_mode."""
        self.trigger_state = 0
        # TODO - setting trigger intensity to 0 here - works for sweeps
        # but non-sweep has to use trigger 1 as it doesn't have access to the
        # original trigger_intensity
        self.trigger_intensity = 0
        # if self.bell_mode:
        # TODO does bell apply to chase classes?
        # ignore release in bell mode
        # return
        logger.debug("%s: pulse trigger off" % self.name)
        self.reset_positions()
        if self.off_mode == "all":
            # TODO some cleanup needed - moving set to false in
            # reset_positions, need to more clearly define between
            # these functions what does what
            self.moving = False
            for e in self.elements:
                # blackout
                e.trigger(0)
        elif self.off_mode in ["follow", "reverse"]:
            # reset the chase to follow itself as trigger off
            # TODO - placeholder, not sure anything needs to be done
            self.moving = True
    def trigger(self, intensity, **kwargs):
        """Start the chase on a rising edge; stop per toggle/bell rules."""
        if intensity > 0 and self.trigger_state == 0:  # or note off message
            if self.moving:
                # we are already in either in an active on or off chase
                # TODO - do we reset everything - draw on top...?
                # print "Already moving"
                return
                # self.reset_positions()
                # TODO so reset positions only for off trigger?
            self.trigger_state = 1
            self.trigger_intensity = intensity
            self.center_position = self.last_center = self.start_pos
            self.moveto = self.end_pos
            logger.debug("%s: chase trigger on @ %s" % (self.name, intensity))
            self.moving = True
            self.setup_move()
            self._on_trigger(intensity, **kwargs)
        elif intensity == 0 and (self.trigger_state and not self.trigger_toggle
                and not self.bell_mode):
            self._off_trigger()
        elif intensity and self.trigger_state and self.trigger_toggle:
            logger.info("%s: chase trigger toggle off @ %s" % (self.name,
                intensity))
            self._off_trigger()
    def setup_move(self, moveto=None):
        """
        Sets up the move envelope from the current position
        """
        if moveto:
            # allow this to be a single method to be called
            # by others to both set a moveto, and start the move
            self.moveto = moveto
        # TODO need to differentiate between first move - and subsequent moves
        if not self.move_envelope:
            # TODO the tween type needs to be a settable attr on self
            self.move_envelope = EnvelopeSegment(tween=self.move_tween)
            self.last_center = self.center_position = self.start_pos
        self.move_envelope.profile.change = self.moveto - self.center_position
        self.move_envelope.profile.start = self.center_position
        if self.speed_mode == 'duration':
            self.move_envelope.profile.duration = self.speed
        elif self.speed_mode == 'speed':
            self.move_envelope.profile.duration = (
                # ie moving 9 spaces, at 3 spaces per sec = 3 sec
                self.move_envelope.profile.change / self.speed)
        self.move_envelope.reset()
        self.current_moveto = self.moveto
        self.move_complete = False
        if self.trigger_state:
            self.moving = True
    def _get_move_toward(self):
        return self.moveto
    def _set_move_toward(self, value):
        # assigning move_toward immediately retargets the envelope
        self.setup_move(moveto=value)
    move_toward = property(_get_move_toward, _set_move_toward)
    def _move_completed(self):
        """End-of-move hook: apply pong/loop continuation or stop."""
        # called at the end of a move, for looping, pong, etc
        # TODO while pulse paused at one end - this is firing multiple
        # times
        if self.continuation_mode == 'pong':
            if round(self.center_position) == self.end_pos:
                logger.debug("%s pong-end @ %s" % (self.name, self.end_pos))
                self.moveto = self.start_pos
            if round(self.center_position) == self.start_pos:
                self.moveto = self.end_pos
        elif self.continuation_mode == 'loop':
            # TODO the last_center reset is an easy one to miss, and should
            # be built into something else
            self.last_center = self.center_position = self.start_pos
            self.setup_move()
        else:
            self.moving = False
            self.move_complete = True
            if self.bell_mode and self.trigger_state:
                self._off_trigger()
    def reset_positions(self):
        """Choose the post-release destination per off_mode and restart
        the movement envelope."""
        # called in association with off trigger
        if (self.off_mode == "reverse"):
            if self.center_position == self.start_pos:
                self.moveto = self.end_pos
            else:
                self.moveto = self.start_pos
            self.moveto = int(self.moveto)
        else:  # all or follow
            if self.trigger_state:
                self.moveto = int(self.center_position)  # self.end_pos
            else:
                self.moveto = self.end_pos
                self.center_position = self.last_center = self.start_pos
            self.moveto = int(self.moveto)
        # setup_move only called from update_position if moveto != current
        # moveto in all off situations, current_moveto never changes.
        self.setup_move()
        self.moving = False
    def update_position(self, show):
        """Advance the movement envelope by the show's frame time delta."""
        if self.current_moveto != self.moveto:
            self.setup_move()
        if self.moveto is not None:
            self.center_position = self.move_envelope.update(
                show.time_delta)
    @property
    def update_active(self):
        return self.moving or super(Chase, self).update_active
    def update(self, show):
        """Per-frame hook: move the head, render, then effects/completion."""
        # super handles sending update to sub-elements
        super(Chase, self).update(show)
        # always keep time delta updated
        if not self.trigger_intensity:
            if self.off_mode == "all":
                return
        if self.moving:
            self.update_position(show)
            self.render()
        for effect in self.effects:
            effect.update(show, [self])
        if self.moving and self.move_envelope.completed:
            # this reset should happen after the render
            # to give the final "frame" a chance to draw itself
            # this is not called if moveto is reached through rounding in
            # update_position
            # self.reset_positions()
            self._move_completed()
    def render(self):
        """Trigger member elements to reflect the new center position."""
        # TODO needs to handle reverse situations better
        if self.last_center is None:
            self.last_center = self.start_pos
        # TODO need to determine whether there shell be a generic anti-alias
        # support - Pulse currently does this in its own render
        current_center = int(self.center_position)
        if self.sweep:
            # trigger everything up to current center
            if self.last_center > current_center:
                [e.trigger(self.trigger_intensity) for
                        e in self.elements[current_center:self.last_center]]
            else:
                [e.trigger(self.trigger_intensity) for
                        e in self.elements[self.last_center:current_center]]
        else:
            # trigger only the width
            [e.trigger(0) for e in self.elements]
            if current_center > self.moveto:
                # note, currently there is no difference between start and end
                # based on direction, the width is always to the left of the
                # current center - more of a sweep effect can be made with
                # bell modes on the sub elements
                start = max(self.lower_bound, current_center - self.width)
                end = current_center
            else:
                start = max(self.lower_bound, current_center - self.width)
                end = current_center
            if self.sweep:
                intensity = self.trigger_intensity
            else:
                # TODO - we can't get the original trigger intensity anymore
                # it shouldn't be 1 - the fix will be to make sweep renders
                # above work with the trigger state instead of intensity?
                intensity = 1
            [e.trigger(intensity) for e in self.elements[start:end]]
        self.last_center = current_center
    @property
    def upper_bound(self):
        return max(self.start_pos, self.end_pos)
    @property
    def lower_bound(self):
        return min(self.start_pos, self.end_pos)
class Spawner(BaseLightElement):
    """Creates live deep copies of a model element on demand.

    Each positive trigger spawns (or, with unique_per_key, reuses) a copy
    of self.model, registers it with the show and network, fires it in
    one-shot (bell) mode, and garbage-collects it once it goes inactive.
    """
    def __init__(self, *args, **kwargs):
        super(Spawner, self).__init__(*args, **kwargs)
        self.model = kwargs.get('model', None)
        self.show = kwargs.get('show')
        self.network = kwargs.get('network')
        self.spawned = {}  # key -> live spawned instance
        self.channels = []
        self.unique_per_key = True
        self._spawn_counter = 0  # key source when unique_per_key is off
    def spawn(self, key):
        """Return a live copy of the model for `key`, reusing an existing
        one per key when unique_per_key is set."""
        if self.unique_per_key and key in self.spawned:
            return self.spawned[key]
        instance = deepcopy(self.model)
        self.show.add_element(instance)
        if self.unique_per_key:
            store_key = key
        else:
            store_key = self._spawn_counter
            self._spawn_counter += 1
        self.spawned[store_key] = instance
        # TODO: recurse so only leaf elements that carry channels are
        # registered with the network
        for sub_element in instance.elements:
            self.network.add_element(sub_element)
        try:
            instance.spawned()
        except AttributeError:
            pass  # model type has no spawned() hook
        return instance
    def update(self, show):
        """Drop spawned instances that have finished updating."""
        finished = [key for key, instance in self.spawned.items()
                    if not instance.update_active]
        for key in finished:
            self.show.remove_element(self.spawned[key])
            del self.spawned[key]
    def trigger(self, intensity, **kwargs):
        """On a positive trigger, spawn/fetch the instance for the incoming
        key and fire it one-shot; off triggers are ignored (spawns expire
        on their own via update())."""
        if intensity <= 0:
            return
        spawn_key = kwargs['key'][1]
        instance = self.spawn(spawn_key)
        instance.bell_mode = True
        instance.continuation_mode = None
        instance.trigger(intensity)
class HitPulse(Spawner):
    """Spawner that, for each hit key, builds a pair of short chases
    radiating left and right from the hit position in a random color."""
    def __init__(self, *args, **kwargs):
        super(HitPulse, self).__init__(*args, **kwargs)
        self.elements = []
        self.width = 8  # elements covered on each side of the hit
        self.network = None
    def spawn(self, key):
        """Create (or reuse) the left/right chase pair centered at `key`."""
        if key in self.spawned:
            return self.spawned[key]
        center = key
        # TODO the roles of trigger and spawn need to be better divided
        random_hue = random.random()
        chase_pair = LightGroup()
        for rev in (True, False):
            chase = Chase(
                start_pos=0,
                end_pos=self.width,
                speed=.15,
                # move_tween=tween.OUT_EXPO,
            )
            chase.off_mode = "reverse"
            # chase.bell_mode = True
            if rev:
                # NOTE(review): a negative slice start (center < width)
                # would wrap around the list end -- presumably hit keys
                # stay >= width; confirm.
                elements = self.elements[center - self.width:center]
                elements.reverse()
            else:
                elements = self.elements[center:center + self.width]
            chase.elements = [deepcopy(x) for x in elements]
            for x in chase.elements:
                x.hue = random_hue
                self.network.add_element(x)
                # self.show.add_element(x, network=self.network)
            chase_pair.elements.append(chase)
        # chase_pair.elements = [deepcopy(x) for x in self.elements[30:31]]
        # self.show.add_element(chase)
        # chase_pair.elements.extend(chase.elements)
        # for x in chase_pair.elements:
        # x.hue = random_hue
        # self.network.add_element(x)
        self.spawned[key] = chase_pair
        self.show.add_element(chase_pair)
        return chase_pair
    def trigger(self, intensity, **kwargs):
        """Spawn and fire a chase pair on note-on; forward note-off to the
        matching spawned pair if it still exists."""
        if intensity > 0:
            # TODO need input range
            # key = kwargs['key'][1] - 50
            # TODO need to handle trigger's more abstractly for OSC etc
            # TODO need to have self.spawned be a dict with keys so that
            # off triggers can find their matching spawned item, to support
            # more than just bell_mode
            key = kwargs['key'][1]
            spawned_pair = self.spawn(key=key)
            spawned_pair.trigger(intensity)
        else:
            # off trigger
            key = kwargs['key'][1]
            if key in self.spawned:
                # may not be present if bell mode already removed
                spawned_pair = self.spawned[key]
                spawned_pair.trigger(0)
class Pulse(object):
    """
    handles the rendering of a pulse in the abstract sense
    a range of values that change over distance
    """
    def __init__(self,
            left_width=3,
            left_shape=tween.LINEAR,
            right_width=3,
            right_shape=tween.LINEAR,
            **kwargs):
        # widths are in elements; shapes are tween functions describing
        # the falloff on each side of the center
        self.left_width = left_width
        self.left_shape = left_shape
        self.right_width = right_width
        self.right_shape = right_shape
        self.nodes = []  # a list of element values for pulse
        self.node_range = []  # index range of current pulse
        # NOTE(review): set_current_nodes reads self.center_position,
        # which this class never sets (a cooperating mixin partner such
        # as Chase provides it); current_position below appears unused
        # -- confirm.
        self.current_position = 0
    def set_current_nodes(self):
        """
        the node array becomes a list of values - generally for intensity
        that describes the left and right shape of the pulse around
        the center_position.
        The node_range specifies the location start and end of the pulse
        overall
        """
        node_offset = self.center_position % 1
        left_of_center = math.floor(self.center_position)
        far_left = int(left_of_center - self.left_width)
        self.nodes = []
        # left flank, outermost first (reversed below)
        for n in range(self.left_width + 1):
            self.nodes.append(self.left_shape(
                n + node_offset, 1, -1, self.left_width + 1.0))
        if far_left >= 1:
            # pad a trailing zero so the wake is blacked out
            self.nodes.append(0)
            far_left -= 1
        self.nodes.reverse()
        # right flank
        for n in range(1, self.right_width + 1):
            self.nodes.append(self.right_shape(
                max(0, n - node_offset), 1, -1, self.right_width + 1.0))
        self.nodes.append(0)
        self.node_range = range(far_left, far_left + len(self.nodes))
        logger.debug("NodeData:")
        logger.debug(self.node_range)
        logger.debug(self.nodes)
class PulseChase(Chase, Pulse):
    """
    a cylon like moving pulse
    center is always full on, and 0 width
    width will then be node-node width
    if width 3 - third node would be off when pulse squarely centered on a node
    width == duration for tweens
    change is always 0 to 1
    """
    def __init__(self,
            # group=None,
            # TODO once the kwargs settle down - make them explicit
            left_width=3,
            left_shape=tween.LINEAR,
            right_width=3,
            right_shape=tween.LINEAR,
            **kwargs):
        # NOTE(review): the named shape/width parameters above are never
        # forwarded -- Pulse.__init__ only sees **kwargs, so explicitly
        # passing e.g. left_width=5 here is silently dropped; confirm.
        super(PulseChase, self).__init__(**kwargs)
        Pulse.__init__(self, **kwargs)
        self.anti_alias = True
        self.continuation_mode = 'pong'
    def update(self, show):
        """Standard chase update plus a debug trace of the pulse center."""
        super(PulseChase, self).update(show)
        logger.debug("%s Centered @ %s -> %s" %
                (self.name, self.center_position, self.end_pos))
    def render(self):
        """Recompute the pulse node values and push them to the elements."""
        self.set_current_nodes()
        for i, e in enumerate(self.elements):
            e.trigger(0)
            if i in self.node_range:
                # TODO issue here with a moving pulse:
                # how does the element handle multiple on triggers
                # the trigger 0 is needed otherwise the leading edge just stays
                # dim
                e.trigger(self.nodes[i - self.node_range[0]])
class LightShow(object):
    """Top-level show runner.

    Owns the element and network lists, drives the realtime frame loop
    (run_live) or simulated steps (step), and adaptively backs off the
    frame delay when updates run slow.
    """
    def send_viewer_data(self):
        """Write network 1's raw channel bytes, zero-padded to 512 bytes,
        to the /tmp/dmxpipe fifo for an external DMX viewer.

        NOTE(review): writes a str to a binary-mode file, which only
        works on Python 2 -- confirm before porting.
        """
        dd = ''.join([chr(int(i)) for i in self.networks[1].data])
        f = open('/tmp/dmxpipe', 'wb', 0)
        pad_dd = dd.ljust(512, '\x00')
        f.write(pad_dd)
        f.close()
    def __init__(self):
        super(LightShow, self).__init__()
        self.networks = []
        self.effects = []
        self.frame_rate = 40
        # self.scenemanager = SceneManager()
        # 1.0 forces true division on Python 2 as well: the original
        # `1 / self.frame_rate` truncated to 0 there, while init_show()
        # already used 1.0 -- this is a correctness/consistency fix.
        self.frame_delay = 1.0 / self.frame_rate
        self.running = True
        self.named_elements = {}
        self.default_network = DefaultNetwork()
        self.networks.append(self.default_network)
        self.time_delta = 0
        self.recent_frames = deque()
        self.average_framerate = self.frame_delay
        self.frame = 0
        self.elements = []
    def add_element(self, element, network=None):
        """Register element with the show (and optionally a network),
        avoiding duplicates in both lists."""
        if network:
            network.add_element(element)
            if network not in self.networks:
                self.networks.append(network)
        if element not in self.elements:
            self.elements.append(element)
    def remove_element(self, element, network=None):
        """Remove element (recursing into its sub-elements) from every
        network and the show list.

        Returns True if it was present in the show's element list.
        """
        if hasattr(element, 'elements') and element.elements:
            for sub_element in element.elements:
                self.remove_element(sub_element)
        for network in self.networks:
            network.remove_element(element)
        try:
            self.elements.remove(element)
            return True
        except ValueError:
            return False
    def blackout(self):
        """Force every networked element off."""
        for n in self.networks:
            for e in n.elements:
                e.trigger(0)
                if hasattr(e, 'intensity'):
                    e.intensity = 0
    def get_named_element(self, name):
        """Look an element up by name across all networks, caching hits;
        returns False when no network knows the name."""
        if name in self.named_elements:
            # a simple cache
            return self.named_elements[name]
        for network in self.networks:
            named_light = network.get_named_element(name)
            if named_light:
                self.named_elements[name] = named_light
                return named_light
        return False
    def init_show(self):
        """Prepare network buffers and timing; call after configuration,
        before run_live()/step()."""
        # needed as params may be changed between __init__ and run_live
        for n in self.networks:
            n.init_data()
        self.frame_delay = 1.0 / self.frame_rate
    def frame_average(self, frame):
        """Rolling mean of the most recent (up to 8) frame durations,
        including `frame`."""
        self.recent_frames.append(frame)
        frame_count = len(self.recent_frames)
        if frame_count > 8:
            self.recent_frames.popleft()
            frame_count -= 1
        elif frame_count == 0:
            return frame
        return sum(self.recent_frames) / frame_count
    def step(self, count=1, speed=1):
        """
        Simulate a step or steps in main loop
        """
        for i in range(count):
            self.timecode += self.frame_delay
            self.time_delta = self.frame_delay
            self.update()
            for n in self.networks:
                n.send_data()
            if speed and count > 1 and i < (count - 1):
                # 1.0: under Python 2 the original `1 / speed` truncated
                # to 0 for any speed > 1, skipping the sleep entirely
                time.sleep((1.0 / speed) * self.frame_delay)
    def run_live(self):
        """Main realtime loop: update, adaptively pace, and send network
        data every frame until self.running goes False."""
        self.init_show()
        self.show_start = time.time()
        self.timecode = 0
        while self.running:
            # projected frame event time
            now = time.time() + self.frame_delay
            timecode = now - self.show_start
            self.time_delta = timecode - self.timecode
            self.timecode = timecode
            self.update()
            post_update = time.time()
            # how long did this update actually take
            effective_frame = post_update - (now - self.frame_delay)
            effective_framerate = self.frame_average(effective_frame)
            discrepancy = effective_framerate - self.frame_delay
            if discrepancy > .01:
                # falling behind: back the target frame rate off
                self.frame_delay += .01
                if discrepancy > .3:
                    warnings.warn("Slow refresh")
            elif discrepancy < -.01 and self.frame_delay > 1.0 / self.frame_rate:
                # we can speed back up (1.0: true division on Python 2)
                self.frame_delay -= .01
            self.frame += 1
            remainder = self.frame_delay - effective_frame
            if remainder > 0:
                # we finished early, wait to send the data
                # TODO this wait could/should happen in another thread that
                # handles the data sending - but currently sending the data
                # is fast enough that this can be investigated later
                time.sleep(remainder)
            # pre_send = time.time()
            for n in self.networks:
                n.send_data()
            if self.frame == 40:
                # print [e.channels for e in self.networks[1].elements]
                print('framerate: ', 1 / self.frame_delay, " Remainder: ",
                        remainder)
                self.frame = 0
    def update(self):
        """The main show update command"""
        # self.scenemanager.update(self)
        for element in self.elements:
            if element.last_update != self.timecode:
                # avoid updating the same element twice
                element.update(self)
                element.last_update = self.timecode
        for e in self.effects:
            e.update(self)
|
{
"content_hash": "6f680dedacfd3d6506b8b777e495c424",
"timestamp": "",
"source": "github",
"line_count": 968,
"max_line_length": 79,
"avg_line_length": 37.43285123966942,
"alnum_prop": 0.577093969918587,
"repo_name": "ptone/BirdFish",
"id": "84f0374ac9e1d2c3288686f1cd776613ffa93887",
"size": "36235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "birdfish/lights.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "176764"
},
{
"name": "Shell",
"bytes": "4515"
}
],
"symlink_target": ""
}
|
import random
import nest
from .lib import paramify
def simulate(data):
    """Build and run a NEST network described by the `data` dict.

    Expects keys: 'kernel' (settings), 'nodes', 'links', 'sim_time',
    optionally 'random_seed' and 'id'.  Mutates `data` in place -- node
    dicts gain 'ids' (NEST gids) and, for recorders, 'events' -- and
    returns it.
    """
    print('Simulate %s' % data.get('id', None))
    # print(data)
    # print('Set kernel')
    nest.ResetKernel()
    random.seed(int(data.get('random_seed', 0)))
    local_num_threads = int(data['kernel'].get('local_num_threads', 1))
    nest.SetKernelStatus({
        'local_num_threads': local_num_threads,
        # one RNG seed per kernel thread, drawn from the seeded python RNG
        'rng_seeds': [random.randint(0, 1000) for thread in range(local_num_threads)],
        'resolution': float(data['kernel'].get('resolution', 1.0)),
    })
    nodes = data['nodes']
    links = data['links']
    # print('Create nodes')
    recorders = []
    for idx, node in enumerate(nodes):
        # create NEST ids for every enabled node that names a model
        nodes[idx]['ids'] = []
        if node.get('disabled', False):
            continue
        if not node.get('model', False):
            continue
        nodes[idx]['ids'] = nest.Create(node['model'], int(node.get('n', 1)))
        if node['element_type'] == 'recorder':
            recorders.append(idx)
    # print('Set parameters for nodes')
    for idx, node in enumerate(nodes):
        if len(node['ids']) == 0:
            continue
        if node['model'] == 'multimeter':
            # a multimeter records whatever its connected neurons expose
            rec_links = filter(lambda link: link['target'] == idx, links)
            recordables = []
            for link in rec_links:
                recorded_neuron = nodes[link['source']]
                recordables.extend(map(
                    lambda rec: rec.name,
                    nest.GetStatus(recorded_neuron['ids'], 'recordables')[0]))
            recordables = sorted(list(set(recordables)))
            if 'params' in node:
                node['params']['record_from'] = recordables
            else:
                node['params'] = {'record_from': recordables}
        if 'params' not in node:
            continue
        nest.SetStatus(node['ids'], params=paramify.simulate(node))
    # print('Connect nodes')
    for link in data['links']:
        # skip links that are disabled or touch disabled/uncreated nodes
        if link.get('disabled', False):
            continue
        if nodes[link['source']].get('disabled', False):
            continue
        if nodes[link['target']].get('disabled', False):
            continue
        if not nodes[link['source']].get('ids', False):
            continue
        if not nodes[link['target']].get('ids', False):
            continue
        source, target, conn_spec, syn_spec = paramify.link(link)
        if nodes[link['target']]['model'] in ['voltmeter', 'multimeter']:
            # analog recorders must be the *source* of the NEST connection
            source, target = target, source
            if type(conn_spec) == dict:
                if conn_spec['rule'] == 'fixed_indegree':
                    # mirror the degree rule since the connection flipped
                    conn_spec['rule'] = 'fixed_outdegree'
                    conn_spec['outdegree'] = conn_spec['indegree']
                    del conn_spec['indegree']
        nest.Connect(
            nodes[source]['ids'], nodes[target]['ids'], conn_spec=conn_spec,
            syn_spec=syn_spec)
    # print('Simulate')
    nest.Simulate(float(data['sim_time']))
    data['kernel']['time'] = nest.GetKernelStatus('time')
    # print('Get record data')
    for idx in recorders:
        recorderId = nodes[idx]['ids']
        events = nest.GetStatus(recorderId, 'events')[0]
        nodes[idx]['events'] = dict(
            map(lambda X: (X[0], X[1].tolist()), events.items()))
        # clear device buffers so a later resume() only reports new events
        nest.SetStatus(recorderId, {'n_events': 0})
    return data
def resume(data):
    """Continue the simulation already loaded in the NEST kernel.

    Re-applies resumable parameters to non-recorder nodes, runs for
    data['sim_time'] more, refreshes each recorder's 'events' and clears
    its buffer.  Mutates and returns `data`.
    """
    print('Resume %s' % data.get('id', None))
    recorders = []
    for idx, node in enumerate(data['nodes']):
        if len(node.get('ids', [])) == 0:
            continue
        if node['element_type'] != 'recorder':
            nest.SetStatus(node['ids'], params=paramify.resume(node))
        else:
            recorders.append((idx, node['ids']))
    nest.Simulate(float(data['sim_time']))
    data['kernel']['time'] = nest.GetKernelStatus('time')
    for idx, recorder in recorders:
        events = nest.GetStatus(recorder, 'events')[0]
        data['nodes'][idx]['events'] = dict(
            map(lambda X: (X[0], X[1].tolist()), events.items()))
        nest.SetStatus(recorder, {'n_events': 0})
    return data
|
{
"content_hash": "73f6c02ddcf10f3ab1855c59cf832fce",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 86,
"avg_line_length": 34.76271186440678,
"alnum_prop": 0.5463188688444661,
"repo_name": "babsey/nest-server-simulation",
"id": "b75e39aa1459bec244edf3c1c57b7c302f774a86",
"size": "4124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nest_apps/simple_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "761"
},
{
"name": "Python",
"bytes": "7618"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
}
|
"""
Utils has nothing to do with models and views.
"""
# stdlib
import base64
import datetime
import json
import math
import os
import random
import string
import subprocess
import sys
import time
import uuid
from functools import partial

# third-party
import pytz
from bson import json_util
# `redirect` is required by fuck_ie() below
from flask import session, request, url_for, current_app, g, redirect
from flask_mail import Message
from flask_babel import gettext as _

# local
from application.extensions import mail
from application.extensions import db
# Whitelist of accepted (lower-cased) image upload extensions.
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# Form validation
# Length/range bounds shared by the form validators.
USERNAME_LEN_MIN = 4
USERNAME_LEN_MAX = 25
REALNAME_LEN_MIN = 4
REALNAME_LEN_MAX = 25
PASSWORD_LEN_MIN = 6
PASSWORD_LEN_MAX = 16
AGE_MIN = 1
AGE_MAX = 300
DEPOSIT_MIN = 0.00
DEPOSIT_MAX = 9999999999.99
# typical values for text_subtype are plain, html, xml
text_subtype = 'plain'
def send_mail(recipients, title, message, sender='seasonstar@126.com'):
    """Send an HTML mail through Flask-Mail.

    :param recipients: list of recipient addresses.
    :param title: subject line.
    :param message: HTML body.
    :param sender: from-address; applied only when truthy.
    """
    # NOTE(review): `mail.app` assumes the Mail instance keeps a reference
    # to the Flask app -- confirm against the extension setup.
    with mail.app.app_context():
        msg = Message(title, recipients=recipients)
        if sender:
            msg.sender = sender
        msg.html = message
        mail.send(msg)
def redirect_url():
    """Best next destination: explicit ?next= arg, then the HTTP referrer,
    then the blueprint index page."""
    for candidate in (request.args.get('next'), request.referrer):
        if candidate:
            return candidate
    return url_for('.index')
def get_session_key():
    """Return the key identifying the current session.

    NOTE(review): `session.sid` presumes a server-side session extension
    (e.g. Flask-Session) -- confirm.  The original body also kept an
    unreachable fallback *below* an unconditional return (reading a
    ``session_key`` request arg, else minting a uuid4 into the session);
    that dead code has been removed for clarity.
    """
    return session.sid
def get_current_time():
    """Current UTC time as a naive datetime (no tzinfo attached)."""
    return datetime.datetime.utcnow()
def timesince(dt, default=None, reverse=False):
    """
    Returns string representing "time since" e.g.
    3 days ago, 5 hours ago etc. (Chinese wording).

    :param dt: naive UTC datetime to compare against now.
    :param default: string for a zero/negative interval
        (defaults to u'刚刚', "just now").
    :param reverse: when True, measure dt - now (time remaining) and
        phrase as u'剩余 ...' instead of u'...前'.

    Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/
    """
    if not dt:
        return ''
    if default is None:
        default = u'刚刚'
    now = datetime.datetime.utcnow()
    diff = (dt - now) if reverse else now - dt
    if diff < datetime.timedelta(days=0):
        return default
    # Floor division throughout: under Python 3 the original "/" yielded
    # floats, so the `period == 1` singular test and the %d counts
    # misbehaved; "//" keeps the Python 2 integer semantics everywhere.
    periods = (
        (diff.days // 365, u'年', u'年'),
        (diff.days // 30, u'月', u'月'),
        (diff.days // 7, u'周', u'周'),
        (diff.days, u'天', u'天'),
        (diff.seconds // 3600, u'小时', u'小时'),
        (diff.seconds // 60, u'分钟', u'分钟'),
        (diff.seconds, u'秒', u'秒'),
    )
    # first non-zero period wins, largest unit first
    for period, singular, plural in periods:
        if not period:
            continue
        if reverse:
            if period == 1:
                return u'剩余 %d %s' % (period, singular)
            else:
                return u'剩余 %d %s' % (period, plural)
        else:
            if period == 1:
                return u'%d%s前' % (period, singular)
            else:
                return u'%d%s前' % (period, plural)
    return default
def timeuntil(dt, default=None):
    """Counterpart of timesince: time remaining until `dt` (u'剩余 ...')."""
    return timesince(dt, default, reverse=True)
def size_normal(url):
    """Append Upyun's '!normal' thumbnail suffix to upaiyun-hosted URLs;
    other URLs pass through unchanged."""
    return url + '!normal' if 'upaiyun' in url else url
def get_class(kls):
    """Resolve a dotted-path string to the object it names.

    Imports the module portion (everything before the final dot) and
    walks the remaining attribute names.

    :param kls: dotted path, e.g. 'package.module.ClassName'.
    :returns: the named object.
    """
    parts = kls.split('.')
    target = __import__('.'.join(parts[:-1]))
    for attr in parts[1:]:
        target = getattr(target, attr)
    return target
def allowed_file(filename):
    """True when `filename` carries an extension listed in
    ALLOWED_IMAGE_EXTENSIONS (comparison is case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_IMAGE_EXTENSIONS
def id_generator(size=10, chars=string.ascii_letters + string.digits):
    """Random identifier of `size` characters drawn (with replacement)
    from `chars` (default: ASCII letters and digits)."""
    #return base64.urlsafe_b64encode(os.urandom(size))
    picks = [random.choice(chars) for _ in range(size)]
    return ''.join(picks)
def to_json(obj):
    """Serialize a mongoengine document to compact JSON (BSON types are
    handled by bson's json_util encoder)."""
    return json.dumps(obj.to_mongo(), default=json_util.default, separators=(',', ':'))
def paginate(objects, page, item_per_page=20, offset=0):
    """Slice `objects` for the zero-based `page`.

    :param objects: any sliceable sequence or queryset.
    :param page: zero-based page number.
    :param item_per_page: page size.
    :param offset: extra items to skip before the page starts.
    :returns: the page slice (bounds clamped at zero, after the end is
        computed from the unclamped start -- matching historic behavior).
    """
    begin = page * item_per_page + offset
    stop = begin + item_per_page
    begin = max(begin, 0)
    stop = max(stop, 0)
    return objects[begin:stop]
def dup_aware_paging(qs, page, num_per_page, leading_id=None):
    """Page through `qs`, skipping duplicates of a previous page.

    When `leading_id` is found inside the page, everything up to and
    including it is skipped by re-paginating with an offset, so clients
    that already saw that item get fresh results.  Always returns at
    most num_per_page items as a list.
    """
    items = paginate(qs, page, num_per_page)
    if leading_id:
        skip = 0
        for position, item in enumerate(items):
            if str(item.id) == leading_id:
                skip = position + 1
                break
        if skip:
            items = paginate(qs, page, num_per_page, offset=skip)
    trimmed = []
    for item in items:
        if len(trimmed) >= num_per_page:
            break
        trimmed.append(item)
    return trimmed
def paginate_field(query_set, field, page, item_per_page=20):
    """Page through a mongoengine list field with a server-side $slice.

    :param query_set: mongoengine queryset to project on.
    :param field: name of the list field to slice.
    :param page: zero-based page number.
    :returns: the sliced list from the first matched document.
    """
    start = page * item_per_page
    # MongoDB's $slice takes [skip, limit]; the original passed the *end
    # index* (start + item_per_page) as the limit, over-fetching on every
    # page after the first.
    query_set = query_set.fields(**{'slice__' + field: [start, item_per_page]})
    return getattr(query_set.first(), field)
def handler(event):
    """Decorator factory letting a signal callback double as a class decorator.

    The wrapped callback stays callable as-is, but gains an ``apply``
    attribute which connects it to `event` for a given sender class and
    returns that class (so models can opt in with ``@callback.apply``).
    """
    def wrap(callback):
        def attach_to(model_cls):
            event.connect(callback, sender=model_cls)
            return model_cls
        callback.apply = attach_to
        return callback
    return wrap
@handler(db.pre_save)
def update_modified(sender, document):
    """pre_save signal hook: stamp `document.modified` with current UTC."""
    document.modified = datetime.datetime.utcnow()
class Command(object):
    """Accumulates shell lines and runs them as one bash script.

    Every script is prefixed with `set -e` so the first failing line
    aborts the whole run.
    """
    def __init__(self, *args):
        self.lines = ['set -e']
        self.lines += list(args)

    def get_cmd(self):
        """The full script: accumulated lines joined with ';\\n'."""
        return ";\n".join(self.lines)

    def next(self, *args):
        """Append more lines; returns self for chaining."""
        for line in args:
            self.lines.append(line)
        return self

    def run(self, output_to_pile=False):
        """Execute the script in bash; exits the process on failure.

        :param output_to_pile: when True, capture stdout via a pipe
            instead of inheriting the parent's stdout.
        """
        pipe_target = subprocess.PIPE if output_to_pile else None
        print('\nExecuting: \n%s\n' % self.get_cmd())
        proc = subprocess.Popen(self.get_cmd(), stdout=pipe_target, shell=True,
                executable='/bin/bash', env=os.environ.copy())
        out, error = proc.communicate()
        # exit if the proc has error
        if proc.returncode:
            sys.exit(1)
def run_cmd(cmd):
    """Run a single shell command through Command (exits on failure)."""
    Command(cmd).run()
# Timezone used for displaying datetimes; storage stays naive UTC.
LOCAL_TZ = pytz.timezone("Asia/Shanghai")
def to_utc(dt):
    """Interpret naive `dt` as Asia/Shanghai wall time and return the
    equivalent timezone-aware UTC datetime."""
    return LOCAL_TZ.localize(dt, is_dst=None).astimezone(pytz.utc)
def to_local(dt):
    """Attach UTC to naive `dt` and convert it to Asia/Shanghai time."""
    return pytz.UTC.localize(dt).astimezone(LOCAL_TZ)
def format_date(value, format='%Y-%m-%d %H:%M'):
    """Render a naive UTC datetime as an Asia/Shanghai local-time string;
    returns '' for None."""
    if value is None:
        return ''
    localized = pytz.UTC.localize(value).astimezone(LOCAL_TZ)
    return localized.strftime(format)
def isodate_to_local(datestr):
    """Parse an ISO-8601-style UTC timestamp string (dropping any '+'
    offset and fractional seconds) and format it as local time."""
    base = datestr.split('+')[0].split('.')[0]
    return format_date(datetime.datetime.strptime(base, '%Y-%m-%dT%H:%M:%S'))
class AttrDict(dict):
    """A dict whose keys can also be read and written as attributes."""
    def __getattr__(self, attr):
        if attr in self:
            return self[attr]
        raise AttributeError("%r object has no attribute %r" %
                             (self.__class__, attr))
    def __setattr__(self, attr, value):
        self[attr] = value
    def to_dict(self):
        """Return self (already a dict) for API symmetry."""
        return self
# def format_date(v):
# return type(v) in [str, unicode] and datetime.datetime.strptime(v, '%Y-%m-%d') or v
def ignore_error(fn):
    """Wrap `fn` so any exception it raises is swallowed and the call
    returns None instead (best-effort semantics, opted into by caller)."""
    def handle(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except BaseException:
            # deliberately broad -- same reach as the historic bare except
            pass
    return handle
def groupby(l, fn):
    """Group items of `l` by key function `fn`.

    Unlike raw itertools.groupby, the input is sorted by the same key
    first, so each key appears exactly once.
    """
    import itertools
    ordered = sorted(l, key=fn)
    return itertools.groupby(ordered, fn)
def fuck_ie(fn):
    """View decorator: send IE users to a Chrome download page instead of
    running the view.

    Fix: the original referenced ``redirect`` without ever importing it,
    so any MSIE visitor raised NameError; ``redirect`` is now pulled in
    via the top-of-file flask import.
    """
    def handle(*args, **kwargs):
        if 'MSIE' in request.user_agent.string:
            return redirect('http://www.google.cn/intl/zh-CN/chrome/browser/')
        return fn(*args, **kwargs)
    return handle
def cprint(obj, color=None, background=False):
    """Print `obj` to stdout wrapped in an ANSI color escape.

    :param color: ANSI SGR code (30-37 foreground, 40-47 background);
        a random one from the relevant range is picked when omitted.
    :param background: when color is omitted, choose from the background
        range (base 40) instead of the foreground range (base 30).
    :returns: the formatted string that was written (handy for testing;
        callers previously ignored the None return).
    """
    import random
    import sys
    base = 40 if background else 30
    if color is None:
        color = int(random.random() * 7 + base)
    # local renamed from `str`, which shadowed the builtin
    text = '\x1b[%sm%s\x1b[0m\n' % (color, obj)
    sys.stdout.write(text)
    return text
def validate_id_card_no(number):
    """Validate a mainland-China 18-character resident ID number.

    Checks the embedded birth date (chars 6-13, YYYYMMDD, between 1900
    and now) and the mod-11-2 checksum over all 18 characters, where
    'X'/'x' counts as 10 and position weights are 2**(17-i) % 11.

    :returns: True for a valid number, False otherwise.
    """
    date_str = number[6:14]
    try:
        birth = datetime.datetime.strptime(date_str, '%Y%m%d')
    except ValueError:
        return False
    if not datetime.datetime(1900, 1, 1) < birth < datetime.datetime.now():
        return False
    try:
        # eager list (not a lazy map) so bad characters raise *inside*
        # this try on Python 3 as well as Python 2
        num_list = [10 if x in 'xX' else int(x) for x in number]
    except (ValueError, TypeError):
        return False
    weights = [2 ** pos % 11 * digit
               for pos, digit in zip(range(17, -1, -1), num_list)]
    return sum(weights) % 11 == 1
# Shorthand rounders: round to 1 / 2 decimal places.
round1 = partial(round, ndigits=1)
round2 = partial(round, ndigits=2)
def round_to_string(v):
    """Render `v` rounded to two decimal places as a string, e.g. '3.14'."""
    rounded = round2(v)
    return '{:.2f}'.format(rounded)
def ceil(v):
    """math.ceil with the result coerced to int (Python 2's math.ceil
    returns a float)."""
    rounded_up = math.ceil(v)
    return int(rounded_up)
class Pagination(object):
    """One-based pagination over a list or queryset-like object.

    Anything sliceable is accepted; the total size comes from a
    zero-argument .count() when available (querysets), else len().
    """
    def __init__(self, objects, page, per_page):
        self.page = page
        self.per_page = per_page
        self.objects = objects
        try:
            # queryset-style .count() takes no arguments
            self.total_count = objects.count()
        except TypeError:
            # plain sequences: list.count() demands an argument
            self.total_count = len(objects)

    @property
    def slice(self):
        """The objects belonging to the current (1-based) page."""
        low = (self.page - 1) * self.per_page
        high = low + self.per_page
        return self.objects[max(low, 0):max(high, 0)]

    @property
    def pages(self):
        """Total page count."""
        return ceil(self.total_count / float(self.per_page))

    @property
    def has_prev(self):
        return self.page > 1

    @property
    def has_next(self):
        return self.page < self.pages

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a windowed pager; None marks a gap."""
        previous = 0
        for num in range(1, self.pages + 1):
            near_left = num <= left_edge
            in_window = (self.page - left_current - 1) < num < (self.page + right_current)
            near_right = num > self.pages - right_edge
            if near_left or in_window or near_right:
                if previous + 1 != num:
                    yield None
                yield num
                previous = num
def url_for_other_page(page):
    """URL for the current endpoint with only the `page` view-arg swapped."""
    view_args = dict(request.view_args)
    view_args['page'] = page
    return url_for(request.endpoint, **view_args)
# Compiled banned-words pattern, built lazily on first use.
BAN_REGEX = None
def banwords_check(content):
    """Search `content` for any banned word from etc/banwords.txt.

    Returns a regex match object (truthy) on a hit, None otherwise.
    NOTE(review): when the banwords file cannot be read, this returns
    `content` itself -- truthy for any non-empty input, which callers
    treating the result as "banned?" will read as a false positive;
    confirm intent.  The .decode('utf8') on the file contents also
    assumes Python 2 byte strings.
    """
    import re
    global BAN_REGEX
    banwords_file = os.path.join(
        current_app.root_path, 'etc', 'banwords.txt')
    if not BAN_REGEX:
        try:
            words = open(banwords_file).read().decode('utf8')
        except:
            return content
        BAN_REGEX = re.compile('(%s)' % words)
    return BAN_REGEX.search(content)
def checked_g_get(key, default_value):
    """Fetch ``key`` from flask's ``g``, clamping capped keys to the default.

    If ``key`` has a registered upper limit and the stored value exceeds
    both that limit and ``default_value``, ``default_value`` is returned
    instead (e.g. so a request cannot inflate the page size at will).
    """
    limit = {
        'num_per_page': 20,
    }
    ret = g.get(key, default_value)
    # Look the cap up by the requested key.  The original hard-coded
    # limit['num_per_page'] here, which is identical today (one entry) but
    # would silently compare against the wrong cap as soon as a second
    # entry were added to ``limit``.
    cap = limit.get(key)
    if cap is not None and ret > cap and ret > default_value:
        return default_value
    return ret
|
{
"content_hash": "100169a3c38b9c71b62d402198beffcf",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 93,
"avg_line_length": 23.794117647058822,
"alnum_prop": 0.5797280593325093,
"repo_name": "seasonstar/bibi",
"id": "de93df64a3a2cf2d2ec4221642b60830f15032bd",
"size": "10593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/utils/utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "CSS",
"bytes": "278402"
},
{
"name": "HTML",
"bytes": "227750"
},
{
"name": "JavaScript",
"bytes": "2720066"
},
{
"name": "PHP",
"bytes": "20139"
},
{
"name": "Python",
"bytes": "375043"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from fir_articles import views
# Route table for the fir_articles plugin.  The ``name=`` values are the
# identifiers used by reverse() / {% url %} lookups elsewhere in the app.
urlpatterns = [
    url(r'^$', views.list_articles, name='list'),
    url(r'^(?P<article_id>\d+)/$', views.details, name='details'),
    url(r'^new/$', views.new_article, name='new'),
    # Comment creation / update / deletion.
    url(r'^(?P<article_id>\d+)/comment/$', views.comment, name='comment'),
    url(r'^comment/(?P<comment_id>\d+)$', views.update_comment, name='update_comment'),
    url(r'^comment/$', views.update_comment, name='update_comment_base'),
    url(r'^(?P<article_id>\d+)/comment/(?P<comment_id>\d+)/delete/$', views.delete_comment, name='delete_comment'),
    url(r'^(?P<article_id>\d+)/edit/$', views.edit_article, name='edit'),
    # Status is a single-letter code; the regex restricts it to O, A or D.
    url(r'^(?P<article_id>\d+)/status/(?P<status>[OAD])$', views.change_status, name='change_status'),
    url(r'^(?P<article_id>\d+)/attribute$', views.add_attribute, name='add_attribute'),
    url(r'^(?P<article_id>\d+)/attribute/(?P<attribute_id>\d+)/delete/$', views.delete_attribute, name='delete_attribute'),
]
|
{
"content_hash": "e6cab4129ecbf85df5db25de3e9db773",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 123,
"avg_line_length": 58.470588235294116,
"alnum_prop": 0.635814889336016,
"repo_name": "gcrahay/fir_articles_plugin",
"id": "76ff976aa05c0173fb64e6e0c8bef56b6471be7c",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fir_articles/urls.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18510"
},
{
"name": "Python",
"bytes": "17929"
}
],
"symlink_target": ""
}
|
import numpy as np
from pyspark import keyword_only
from pyspark.ml import Estimator, Model, Transformer, UnaryTransformer
from pyspark.ml.evaluation import Evaluator
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasMaxIter, HasRegParam
from pyspark.ml.classification import Classifier, ClassificationModel
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
from pyspark.ml.wrapper import _java2py # type: ignore
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.types import DoubleType
from pyspark.testing.utils import ReusedPySparkTestCase as PySparkTestCase
def check_params(test_self, py_stage, check_params_exist=True):
    """
    Checks common requirements for :py:class:`PySpark.ml.Params.params`:
      - set of params exist in Java and Python and are ordered by names
      - param parent has the same UID as the object's UID
      - default param value from Java matches value in Python
      - optionally check if all params from Java also exist in Python
    """
    py_stage_str = "%s %s" % (type(py_stage), py_stage)
    # Pure-Python stages have no Java twin: nothing to compare against.
    if not hasattr(py_stage, "_to_java"):
        return
    java_stage = py_stage._to_java()
    if java_stage is None:
        return
    test_self.assertEqual(py_stage.uid, java_stage.uid(), msg=py_stage_str)
    if check_params_exist:
        param_names = [p.name for p in py_stage.params]
        java_params = list(java_stage.params())
        java_param_names = [jp.name() for jp in java_params]
        # Python params are kept sorted, so compare against sorted Java names.
        test_self.assertEqual(
            param_names, sorted(java_param_names),
            "Param list in Python does not match Java for %s:\nJava = %s\nPython = %s"
            % (py_stage_str, java_param_names, param_names))
    for p in py_stage.params:
        test_self.assertEqual(p.parent, py_stage.uid)
        java_param = java_stage.getParam(p.name)
        py_has_default = py_stage.hasDefault(p)
        java_has_default = java_stage.hasDefault(java_param)
        test_self.assertEqual(py_has_default, java_has_default,
                              "Default value mismatch of param %s for Params %s"
                              % (p.name, str(py_stage)))
        if py_has_default:
            if p.name == "seed":
                continue # Random seeds between Spark and PySpark are different
            java_default = _java2py(test_self.sc,
                                    java_stage.clear(java_param).getOrDefault(java_param))
            # clear() both sides so getOrDefault returns the *default*,
            # not a value previously set on the stage.
            py_stage.clear(p)
            py_default = py_stage.getOrDefault(p)
            # equality test for NaN is always False, so map NaN defaults to
            # sentinel strings that compare equal iff both sides are NaN.
            if isinstance(java_default, float) and np.isnan(java_default):
                java_default = "NaN"
                py_default = "NaN" if np.isnan(py_default) else "not NaN"
            test_self.assertEqual(
                java_default, py_default,
                "Java default %s != python default %s of param %s for Params %s"
                % (str(java_default), str(py_default), p.name, str(py_stage)))
class SparkSessionTestCase(PySparkTestCase):
    """Base test case providing a class-wide SparkSession as ``cls.spark``."""
    @classmethod
    def setUpClass(cls):
        # PySparkTestCase.setUpClass creates cls.sc; wrap it in a session.
        PySparkTestCase.setUpClass()
        cls.spark = SparkSession(cls.sc)
    @classmethod
    def tearDownClass(cls):
        PySparkTestCase.tearDownClass()
        cls.spark.stop()
class MockDataset(DataFrame):
    """Minimal DataFrame stand-in; ``index`` counts how often it was transformed."""
    def __init__(self):
        # Deliberately does NOT call DataFrame.__init__; only ``index``
        # is used by the mock transformers/estimators below.
        self.index = 0
class HasFake(Params):
    """Mixin adding a do-nothing ``fake`` param, for testing Params plumbing."""
    def __init__(self):
        super(HasFake, self).__init__()
        self.fake = Param(self, "fake", "fake param")
    def getFake(self):
        # Conventional PySpark-style getter for the ``fake`` param.
        return self.getOrDefault(self.fake)
class MockTransformer(Transformer, HasFake):
    """Transformer that records the dataset's ``index`` and increments it."""
    def __init__(self):
        super(MockTransformer, self).__init__()
        # Index of the last dataset seen by _transform (None until called).
        self.dataset_index = None
    def _transform(self, dataset):
        self.dataset_index = dataset.index
        dataset.index += 1
        return dataset
class MockUnaryTransformer(UnaryTransformer, DefaultParamsReadable, DefaultParamsWritable):
    """UnaryTransformer that adds a constant ``shift`` to a Double column."""
    shift = Param(Params._dummy(), # type: ignore
                  "shift", "The amount by which to shift " +
                  "data in a DataFrame",
                  typeConverter=TypeConverters.toFloat)
    def __init__(self, shiftVal=1):
        super(MockUnaryTransformer, self).__init__()
        # Default is 1; the constructor argument overrides it explicitly.
        self._setDefault(shift=1)
        self._set(shift=shiftVal)
    def getShift(self):
        return self.getOrDefault(self.shift)
    def setShift(self, shift):
        self._set(shift=shift)
    def createTransformFunc(self):
        # Bind the current value into the closure so later setShift calls
        # do not affect an already-created transform function.
        shiftVal = self.getShift()
        return lambda x: x + shiftVal
    def outputDataType(self):
        return DoubleType()
    def validateInputType(self, inputType):
        if inputType != DoubleType():
            raise TypeError("Bad input type: {}. ".format(inputType) +
                            "Requires Double.")
class MockEstimator(Estimator, HasFake):
    """Estimator that records the dataset's ``index`` and yields a MockModel."""
    def __init__(self):
        super(MockEstimator, self).__init__()
        # Index of the last dataset seen by _fit (None until called).
        self.dataset_index = None
    def _fit(self, dataset):
        self.dataset_index = dataset.index
        model = MockModel()
        # Copy this estimator's param values onto the produced model,
        # as real Spark estimators do.
        self._copyValues(model)
        return model
class MockModel(MockTransformer, Model, HasFake):
    """Model produced by MockEstimator; transform behavior from MockTransformer."""
    pass
class _DummyLogisticRegressionParams(HasMaxIter, HasRegParam):
    """Params (maxIter, regParam) shared by the dummy LR estimator and model."""
    def setMaxIter(self, value):
        return self._set(maxIter=value)
    def setRegParam(self, value):
        return self._set(regParam=value)
# This is a dummy LogisticRegression used in test for python backend estimator/model
class DummyLogisticRegression(Classifier, _DummyLogisticRegressionParams,
                              DefaultParamsReadable, DefaultParamsWritable):
    """Pure-Python LogisticRegression stand-in; ``_fit`` does no training."""
    @keyword_only
    def __init__(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                 maxIter=100, regParam=0.0, rawPredictionCol="rawPrediction"):
        super(DummyLogisticRegression, self).__init__()
        # @keyword_only stashes the caller's kwargs in self._input_kwargs.
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @keyword_only
    def setParams(self, *, featuresCol="features", labelCol="label", predictionCol="prediction",
                  maxIter=100, regParam=0.0, rawPredictionCol="rawPrediction"):
        kwargs = self._input_kwargs
        self._set(**kwargs)
        return self
    def _fit(self, dataset):
        # Do nothing but create a dummy model
        return self._copyValues(DummyLogisticRegressionModel())
class DummyLogisticRegressionModel(ClassificationModel, _DummyLogisticRegressionParams,
                                   DefaultParamsReadable, DefaultParamsWritable):
    """Model produced by DummyLogisticRegression; always predicts label 1.0."""
    def __init__(self):
        super(DummyLogisticRegressionModel, self).__init__()
    def _transform(self, dataset):
        # A dummy transform impl which always predict label 1
        from pyspark.sql.functions import array, lit
        from pyspark.ml.functions import array_to_vector
        rawPredCol = self.getRawPredictionCol()
        if rawPredCol:
            # Fixed raw scores: strongly in favor of class 1.
            dataset = dataset.withColumn(
                rawPredCol, array_to_vector(array(lit(-100.0), lit(100.0))))
        predCol = self.getPredictionCol()
        if predCol:
            dataset = dataset.withColumn(predCol, lit(1.0))
        return dataset
    @property
    def numClasses(self):
        # a dummy implementation for test.
        return 2
    @property
    def intercept(self):
        # a dummy implementation for test.
        return 0.0
    # This class only used in test. The following methods/properties are not used in tests.
    @property
    def coefficients(self):
        raise NotImplementedError()
    def predictRaw(self, value):
        raise NotImplementedError()
    def numFeatures(self):
        raise NotImplementedError()
    def predict(self, value):
        raise NotImplementedError()
class DummyEvaluator(Evaluator, DefaultParamsReadable, DefaultParamsWritable):
    """Evaluator that always reports a metric of 1.0."""
    def _evaluate(self, dataset):
        # a dummy implementation for test.
        return 1.0
|
{
"content_hash": "7c7baa446038172a1431b5660b157803",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 96,
"avg_line_length": 34.18884120171674,
"alnum_prop": 0.6416018076826513,
"repo_name": "milliman/spark",
"id": "d6edf9d64af49465c1cd82f3357a49f8e0cdccf8",
"size": "8748",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "python/pyspark/testing/mlutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "52464"
},
{
"name": "Batchfile",
"bytes": "27405"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "24622"
},
{
"name": "Dockerfile",
"bytes": "9429"
},
{
"name": "HTML",
"bytes": "41560"
},
{
"name": "HiveQL",
"bytes": "1859465"
},
{
"name": "Java",
"bytes": "4316296"
},
{
"name": "JavaScript",
"bytes": "221431"
},
{
"name": "Jupyter Notebook",
"bytes": "4310524"
},
{
"name": "Makefile",
"bytes": "2374"
},
{
"name": "PLpgSQL",
"bytes": "352905"
},
{
"name": "PowerShell",
"bytes": "3882"
},
{
"name": "Python",
"bytes": "7191174"
},
{
"name": "R",
"bytes": "1265563"
},
{
"name": "ReScript",
"bytes": "240"
},
{
"name": "Roff",
"bytes": "27389"
},
{
"name": "Scala",
"bytes": "39048900"
},
{
"name": "Shell",
"bytes": "229968"
},
{
"name": "Thrift",
"bytes": "2016"
},
{
"name": "q",
"bytes": "111129"
}
],
"symlink_target": ""
}
|
from datadog.dogshell.common import report_errors, report_warnings, find_localhost
from datadog import api
class MetricClient(object):
    """``dog metric`` subcommand: post metric points to the Datadog API."""
    @classmethod
    def setup_parser(cls, subparsers):
        """Register the ``metric post`` argparse subcommand and its options."""
        parser = subparsers.add_parser('metric', help="Post metrics.")
        verb_parsers = parser.add_subparsers(title='Verbs')
        post_parser = verb_parsers.add_parser('post', help="Post metrics")
        post_parser.add_argument('name', help="metric name")
        post_parser.add_argument('value', help="metric value (integer or decimal value)",
                                 type=float)
        post_parser.add_argument('--host', help="scopes your metric to a specific host",
                                 default=None)
        post_parser.add_argument('--device', help="scopes your metric to a specific device",
                                 default=None)
        post_parser.add_argument('--tags', help="comma-separated list of tags", default=None)
        post_parser.add_argument('--localhostname', help="same as --host=`hostname`"
                                 " (overrides --host)", action='store_true')
        post_parser.add_argument('--type', help="type of the metric - gauge(32bit float)"
                                 " or counter(64bit integer)", default=None)
        parser.set_defaults(func=cls._post)
    @classmethod
    def _post(cls, args):
        """Send one metric point; API warnings/errors are reported to the user."""
        api._timeout = args.timeout
        if args.localhostname:
            host = find_localhost()
        else:
            host = args.host
        if args.tags:
            # Strip whitespace, drop empties, de-duplicate and sort for
            # deterministic tag ordering.
            tags = sorted(set([t.strip() for t in
                               args.tags.split(',') if t]))
        else:
            tags = None
        res = api.Metric.send(
            metric=args.name, points=args.value, host=host,
            device=args.device, tags=tags, metric_type=args.type)
        report_warnings(res)
        report_errors(res)
|
{
"content_hash": "a5671ea999cf1c77f13e4b15c4100fa8",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 93,
"avg_line_length": 44.48837209302326,
"alnum_prop": 0.5734448510193414,
"repo_name": "percipient/datadogpy",
"id": "d44b2a0238a608d353815b6cd106c57a28b2dc03",
"size": "1913",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "datadog/dogshell/metric.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "259510"
},
{
"name": "Ruby",
"bytes": "333"
}
],
"symlink_target": ""
}
|
"""Unit tests for DB API."""
import mock
from oslo.config import cfg
from oslo.utils import importutils
from oslo.db import api
from oslo.db import exception
from tests import utils as test_utils
# These tests require sqlalchemy; fail fast with a clear message if the
# optional dependency is not installed.
sqla = importutils.try_import('sqlalchemy')
if not sqla:
    raise ImportError("Unable to import module 'sqlalchemy'.")
def get_backend():
    """Backend-module entry point looked up by oslo.db's DBAPI loader."""
    return DBAPI()
class DBAPI(object):
    """Fake DB backend whose methods fail ``error_counter`` times, then succeed.

    ``error_counter`` is assigned by each test before calling (see
    DBReconnectTestCase), so it tracks how many simulated disconnects remain.
    """
    def _api_raise(self, *args, **kwargs):
        """Simulate raising a database-has-gone-away error
        This method creates a fake OperationalError with an ID matching
        a valid MySQL "database has gone away" situation. It also decrements
        the error_counter so that we can artificially keep track of
        how many times this function is called by the wrapper. When
        error_counter reaches zero, this function returns True, simulating
        the database becoming available again and the query succeeding.
        """
        if self.error_counter > 0:
            self.error_counter -= 1
            orig = sqla.exc.DBAPIError(False, False, False)
            # 2006 is MySQL's "server has gone away" error code.
            orig.args = [2006, 'Test raise operational error']
            e = exception.DBConnectionError(orig)
            raise e
        else:
            return True
    def api_raise_default(self, *args, **kwargs):
        # Undecorated: only retried when the DBAPI was built with
        # use_db_reconnect=True.
        return self._api_raise(*args, **kwargs)
    @api.safe_for_db_retry
    def api_raise_enable_retry(self, *args, **kwargs):
        # Decorated: explicitly marked as safe for the retry wrapper.
        return self._api_raise(*args, **kwargs)
def api_class_call1(_self, *args, **kwargs):
    """Echo back the positional/keyword args so tests can verify dispatch."""
    return args, kwargs
class DBAPITestCase(test_utils.BaseTestCase):
    """Tests for loading and dispatching backends through oslo.db's DBAPI."""
    def test_dbapi_full_path_module_method(self):
        # This very module acts as the backend (via get_backend above).
        dbapi = api.DBAPI('tests.test_api')
        result = dbapi.api_class_call1(1, 2, kwarg1='meow')
        expected = ((1, 2), {'kwarg1': 'meow'})
        self.assertEqual(expected, result)
    def test_dbapi_unknown_invalid_backend(self):
        self.assertRaises(ImportError, api.DBAPI, 'tests.unit.db.not_existent')
    def test_dbapi_lazy_loading(self):
        dbapi = api.DBAPI('tests.test_api', lazy=True)
        # With lazy=True the backend module must not be imported until the
        # first attribute access.
        self.assertIsNone(dbapi._backend)
        dbapi.api_class_call1(1, 'abc')
        self.assertIsNotNone(dbapi._backend)
    def test_dbapi_from_config(self):
        conf = cfg.ConfigOpts()
        dbapi = api.DBAPI.from_config(conf,
                                      backend_mapping={'sqlalchemy': __name__})
        self.assertIsNotNone(dbapi._backend)
class DBReconnectTestCase(DBAPITestCase):
    """Tests for the retry-on-disconnect wrapper around backend methods.

    The fake backend (DBAPI above) raises DBConnectionError
    ``error_counter`` times before succeeding, letting each test control
    exactly how many reconnect attempts are needed.
    """
    def setUp(self):
        super(DBReconnectTestCase, self).setUp()
        self.test_db_api = DBAPI()
        # Make get_backend() return our shared fake instance so the tests
        # can inspect/prime its error_counter.
        patcher = mock.patch(__name__ + '.get_backend',
                             return_value=self.test_db_api)
        patcher.start()
        self.addCleanup(patcher.stop)
    def test_raise_connection_error(self):
        self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__})
        self.test_db_api.error_counter = 5
        self.assertRaises(exception.DBConnectionError, self.dbapi._api_raise)
    def test_raise_connection_error_decorated(self):
        # Without use_db_reconnect, even a decorated method is not retried.
        self.dbapi = api.DBAPI('sqlalchemy', {'sqlalchemy': __name__})
        self.test_db_api.error_counter = 5
        self.assertRaises(exception.DBConnectionError,
                          self.dbapi.api_raise_enable_retry)
        self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry')
    def test_raise_connection_error_enabled(self):
        # use_db_reconnect alone does not retry undecorated methods.
        self.dbapi = api.DBAPI('sqlalchemy',
                               {'sqlalchemy': __name__},
                               use_db_reconnect=True)
        self.test_db_api.error_counter = 5
        self.assertRaises(exception.DBConnectionError,
                          self.dbapi.api_raise_default)
        self.assertEqual(4, self.test_db_api.error_counter, 'Unexpected retry')
    def test_retry_one(self):
        self.dbapi = api.DBAPI('sqlalchemy',
                               {'sqlalchemy': __name__},
                               use_db_reconnect=True,
                               retry_interval=1)
        try:
            func = self.dbapi.api_raise_enable_retry
            self.test_db_api.error_counter = 1
            self.assertTrue(func(), 'Single retry did not succeed.')
        except Exception:
            self.fail('Single retry raised an un-wrapped error.')
        self.assertEqual(
            0, self.test_db_api.error_counter,
            'Counter not decremented, retry logic probably failed.')
    def test_retry_two(self):
        self.dbapi = api.DBAPI('sqlalchemy',
                               {'sqlalchemy': __name__},
                               use_db_reconnect=True,
                               retry_interval=1,
                               inc_retry_interval=False)
        try:
            func = self.dbapi.api_raise_enable_retry
            self.test_db_api.error_counter = 2
            self.assertTrue(func(), 'Multiple retry did not succeed.')
        except Exception:
            self.fail('Multiple retry raised an un-wrapped error.')
        self.assertEqual(
            0, self.test_db_api.error_counter,
            'Counter not decremented, retry logic probably failed.')
    def test_retry_until_failure(self):
        # More failures (5) than max_retries (3): the wrapper must give up
        # and surface a DBError.
        self.dbapi = api.DBAPI('sqlalchemy',
                               {'sqlalchemy': __name__},
                               use_db_reconnect=True,
                               retry_interval=1,
                               inc_retry_interval=False,
                               max_retries=3)
        func = self.dbapi.api_raise_enable_retry
        self.test_db_api.error_counter = 5
        self.assertRaises(
            exception.DBError, func,
            'Retry of permanent failure did not throw DBError exception.')
        self.assertNotEqual(
            0, self.test_db_api.error_counter,
            'Retry did not stop after sql_max_retries iterations.')
|
{
"content_hash": "0e1e634225998f359a10b2c53e8b5dca",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 36.39506172839506,
"alnum_prop": 0.5851424694708277,
"repo_name": "varunarya10/oslo.db",
"id": "2168cd5a96486d329229dabdef4aba0927c2bae9",
"size": "6533",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "348326"
}
],
"symlink_target": ""
}
|
import py
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.rmodel import inputconst
from rpython.tool.ansi_print import ansi_log
from rpython.translator.simplify import get_graph
# Module-wide logger for the backend optimizations; output goes through
# py.log with ANSI colouring.
log = py.log.Producer("backendopt")
py.log.setconsumer("backendopt", ansi_log)
def graph_operations(graph):
    """Yield every operation of every block in ``graph``, in block order."""
    for current_block in graph.iterblocks():
        for operation in current_block.operations:
            yield operation
def all_operations(graphs):
    """Yield every operation of every block of every graph in ``graphs``."""
    for current_graph in graphs:
        for current_block in current_graph.iterblocks():
            for operation in current_block.operations:
                yield operation
def annotate(translator, func, result, args):
    """Annotate/rtype helper ``func`` and return a Constant pointing at it.

    ``result`` and ``args`` are example low-level values whose
    ``concretetype`` supplies the function's low-level signature.
    """
    args = [arg.concretetype for arg in args]
    graph = translator.rtyper.annotate_helper(func, args)
    fptr = lltype.functionptr(lltype.FuncType(args, result.concretetype), func.func_name, graph=graph)
    c = inputconst(lltype.typeOf(fptr), fptr)
    return c
def var_needsgc(var):
    """Return True if ``var`` holds a GC-managed low-level pointer."""
    vartype = var.concretetype
    return isinstance(vartype, lltype.Ptr) and vartype._needsgc()
def find_calls_from(translator, graph, memo=None):
    """Return ``[(block, called_graph)]`` pairs for all calls made by ``graph``.

    When a ``memo`` dict is supplied, results are cached in it keyed by
    graph, and a cached entry is returned as-is on later calls.
    """
    if memo and graph in memo:
        return memo[graph]
    # list() instead of the redundant [i for i in ...] copy-comprehension.
    res = list(_find_calls_from(translator, graph))
    if memo is not None:
        memo[graph] = res
    return res
def _find_calls_from(translator, graph):
    """Yield (block, called_graph) for each direct/indirect call in ``graph``."""
    for block in graph.iterblocks():
        for op in block.operations:
            if op.opname == "direct_call":
                # get_graph resolves the function constant to its graph,
                # or None when the callee has no graph.
                called_graph = get_graph(op.args[0], translator)
                if called_graph is not None:
                    yield block, called_graph
            if op.opname == "indirect_call":
                # The last argument of an indirect_call is the list of
                # candidate callee graphs (or None when unknown).
                graphs = op.args[-1].value
                if graphs is not None:
                    for called_graph in graphs:
                        yield block, called_graph
def find_backedges(graph, block=None, seen=None, seeing=None):
    """Return the links of ``graph`` that close a cycle (the backedges).

    Depth-first walk from the start block: ``seen`` holds every visited
    block, ``seeing`` only the blocks on the current DFS path, so a link
    whose target is on the path is a backedge.
    """
    if block is None:
        block = graph.startblock
    if seen is None:
        seen = set([block])
    if seeing is None:
        seeing = set()
    result = []
    seeing.add(block)
    for link in block.exits:
        target = link.target
        if target not in seen:
            seen.add(target)
            result.extend(find_backedges(graph, target, seen, seeing))
        elif target in seeing:
            result.append(link)
    seeing.remove(block)
    return result
def compute_reachability(graph):
    """Map every block of ``graph`` to the set of blocks reachable from it."""
    reachable = {}
    all_blocks = list(graph.iterblocks())
    # Reversed order should make the reuse path more likely.
    for block in reversed(all_blocks):
        reach = set()
        scheduled = [block]
        while scheduled:
            current = scheduled.pop()
            for link in current.exits:
                target = link.target
                known = reachable.get(target)
                if known is not None:
                    # Already computed: fold in the cached set wholesale.
                    reach.add(target)
                    reach |= known
                elif target not in reach:
                    reach.add(target)
                    scheduled.append(target)
        reachable[block] = reach
    return reachable
def find_loop_blocks(graph):
    """find the blocks in a graph that are part of a loop"""
    # Maps block -> loop header (a backedge target) for every block that
    # lies on a path from a header to the source of its backedge.
    loop = {}
    reachable = compute_reachability(graph)
    for backedge in find_backedges(graph):
        start = backedge.target
        end = backedge.prevblock
        loop[start] = start
        loop[end] = start
        scheduled = [start]
        seen = {}
        while scheduled:
            current = scheduled.pop()
            # ``current`` is inside the loop iff the backedge's source
            # block is still reachable from it.
            connects = end in reachable[current]
            seen[current] = True
            if connects:
                loop[current] = start
            for link in current.exits:
                if link.target not in seen:
                    scheduled.append(link.target)
    return loop
def md5digest(translator):
    """Return {graph name: md5 digest} fingerprinting each graph's operations.

    NOTE(review): feeds native ``str`` to ``md5.update()``, so this is
    Python-2 only as written (Python 3 would require encoding to bytes).
    """
    from hashlib import md5
    graph2digest = {}
    for graph in translator.graphs:
        m = md5()
        for op in graph_operations(graph):
            # Hash the opname/result plus the repr of every argument.
            m.update(op.opname + str(op.result))
            for a in op.args:
                m.update(str(a))
        graph2digest[graph.name] = m.digest()
    return graph2digest
|
{
"content_hash": "b6da1b3c1cbd4bd454df03b2624387ed",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 104,
"avg_line_length": 32.6015625,
"alnum_prop": 0.5940570333093698,
"repo_name": "jptomo/rpython-lang-scheme",
"id": "0f9b0202b16365908909fa077eaced77cb6c20bf",
"size": "4173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpython/translator/backendopt/support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "161293"
},
{
"name": "Batchfile",
"bytes": "5289"
},
{
"name": "C",
"bytes": "335765"
},
{
"name": "C++",
"bytes": "12638"
},
{
"name": "Emacs Lisp",
"bytes": "3149"
},
{
"name": "HCL",
"bytes": "155"
},
{
"name": "Makefile",
"bytes": "6988"
},
{
"name": "Objective-C",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "16129160"
},
{
"name": "Scheme",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "721"
},
{
"name": "VimL",
"bytes": "1107"
}
],
"symlink_target": ""
}
|
""" Bokeh comes with a number of interactive tools.
There are five types of tool interactions:
.. hlist::
:columns: 5
* Pan/Drag
* Click/Tap
* Scroll/Pinch
* Actions
* Inspectors
The first three comprise the category of gesture tools, and only
one tool for each gesture can be active at any given time. The active
tool is indicated on the toolbar by a highlight next to the tool.
Actions are immediate or modal operations that are only activated when
their button in the toolbar is pressed. Inspectors are passive tools that
merely report information or annotate the plot in some way, and may
always be active regardless of what other tools are currently active.
"""
from __future__ import absolute_import
from ..model import Model
from ..core.properties import abstract, Float, Color
from ..core.properties import (
Any, Bool, String, Enum, Instance, Either, List, Dict, Tuple
)
from ..core.enums import Dimension
from .annotations import BoxAnnotation, PolyAnnotation
from .renderers import Renderer
from .callbacks import Callback
class ToolEvents(Model):
    """ Holds a list of geometry dicts produced by tool interactions
    (see the selection tools' ``callback`` documentation for the fields
    a geometry contains).
    """
    geometries = List(Dict(String, Any))
@abstract
class Tool(Model):
    """ A base class for all interactive tool types. ``Tool`` is
    not generally useful to instantiate on its own.
    """
    # NOTE(review): the string model reference defers resolving Plot,
    # presumably to avoid a circular import — confirm against bokeh docs.
    plot = Instance(".models.plots.Plot", help="""
    The Plot that this tool will act on.
    """)
class PanTool(Tool):
    """ *toolbar icon*: |pan_icon|
    The pan tool allows the user to pan a Plot by left-dragging
    a mouse, or on touch devices by dragging a finger or stylus, across
    the plot region.
    The pan tool also activates the border regions of a Plot for "single
    axis" panning. For instance, dragging in the vertical border or axis
    will effect a pan in the vertical direction only, with the horizontal
    dimension kept fixed.
    .. |pan_icon| image:: /_images/icons/Pan.png
        :height: 18pt
    """
    # Default enables both axes; pass e.g. ["width"] for horizontal-only pan.
    dimensions = List(Enum(Dimension), default=["width", "height"], help="""
    Which dimensions the pan tool is constrained to act in. By default
    the pan tool will pan in any dimension, but can be configured to only
    pan horizontally across the width of the plot, or vertically across the
    height of the plot.
    """)
class WheelZoomTool(Tool):
    """ *toolbar icon*: |wheel_zoom_icon|
    The wheel zoom tool will zoom the plot in and out, centered on the
    current mouse location.
    The wheel zoom tool also activates the border regions of a Plot for
    "single axis" zooming. For instance, zooming in the vertical border or
    axis will effect a zoom in the vertical direction only, with the
    horizontal dimension kept fixed.
    .. |wheel_zoom_icon| image:: /_images/icons/WheelZoom.png
        :height: 18pt
    """
    # Default enables both axes; pass e.g. ["width"] for horizontal-only zoom.
    dimensions = List(Enum(Dimension), default=["width", "height"], help="""
    Which dimensions the wheel zoom tool is constrained to act in. By
    default the wheel zoom tool will zoom in any dimension, but can be
    configured to only zoom horizontally across the width of the plot, or
    vertically across the height of the plot.
    """)
class PreviewSaveTool(Tool):
    """ *toolbar icon*: |save_icon|
    The preview/save tool is an action. When activated in the toolbar, the
    tool presents a modal dialog with an image reproduction of the Plot, which
    may be saved as a png image by right clicking on the image.
    .. note::
        Work is ongoing to support headless (svg, png) image creation without
        requiring user interaction. See :bokeh-issue:`538` to track progress
        or contribute.
    .. |save_icon| image:: /_images/icons/Save.png
        :height: 18pt
    """
    # Action tool: no configurable properties of its own.
class ResetTool(Tool):
    """ *toolbar icon*: |reset_icon|
    The reset tool is an action. When activated in the toolbar, the tool
    resets the data bounds of the plot to their values when the plot was
    initially created.
    .. note::
        This tool does not also reset the plot canvas size, if the plot
        has been resized using the ``ResizeTool``. That feature may be
        added in a future release.
    .. |reset_icon| image:: /_images/icons/Reset.png
        :height: 18pt
    """
    # Action tool: no configurable properties of its own.
class ResizeTool(Tool):
    """ *toolbar icon*: |resize_icon|
    The resize tool allows the user to left-drag a mouse or drag a finger
    to resize the entire plot area on the screen.
    .. |resize_icon| image:: /_images/icons/Resize.png
        :height: 18pt
    """
    # Gesture tool: no configurable properties of its own.
class TapTool(Tool):
    """ *toolbar icon*: |tap_select_icon|
    The tap selection tool allows the user to select at single points by
    left-clicking a mouse, or tapping with a finger.
    See :ref:`userguide_styling_selected_unselected_glyphs` for information
    on styling selected and unselected glyphs.
    .. |tap_select_icon| image:: /_images/icons/TapSelect.png
        :height: 18pt
    .. note::
        Selections can be comprised of multiple regions, even those
        made by different selection tools. Hold down the <<shift>> key
        while making a selection to append the new selection to any
        previous selection that might exist.
    """
    # ``names`` and ``renderers`` both narrow which renderers are hit-tested.
    names = List(String, help="""
    A list of names to query for. If set, only renderers that
    have a matching value for their ``name`` attribute will be used.
    """)
    renderers = List(Instance(Renderer), help="""
    An explicit list of renderers to hit test again. If unset,
    defaults to all renderers on a plot.
    """)
    callback = Instance(Callback, help="""
    A client-side action specification, like opening a URL, showing
    a dialog box, etc. See :class:`~bokeh.models.actions.Action` for details.
    """)
@abstract
class InspectTool(Tool):
    """ Base class for passive inspector tools, which only report
    information or annotate the plot and may always be active.
    """
    pass
    #active = Bool(True, help="""
    #Whether the tool is intially active or not. If set to ``False``, the user
    #will have to click tool's button to active it.
    #""")
class CrosshairTool(InspectTool):
    """ *toolbar icon*: |inspector_icon|
    The crosshair tool is a passive inspector tool. It is generally on
    at all times, but can be configured in the inspector's menu
    associated with the *toolbar icon* shown above.
    The crosshair tool draws a crosshair annotation over the plot,
    centered on the current mouse position. The crosshair tool may be
    configured to draw across only one dimension by setting the
    ``dimension`` property to only ``width`` or ``height``.
    .. |inspector_icon| image:: /_images/icons/Inspector.png
        :height: 18pt
    """
    dimensions = List(Enum(Dimension), default=["width", "height"], help="""
    Which dimensions the crosshair tool is to track. By default, both a
    vertical and horizontal line will be drawn. If only "width" is supplied,
    only a horizontal line will be drawn. If only "height" is supplied,
    only a vertical line will be drawn.
    """)
    # Styling of the crosshair lines themselves.
    line_color = Color(default="black", help="""
    A color to use to stroke paths with.
    Acceptable values are:
    - any of the 147 named `CSS colors`_, e.g ``'green'``, ``'indigo'``
    - an RGB(A) hex value, e.g., ``'#FF0000'``, ``'#44444444'``
    - a 3-tuple of integers (r,g,b) between 0 and 255
    - a 4-tuple of (r,g,b,a) where r,g,b are integers between 0..255 and a is between 0..1
    .. _CSS colors: http://www.w3schools.com/cssref/css_colornames.asp
    """)
    line_width = Float(default=1, help="""
    Stroke width in units of pixels.
    """)
    line_alpha = Float(default=1.0, help="""
    An alpha value to use to stroke paths with.
    Acceptable values are floating point numbers between 0 (transparent)
    and 1 (opaque).
    """)
def DEFAULT_BOX_OVERLAY():
    """ Default factory for the shaded box drawn while box tools drag.

    A ``def`` rather than a lambda assignment (PEP 8 E731); called per
    use so every tool gets its own fresh ``BoxAnnotation`` instance,
    exactly as the original lambda behaved.
    """
    return BoxAnnotation(
        level="overlay",
        render_mode="css",
        top_units="screen",
        left_units="screen",
        bottom_units="screen",
        right_units="screen",
        fill_color="lightgrey",
        fill_alpha=0.5,
        line_color="black",
        line_alpha=1.0,
        line_width=2,
        line_dash=[4, 4]
    )
class BoxZoomTool(Tool):
    """ *toolbar icon*: |box_zoom_icon|
    The box zoom tool allows users to define a rectangular
    region of a Plot to zoom to by dragging the mouse or a
    finger over the plot region. The end of the drag
    event indicates the selection region is ready.
    .. |box_zoom_icon| image:: /_images/icons/BoxZoom.png
        :height: 18pt
    """
    dimensions = List(Enum(Dimension), default=["width", "height"], help="""
    Which dimensions the zoom box is to be free in. By default,
    users may freely draw zoom boxes with any dimensions. If only
    "width" is supplied, the box will be constrained to span the entire
    vertical space of the plot, only the horizontal dimension can be
    controlled. If only "height" is supplied, the box will be constrained
    to span the entire horizontal space of the plot, and the vertical
    dimension can be controlled.
    """)
    # Visual feedback while dragging; a fresh annotation per tool instance.
    overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help="""
    A shaded annotation drawn to indicate the selection region.
    """)
    match_aspect = Bool(default=False, help="""
    Whether the box zoom region should be restricted to have the same
    aspect ratio as the plot region.
    .. note::
        If the tool is restricted to one dimension, this value has
        no effect.
    """)
class BoxSelectTool(Tool):
    """ *toolbar icon*: |box_select_icon|
    The box selection tool allows users to make selections on a
    Plot by indicating a rectangular region by dragging the
    mouse or a finger over the plot region. The end of the drag
    event indicates the selection region is ready.
    See :ref:`userguide_styling_selected_unselected_glyphs` for information
    on styling selected and unselected glyphs.
    .. |box_select_icon| image:: /_images/icons/BoxSelect.png
        :height: 18pt
    """
    # ``names`` and ``renderers`` both narrow which renderers are hit-tested.
    names = List(String, help="""
    A list of names to query for. If set, only renderers that
    have a matching value for their ``name`` attribute will be used.
    """)
    renderers = List(Instance(Renderer), help="""
    An explicit list of renderers to hit test again. If unset,
    defaults to all renderers on a plot.
    """)
    select_every_mousemove = Bool(False, help="""
    Whether a selection computation should happen on every mouse
    event, or only once, when the selection region is completed. Default: False
    """)
    dimensions = List(Enum(Dimension), default=["width", "height"], help="""
    Which dimensions the box selection is to be free in. By default,
    users may freely draw selections boxes with any dimensions. If only
    "width" is supplied, the box will be constrained to span the entire
    vertical space of the plot, only the horizontal dimension can be
    controlled. If only "height" is supplied, the box will be constrained
    to span the entire horizontal space of the plot, and the vertical
    dimension can be controlled.
    """)
    callback = Instance(Callback, help="""
    A callback to run in the browser on completion of drawing a selection box.
    The cb_data parameter that is available to the Callback code will contain
    one BoxSelectTool-specific field:
    :geometry: object containing the coordinates of the selection box
    """)
    # Visual feedback while dragging; a fresh annotation per tool instance.
    overlay = Instance(BoxAnnotation, default=DEFAULT_BOX_OVERLAY, help="""
    A shaded annotation drawn to indicate the selection region.
    """)
def DEFAULT_POLY_OVERLAY():
    """ Default factory for the shaded polygon drawn by lasso/poly tools.

    A ``def`` rather than a lambda assignment (PEP 8 E731); called per
    use so every tool gets its own fresh ``PolyAnnotation`` instance,
    exactly as the original lambda behaved.
    """
    return PolyAnnotation(
        level="overlay",
        xs_units="screen",
        ys_units="screen",
        fill_color="lightgrey",
        fill_alpha=0.5,
        line_color="black",
        line_alpha=1.0,
        line_width=2,
        line_dash=[4, 4]
    )
class LassoSelectTool(Tool):
    """ *toolbar icon*: |lasso_select_icon|
    The lasso selection tool allows users to make selections on a
    Plot by indicating a free-drawn "lasso" region by dragging the
    mouse or a finger over the plot region. The end of the drag
    event indicates the selection region is ready.
    See :ref:`userguide_styling_selected_unselected_glyphs` for information
    on styling selected and unselected glyphs.
    .. note::
        Selections can be comprised of multiple regions, even those
        made by different selection tools. Hold down the <<shift>> key
        while making a selection to append the new selection to any
        previous selection that might exist.
    .. |lasso_select_icon| image:: /_images/icons/LassoSelect.png
        :height: 18pt
    """
    names = List(String, help="""
    A list of names to query for. If set, only renderers that
    have a matching value for their ``name`` attribute will be used.
    """)
    renderers = List(Instance(Renderer), help="""
    An explicit list of renderers to hit test again. If unset,
    defaults to all renderers on a plot.
    """)
    select_every_mousemove = Bool(True, help="""
    Whether a selection computation should happen on every mouse
    event, or only once, when the selection region is completed. Default: True
    """)
    # The default is the DEFAULT_POLY_OVERLAY *factory*, so each tool
    # instance gets its own PolyAnnotation rather than sharing one.
    overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
    A shaded annotation drawn to indicate the selection region.
    """)
class PolySelectTool(Tool):
    """ *toolbar icon*: |poly_select_icon|
    The polygon selection tool allows users to make selections on a
    Plot by indicating a polygonal region with mouse clicks. single
    clicks (or taps) add successive points to the definition of the
    polygon, and a double click (or tap) indicates the selection
    region is ready.
    See :ref:`userguide_styling_selected_unselected_glyphs` for information
    on styling selected and unselected glyphs.
    .. note::
        Selections can be comprised of multiple regions, even those
        made by different selection tools. Hold down the <<shift>> key
        while making a selection to append the new selection to any
        previous selection that might exist.
    .. |poly_select_icon| image:: /_images/icons/PolygonSelect.png
        :height: 18pt
    """
    names = List(String, help="""
    A list of names to query for. If set, only renderers that
    have a matching value for their ``name`` attribute will be used.
    """)
    renderers = List(Instance(Renderer), help="""
    An explicit list of renderers to hit test again. If unset,
    defaults to all renderers on a plot.
    """)
    # The default is the DEFAULT_POLY_OVERLAY *factory*, so each tool
    # instance gets its own PolyAnnotation rather than sharing one.
    overlay = Instance(PolyAnnotation, default=DEFAULT_POLY_OVERLAY, help="""
    A shaded annotation drawn to indicate the selection region.
    """)
class HoverTool(InspectTool):
    """ *toolbar icon*: |inspector_icon|
    The hover tool is a passive inspector tool. It is generally on at
    all times, but can be configured in the inspector's menu associated
    with the *toolbar icon* shown above.
    By default, the hover tool displays informational tooltips whenever
    the cursor is directly over a glyph. The data to show comes from the
    glyph's data source, and what is to be displayed is configurable with
    the ``tooltips`` attribute that maps display names to columns in the
    data source, or to special known variables.
    Here is an example of how to configure and use the hover tool::
        # Add tooltip (name, field) pairs to the tool. See below for a
        # description of possible field values.
        hover.tooltips = [
            ("index", "$index"),
            ("(x,y)", "($x, $y)"),
            ("radius", "@radius"),
            ("fill color", "$color[hex, swatch]:fill_color"),
            ("foo", "@foo"),
            ("bar", "@bar"),
        ]
    You can also supply a ``Callback`` to the HoverTool, to build custom
    interactions on hover. In this case you may want to turn the tooltips
    off by setting ``tooltips=None``.
    .. warning::
        Hover tool does not currently work with the following glyphs:
        .. hlist::
            :columns: 3
            * annulus
            * arc
            * bezier
            * gear
            * image
            * image_rgba
            * image_url
            * multi_line
            * oval
            * patch
            * quadratic
            * ray
            * segment
            * text
    .. |inspector_icon| image:: /_images/icons/Inspector.png
        :height: 18pt
    """
    names = List(String, help="""
    A list of names to query for. If set, only renderers that
    have a matching value for their ``name`` attribute will be used.
    """)
    renderers = List(Instance(Renderer), help="""
    An explicit list of renderers to hit test again. If unset,
    defaults to all renderers on a plot.
    """)
    callback = Instance(Callback, help="""
    A callback to run in the browser whenever the input's value changes. The
    cb_data parameter that is available to the Callback code will contain two
    HoverTool specific fields:
    :index: object containing the indices of the hovered points in the data source
    :geometry: object containing the coordinates of the hover cursor
    """)
    # ``.accepts`` below also allows a plain mapping, converted to a list
    # of (name, field) pairs (presentation order then unspecified).
    tooltips = Either(String, List(Tuple(String, String)),
            default=[
                ("index","$index"),
                ("data (x, y)","($x, $y)"),
                ("canvas (x, y)","($sx, $sy)"),
            ], help="""
    The (name, field) pairs describing what the hover tool should
    display when there is a hit.
    Field names starting with "@" are interpreted as columns on the
    data source. For instance, "@temp" would look up values to display
    from the "temp" column of the data source.
    Field names starting with "$" are special, known fields:
    :$index: index of selected point in the data source
    :$x: x-coordinate under the cursor in data space
    :$y: y-coordinate under the cursor in data space
    :$sx: x-coordinate under the cursor in screen (canvas) space
    :$sy: y-coordinate under the cursor in screen (canvas) space
    :$color: color data from data source, with the syntax:
             ``$color[options]:field_name``. The available options
             are: 'hex' (to display the color as a hex value), and
             'swatch' to also display a small color swatch.
    ``None`` is also a valid value for tooltips. This turns off the
    rendering of tooltips. This is mostly useful when supplying other
    actions on hover via the callback property.
    .. note::
        The tooltips attribute can also be configured with a mapping type,
        e.g. ``dict`` or ``OrderedDict``. However, if a ``dict`` is used,
        the visual presentation order is unspecified.
    """).accepts(Dict(String, String), lambda d: list(d.items()))
    mode = Enum("mouse", "hline", "vline", help="""
    Whether to consider hover pointer as a point (x/y values), or a
    span on h or v directions.
    """)
    point_policy = Enum("snap_to_data", "follow_mouse", "none", help="""
    Whether the tooltip position should snap to the "center" position of
    the associated glyph, or always follow the current mouse cursor
    position.
    """)
    line_policy = Enum("prev", "next", "nearest", "interp", "none", help="""
    When showing tooltips for lines, whether the tooltip position should be
    the "previous" or "next" points on the line, the nearest point to the
    current mouse position, or interpolate along the line to the current
    mouse position.
    """)
# Defaults for the HelpTool below; individual instances can override them
# via the ``help_tooltip`` and ``redirect`` properties.
DEFAULT_HELP_TIP = "Click the question mark to learn more about Bokeh plot tools."
DEFAULT_HELP_URL = "http://bokeh.pydata.org/en/latest/docs/user_guide/tools.html"
class HelpTool(Tool):
    """
    The help tool is a widget designed to replace the hardcoded 'Help' link.
    The hover text can be customized through the ``help_tooltip`` attribute
    and the redirect site overridden as well. Defaults for both come from
    the module-level DEFAULT_HELP_TIP and DEFAULT_HELP_URL constants.
    """
    help_tooltip = String(default=DEFAULT_HELP_TIP, help="""
    Tooltip displayed when hovering over the help icon.
    """)
    redirect = String(default=DEFAULT_HELP_URL, help="""
    Site to be redirected through upon click.
    """)
class UndoTool(Tool):
    """ *toolbar icon*: |undo_icon|
    The undo tool allows restoring the previous state of the plot.
    .. |undo_icon| image:: /_images/icons/Undo.png
        :height: 18pt
    """
class RedoTool(Tool):
    """ *toolbar icon*: |redo_icon|
    The redo tool restores the change most recently reverted by the
    undo tool.
    .. |redo_icon| image:: /_images/icons/Redo.png
        :height: 18pt
    """
|
{
"content_hash": "18652914f3704602559735f7c58a6ede",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 90,
"avg_line_length": 33.67438016528926,
"alnum_prop": 0.6669120895302606,
"repo_name": "pombredanne/bokeh",
"id": "381eecd59b5c2c44f432153a14316eb5a87a1e13",
"size": "20373",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/models/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
homeassistant.components.device_tracker.snmp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports fetching WiFi associations
through SNMP.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.snmp/
"""
import logging
from datetime import timedelta
import threading
import binascii
from homeassistant.const import CONF_HOST
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
from homeassistant.components.device_tracker import DOMAIN
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pysnmp==4.2.5']
CONF_COMMUNITY = "community"
CONF_BASEOID = "baseoid"
# pylint: disable=unused-argument
def get_scanner(hass, config):
    """Validate the platform config and return an SnmpScanner, or None."""
    required = {DOMAIN: [CONF_HOST, CONF_COMMUNITY, CONF_BASEOID]}
    if not validate_config(config, required, _LOGGER):
        return None
    scanner = SnmpScanner(config[DOMAIN])
    # Only hand back scanners that managed an initial SNMP query.
    if scanner.success_init:
        return scanner
    return None
class SnmpScanner(object):
    """
    This class queries any SNMP capable Access Point for connected devices.

    Walks the configured base OID with SNMP GETNEXT requests; each value
    returned is treated as the raw MAC address of an associated client.
    """
    def __init__(self, config):
        # Imported lazily so pysnmp is only required when this platform
        # is actually configured.
        from pysnmp.entity.rfc3413.oneliner import cmdgen
        self.snmp = cmdgen.CommandGenerator()
        # 161 is the standard SNMP UDP port.
        self.host = cmdgen.UdpTransportTarget((config[CONF_HOST], 161))
        self.community = cmdgen.CommunityData(config[CONF_COMMUNITY])
        self.baseoid = cmdgen.MibVariable(config[CONF_BASEOID])
        self.lock = threading.Lock()
        # Most recent scan results: list of {'mac': <address>} dicts.
        self.last_results = []
        # Test the router is accessible
        data = self.get_snmp_data()
        self.success_init = data is not None
    def scan_devices(self):
        """
        Scans for new devices and return a list containing found device IDs.
        """
        self._update_info()
        return [client['mac'] for client in self.last_results]
    # Suppressing no-self-use warning
    # pylint: disable=R0201
    def get_device_name(self, device):
        """ Returns the name of the given device or None if we don't know. """
        # We have no names
        return None
    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_info(self):
        """
        Ensures the information from the WAP is up to date.
        Returns boolean if scanning successful.

        Keeps the previous ``last_results`` when a scan fails.
        """
        if not self.success_init:
            return False
        with self.lock:
            data = self.get_snmp_data()
            if not data:
                return False
            self.last_results = data
            return True
    def get_snmp_data(self):
        """ Fetch mac addresses from WAP via SNMP.

        Returns a list of {'mac': <address>} dicts, or None on SNMP error
        (callers treat None as "scan failed").
        """
        devices = []
        errindication, errstatus, errindex, restable = self.snmp.nextCmd(
            self.community, self.host, self.baseoid)
        if errindication:
            _LOGGER.error("SNMPLIB error: %s", errindication)
            return
        if errstatus:
            _LOGGER.error('SNMP error: %s at %s', errstatus.prettyPrint(),
                          errindex and restable[-1][int(errindex)-1]
                          or '?')
            return
        for resrow in restable:
            for _, val in resrow:
                # Render the raw octet string as a colon-separated hex
                # MAC address, e.g. 'aa:bb:cc:dd:ee:ff'.
                mac = binascii.hexlify(val.asOctets()).decode('utf-8')
                mac = ':'.join([mac[i:i+2] for i in range(0, len(mac), 2)])
                devices.append({'mac': mac})
        return devices
|
{
"content_hash": "0fbe728df02ddda138c038920e340bc7",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 30.82051282051282,
"alnum_prop": 0.6156405990016639,
"repo_name": "badele/home-assistant",
"id": "868f701673a575a7906caebdd509dac3111d7b3e",
"size": "3606",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/device_tracker/snmp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1316899"
},
{
"name": "Python",
"bytes": "1133422"
},
{
"name": "Shell",
"bytes": "3943"
}
],
"symlink_target": ""
}
|
from cached import cached
|
{
"content_hash": "ce679fe68140491d1ad862ed95f87149",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 26,
"alnum_prop": 0.8461538461538461,
"repo_name": "caryt/utensils",
"id": "db741c453b6d82390962f306f1eaa9b0b3fe74dc",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cached/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "54513"
},
{
"name": "Python",
"bytes": "153103"
}
],
"symlink_target": ""
}
|
"""Heat API exception subclasses - maps API response errors to AWS Errors."""
from oslo_utils import reflection
import webob.exc
from heat.common.i18n import _
from heat.common import serializers
class HeatAPIException(webob.exc.HTTPError):
    """webob HTTPError subclass that creates a serialized body.

    Subclass webob HTTPError so we can correctly serialize the wsgi response
    into the http response body, using the format specified by the request.

    Note this should not be used directly; use the subclasses defined
    below, which map to AWS API errors.
    """
    code = 400
    title = "HeatAPIException"
    explanation = _("Generic HeatAPIException, please use specific "
                    "subclasses!")
    err_type = "Sender"
    def __init__(self, detail=None):
        """Overload HTTPError constructor to create a default serialized body.

        This is required because not all error responses are processed
        by the wsgi controller (such as auth errors), which are further up
        the paste pipeline. We serialize in XML by default (as AWS does).
        """
        webob.exc.HTTPError.__init__(self, detail=detail)
        serializers.XMLResponseSerializer().default(
            self, self.get_unserialized_body())
    def get_unserialized_body(self):
        """Return a dict suitable for serialization in the wsgi controller.

        This wraps the exception details in a format which maps to the
        expected format for the AWS API.
        """
        # "Code" here is the AWS API error code (self.title), not the
        # HTTP status code.
        message = self.explanation
        if self.detail:
            message = ":".join([self.explanation, self.detail])
        error = {
            'Type': self.err_type,
            'Code': self.title,
            'Message': message,
        }
        return {'ErrorResponse': {'Error': error}}
# Common Error Subclasses:
class HeatIncompleteSignatureError(HeatAPIException):
    """The request signature does not conform to AWS standards."""
    code = 400  # HTTP status of the response
    title = "IncompleteSignature"  # AWS API error code
    explanation = _("The request signature does not conform to AWS standards")
class HeatInternalFailureError(HeatAPIException):
    """The request processing has failed due to some unknown error."""
    code = 500  # HTTP status of the response
    title = "InternalFailure"  # AWS API error code
    explanation = _("The request processing has failed due to an "
                    "internal error")
    err_type = "Server"  # fault attributed to the service, not the caller
class HeatInvalidActionError(HeatAPIException):
    """The action or operation requested is invalid."""
    code = 400  # HTTP status of the response
    title = "InvalidAction"  # AWS API error code
    explanation = _("The action or operation requested is invalid")
class HeatInvalidClientTokenIdError(HeatAPIException):
    """The X.509 certificate or AWS Access Key ID provided does not exist."""
    code = 403  # HTTP status of the response
    title = "InvalidClientTokenId"  # AWS API error code
    explanation = _("The certificate or AWS Key ID provided does not exist")
class HeatInvalidParameterCombinationError(HeatAPIException):
    """Parameters that must not be used together were used together."""
    code = 400  # HTTP status of the response
    title = "InvalidParameterCombination"  # AWS API error code
    explanation = _("Incompatible parameters were used together")
class HeatInvalidParameterValueError(HeatAPIException):
    """A bad or out-of-range value was supplied for the input parameter.

    This is the catch-all mapping used by map_remote_error() for most
    engine-side validation failures.
    """
    code = 400  # HTTP status of the response
    title = "InvalidParameterValue"  # AWS API error code
    explanation = _("A bad or out-of-range value was supplied")
class HeatInvalidQueryParameterError(HeatAPIException):
    """AWS query string is malformed, does not adhere to AWS standards."""
    code = 400  # HTTP status of the response
    title = "InvalidQueryParameter"  # AWS API error code
    explanation = _("AWS query string is malformed, does not adhere to "
                    "AWS spec")
class HeatMalformedQueryStringError(HeatAPIException):
    """The query string is malformed."""
    # 404 (not 400) matches the status AWS documents for this error code.
    code = 404  # HTTP status of the response
    title = "MalformedQueryString"  # AWS API error code
    explanation = _("The query string is malformed")
class HeatMissingActionError(HeatAPIException):
    """The request is missing an action or operation parameter."""
    code = 400  # HTTP status of the response
    title = "MissingAction"  # AWS API error code
    explanation = _("The request is missing an action or operation parameter")
class HeatMissingAuthenticationTokenError(HeatAPIException):
    """Does not contain a valid AWS Access Key or certificate.

    Request must contain either a valid (registered) AWS Access Key ID
    or X.509 certificate.
    """
    code = 403  # HTTP status of the response
    title = "MissingAuthenticationToken"  # AWS API error code
    explanation = _("Does not contain a valid AWS Access Key or certificate")
class HeatMissingParameterError(HeatAPIException):
    """A mandatory input parameter is missing.

    An input parameter that is mandatory for processing the request is missing.
    """
    code = 400  # HTTP status of the response
    title = "MissingParameter"  # AWS API error code
    explanation = _("A mandatory input parameter is missing")
class HeatOptInRequiredError(HeatAPIException):
    """The AWS Access Key ID needs a subscription for the service."""
    code = 403  # HTTP status of the response
    title = "OptInRequired"  # AWS API error code
    explanation = _("The AWS Access Key ID needs a subscription for the "
                    "service")
class HeatRequestExpiredError(HeatAPIException):
    """Request expired or more than 15 minutes in the future.

    Request is past expires date or the request date (either with 15 minute
    padding), or the request date occurs more than 15 minutes in the future.
    """
    code = 400  # HTTP status of the response
    title = "RequestExpired"  # AWS API error code
    explanation = _("Request expired or more than 15mins in the future")
class HeatServiceUnavailableError(HeatAPIException):
    """The request has failed due to a temporary failure of the server."""
    code = 503  # HTTP status of the response
    title = "ServiceUnavailable"  # AWS API error code
    explanation = _("Service temporarily unavailable")
    err_type = "Server"  # fault attributed to the service, not the caller
class HeatThrottlingError(HeatAPIException):
    """Request was denied due to request throttling."""
    code = 400  # HTTP status of the response
    title = "Throttling"  # AWS API error code
    explanation = _("Request was denied due to request throttling")
class AlreadyExistsError(HeatAPIException):
    """Resource with the name requested already exists."""
    code = 400  # HTTP status of the response
    title = 'AlreadyExists'  # AWS API error code
    explanation = _("Resource with the name requested already exists")
# Not documented in the AWS docs, authentication failure errors
class HeatAccessDeniedError(HeatAPIException):
    """Authentication fails due to user IAM group memberships.

    This is the response given when authentication fails due to user
    IAM group memberships meaning we deny access.
    """
    code = 403  # HTTP status of the response
    title = "AccessDenied"  # AWS API error code
    explanation = _("User is not authorized to perform action")
class HeatSignatureError(HeatAPIException):
    """Authentication fails due to a bad signature."""
    code = 403  # HTTP status of the response
    title = "SignatureDoesNotMatch"  # AWS API error code
    explanation = _("The request signature we calculated does not match the "
                    "signature you provided")
# Heat-specific errors
class HeatAPINotImplementedError(HeatAPIException):
    """API action is not yet implemented."""
    code = 500  # HTTP status of the response
    title = "APINotImplemented"  # Heat-specific error code (not an AWS one)
    explanation = _("The requested action is not yet implemented")
    err_type = "Server"  # fault attributed to the service, not the caller
class HeatActionInProgressError(HeatAPIException):
    """Cannot perform action on stack in its current state."""
    code = 400  # HTTP status of the response
    title = 'InvalidAction'  # AWS API error code
    # Wrapped in _() for translation, consistent with every other
    # exception in this module (was a plain concatenated str).
    explanation = _("Cannot perform action on stack while other actions are "
                    "in progress")
class HeatRequestLimitExceeded(HeatAPIException):
    """Payload size of the request exceeds maximum allowed size."""
    code = 400  # HTTP status of the response
    title = 'RequestLimitExceeded'  # AWS API error code
    explanation = _("Payload exceeds maximum allowed size")
def map_remote_error(ex):
    """Map rpc_common.RemoteError exceptions to HeatAPIException subclasses.

    Map rpc_common.RemoteError exceptions returned by the engine
    to HeatAPIException subclasses which can be used to return
    properly formatted AWS error responses.

    :param ex: the exception raised by the engine (usually a RemoteError
        whose class name ends in ``_Remote``).
    :returns: an instance of a HeatAPIException subclass.
    """
    inval_param_errors = (
        'AttributeError',
        'ValueError',
        'InvalidTenant',
        'EntityNotFound',
        'ResourceActionNotSupported',
        'ResourceNotFound',
        'ResourceNotAvailable',
        'StackValidationFailed',
        'InvalidSchemaError',
        'InvalidTemplateReference',
        'InvalidTemplateVersion',
        'InvalidTemplateSection',
        'UnknownUserParameter',
        'UserParameterMissing',
        'MissingCredentialError',
        'ResourcePropertyConflict',
        'PropertyUnspecifiedError',
        'NotSupported',
        'InvalidBreakPointHook',
        'PhysicalResourceIDAmbiguity',
    )
    denied_errors = ('Forbidden', 'NotAuthorized')
    # NOTE: trailing commas matter. Without them ('StackExists') is just a
    # parenthesized *string*, and ``ex_type in already_exists_errors`` would
    # perform substring matching (e.g. 'Exists' in 'StackExists' is True)
    # instead of tuple membership.
    already_exists_errors = ('StackExists',)
    invalid_action_errors = ('ActionInProgress',)
    request_limit_exceeded = ('RequestLimitExceeded',)
    # Strip the '_Remote' suffix oslo.messaging appends to remote errors.
    ex_type = reflection.get_class_name(ex, fully_qualified=False)
    if ex_type.endswith('_Remote'):
        ex_type = ex_type[:-len('_Remote')]
    # Only expose the exception message to the client when the engine has
    # explicitly marked the exception as safe.
    safe = getattr(ex, 'safe', False)
    detail = str(ex) if safe else None
    if ex_type in inval_param_errors:
        return HeatInvalidParameterValueError(detail=detail)
    elif ex_type in denied_errors:
        return HeatAccessDeniedError(detail=detail)
    elif ex_type in already_exists_errors:
        return AlreadyExistsError(detail=detail)
    elif ex_type in invalid_action_errors:
        return HeatActionInProgressError(detail=detail)
    elif ex_type in request_limit_exceeded:
        return HeatRequestLimitExceeded(detail=detail)
    else:
        # Map everything else to internal server error for now
        return HeatInternalFailureError(detail=detail)
|
{
"content_hash": "9421ea76171f8d7d15d99ded39af7371",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 79,
"avg_line_length": 30.20186335403727,
"alnum_prop": 0.692133676092545,
"repo_name": "openstack/heat",
"id": "90366f8611cfa467fc224c40c53196f93cb4b6d5",
"size": "10436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/api/aws/exception.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
}
|
import codecs
import argparse
def main():
    """Echo an MTrackJ .mdf file to stdout, dropping late track points.

    "Point" records whose frame number exceeds n are omitted; all other
    lines (header, assembly/cluster/track records) pass through unchanged.
    Python 2 script: uses the print statement with a trailing comma to
    avoid adding a second newline to lines read from the file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('n', type=int, help='Keep points from the first n frames')
    parser.add_argument('mdf_filename', help='File to process')
    args = parser.parse_args()
    # MTrackJ data files are MacRoman-encoded.
    f = codecs.open(args.mdf_filename, 'r', 'mac-roman')
    line = f.readline()
    if not line.startswith("MTrackJ"):
        raise ValueError("File {} is not in MTrackJ format.".format(args.mdf_filename))
    f.seek(0)
    for line in f:
        if line.startswith("Point"):
            # Frame number is the second-to-last whitespace-separated
            # field of a Point record (presumably followed by the channel
            # column -- TODO confirm against the MTrackJ format spec).
            split = line.split()
            this_frame = int(float(split[-2]))
            if this_frame <= args.n:
                print line,
        else:
            # Non-Point lines are always kept.
            print line,
if __name__ == '__main__':
    main()
|
{
"content_hash": "eb5b78d5c535a39a30c666bed3f5ca2b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 87,
"avg_line_length": 28.25925925925926,
"alnum_prop": 0.580602883355177,
"repo_name": "tdsmith/migrationscripts",
"id": "1f008454da30f5b449ee602424f3365cd03e353c",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sting/truncate_mdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "38061"
},
{
"name": "Shell",
"bytes": "122"
}
],
"symlink_target": ""
}
|
from core.domain import widget_domain
from extensions.value_generators.models import generators
class Link(widget_domain.BaseWidget):
    """Definition of the non-interactive 'Link' widget.

    Do NOT make any changes to this widget definition while the Oppia app is
    running, otherwise things will break.

    This class represents a widget, whose id is the name of the class. It is
    auto-discovered when the default widgets are refreshed.
    """
    # The human-readable name of the widget.
    name = 'Link'
    # The category the widget falls under in the widget repository.
    category = 'Basic Input'
    # A description of the widget.
    description = (
        'Link widget.'
    )
    # Customization parameters and their descriptions, types and default
    # values. This attribute name MUST be prefixed by '_'.
    _params = [{
        'name': 'url',
        'description': (
            'The URL for this link. It must start with http:// or https://'),
        'generator': generators.Copier,
        'init_args': {
            'disallow_parse_with_jinja': True
        },
        'customization_args': {
            'value': ''
        },
        # SanitizedUrl enforces the http(s) scheme requirement above.
        'obj_type': 'SanitizedUrl',
    }]
    # The HTML tag name for this non-interactive widget.
    frontend_name = 'link'
    # The tooltip for the icon in the rich-text editor.
    tooltip = 'Insert link'
    # The icon to show in the rich-text editor. This is a representation of the
    # .png file in this widget folder, generated with the
    # utils.convert_png_to_data_url() function.
    icon_data_url = (
        'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAQAAAC1%2BjfqA'
        'AAABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXH'
        'JZTwAAADpSURBVCjPY/jPgB8y0EmBHXdWaeu7ef9r%0AHuaY50jU3J33v/VdVqkdN1SBE'
        'ZtP18T/L/7f/X/wf%2BO96kM3f9z9f%2BT/xP8%2BXUZsYAWGfsUfrr6L%0A2Ob9J/X/p'
        'P%2BV/1P/e/%2BJ2LbiYfEHQz%2BICV1N3yen%2B3PZf977/9z/Q//X/rf/7M81Ob3pu1'
        'EXWIFu%0AZvr7aSVBOx1/uf0PBEK3/46/gnZOK0l/r5sJVqCp6Xu99/2qt%2Bv%2BT/9f'
        '%2BL8CSK77v%2Bpt73vf65qa%0AYAVqzPYGXvdTvmR/z/4ZHhfunP0p%2B3vKF6/79gZq'
        'zPQLSYoUAABKPQ%2BkpVV/igAAAABJRU5ErkJg%0Agg%3D%3D%0A'
    )
|
{
"content_hash": "99ac76ed8f633228479123649b0f7b79",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 37.793103448275865,
"alnum_prop": 0.6733576642335767,
"repo_name": "mindpin/mindpin_oppia",
"id": "82b07adea7cec0e096a0a797af8c3deccda9c4e3",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extensions/widgets/noninteractive/Link/Link.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "441"
},
{
"name": "CSS",
"bytes": "18074"
},
{
"name": "JavaScript",
"bytes": "364061"
},
{
"name": "Python",
"bytes": "735069"
},
{
"name": "Shell",
"bytes": "24026"
}
],
"symlink_target": ""
}
|
"""Test the Whirlpool Sixth Sense init."""
from unittest.mock import AsyncMock, MagicMock
import aiohttp
from homeassistant.components.whirlpool.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from . import init_integration
async def test_setup(hass: HomeAssistant) -> None:
    """Test that a mocked integration sets up and the entry loads."""
    entry = await init_integration(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
async def test_setup_http_exception(hass: HomeAssistant, mock_auth_api: MagicMock) -> None:
    """Test setup with an http exception."""
    # A connection error during auth is transient: the entry should end up
    # in SETUP_RETRY rather than a permanent error state.
    mock_auth_api.return_value.do_auth = AsyncMock(
        side_effect=aiohttp.ClientConnectionError()
    )
    entry = await init_integration(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.SETUP_RETRY
async def test_setup_auth_failed(hass: HomeAssistant, mock_auth_api: MagicMock) -> None:
    """Test setup with failed auth."""
    # Auth completes but yields an invalid token: a non-transient failure,
    # so the entry should land in SETUP_ERROR (no retry).
    mock_auth_api.return_value.do_auth = AsyncMock()
    mock_auth_api.return_value.is_access_token_valid.return_value = False
    entry = await init_integration(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.SETUP_ERROR
async def test_setup_fetch_appliances_failed(
    hass: HomeAssistant, mock_appliances_manager_api: MagicMock
) -> None:
    """Test setup with failed fetch_appliances."""
    mock_appliances_manager_api.return_value.fetch_appliances.return_value = False
    entry = await init_integration(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    # Failing to fetch the appliance list aborts setup with a hard error.
    assert entry.state is ConfigEntryState.SETUP_ERROR
async def test_unload_entry(hass: HomeAssistant) -> None:
    """Test successful unload of entry."""
    entry = await init_integration(hass)
    assert len(hass.config_entries.async_entries(DOMAIN)) == 1
    assert entry.state is ConfigEntryState.LOADED
    assert await hass.config_entries.async_unload(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state is ConfigEntryState.NOT_LOADED
    # Unloading must also clear the integration's stored runtime data.
    assert not hass.data.get(DOMAIN)
|
{
"content_hash": "2e4266fb407be270166f10666d247bcc",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 83,
"avg_line_length": 37.05084745762712,
"alnum_prop": 0.7456541628545288,
"repo_name": "w1ll1am23/home-assistant",
"id": "619c2c783b7f4691c14d314b9d8b32067a213315",
"size": "2186",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/whirlpool/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import ConfigParser
CONFIG_FILE = '/etc/ci-scoreboard/ci-scoreboard.conf'
CONFIG_SECTION = 'scoreboard'
class Config:
    """Typed, None-defaulting accessors for the scoreboard config file.

    Every accessor returns None when the option is absent (except
    gerrit_keepalive, which defaults to 0).
    """
    def __init__(self):
        self._cfg = ConfigParser.ConfigParser()
        # ConfigParser.read() silently ignores a missing file; in that
        # case every accessor simply returns its default.
        self._cfg.read(CONFIG_FILE)
    def _get(self, option, getter):
        """Look up *option* via *getter*, or return None if unset.

        Consolidates the has_option/get pattern previously duplicated
        across _value, _int_value and _float_value.
        """
        if self._cfg.has_option(CONFIG_SECTION, option):
            return getter(CONFIG_SECTION, option)
        return None
    def _value(self, option):
        """Return a raw string option, or None."""
        return self._get(option, self._cfg.get)
    def _int_value(self, option):
        """Return an integer option, or None."""
        return self._get(option, self._cfg.getint)
    def _float_value(self, option):
        """Return a float option, or None."""
        return self._get(option, self._cfg.getfloat)
    def gerrit_user(self):
        return self._value('GERRIT_USER')
    def gerrit_key(self):
        return self._value('GERRIT_KEY')
    def gerrit_hostname(self):
        return self._value('GERRIT_HOSTNAME')
    def gerrit_port(self):
        return self._int_value('GERRIT_PORT')
    def gerrit_keepalive(self):
        keepalive = self._int_value('GERRIT_KEEPALIVE')
        # 0 is the safe default, meaning no keepalives
        if keepalive is None:
            keepalive = 0
        return keepalive
    def db_uri(self):
        return self._value('DB_URI')
    def log_file(self):
        return self._value('LOG_FILE_LOCATION')
|
{
"content_hash": "e0f4b0996c9c2c8bb9633b373f25a397",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 61,
"avg_line_length": 27.8,
"alnum_prop": 0.6194244604316547,
"repo_name": "stackforge/third-party-ci-tools",
"id": "39612b341250312c017f334aa0bf4749db2398ac",
"size": "1391",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monitoring/scoreboard/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3508"
},
{
"name": "HTML",
"bytes": "4234"
},
{
"name": "JavaScript",
"bytes": "19348"
},
{
"name": "Pascal",
"bytes": "24"
},
{
"name": "Puppet",
"bytes": "5355"
},
{
"name": "Python",
"bytes": "28581"
},
{
"name": "Ruby",
"bytes": "1283"
},
{
"name": "Shell",
"bytes": "18865"
}
],
"symlink_target": ""
}
|
"""Support for EnOcean devices."""
import logging
from enocean.communicators.serialcommunicator import SerialCommunicator
from enocean.protocol.packet import Packet, RadioPacket
from enocean.utils import combine_hex
import voluptuous as vol
from homeassistant.const import CONF_DEVICE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DOMAIN = "enocean"
DATA_ENOCEAN = "enocean"
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_DEVICE): cv.string})}, extra=vol.ALLOW_EXTRA
)
SIGNAL_RECEIVE_MESSAGE = "enocean.receive_message"
SIGNAL_SEND_MESSAGE = "enocean.send_message"
def setup(hass, config):
    """Set up the EnOcean component."""
    device_path = config[DOMAIN].get(CONF_DEVICE)
    # Stash the dongle so platforms (sensor, switch, ...) can reach it.
    hass.data[DATA_ENOCEAN] = EnOceanDongle(hass, device_path)
    return True
class EnOceanDongle:
    """Representation of an EnOcean dongle.

    Bridges the serial dongle and Home Assistant's dispatcher: packets
    sent on SIGNAL_SEND_MESSAGE are written out to the dongle, and
    received radio packets are re-published on SIGNAL_RECEIVE_MESSAGE.
    """
    def __init__(self, hass, ser):
        """Initialize the EnOcean dongle.

        :param hass: the Home Assistant instance.
        :param ser: the serial port device the dongle is attached to.
        """
        self.__communicator = SerialCommunicator(port=ser, callback=self.callback)
        # start() begins processing the serial port; ``callback`` is then
        # invoked for each incoming packet (NOTE(review): presumably from
        # the communicator's own worker thread -- confirm).
        self.__communicator.start()
        self.hass = hass
        self.hass.helpers.dispatcher.dispatcher_connect(
            SIGNAL_SEND_MESSAGE, self._send_message_callback
        )
    def _send_message_callback(self, command):
        """Send a command through the EnOcean dongle."""
        self.__communicator.send(command)
    def callback(self, packet):
        """Handle EnOcean device's callback.

        This is the callback function called by python-enocean whenever there
        is an incoming packet.
        """
        # Only radio packets are forwarded; other packet types are dropped.
        if isinstance(packet, RadioPacket):
            _LOGGER.debug("Received radio packet: %s", packet)
            self.hass.helpers.dispatcher.dispatcher_send(SIGNAL_RECEIVE_MESSAGE, packet)
class EnOceanDevice(Entity):
    """Parent class for all devices associated with the EnOcean component."""
    def __init__(self, dev_id, dev_name="EnOcean device"):
        """Initialize the device.

        :param dev_id: the EnOcean sender ID of the device (compared via
            ``combine_hex`` against incoming packets, so presumably a
            sequence of byte values -- confirm with callers).
        :param dev_name: human-readable device name.
        """
        self.dev_id = dev_id
        self.dev_name = dev_name
    async def async_added_to_hass(self):
        """Register callbacks."""
        # async_on_remove unsubscribes from the dispatcher automatically
        # when the entity is removed.
        self.async_on_remove(
            self.hass.helpers.dispatcher.async_dispatcher_connect(
                SIGNAL_RECEIVE_MESSAGE, self._message_received_callback
            )
        )
    def _message_received_callback(self, packet):
        """Handle incoming packets."""
        # Ignore packets from other senders.
        if packet.sender_int == combine_hex(self.dev_id):
            self.value_changed(packet)
    def value_changed(self, packet):
        """Update the internal state of the device when a packet arrives.

        No-op hook; subclasses override it to parse the packet.
        """
    def send_command(self, data, optional, packet_type):
        """Send a command via the EnOcean dongle."""
        packet = Packet(packet_type, data=data, optional=optional)
        self.hass.helpers.dispatcher.dispatcher_send(SIGNAL_SEND_MESSAGE, packet)
|
{
"content_hash": "d35a2941b5336a7351e5a87bef3b2d63",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 88,
"avg_line_length": 31.946236559139784,
"alnum_prop": 0.6741837765062269,
"repo_name": "robbiet480/home-assistant",
"id": "90ab408775414a39b38e5de7525f40d4c32e7b78",
"size": "2971",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/enocean/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
# URL routes for the GoCardless payment round-trip (legacy Django
# ``patterns`` API from django.conf.urls.defaults).
from django.conf.urls.defaults import *
from oscar_gocardless import views
# NOTE(review): the confirm route is named 'gocardless-response', not
# 'gocardless-confirm'; reversals by name depend on this exact string.
urlpatterns = patterns('',
    url(r'^redirect/', views.RedirectView.as_view(), name='gocardless-redirect'),
    url(r'^confirm/', views.ConfirmView.as_view(), name='gocardless-response'),
    url(r'^cancel/', views.CancelView.as_view(), name='gocardless-cancel'),
)
|
{
"content_hash": "4c855f8fdfbe29c7aaa10b9b66b4da1d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 81,
"avg_line_length": 34.5,
"alnum_prop": 0.7072463768115942,
"repo_name": "django-oscar/django-oscar-gocardless",
"id": "eabe35a589b5498c675c42f3dcfd0a25294580a5",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar_gocardless/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18904"
},
{
"name": "Shell",
"bytes": "337"
}
],
"symlink_target": ""
}
|
"""Tests for grr.lib.rdfvalues.checks."""
import os
import yaml
from grr.lib import config_lib
from grr.lib import test_lib
from grr.lib.checks import checks as checks_lib
from grr.lib.checks import filters
from grr.lib.rdfvalues import anomaly
from grr.lib.rdfvalues import checks
from grr.parsers import linux_cmd_parser
from grr.parsers import wmi_parser
# Directory holding the YAML check/probe definitions used by these tests.
CONFIGS = os.path.join(config_lib.CONFIG["Test.data_dir"], "checks")
# Trigger condition tuples.  The second field is the target OS and the
# fourth is a host label ('foo' matches no host data in these tests);
# the third field is unused (always None) in this module.
TRIGGER_1 = ("SoftwarePackage", "Linux", None, None)
TRIGGER_2 = ("WMIInstalledSoftware", "Windows", None, None)
TRIGGER_3 = ("SoftwarePackage", None, None, "foo")
# Load some dpkg data
parser = linux_cmd_parser.DpkgCmdParser()
test_data = os.path.join(config_lib.CONFIG["Test.data_dir"], "dpkg.out")
with open(test_data) as f:
  DPKG_SW = list(parser.Parse(
      "/usr/bin/dpkg", ["-l"], f.read(), "", 0, 5, None))
# Load some wmi data
parser = wmi_parser.WMIInstalledSoftwareParser()
test_data = os.path.join(config_lib.CONFIG["Test.data_dir"], "wmi_sw.yaml")
WMI_SW = []
with open(test_data) as f:
  wmi = yaml.safe_load(f)
  for sw in wmi:
    WMI_SW.extend(parser.Parse(None, sw, None))
class ProbeTest(test_lib.GRRBaseTest):
  """Test 'Probe' operations."""

  # Class-level cache: probes.yaml is parsed once and the resulting dict is
  # mutated in place, so it is shared by every test in this class.
  configs = {}

  def setUp(self, **kwargs):
    super(ProbeTest, self).setUp(**kwargs)
    if not self.configs:
      config_file = os.path.join(CONFIGS, "probes.yaml")
      with open(config_file) as data:
        # Each YAML document has a name and a probe list; only the first
        # probe definition of each document is kept.
        for cfg in yaml.safe_load_all(data):
          name = cfg.get("name")
          probe_cfg = cfg.get("probe", [{}])
          self.configs[name] = probe_cfg[0]

  def Init(self, name, artifact, handler_class):
    """Helper method to verify that the Probe sets up the right handler."""
    cfg = self.configs.get(name)
    probe = checks.Probe(**cfg)
    self.assertEqual(artifact, probe.artifact)
    self.assertIsInstance(probe.handler, handler_class)
    self.assertIsInstance(probe.matcher, checks_lib.Matcher)

  def testInitialize(self):
    """Tests the input/output sequence validation."""
    self.Init("NO-FILTER", "DpkgDb", filters.NoOpHandler)
    self.Init("SERIAL", "DpkgDb", filters.SerialHandler)
    self.Init("PARALLEL", "DpkgDb", filters.ParallelHandler)
    self.Init("BASELINE", "DpkgDb", filters.SerialHandler)

  def testParse(self):
    """Host data should be passed to filters, results should be returned."""
    # Not implemented yet.
    pass

  def testParseWithBaseline(self):
    # Not implemented yet.
    pass

  def testValidate(self):
    # A probe definition without an artifact must be rejected.
    cfg = self.configs.get("NO-ARTIFACT")
    self.assertRaises(filters.DefinitionError, checks.Probe, cfg)
class MethodTest(test_lib.GRRBaseTest):
  """Test 'Method' operations."""

  configs = {}

  def setUp(self, **kwargs):
    super(MethodTest, self).setUp(**kwargs)
    if not self.configs:
      # Load the method definitions from the shared sw.yaml check config.
      with open(os.path.join(CONFIGS, "sw.yaml")) as data:
        self.configs = yaml.safe_load(data)["method"]

  def testMethodRegistersTriggers(self):
    """Each method should register exactly its own trigger condition."""
    m_lin, m_win, m_foo = [checks.Method(**cfg) for cfg in self.configs]
    cases = [(m_lin, [TRIGGER_1]),
             (m_win, [TRIGGER_2]),
             (m_foo, [TRIGGER_3])]
    for method, expected in cases:
      actual = [c.attr for c in method.triggers.conditions]
      self.assertEqual(expected, actual)

  def testMethodRoutesDataToProbes(self):
    # Not implemented yet.
    pass

  def testValidate(self):
    # Not implemented yet.
    pass
class CheckTest(test_lib.GRRBaseTest):
  """Test 'Check' operations."""

  # Class-level cache for the parsed sw.yaml check definition.
  cfg = {}

  def setUp(self, **kwargs):
    super(CheckTest, self).setUp(**kwargs)
    if not self.cfg:
      config_file = os.path.join(CONFIGS, "sw.yaml")
      with open(config_file) as data:
        self.cfg = yaml.safe_load(data)
    # Host data keyed by artifact name, as passed to Check.Parse().
    self.host_data = {"SoftwarePackage": DPKG_SW,
                      "WMIInstalledSoftware": WMI_SW}

  def testInitializeCheck(self):
    chk = checks.Check(**self.cfg)
    self.assertEqual("SW-CHECK", chk.check_id)
    self.assertItemsEqual(["ANY"], [str(c) for c in chk.match])

  def testGenerateTriggerMap(self):
    # Triggers should be retrievable by artifact name.
    chk = checks.Check(**self.cfg)
    expect = [TRIGGER_1, TRIGGER_3]
    result = [c.attr for c in chk.triggers.Search("SoftwarePackage")]
    self.assertItemsEqual(expect, result)
    expect = [TRIGGER_2]
    result = [c.attr for c in chk.triggers.Search("WMIInstalledSoftware")]
    self.assertItemsEqual(expect, result)

  def testParseCheckFromConfig(self):
    chk = checks.Check(**self.cfg)
    # Triggers 1 (linux packages) & 2 (windows software) should return results.
    # Trigger 3 should not return results as no host data has the label 'foo'.
    result_1 = chk.Parse([TRIGGER_1], self.host_data)
    result_2 = chk.Parse([TRIGGER_2], self.host_data)
    result_3 = chk.Parse([TRIGGER_3], self.host_data)
    self.assertTrue(result_1)
    self.assertTrue(result_2)
    self.assertFalse(result_3)

  def testValidate(self):
    # Not implemented yet.
    pass
class CheckResultsTest(test_lib.GRRBaseTest):
  """Test 'CheckResult' operations."""

  def testExtendAnomalies(self):
    """Anomalies from another result are appended to this result."""
    first = {
        "finding": ["Adware 2.1.1 is installed"],
        "explanation": "Found: Malicious software.",
        "type": 1,
    }
    second = {
        "finding": ["Java 6.0.240 is installed"],
        "explanation": "Found: Old Java installation.",
        "type": 1,
    }
    result = checks.CheckResult(
        check_id="SW-CHECK", anomaly=anomaly.Anomaly(**first))
    other = checks.CheckResult(
        check_id="SW-CHECK", anomaly=anomaly.Anomaly(**second))
    result.ExtendAnomalies(other)
    expected = {"check_id": "SW-CHECK", "anomaly": [first, second]}
    self.assertDictEqual(expected, result.ToPrimitiveDict())
class HintDefinitionTests(test_lib.GRRBaseTest):
  """Test 'Hint' operations."""

  # NOTE(review): `configs` is never written in this class, so the guard in
  # setUp is always true and the check is rebuilt for every test.
  configs = {}

  def setUp(self, **kwargs):
    super(HintDefinitionTests, self).setUp(**kwargs)
    if not self.configs:
      config_file = os.path.join(CONFIGS, "sw.yaml")
      with open(config_file) as data:
        cfg = yaml.safe_load(data)
      chk = checks.Check(**cfg)
      # sw.yaml defines three methods, in this order: linux, windows, foo.
      self.lin_method, self.win_method, self.foo_method = list(chk.method)

  def testInheritHintConfig(self):
    lin_problem = "l337 software installed"
    lin_format = "{{ name }} {{ version }} is installed"
    # Methods should not have a hint template.
    self.assertEqual(lin_problem, self.lin_method.hint.problem)
    self.assertFalse(self.lin_method.hint.hinter.template)
    # Formatting should be present in probes, if defined.
    for probe in self.lin_method.probe:
      self.assertEqual(lin_problem, probe.hint.problem)
      self.assertEqual(lin_format, probe.hint.format)
    foo_problem = "Sudo not installed"
    # Methods should not have a hint template.
    self.assertEqual(foo_problem, self.foo_method.hint.problem)
    self.assertFalse(self.foo_method.hint.hinter.template)
    # Formatting should be missing in probes, if undefined.
    for probe in self.foo_method.probe:
      self.assertEqual(foo_problem, probe.hint.problem)
      self.assertFalse(probe.hint.format)

  def testOverlayHintConfig(self):
    generic_problem = "Malicious software."
    java_problem = "Old Java installation."
    generic_format = "{{ name }} {{ version }} is installed"
    # Methods should not have a hint template.
    self.assertEqual(generic_problem, self.win_method.hint.problem)
    self.assertFalse(self.win_method.hint.hinter.template)
    # Formatting should be present in probes.
    probe_1, probe_2 = list(self.win_method.probe)
    self.assertEqual(java_problem, probe_1.hint.problem)
    self.assertEqual(generic_format, probe_1.hint.format)
    self.assertEqual(generic_problem, probe_2.hint.problem)
    self.assertEqual(generic_format, probe_2.hint.format)
|
{
"content_hash": "a6701aaccf21112ffb55500394270351",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 35.43438914027149,
"alnum_prop": 0.6743710892606308,
"repo_name": "ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert",
"id": "18e0b1ab8f64f07e422f3e619d97003b261d6058",
"size": "7853",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/rdfvalues/checks_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "C++",
"bytes": "55149"
},
{
"name": "CSS",
"bytes": "36573"
},
{
"name": "JavaScript",
"bytes": "831111"
},
{
"name": "Makefile",
"bytes": "5482"
},
{
"name": "Perl",
"bytes": "483"
},
{
"name": "Python",
"bytes": "4517593"
},
{
"name": "Shell",
"bytes": "31210"
}
],
"symlink_target": ""
}
|
"""Integration with pytorch profiler."""
import os
from pkg_resources import parse_version
import wandb
from wandb.errors import Error, UsageError
from wandb.sdk.lib import telemetry
# Module paths resolved lazily via wandb.util.get_module inside
# torch_trace_handler(), so torch is only required when profiling is used.
PYTORCH_MODULE = "torch"
PYTORCH_PROFILER_MODULE = "torch.profiler"
def torch_trace_handler():
    """Creates a trace handler for traces generated by the profiler.

    Provide as an argument to `torch.profiler.profile`:
    ```python
    torch.profiler.profile(..., on_trace_ready = wandb.profiler.torch_trace_handler())
    ```

    Calling this function ensures that profiler charts & tables can be viewed in your run dashboard
    on wandb.ai.

    Please note that `wandb.init()` must be called before this function is invoked.
    The PyTorch (torch) version must also be at least 1.9, in order to ensure stability
    of their Profiler API.

    Args:
        None

    Returns:
        A trace handler (from `torch.profiler.tensorboard_trace_handler`)
        that writes traces under the run's `pytorch_traces` directory.

    Raises:
        UsageError if wandb.init() hasn't been called before profiling.
        Error if torch version is less than 1.9.0.

    Examples:
        ```python
        run = wandb.init()
        run.config.id = "profile_code"

        with torch.profiler.profile(
            schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
            on_trace_ready=wandb.profiler.torch_trace_handler(),
            record_shapes=True,
            with_stack=True,
        ) as prof:
            for i, batch in enumerate(dataloader):
                if step >= 5:
                    break
                train(batch)
                prof.step()
        ```
    """
    torch = wandb.util.get_module(PYTORCH_MODULE, required=True)
    torch_profiler = wandb.util.get_module(PYTORCH_PROFILER_MODULE, required=True)

    if parse_version(torch.__version__) < parse_version("1.9.0"):
        raise Error(
            f"torch version must be at least 1.9 in order to use the PyTorch Profiler API.\
            \nVersion of torch currently installed: {torch.__version__}"
        )

    try:
        logdir = os.path.join(wandb.run.dir, "pytorch_traces")  # type: ignore
        # Fix: os.mkdir raised FileExistsError when the handler was created
        # more than once in the same run; makedirs with exist_ok=True is
        # idempotent (and also creates any missing parent directories).
        os.makedirs(logdir, exist_ok=True)
    except AttributeError:
        # wandb.run is None before wandb.init(), so accessing `.dir` raises.
        raise UsageError(
            "Please call `wandb.init()` before `wandb.profiler.torch_trace_handler()`"
        ) from None

    with telemetry.context() as tel:
        tel.feature.torch_profiler_trace = True

    return torch_profiler.tensorboard_trace_handler(logdir)
|
{
"content_hash": "8001d1d5d0f6a5315139ba30c64e432e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 99,
"avg_line_length": 30.894736842105264,
"alnum_prop": 0.6490630323679727,
"repo_name": "wandb/client",
"id": "c035faa85e467c252f95f0f65e2106558de5ea45",
"size": "2348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wandb/sdk/internal/profiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
}
|
"""
Support for Netgear routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.netgear/
"""
import logging
import threading
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_PORT)
from homeassistant.util import Throttle
REQUIREMENTS = ['pynetgear==0.3.3']
_LOGGER = logging.getLogger(__name__)
# Minimum interval between router queries (enforced via @Throttle).
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
# Defaults for a stock Netgear router admin interface.
DEFAULT_HOST = 'routerlogin.net'
DEFAULT_USER = 'admin'
DEFAULT_PORT = 5000
# Extend the shared device_tracker schema: only the password is mandatory.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
    vol.Optional(CONF_USERNAME, default=DEFAULT_USER): cv.string,
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port
})
def get_scanner(hass, config):
    """Validate the configuration and return a Netgear scanner.

    Returns None when the scanner's first login attempt fails.
    """
    conf = config[DOMAIN]
    scanner = NetgearDeviceScanner(
        conf.get(CONF_HOST),
        conf.get(CONF_USERNAME),
        conf.get(CONF_PASSWORD),
        conf.get(CONF_PORT),
    )
    if not scanner.success_init:
        return None
    return scanner
class NetgearDeviceScanner(DeviceScanner):
    """Queries a Netgear wireless router using the SOAP-API."""

    def __init__(self, host, username, password, port):
        """Initialize the scanner and verify connectivity.

        Performs one query against the router so ``success_init`` reflects
        whether the credentials/host are usable; get_scanner() uses it to
        reject a broken scanner.
        """
        import pynetgear

        self.last_results = []
        self.lock = threading.Lock()
        self._api = pynetgear.Netgear(password, host, username, port)

        _LOGGER.info("Logging in")
        results = self._api.get_attached_devices()

        self.success_init = results is not None

        if self.success_init:
            self.last_results = results
        else:
            _LOGGER.error("Failed to Login")

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        # Fix: previously returned a one-shot generator although the
        # docstring promises a list; a list can be iterated repeatedly.
        return [device.mac for device in self.last_results]

    def get_device_name(self, mac):
        """Return the name of the given device or None if we don't know."""
        try:
            return next(device.name for device in self.last_results
                        if device.mac == mac)
        except StopIteration:
            return None

    @Throttle(MIN_TIME_BETWEEN_SCANS)
    def _update_info(self):
        """Retrieve latest information from the Netgear router.

        Returns boolean if scanning successful.
        """
        if not self.success_init:
            return

        with self.lock:
            _LOGGER.info("Scanning")
            results = self._api.get_attached_devices()

            if results is None:
                _LOGGER.warning("Error scanning devices")

            # Fall back to an empty list so scan_devices() never sees None.
            self.last_results = results or []
|
{
"content_hash": "f4701f2438f3e5bf2dada8f191577d75",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 75,
"avg_line_length": 28.923809523809524,
"alnum_prop": 0.6608495225551531,
"repo_name": "happyleavesaoc/home-assistant",
"id": "b3ec442198ebf145564edec0e3eeb634d08f9b84",
"size": "3037",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/device_tracker/netgear.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1952235"
},
{
"name": "Python",
"bytes": "6672532"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "14949"
}
],
"symlink_target": ""
}
|
"""Sphinx build configuration for the blockflow documentation."""
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import blockflow
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'blockflow'
copyright = u"2017, Stuart Berg"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = blockflow.__version__
# The full version, including alpha/beta/rc tags.
release = blockflow.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'blockflowdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'blockflow.tex',
     u'blockflow Documentation',
     u'Stuart Berg', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'blockflow',
     u'blockflow Documentation',
     [u'Stuart Berg'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'blockflow',
     u'blockflow Documentation',
     u'Stuart Berg',
     'blockflow',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "776da83e8e24c736030985d868b4f685",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 30.634615384615383,
"alnum_prop": 0.7026993094789705,
"repo_name": "stuarteberg/blockflow",
"id": "29286afbff5403aa91c2adcb505b06dfea092a8a",
"size": "8409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2282"
},
{
"name": "Python",
"bytes": "33067"
}
],
"symlink_target": ""
}
|
"""
:copyright: 2010-2015 by Ronny Pfannschmidt
:license: MIT
"""
import os
import sys
from .utils import trace
from .version import format_version
from .discover import find_matching_entrypoint
# Template written by dump_version() for *.py targets; the generated file
# is self-describing so users know not to edit or track it.
PYTHON_TEMPLATE = """\
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = {version!r}
"""
# Python 2/3 compatibility: `unicode` only exists on Python 2.
PY3 = sys.version_info > (3,)
string_types = (str,) if PY3 else (str, unicode)  # noqa
def version_from_scm(root):
    """Return the SCM-derived version information for *root*.

    Looks up a 'setuptools_scm.parse_scm' entrypoint matching the root
    directory and invokes its parser; raises LookupError when no SCM is
    recognized.
    """
    entrypoint = find_matching_entrypoint(root, 'setuptools_scm.parse_scm')
    if not entrypoint:
        raise LookupError('no scm found for %r' % root)
    return entrypoint.load()(root)
def dump_version(root, version, write_to):
    """Write *version* to the file *write_to*, resolved relative to *root*.

    ``*.txt`` targets receive the bare version string; ``*.py`` targets
    receive a small generated module.  A falsy *write_to* is a no-op;
    any other extension raises ValueError.
    """
    if not write_to:
        return
    target = os.path.normpath(os.path.join(root, write_to))
    if target.endswith('.py'):
        contents = PYTHON_TEMPLATE.format(version=version)
    elif target.endswith('.txt'):
        contents = version
    else:
        raise ValueError((
            "bad file format: '%s' (of %s) \n"
            "only *.txt and *.py are supported") % (
                os.path.splitext(target)[1],
                target
            ))
    with open(target, 'w') as fp:
        fp.write(contents)
def get_version(root='.',
                version_scheme='guess-next-dev',
                local_scheme='node-and-date',
                write_to=None):
    """Compute the package version from the SCM state under *root*.

    A plain string from the SCM parser is returned unchanged; structured
    version info is rendered with the given schemes and optionally dumped
    to *write_to*.  Returns None when the parser yields nothing truthy.
    """
    root = os.path.abspath(root)
    trace('root', repr(root))
    version = version_from_scm(root)
    if not version:
        return None
    if isinstance(version, string_types):
        return version
    formatted = format_version(
        version,
        version_scheme=version_scheme,
        local_scheme=local_scheme)
    dump_version(root=root, version=formatted, write_to=write_to)
    return formatted
|
{
"content_hash": "73c8f3dad6297b6bf9ea835fe12f3d89",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 67,
"avg_line_length": 26.46969696969697,
"alnum_prop": 0.6044647967945048,
"repo_name": "untitaker/setuptools_scm",
"id": "d1ec09c6ba956fcc15fd3bed5b57ba7f74c5a9d6",
"size": "1747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setuptools_scm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "73"
},
{
"name": "Python",
"bytes": "23701"
}
],
"symlink_target": ""
}
|
import os
from os.path import join
import sys
import json
import csv
import subprocess
import shutil
import itertools
from django.views.generic import ListView, DetailView
from django.views.generic.base import ContextMixin
from django.views.generic.edit import CreateView, DeleteView, UpdateView
from django.contrib.messages.views import SuccessMessageMixin
from django.apps import apps
from django.http import HttpResponse
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from django.core import serializers
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from base.models import Project
from apps.crawl_space.models import Crawl, CrawlModel
from apps.crawl_space.forms import AddCrawlForm, AddCrawlModelForm, CrawlSettingsForm
from apps.crawl_space.utils import touch
from apps.crawl_space.viz.plot import AcheDashboard, NutchDashboard
from apps.crawl_space.settings import CRAWL_PATH, IMAGES_PATH, CCA_PATH
from task_manager.tika_tasks import create_index
from task_manager.crawl_tasks import cca_dump, nutch, ache, ache_log_statistics
import celery
from redis.connection import ConnectionError
class ProjectObjectMixin(ContextMixin):
    # Mixin for views nested under a project URL: resolves the Project from
    # the `project_slug` URL kwarg and exposes it to templates and helpers.

    def get_project(self):
        """Return the Project identified by the `project_slug` URL kwarg."""
        return Project.objects.get(slug=self.kwargs['project_slug'])

    def get_context_data(self, **kwargs):
        # Call the base implementation first to get a context
        context = super(ProjectObjectMixin, self).get_context_data(**kwargs)
        context['project'] = self.get_project()
        return context

    def get_success_url(self):
        """
        Prepend the hostname and the port to the path for an object.
        """
        return self.get_project().get_absolute_url()

    def handle_form_submit(self, request, form):
        # AJAX form handler: returns a JSON description of the saved object
        # (HTTP 200) or the form errors (HTTP 500).
        if form.is_valid():
            # Bind the new object to the current project before saving.
            form.instance.project = self.get_project()
            self.object = form.save()
            return HttpResponse(
                json.dumps({
                    "url": self.object.get_absolute_url(),
                    "id": self.object.id,
                    "name": self.object.name,
                    "slug": self.object.slug,
                    "project_id": self.object.project.id,
                }),
                status=200,
                content_type="application/json"
            )
        else:
            return HttpResponse(
                json.dumps({
                    "form_errors": form.errors,
                }),
                status=500,
                content_type="application/json",
            )
class AddCrawlView(SuccessMessageMixin, ProjectObjectMixin, CreateView):
    """Create a new Crawl within a project (regular or AJAX submission)."""
    model = Crawl
    form_class = AddCrawlForm
    template_name = "crawl_space/add_crawl.html"
    success_message = "Crawl %(name)s was saved successfully."

    def get_success_url(self):
        # Redirect to the newly created crawl's own page.
        return self.object.get_absolute_url()

    def post(self, request, *args, **kwargs):
        """
        Check if a seed list file was supplied. If not, convert the content of
        the textseeds value to an in-memory file.
        """
        if request.POST.get('textseeds', False) and not request.FILES.get("seeds_list", False):
            # NOTE(review): bytes(<str>) without an encoding raises TypeError
            # on Python 3 -- this call assumes Python 2 semantics; confirm.
            # NOTE(review): SimpleUploadedFile's third positional argument is
            # the content type; 'utf-8' looks like it was meant as an encoding.
            request.FILES["seeds_list"] = SimpleUploadedFile(
                'seeds',
                bytes(request.POST["textseeds"]),
                'utf-8'
            )
        form = AddCrawlForm(request.POST, request.FILES)
        # Let add crawl work normally if it is not dealing with an xmlhttprequest.
        if request.is_ajax():
            return self.handle_form_submit(request, form)
        else:
            return super(AddCrawlView, self).post(request, *args, **kwargs)

    def form_valid(self, form):
        # Attach the crawl to its parent project before saving.
        form.instance.project = self.get_project()
        return super(AddCrawlView, self).form_valid(form)
class ListCrawlsView(ProjectObjectMixin, ListView):
    """List all crawls (template filters by the current project)."""
    model = Crawl
    template_name = "crawl_space/crawls.html"
class CrawlView(ProjectObjectMixin, DetailView):
model = Crawl
template_name = "crawl_space/crawl.html"
def post(self, request, *args, **kwargs):
crawl_object = self.get_object()
# Start
if request.POST['action'] == "start":
# Try to ping celery to see if it is ready. If the response is an
# empty list, status is NOT READY. If there is an error connecting to
# with redis, celery status is REDIS ERROR.
try:
celery_status = "READY" if celery.current_app.control.ping() else "CELERY ERROR"
except ConnectionError:
celery_status = "REDIS ERROR"
if celery_status in ["REDIS ERROR", "CELERY ERROR"]:
crawl_object.status = celery_status
crawl_object.save()
return HttpResponse(json.dumps(dict(
status=crawl_object.status,
)),
content_type="application/json")
else:
crawl_object.status = "STARTING"
crawl_object.save()
if crawl_object.crawler == "ache":
ache.delay(crawl_object)
else:
crawl_object.rounds_left = int(request.POST["rounds"])
crawl_object.save()
nutch.delay(crawl_object)
return HttpResponse(json.dumps(dict(
status=crawl_object.status,
)),
content_type="application/json")
# Stop
elif request.POST['action'] == "stop":
crawl_path = crawl_object.get_crawl_path()
if crawl_object.crawler == "ache":
crawl_object.status = "STOPPED"
crawl_object.save()
os.killpg(crawl_object.celerytask.pid, 9)
if crawl_object.crawler == "nutch":
crawl_object.status = "STOPPING"
crawl_object.save()
return HttpResponse(json.dumps(dict(
status="STOPPING")),
content_type="application/json")
# Common Crawl Dump
elif request.POST['action'] == "ccadump":
crawl_object.status = "DUMPING"
crawl_object.save()
cca_dump(self.get_object())
crawl_object.status = "SUCCESS"
crawl_object.save()
return HttpResponse("Success")
# Dump Images
elif request.POST['action'] == "dump":
# TODO - restore dump_images
return HttpResponse("Success")
# Update status, statistics
elif request.POST['action'] == "status":
# Do not update the status if the current status is any of
# the following. This is to prevent errors or interface problems
# when checking the status of a celery task.
no_go_statuses = [
"FINISHING",
"STOPPING",
"REDIS ERROR",
"CELERY ERROR",
"NOT STARTED",
"STOPPED",
"FORCE STOPPED"
]
if crawl_object.status not in no_go_statuses and crawl_object.crawler != 'nutch':
crawl_object.status = crawl_object.celerytask.task.status
crawl_object.save()
if crawl_object.crawler == "ache":
ache_log_statistics(crawl_object)
return HttpResponse(json.dumps(dict(
status=crawl_object.status,
harvest_rate=crawl_object.harvest_rate,
pages_crawled=crawl_object.pages_crawled,
rounds_left=crawl_object.rounds_left,
)),
content_type="application/json")
return HttpResponse(json.dumps(dict(
args=args,
kwargs=kwargs,
post=request.POST)),
content_type="application/json")
def get(self, request, *args, **kwargs):
    """Serve the crawl page, or stream a requested resource as a download.

    Recognized query parameters (``?resource=...``):
      - ``seeds``: relevant seeds discovered by the ACHE dashboard.
      - ``initial_seeds``: the seeds file the crawl was started with.
      - ``crawl_log``: the crawl's ``crawl_proc.log``.

    Any other (or no) query parameters fall back to the regular page
    response.  Previously an unrecognized ``resource`` value fell off the
    end of the method and returned ``None``, which Django reports as a
    500 ("view didn't return an HttpResponse").

    :param request: the current HttpRequest
    :returns: HttpResponse (attachment for resource downloads)
    """
    resource = request.GET.get('resource')
    if resource == "seeds":
        seeds = self.get_ache_dashboard().get_relevant_seeds()
        response = HttpResponse(content_type='text/plain')
        response['Content-Disposition'] = 'attachment; filename=relevant_seeds.txt'
        # Relevant seeds come back without trailing newlines; join adds them.
        response.write('\n'.join(seeds))
        return response
    elif resource == "initial_seeds":
        seeds = self.get_seeds_list()
        response = HttpResponse(content_type='text/plain')
        response['Content-Disposition'] = 'attachment; filename=seeds.txt'
        # readlines() keeps each line's newline, so a plain join suffices.
        response.write(''.join(seeds))
        return response
    elif resource == "crawl_log":
        crawl_log = self.get_crawl_log()
        response = HttpResponse(content_type='text/plain')
        response['Content-Disposition'] = 'attachment; filename=crawl_log.txt'
        response.write(crawl_log)
        return response
    # No parameters, or an unrecognized resource: render the page normally.
    return super(CrawlView, self).get(request, *args, **kwargs)
def get_crawl_log(self):
    """Return the contents of this crawl's ``crawl_proc.log`` as one string."""
    crawl_dir = self.get_object().get_crawl_path()
    log_path = os.path.join(crawl_dir, "crawl_proc.log")
    with open(log_path) as log_file:
        # read() is equivalent to ''.join(readlines()) in a single call.
        return log_file.read()
def get_seeds_path(self):
    """Filesystem path of the seeds-list file attached to this crawl."""
    crawl = self.get_object()
    return crawl.seeds_list.path
def get_seeds_list(self, lines=None):
    """Return the crawl's seed URLs as a list of lines.

    :param lines: when truthy, read at most this many lines; otherwise
        (including 0/None) return the whole file.
    :returns: list of strings, each keeping its trailing newline
    """
    with open(self.get_seeds_path()) as seeds_file:
        if not lines:
            return seeds_file.readlines()
        return list(itertools.islice(seeds_file, lines))
def get_object(self):
    """Look up the Crawl identified by the current project and URL slug."""
    slug = self.kwargs['crawl_slug']
    return Crawl.objects.get(project=self.get_project(), slug=slug)
def get_ache_dashboard(self):
    """Build an ACHE dashboard wrapper around this view's crawl."""
    crawl = self.get_object()
    return AcheDashboard(crawl)
def get_context_data(self, **kwargs):
    """Add project, a seed preview, settings, and dashboard plots to context.

    The original had two byte-identical branches for the ache and nutch
    crawlers differing only in the dashboard class; both dashboards expose
    the same ``get_plots()`` interface, so the class is selected from a
    dispatch table instead.
    """
    context = super(CrawlView, self).get_context_data(**kwargs)
    crawl = self.get_object()
    context['project'] = self.get_project()
    # Show only the first ten seeds on the page.
    context['seeds'] = self.get_seeds_list(10)
    context['settings'] = settings
    dashboards = {"ache": AcheDashboard, "nutch": NutchDashboard}
    dashboard_class = dashboards.get(crawl.crawler)
    if dashboard_class is not None:
        plots = dashboard_class(crawl).get_plots()
        context['scripts'] = plots['scripts']
        context['divs'] = plots['divs']
    return context
class CrawlSettingsView(SuccessMessageMixin, ProjectObjectMixin, UpdateView):
    """Edit an existing crawl's settings."""

    model = Crawl
    form_class = CrawlSettingsForm
    success_message = "Crawl %(name)s was edited successfully."
    template_name_suffix = '_update_form'

    def get_object(self):
        """Look up the Crawl for the current project and URL slug."""
        slug = self.kwargs['crawl_slug']
        return Crawl.objects.get(project=self.get_project(), slug=slug)
class AddCrawlModelView(SuccessMessageMixin, ProjectObjectMixin, CreateView):
    """Create a new crawl model from an uploaded file."""

    form_class = AddCrawlModelForm
    template_name = "crawl_space/add_crawl_model.html"
    success_message = "Crawl model %(name)s was added successfully."

    def post(self, request, *args, **kwargs):
        """Route AJAX submissions to the custom handler.

        Regular (non-XHR) form posts go through the standard CreateView
        machinery unchanged.
        """
        if not request.is_ajax():
            return super(AddCrawlModelView, self).post(request, *args, **kwargs)
        form = AddCrawlModelForm(request.POST, request.FILES)
        return self.handle_form_submit(request, form)

    def form_valid(self, form):
        """Attach the current project to the model before saving."""
        form.instance.project = self.get_project()
        return super(AddCrawlModelView, self).form_valid(form)
class DeleteCrawlView(SuccessMessageMixin, ProjectObjectMixin, DeleteView):
    """Delete a crawl and its on-disk working directory."""

    model = Crawl
    success_message = "Crawl %(name)s was deleted successfully."

    def delete(self, request, *args, **kwargs):
        """Remove the crawl's folder from disk, then delete the DB record.

        ``ignore_errors=True`` keeps the delete from failing with a 500
        when the directory is already gone (or was never created); the
        database record is removed either way.
        """
        shutil.rmtree(self.get_object().crawl_location, ignore_errors=True)
        return super(DeleteCrawlView, self).delete(request, *args, **kwargs)

    def get_object(self):
        """Look up the Crawl for the current project and URL slug."""
        return Crawl.objects.get(project=self.get_project(),
                                 slug=self.kwargs['crawl_slug'])
class DeleteCrawlModelView(SuccessMessageMixin, ProjectObjectMixin, DeleteView):
    """Delete a crawl model belonging to the current project."""

    model = CrawlModel
    success_message = "Crawl model %(name)s was deleted successfully."

    def get_object(self):
        """Look up the CrawlModel for the current project and URL slug."""
        slug = self.kwargs['model_slug']
        return CrawlModel.objects.get(project=self.get_project(), slug=slug)
|
{
"content_hash": "a10cc8d4dc11b1e291d0d0541820533d",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 96,
"avg_line_length": 38.023809523809526,
"alnum_prop": 0.5973700688791485,
"repo_name": "memex-explorer/memex-explorer",
"id": "096da8c001d85d80efa949a85e2bc2cb27942b57",
"size": "12776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "source/apps/crawl_space/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1416"
},
{
"name": "CSS",
"bytes": "63041"
},
{
"name": "HTML",
"bytes": "76073"
},
{
"name": "JavaScript",
"bytes": "62691"
},
{
"name": "Nginx",
"bytes": "2995"
},
{
"name": "Python",
"bytes": "168826"
},
{
"name": "Shell",
"bytes": "9416"
}
],
"symlink_target": ""
}
|
import json
import mock
import pytest
from pyetcd import EtcdResult, EtcdKeyNotFound
from etcdb import IntegrityError
from etcdb.execute.dml.insert import insert, get_pk_field
from etcdb.lock import WriteLock
from etcdb.resultset import Column
from etcdb.sqlparser.sql_tree import SQLTree
@mock.patch('etcdb.execute.dml.insert.get_pk_field')
@mock.patch.object(WriteLock, 'acquire')
@mock.patch.object(WriteLock, 'release')
def test_insert_duplicate_raises(mock_release, mock_acquire,
                                 mock_get_pk_field):
    """INSERT of an existing primary key must raise IntegrityError.

    The mocked etcd client's read() succeeds by default (plain Mock),
    which the insert path treats as "a row with this key already exists".
    """
    mock_get_pk_field.return_value = Column('id')
    etcd_client = mock.Mock()
    # Equivalent of: INSERT INTO bar(id, name) VALUES (1, 'aaa')
    query_tree = SQLTree()
    query_tree.table = 'bar'
    query_tree.fields = {"id": "1", "name": "aaa"}
    with pytest.raises(IntegrityError):
        # noinspection PyTypeChecker
        insert(etcd_client, query_tree, 'foo')
@mock.patch('etcdb.execute.dml.insert.get_pk_field')
@mock.patch.object(WriteLock, 'acquire')
@mock.patch.object(WriteLock, 'release')
@mock.patch('etcdb.execute.dml.insert._set_next_auto_inc')
def test_insert(mock_set_next_auto_inc,
                mock_release, mock_acquire, mock_get_pk_field):
    """A fresh row is written to /<db>/<table>/<pk> as a JSON document."""
    mock_get_pk_field.return_value = Column('id')
    etcd_client = mock.Mock()
    # read() raising EtcdKeyNotFound means no duplicate row exists.
    etcd_client.read.side_effect = EtcdKeyNotFound
    query_tree = SQLTree()
    query_tree.table = 'bar'
    query_tree.fields = {"id": "1", "name": "aaa"}
    # noinspection PyTypeChecker
    insert(etcd_client, query_tree, 'foo')
    expected_payload = json.dumps(query_tree.fields)
    etcd_client.write.assert_called_once_with('/foo/bar/1', expected_payload)
    mock_set_next_auto_inc.assert_called_once_with(etcd_client, 'foo', 'bar')
def test_get_pk_field():
    """get_pk_field() extracts the primary-key column from the _fields node."""
    # Raw etcd GET response for /foo/bar/_fields; the node's value is the
    # JSON-encoded table schema with 'id' marked primary.
    node = {
        "key": "/foo/bar/_fields",
        "value": '{"id": {"type": "INT", '
                 '"options": {"primary": true, "nullable": false}}, '
                 '"name": {"type": "VARCHAR", '
                 '"options": {"nullable": true}}}',
        "modifiedIndex": 2218,
        "createdIndex": 2218
    }
    mock_response = mock.Mock()
    mock_response.content = json.dumps({"action": "get", "node": node})
    etcd_client = mock.Mock()
    etcd_client.read.return_value = EtcdResult(mock_response)
    assert get_pk_field(etcd_client, 'foo', 'bar') == Column('id')
|
{
"content_hash": "af7131fd59d2edb49137033e138241b1",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 32.416666666666664,
"alnum_prop": 0.5923613661402864,
"repo_name": "box/etcdb",
"id": "0598ed3c470fa902d08ce80be5a51fa8896209d0",
"size": "2723",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/unit/execute/dml/test_insert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2961"
},
{
"name": "Pascal",
"bytes": "43"
},
{
"name": "Puppet",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "226169"
},
{
"name": "Ruby",
"bytes": "1610"
},
{
"name": "Shell",
"bytes": "750"
}
],
"symlink_target": ""
}
|
def extractNinetysevenkoiWordpressCom(item):
    '''
    Parser for 'ninetysevenkoi.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip preview posts and items with neither a chapter nor a volume.
    if "preview" in item['title'].lower() or not (chp or vol):
        return None
    # (tag to match, release name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(
            item, name, vol, chp,
            frag=frag, postfix=postfix, tl_type=tl_type)
    return False
|
{
"content_hash": "ef465667b078f4b286a6ccfd7bb99b37",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27,
"alnum_prop": 0.6402116402116402,
"repo_name": "fake-name/ReadableWebProxy",
"id": "7022f6592028f10360f2c4a24601f61a699eb4e9",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractNinetysevenkoiWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.