text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Generate a config file for fakezk topology."""
__author__ = 'enisoc@google.com (Anthony Yeh)'
import base64
import codecs
import json
class FakeZkConfig(object):
"""Create fakezk config for use as static topology for vtgate."""
def __init__(self, mysql_port, cell='test_cell', host='127.0.0.1'):
self.keyspaces = {}
self.served_from = {}
self.host = host
self.cell = cell
self.mysql_port = mysql_port
def add_shard(self, keyspace, shard, vt_port, grpc_port=None):
"""Add a shard to the config."""
# compute the start and end
start = ''
end = ''
if '-' in shard:
parts = shard.split('-', 2)
start = parts[0]
end = parts[1]
if keyspace not in self.keyspaces:
self.keyspaces[keyspace] = []
self.keyspaces[keyspace].append({
'shard': shard,
'vt_port': vt_port,
'grpc_port': grpc_port,
'start': start,
'end': end,
})
def add_redirect(self, from_keyspace, to_keyspace):
"""Set a keyspace to be ServedFrom another."""
self.served_from[from_keyspace] = to_keyspace
def keyspace_id_as_base64(self, s):
raw = codecs.decode(s, 'hex')
return base64.b64encode(raw)
def as_json(self):
"""Return the config as JSON. This is a proto3 version of SrvKeyspace."""
result = {}
tablet_types_str = ['master', 'replica', 'rdonly']
tablet_types_int = [2, 3, 4]
sharding_colname = 'keyspace_id'
sharding_coltype = 1
for keyspace, shards in self.keyspaces.iteritems():
shard_references = []
for shard in shards:
key_range = {}
if shard['start']:
key_range['start'] = self.keyspace_id_as_base64(shard['start'])
if shard['end']:
key_range['end'] = self.keyspace_id_as_base64(shard['end'])
shard_references.append({
'name': shard['shard'],
'key_range': key_range,
})
for dbtype in tablet_types_str:
path = '/zk/%s/vt/ns/%s/%s/%s' % (self.cell, keyspace,
shard['shard'], dbtype)
port_map = {
'mysql': self.mysql_port,
'vt': shard['vt_port'],
}
if shard['grpc_port']:
port_map['grpc'] = shard['grpc_port']
result[path] = {
'entries': [
{
'uid': 0,
'host': self.host,
'port_map': port_map,
},
],
}
path = '/zk/%s/vt/ns/%s' % (self.cell, keyspace)
partitions = []
for tablet_type in tablet_types_int:
partitions.append({
'served_type': tablet_type,
'shard_references': shard_references,
})
result[path] = {
'partitions': partitions,
'sharding_column_name': sharding_colname,
'sharding_column_type': sharding_coltype,
}
for from_keyspace, to_keyspace in self.served_from.iteritems():
path = '/zk/%s/vt/ns/%s' % (self.cell, from_keyspace)
served_from = []
for dbtype in tablet_types_int:
served_from.append({
'tablet_type': dbtype,
'keyspace': to_keyspace,
})
result[path] = {
'served_from': served_from,
}
return json.dumps(result)
|
{
"content_hash": "b25c3dafa0592169251c03904d364434",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 77,
"avg_line_length": 29.5,
"alnum_prop": 0.5295866785608088,
"repo_name": "yangzhongj/vitess",
"id": "85db901b9805c39c4afbb03ea5429a0f2281e05f",
"size": "3414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/vttest/fakezk_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "40319"
},
{
"name": "CSS",
"bytes": "228500"
},
{
"name": "Go",
"bytes": "4725712"
},
{
"name": "HTML",
"bytes": "86850"
},
{
"name": "Java",
"bytes": "254073"
},
{
"name": "JavaScript",
"bytes": "76067"
},
{
"name": "Liquid",
"bytes": "17316"
},
{
"name": "Makefile",
"bytes": "5231"
},
{
"name": "PHP",
"bytes": "56095"
},
{
"name": "PLpgSQL",
"bytes": "10220"
},
{
"name": "Protocol Buffer",
"bytes": "63559"
},
{
"name": "Python",
"bytes": "1001320"
},
{
"name": "Ruby",
"bytes": "466"
},
{
"name": "Shell",
"bytes": "25588"
},
{
"name": "Yacc",
"bytes": "19014"
}
],
"symlink_target": ""
}
|
"""Bucket sort task for generalization."""
import functools
from typing import Mapping
import chex
import jax
from jax import nn as jnn
from jax import numpy as jnp
from jax import random as jrandom
from neural_networks_chomsky_hierarchy.tasks import task
class BucketSort(task.GeneralizationTask):
"""A task which goal is to sort tokens from a fixed alphabet.
The input string is composed of tokens from a fixed-size alphabet, i.e.,
`{0, 1, ..., vocab_size - 1}`, and the goal is to return the sorted string (in
lexicographically increasing order).
Examples:
10204112 -> 00111224 (with `vocab_size = 5`)
1110001 -> 0001111 (with `vocab_size = 2`)
"""
def __init__(self, *args, vocab_size: int = 5, **kwargs) -> None:
"""Initializes the task.
Args:
*args: The args for the base task class.
vocab_size: The size of the alphabet.
**kwargs: The kwargs for the base task class.
"""
super().__init__(*args, **kwargs)
self._vocab_size = vocab_size
@functools.partial(jax.jit, static_argnums=(0, 2, 3))
def sample_batch(
self,
rng: chex.PRNGKey,
batch_size: int,
length: int,
) -> Mapping[str, chex.Array]:
"""Returns a batch of strings and tokens sorted by (inc.) occurrence."""
strings = jrandom.randint(
rng, shape=(batch_size, length), minval=0, maxval=self._vocab_size)
sorted_strings = jnp.sort(strings, axis=-1)
return {
'input': jnn.one_hot(strings, num_classes=self.input_size),
'output': jnn.one_hot(sorted_strings, num_classes=self.output_size),
}
@property
def input_size(self) -> int:
"""Returns the input size for the models."""
return self._vocab_size
@property
def output_size(self) -> int:
"""Returns the output size for the models."""
return self._vocab_size
def output_length(self, input_length: int) -> int:
"""Returns the output length for a given input length."""
return input_length
|
{
"content_hash": "60865df6c831dd2b2696c18dc47346de",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 29.686567164179106,
"alnum_prop": 0.6561085972850679,
"repo_name": "deepmind/neural_networks_chomsky_hierarchy",
"id": "7415a757df96ccda4eb47cc4cfbe8f34a030077f",
"size": "2663",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tasks/cs/bucket_sort.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "154821"
}
],
"symlink_target": ""
}
|
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField
from wtforms.validators import Required
class LoginForm(Form):
openid = TextField('openid', validators = [Required()])
remember_me = BooleanField('remember_me', default = False)
|
{
"content_hash": "be8102ef28268086b636e07457f1d566",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 62,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.7453183520599251,
"repo_name": "code-haven/flask-microblog",
"id": "6d88d413e1ed616cfc66ce3076706d405fc5fccd",
"size": "267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "application/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "337762"
},
{
"name": "JavaScript",
"bytes": "617143"
},
{
"name": "Python",
"bytes": "1661"
}
],
"symlink_target": ""
}
|
import boto3
# Create SQS client
sqs = boto3.client('sqs')
queue_url = 'SQS_QUEUE_URL'
# Send message to SQS queue
response = sqs.send_message(
QueueUrl=queue_url,
DelaySeconds=10,
MessageAttributes={
'Title': {
'DataType': 'String',
'StringValue': 'The Whistler'
},
'Author': {
'DataType': 'String',
'StringValue': 'John Grisham'
},
'WeeksOn': {
'DataType': 'Number',
'StringValue': '6'
}
},
MessageBody=(
'Information about current NY Times fiction bestseller for '
'week of 12/11/2016.'
)
)
print(response['MessageId'])
|
{
"content_hash": "65ad82c1908433b662cfe9e28713120a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 20.848484848484848,
"alnum_prop": 0.5305232558139535,
"repo_name": "imshashank/aws-doc-sdk-examples",
"id": "3eb4bb45008047ffe3fcfe6080fcbff603e48b7d",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/example_code/sqs/send_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "105917"
},
{
"name": "C++",
"bytes": "157148"
},
{
"name": "CMake",
"bytes": "5136"
},
{
"name": "Go",
"bytes": "136009"
},
{
"name": "Java",
"bytes": "216776"
},
{
"name": "JavaScript",
"bytes": "101864"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "PHP",
"bytes": "148580"
},
{
"name": "Python",
"bytes": "31842"
},
{
"name": "Ruby",
"bytes": "112274"
},
{
"name": "Shell",
"bytes": "1348"
}
],
"symlink_target": ""
}
|
import logging
from types import NoneType
from enum import Enum
from framework.plugin_loader import get_plugin, get_config
from intercom import ResourceNotFound
from intercom.tag import Tag
from intercom.user import User
from mcfw.rpc import arguments, returns
from plugins.intercom_support.intercom_support_plugin import IntercomSupportPlugin
from plugins.intercom_support.plugin_consts import NAMESPACE as INTERCOM_NAMESPACE
from plugins.tff_backend.models.user import TffProfile
from plugins.tff_backend.plugin_consts import NAMESPACE
class IntercomTags(Enum):
HOSTER = 'Hoster'
ITFT_PURCHASER = 'iTFT Purchaser'
TFT_PURCHASER = 'TFT Purchaser'
ITO_INVESTOR = 'ITO Investor'
APP_REGISTER = 'appregister'
BETTERTOKEN_CONTRACT = 'Bettertoken contract'
GREENITGLOBE_CONTRACT = 'GreenITGlobe contract'
def get_intercom_plugin():
intercom_plugin = get_plugin(INTERCOM_NAMESPACE) # type: IntercomSupportPlugin
if intercom_plugin:
assert isinstance(intercom_plugin, IntercomSupportPlugin)
return intercom_plugin
@returns(User)
@arguments(username=unicode, profile=(TffProfile, NoneType))
def upsert_intercom_user(username, profile=None):
# type: (unicode, TffProfile) -> User
intercom_plugin = get_intercom_plugin()
def _upsert(username, profile):
# type: (unicode, TffProfile) -> User
return intercom_plugin.upsert_user(username, profile.info.name, profile.info.email, None)
if profile:
return _upsert(username, profile)
else:
try:
return intercom_plugin.get_user(user_id=username)
except ResourceNotFound:
return _upsert(username, TffProfile.create_key(username).get())
def send_intercom_email(iyo_username, subject, message):
intercom_plugin = get_intercom_plugin()
if intercom_plugin:
from_ = {'type': 'admin', 'id': get_config(NAMESPACE).intercom_admin_id}
to_user = upsert_intercom_user(iyo_username)
if to_user.unsubscribed_from_emails:
logging.warning('Not sending email via intercom, user %s is unsubscribed from emails.', to_user.id)
return None
to = {'type': 'user', 'id': to_user.id}
return intercom_plugin.send_message(from_, message, message_type='email', subject=subject, to=to)
logging.debug('Not sending email with subject "%s" via intercom because intercom plugin was not found', subject)
return None
@returns(Tag)
@arguments(tag=(IntercomTags, unicode), iyo_usernames=[unicode])
def tag_intercom_users(tag, iyo_usernames):
if isinstance(tag, IntercomTags):
tag = tag.value
users = [{'user_id': username} for username in iyo_usernames]
return get_intercom_plugin().tag_users(tag, users)
|
{
"content_hash": "8a772c2058fcb75d16b5c05445e6a1e0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 116,
"avg_line_length": 38.15277777777778,
"alnum_prop": 0.7186021113942482,
"repo_name": "threefoldfoundation/app_backend",
"id": "e221c5273ce060bb44bf667e80462c13a3f7f473",
"size": "3380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/tff_backend/bizz/intercom_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8307"
},
{
"name": "HTML",
"bytes": "88477"
},
{
"name": "JavaScript",
"bytes": "1870"
},
{
"name": "Python",
"bytes": "430948"
},
{
"name": "TypeScript",
"bytes": "217217"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='sites',
field=models.ManyToManyField(to='sites.Site', through='accounts.UserSite', blank=True),
preserve_default=True,
),
]
|
{
"content_hash": "1c2af0655e866484841b65194946bda6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 99,
"avg_line_length": 24,
"alnum_prop": 0.6052631578947368,
"repo_name": "ugoertz/django-familio",
"id": "a92b2ef906c55cc50330f48ec1afd1837b24f222",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/migrations/0002_auto_20150215_1205.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "61023"
},
{
"name": "HTML",
"bytes": "632961"
},
{
"name": "JavaScript",
"bytes": "1352913"
},
{
"name": "Makefile",
"bytes": "1735"
},
{
"name": "Python",
"bytes": "532976"
},
{
"name": "Shell",
"bytes": "352"
},
{
"name": "TeX",
"bytes": "16522"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os.path
import glob
import errno
from lnc.plugins.base_plugin import BasePlugin
from lnc.lib.process import cmd_try_run, cmd_run, _COMMAND_NOT_FOUND_MSG
from lnc.lib.io import mkdir_p, filter_regexp, needs_update
from lnc.lib.exceptions import ProgramError
def handler(info):
try:
os.remove(info["output"])
except OSError as err:
if err.errno != errno.ENOENT:
raise
cmd_run(["c44", info["input"], info["output"]])
class Plugin(BasePlugin):
def test(self):
self._check_target_options(["in-cache-dir",
"out-cache-dir",
"djvu-file"])
cmd_try_run("c44", fail_msg=_COMMAND_NOT_FOUND_MSG.format(
command="c44",
package="DjVuLibre"))
cmd_try_run("djvm", fail_msg=_COMMAND_NOT_FOUND_MSG.format(
command="djvm",
package="DjVuLibre"))
def before_tasks(self):
out_cache_dir = self._get_option("out-cache-dir")
djvu_file = self._get_option("djvu-file")
mkdir_p(out_cache_dir)
mkdir_p(os.path.dirname(djvu_file))
def get_tasks(self):
in_cache_dir = self._get_option("in-cache-dir")
out_cache_dir = self._get_option("out-cache-dir")
imgs = filter_regexp(in_cache_dir, r"^[0-9]+[.].*$")
res = []
for img in imgs:
num = int(img[:img.index(".")])
x = {
"__handler__": handler,
"input": os.path.join(in_cache_dir, img),
"output": os.path.join(out_cache_dir, "%04d.djvu" % num)
}
if needs_update(x["input"], x["output"]):
res.append(x)
return res
def after_tasks(self):
out_cache_dir = self._get_option("out-cache-dir")
djvu_file = self._get_option("djvu-file")
input_files = sorted(glob.glob(os.path.join(out_cache_dir, "*.djvu")))
if len(input_files) == 0:
raise ProgramError(_("No input files."))
cmd_run(["djvm", "-create", djvu_file] + input_files)
|
{
"content_hash": "73e8ec06a16a7866568f73e805ed4c0a",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 32.65151515151515,
"alnum_prop": 0.5470997679814386,
"repo_name": "atrosinenko/lecture-notes-compiler",
"id": "49adf8d60619eee06a5ffc95646ecacd835e722c",
"size": "2155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lnc/plugins/djvu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83886"
}
],
"symlink_target": ""
}
|
import os
import json
inverse_ohlc_ratio = None
sym_sid_map = None
sid_sym_map = None
sid_name_map = None
ohlc_ratio = None
inverse_ohlc_ratio = None
inverse_ohlc_ratio_instrument = None
def sid(symbol):
"""
Returns the arbitrary id assigned to the instrument
symbol.
See broker/oanda_instruments.json
Return
------
sid : int
"""
global sym_sid_map
if sym_sid_map is None:
load_instruments_info()
return sym_sid_map[symbol]
def symbol(sid):
global sid_sym_map
if sid_sym_map is None:
load_instruments_info()
return sid_sym_map[sid]
def display_name(sid):
global sid_name_map
if sid_name_map is None:
load_instruments_info()
return sid_name_map[sid]
def multiplier(instrument):
global ohlc_ratio
if ohlc_ratio is None:
load_instruments_info()
return ohlc_ratio[instrument]
def float_multiplier(sid):
global inverse_ohlc_ratio
if inverse_ohlc_ratio is None:
load_instruments_info()
return inverse_ohlc_ratio[sid]
def float_multiplier_inst(instrument):
global inverse_ohlc_ratio_instrument
if inverse_ohlc_ratio_instrument is None:
load_instruments_info()
return inverse_ohlc_ratio_instrument[instrument]
def load_instruments_info():
global sym_sid_map
global sid_sym_map
global sid_name_map
global ohlc_ratio
global inverse_ohlc_ratio
global inverse_ohlc_ratio_instrument
dir_path = os.path.dirname(os.path.realpath(__file__))
with open('{}/../broker/oanda_instruments.json'.format(dir_path)) as data_file:
instruments = json.load(data_file)
sid_sym_map = {i['sid']: i['instrument'] for i in instruments}
sym_sid_map = {i['instrument']: i['sid'] for i in instruments}
sid_name_map = {i['sid']: i['displayName'] for i in instruments}
ohlc_ratio = {i['instrument']: int(100 * 1 / float(i['pip'])) for i in instruments}
inverse_ohlc_ratio = {i['sid']: float(i['pip'])/100.0 for i in instruments}
inverse_ohlc_ratio_instrument = {i['instrument']: float(i['pip'])/100.0 for i in instruments}
|
{
"content_hash": "c028dd596ea016788724e90ccabe6082",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 101,
"avg_line_length": 27.265822784810126,
"alnum_prop": 0.6541318477251625,
"repo_name": "bernoullio/toolbox",
"id": "1bf11074fffaf5f27ffb7c34d6051668ff8187b6",
"size": "2154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forex_toolbox/utils/instrument.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153307"
}
],
"symlink_target": ""
}
|
import pytest
import unittest
from modules.sfp_onyphe import sfp_onyphe
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleOnyphe(unittest.TestCase):
def test_opts(self):
module = sfp_onyphe()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_onyphe()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_onyphe()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_onyphe()
self.assertIsInstance(module.producedEvents(), list)
def test_handleEvent_no_api_key_should_set_errorState(self):
sf = SpiderFoot(self.default_options)
module = sfp_onyphe()
module.setup(sf, dict())
target_value = 'example target value'
target_type = 'IP_ADDRESS'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
self.assertTrue(module.errorState)
|
{
"content_hash": "48af1b583984f4533bc6fd6cd3fb0282",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 81,
"avg_line_length": 29.53061224489796,
"alnum_prop": 0.663441603317208,
"repo_name": "smicallef/spiderfoot",
"id": "2a7a930b77e07a6fc8ecf2cbd66e31b81744d9ae",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit/modules/test_sfp_onyphe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9833"
},
{
"name": "Dockerfile",
"bytes": "2779"
},
{
"name": "JavaScript",
"bytes": "34248"
},
{
"name": "Python",
"bytes": "2845553"
},
{
"name": "RobotFramework",
"bytes": "7584"
},
{
"name": "Shell",
"bytes": "1636"
}
],
"symlink_target": ""
}
|
from commands.cmds.BaseCommand import *
from packet.send import chat
class cmd(BaseCommand):
def process(self):
self.osc = self.cmdobj['scope']
chat(self.osc, self.jsconcat(), 1)
def jsconcat(self):
self.plugins = self.osc.plugins
self.pcount = len(self.plugins)
return "Plugins (" + str(self.pcount) + "): " + ", ".join(self.plugins)
|
{
"content_hash": "7f965cd9a93c4d797145004df26a8f1c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 79,
"avg_line_length": 34.81818181818182,
"alnum_prop": 0.6292428198433421,
"repo_name": "Armored-Dragon/pymineserver",
"id": "4f32652d741b3847af3e882303c24d3f9a5abda6",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/cmds/CommandPlugins.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "17913"
}
],
"symlink_target": ""
}
|
import os
from nose import SkipTest
from funtests import transport
class test_SQS(transport.TransportCase):
transport = 'SQS'
prefix = 'sqs'
event_loop_max = 100
message_size_limit = 4192 # SQS max body size / 2.
reliable_purge = False
#: does not guarantee FIFO order, even in simple cases
suppress_disorder_warning = True
def before_connect(self):
try:
import boto # noqa
except ImportError:
raise SkipTest('boto not installed')
if 'AWS_ACCESS_KEY_ID' not in os.environ:
raise SkipTest('Missing envvar AWS_ACCESS_KEY_ID')
if 'AWS_SECRET_ACCESS_KEY' not in os.environ:
raise SkipTest('Missing envvar AWS_SECRET_ACCESS_KEY')
def after_connect(self, connection):
connection.channel().sqs
|
{
"content_hash": "b3ebe94ccbf93a5cda6ec0285b11eaa6",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 66,
"avg_line_length": 29.25,
"alnum_prop": 0.645909645909646,
"repo_name": "jindongh/kombu",
"id": "31f689b21646e1e07a2bd799b7d047689ea862a3",
"size": "819",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "funtests/tests/test_SQS.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1501"
},
{
"name": "Python",
"bytes": "978206"
},
{
"name": "Shell",
"bytes": "1830"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class ImagerProfileConfig(AppConfig):
name = 'imager_profile'
def ready(self):
from imager_profile import handlers
|
{
"content_hash": "b81e4d0536de2e764c2aca90f10ba831",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 21,
"alnum_prop": 0.7261904761904762,
"repo_name": "nadiabahrami/django-imager",
"id": "48b4d32603cc4235e00d0537e51207738123a9be",
"size": "168",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68384"
},
{
"name": "HTML",
"bytes": "27306"
},
{
"name": "JavaScript",
"bytes": "14517"
},
{
"name": "Python",
"bytes": "37251"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from google.appengine.ext import ndb
import model
import util
import config
from .counter import CountableLazy
from .icon import Iconize
from .collection import Collection, AddCollection
"""
A tag consists of three model classes:
TagStructure: Which holds all tag specific data but no additional information.
Tag: The 'Tag' model contains additional information for a tag like a counter
and collection. A 'TagStructure' is return by 'get_tag()'.
TagRelation: Which saves relations between tags.
For each tag exists a toplevel tag which can have children grouped by a collection.
Once a tag is created it should not be changed anymore.
If one of the children's counter is updated the topevel tag counter is updated
as well.
The highest toplevel has the default collection Collection.top_key().
A tag key look as follow : 'tag__{name}_{collection}'.
"""
class TagValidator(model.BaseValidator):
@classmethod
def name(cls,tag):
"""Validates and proccesses a tag name:
strip it and lower it if shorther than 4 letters.
'tag' can also be a list with tags!
"""
if not tag:
return None
if isinstance(tag,list) or isinstance(tag,tuple):
tags = []
for t in tag:
tags.append(cls.name(t))
return list(set(tags))
tag = util.constrain_string(tag,1,20)
return tag.lower().strip() if len(tag) > 4 else tag.strip()
class TagStructure(ndb.Model): # use the counter mixin
"""Basic tag class
"""
icon_id = ndb.IntegerProperty(indexed=True,required=True,default=0)
icon_url = ndb.StringProperty(required=True,default="",indexed=False)
name = ndb.StringProperty(indexed=True,required=True)
color = ndb.StringProperty(indexed=True,required=True)
class Tag(Iconize, CountableLazy, AddCollection, model.Base):
"""Tag Model
The key should have the following from: tag__{name}_{collection}"""
def _validate_tag(p,v):
""" Internal validate method, see below """
return TagValidator.name(v)
# only names longer than 4 chars are saved as lower chars
name = ndb.StringProperty(indexed=True,required=True,
validator=_validate_tag)
color = ndb.StringProperty(indexed=True,required=True,default='')
approved = ndb.BooleanProperty(required=True,default=False)
# category can be sofar: 'level', 'waypoint' , 'route'
category = ndb.StringProperty(indexed=True,repeated=True,\
choices=['level','waypoint','route'])
@classmethod
def validate_tag(cls,tag):
""" Validates and proccesses a tag name:
strip it and lower it if shorther than 4 letters.
'tag' can also be a list with tags!
"""
print "[model/tag.py] dont use 'validate_tag' anymore!"
print "[model/tag.py] replace it with 'TagValidator.name(tag)'"
return TagValidator.name(tag)
def get_tag(self):
""" Returns a TagStructure.
Should be used instead of directly accessing the properties for a tag.
This can be saved as a property by other models.
"""
#if self.key is None:
#raise UserWarning("Key not set yet, use first 'put()' before you use this method.")
#self.icon.icon_key = self.key
# TODO detel icon!
icon_url = getattr(self,'icon_url')
icon_id = getattr(self,'icon_id')
color = getattr(self,'color')
if not icon_url or not color:
# take parent icon url
if getattr(self,'toplevel') and getattr(self,'collection') != Collection.top_key():
parent_db = self.toplevel.get().get_tag()
if parent_db:
icon_url = parent_db.icon_url if not icon_url else icon_url
icon_id = parent_db.icon_id if not icon_url else icon_id
color = parent_db.color if not color else color
return TagStructure(name=self.name,\
color=color or 'blue',icon_id=icon_id,\
icon_url=icon_url)
# TODO write tests
def related(self,char_limit=15,word_limit=None,char_space=4):
word_limit = word_limit or int(char_limit/5)+3
dbs, cursor = model.TagRelation.get_dbs(tag_name=self.name,\
collection=self.collection,limit=word_limit,\
order='-cnt')
# count chars
char_cnt = 0
out = False
more=False
new_dbs = []
for db in dbs:
if out:
more=True
break
char_cnt += len(db.related_to) + char_space
if char_cnt > char_limit:
out=True
new_dbs.append(db)
if char_cnt > char_limit+int(char_space*2):
del new_dbs[-1]
more=True
return new_dbs, more
@staticmethod
def tag_to_keyname(name,collection=None):
"""Returns a key name (string)"""
col = collection or Collection.top_key()
return "tag__{}_{}".format(TagValidator.name(name), col.id())
@staticmethod
def tag_to_key(name, collection=None):
"""Returns a key"""
return ndb.Key("Tag", Tag.tag_to_keyname(name,collection))
@staticmethod
def tag_structures_to_tagnames(tag_structures):
tagnames = []
for tag in tag_structures:
tagnames.append(tag.name)
return tagnames
@classmethod
def get_tag_infos(cls,names,collection=None,urlsafe=False):
if urlsafe and collection:
collection = ndb.Key(urlsafe=collection)
tags=[]
for name in names:
#print "Check tag '{}' with the collection id '{}'".format(name,collection.id() if collection else None)
key = model.Tag.tag_to_key(name,collection)
try:
db = key.get()
tag = db.get_tag().to_dict()
except:
if not collection or collection == Collection.top_key():
tag = TagStructure(name=TagValidator.name(name), color='blue').to_dict()
else:
tag = cls.get_tag_infos([name])[0]
print tag
tags.append(tag)
print tags
return tags
@classmethod
def add(cls,name,collection=None, toplevel_key=None, icon_data=None, \
icon_id=None, icon_key=None, color=None, force_new_icon=False, auto_incr=True,
approved=False,**kwargs):
""" Add a tag, if it not exists create one.
If an 'icon_strucuture' is given a new icon is created for the icon DB,
if an 'icon_key' is given this icon is used.
An icon can only be added once, except 'force_new_icon' is 'True'
This method already 'put()'s the tag to the DB.
"""
name = TagValidator.name(name)
col = collection or Collection.top_key()
#key = ndb.Key('Tag','tag_{}_{}'.format(name,col))
#print key
tag_db = Tag.get_or_insert(Tag.tag_to_keyname(name,col),\
name=name,collection=col,cnt=0)
if col != Collection.top_key() and not toplevel_key:
tag_db.toplevel = Tag.tag_to_key(name,Collection.top_key())
top_db = Tag.get_or_insert(Tag.tag_to_keyname(name,Collection.top_key()),\
name=name,collection=Collection.top_key(),cnt=0)
top_db.put()
elif toplevel_key:
tag_db.toplevel = toplevel_key
if auto_incr:
tag_db.incr()
# check if icon already exists
if not tag_db.get_tag().icon_id or force_new_icon:
if icon_key or icon_id:
key = model.Icon.id_to_key(icon_id) if icon_id else icon_key
tag_db.add_icon(key=key)
elif icon_data:
tag_db.create_icon(icon_data,name)
if color:
tag_db.color = color
tag_db.approved=approved
return tag_db.put()
@classmethod
def remove(cls,name,collection=None):
"""Removes a tag by its name"""
# TODO Should it also work with a key??
name = TagValidator.name(name)
col = collection or Collection.top_key()
tag_db = Tag.tag_to_key(name,col).get()
if tag_db:
tag_db.decr()
if tag_db.get_tag().icon_id:
tag_db.remove_icon()
return tag_db.put()
else:
return False
@classmethod
def approve(cls,name,collection=None,approved=True):
"""The method approves a tag, by default only global tags need approvement"""
name = TagValidator.name(name)
col = collection or Collection.top_key()
tag_db = Tag.tag_to_key(name,col).get()
tag_db.approved=approved
return tag_db.put()
@classmethod
def qry(cls, toplevel=None, name=None, collection=None, only_approved=False,
order_by_count=True, count_greater=0, **kwargs):
"""Query for the icon model"""
qry = cls.query(cls.cnt > count_greater, **kwargs)
if toplevel:
qry_tmp = qry
qry = qry.filter(cls.toplevel==toplevel)
if name:
qry_tmp = qry
qry = qry.filter(cls.name==TagValidator.name(name))
if collection:
qry_tmp = qry
qry = qry.filter(cls.collection == collection)
if only_approved:
qry_tmp = qry
qry = qry_tmp.filter(cls.approved==True)
if order_by_count:
qry_tmp = qry
qry = qry.order(-cls.cnt)
#else filter for private True and False
return qry
@classmethod
def get_dbs(
cls, name=None, color=None, approved=None,
category=None,
**kwargs
):
kwargs = cls.get_col_dbs(**kwargs)
kwargs = cls.get_counter_dbs(**kwargs)
name=name or util.param('name', str)
name=TagValidator.name(name)
return super(Tag, cls).get_dbs(
name=name,
color=color or util.param('color', str),
approved=approved or util.param('approved', bool),
category=category or util.param('category', list),
**kwargs
)
@staticmethod
def print_list(dbs):
print "\n+-------------------+-------------------+-------------------"\
+"+-------------------+-----------+-------------------+"\
+"---------------------------------------+"
print "| {:<18}| {:<18}| {:<18}| {:<18}| {:<10}| {:<18}| {:<38}|".\
format("name", "collection", "icon", "color", \
"count", "approved", "toplevel")
print "+-------------------+-------------------+-------------------"\
+"+-------------------+-----------+-------------------+---------------------------------------+"
for db in dbs:
print "| {:<18}| {:<18}| {:<18}| {:<18}| {:<10}| {:<18}| {:<38}|".\
format(db.name, db.collection, \
getattr(db.icon_id,"icon_key",""), db.color, db.count, db.approved, db.toplevel or "")
print "+-------------------+-------------------+-------------------"\
+"+-------------------+-----------+-------------------+"\
+"---------------------------------------+"
print
print
PUBLIC_PROPERTIES = ['name', 'color', 'icon_url', 'cnt', 'collection','category']
PRIVATE_PROPERTIES = ['icon_id','approved']
class TagRelation(CountableLazy, AddCollection, model.Base): # use the counter mixin
"""Tag relation model
Saves all relation between tags with a counter.
Can be used for tag suggestions.
key: tagrel__{tag_name}_{relate_to}_{collection}
"""
tag_name = ndb.StringProperty(indexed=True,required=True)
related_to = ndb.StringProperty(indexed=True,required=True)
@staticmethod
def to_keyname(tag_name,related_to,collection=None):
"""Returns a key name (string)"""
col = collection or Collection.top_key()
return "tagrel__{}_{}_{}".format(TagValidator.name(tag_name),\
TagValidator.name(related_to), col.id())
@staticmethod
def from_keyname(keyname):
"""Returns tag_name, related_to, collection """
names = keyname.split('_')
tag_name = names[2]
related_to = names[3]
col_id = names[4]
if col_id.isdigit():
collection = ndb.Key('Collection',int(col_id))
else:
collection = ndb.Key('Collection',col_id)
return tag_name, related_to, collection
@classmethod
def from_key(cls,key):
"""Returns tag_name, related_to, collection """
return cls.from_keyname(key.id())
@classmethod
def to_key(cls, tag_name, related_to, collection=None):
"""Returns a key"""
return ndb.Key("TagRelation", cls.to_keyname(tag_name,related_to,collection))
@classmethod
def generate_all_keys(cls, tag_names, collection=None):
"""Generates all key combination depending on a tag name list"""
keys = []
for tag_name in tag_names:
keys.extend(cls.generate_related_keys(tag_name,tag_names,collection))
return keys
@classmethod
def generate_related_keys(cls,tag_name,related_tos,collection=None):
"""Generates all keys from one tag name to a list of related tags"""
keys = []
for related_to in related_tos:
if related_to != tag_name:
keys.append(cls.to_key(tag_name,related_to,collection))
return keys
@classmethod
def add_by_keys(cls,tag_rel_keys,_incr_step=1):
"""Add relation by keys
Toplevels are added automatically."""
keys = tag_rel_keys
dbs = ndb.get_multi(keys)
dbs_new = []
keys_del = []
for db, key in zip(dbs, keys):
if not db:
tag_name, related_to, collection = cls.from_key(key)
db = cls.get_or_insert(key.id(),tag_name=tag_name,related_to=related_to,\
collection=collection,cnt=0)
if collection != Collection.top_key():
top_key = cls.to_key(tag_name,related_to,Collection.top_key())
db.toplevel = top_key
cls.get_or_insert(top_key.id(),tag_name=tag_name,related_to=related_to,\
collection=Collection.top_key(),cnt=0)
db.incr(_incr_step)
if db.count <= 0:
keys_del.append(db.key)
if getattr(db,"toplevel",None):
db_top = db.toplevel.get()
if db_top.count <= 1: # its 0 after put()
keys_del.append(db.toplevel)
else:
dbs_new.append(db)
ndb.delete_multi(keys_del) #TODO async delete
return ndb.put_multi(dbs_new)
@classmethod
def add(cls, tag_names, collection=None,_incr_step=1):
"""Add relations by a tag list"""
if not tag_names:
return []
tag_names=TagValidator.name(tag_names)
keys = TagRelation.generate_all_keys(tag_names,collection)
#print "Keys to add for relation"
#print keys
return cls.add_by_keys(keys,_incr_step)
@classmethod
def remove(cls, tag_names, collection=None):
"""Remove relations by a tag list"""
if not tag_names:
return []
tag_names=TagValidator.name(tag_names)
keys = TagRelation.generate_all_keys(tag_names,collection)
cls.add_by_keys(keys,_incr_step=-1)
return keys
@classmethod
def qry(cls, tag_name=None, related_to=None, toplevel=None, \
collection=None, order_by_count=True, **kwargs):
"""Query for the icon model"""
qry = cls.query(**kwargs)
if tag_name:
qry_tmp = qry
qry = qry.filter(cls.tag_name==TagValidator.name(tag_name))
if toplevel:
qry_tmp = qry
qry = qry.filter(cls.toplevel==toplevel)
if related_to:
qry_tmp = qry
qry = qry.filter(cls.related_to==TagValidator.name(related_to))
if collection:
qry_tmp = qry
qry = qry.filter(cls.collection == collection)
if order_by_count:
qry_tmp = qry
qry = qry.order(-cls.cnt)
#else filter for private True and False
return qry
@staticmethod
def print_list(dbs):
print "\n+-------------------+-------------------+-------------------+-----------+---"
print "| {:<18}| {:<18}| {:<18}| {:<10}| {:<48}".\
format("tag", "related to", "collection", "count", "toplevel")
print "+-------------------+-------------------+-------------------+-----------+---"
for db in dbs:
print "| {:<18}| {:<18}| {:<18}| {:<10}| {:<48}".\
format(db.tag_name, db.related_to, db.collection, db.count, db.toplevel or "")
print "+-------------------+-------------------+-------------------+-----------+---"
print
print
@classmethod
def get_dbs(
cls, tag_name=None, related_to=None,**kwargs
):
kwargs = cls.get_col_dbs(**kwargs)
kwargs = cls.get_counter_dbs(**kwargs)
return super(TagRelation, cls).get_dbs(
tag_name=tag_name or util.param('tag_name', str),
related_to=related_to or util.param('related_to', bool),
**kwargs
)
class Taggable(ndb.Model): # use the counter mixin
"""Adds a tags property
Tags are managed in the 'Tag' model, this mixin
adds two methods to deal with tags:
'add_tags': if an tag already exists it can be added by its key
'create_tags': create a new tag
The two method 'put' the tag automatically, this means it is recommended to
put the taggable model as well or remove the tags again if something went wrong.
"""
#tags = ndb.StringProperty(TagStructure, repeated=True)
tags = ndb.StringProperty(indexed=True, repeated=True,
validator=lambda p, v: v.lower())
_MAX_TAGS = 20 # TODO config option
#_new_tags = []
def add_tags(self, tags):
"""Add tags as strings. (tags needs to be a list)
Color and icon are saved in the 'Tag' model.
All tag names are change to lower letters and double entries are deleted."""
# TODO if icon changes it could give double entries, not good!
new_tags = []
if not getattr(self,'collection',None):
col = Collection.top_key()
else:
col = self.collection
if getattr(self,'tags',None):
#print "Check if tags already exist"
for tag in tags:
if tag not in self.tags:
new_tags.append(tag)
#new_tags = self.tags - tags
else:
new_tags = tags
#print new_tags
if len(self.tags) + len(new_tags) > self._MAX_TAGS:
raise UserWarning('Too many tags, maximum {} tags are allowed, {} are used.'.\
format(self._MAX_TAGS,len(self.tags) + len(new_tags)))
for tag in new_tags:
#Tag.add(tag.name, icon_structure=tag.icon, color=tag.color, collection=col)
Tag.add(tag, collection=col)
## Add relations
#old_tagnames = Tag.tag_structures_to_tagnames(self.tags)
old_tagnames = self.tags[:]
self.tags.extend(new_tags)
#new_tagnames = Tag.tag_structures_to_tagnames(self.tags)
new_tagnames = self.tags[:]
#print "Add new relation names"
#print new_tagnames
TagRelation.remove(old_tagnames,col)
TagRelation.add(new_tagnames,col)
return self.tags
def remove_tags(self, tags):
"""Removes tags as strings. """
# TODO if icon is different it could give wrong entries, not good!
# only compare names!
rm_tags = []
new_tags = []
if not getattr(self,'collection',None):
col = Collection.top_key()
else:
col = self.collection
if not getattr(self,'tags',None):
#print "Check if tags already exist"
return []
else:
for tag in tags:
if tag in self.tags:
rm_tags.append(tag)
#print "Tags to remove"
#print rm_tags
for tag in rm_tags:
#Tag.remove(tag.name, collection=col)
Tag.remove(tag, collection=col)
## Del relations
#old_tagnames = Tag.tag_structures_to_tagnames(self.tags)
old_tagnames = self.tags
# remove tags
still_tags = []
for tag in self.tags:
if tag not in rm_tags:
still_tags.append(tag)
self.tags = still_tags
#new_tagnames = Tag.tag_structures_to_tagnames(self.tags)
new_tagnames = self.tags
#print "Tags to remove (Rel):"
#print old_tagnames
TagRelation.remove(old_tagnames,col)
#print "Tags to add (Rel):"
#print new_tagnames
TagRelation.add(new_tagnames,col)
def update_tags(self,tags):
"""Updates the tag list, a full tag list is required.
The function adds, removes, and reorders the tag list"""
# make list unique, no double entries
tags_unique = []
for tag in tags:
if tag not in tags_unique:
tags_unique.append(tag.lower())
tags = tags_unique
add_tags = []
rm_tags = []
if not getattr(self,'tags',None):
self.tags = []
for tag in tags: # look which tags to add
if tag not in self.tags:
add_tags.append(tag)
for tag in self.tags: # look which tags to remove
if tag not in tags:
rm_tags.append(tag)
self.add_tags(add_tags)
self.remove_tags(rm_tags)
self.tags = tags
return self.tags
@classmethod
def get_tag_dbs(
cls, tags=None, **kwargs
):
""" Call this function when 'Taggable' is used int the 'get_dbs' function.
"""
tags = tags or util.param('tags',list)
kwargs["tags"] = tags
return kwargs
|
{
"content_hash": "e93bb7567e9efa220b3e5daae51403fd",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 116,
"avg_line_length": 36.442622950819676,
"alnum_prop": 0.5560053981106613,
"repo_name": "wodore/wodore-ng",
"id": "0bef6aa8ce4298150161df6b507e4a1c19d320c9",
"size": "22246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/model/tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9501"
},
{
"name": "HTML",
"bytes": "57499"
},
{
"name": "JavaScript",
"bytes": "61074"
},
{
"name": "Python",
"bytes": "207624"
},
{
"name": "Shell",
"bytes": "337"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 3ad5422060a7
Revises: d0ff2f23d725
Create Date: 2016-05-19 00:01:43.633946
"""
# revision identifiers, used by Alembic.
revision = '3ad5422060a7'
down_revision = 'd0ff2f23d725'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('comments', schema=None) as batch_op:
batch_op.add_column(sa.Column('reply', sa.UnicodeText(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('comments', schema=None) as batch_op:
batch_op.drop_column('reply')
### end Alembic commands ###
|
{
"content_hash": "0b50abdbfd0552f4954f225360bcf1ba",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.687248322147651,
"repo_name": "WillSkywalker/blog",
"id": "6b2f824d489ace42638cbd168b726d78c1a6d240",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/3ad5422060a7_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "98832"
},
{
"name": "HTML",
"bytes": "51893"
},
{
"name": "JavaScript",
"bytes": "15163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "33385"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('smsforms', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sqlxformssession',
name='current_action_due',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='current_reminder_num',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='expire_after',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='include_case_updates_in_partial_submissions',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='phone_number',
field=models.CharField(max_length=126, null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='reminder_intervals',
field=models.JSONField(null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='session_is_open',
field=models.BooleanField(null=True),
),
migrations.AddField(
model_name='sqlxformssession',
name='submit_partially_completed_forms',
field=models.BooleanField(null=True),
),
migrations.AlterIndexTogether(
name='sqlxformssession',
index_together=set([('session_is_open', 'connection_id'), ('session_is_open', 'current_action_due')]),
),
]
|
{
"content_hash": "b7628a592bb8b2b85203dd2085e91c95",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 114,
"avg_line_length": 32.527272727272724,
"alnum_prop": 0.5645612073784237,
"repo_name": "dimagi/commcare-hq",
"id": "523e53ee0a2c7019da11cc7c07bf74e37a6da61e",
"size": "1839",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/smsforms/migrations/0002_add_state_tracking_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
import urllib
from cStringIO import StringIO
from nose.tools import eq_, ok_, assert_raises
import mock
from airmozilla.base.tests.testbase import DjangoTestCase
from airmozilla.main.models import Event, VidlySubmission
from airmozilla.manage import vidly
def get_custom_XML(**kwargs):
return (
'<?xml version="1.0"?>'
'<Response><Message>{message}</Message>'
'<MessageCode>{message_code}</MessageCode>'
'<Success><Task><UserID>{user_id}</UserID>'
'<MediaShortLink>{tag}</MediaShortLink>'
'<SourceFile>{source_file}</SourceFile>'
'<BatchID>{batch_id}</BatchID>'
'<Status>{status}</Status>'
'<Private>{private}</Private>'
'<PrivateCDN>{private_cdn}</PrivateCDN>'
'<IsHD>{hd}</IsHD>'
'<Created>{created}</Created>'
'<Updated>{updated}</Updated>'
'<UserEmail>{user_email}</UserEmail>'
'</Task></Success></Response>'
).format(message=kwargs.get('message', 'Action successful.'),
message_code=kwargs.get('message_code', '4.1'),
user_id=kwargs.get('user_id', '1234'),
tag=kwargs.get('tag', 'abc123').encode('utf8'),
source_file=kwargs.get(
'source_file', 'http://videos.mozilla.org/bla.f4v'),
batch_id=kwargs.get('batch_id', '35402'),
status=kwargs.get('status', 'Finished'),
private=kwargs.get('private', 'false'),
private_cdn=kwargs.get('private_cdn', 'false'),
hd=kwargs.get('hd', 'false'),
created=kwargs.get('created', '2012-08-23 19:30:58'),
updated=kwargs.get('updated', '2012-08-23 20:44:22'),
user_email=kwargs.get('user_email', 'airmozilla@mozilla.com'))
SAMPLE_XML = (
'<?xml version="1.0"?>'
'<Response><Message>Action successful.</Message>'
'<MessageCode>4.1</MessageCode><Success><Task><UserID>1234</UserID>'
'<MediaShortLink>abc123</MediaShortLink>'
'<SourceFile>http://videos.mozilla.org/bla.f4v</SourceFile>'
'<BatchID>35402</BatchID>'
'<Status>Finished</Status>'
'<Private>false</Private>'
'<PrivateCDN>false</PrivateCDN><Created>2012-08-23 19:30:58</Created>'
'<Updated>2012-08-23 20:44:22</Updated>'
'<UserEmail>airmozilla@mozilla.com</UserEmail>'
'</Task></Success></Response>'
)
SAMPLE_MEDIALIST_XML = (
'<?xml version="1.0"?>'
'<Response><Message>OK</Message><MessageCode>7.4</MessageCode><Success>'
'<Media><MediaShortLink>abc123</MediaShortLink><VanityLink/>'
'<Notify>vvm@spb-team.com</Notify><Created>2011-12-25 18:45:56</Created>'
'<Updated>2012-11-28 14:05:07</Updated><Status>Error</Status>'
'<IsDeleted>false</IsDeleted><IsPrivate>false</IsPrivate>'
'<IsPrivateCDN>false</IsPrivateCDN><CDN>AWS</CDN></Media>'
'<Media><MediaShortLink>xyz987</MediaShortLink><VanityLink/>'
'<Notify>vvm@spb-team.com</Notify><Created>2011-12-25 19:41:05</Created>'
'<Updated>2012-11-28 14:04:57</Updated><Status>Error</Status>'
'<IsDeleted>false</IsDeleted><IsPrivate>false</IsPrivate>'
'<IsPrivateCDN>false</IsPrivateCDN><CDN>AWS</CDN></Media>'
'</Success></Response>'
)
SAMPLE_STATISTICS_XML = (
'<?xml version="1.0"?>'
'<Response><Message/><MessageCode/><Success><StatsInfo><StatsTable>'
'<cols><col>Class</col><col>Vendor</col><col>Model</col>'
'<col>Platform</col><col>OS</col><col>Browser</col><col>Browser Ver</col>'
'<col>Hits</col></cols><rows><row><col>Desktop</col><col></col><col></col>'
'<col></col><col>Apple</col><col>Firefox</col><col>21.0</col><col>5</col>'
'</row><row><col>Desktop</col><col></col><col></col><col></col>'
'<col>Apple</col><col>Firefox</col><col>20.0</col><col>2</col></row>'
'</rows></StatsTable><Others>0</Others><TotalHits>10</TotalHits>'
'</StatsInfo></Success></Response>'
)
SAMPLE_STATISTICS_BROKEN_XML = (
'<?xml version="1.0"?>'
'<Response><Message/><MessageCode/><Success><StatsInfo>'
'</StatsInfo></Success></Response>'
)
SAMPLE_INVALID_LINKS_XML = (
'<?xml version="1.0"?>'
'<Response><Message>Action failed: all media short links are wrong.'
'</Message><MessageCode>4.3</MessageCode><Errors><Error>'
'<ErrorCode>4.1</ErrorCode>'
'<ErrorName>No media short links provided.</ErrorName>'
'<Description>You have not provided any media short links in your request '
'or all media short links are invalid.</Description><Suggestion>Check '
'that you have provided valid media short links in your request. If you '
'have used batch ID, verify that it contains any media links (you may '
'need to consult site administrator for this).</Suggestion></Error>'
'</Errors></Response>'
)
SAMPLE_MEDIA_UPDATED_XML = (
'<?xml version="1.0"?>'
'<Response><Message>All medias have been updated.</Message>'
'<MessageCode>2.5</MessageCode>'
'</Response>'
)
SAMPLE_MEDIA_UPDATE_FAILED_XML = (
'<?xml version="1.0"?>'
'<Response>'
'<Message>Action failed: none of media short link were updated.</Message>'
'<MessageCode>2.6</MessageCode>'
'<Errors>'
'<Error>'
'<ErrorCode>8.4</ErrorCode>'
'<ErrorName>Media invalidation in progress</ErrorName>'
'<Description>Media invalidation in progress</Description>'
'<Suggestion></Suggestion>'
'<SourceFile>9b8a4b</SourceFile>'
'</Error>'
'</Errors>'
'</Response>'
)
class TestVidlyTokenize(DjangoTestCase):
fixtures = ['airmozilla/manage/tests/main_testdata.json']
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_secure_token(self, p_urllib2):
event = Event.objects.get(title='Test event')
submission = VidlySubmission.objects.create(
event=event,
tag='xyz123'
)
tokenize_calls = [] # globally scope mutable
def mocked_urlopen(request):
tokenize_calls.append(1)
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>OK</Message>
<MessageCode>7.4</MessageCode>
<Success>
<MediaShortLink>8r9e0o</MediaShortLink>
<Token>MXCsxINnVtycv6j02ZVIlS4FcWP</Token>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
eq_(
vidly.tokenize(submission.tag, 60),
'MXCsxINnVtycv6j02ZVIlS4FcWP'
)
eq_(len(tokenize_calls), 1)
# do it a second time
eq_(
vidly.tokenize(submission.tag, 60),
'MXCsxINnVtycv6j02ZVIlS4FcWP'
)
eq_(len(tokenize_calls), 1) # caching for the win!
submission.token_protection = True
submission.save()
eq_(
vidly.tokenize(submission.tag, 60),
'MXCsxINnVtycv6j02ZVIlS4FcWP'
)
eq_(len(tokenize_calls), 2) # cache got invalidated
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_not_secure_token(self, p_urllib2, p_logging):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Error</Message>
<MessageCode>7.5</MessageCode>
<Errors>
<Error>
<ErrorCode>8.1</ErrorCode>
<ErrorName>Short URL is not protected</ErrorName>
<Description>bla bla</Description>
<Suggestion>ble ble</Suggestion>
</Error>
</Errors>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
eq_(vidly.tokenize('abc123', 60), '')
# do it a second time and it should be cached
def mocked_urlopen_different(request):
return StringIO("""
Anything different
""")
p_urllib2.urlopen = mocked_urlopen_different
eq_(vidly.tokenize('abc123', 60), '')
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_invalid_response_token(self, p_urllib2, p_logging):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Error</Message>
<MessageCode>99</MessageCode>
<Errors>
<Error>
<ErrorCode>0.0</ErrorCode>
<ErrorName>Some other error</ErrorName>
<Description>bla bla</Description>
<Suggestion>ble ble</Suggestion>
</Error>
</Errors>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
eq_(vidly.tokenize('def123', 60), None)
p_logging.error.asert_called_with(
"Unable fetch token for tag 'abc123'"
)
class TestVidlyAddMedia(DjangoTestCase):
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_add_media_with_email(self, p_urllib2, p_logging):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>All medias have been added.</Message>
<MessageCode>2.1</MessageCode>
<BatchID>47520</BatchID>
<Success>
<MediaShortLink>
<SourceFile>http://www.com/file.flv</SourceFile>
<ShortLink>8oxv6x</ShortLink>
<MediaID>13969839</MediaID>
<QRCode>http://vid.ly/8oxv6x/qrcodeimg</QRCode>
<HtmlEmbed>code code</HtmlEmbed>
<EmailEmbed>more code code</EmailEmbed>
</MediaShortLink>
</Success>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
shortcode, error = vidly.add_media('http//www.com')
eq_(shortcode, '8oxv6x')
ok_(not error)
# same thing should work with optional extras
shortcode, error = vidly.add_media(
'http//www.com',
email='mail@peterbe.com',
token_protection=True
)
eq_(shortcode, '8oxv6x')
ok_(not error)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_add_media_with_notify_url(self, p_urllib2, p_logging):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>All medias have been added.</Message>
<MessageCode>2.1</MessageCode>
<BatchID>47520</BatchID>
<Success>
<MediaShortLink>
<SourceFile>http://www.com/file.flv</SourceFile>
<ShortLink>8oxv6x</ShortLink>
<MediaID>13969839</MediaID>
<QRCode>http://vid.ly/8oxv6x/qrcodeimg</QRCode>
<HtmlEmbed>code code</HtmlEmbed>
<EmailEmbed>more code code</EmailEmbed>
</MediaShortLink>
</Success>
</Response>
""")
def mocked_Request(url, query_string):
ok_(
'<Notify>https://mywebhook.example.com</Notify>' in
urllib.unquote(query_string)
)
return mock.MagicMock()
p_urllib2.Request = mocked_Request
p_urllib2.urlopen = mocked_urlopen
shortcode, error = vidly.add_media(
'http//www.com',
notify_url='https://mywebhook.example.com',
)
eq_(shortcode, '8oxv6x')
ok_(not error)
def test_add_media_with_notify_url_and_email(self):
assert_raises(
TypeError,
vidly.add_media,
'http://example.com',
email='peterbe@example.com',
notify_url='http://example.com/hook',
)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_add_media_failure(self, p_urllib2, p_logging):
def mocked_urlopen(request):
# I don't actually know what it would say
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Error</Message>
<MessageCode>0.0</MessageCode>
<Errors>
<Error>
<ErrorCode>0.0</ErrorCode>
<ErrorName>Error message</ErrorName>
<Description>bla bla</Description>
<Suggestion>ble ble</Suggestion>
</Error>
</Errors>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
shortcode, error = vidly.add_media('http//www.com')
ok_(not shortcode)
ok_('0.0' in error)
class TestVidlyDeleteMedia(DjangoTestCase):
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_delete_media(self, p_urllib2, p_logging):
def mocked_urlopen(request):
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Success</Message>
<MessageCode>0.0</MessageCode>
<Success>
<MediaShortLink>8oxv6x</MediaShortLink>
</Success>
<Errors>
<Error>
<SourceFile>http://www.com</SourceFile>
<ErrorCode>1</ErrorCode>
<Description>ErrorDescriptionK</Description>
<Suggestion>ErrorSuggestionK</Suggestion>
</Error>
</Errors>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
shortcode, error = vidly.delete_media(
'8oxv6x',
email='test@example.com'
)
eq_(shortcode, '8oxv6x')
ok_(not error)
@mock.patch('airmozilla.manage.vidly.logging')
@mock.patch('airmozilla.manage.vidly.urllib2')
def test_delete_media_failure(self, p_urllib2, p_logging):
def mocked_urlopen(request):
# I don't actually know what it would say
return StringIO("""
<?xml version="1.0"?>
<Response>
<Message>Success</Message>
<MessageCode>0.0</MessageCode>
<Errors>
<Error>
<SourceFile>http://www.com</SourceFile>
<ErrorCode>1.1</ErrorCode>
<Description>ErrorDescriptionK</Description>
<Suggestion>ErrorSuggestionK</Suggestion>
</Error>
</Errors>
</Response>
""")
p_urllib2.urlopen = mocked_urlopen
shortcode, error = vidly.delete_media(
'8oxv6x',
email='test@example.com'
)
ok_(not shortcode)
ok_('1.1' in error)
class VidlyTestCase(DjangoTestCase):
@mock.patch('urllib2.urlopen')
def test_query(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_XML.strip())
p_urlopen.side_effect = mocked_urlopen
results = vidly.query('abc123')
ok_('abc123' in results)
eq_(results['abc123']['Status'], 'Finished')
@mock.patch('urllib2.urlopen')
def test_medialist(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_MEDIALIST_XML.strip())
p_urlopen.side_effect = mocked_urlopen
results = vidly.medialist('Error')
ok_(results['abc123'])
ok_(results['xyz987'])
@mock.patch('urllib2.urlopen')
def test_statistics(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_STATISTICS_XML.strip())
p_urlopen.side_effect = mocked_urlopen
results = vidly.statistics('abc123')
eq_(results['total_hits'], 10)
@mock.patch('urllib2.urlopen')
def test_statistics_broken(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_STATISTICS_BROKEN_XML.strip())
p_urlopen.side_effect = mocked_urlopen
results = vidly.statistics('abc123')
eq_(results, None)
@mock.patch('urllib2.urlopen')
def test_update_media_protection_protect(self, p_urlopen):
def mocked_urlopen(request):
xml_string = urllib.unquote_plus(request.data)
ok_('<Protect><Token /></Protect>' in xml_string)
return StringIO(SAMPLE_MEDIA_UPDATED_XML.strip())
p_urlopen.side_effect = mocked_urlopen
# This doesn't return anything but we're only interested in if it
# can execute at all without errors.
vidly.update_media_protection('abc123', True)
@mock.patch('urllib2.urlopen')
def test_update_media_protection_unprotect(self, p_urlopen):
def mocked_urlopen(request):
xml_string = urllib.unquote_plus(request.data)
ok_('<Protect />' in xml_string)
return StringIO(SAMPLE_MEDIA_UPDATED_XML.strip())
p_urlopen.side_effect = mocked_urlopen
# This doesn't return anything but we're only interested in if it
# can execute at all without errors.
vidly.update_media_protection('abc123', False)
@mock.patch('urllib2.urlopen')
def test_update_media_protection_error(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_MEDIA_UPDATE_FAILED_XML.strip())
p_urlopen.side_effect = mocked_urlopen
# This doesn't return anything but we're only interested in if it
# can execute at all without errors.
assert_raises(
vidly.VidlyUpdateError,
vidly.update_media_protection,
'abc123', True
)
|
{
"content_hash": "cba99fbe80bf4bbf9982aa93485e5a80",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 79,
"avg_line_length": 35.541501976284586,
"alnum_prop": 0.5728425266903915,
"repo_name": "Nolski/airmozilla",
"id": "dcbdaacd823e440fccf0e3c116ade7bfd79268e3",
"size": "17984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airmozilla/manage/tests/test_vidly.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4527"
},
{
"name": "Brightscript",
"bytes": "67473"
},
{
"name": "CSS",
"bytes": "1714414"
},
{
"name": "HTML",
"bytes": "2399577"
},
{
"name": "JavaScript",
"bytes": "3194328"
},
{
"name": "Makefile",
"bytes": "13548"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "3296951"
},
{
"name": "Ruby",
"bytes": "4978"
},
{
"name": "Shell",
"bytes": "3573"
},
{
"name": "Smarty",
"bytes": "1943"
}
],
"symlink_target": ""
}
|
"""Main Application Controllers"""
import functools
import hashlib
import httplib2
import os.path
import uuid
import socket
from collections import OrderedDict
import time
from datetime import datetime
from functools import partial
import zmq
import json
import ConfigParser
from flask import ( Flask, render_template, request, redirect, abort, Response,
jsonify, make_response, session, url_for)
from flask.ext.scrypt import generate_random_salt, generate_password_hash, check_password_hash
from apiclient.discovery import build
from oauth2client.client import ( AccessTokenRefreshError,
AccessTokenCredentials,
flow_from_clientsecrets,
FlowExchangeError)
from app import app, app_config, db, sockets
from model import Model, UnknownUserError, UnknownTestError
from notifications import console_subscribe
from cstar_perf.frontend.lib.util import random_token
from cstar_perf.frontend.lib import screenshot, stupid_cache
from cstar_perf.frontend import SERVER_KEY_PATH
from cstar_perf.frontend.lib.crypto import APIKey
import logging
log = logging.getLogger('cstar_perf.controllers')
### Setup authentication method configured in server.conf:
try:
authentication_type = app_config.get("server", "authentication_type")
except ConfigParser.NoOptionError:
authentication_type = 'local'
if authentication_type == 'local':
pass
elif authentication_type == 'google':
### Google+ API:
gplus = build('plus', 'v1')
google_client_secrets = os.path.join(os.path.expanduser("~"),'.cstar_perf','client_secrets.json')
with open(google_client_secrets) as f:
google_client_id = json.load(f)['web']['client_id']
else:
raise AssertionError('Invalid authentication type configured in server.conf: {}'.format(authentication_type))
server_key = APIKey.load(SERVER_KEY_PATH)
################################################################################
#### Template functions:
################################################################################
def get_user_id():
return session.get('user_id', None)
app.jinja_env.globals['get_user_id'] = get_user_id
def user_is_authenticated():
return session.get('logged_in',False)
app.jinja_env.globals['user_is_authenticated'] = user_is_authenticated
################################################################################
#### Helpers
################################################################################
def user_in_role(role, user=None):
"""Find if a user is in the given role"""
if user is None:
user = get_user_id()
try:
user_roles = db.get_user_roles(get_user_id())
if role in user_roles:
return True
except UnknownUserError:
pass
return False
def requires_auth(role):
"""Ensures the current user has the appropriate authorization before
running the wrapped function"""
def decorator(function):
@functools.wraps(function)
def wrapper(*args, **kw):
# Do the check:
if user_is_authenticated():
if user_in_role(role):
return function(*args, **kw)
return make_response(render_template('access_denied.jinja2.html'), 401)
return wrapper
return decorator
@app.context_processor
def inject_template_variables():
"""Common variables available to all templates"""
d = {'clusters': db.get_cluster_names(),
'authentication_type': authentication_type,
'google_client_id': None}
if authentication_type == 'google':
d['google_client_id'] = google_client_id
return d
################################################################################
#### Page Controllers
################################################################################
@app.route('/')
def index():
return render_template('index.jinja2.html')
def login_with_google():
"""Login via Google+"""
log.info("Initiating login with Google+")
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets(google_client_secrets, scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
return make_response(
jsonify({'error':'Failed to upgrade the authorization code.'}), 401)
# An ID Token is a cryptographically-signed JSON object encoded in base 64.
# Normally, it is critical that you validate an ID Token before you use it,
# but since you are communicating directly with Google over an
# intermediary-free HTTPS channel and using your Client Secret to
# authenticate yourself to Google, you can be confident that the token you
# receive really comes from Google and is valid. If your server passes the
# ID Token to other components of your app, it is extremely important that
# the other components validate the token before using it.
gplus_id = credentials.id_token['sub']
stored_credentials = AccessTokenCredentials(session.get('credentials'),
request.user_agent)
stored_gplus_id = session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
return make_response(jsonify(
{'success':'Current user is already connected.'}), 200)
# Get the user's email address:
http = httplib2.Http()
http = credentials.authorize(http)
# Get a list of people that this user has shared with this app.
google_request = gplus.people().get(userId='me')
user_obj = google_request.execute(http=http)
email = None
# Find the google account email:
for e in user_obj['emails']:
if e['type'] == 'account':
email = e['value']
break
else:
return make_response(
jsonify({'error':'Authorization from Google failed.'}), 401)
# Store the access token in the session for later use.
session['credentials'] = credentials.access_token
session['gplus_id'] = gplus_id
session['logged_in'] = True
session['user_id'] = email
return make_response(jsonify({'success':'Successfully connected user.'}),
200)
def login_with_passphrase():
data = request.get_json(force=True)
log.info("Initiating login with passphrase")
try:
if db.validate_user_passphrase(data['email'], data['passphrase']):
session['logged_in'] = True
session['user_id'] = data['email']
return make_response(jsonify({'success':'Successfully connected user.'}),
200)
except UnknownUserError:
pass
return make_response(jsonify({'error':'Unauthorized - did you enter the user right user / passphrase?'}), 401)
@app.route('/login', methods=['POST'])
def login():
"""Login via digest authentication, or Google+"""
if authentication_type == 'local':
return login_with_passphrase()
elif authentication_type == 'google':
return login_with_google()
else:
raise AssertionError('Invalid authentication type configured in server.conf: {}'.format(authentication_type))
@app.route('/logout', methods=['GET','POST'])
def logout():
for i in ['credentials','gplus_id','logged_in','bypass_csrf'] :
try:
session.pop(i)
except KeyError:
pass
if request.method == "POST":
return make_response(jsonify({'success':'Logged out.'}), 200)
else:
return redirect("/")
@app.route('/tests')
def tests():
clusters = db.get_cluster_names()
cluster_scheduled_tests = {}
cluster_in_progress_tests = {}
for c in clusters:
scheduled_tests = db.get_scheduled_tests(c)
if len(scheduled_tests) > 0:
cluster_scheduled_tests[c] = scheduled_tests
in_progress_tests = db.get_in_progress_tests(c)
if len(in_progress_tests) > 0:
cluster_in_progress_tests[c] = in_progress_tests
completed_tests = db.get_completed_tests()
return render_template('tests.jinja2.html', clusters=clusters,
cluster_scheduled_tests=cluster_scheduled_tests,
cluster_in_progress_tests=cluster_in_progress_tests,
completed_tests=completed_tests)
@app.route('/tests/user')
@requires_auth('user')
def my_tests():
queued_tests = db.get_user_scheduled_tests(get_user_id())
in_progress_tests = db.get_user_in_progress_tests(get_user_id())
completed_tests = db.get_user_completed_tests(get_user_id())
failed_tests = db.get_user_failed_tests(get_user_id(), 10)
return render_template('user.jinja2.html', queued_tests=queued_tests,
in_progress_tests=in_progress_tests,
completed_tests=completed_tests,
failed_tests=failed_tests)
@app.route('/tests/id/<test_id>')
def view_test(test_id):
try:
test = db.get_test(test_id)
except UnknownTestError:
return make_response('Unknown Test {test_id}.'.format(test_id=test_id), 404)
artifacts = db.get_test_artifacts(test_id)
has_chart = False
for a in artifacts:
if a['artifact_type'] in ['failure','link']:
# Proactively fetch non-blob artifacts:
a['artifact'] = db.get_test_artifact_data(test_id, a['artifact_type'], a['name'])
if a['artifact_type'] == 'stats':
has_chart = True
return render_template('view_test.jinja2.html', test=test, artifacts=artifacts, has_chart=has_chart)
@app.route('/tests/artifacts/<test_id>/<artifact_type>')
@app.route('/tests/artifacts/<test_id>/<artifact_type>/<artifact_name>')
def get_artifact(test_id, artifact_type, artifact_name=None):
if artifact_type == 'graph':
return redirect("/graph?command=one_job&stats={test_id}".format(test_id=test_id))
elif artifact_type == 'flamegraph' and not artifact_name:
artifacts = db.get_test_artifacts(test_id, artifact_type)
for artifact in artifacts:
artifact['data'] = db.get_test_artifact_data(test_id, artifact_type, artifact['name'])
return render_template('flamegraph.jinja2.html', test_id=test_id, artifacts=artifacts)
if not artifact_name:
return make_response(jsonify({'error':'No artifact name provided.'}), 400)
artifact, object_id, artifact_available = db.get_test_artifact_data(test_id, artifact_type, artifact_name)
if artifact_name.endswith(".tar.gz"):
mimetype = 'application/gzip'
elif artifact_name.endswith(".json"):
mimetype = 'application/json'
elif artifact_name.endswith(".svg"):
mimetype = 'image/svg+xml'
else:
mimetype = 'text/plain'
if artifact is None and object_id is not None and artifact_available:
artifact = db.generate_object_by_chunks(object_id)
return Response(response=artifact,
status=200,
mimetype=mimetype,
headers={"Content-Disposition": "filename={name}".format(name=artifact_name)})
@app.route('/graph')
def graph():
return render_template('graph.jinja2.html')
@app.route('/schedule', methods=['GET'])
@requires_auth('user')
def schedule():
"""Page to schedule a test"""
return render_template('schedule.jinja2.html')
@app.route('/cluster/<cluster_name>')
@requires_auth('user')
def cluster(cluster_name):
return render_template('cluster.jinja2.html',
cluster_name=cluster_name)
@app.route('/cluster/specs', methods=['GET'])
def cluster_specs():
return render_template('cluster_specs.jinja2.html')
################################################################################
#### JSON API
################################################################################
@app.route('/api/login', methods=['GET','POST'])
def login_for_apps():
"""Login for API access only"""
if request.method == "GET":
session['unsigned_access_token'] = random_token()
session['logged_in'] = False
return jsonify({"token":session['unsigned_access_token'],
"signature":server_key.sign_message(session['unsigned_access_token'])})
elif request.method == "POST":
# Client posts it's login name and a signed token.
data = request.get_json()
# Verify signed token against stored public key for that name.
pubkey = APIKey(db.get_pub_key(data['login'])['pubkey'])
try:
pubkey.verify_message(session['unsigned_access_token'], data['signature'])
except Exception, e:
session['logged_in'] = False
del session['unsigned_access_token']
return make_response(jsonify({'error':'Bad token signature.'}), 401)
# Token has valid signature, grant login:
session['user_id'] = data['login']
session['logged_in'] = True
# Mark this session as safe to bypass csrf protection, due to the ECDSA authentication:
session['bypass_csrf'] = True
return jsonify({'success':'Logged in'})
@app.route('/api/tests/schedule', methods=['POST'])
@requires_auth('user')
def schedule_test():
"""Schedule a test"""
job = request.get_json()
job_id = uuid.uuid1()
job['test_id'] = str(job_id)
job['user'] = get_user_id()
test_series = job.get('testseries', 'no_series')
if not test_series:
test_series = 'no_series'
db.schedule_test(test_id=job_id, test_series=test_series, user=job['user'],
cluster=job['cluster'], test_definition=job)
return jsonify({'success':True, 'url':'/tests/id/{test_id}'.format(test_id=job['test_id'])})
@app.route('/api/tests/cancel', methods=['POST'])
@requires_auth('user')
def cancel_test():
"""Cancel a scheduled test"""
test_id = request.form['test_id']
test = db.get_test(test_id)
# If test is scheduled, we can immediately cancel.
# If test is already in progress, we need to mark as
# cancel_pending to await the client to cancel the job itself.
new_status = 'cancelled'
if test['status'] == 'in_progress' or test['status'] == 'cancel_pending':
new_status = 'cancel_pending'
if user_in_role('admin'):
db.update_test_status(test_id, new_status)
else:
# Check if the test is owned by the user:
if test['user'] == get_user_id():
db.update_test_status(test_id, new_status)
else:
return make_response(jsonify({'error':'Access Denied to modify test {test_id}'
.format(test_id=test_id)}), 401)
return jsonify({'success':'Test cancelled'})
@app.route('/api/tests')
def get_tests():
"""Retreive all completed tests"""
completed_tests = db.get_completed_tests()
# Apply filters
try:
param_from = request.args.get('date_from', None)
param_to = request.args.get('date_to', None)
date_from = datetime.fromtimestamp(float(param_from)) if param_from else None
date_to = datetime.fromtimestamp(float(param_to)) if param_to else None
except:
return make_response(jsonify({'error':'Invalid date parameters.'}), 400)
if date_from:
completed_tests = [t for t in completed_tests if t['scheduled_date'] >= date_from]
if date_to:
completed_tests = [t for t in completed_tests if t['scheduled_date'] <= date_to]
tests = map(lambda t: {
'test_id': t['test_id'],
'href': url_for('get_test', test_id=t['test_id'])
}, completed_tests)
response = json.dumps(obj=tests)
return Response(response=response,
status=200,
mimetype= 'application/json')
@app.route('/api/tests/id/<test_id>')
@requires_auth('user')
def get_test(test_id):
"""Retrieve the definition for a scheduled test"""
try:
test = db.get_test(test_id)
return jsonify(test)
except UnknownTestError:
return make_response(jsonify({'error':'Unknown Test {test_id}.'.format(test_id=test_id)}), 404)
@app.route('/api/series')
def get_series_list():
series = db.get_series_list()
if 'true' == request.args.get('pretty', 'True').lower():
response = json.dumps(obj=series, sort_keys=True, indent=4, separators=(',', ': '))
else:
response = json.dumps(obj=series)
return Response(response=response,
status=200,
mimetype='application/json')
@app.route('/api/series/<series>/<start_timestamp>/<end_timestamp>')
def get_series( series, start_timestamp, end_timestamp):
series = db.get_series( series, start_timestamp, end_timestamp)
# barf -- like below this sucks -- changing the series table to include status needs to be done
valid_jobs = [job_id for job_id in series if db.get_test_status(job_id) == 'completed']
jsobj = {'series': valid_jobs}
if 'true' == request.args.get('pretty', 'True').lower():
response = json.dumps(obj=jsobj, sort_keys=True, indent=4, separators=(',', ': '))
else:
response = json.dumps(obj=jsobj)
return Response(response=response,
status=200,
mimetype='application/json')
def get_series_summaries_impl(series, start_timestamp, end_timestamp):
series = db.get_series(series, start_timestamp, end_timestamp)
summaries = []
for test_id in series:
status = db.get_test_status(test_id)
if status == 'completed':
artifact = db.get_test_artifact_data(test_id, 'stats_summary', 'stats_summary.{}.json'.format(test_id))
if artifact and artifact[0]:
summaries.append(json.loads(artifact[0]))
return summaries
@app.route('/api/series/<series>/<start_timestamp>/<end_timestamp>/summaries')
def get_series_summaries(series, start_timestamp, end_timestamp):
summaries = get_series_summaries_impl(series, start_timestamp, end_timestamp)
# Construct the response in two passes, first sort the data points on the UUID
# Then denormalize to arrays for each metric
# Operation -> revision label -> uuid (for ordering) -> metrics as a bloc
# Then do Operation -> revision label -> metrics as arrays (already sorted)
byOperation = {}
for summary in summaries:
# First get everything sorted by operation, revision label (not actual revision branch/tag,sha), test id
for stat in summary['stats']:
if 'test' not in stat:
log.error("stat summary without test key: {}".format(stat['id']))
continue
operationStats = byOperation.setdefault(stat['test'], {})
revisionStats = operationStats.setdefault(stat['label'], OrderedDict())
revisionStats[uuid.UUID(stat['id'])] = stat
del stat['test']
del stat['label']
# Now flatten the entire thing to arrays for each operation -> revision
summaries = {}
for operation in byOperation:
newOperation = summaries.setdefault(operation, {})
for revision in byOperation[operation]:
newRevision = newOperation.setdefault(revision, {})
for stats in byOperation[operation][revision].itervalues():
for key, value in stats.iteritems():
statsArray = newRevision.setdefault(key, [])
if isinstance(value, basestring):
statsArray.append(value.split()[0])
else:
statsArray.append(value)
# Wrapper object of facilitate adding fields later
jsobj = { 'summaries' : summaries }
if 'true' == request.args.get('pretty', 'True').lower():
response = json.dumps(obj=jsobj, sort_keys=True, indent=4, separators=(',', ': '))
else:
response = json.dumps(obj=jsobj)
return Response(response=response,
status=200,
mimetype= 'application/json')
def construct_series_graph_url( series, start_timestamp, end_timestamp, operation, metric ):
redirectURL = "/graph?"
redirectURL += "command=series"
redirectURL += "&series={series}"
redirectURL += "&start_timestamp={start_timestamp}"
redirectURL += "&end_timestamp={end_timestamp}"
redirectURL += "&metric={metric}"
redirectURL += "&show_aggregates=false"
redirectURL += "&operation={operation}"
return redirectURL.format(series=series, start_timestamp=start_timestamp, end_timestamp=end_timestamp,
operation=operation, metric=metric)
@app.route('/api/series/<series>/<start_timestamp>/<end_timestamp>/graph/<operation>/<metric>')
def get_series_graph( series, start_timestamp, end_timestamp, operation, metric):
redirectURL = construct_series_graph_url(series, start_timestamp, end_timestamp, operation, metric)
return redirect(redirectURL)
@app.route('/api/series/<series>/<start_timestamp>/<end_timestamp>/graph/<operation>/<metric>.png')
def get_series_graph_png( series, start_timestamp, end_timestamp, operation, metric):
host = socket.gethostname()
graphURL = "http://" + host + construct_series_graph_url( series, start_timestamp, end_timestamp, operation, metric )
return Response(response=screenshot.get_graph_png(graphURL, x_crop=900, y_crop=650),
status=200,
mimetype='application/png')
def get_series_graph_png_cached( series, age, operation, metric, expires, invalidate):
host = socket.gethostname()
end_timestamp = int(time.time())
start_timestamp = max(0, end_timestamp - int(age))
graphURL = "http://" + host + construct_series_graph_url( series, start_timestamp, end_timestamp, operation, metric )
def loader():
return screenshot.get_graph_png(graphURL, x_crop=900, y_crop=650)
cache_key = series + "/" + age + "/" + operation + "/" + metric
return stupid_cache.stupid_cache_get("/tmp", cache_key, loader, expires, invalidate)
@app.route('/api/series/<series>/<age>/graph/cached/<operation>/<metric>.png')
def get_series_graph_png_cached_caching(series, age, operation, metric):
return Response(response=get_series_graph_png_cached(series, age, operation, metric, 0, False),
status=200,
mimetype='application/png')
@app.route('/api/series/<series>/<age>/graph/<operation>/<metric>.png')
def get_series_graph_png_cached_invalidating( series, age, operation, metric):
return Response(response=get_series_graph_png_cached( series, age, operation, metric, 0, True),
status=200,
mimetype='application/png')
@app.route('/api/tests/status/id/<test_id>')
@requires_auth('user')
def get_test_status(test_id):
"""Retrieve the status for a test"""
try:
status = db.get_test_status(test_id)
return jsonify({'status':status})
except UnknownTestError:
return make_response(jsonify({'error':'Unknown Test {test_id}.'.format(test_id=test_id)}), 404)
@app.route('/api/clusters')
@requires_auth('user')
def get_clusters():
"""Retrieve information about available clusters"""
clusters = db.get_clusters()
return make_response(jsonify({'clusters':clusters}))
@app.route('/api/clusters/<cluster_name>')
@requires_auth('user')
def get_clusters_by_name(cluster_name):
"""Retrieve information about a cluster"""
clusters = db.get_clusters()
return make_response(jsonify(clusters[cluster_name]))
@app.route('/api/tests/progress/id/<test_id>', methods=['POST'])
@requires_auth('user')
def set_progress_message_on_test(test_id):
msg = request.get_json()['progress_msg']
db.update_test_progress_msg(test_id, msg)
return jsonify({'status': 'ok'})
################################################################################
#### Websockets
################################################################################
@requires_auth('user')
@sockets.route('/api/console')
def console_messages(ws):
"""Receive console messages as they happen
ZMQ message format:
Console messages:
console cluster_name {"job_id":"current_job_id", "msg":"message from console"}
Control messages:
Keep alive:
The cluster is starting a job:
console cluster_name {"job_id":"current_job_id", "ctl":"START"}
The cluster finished a job:
console cluster_name {"job_id":"current_job_id", "ctl":"DONE"}
The cluster is not working on anything:
console cluster_name {"ctl":"IDLE"}
When forwarding messages to the websocket client, the "console cluster_name"
portion is dropped and just the JSON is sent.
Websocket sends keepalive messages periodically:
{"ctl":"KEEPALIVE"}
"""
cluster_name = ws.receive()
console_socket = console_subscribe(cluster_name)
try:
while True:
try:
data = console_socket.recv_string()
data = data.lstrip("console {cluster_name} ".format(cluster_name=cluster_name))
ws.send(data)
except zmq.error.Again:
# If we timeout from zmq, send a keep alive request to the
# websocket client:
ws.send('{"ctl":"KEEPALIVE"}')
# The client websocket will send keepalive back:
ws.receive()
except zmq.error.ZMQError, e:
if e.errno == zmq.POLLERR:
log.error(e)
# Interrupted zmq socket code, reinitialize:
# I get this when I resize my terminal.. WTF?
console_socket = setup_zmq()
finally:
log.error("Unsubscribing from zmq socket")
console_socket.setsockopt_string(zmq.UNSUBSCRIBE, u'')
|
{
"content_hash": "d8fb4b311b0df9c7a13d0872c0a69ab9",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 121,
"avg_line_length": 40.522620904836195,
"alnum_prop": 0.6179788257940327,
"repo_name": "mambocab/cstar_perf",
"id": "8eb116cfb442d78148cdf370fa22e37a2f1051b5",
"size": "25975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/cstar_perf/frontend/server/controllers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6321"
},
{
"name": "HTML",
"bytes": "29680"
},
{
"name": "JavaScript",
"bytes": "75081"
},
{
"name": "Nginx",
"bytes": "5066"
},
{
"name": "Python",
"bytes": "632311"
},
{
"name": "Ruby",
"bytes": "2417"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from importlib import import_module
from django.apps import apps
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.operations import (RunSQL, RunPython, RemoveField,
RenameField, DeleteModel, RenameModel)
RISKY_OPERATIONS = (RunSQL, RunPython, RemoveField, RenameField,
DeleteModel, RenameModel)
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--database',
action='store',
dest='database',
help='Nominates a database to synchronize. Defaults to the "default" database.',
default='default',
)
parser.add_argument(
'--all',
action='store_true',
dest='all',
help='Also show applied risky migrations',
default=False,
)
help = "Print a list of risky unapplied migrations"
args = "[app_label] [migration_name]"
def handle(self, *args, **options):
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
# If they asked for a migration listing, quit main execution flow and show it
show_all = options.get("all", False)
return self.show_risky_migration_list(connection, args, show_all)
def show_risky_migration_list(self, connection, app_names=None, show_all=False):
"""
Shows a list of all migrations on the system, or only those of
some named apps.
"""
# Load migrations from disk/DB
loader = MigrationLoader(connection, ignore_no_migrations=True)
graph = loader.graph
# If we were passed a list of apps, validate it
if app_names:
invalid_apps = []
for app_name in app_names:
if app_name not in loader.migrated_apps:
invalid_apps.append(app_name)
if invalid_apps:
raise CommandError("No migrations present for: %s" % (", ".join(invalid_apps)))
# Otherwise, show all apps in alphabetic order
else:
app_names = sorted(loader.migrated_apps)
# For each app, print its migrations in order from oldest (roots) to
# newest (leaves).
for app_name in app_names:
shown = set()
for node in graph.leaf_nodes(app_name):
for plan_node in graph.forwards_plan(node):
if plan_node not in shown and plan_node[0] == app_name:
# Give it a nice title if it's a squashed one
title = plan_node[1]
if graph.nodes[plan_node].replaces:
title += " (%s squashed migrations)" % len(graph.nodes[plan_node].replaces)
migration = loader.get_migration(app_name, plan_node[1])
is_risky = any([isinstance(operation, RISKY_OPERATIONS) for
operation in migration.operations])
if is_risky:
if plan_node in loader.applied_migrations:
if show_all:
self.stdout.write(" [x] %s %s" % (app_name, title))
else:
self.stdout.write(" [ ] %s %s" % (app_name, title))
shown.add(plan_node)
|
{
"content_hash": "165e6fff29d93b9126f618df952f9db0",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 103,
"avg_line_length": 41.88659793814433,
"alnum_prop": 0.584051193699237,
"repo_name": "VantageAnalytics/django-migration-sniffer",
"id": "593ff98d2bb2d7cef00d525616f656444c83efe5",
"size": "4087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migration_sniffer/management/commands/sniff_migrations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5282"
}
],
"symlink_target": ""
}
|
import httplib
import urllib, urllib2
import json
import base64
import functools
import logging
import time
class RequestApi(object):
TimeOut = 3
DEBUG_LEVEL = 1
HOST = "api.douban.com"
@classmethod
def request(cls, method, path, params, headers={}, host=''):
"""test --- http://api.douban.com/book/subject/1220562?alt=json """
_headers = {'Accept-Language': 'zh-cn', 'User-Agent': 'Python/Automate', "Accept-Charset": "utf-8"}
_headers.update(headers)
host = host == '' and cls.HOST or host
conn = httplib.HTTPConnection(host, timeout=cls.TimeOut)
for k, v in params.items():
if v == '' or v == None:
del params[k]
params = urllib.urlencode(params)
if method == "GET":
path = "%s?%s" % (path, params)
params = ''
else:
path = "%s" % path
logging.debug("*[Requst]* %s %s %s" % (method, host + path, params))
conn.request(method, path, params, _headers)
#conn.set_debuglevel(cls.DEBUG_LEVEL)
try:
r = conn.getresponse()
data = r.read()
return data
except Exception,e:
logging.error("*[Requst]* %s %s %s request error:%s" % (method, host + path, params,e))
raise e
finally:
conn.close()
@classmethod
def get(cls, path, params, headers={}, host=''):
return cls.request("GET", path, params, headers, host)
@classmethod
def get_json(cls, path, params, headers={}, host=''):
return json.loads(cls.request("GET", path, params, headers, host))
@classmethod
def post(cls, path, params, headers={}, host=''):
return cls.request("POST", path, params, headers, host)
@classmethod
def post_json(cls, path, params, headers={}, host=''):
return json.loads(cls.request("POST", path, params, headers, host))
|
{
"content_hash": "432202b1dd36cdbf90a563efe8e13353",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 107,
"avg_line_length": 30.384615384615383,
"alnum_prop": 0.5569620253164557,
"repo_name": "lisawei/api_automate_test",
"id": "1d902e1bd8e2acd637a05c27f6478767854beeaa",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3310"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
docs_dir = os.path.dirname(os.path.abspath(__file__))
project_dir = os.path.abspath(os.path.join(docs_dir, '..'))
sys.path.append(project_dir)
# Set up the Django settings/environment
sys.path.append(os.path.join(project_dir, 'demoproject'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'demoproject.settings'
from chartit import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
autodoc_default_flags = ['members', 'undoc-members', 'private-members', 'special-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django-Chartit'
copyright = u'2011-2016 Praveen Gollakota & contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Django-Chartitdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Django-Chartit.tex', u'Django-Chartit Documentation',
u'Praveen Gollakota', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-chartit', u'Django-Chartit Documentation',
[u'Praveen Gollakota'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Django-Chartit', u'Django-Chartit Documentation',
u'Praveen Gollakota', 'Django-Chartit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "5cb30cd1c93c4c81062c0100a2d3096b",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 90,
"avg_line_length": 32.74058577405858,
"alnum_prop": 0.7056869009584664,
"repo_name": "pgollakota/django-chartit",
"id": "7174c50678d9bc5b1e5f969cdb0798e576965d22",
"size": "8250",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "55534"
},
{
"name": "JavaScript",
"bytes": "33930"
},
{
"name": "Prolog",
"bytes": "5596"
},
{
"name": "Python",
"bytes": "201179"
},
{
"name": "Shell",
"bytes": "5112"
}
],
"symlink_target": ""
}
|
from asynctnt_queue import Queue, Tube, Task
from asynctnt_queue.task import Status
from tests import BaseTarantoolTestCase
class TubeTestCase(BaseTarantoolTestCase):
def create_tube(self):
q = Queue(self.conn)
return q.tube("test_tube")
def _data_obj(self):
return {
'key': 'value'
}
async def test__tube_queue(self):
q = Queue(self.conn)
tube = q.tube('test_tube')
self.assertIs(tube.queue, q, 'queue is the same object')
async def test__tube_put(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
self.assertIsNotNone(t)
self.assertIsInstance(t, Task)
self.assertEqual(t.status, Status.READY)
self.assertEqual(t.task_id, 0) # first task has id = 0
self.assertEqual(t.data, self._data_obj())
async def test__tube_put_options(self):
tube = self.create_tube()
t = await tube.put(self._data_obj(), pri=4, ttl=10, ttr=1, delay=0.2)
self.assertIsNotNone(t)
self.assertIsInstance(t, Task)
self.assertEqual(t.status, Status.DELAYED)
self.assertEqual(t.task_id, 0) # first task has id = 0
self.assertEqual(t.data, self._data_obj())
async def test__tube_take(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
taken_t = await tube.take()
self.assertEqual(taken_t.task_id, t.task_id, 'task id equal')
self.assertEqual(taken_t.status, Status.TAKEN)
self.assertDictEqual(taken_t.data, t.data)
async def test__tube_take_no_tasks(self):
tube = self.create_tube()
taken_t = await tube.take(0.5)
self.assertIsNone(taken_t)
async def test__tube_ack(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.ack(t2.task_id)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.EXECUTED)
self.assertEqual(t2.data, t.data)
async def test__tube_release(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.release(t2.task_id)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.READY)
self.assertEqual(t2.data, t.data)
async def test__tube_release_delay(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.release(t2.task_id, delay=5)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.DELAYED)
self.assertEqual(t2.data, t.data)
async def test__tube_bury(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.bury(t2.task_id)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.BURIED)
self.assertEqual(t2.data, t.data)
async def test__tube_peek(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.peek(t2.task_id)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.TAKEN)
self.assertEqual(t2.data, t.data)
async def test__tube_touch(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.touch(t2.task_id, 1)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.TAKEN)
self.assertEqual(t2.data, t.data)
async def test__tube_kick(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.bury(t2.task_id)
t3 = await tube.take(0.5)
self.assertIsNone(t3, 'no tasks left')
count = await tube.kick(1)
self.assertEqual(count, 1)
t3 = await tube.take(0.5)
self.assertEqual(t3.task_id, t2.task_id)
self.assertEqual(t3.status, Status.TAKEN)
self.assertEqual(t3.data, t2.data)
async def test__tube_delete(self):
tube = self.create_tube()
t = await tube.put(self._data_obj())
t2 = await tube.take()
t2 = await tube.delete(t2.task_id)
self.assertEqual(t2.task_id, t.task_id)
self.assertEqual(t2.status, Status.EXECUTED)
self.assertEqual(t2.data, t.data)
async def test__tube_statistics(self):
tube = self.create_tube()
res = await tube.statistics()
self.assertIsNotNone(res)
|
{
"content_hash": "610d754c32086682863c1678075003a3",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 77,
"avg_line_length": 35.45522388059702,
"alnum_prop": 0.6002946748053042,
"repo_name": "igorcoding/asynctnt-queue",
"id": "8d641bda4b3d5f6a22347976dcd59553a5a547a5",
"size": "4751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tube.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "868"
},
{
"name": "Makefile",
"bytes": "911"
},
{
"name": "Python",
"bytes": "22389"
},
{
"name": "Shell",
"bytes": "4668"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SinkValidateList(ListResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, sid):
"""
Initialize the SinkValidateList
:param Version version: Version that contains the resource
:param sid: A string that uniquely identifies this Sink.
:returns: twilio.rest.events.v1.sink.sink_validate.SinkValidateList
:rtype: twilio.rest.events.v1.sink.sink_validate.SinkValidateList
"""
super(SinkValidateList, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Sinks/{sid}/Validate'.format(**self._solution)
def create(self, test_id):
"""
Create the SinkValidateInstance
:param unicode test_id: A string that uniquely identifies the test event for a Sink being validated.
:returns: The created SinkValidateInstance
:rtype: twilio.rest.events.v1.sink.sink_validate.SinkValidateInstance
"""
data = values.of({'TestId': test_id, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return SinkValidateInstance(self._version, payload, sid=self._solution['sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Events.V1.SinkValidateList>'
class SinkValidatePage(Page):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, response, solution):
"""
Initialize the SinkValidatePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param sid: A string that uniquely identifies this Sink.
:returns: twilio.rest.events.v1.sink.sink_validate.SinkValidatePage
:rtype: twilio.rest.events.v1.sink.sink_validate.SinkValidatePage
"""
super(SinkValidatePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SinkValidateInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.events.v1.sink.sink_validate.SinkValidateInstance
:rtype: twilio.rest.events.v1.sink.sink_validate.SinkValidateInstance
"""
return SinkValidateInstance(self._version, payload, sid=self._solution['sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Events.V1.SinkValidatePage>'
class SinkValidateInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, payload, sid):
"""
Initialize the SinkValidateInstance
:returns: twilio.rest.events.v1.sink.sink_validate.SinkValidateInstance
:rtype: twilio.rest.events.v1.sink.sink_validate.SinkValidateInstance
"""
super(SinkValidateInstance, self).__init__(version)
# Marshaled Properties
self._properties = {'result': payload.get('result'), }
# Context
self._context = None
self._solution = {'sid': sid, }
@property
def result(self):
"""
:returns: Feedback indicating whether the given Sink was validated.
:rtype: unicode
"""
return self._properties['result']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Events.V1.SinkValidateInstance>'
|
{
"content_hash": "1e792e207e950d9a24a16ccf29802aed",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 108,
"avg_line_length": 31.57037037037037,
"alnum_prop": 0.6377287658376349,
"repo_name": "twilio/twilio-python",
"id": "644778060e67cba33a42dc55ce58915d9dbb154a",
"size": "4277",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/events/v1/sink/sink_validate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
}
|
"""Represent a signal repeater."""
from pytradfri.const import ROOT_SIGNAL_REPEATER
class SignalRepeater:
"""Represent a signal repeater."""
def __init__(self, device, index):
self.device = device
self.index = index
@property
def raw(self):
"""Return raw data that it represents."""
return self.device.raw[ROOT_SIGNAL_REPEATER][self.index]
|
{
"content_hash": "e31d0996602c875255ff38501ed914e6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 26.133333333333333,
"alnum_prop": 0.6479591836734694,
"repo_name": "rubenbe/pytradfri",
"id": "f80748d75d407321b3b40cf4100a7129eb384482",
"size": "392",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pytradfri/device/signal_repeater.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "339"
},
{
"name": "Python",
"bytes": "116785"
},
{
"name": "Shell",
"bytes": "1330"
}
],
"symlink_target": ""
}
|
{
'name': 'Products & Pricelists',
'version': '1.2',
'category': 'Sales',
'depends': ['base', 'decimal_precision', 'mail', 'report'],
'demo': [
'product_demo.xml',
'product_image_demo.xml',
],
'description': """
This is the base module for managing products and pricelists in OpenERP.
========================================================================
Products support variants, different pricing methods, vendors information,
make to stock/order, different unit of measures, packaging and properties.
Pricelists support:
-------------------
* Multiple-level of discount (by product, category, quantities)
* Compute price based on different criteria:
* Other pricelist
* Cost price
* List price
* Vendor price
Pricelists preferences by product and/or partners.
Print product labels with barcode.
""",
'data': [
'security/product_security.xml',
'security/ir.model.access.csv',
'wizard/product_price_view.xml',
'res_config_view.xml',
'product_data.xml',
'product_report.xml',
'product_view.xml',
'pricelist_view.xml',
'partner_view.xml',
'views/report_pricelist.xml',
'views/report_productlabel.xml'
],
'test': [
'product_pricelist_demo.yml',
'test/product_pricelist.yml',
],
'installable': True,
'auto_install': False,
}
|
{
"content_hash": "4bc2c91958e3c2ff2871f88f5ced9c58",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 29.510204081632654,
"alnum_prop": 0.5746887966804979,
"repo_name": "vileopratama/vitech",
"id": "85c7d763f8b5fb61d6a5fb0992fa9307914d707d",
"size": "1547",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "src/addons/product/__openerp__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
"""Structural variation detection for split and paired reads using lumpy.
Uses lumpyexpress for lumpy integration and samblaster for read preparation:
https://github.com/GregoryFaust/samblaster
https://github.com/arq5x/lumpy-sv
"""
from distutils.version import LooseVersion
import contextlib
import os
import sys
import shutil
import subprocess
import vcf
from bcbio import utils
from bcbio.bam import ref
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.structural import shared as sshared
from bcbio.variation import effects, vcfutils, vfilter
# ## Lumpy main
def _run_lumpy(full_bams, sr_bams, disc_bams, previous_evidence, work_dir, items):
"""Run lumpy-sv, using speedseq pipeline.
"""
batch = sshared.get_cur_batch(items)
ext = "-%s-svs" % batch if batch else "-svs"
out_file = os.path.join(work_dir, "%s%s.vcf"
% (os.path.splitext(os.path.basename(items[0]["align_bam"]))[0], ext))
sv_exclude_bed = sshared.prepare_exclude_file(items, out_file)
if not utils.file_exists(out_file):
with file_transaction(items[0], out_file) as tx_out_file:
with tx_tmpdir(items[0]) as tmpdir:
full_bams = ",".join(full_bams)
sr_bams = ",".join(sr_bams)
disc_bams = ",".join(disc_bams)
exclude = "-x %s" % sv_exclude_bed if (sv_exclude_bed and utils.file_exists(sv_exclude_bed)) else ""
ref_file = dd.get_ref_file(items[0])
depths = []
for sample, ev_files in previous_evidence.items():
for ev_type, ev_file in ev_files.items():
if utils.file_exists(ev_file):
depths.append("%s:%s" % (sample, ev_file))
depth_arg = "-d %s" % ",".join(depths) if len(depths) > 0 else ""
# use our bcbio python for runs within lumpyexpress
exports = utils.local_path_export()
cmd = ("{exports}lumpyexpress -v -B {full_bams} -S {sr_bams} -D {disc_bams} "
"{exclude} {depth_arg} -T {tmpdir} -o {tx_out_file}")
do.run(cmd.format(**locals()), "lumpyexpress", items[0])
return vcfutils.sort_by_ref(out_file, items[0]), sv_exclude_bed
def _filter_by_support(in_file, data):
"""Filter call file based on supporting evidence, adding FILTER annotations to VCF.
Filters based on the following criteria:
- Minimum read support for the call (SU = total support)
- Large calls need split read evidence.
"""
rc_filter = ("FORMAT/SU < 4 || "
"(FORMAT/SR == 0 && FORMAT/SU < 15 && ABS(SVLEN)>50000) || "
"(FORMAT/SR == 0 && FORMAT/SU < 5 && ABS(SVLEN)<2000) || "
"(FORMAT/SR == 0 && FORMAT/SU < 15 && ABS(SVLEN)<300)")
return vfilter.cutoff_w_expression(in_file, rc_filter, data, name="ReadCountSupport",
limit_regions=None)
def _filter_by_background(base_samples, back_samples, gt_vcfs, data):
"""Filter base samples, marking any also present in the background.
"""
filtname = "InBackground"
filtdoc = "Variant also present in background samples with same genotype"
for base_name in base_samples:
orig_vcf = gt_vcfs[base_name]
out_file = "%s-backfilter.vcf" % (utils.splitext_plus(orig_vcf)[0])
if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
with file_transaction(data, out_file) as tx_out_file:
with utils.open_gzipsafe(orig_vcf) as in_handle:
with _vcf_readers([gt_vcfs[n] for n in back_samples]) as back_readers:
inp = vcf.Reader(in_handle, orig_vcf)
inp.filters[filtname] = vcf.parser._Filter(filtname, filtdoc)
with open(tx_out_file, "w") as out_handle:
outp = vcf.Writer(out_handle, inp)
for rec in inp:
back_recs = [r.next() for r in back_readers]
if _genotype_in_background(rec, back_recs):
rec.add_filter(filtname)
outp.write_record(rec)
if utils.file_exists(out_file + ".gz"):
out_file = out_file + ".gz"
gt_vcfs[base_name] = vcfutils.bgzip_and_index(out_file, data["config"])
return gt_vcfs
def _genotype_in_background(rec, back_recs):
"""Check if the genotype in the record of interest is present in the background records.
"""
def passes(rec):
return not rec.FILTER or len(rec.FILTER) == 0
return any([passes(brec) and passes(rec) and rec.samples[0].gt_alleles == brec.samples[0].gt_alleles
for brec in back_recs])
@contextlib.contextmanager
def _vcf_readers(vcf_files):
handles = []
readers = []
for vcf_file in vcf_files:
in_handle = utils.open_gzipsafe(vcf_file)
handles.append(in_handle)
readers.append(vcf.Reader(in_handle, vcf_file))
yield readers
for handle in handles:
handle.close()
def _sv_workdir(data):
return utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "lumpy"))
def run(items):
"""Perform detection of structural variations with lumpy, using bwa-mem alignment.
"""
if not all(utils.get_in(data, ("config", "algorithm", "aligner")) in ["bwa", False, None] for data in items):
raise ValueError("Require bwa-mem alignment input for lumpy structural variation detection")
paired = vcfutils.get_paired_bams([x["align_bam"] for x in items], items)
work_dir = _sv_workdir(paired.tumor_data if paired and paired.tumor_data else items[0])
previous_evidence = {}
full_bams, sr_bams, disc_bams = [], [], []
for data in items:
sr_bam, disc_bam = sshared.get_split_discordants(data, work_dir)
full_bams.append(dd.get_align_bam(data))
sr_bams.append(sr_bam)
disc_bams.append(disc_bam)
cur_dels, cur_dups = _bedpes_from_cnv_caller(data, work_dir)
previous_evidence[dd.get_sample_name(data)] = {}
if cur_dels and utils.file_exists(cur_dels):
previous_evidence[dd.get_sample_name(data)]["dels"] = cur_dels
if cur_dups and utils.file_exists(cur_dups):
previous_evidence[dd.get_sample_name(data)]["dups"] = cur_dups
lumpy_vcf, exclude_file = _run_lumpy(full_bams, sr_bams, disc_bams, previous_evidence,
work_dir, items)
gt_vcfs = {}
for data in items:
sample = dd.get_sample_name(data)
sr_bam, _ = sshared.get_split_discordants(data, work_dir)
sample_vcf = vcfutils.select_sample(lumpy_vcf, sample,
utils.append_stem(lumpy_vcf, "-%s" % sample),
data["config"])
if "bnd-genotype" in dd.get_tools_on(data):
gt_vcf = _run_svtyper(sample_vcf, dd.get_align_bam(data), sr_bam, exclude_file, data)
else:
std_vcf, bnd_vcf = _split_breakends(sample_vcf, data)
std_gt_vcf = _run_svtyper(std_vcf, dd.get_align_bam(data), sr_bam, exclude_file, data)
gt_vcf = vcfutils.concat_variant_files_bcftools(
orig_files=[std_gt_vcf, bnd_vcf],
out_file="%s-combined.vcf.gz" % utils.splitext_plus(std_gt_vcf)[0],
config=data["config"])
gt_vcfs[dd.get_sample_name(data)] = _filter_by_support(gt_vcf, data)
if paired and paired.normal_name:
gt_vcfs = _filter_by_background([paired.tumor_name], [paired.normal_name], gt_vcfs, paired.tumor_data)
out = []
for data in items:
if "sv" not in data:
data["sv"] = []
vcf_file = gt_vcfs[dd.get_sample_name(data)]
if dd.get_svprioritize(data):
effects_vcf, _ = effects.add_to_vcf(vcf_file, data, "snpeff")
else:
effects_vcf = None
data["sv"].append({"variantcaller": "lumpy",
"vrn_file": effects_vcf or vcf_file,
"exclude_file": exclude_file})
out.append(data)
return out
def _bedpes_from_cnv_caller(data, work_dir):
"""Retrieve BEDPEs deletion and duplications from CNV callers.
Currently integrates with CNVkit.
"""
supported = set(["cnvkit"])
cns_file = None
for sv in data.get("sv", []):
if sv["variantcaller"] in supported and "cns" in sv:
cns_file = sv["cns"]
break
if not cns_file:
return None, None
else:
out_base = os.path.join(work_dir, utils.splitext_plus(os.path.basename(cns_file))[0])
out_dels = out_base + "-dels.bedpe"
out_dups = out_base + "-dups.bedpe"
if not os.path.exists(out_dels) or not os.path.exists(out_dups):
with file_transaction(data, out_dels, out_dups) as (tx_out_dels, tx_out_dups):
try:
cnvanator_path = config_utils.get_program("cnvanator_to_bedpes.py", data)
except config_utils.CmdNotFound:
return None, None
cmd = [cnvanator_path, "-c", cns_file, "--cnvkit",
"--del_o=%s" % tx_out_dels, "--dup_o=%s" % tx_out_dups,
"-b", "250"] # XXX Uses default piece size for CNVkit. Right approach?
do.run(cmd, "Prepare CNVkit as input for lumpy", data)
return out_dels, out_dups
def _split_breakends(in_file, data):
"""Skip genotyping on breakends. This is often slow in high depth regions with many breakends.
"""
bnd_file = "%s-bnd.vcf.gz" % utils.splitext_plus(in_file)[0]
std_file = "%s-std.vcf.gz" % utils.splitext_plus(in_file)[0]
if not utils.file_uptodate(bnd_file, in_file):
with file_transaction(data, bnd_file) as tx_out_file:
cmd = """bcftools view -O z -o {tx_out_file} -i "SVTYPE='BND'" {in_file}"""
do.run(cmd.format(**locals()), "Select Lumpy breakends")
vcfutils.bgzip_and_index(bnd_file, data["config"])
if not utils.file_uptodate(std_file, in_file):
with file_transaction(data, std_file) as tx_out_file:
cmd = """bcftools view -O z -o {tx_out_file} -e "SVTYPE='BND'" {in_file}"""
do.run(cmd.format(**locals()), "Select Lumpy non-breakends")
vcfutils.bgzip_and_index(std_file, data["config"])
return std_file, bnd_file
def run_svtyper_prioritize(call):
"""Run svtyper on prioritized outputs, adding in typing for breakends skipped earlier.
"""
def _run(in_file, work_dir, data):
sr_bam, _ = sshared.get_split_discordants(data, work_dir)
return _run_svtyper(in_file, dd.get_align_bam(data), sr_bam, call.get("exclude_file"), data)
return _run
def _older_svtyper_version(svtyper):
"""Allows setup of options for older and newer svtyper.
Can remove after bcbio 1.0.2 release.
"""
version = None
for line in subprocess.check_output([svtyper, "-h"]).split("\n"):
if line.startswith("version"):
version = line.replace("$", "").strip().split()[-1].replace("v", "")
break
return not version or LooseVersion(version) < "0.1.0"
def _run_svtyper(in_file, full_bam, sr_bam, exclude_file, data):
"""Genotype structural variant calls with SVtyper.
Removes calls in high depth regions to avoid slow runtimes:
https://github.com/hall-lab/svtyper/issues/16
"""
out_file = "%s-wgts.vcf.gz" % utils.splitext_plus(in_file)[0]
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
if not vcfutils.vcf_has_variants(in_file):
shutil.copy(in_file, out_file)
else:
python = sys.executable
svtyper = os.path.join(os.path.dirname(sys.executable), "svtyper")
if exclude_file and utils.file_exists(exclude_file):
regions_to_rm = "-T ^%s" % (exclude_file)
else:
regions_to_rm = ""
# add FILTER headers, which are lost during svtyping
header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
with open(header_file, "w") as out_handle:
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
break
if line.startswith("##FILTER"):
out_handle.write(line)
for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
if _older_svtyper_version(svtyper):
svtyper_extra_opts = "-M -S {sr_bam}"
else:
svtyper_extra_opts = ""
cmd = ("bcftools view {in_file} {regions_to_rm} | "
"{python} {svtyper} -B {full_bam} " + svtyper_extra_opts + " | "
"bcftools annotate -h {header_file} | "
"bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "SV genotyping with svtyper")
return vcfutils.sort_by_ref(out_file, data)
|
{
"content_hash": "7e725d5af82ad94783283ea84f50e8a0",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 116,
"avg_line_length": 48.91428571428571,
"alnum_prop": 0.576518691588785,
"repo_name": "brainstorm/bcbio-nextgen",
"id": "0b03d1974cdd1a3820350080bc7ff24d5bf3ca37",
"size": "13696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/structural/lumpy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "3166"
},
{
"name": "Python",
"bytes": "1907112"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14841"
}
],
"symlink_target": ""
}
|
"""Entrypoint for Cook's sidecar progress reporter."""
import faulthandler
import logging
import os
import requests
import signal
import sys
import cook.sidecar.config as csc
import cook.sidecar.tracker as cst
from cook.sidecar import util
from cook.sidecar.version import VERSION
def start_progress_trackers():
try:
logging.info(f'Starting cook.sidecar {VERSION} progress reporter')
config = csc.initialize_config(os.environ)
default_url = config.callback_url
current_url = default_url
def send_progress_message(message):
nonlocal current_url
try:
for i in range(config.max_post_attempts):
response = requests.post(current_url, allow_redirects=False, timeout=config.max_post_time_secs, json=message)
if 200 <= response.status_code <= 299:
return True
elif response.is_redirect and response.status_code == 307:
current_url = response.headers['location']
logging.info(f'Redirected! Changed progress update callback url to: {current_url}')
else:
logging.warning(f'Unexpected progress update response ({response.status_code}): {response.content}')
break
else:
logging.warning(f'Reached max redirect retries ({config.max_post_redirect_follow})')
except Exception:
logging.exception(f'Error raised while posting progress update to {current_url}')
current_url = default_url
logging.info(f'Failed to post progress update. Reset progress update callback url: {current_url}')
return False
max_message_length = config.max_message_length
sample_interval_ms = config.progress_sample_interval_ms
sequence_counter = cst.ProgressSequenceCounter()
progress_updater = cst.ProgressUpdater(max_message_length, sample_interval_ms, send_progress_message)
def launch_progress_tracker(progress_location, location_tag):
progress_file_path = os.path.abspath(progress_location)
logging.info(f'Location {progress_location} (absolute path={progress_file_path}) tagged as [tag={location_tag}]')
progress_tracker = cst.ProgressTracker(config, sequence_counter, progress_updater, progress_location, location_tag)
progress_tracker.start()
return progress_tracker
progress_locations = {config.progress_output_name: 'progress',
config.stderr_file(): 'stderr',
config.stdout_file(): 'stdout'}
logging.info(f'Progress will be tracked from {len(progress_locations)} locations')
progress_trackers = [launch_progress_tracker(file, name) for file, name in progress_locations.items()]
def set_terminate_handler(handler):
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
def exit_on_interrupt(interrupt_code, _):
sys.exit(f'Progress Reporter killed with code {interrupt_code}')
def handle_interrupt(interrupt_code, _):
logging.info(f'Progress Reporter interrupted with code {interrupt_code}')
# allow a second signal to kill the process immediately (no blocking)
set_terminate_handler(exit_on_interrupt)
# force send the latest progress state if available, and stop the tracker
for progress_tracker in progress_trackers:
progress_tracker.stop()
for progress_tracker in progress_trackers:
progress_tracker.wait()
def dump_traceback(signal, frame):
faulthandler.dump_traceback()
signal.signal(signal.SIGUSR1, dump_traceback)
set_terminate_handler(handle_interrupt)
return progress_trackers
except Exception:
logging.exception('Error starting Progress Reporter')
return None
def await_progress_trackers(progress_trackers):
if progress_trackers is None:
sys.exit('Failed to start progress trackers')
# wait for all background threads to exit
# (but this process will probably be killed first instead)
for progress_tracker in progress_trackers:
progress_tracker.wait()
def main():
util.init_logging()
if len(sys.argv) == 2 and sys.argv[1] == "--version":
print(VERSION)
else:
progress_trackers = start_progress_trackers()
await_progress_trackers(progress_trackers)
if __name__ == '__main__':
main()
|
{
"content_hash": "e11608cb873729701e8a689ae9370e8f",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 129,
"avg_line_length": 41.910714285714285,
"alnum_prop": 0.6374094588836813,
"repo_name": "twosigma/Cook",
"id": "ddf986fd405187121fc0758311df77b5315d8763",
"size": "5842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sidecar/cook/sidecar/progress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "Clojure",
"bytes": "2506248"
},
{
"name": "Dockerfile",
"bytes": "2638"
},
{
"name": "Java",
"bytes": "268686"
},
{
"name": "Jupyter Notebook",
"bytes": "8047"
},
{
"name": "Makefile",
"bytes": "638"
},
{
"name": "Python",
"bytes": "978718"
},
{
"name": "Shell",
"bytes": "51541"
}
],
"symlink_target": ""
}
|
import re
import pandas as pd
import numpy as np
from numpy import int64
from pandas.tseries.offsets import *
# copied from pandas.tseries.offset
__all__ = {
'day': Day,
'businessday': BusinessDay,
'bday': BDay,
'custombusinessday': CustomBusinessDay,
'cday': CDay,
'cbmonthend': CBMonthEnd,
'cbmonthbegin': CBMonthBegin,
'monthbegin': MonthBegin,
'bmonthbegin': BMonthBegin,
'monthend': MonthEnd,
'bmonthend': BMonthEnd,
'businesshour': BusinessHour,
'custombusinesshour': CustomBusinessHour,
'yearbegin': YearBegin,
'byearbegin': BYearBegin,
'yearend': YearEnd,
'byearend': BYearEnd,
'quarterbegin': QuarterBegin,
'bquarterbegin': BQuarterBegin,
'quarterend': QuarterEnd,
'bquarterend': BQuarterEnd,
# 'lastweekofmonth': LastWeekOfMonth,
# 'fy5253quarter': FY5253Quarter,
# 'fy5253': FY5253,
'week': Week,
'weekofmonth': WeekOfMonth,
'easter': Easter,
'hour': Hour,
'minute': Minute,
'second': Second,
'milli': Milli,
'micro': Micro,
'nano': Nano,
# some helpers
'bd': BDay,
'd': DateOffset,
'w': DateOffset,
'm': DateOffset,
'y': DateOffset,
'mon': Week,
'tue': Week,
'wed': Week,
'thu': Week,
'fri': Week,
'sat': Week,
'sun': Week,
'h': Hour,
'min': Minute,
'sec': Second
}
_offset_kwds = {
'd': ['days', 1],
'w': ['weeks', 1],
'm': ['months', 1],
'y': ['years', 1],
'mon': ['weekday', 0],
'tue': ['weekday', 1],
'wed': ['weekday', 2],
'thu': ['weekday', 3],
'fri': ['weekday', 4],
'sat': ['weekday', 5],
'sun': ['weekday', 6]
}
def date_shift(dtime, shift_string):
"""
Apply shift_string to dtime (can be either datetime or DatetimeIndex)
:param dtime: base datetime one wants to shift
:param shift_string: string representing how one wants to shift the
base datetime
:return: shifted datetime
"""
_parsed_list = parse_shift_string(shift_string)
for _offset in _parsed_list:
dtime += _offset[0] * _offset[1]
return dtime
def parse_shift_string(shift_string):
"""
Convert shift_string into the list of shift integer and offset class.
:param shift_string: string representing how one wants to shift the
base datetime
:return:
"""
_shift_list = re.findall("[-+]?\w+", shift_string)
_parsed_list = []
for _offset in _shift_list:
# split into shift integer and offset class name
_shift_num = re.match("(^[-+]?[0-9]*)", _offset).group(0)
_shift_str = _offset.replace(_shift_num, "")
_split = [_shift_num, _shift_str]
if _split[0] == "+":
n = 1
elif _split[0] == "-":
n = -1
else:
n = int(_split[0])
name = assign_offset_class(_split[1].lower())
_parsed_list.append([n, name])
return _parsed_list
def assign_offset_class(offset_name):
_class = __all__[offset_name]
if offset_name in _offset_kwds.keys():
_kwds = _offset_kwds[offset_name]
return eval("_class({}={})".format(_kwds[0], _kwds[1]))
else:
return _class()
def data_asfreq(data, shift_string, fill_method="pad"):
""" Change the data frequency based on shift_string while keeping
the original index of data
:param data: pandas series or dataframe
:param shift_string: string representing how one wants to shift the
base datetime. Data specified by this string will be returned.
:param fill_method: fill method to be used for non-specified data
:return:
"""
flg = freq_flg(data, shift_string)
return data.where(flg.values > 0).fillna(method=fill_method)
def freq_flg(data, shift_string):
""" Create series which contains flag values on dates specified
by shift_string. Internal function. """
tmp = data.copy(True)
tmp.index = date_shift(data.index, shift_string)
flg = pd.Series(tmp.index.astype(int64),
index=data.index).diff().fillna(0).pipe(np.sign)
return flg
|
{
"content_hash": "eaaf059fa4cf26b627be95b40e87fb8d",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 73,
"avg_line_length": 27.158940397350992,
"alnum_prop": 0.5993660082906608,
"repo_name": "nekopuni/adagio",
"id": "de8394ead01b782d6c95816840853bcd17eb73a7",
"size": "4101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adagio/utils/date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84887"
}
],
"symlink_target": ""
}
|
import copy
import pickle
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import aliased
from sqlalchemy.orm import attributes
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm import lazyload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import state as sa_state
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import with_loader_criteria
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.collections import attribute_keyed_dict
from sqlalchemy.orm.collections import column_keyed_dict
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.pickleable import Address
from sqlalchemy.testing.pickleable import AddressWMixin
from sqlalchemy.testing.pickleable import Child1
from sqlalchemy.testing.pickleable import Child2
from sqlalchemy.testing.pickleable import Dingaling
from sqlalchemy.testing.pickleable import EmailUser
from sqlalchemy.testing.pickleable import Mixin
from sqlalchemy.testing.pickleable import Order
from sqlalchemy.testing.pickleable import Parent
from sqlalchemy.testing.pickleable import Screen
from sqlalchemy.testing.pickleable import User
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import picklers
from test.orm import _fixtures
from .inheritance._poly_fixtures import _Polymorphic
from .inheritance._poly_fixtures import Company
from .inheritance._poly_fixtures import Engineer
from .inheritance._poly_fixtures import Manager
from .inheritance._poly_fixtures import Person
def no_ed_foo(cls):
return cls.email_address != "ed@foo.com"
class PickleTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("email_address", String(50), nullable=False),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"orders",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("address_id", None, ForeignKey("addresses.id")),
Column("description", String(30)),
Column("isopen", Integer),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"dingalings",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("address_id", None, ForeignKey("addresses.id")),
Column("data", String(30)),
test_needs_acid=True,
test_needs_fk=True,
)
def _option_test_fixture(self):
users, addresses, dingalings = (
self.tables.users,
self.tables.addresses,
self.tables.dingalings,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={"dingaling": relationship(Dingaling)},
)
self.mapper_registry.map_imperatively(Dingaling, dingalings)
sess = fixture_session()
u1 = User(name="ed")
u1.addresses.append(Address(email_address="ed@bar.com"))
sess.add(u1)
sess.flush()
sess.expunge_all()
return sess, User, Address, Dingaling
def test_transient(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="ed")
u1.addresses.append(Address(email_address="ed@bar.com"))
u2 = pickle.loads(pickle.dumps(u1))
sess.add(u2)
sess.flush()
sess.expunge_all()
eq_(u1, sess.get(User, u2.id))
def test_no_mappers(self):
users = self.tables.users
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="ed")
u1_pickled = pickle.dumps(u1, -1)
clear_mappers()
assert_raises_message(
orm_exc.UnmappedInstanceError,
"Cannot deserialize object of type "
"<class 'sqlalchemy.testing.pickleable.User'> - no mapper()",
pickle.loads,
u1_pickled,
)
def test_no_instrumentation(self):
users = self.tables.users
self.mapper_registry.map_imperatively(User, users)
u1 = User(name="ed")
u1_pickled = pickle.dumps(u1, -1)
clear_mappers()
self.mapper_registry.map_imperatively(User, users)
u1 = pickle.loads(u1_pickled)
# this fails unless the InstanceState
# compiles the mapper
eq_(str(u1), "User(name='ed')")
def test_class_deferred_cols(self):
addresses, users = (self.tables.addresses, self.tables.users)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"name": sa.orm.deferred(users.c.name),
"addresses": relationship(Address, backref="user"),
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"email_address": sa.orm.deferred(addresses.c.email_address)
},
)
with fixture_session(expire_on_commit=False) as sess:
u1 = User(name="ed")
u1.addresses.append(Address(email_address="ed@bar.com"))
sess.add(u1)
sess.commit()
with fixture_session() as sess:
u1 = sess.get(User, u1.id)
assert "name" not in u1.__dict__
assert "addresses" not in u1.__dict__
u2 = pickle.loads(pickle.dumps(u1))
with fixture_session() as sess2:
sess2.add(u2)
eq_(u2.name, "ed")
eq_(
u2,
User(
name="ed", addresses=[Address(email_address="ed@bar.com")]
),
)
u2 = pickle.loads(pickle.dumps(u1))
with fixture_session() as sess2:
u2 = sess2.merge(u2, load=False)
eq_(u2.name, "ed")
eq_(
u2,
User(
name="ed", addresses=[Address(email_address="ed@bar.com")]
),
)
def test_instance_lazy_relation_loaders(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, lazy="noload")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="ed", addresses=[Address(email_address="ed@bar.com")])
sess.add(u1)
sess.commit()
sess.close()
u1 = sess.query(User).options(lazyload(User.addresses)).first()
u2 = pickle.loads(pickle.dumps(u1))
sess = fixture_session()
sess.add(u2)
assert u2.addresses
def test_lazyload_extra_criteria_not_supported(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address)},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(
name="ed",
addresses=[
Address(email_address="ed@bar.com"),
Address(email_address="ed@wood.com"),
],
)
sess.add(u1)
sess.commit()
sess.close()
u1 = (
sess.query(User)
.options(
lazyload(
User.addresses.and_(Address.email_address == "ed@bar.com")
)
)
.first()
)
with testing.expect_warnings(
r"Can't reliably serialize a lazyload\(\) option"
):
u2 = pickle.loads(pickle.dumps(u1))
eq_(len(u1.addresses), 1)
sess = fixture_session()
sess.add(u2)
eq_(len(u2.addresses), 2)
def test_invalidated_flag_pickle(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, lazy="noload")},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses.append(Address())
u2 = pickle.loads(pickle.dumps(u1))
u2.addresses.append(Address())
eq_(len(u2.addresses), 2)
def test_invalidated_flag_deepcopy(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, lazy="noload")},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses.append(Address())
u2 = copy.deepcopy(u1)
u2.addresses.append(Address())
eq_(len(u2.addresses), 2)
@testing.combinations(True, False, argnames="pickle_it")
@testing.combinations(True, False, argnames="use_mixin")
def test_loader_criteria(self, pickle_it, use_mixin):
"""test #8109"""
users, addresses = (self.tables.users, self.tables.addresses)
AddressCls = AddressWMixin if use_mixin else Address
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(AddressCls)},
)
self.mapper_registry.map_imperatively(AddressCls, addresses)
with fixture_session(expire_on_commit=False) as sess:
u1 = User(name="ed")
u1.addresses = [
AddressCls(email_address="ed@bar.com"),
AddressCls(email_address="ed@foo.com"),
]
sess.add(u1)
sess.commit()
with fixture_session(expire_on_commit=False) as sess:
# note that non-lambda is not picklable right now as
# SQL expressions usually can't be pickled.
opt = with_loader_criteria(
Mixin if use_mixin else Address,
no_ed_foo,
include_aliases=True,
)
u1 = sess.query(User).options(opt).first()
if pickle_it:
u1 = pickle.loads(pickle.dumps(u1))
sess.close()
sess.add(u1)
eq_([ad.email_address for ad in u1.addresses], ["ed@bar.com"])
def test_instance_deferred_cols(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
with fixture_session(expire_on_commit=False) as sess:
u1 = User(name="ed")
u1.addresses.append(Address(email_address="ed@bar.com"))
sess.add(u1)
sess.commit()
with fixture_session(expire_on_commit=False) as sess:
u1 = sess.get(
User,
u1.id,
options=[
sa.orm.defer(User.name),
sa.orm.defaultload(User.addresses).defer(
Address.email_address
),
],
)
assert "name" not in u1.__dict__
assert "addresses" not in u1.__dict__
u2 = pickle.loads(pickle.dumps(u1))
with fixture_session() as sess2:
sess2.add(u2)
eq_(u2.name, "ed")
assert "addresses" not in u2.__dict__
ad = u2.addresses[0]
assert "email_address" not in ad.__dict__
eq_(ad.email_address, "ed@bar.com")
eq_(
u2,
User(
name="ed", addresses=[Address(email_address="ed@bar.com")]
),
)
u2 = pickle.loads(pickle.dumps(u1))
with fixture_session() as sess2:
u2 = sess2.merge(u2, load=False)
eq_(u2.name, "ed")
assert "addresses" not in u2.__dict__
ad = u2.addresses[0]
# mapper options now transmit over merge(),
# new as of 0.6, so email_address is deferred.
assert "email_address" not in ad.__dict__
eq_(ad.email_address, "ed@bar.com")
eq_(
u2,
User(
name="ed", addresses=[Address(email_address="ed@bar.com")]
),
)
def test_pickle_protocols(self):
users, addresses = (self.tables.users, self.tables.addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(Address, addresses)
sess = fixture_session()
u1 = User(name="ed")
u1.addresses.append(Address(email_address="ed@bar.com"))
sess.add(u1)
sess.commit()
u1 = sess.query(User).first()
u1.addresses
for loads, dumps in picklers():
u2 = loads(dumps(u1))
eq_(u1, u2)
def test_state_info_pickle(self):
users = self.tables.users
self.mapper_registry.map_imperatively(User, users)
u1 = User(id=1, name="ed")
sa.inspect(u1).info["some_key"] = "value"
state_dict = sa.inspect(u1).__getstate__()
state = sa_state.InstanceState.__new__(sa_state.InstanceState)
state.__setstate__(state_dict)
u2 = state.obj()
eq_(sa.inspect(u2).info["some_key"], "value")
@testing.combinations(
lambda User: sa.orm.joinedload(User.addresses),
lambda User: sa.orm.defer(User.name),
lambda Address: sa.orm.joinedload(User.addresses).joinedload(
Address.dingaling
),
lambda: sa.orm.joinedload(User.addresses).raiseload("*"),
lambda: sa.orm.raiseload("*"),
)
def test_unbound_options(self, test_case):
sess, User, Address, Dingaling = self._option_test_fixture()
opt = testing.resolve_lambda(test_case, User=User, Address=Address)
opt2 = pickle.loads(pickle.dumps(opt))
eq_(opt.path, opt2.path)
u1 = sess.query(User).options(opt).first()
pickle.loads(pickle.dumps(u1))
@testing.combinations(
lambda User: sa.orm.Load(User).joinedload(User.addresses),
lambda User: sa.orm.Load(User)
.joinedload(User.addresses)
.raiseload("*"),
lambda User: sa.orm.Load(User).defer(User.name),
lambda User, Address: sa.orm.Load(User)
.joinedload(User.addresses)
.joinedload(Address.dingaling),
lambda User, Address: sa.orm.Load(User)
.joinedload(User.addresses, innerjoin=True)
.joinedload(Address.dingaling),
)
def test_bound_options(self, test_case):
sess, User, Address, Dingaling = self._option_test_fixture()
opt = testing.resolve_lambda(test_case, User=User, Address=Address)
opt2 = pickle.loads(pickle.dumps(opt))
eq_(opt.path, opt2.path)
for v1, v2 in zip(opt.context, opt2.context):
eq_(v1.local_opts, v2.local_opts)
u1 = sess.query(User).options(opt).first()
pickle.loads(pickle.dumps(u1))
def test_collection_setstate(self):
"""test a particular cycle that requires CollectionAdapter
to not rely upon InstanceState to deserialize."""
m = MetaData()
c1 = Table(
"c1",
m,
Column("parent_id", String, ForeignKey("p.id"), primary_key=True),
)
c2 = Table(
"c2",
m,
Column("parent_id", String, ForeignKey("p.id"), primary_key=True),
)
p = Table("p", m, Column("id", String, primary_key=True))
self.mapper_registry.map_imperatively(
Parent,
p,
properties={
"children1": relationship(Child1),
"children2": relationship(Child2),
},
)
self.mapper_registry.map_imperatively(Child1, c1)
self.mapper_registry.map_imperatively(Child2, c2)
obj = Parent()
screen1 = Screen(obj)
screen1.errors = [obj.children1, obj.children2]
screen2 = Screen(Child2(), screen1)
pickle.loads(pickle.dumps(screen2))
def test_exceptions(self):
class Foo:
pass
users = self.tables.users
self.mapper_registry.map_imperatively(User, users)
for sa_exc in (
orm_exc.UnmappedInstanceError(Foo()),
orm_exc.UnmappedClassError(Foo),
orm_exc.ObjectDeletedError(attributes.instance_state(User())),
):
for loads, dumps in picklers():
repickled = loads(dumps(sa_exc))
eq_(repickled.args[0], sa_exc.args[0])
def test_attribute_mapped_collection(self):
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=attribute_keyed_dict("email_address"),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses = {"email1": Address(email_address="email1")}
for loads, dumps in picklers():
repickled = loads(dumps(u1))
eq_(u1.addresses, repickled.addresses)
eq_(repickled.addresses["email1"], Address(email_address="email1"))
def test_column_mapped_collection(self):
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=column_keyed_dict(
addresses.c.email_address
),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses = {
"email1": Address(email_address="email1"),
"email2": Address(email_address="email2"),
}
for loads, dumps in picklers():
repickled = loads(dumps(u1))
eq_(u1.addresses, repickled.addresses)
eq_(repickled.addresses["email1"], Address(email_address="email1"))
def test_composite_column_mapped_collection(self):
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
collection_class=column_keyed_dict(
[addresses.c.id, addresses.c.email_address]
),
)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
u1 = User()
u1.addresses = {
(1, "email1"): Address(id=1, email_address="email1"),
(2, "email2"): Address(id=2, email_address="email2"),
}
for loads, dumps in picklers():
repickled = loads(dumps(u1))
eq_(u1.addresses, repickled.addresses)
eq_(
repickled.addresses[(1, "email1")],
Address(id=1, email_address="email1"),
)
class OptionsTest(_Polymorphic):
def test_options_of_type(self):
with_poly = with_polymorphic(Person, [Engineer, Manager], flat=True)
for opt, serialized_path, serialized_of_type in [
(
sa.orm.joinedload(Company.employees.of_type(Engineer)),
[(Company, "employees"), (Engineer, None)],
Engineer,
),
(
sa.orm.joinedload(Company.employees.of_type(with_poly)),
[(Company, "employees"), (Person, None)],
None,
),
]:
opt2 = pickle.loads(pickle.dumps(opt))
eq_(opt.__getstate__()["path"], serialized_path)
eq_(opt2.__getstate__()["path"], serialized_path)
for v1, v2 in zip(opt.context, opt2.context):
eq_(v1.__getstate__()["_of_type"], serialized_of_type)
eq_(v2.__getstate__()["_of_type"], serialized_of_type)
def test_load(self):
s = fixture_session()
with_poly = with_polymorphic(Person, [Engineer, Manager], flat=True)
emp = (
s.query(Company)
.options(subqueryload(Company.employees.of_type(with_poly)))
.first()
)
pickle.loads(pickle.dumps(emp))
class PolymorphicDeferredTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30)),
Column("type", String(30)),
)
Table(
"email_users",
metadata,
Column("id", Integer, ForeignKey("users.id"), primary_key=True),
Column("email_address", String(30)),
)
def test_polymorphic_deferred(self):
email_users, users = (self.tables.email_users, self.tables.users)
self.mapper_registry.map_imperatively(
User,
users,
polymorphic_identity="user",
polymorphic_on=users.c.type,
)
self.mapper_registry.map_imperatively(
EmailUser,
email_users,
inherits=User,
polymorphic_identity="emailuser",
)
eu = EmailUser(name="user1", email_address="foo@bar.com")
with fixture_session() as sess:
sess.add(eu)
sess.commit()
with fixture_session() as sess:
eu = sess.query(User).first()
eu2 = pickle.loads(pickle.dumps(eu))
sess2 = fixture_session()
sess2.add(eu2)
assert "email_address" not in eu2.__dict__
eq_(eu2.email_address, "foo@bar.com")
class TupleLabelTest(_fixtures.FixtureTest):
@classmethod
def setup_classes(cls):
pass
@classmethod
def setup_mappers(cls):
users, addresses, orders = (
cls.tables.users,
cls.tables.addresses,
cls.tables.orders,
)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", order_by=addresses.c.id
),
# o2m, m2o
"orders": relationship(
Order, backref="user", order_by=orders.c.id
),
},
)
cls.mapper_registry.map_imperatively(Address, addresses)
cls.mapper_registry.map_imperatively(
Order, orders, properties={"address": relationship(Address)}
) # m2o
def test_tuple_labeling(self):
sess = fixture_session()
# test pickle + all the protocols !
for pickled in False, -1, 0, 1, 2:
for row in sess.query(User, Address).join(User.addresses).all():
if pickled is not False:
row = pickle.loads(pickle.dumps(row, pickled))
eq_(list(row._fields), ["User", "Address"])
eq_(row.User, row[0])
eq_(row.Address, row[1])
for row in sess.query(User.name, User.id.label("foobar")):
if pickled is not False:
row = pickle.loads(pickle.dumps(row, pickled))
eq_(list(row._fields), ["name", "foobar"])
eq_(row.name, row[0])
eq_(row.foobar, row[1])
for row in sess.query(User).with_entities(
User.name, User.id.label("foobar")
):
if pickled is not False:
row = pickle.loads(pickle.dumps(row, pickled))
eq_(list(row._fields), ["name", "foobar"])
eq_(row.name, row[0])
eq_(row.foobar, row[1])
oalias = aliased(Order)
for row in (
sess.query(User, oalias)
.join(User.orders.of_type(oalias))
.all()
):
if pickled is not False:
row = pickle.loads(pickle.dumps(row, pickled))
eq_(list(row._fields), ["User"])
eq_(row.User, row[0])
oalias = aliased(Order, name="orders")
for row in (
sess.query(User, oalias).join(oalias, User.orders).all()
):
if pickled is not False:
row = pickle.loads(pickle.dumps(row, pickled))
eq_(list(row._fields), ["User", "orders"])
eq_(row.User, row[0])
eq_(row.orders, row[1])
for row in sess.query(User.name + "hoho", User.name):
eq_(list(row._fields), ["name"])
eq_(row[0], row.name + "hoho")
if pickled is not False:
ret = sess.query(User, Address).join(User.addresses).all()
pickle.loads(pickle.dumps(ret, pickled))
class CustomSetupTeardownTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("email_address", String(50), nullable=False),
test_needs_acid=True,
test_needs_fk=True,
)
def test_rebuild_state(self):
"""not much of a 'test', but illustrate how to
remove instance-level state before pickling.
"""
users = self.tables.users
self.mapper_registry.map_imperatively(User, users)
u1 = User()
attributes.manager_of_class(User).teardown_instance(u1)
assert not u1.__dict__
u2 = pickle.loads(pickle.dumps(u1))
attributes.manager_of_class(User).setup_instance(u2)
assert attributes.instance_state(u2)
|
{
"content_hash": "93b4c76e3cd705d4b0de274fe1c361cf",
"timestamp": "",
"source": "github",
"line_count": 867,
"max_line_length": 79,
"avg_line_length": 33.01038062283737,
"alnum_prop": 0.5495108315863033,
"repo_name": "sqlalchemy/sqlalchemy",
"id": "710c98ee6d26df1ec3e6968bc7630c2cbb6e22ee",
"size": "28620",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/orm/test_pickled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
from .channel_steepness import SteepnessFinder
__all__ = ['SteepnessFinder', ]
|
{
"content_hash": "1974bba3641a1b03b4252f7ffb44bef7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 20.25,
"alnum_prop": 0.7283950617283951,
"repo_name": "Carralex/landlab",
"id": "d3ce7ad17e114e2f842872180382504e22d8aab8",
"size": "81",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "landlab/components/steepness_index/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1452"
},
{
"name": "HTML",
"bytes": "99948"
},
{
"name": "Jupyter Notebook",
"bytes": "13921"
},
{
"name": "Makefile",
"bytes": "1765"
},
{
"name": "PowerShell",
"bytes": "7128"
},
{
"name": "Python",
"bytes": "3555237"
},
{
"name": "Shell",
"bytes": "2370"
},
{
"name": "TeX",
"bytes": "64170"
}
],
"symlink_target": ""
}
|
import pybindgen
def generate(file_):
mod = pybindgen.Module('_rabinkarprh')
mod.add_include('"rabinkarp.h"')
mod.add_container('std::list<unsigned int>', 'unsigned int', 'list')
mod.add_container('std::list<double>', 'double', 'list')
cls = mod.add_class('RabinKarpHash')
cls.add_constructor([pybindgen.param('int', 'my_window_size'),
pybindgen.param('int', 'seed')])
cls.add_method('set_threshold',
None,
[pybindgen.param('double', 'my_threshold')])
cls.add_method('next_chunk_boundaries',
pybindgen.retval('std::list<unsigned int>'),
[pybindgen.param('const std::string*', 'content'),
pybindgen.param('const unsigned int', 'prepend_bytes')])
cls = mod.add_class('RabinKarpMultiThresholdHash')
cls.add_constructor([pybindgen.param('int', 'my_window_size'),
pybindgen.param('int', 'seed'),
pybindgen.param('std::list<double>', 'my_thresholds')])
cls.add_method('next_chunk_boundaries_with_thresholds',
pybindgen.retval('std::list<unsigned int>'),
[pybindgen.param('const std::string*', 'content'),
pybindgen.param('unsigned int', 'prepend_bytes')])
mod.generate(file_)
|
{
"content_hash": "55de38571c955751aacdefd0d0929ce4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 45.06666666666667,
"alnum_prop": 0.5724852071005917,
"repo_name": "netleibi/fastchunking",
"id": "821794d76569df47f125a3ed73bbf643b5bab55c",
"size": "1352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/rabinkarp_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "27922"
},
{
"name": "Python",
"bytes": "24608"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404
from .models import Officer
def index(request):
officers = Officer.objects.all()
return render(request, 'officers/index.jade', {'officers': officers})
def detail_id(request, officer_id):
officer = get_object_or_404(Officer, pk=officer_id)
return render(request, 'officers/detail.jade', {'officer': officer})
def detail_name(request, officer_username):
officer = get_object_or_404(Officer, user__user__username=officer_username)
return render(request, 'officers/detail.jade', {'officer': officer})
|
{
"content_hash": "aabd0d803391d46f0508f44126e1dd31",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 40.5,
"alnum_prop": 0.7442680776014109,
"repo_name": "sreidy/roboticsclub.org",
"id": "cf5b74e4d56346aa3048af87ccdd786d26ee37f7",
"size": "567",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "officers/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87807"
},
{
"name": "HTML",
"bytes": "32573"
},
{
"name": "JavaScript",
"bytes": "5052"
},
{
"name": "Python",
"bytes": "239652"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0083_auto_20161221_1411'),
]
operations = [
migrations.AlterField(
model_name='claimant',
name='application_year',
field=models.IntegerField(default=2017),
),
]
|
{
"content_hash": "9628de21e81e13a785bfd75a7c8d3421",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 21.72222222222222,
"alnum_prop": 0.6035805626598465,
"repo_name": "softwaresaved/fat",
"id": "3ce2bdf95a8a1adf65ba66b8bc7a22ea7db20a82",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lowfat/migrations/0084_auto_20170112_1614.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3202"
},
{
"name": "HTML",
"bytes": "38552"
},
{
"name": "JavaScript",
"bytes": "653"
},
{
"name": "Python",
"bytes": "235043"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
}
|
import os
import psutil
import logging
from time import sleep
from reynard.monitors import Monitor
from katcp import Sensor
from subprocess import Popen,PIPE
log = logging.getLogger("reynard.monitor.memory")
class MemoryMonitor(Monitor):
def __init__(self,polling_interval=1):
super(MemoryMonitor,self).__init__(polling_interval)
for node in get_meminfo().keys():
name_ = "%s_memory_size"%node
self._sensors[name_] = Sensor.float(name_,
description = "total memory on %s"%node,
params = [8192,1e9],
unit = "MB",
default = 0)
name_ = "%s_memory_avail"%node
self._sensors[name_] = Sensor.float(name_,
description = "available memory on %s"%node,
params = [8192,1e9],
unit = "MB",
default = 0)
def update_values(self):
info = get_meminfo()
for node in info.keys():
total = info[node]["MemTotal"]
avail = info[node]["MemFree"]
percent = 100.0 * avail/total
if percent < 5:
status = Sensor.WARN
else:
status = Sensor.NOMINAL
self._sensors["%s_memory_size"%node].set_value(info[node]["MemTotal"])
self._sensors["%s_memory_avail"%node].set_value(info[node]["MemFree"],status)
def get_meminfo():
try:
return numastat_meminfo()
except:
return psutil_meminfo()
def psutil_meminfo():
tag = "sys"
out = {tag:{}}
vmem = psutil.virtual_memory()
out[tag]["MemTotal"] = vmem.total/1e6
out[tag]["MemFree"] = vmem.available/1e6
return out
def numastat_meminfo():
out = {}
p = Popen(["numastat","-m"],stdout=PIPE,stderr=PIPE)
p.wait()
lines = p.stdout.read().decode().splitlines()
count = lines[2].count("Node")
for ii in range(count):
out["numa%d"%ii] = {}
for line in lines[4:]:
split = line.split()
name = split[0]
for ii,val in enumerate(split[1:-1]):
out["numa%d"%ii][name] = float(val)
return out
if __name__ == "__main__":
from reynard.monitors.monitor import monitor_test
monitor_test(MemoryMonitor())
|
{
"content_hash": "0ac76b0877e81ceeacc9a5e0f5fab2b5",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 89,
"avg_line_length": 31,
"alnum_prop": 0.5572249226690235,
"repo_name": "ewanbarr/reynard",
"id": "197b19900f5b709103cf1f57ab45a9389d1b89f1",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reynard/monitors/memory_monitor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19994"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
def migrate_comments(apps, schema_editor):
AlertConfig = apps.get_model("main", "AlertConfig")
for t in ['fanfunding', 'streamtip', 'imraising', 'twitchalerts']:
configs = AlertConfig.objects.filter(type=t)
for c in configs:
c.alert_text = c.alert_text + "[[br]][[comment]]"
c.save()
class Migration(migrations.Migration):
dependencies = [
('main', '0016_auto_20160617_1016'),
]
operations = [
migrations.RunPython(migrate_comments)
]
|
{
"content_hash": "675526b28d67a7e9b1eea154b233150d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 29.476190476190474,
"alnum_prop": 0.6122778675282714,
"repo_name": "google/mirandum",
"id": "f1e6e07fb694bc65e988b5ffa8eb427a909b73de",
"size": "1256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alerts/main/migrations/0017_donation_comment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9472"
},
{
"name": "Elixir",
"bytes": "574"
},
{
"name": "HTML",
"bytes": "122101"
},
{
"name": "JavaScript",
"bytes": "19438"
},
{
"name": "Jinja",
"bytes": "4124"
},
{
"name": "Python",
"bytes": "398732"
},
{
"name": "Shell",
"bytes": "3296"
}
],
"symlink_target": ""
}
|
from mptypes import mpmathify, extraprec, eps, mpf, MultiPrecisionArithmetic
from calculus import diff
from functions import sqrt, sign, ldexp
from matrices import matrix, norm as norm_
from linalg import lu_solve
from copy import copy
##############
# 1D-SOLVERS #
##############
class Newton:
"""
1d-solver generating pairs of approximative root and error.
Needs starting points x0 close to the root.
Pro:
* converges fast
* sometimes more robust than secant with bad second starting point
Contra:
* converges slowly for multiple roots
* needs first derivative
* 2 function evaluations per iteration
"""
maxsteps = 20
def __init__(self, f, x0, **kwargs):
if len(x0) == 1:
self.x0 = x0[0]
else:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.f = f
if not 'df' in kwargs:
def df(x):
return diff(f, x)
else:
df = kwargs['df']
self.df = df
def __iter__(self):
f = self.f
df = self.df
x0 = self.x0
while True:
x1 = x0 - f(x0) / df(x0)
error = abs(x1 - x0)
x0 = x1
yield (x1, error)
class Secant:
"""
1d-solver generating pairs of approximative root and error.
Needs starting points x0 and x1 close to the root.
x1 defaults to x0 + 0.25.
Pro:
* converges fast
Contra:
* converges slowly for multiple roots
"""
maxsteps = 30
def __init__(self, f, x0, **kwargs):
if len(x0) == 1:
self.x0 = x0[0]
self.x1 = self.x0 + 0.25
elif len(x0) == 2:
self.x0 = x0[0]
self.x1 = x0[1]
else:
raise ValueError('expected 1 or 2 starting points, got %i' % len(x0))
self.f = f
def __iter__(self):
f = self.f
x0 = self.x0
x1 = self.x1
f0 = f(x0)
while True:
f1 = f(x1)
l = x1 - x0
if not l:
break
s = (f1 - f0) / l
if not s:
break
x0, x1 = x1, x1 - f1/s
f0 = f1
yield x1, abs(l)
class MNewton:
"""
1d-solver generating pairs of approximative root and error.
Needs starting point x0 close to the root.
Uses modified Newton's method that converges fast regardless of the
multiplicity of the root.
Pro:
* converges fast for multiple roots
Contra:
* needs first and second derivative of f
* 3 function evaluations per iteration
"""
maxsteps = 20
def __init__(self, f, x0, **kwargs):
if not len(x0) == 1:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.x0 = x0[0]
self.f = f
if not 'df' in kwargs:
def df(x):
return diff(f, x)
else:
df = kwargs['df']
self.df = df
if not 'd2f' in kwargs:
def d2f(x):
return diff(df, x)
else:
d2f = kwargs['df']
self.d2f = d2f
def __iter__(self):
x = self.x0
f = self.f
df = self.df
d2f = self.d2f
while True:
prevx = x
fx = f(x)
if fx == 0:
break
dfx = df(x)
d2fx = d2f(x)
# x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
x -= fx / (dfx - fx * d2fx / dfx)
error = abs(x - prevx)
yield x, error
class Halley:
"""
1d-solver generating pairs of approximative root and error.
Needs a starting point x0 close to the root.
Uses Halley's method with cubic convergance rate.
Pro:
* converges even faster the Newton's method
* useful when computing with *many* digits
Contra:
* needs first and second derivative of f
* 3 function evaluations per iteration
* converges slowly for multiple roots
"""
maxsteps = 20
def __init__(self, f, x0, **kwargs):
if not len(x0) == 1:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.x0 = x0[0]
self.f = f
if not 'df' in kwargs:
def df(x):
return diff(f, x)
else:
df = kwargs['df']
self.df = df
if not 'd2f' in kwargs:
def d2f(x):
return diff(df, x)
else:
d2f = kwargs['df']
self.d2f = d2f
def __iter__(self):
x = self.x0
f = self.f
df = self.df
d2f = self.d2f
while True:
prevx = x
fx = f(x)
dfx = df(x)
d2fx = d2f(x)
x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
error = abs(x - prevx)
yield x, error
class Muller:
"""
1d-solver generating pairs of approximative root and error.
Needs starting points x0, x1 and x2 close to the root.
x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
Uses Muller's method that converges towards complex roots.
Pro:
* converges fast (somewhat faster than secant)
* can find complex roots
Contra:
* converges slowly for multiple roots
* may have complex values for real starting points and real roots
http://en.wikipedia.org/wiki/Muller's_method
"""
maxsteps = 30
def __init__(self, f, x0, **kwargs):
if len(x0) == 1:
self.x0 = x0[0]
self.x1 = self.x0 + 0.25
self.x2 = self.x1 + 0.25
elif len(x0) == 2:
self.x0 = x0[0]
self.x1 = x0[1]
self.x2 = self.x1 + 0.25
elif len(x0) == 3:
self.x0 = x0[0]
self.x1 = x0[1]
self.x2 = x0[2]
else:
raise ValueError('expected 1, 2 or 3 starting points, got %i'
% len(x0))
self.f = f
self.verbose = kwargs['verbose']
def __iter__(self):
f = self.f
x0 = self.x0
x1 = self.x1
x2 = self.x2
fx0 = f(x0)
fx1 = f(x1)
fx2 = f(x2)
while True:
# TODO: maybe refactoring with function for divided differences
# calculate divided diffferences
fx2x1 = (fx1 - fx2) / (x1 - x2)
fx2x0 = (fx0 - fx2) / (x0 - x2)
fx1x0 = (fx0 - fx1) / (x0 - x1)
w = fx2x1 + fx2x0 - fx1x0
fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
if w == 0 and fx2x1x0 == 0:
if self.verbose:
print 'canceled with'
print 'x0 =', x0, ', x1 =', x1, 'and x2 =', x2
break
x0 = x1
fx0 = fx1
x1 = x2
fx1 = fx2
# denominator should be as large as possible => choose sign
r = sqrt(w**2 - 4*fx2*fx2x1x0)
if abs(w - r) > abs(w + r):
r = -r
x2 -= 2*fx2 / (w + r)
fx2 = f(x2)
error = abs(x2 - x1)
yield x2, error
# TODO: consider raising a ValueError when there's no sign change in a and b
class Bisection:
"""
1d-solver generating pairs of approximative root and error.
Uses bisection method to find a root of f in [a, b].
Might fail for multiple roots (needs sign change).
Pro:
* robust and reliable
Contra:
* converges slowly
* needs sign change
"""
maxsteps = 100
def __init__(self, f, x0, **kwargs):
if len(x0) != 2:
raise ValueError('expected interval of 2 points, got %i' % len(x0))
self.f = f
self.a = x0[0]
self.b = x0[1]
def __iter__(self):
f = self.f
a = self.a
b = self.b
l = b - a
fb = f(b)
while True:
m = ldexp(a + b, -1)
fm = f(m)
if fm * fb < 0:
a = m
else:
b = m
fb = fm
l /= 2
yield (a + b)/2, abs(l)
def _getm(method):
"""
Return a function to calculate m for Illinois-like methods.
"""
if method == 'illinois':
def getm(fz, fb):
return 0.5
elif method == 'pegasus':
def getm(fz, fb):
return fb/(fb + fz)
elif method == 'anderson':
def getm(fz, fb):
m = 1 - fz/fb
if m > 0:
return m
else:
return 0.5
else:
raise ValueError, "method '%s' not recognized" % method
return getm
class Illinois:
"""
1d-solver generating pairs of approximative root and error.
Uses Illinois method or similair to find a root of f in [a, b].
Might fail for multiple roots (needs sign change).
Combines bisect with secant (improved regula falsi).
The only difference between the methods is the scaling factor m, which is
used to ensure convergence (you can choose one using the 'method' keyword):
Illinois method ('illinois'):
m = 0.5
Pegasus method ('pegasus'):
m = fb/(fb + fz)
Anderson-Bjoerk method ('anderson'):
m = 1 - fz/fb if positive else 0.5
Pro:
* converges very fast
Contra:
* has problems with multiple roots
* needs sign change
"""
maxsteps = 30
def __init__(self, f, x0, **kwargs):
if len(x0) != 2:
raise ValueError('expected interval of 2 points, got %i' % len(x0))
self.a = x0[0]
self.b = x0[1]
self.f = f
self.tol = kwargs['tol']
self.verbose = kwargs['verbose']
self.method = kwargs.get('method', 'illinois')
self.getm = _getm(self.method)
if self.verbose:
print 'using %s method' % self.method
def __iter__(self):
method = self.method
f = self.f
a = self.a
b = self.b
fa = f(a)
fb = f(b)
m = None
while True:
l = b - a
if l == 0:
break
s = (fb - fa) / l
z = a - fa/s
fz = f(z)
if abs(fz) < self.tol:
# TODO: better condition (when f is very flat)
if self.verbose:
print 'canceled with z =', z
yield z, l
break
if fz * fb < 0: # root in [z, b]
a = b
fa = fb
b = z
fb = fz
else: # root in [a, z]
m = self.getm(fz, fb)
b = z
fb = fz
fa = m*fa # scale down to ensure convergence
if self.verbose and m and not method == 'illinois':
print 'm:', m
yield (a + b)/2, abs(l)
def Pegasus(*args, **kwargs):
"""
1d-solver generating pairs of approximative root and error.
Uses Pegasus method to find a root of f in [a, b].
Wrapper for illinois to use method='pegasus'.
"""
kwargs['method'] = 'pegasus'
return Illinois(*args, **kwargs)
def Anderson(*args, **kwargs):
u"""
1d-solver generating pairs of approximative root and error.
Uses Anderson-Bjoerk method to find a root of f in [a, b].
Wrapper for illinois to use method='pegasus'.
"""
kwargs['method'] = 'anderson'
return Illinois(*args, **kwargs)
# TODO: check whether it's possible to combine it with Illinois stuff
class Ridder:
"""
1d-solver generating pairs of approximative root and error.
Ridders' method to find a root of f in [a, b].
Is told to perform as well as Brent's method while being simpler.
Pro:
* very fast
* simpler than Brent's method
Contra:
* two function evaluations per step
* has problems with multiple roots
* needs sign change
http://en.wikipedia.org/wiki/Ridders'_method
"""
maxsteps = 30
def __init__(self, f, x0, **kwargs):
self.f = f
if len(x0) != 2:
raise ValueError('expected interval of 2 points, got %i' % len(x0))
self.x1 = x0[0]
self.x2 = x0[1]
self.verbose = kwargs['verbose']
self.tol = kwargs['tol']
def __iter__(self):
f = self.f
x1 = self.x1
fx1 = f(x1)
x2 = self.x2
fx2 = f(x2)
while True:
x3 = 0.5*(x1 + x2)
fx3 = f(x3)
x4 = x3 + (x3 - x1) * sign(fx1 - fx2) * fx3 / sqrt(fx3**2 - fx1*fx2)
fx4 = f(x4)
if abs(fx4) < self.tol:
# TODO: better condition (when f is very flat)
if self.verbose:
print 'canceled with f(x4) =', fx4
yield x4, abs(x1 - x2)
break
if fx4 * fx2 < 0: # root in [x4, x2]
x1 = x4
fx1 = fx4
else: # root in [x1, x4]
x2 = x4
fx2 = fx4
error = abs(x1 - x2)
yield (x1 + x2)/2, error
class ANewton:
"""
EXPERIMENTAL 1d-solver generating pairs of approximative root and error.
Uses Newton's method modified to use Steffensens method when convergence is
slow. (I.e. for multiple roots.)
"""
maxsteps = 20
def __init__(self, f, x0, **kwargs):
if not len(x0) == 1:
raise ValueError('expected 1 starting point, got %i' % len(x0))
self.x0 = x0[0]
self.f = f
if not 'df' in kwargs:
def df(x):
return diff(f, x)
else:
df = kwargs['df']
self.df = df
def phi(x):
return x - f(x) / df(x)
self.phi = phi
self.verbose = kwargs['verbose']
def __iter__(self):
x0 = self.x0
f = self.f
df = self.df
phi = self.phi
error = 0
counter = 0
while True:
prevx = x0
try:
x0 = phi(x0)
except ZeroDivisionError:
if self.verbose:
'ZeroDivisionError: canceled with x =', x0
break
preverror = error
error = abs(prevx - x0)
# TODO: decide not to use convergence acceleration
if error and abs(error - preverror) / error < 1:
if self.verbose:
print 'converging slowly'
counter += 1
if counter >= 3:
# accelerate convergence
phi = steffensen(phi)
counter = 0
if self.verbose:
print 'accelerating convergence'
yield x0, error
# TODO: add Brent
############################
# MULTIDIMENSIONAL SOLVERS #
############################
def jacobian(f, x):
"""
Calculate the Jacobian matrix of a function at the point x0.
This is the first derivative of a vectorial function:
f : R^m -> R^n with m >= n
"""
x = matrix(x)
h = sqrt(eps)
fx = matrix(f(*x))
m = len(fx)
n = len(x)
J = matrix(m, n)
for j in xrange(n):
xj = x.copy()
xj[j] += h
Jj = (matrix(f(*xj)) - fx) / h
for i in xrange(m):
J[i,j] = Jj[i]
return J
one = mpf(1)
# TODO: test with user-specified jacobian matrix, support force_type
class MDNewton:
"""
Find the root of a vector function numerically using Newton's method.
f is a vector function representing a nonlinear equation system.
x0 is the starting point close to the root.
J is a function returning the jacobian matrix for a point.
Supports overdetermined systems.
Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
The function to calculate the Jacobian matrix can be given using the
keyword 'J'. Otherwise it will be calculated numerically.
Please note that this method converges only locally. Especially for high-
dimensional systems it is not trvial to find a good starting point being
close enough to the root.
It is recommended to use a faster, low-precision solver from SciPy [1] or
OpenOpt [2] to get an initial guess. Afterwards you can use this method for
root-polishing to any precision.
[1] http://scipy.org
[2] http://openopt.org
"""
maxsteps = 10
def __init__(self, f, x0, **kwargs):
self.f = f
if isinstance(x0, (tuple, list)):
x0 = matrix(x0)
assert x0.cols == 1, 'need a vector'
self.x0 = x0
if 'J' in kwargs:
self.J = kwargs['J']
else:
def J(*x):
return jacobian(f, x)
self.J = J
self.norm = kwargs['norm']
self.verbose = kwargs['verbose']
def __iter__(self):
f = self.f
x0 = self.x0
norm = self.norm
J = self.J
fx = matrix(f(*x0))
fxnorm = norm(fx)
cancel = False
while not cancel:
# get direction of descent
fxn = -fx
Jx = J(*x0)
s = lu_solve(Jx, fxn)
if self.verbose:
print 'Jx:'
print Jx
print 's:', s
# damping step size TODO: better strategy (hard task)
l = one
x1 = x0 + s
while True:
if x1 == x0:
if self.verbose:
print "canceled, won't get more excact"
cancel = True
break
fx = matrix(f(*x1))
newnorm = norm(fx)
if newnorm < fxnorm:
# new x accepted
fxnorm = newnorm
x0 = x1
break
l /= 2
x1 = x0 + l*s
yield (x0, fxnorm)
#############
# UTILITIES #
#############
str2solver = {'newton':Newton, 'secant':Secant,'mnewton':MNewton,
'halley':Halley, 'muller':Muller, 'bisect':Bisection,
'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
@extraprec(20)
def findroot(f, x0, solver=Secant, tol=None, verbose=False, verify=True,
force_type=mpmathify, **kwargs):
r"""
Find a solution to `f(x) = 0`, using *x0* as starting point or
interval for *x*.
Multidimensional overdetermined systems are supported.
You can specify them using a function or a list of functions.
If the found root does not satisfy `|f(x)^2 < \mathrm{tol}|`,
an exception is raised (this can be disabled with *verify=False*).
**Arguments**
*f*
one dimensional function
*x0*
starting point, several starting points or interval (depends on solver)
*tol*
the returned solution has an error smaller than this
*verbose*
print additional information for each iteration if true
*verify*
verify the solution and raise a ValueError if `|f(x) > \mathrm{tol}|`
*force_type*
use specified type constructor on starting points
*solver*
a generator for *f* and *x0* returning approximative solution and error
*maxsteps*
after how many steps the solver will cancel
*df*
first derivative of *f* (used by some solvers)
*d2f*
second derivative of *f* (used by some solvers)
*multidimensional*
force multidimensional solving
*J*
Jacobian matrix of *f* (used by multidimensional solvers)
*norm*
used vector norm (used by multidimensional solvers)
solver has to be callable with ``(f, x0, **kwargs)`` and return an generator
yielding pairs of approximative solution and estimated error (which is
expected to be positive).
You can use the following string aliases:
'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
'ridder', 'anewton', 'bisect'
See mpmath.optimization for their documentation.
**Examples**
The function :func:`findroot` locates a root of a given function using the
secant method by default. A simple example use of the secant method is to
compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
>>> from mpmath import *
>>> mp.dps = 30
>>> print findroot(sin, 3)
3.14159265358979323846264338328
The secant method can be used to find complex roots of analytic functions,
although it must in that case generally be given a nonreal starting value
(or else it will never leave the real line)::
>>> mp.dps = 15
>>> print findroot(lambda x: x**3 + 2*x + 1, j)
(0.226698825758202 + 1.46771150871022j)
A nice application is to compute nontrivial roots of the Riemann zeta
function with many digits (good initial values are needed for convergence)::
>>> mp.dps = 30
>>> print findroot(zeta, 0.5+14j)
(0.5 + 14.1347251417346937904572519836j)
The secant method can also be used as an optimization algorithm, by passing
it a derivative of a function. The following example locates the positive
minimum of the gamma function::
>>> mp.dps = 20
>>> print findroot(lambda x: diff(gamma, x), 1)
1.4616321449683623413
Finally, a useful application is to compute inverse functions, such as the
Lambert W function which is the inverse of `w e^w`, given the first
term of the solution's asymptotic expansion as the initial value. In basic
cases, this gives identical results to mpmath's builtin ``lambertw``
function::
>>> def lambert(x):
... return findroot(lambda w: w*exp(w) - x, log(1+x))
...
>>> mp.dps = 15
>>> print lambert(1), lambertw(1)
0.567143290409784 0.567143290409784
>>> print lambert(1000), lambert(1000)
5.2496028524016 5.2496028524016
Multidimensional functions are also supported::
>>> f = [lambda x1, x2: x1**2 + x2,
... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
>>> findroot(f, (0, 0))
matrix(
[['-0.618033988749895'],
['-0.381966011250105']])
>>> findroot(f, (10, 10))
matrix(
[['1.61803398874989'],
['-2.61803398874989']])
You can verify this by solving the system manually.
Please note that the following (more general) syntax also works::
>>> def f(x1, x2):
... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
...
>>> findroot(f, (0, 0))
matrix(
[['-0.618033988749895'],
['-0.381966011250105']])
**Multiple roots**
For multiple roots all methods of the Newtonian family (including secant)
converge slowly. Consider this example::
>>> f = lambda x: (x - 1)**99
>>> findroot(f, 0.9, verify=False)
mpf('0.91807354244492868')
Even for a very close starting point the secant method converges very
slowly. Use ``verbose=True`` to illustrate this.
It is possible to modify Newton's method to make it converge regardless of
the root's multiplicity::
>>> findroot(f, -10, solver='mnewton')
mpf('1.0')
This variant uses the first and second derivative of the function, which is
not very efficient.
Alternatively you can use an experimental Newtonian solver that keeps track
of the speed of convergence and accelerates it using Steffensen's method if
necessary::
>>> findroot(f, -10, solver='anewton', verbose=True)
x: -9.88888888888888888889
error: 0.111111111111111111111
converging slowly
x: -9.77890011223344556678
error: 0.10998877665544332211
converging slowly
x: -9.67002233332199662166
error: 0.108877778911448945119
converging slowly
accelerating convergence
x: -9.5622443299551077669
error: 0.107778003366888854764
converging slowly
x: 0.99999999999999999214
error: 10.562244329955107759
x: 1.0
error: 7.8598304758094664213e-18
mpf('1.0')
**Complex roots**
For complex roots it's recommended to use Muller's method as it converges
even for real starting points very fast::
>>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
mpc(real='0.72713608449119684', imag='0.93409928946052944')
**Intersection methods**
When you need to find a root in a known interval, it's highly recommended to
use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
Usually they converge faster and more reliable. They have however problems
with multiple roots and usually need a sign change to find a root::
>>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
mpf('0.0')
Be careful with symmetric functions::
>>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
Traceback (most recent call last):
...
ZeroDivisionError
It fails even for better starting points, because there is no sign change::
>>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
Traceback (most recent call last):
...
ValueError: Could not find root within given tolerance. (1 > 2.1684e-19)
Try another starting point or tweak arguments.
"""
# initialize arguments
if not force_type:
force_type = lambda x: x
elif not tol and (force_type == float or force_type == complex):
tol = 2**(-42)
kwargs['verbose'] = verbose
if 'd1f' in kwargs:
kwargs['df'] = kwargs['d1f']
if tol is None:
tol = eps * 2**10
kwargs['tol'] = tol
if isinstance(x0, (list, tuple)):
x0 = [force_type(x) for x in x0]
else:
x0 = [force_type(x0)]
if isinstance(solver, str):
try:
solver = str2solver[solver]
except KeyError:
raise ValueError('could not recognize solver')
# accept list of functions
if isinstance(f, (list, tuple)):
f2 = copy(f)
def tmp(*args):
return [fn(*args) for fn in f2]
f = tmp
# detect multidimensional functions
try:
fx = f(*x0)
multidimensional = isinstance(fx, (list, tuple, matrix))
except TypeError:
fx = f(x0[0])
multidimensional = False
if 'multidimensional' in kwargs:
multidimensional = kwargs['multidimensional']
if multidimensional:
# only one multidimensional solver available at the moment
solver = MDNewton
if not 'norm' in kwargs:
norm = lambda x: norm_(x, mpf('inf'))
kwargs['norm'] = norm
else:
norm = kwargs['norm']
else:
norm = abs
# happily return starting point if it's a root
if norm(fx) == 0:
if multidimensional:
return matrix(x0)
else:
return x0[0]
# use solver
iterations = solver(f, x0, **kwargs)
if 'maxsteps' in kwargs:
maxsteps = kwargs['maxsteps']
else:
maxsteps = iterations.maxsteps
i = 0
for x, error in iterations:
if verbose:
print 'x: ', x
print 'error:', error
i += 1
if error < tol * max(1, norm(x)) or i >= maxsteps:
break
if not isinstance(x, (list, tuple, matrix)):
xl = [x]
else:
xl = x
if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
raise ValueError('Could not find root within given tolerance. '
'(%g > %g)\n'
'Try another starting point or tweak arguments.'
% (norm(f(*xl))**2, tol))
return x
def multiplicity(f, root, tol=eps, maxsteps=10, **kwargs):
"""
Return the multiplicity of a given root of f.
Internally, numerical derivatives are used. This might be inefficient for
higher order derviatives. Due to this, ``multiplicity`` cancels after
evaluating 10 derivatives by default. You can be specify the n-th derivative
using the dnf keyword.
>>> from mpmath import *
>>> multiplicity(lambda x: sin(x) - 1, pi/2)
2
"""
kwargs['d0f'] = f
for i in xrange(maxsteps):
dfstr = 'd' + str(i) + 'f'
if dfstr in kwargs:
df = kwargs[dfstr]
else:
df = lambda x: diff(f, x, i)
if not abs(df(root)) < tol:
break
return i
def steffensen(f):
"""
linear convergent function -> quadratic convergent function
Steffensen's method for quadratic convergence of a linear converging
sequence.
Don not use it for higher rates of convergence.
It may even work for divergent sequences.
Definition:
F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)
Example
.......
You can use Steffensen's method to accelerate a fixpoint iteration of linear
(or less) convergence.
x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
phi(x) = x**2 there are two fixpoints: 0 and 1.
Let's try Steffensen's method:
>>> f = lambda x: x**2
>>> from mpmath.optimization import steffensen
>>> F = steffensen(f)
>>> for x in [0.5, 0.9, 2.0]:
... fx = Fx = x
... for i in xrange(10):
... try:
... fx = f(fx)
... except OverflowError:
... pass
... try:
... Fx = F(Fx)
... except ZeroDivisionError:
... pass
... print '%20g %20g' % (fx, Fx)
0.25 -0.5
0.0625 0.1
0.00390625 -0.0011236
1.52588e-005 1.41691e-009
2.32831e-010 -2.84465e-027
5.42101e-020 2.30189e-080
2.93874e-039 -1.2197e-239
8.63617e-078 0
7.45834e-155 0
5.56268e-309 0
0.81 1.02676
0.6561 1.00134
0.430467 1
0.185302 1
0.0343368 1
0.00117902 1
1.39008e-006 1
1.93233e-012 1
3.73392e-024 1
1.39421e-047 1
4 1.6
16 1.2962
256 1.10194
65536 1.01659
4.29497e+009 1.00053
1.84467e+019 1
3.40282e+038 1
1.15792e+077 1
1.34078e+154 1
1.34078e+154 1
Unmodified, the iteration converges only towards 0. Modified it converges
not only much faster, it converges even to the repelling fixpoint 1.
"""
def F(x):
fx = f(x)
ffx = f(fx)
return (x*ffx - fx**2) / (ffx - 2*fx + x)
return F
MultiPrecisionArithmetic.findroot = staticmethod(findroot)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
{
"content_hash": "4643d39ee0e474dd0bd05b5bb5f411c5",
"timestamp": "",
"source": "github",
"line_count": 1068,
"max_line_length": 83,
"avg_line_length": 29.584269662921347,
"alnum_prop": 0.5200974806937587,
"repo_name": "jbaayen/sympy",
"id": "ef6b2ec5b46f94fda4beda25ae20798d9a1e7665",
"size": "31596",
"binary": false,
"copies": "1",
"ref": "refs/heads/i1667",
"path": "sympy/mpmath/optimization.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6833033"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
}
|
from app import app
from flask import render_template,request
from models import BillRecord,BillRecord_Form
@app.route('/')
def index():
form = BillRecord_Form()
todos = BillRecord.objects.order_by('-time')
return render_template("index.html", todos=todos,form=form)
# return render_template("index.html",text="Hello world")
@app.route('/add', methods=['POST', 'GET'])
def add():
# content = request.form.get("content")
form = BillRecord_Form(request.form)
if form.validate():
money = form.money.data
shop = form.shop.data
content = form.content.data
todo = BillRecord(money=money,shop=shop,content=content)
todo.save()
todos = BillRecord.objects.order_by('-time')
return render_template("index.html",todos=todos,form=form)
@app.route('/done/<string:todo_id>')
def done(todo_id):
form = BillRecord_Form()
todo = BillRecord.objects.get_or_404(id=todo_id)
todo.status = 1
todo.save()
todos = BillRecord.objects.order_by('-time')
return render_template("index.html",todos=todos,form=form)
@app.route('/undone/<string:todo_id>')
def undone(todo_id):
form = BillRecord_Form()
todo = BillRecord.objects.get_or_404(id=todo_id)
todo.status = 0
todo.save()
todos = BillRecord.objects.order_by('-time')
return render_template("index.html", todos=todos,form=form)
@app.route('/delete/<string:todo_id>')
def delete(todo_id):
form = BillRecord_Form()
todo = BillRecord.objects.get_or_404(id=todo_id)
todo.delete()
todos = BillRecord.objects.order_by('-time')
return render_template("index.html",todos=todos,form=form)
@app.errorhandler(404)
def not_found(e):
return render_template('404.html'),404
|
{
"content_hash": "9484e41ff530171471dd4d7aeb394db3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 64,
"avg_line_length": 29.133333333333333,
"alnum_prop": 0.6710526315789473,
"repo_name": "niasand/learn_how_to_flask",
"id": "e9d7b1205036f570b0b2fdfb33ae83e4bc9d7820",
"size": "1773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "523"
},
{
"name": "HTML",
"bytes": "3523"
},
{
"name": "Mako",
"bytes": "13"
},
{
"name": "Python",
"bytes": "10009"
}
],
"symlink_target": ""
}
|
import requests
import json
# Detect all human faces present in a given image and try to guess their age, gender and emotion state via their facial shapes
# Target image: Feel free to change to whatever image holding as many human faces as you want
img = 'http://www.scienceforums.com/uploads/1282315190/gallery_1625_35_9165.jpg'
req = requests.get('http://api.pixlab.io/facemotion',params={
'img': img,
'key':'PixLab_API_Key',
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
exit();
total = len(reply['faces']) # Total detected faces
print(str(total)+" faces were detected")
# Extract each face now
for face in reply['faces']:
cord = face['rectangle']
print ('Face coordinate: width: ' + str(cord['width']) + ' height: ' + str(cord['height']) + ' x: ' + str(cord['left']) +' y: ' + str(cord['top']))
# Guess emotion
for emotion in face['emotion']:
if emotion['score'] > 0.5:
print ("Emotion - "+emotion['state']+': '+str(emotion['score']))
# Grab the age and gender
print ("Age ~: " + str(face['age']))
print ("Gender: " + str(face['gender']))
|
{
"content_hash": "762089432423193edf7a125c39d67ed4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 148,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.6464285714285715,
"repo_name": "symisc/pixlab",
"id": "a509b2f763edfac7f3af8e55f12635114298dc67",
"size": "1120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/gender_age_emotion.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Java",
"bytes": "60145"
},
{
"name": "PHP",
"bytes": "37444"
},
{
"name": "Python",
"bytes": "53592"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ShowtickprefixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="showtickprefix",
parent_name="densitymapbox.colorbar",
**kwargs
):
super(ShowtickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["all", "first", "last", "none"]),
**kwargs
)
|
{
"content_hash": "ce8c89905430e8120eaced7b0e9c0c65",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 80,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.5833333333333334,
"repo_name": "plotly/python-api",
"id": "bb505c2f0f923660aa97932a77fa896fbcd99f09",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/colorbar/_showtickprefix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import operator
import collections
import logging
logger = logging.getLogger(__name__)
def get_accumulated_probabilities(sorted_candidate_lengths, current_round_acc_probability):
'''Take a dictionary of sorted candidate alphabets and calculate the
relative probability of each candidate being in the target secret based on
their associated accumulative lengths. Then associate the relative values
with the probability of the parent Round and calculate the final accumulated
probability.
Returns a dictionary containing every possible candidate alphabet and its
accumulated probability value.
'''
compression_function_factor = 1.05
relative_probability_sum = 0.0
min_candidate_value = sorted_candidate_lengths[0]['length']
amplification_factor = 1.05
# Calculate relative probability sum based on each candidate's length.
for candidate in sorted_candidate_lengths:
relative_probability_sum += compression_function_factor ** (
-abs(candidate['length'] - min_candidate_value)
)
accumulated_probabilities = []
# Calculate every candidate's accumulated probability by multiplying its
# parent's probability with the relative value of this round and an
# amplification factor.
for candidate in sorted_candidate_lengths:
relative_prob = compression_function_factor ** (
-abs(candidate['length'] - min_candidate_value)
) / relative_probability_sum
accumulated_value = (
amplification_factor *
current_round_acc_probability *
relative_prob
)
accumulated_probabilities.append({
'candidate': candidate['candidate_alphabet'],
'probability': accumulated_value
})
return accumulated_probabilities
def get_candidates(candidate_lengths, accumulated_prob):
'''Take a dictionary of candidate alphabets and their associated
accumulative lengths.
Returns a list with each candidate and its accumulated probability.
'''
assert(len(candidate_lengths) > 1)
accumulated_candidate_lengths = []
for candidate_alphabet, list_of_lengths in candidate_lengths.iteritems():
accumulated_candidate_lengths.append({
'candidate_alphabet': candidate_alphabet,
'length': sum(list_of_lengths)
})
# Sort sampleset groups by length.
sorted_candidate_lengths = sorted(
accumulated_candidate_lengths,
key=operator.itemgetter('length')
)
candidates_probabilities = get_accumulated_probabilities(sorted_candidate_lengths, accumulated_prob)
logger.info(75 * '#')
logger.info('Candidate scoreboard:')
for cand in sorted_candidate_lengths:
logger.info('\t{}: {}'.format(cand['candidate_alphabet'], cand['length']))
logger.info(75 * '#')
return candidates_probabilities
def decide_next_backtracking_world_state(samplesets, accumulated_prob):
'''Take a list of samplesets and the accumulated probability of current
round and extract a decision for a state transition with a certain
probability for each candidate.
Arguments:
samplesets -- a list of samplesets.
accumulated_prob -- the accumulated probability of current knownalpahbet.
This list must must contain at least two elements so that we have some basis
for comparison. Each of the list's elements must share the same world state
(knownsecret and knownalphabet) so that we are comparing on the same basis.
The samplesets must contain at least two different candidate alphabets so
that a decision can be made. It can contain multiple samplesets collected
over the same candidate alphabet.
Returns an array of dictionary pairs. The first element of the pair is the new
state of every candidate; the second element of the pair is the
confidence with which the analyzer is suggesting the state transition.
'''
# Ensure we have enough sample sets to compare.
assert(len(samplesets) > 1)
# Ensure all samplesets are extending the same known state
knownsecret = samplesets[0].round.knownsecret
round = samplesets[0].round
victim = round.victim
target = victim.target
for sampleset in samplesets:
assert(sampleset.round == round)
# Split samplesets based on alphabetvector under consideration
# and collect data lengths for each candidate.
candidate_lengths = collections.defaultdict(lambda: [])
candidate_count_samplesets = collections.defaultdict(lambda: 0)
for sampleset in samplesets:
candidate_lengths[sampleset.candidatealphabet].append(sampleset.datalength)
candidate_count_samplesets[sampleset.candidatealphabet] += 1
candidate_count_samplesets = candidate_count_samplesets.items()
samplesets_per_candidate = candidate_count_samplesets[0][1]
for alphabet, count in candidate_count_samplesets:
assert(count == samplesets_per_candidate)
# Ensure we have a decision to make
assert(len(candidate_lengths) > 1)
candidates = get_candidates(candidate_lengths, accumulated_prob)
state = []
# All candidates are returned in order to create new rounds.
for i in candidates:
state.append({
'knownsecret': knownsecret + i['candidate'],
'probability': i['probability'],
'knownalphabet': target.alphabet
})
return state
|
{
"content_hash": "888af10ff9f264a8f5c6bf2d48f3460e",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 104,
"avg_line_length": 37.04761904761905,
"alnum_prop": 0.7078589790672053,
"repo_name": "dionyziz/rupture",
"id": "628aff60dc80770b0628b1b582186d369a911bff",
"size": "5446",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/breach/backtracking_analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "61046"
},
{
"name": "CSS",
"bytes": "5609"
},
{
"name": "HTML",
"bytes": "34042"
},
{
"name": "JavaScript",
"bytes": "52116"
},
{
"name": "Makefile",
"bytes": "805"
},
{
"name": "Python",
"bytes": "160351"
},
{
"name": "Shell",
"bytes": "9852"
},
{
"name": "TeX",
"bytes": "225330"
}
],
"symlink_target": ""
}
|
"""
BitBake SFTP Fetch implementation
Class for fetching files via SFTP. It tries to adhere to the (now
expired) IETF Internet Draft for "Uniform Resource Identifier (URI)
Scheme for Secure File Transfer Protocol (SFTP) and Secure Shell
(SSH)" (SECSH URI).
It uses SFTP (as to adhere to the SECSH URI specification). It only
supports key based authentication, not password. This class, unlike
the SSH fetcher, does not support fetching a directory tree from the
remote.
http://tools.ietf.org/html/draft-ietf-secsh-scp-sftp-ssh-uri-04
https://www.iana.org/assignments/uri-schemes/prov/sftp
https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13
Please note that '/' is used as host path seperator, and not ":"
as you may be used to from the scp/sftp commands. You can use a
~ (tilde) to specify a path relative to your home directory.
(The /~user/ syntax, for specyfing a path relative to another
user's home directory is not supported.) Note that the tilde must
still follow the host path seperator ("/"). See exampels below.
Example SRC_URIs:
SRC_URI = "sftp://host.example.com/dir/path.file.txt"
A path relative to your home directory.
SRC_URI = "sftp://host.example.com/~/dir/path.file.txt"
You can also specify a username (specyfing password in the
URI is not supported, use SSH keys to authenticate):
SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
"""
# Copyright (C) 2013, Olof Johansson <olof.johansson@axis.com>
#
# Based in part on bb.fetch2.wget:
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
import os
import bb
import urllib
import commands
from bb import data
from bb.fetch2 import URI
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd
class SFTP(FetchMethod):
"""Class to fetch urls via 'sftp'"""
def supports(self, ud, d):
"""
Check to see if a given url can be fetched with sftp.
"""
return ud.type in ['sftp']
def recommends_checksum(self, urldata):
return True
def urldata_init(self, ud, d):
if 'protocol' in ud.parm and ud.parm['protocol'] == 'git':
raise bb.fetch2.ParameterError(
"Invalid protocol - if you wish to fetch from a " +
"git repository using ssh, you need to use the " +
"git:// prefix with protocol=ssh", ud.url)
if 'downloadfilename' in ud.parm:
ud.basename = ud.parm['downloadfilename']
else:
ud.basename = os.path.basename(ud.path)
ud.localfile = data.expand(urllib.unquote(ud.basename), d)
def download(self, ud, d):
"""Fetch urls"""
urlo = URI(ud.url)
basecmd = 'sftp -oPasswordAuthentication=no'
port = ''
if urlo.port:
port = '-P %d' % urlo.port
urlo.port = None
dldir = data.getVar('DL_DIR', d, True)
lpath = os.path.join(dldir, ud.localfile)
user = ''
if urlo.userinfo:
user = urlo.userinfo + '@'
path = urlo.path
# Supoprt URIs relative to the user's home directory, with
# the tilde syntax. (E.g. <sftp://example.com/~/foo.diff>).
if path[:3] == '/~/':
path = path[3:]
remote = '%s%s:%s' % (user, urlo.hostname, path)
cmd = '%s %s %s %s' % (basecmd, port, commands.mkarg(remote),
commands.mkarg(lpath))
bb.fetch2.check_network_access(d, cmd, ud.url)
runfetchcmd(cmd, d)
return True
|
{
"content_hash": "401470d5fcd47843099b14b9c38ff838",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 75,
"avg_line_length": 33.15748031496063,
"alnum_prop": 0.6618380432201377,
"repo_name": "wwright2/dcim3-angstrom1",
"id": "8ea4ef2ff3b5ac54069e56b68737a0ddd3ffbdcd",
"size": "4300",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sources/bitbake/lib/bb/fetch2/sftp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "73541"
},
{
"name": "Awk",
"bytes": "286"
},
{
"name": "Batchfile",
"bytes": "19960"
},
{
"name": "BitBake",
"bytes": "2875212"
},
{
"name": "BlitzBasic",
"bytes": "6367"
},
{
"name": "C",
"bytes": "1598095"
},
{
"name": "C++",
"bytes": "2198121"
},
{
"name": "CMake",
"bytes": "7277"
},
{
"name": "CSS",
"bytes": "28636"
},
{
"name": "Groff",
"bytes": "502999"
},
{
"name": "HTML",
"bytes": "210823"
},
{
"name": "JavaScript",
"bytes": "23100"
},
{
"name": "Lua",
"bytes": "1194"
},
{
"name": "Makefile",
"bytes": "32539"
},
{
"name": "Nginx",
"bytes": "2744"
},
{
"name": "PHP",
"bytes": "829048"
},
{
"name": "Pascal",
"bytes": "17352"
},
{
"name": "Perl",
"bytes": "66339"
},
{
"name": "Python",
"bytes": "3672452"
},
{
"name": "QMake",
"bytes": "165"
},
{
"name": "Ruby",
"bytes": "10695"
},
{
"name": "Shell",
"bytes": "820076"
},
{
"name": "SourcePawn",
"bytes": "259600"
},
{
"name": "Tcl",
"bytes": "4897"
},
{
"name": "VimL",
"bytes": "8483"
},
{
"name": "XSLT",
"bytes": "9089"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "visiblapp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "bb587991543b563bdeabb869cf5e06fd",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7130434782608696,
"repo_name": "joshcrist20/visiblapp",
"id": "97687e2896405c01b9caaa5f133d941ac1b2d393",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1190907"
},
{
"name": "HTML",
"bytes": "401353"
},
{
"name": "JavaScript",
"bytes": "245863"
},
{
"name": "Python",
"bytes": "5932"
}
],
"symlink_target": ""
}
|
import numpy as np
from rllab.core.serializable import Serializable
from rllab.envs.base import Step
from rllab.envs.proxy_env import ProxyEnv
from rllab.misc import autoargs
from rllab.misc.overrides import overrides
class NoisyObservationEnv(ProxyEnv, Serializable):
@autoargs.arg('obs_noise', type=float,
help='Noise added to the observations (note: this makes the '
'problem non-Markovian!)')
def __init__(self,
env,
obs_noise=1e-1,
):
super(NoisyObservationEnv, self).__init__(env)
Serializable.quick_init(self, locals())
self.obs_noise = obs_noise
def get_obs_noise_scale_factor(self, obs):
# return np.abs(obs)
return np.ones_like(obs)
def inject_obs_noise(self, obs):
"""
Inject entry-wise noise to the observation. This should not change
the dimension of the observation.
"""
noise = self.get_obs_noise_scale_factor(obs) * self.obs_noise * \
np.random.normal(size=obs.shape)
return obs + noise
def get_current_obs(self):
return self.inject_obs_noise(self._wrapped_env.get_current_obs())
@overrides
def reset(self):
obs = self._wrapped_env.reset()
return self.inject_obs_noise(obs)
@overrides
def step(self, action):
next_obs, reward, done, info = self._wrapped_env.step(action)
return Step(self.inject_obs_noise(next_obs), reward, done, **info)
class DelayedActionEnv(ProxyEnv, Serializable):
@autoargs.arg('action_delay', type=int,
help='Time steps before action is realized')
def __init__(self,
env,
action_delay=3,
):
assert action_delay > 0, "Should not use this env transformer"
super(DelayedActionEnv, self).__init__(env)
Serializable.quick_init(self, locals())
self.action_delay = action_delay
self._queued_actions = None
@overrides
def reset(self):
obs = self._wrapped_env.reset()
self._queued_actions = np.zeros(self.action_delay * self.action_dim)
return obs
@overrides
def step(self, action):
queued_action = self._queued_actions[:self.action_dim]
next_obs, reward, done, info = self._wrapped_env.step(queued_action)
self._queued_actions = np.concatenate([
self._queued_actions[self.action_dim:],
action
])
return Step(next_obs, reward, done, **info)
|
{
"content_hash": "13817d436c62df21466f19480f3fbd26",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 32.59493670886076,
"alnum_prop": 0.6062135922330097,
"repo_name": "brain-research/mirage-rl-qprop",
"id": "afe8d063b6dc4dd15c05d618501e87d23489c9fd",
"size": "2575",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rllab/envs/noisy_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8270"
},
{
"name": "Dockerfile",
"bytes": "2310"
},
{
"name": "HTML",
"bytes": "14896"
},
{
"name": "JavaScript",
"bytes": "28156"
},
{
"name": "Jupyter Notebook",
"bytes": "151886"
},
{
"name": "Mako",
"bytes": "3714"
},
{
"name": "Python",
"bytes": "1831569"
},
{
"name": "Ruby",
"bytes": "12147"
},
{
"name": "Shell",
"bytes": "13760"
}
],
"symlink_target": ""
}
|
import sys
import wx
from .process import Process
from ..widgets import Label, Font
class Runner(wx.EvtHandler):
def __init__(self, config, notebook):
wx.EvtHandler.__init__(self)
self.Bind(wx.EVT_TIMER, self.OnTimer)
self.name = config.name
self._timer = wx.Timer(self)
self._config = config
self._window = self._get_output_window(notebook)
def _get_output_window(self, notebook):
return _OutputWindow(notebook, self)
def run(self):
self._process = Process(self._config.command)
self._process.start()
self._timer.Start(500)
def OnTimer(self, event=None):
finished = self._process.is_finished()
self._window.update_output(self._process.get_output(), finished)
if finished:
self._timer.Stop()
def stop(self):
try:
self._process.stop()
except Exception as err:
wx.MessageBox(str(err), style=wx.ICON_ERROR)
class _OutputWindow(wx.ScrolledWindow):
def __init__(self, notebook, runner):
wx.ScrolledWindow.__init__(self, notebook)
self._create_ui()
self._add_to_notebook(notebook, runner.name)
self._runner = runner
def _create_ui(self):
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._create_state_button())
sizer.Add(self._create_output())
self.SetSizer(sizer)
self.SetScrollRate(20, 20)
def _create_state_button(self):
if sys.version_info[:2] >= (2,6):
self._state_button = _StopAndRunAgainButton(self)
else:
self._state_button = _RunAgainButton(self)
return self._state_button
def _create_output(self):
self._output = _OutputDisplay(self)
return self._output
def _add_to_notebook(self, notebook, name):
notebook.add_tab(self, '%s (running)' % name, allow_closing=False)
notebook.show_tab(self)
def update_output(self, output, finished=False):
if output:
self._output.update(output)
self.SetVirtualSize(self._output.Size)
if finished:
self._rename_tab('%s (finished)' % self._runner.name)
self.Parent.allow_closing(self)
self._state_button.enable_run_again()
def OnStop(self):
self._runner.stop()
def OnRunAgain(self):
self._output.clear()
self._rename_tab('%s (running)' % self._runner.name)
self.Parent.disallow_closing(self)
self._state_button.reset()
self._runner.run()
def _rename_tab(self, name):
self.Parent.rename_tab(self, name)
class _OutputDisplay(Label):
def __init__(self, parent):
Label.__init__(self, parent)
self.SetFont(Font().fixed)
def update(self, addition):
self.SetLabel(self.LabelText + addition.decode('UTF-8', 'ignore'))
def clear(self):
self.SetLabel('')
class _StopAndRunAgainButton(wx.Button):
def __init__(self, parent):
wx.Button.__init__(self, parent, label='Stop')
self.Bind(wx.EVT_BUTTON, self.OnClick, self)
def OnClick(self, event):
self.Enable(False)
getattr(self.Parent, 'On' + self.LabelText.replace(' ', ''))()
def enable_run_again(self):
self.Enable()
self.SetLabel('Run Again')
def reset(self):
self.Enable()
self.SetLabel('Stop')
class _RunAgainButton(wx.Button):
def __init__(self, parent):
wx.Button.__init__(self, parent, label='Run Again')
self.Bind(wx.EVT_BUTTON, self.OnClick, self)
self.Enable(False)
def OnClick(self, event):
self.Parent.OnRunAgain()
def enable_run_again(self):
self.Enable()
def reset(self):
self.Enable(False)
|
{
"content_hash": "bd915d5d0620193d6ae07151f45a305b",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 74,
"avg_line_length": 27.323741007194243,
"alnum_prop": 0.5966298051606108,
"repo_name": "robotframework/RIDE",
"id": "b6b53f2a08c95b44fadce894595b242206ab89f7",
"size": "4442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/run/ui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
}
|
"""
The MatchMaker classes should accept a Topic or Fanout exchange key and
return keys for direct exchanges, per (approximate) AMQP parlance.
"""
from essential.config import cfg
from essential import importutils
from essential import log as logging
from essential.rpc import matchmaker as mm_common
redis = importutils.try_import('redis')
matchmaker_redis_opts = [
cfg.StrOpt('host',
default='127.0.0.1',
help='Host to locate redis'),
cfg.IntOpt('port',
default=6379,
help='Use this port to connect to redis host.'),
cfg.StrOpt('password',
default=None,
help='Password for Redis server. (optional)'),
]
CONF = cfg.CONF
opt_group = cfg.OptGroup(name='matchmaker_redis',
title='Options for Redis-based MatchMaker')
CONF.register_group(opt_group)
CONF.register_opts(matchmaker_redis_opts, opt_group)
LOG = logging.getLogger(__name__)
class RedisExchange(mm_common.Exchange):
def __init__(self, matchmaker):
self.matchmaker = matchmaker
self.redis = matchmaker.redis
super(RedisExchange, self).__init__()
class RedisTopicExchange(RedisExchange):
"""Exchange where all topic keys are split, sending to second half.
i.e. "compute.host" sends a message to "compute" running on "host"
"""
def run(self, topic):
while True:
member_name = self.redis.srandmember(topic)
if not member_name:
# If this happens, there are no
# longer any members.
break
if not self.matchmaker.is_alive(topic, member_name):
continue
host = member_name.split('.', 1)[1]
return [(member_name, host)]
return []
class RedisFanoutExchange(RedisExchange):
"""Return a list of all hosts."""
def run(self, topic):
topic = topic.split('~', 1)[1]
hosts = self.redis.smembers(topic)
good_hosts = filter(
lambda host: self.matchmaker.is_alive(topic, host), hosts)
return [(x, x.split('.', 1)[1]) for x in good_hosts]
class MatchMakerRedis(mm_common.HeartbeatMatchMakerBase):
"""MatchMaker registering and looking-up hosts with a Redis server."""
def __init__(self):
super(MatchMakerRedis, self).__init__()
if not redis:
raise ImportError("Failed to import module redis.")
self.redis = redis.Redis(
host=CONF.matchmaker_redis.host,
port=CONF.matchmaker_redis.port,
password=CONF.matchmaker_redis.password)
self.add_binding(mm_common.FanoutBinding(), RedisFanoutExchange(self))
self.add_binding(mm_common.DirectBinding(), mm_common.DirectExchange())
self.add_binding(mm_common.TopicBinding(), RedisTopicExchange(self))
def ack_alive(self, key, host):
topic = "%s.%s" % (key, host)
if not self.redis.expire(topic, CONF.matchmaker_heartbeat_ttl):
# If we could not update the expiration, the key
# might have been pruned. Re-register, creating a new
# key in Redis.
self.register(self.topic_host[host], host)
def is_alive(self, topic, host):
if self.redis.ttl(host) == -1:
self.expire(topic, host)
return False
return True
def expire(self, topic, host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.delete(host)
pipe.srem(topic, host)
pipe.execute()
def backend_register(self, key, key_host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.sadd(key, key_host)
# No value is needed, we just
# care if it exists. Sets aren't viable
# because only keys can expire.
pipe.set(key_host, '')
pipe.execute()
def backend_unregister(self, key, key_host):
with self.redis.pipeline() as pipe:
pipe.multi()
pipe.srem(key, key_host)
pipe.delete(key_host)
pipe.execute()
|
{
"content_hash": "aad210b160c57139bc634f99b670239b",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 32.16279069767442,
"alnum_prop": 0.5996625692938058,
"repo_name": "zhangxiaolins/python_base",
"id": "176956ce06f7e375d329c7c044775ff884360538",
"size": "4768",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "essential/rpc/matchmaker_redis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1440663"
}
],
"symlink_target": ""
}
|
"""Handle /_ah/warmup requests on instance start."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import appengine_config
from modules.warmup import warmup
from tests.functional import actions
class WarmupTests(actions.TestBase):
def test_warmup_dev(self):
self.get('http://localhost:8081' + warmup.WarmupHandler.URL)
self.assertLogContains('Course Builder is now available')
self.assertLogContains('at ' + self.INTEGRATION_SERVER_BASE_URL)
self.assertLogContains('or http://0.0.0.0:8081')
def test_warmup_dev_different_port(self):
self.get('http://localhost:4321' + warmup.WarmupHandler.URL)
self.assertLogContains('Course Builder is now available')
self.assertLogContains('at http://localhost:4321')
self.assertLogContains('or http://0.0.0.0:4321')
def test_warmup_prod(self):
try:
appengine_config.PRODUCTION_MODE = True
self.get('http://localhost:8081' + warmup.WarmupHandler.URL)
self.assertLogDoesNotContain('Course Builder is now available')
self.assertLogDoesNotContain(
'at ' + self.INTEGRATION_SERVER_BASE_URL)
self.assertLogDoesNotContain('or http://0.0.0.0:8081')
finally:
appengine_config.PRODUCTION_MODE = False
|
{
"content_hash": "c5b9f2fb4279ae0e5f40c06daa9732b7",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 75,
"avg_line_length": 41.09375,
"alnum_prop": 0.6692015209125475,
"repo_name": "andela-angene/coursebuilder-core",
"id": "f4ec931103803f3ad084c6b7dbd3bf2f0c55b29e",
"size": "1913",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop-frontend",
"path": "coursebuilder/modules/warmup/warmup_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "729194"
},
{
"name": "HTML",
"bytes": "739873"
},
{
"name": "JavaScript",
"bytes": "720406"
},
{
"name": "Python",
"bytes": "6245524"
},
{
"name": "Shell",
"bytes": "53815"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from djblets.webapi.testing.decorators import webapi_test_template
from reviewboard.webapi.tests.mixins_extra_data import (ExtraDataItemMixin,
ExtraDataListMixin)
class BaseCommentListMixin(object):
@webapi_test_template
def test_post_with_text_type_markdown(self):
"""Testing the POST <URL> API with text_type=markdown"""
self._test_post_with_text_type('markdown')
@webapi_test_template
def test_post_with_text_type_plain(self):
"""Testing the POST <URL> API with text_type=plain"""
self._test_post_with_text_type('plain')
def _test_post_with_text_type(self, text_type):
comment_text = '`This` is a **test**'
url, mimetype, data, objs = \
self.setup_basic_post_test(self.user, False, None, True)
data['text'] = comment_text
data['text_type'] = text_type
rsp = self.api_post(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class BaseCommentItemMixin(object):
def compare_item(self, item_rsp, comment):
self.assertEqual(item_rsp['id'], comment.pk)
self.assertEqual(item_rsp['text'], comment.text)
if comment.rich_text:
self.assertEqual(item_rsp['rich_text'], 'markdown')
else:
self.assertEqual(item_rsp['rich_text'], 'plain')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='markdown',
expected_text=r'\# `This` is a **test**')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='plain',
expected_text='# `This` is a **test**')
@webapi_test_template
def test_get_with_markdown_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=markdown and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text=r'\# `This` is a **test**',
rich_text=True,
force_text_type='html',
expected_text='<p># <code>This</code> is a '
'<strong>test</strong></p>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_markdown(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=markdown
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='markdown',
expected_text=r'\#<\`This\` is a \*\*test\*\*>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_plain(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=plain
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='plain',
expected_text='#<`This` is a **test**>')
@webapi_test_template
def test_get_with_plain_and_force_text_type_html(self):
"""Testing the GET <URL> API with text_type=plain and
?force-text-type=html
"""
self._test_get_with_force_text_type(
text='#<`This` is a **test**>',
rich_text=False,
force_text_type='html',
expected_text='#<`This` is a **test**>')
@webapi_test_template
def test_put_with_text_type_markdown_and_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text specified
"""
self._test_put_with_text_type_and_text('markdown')
@webapi_test_template
def test_put_with_text_type_plain_and_text(self):
"""Testing the PUT <URL> API with text_type=plain and text specified"""
self._test_put_with_text_type_and_text('plain')
@webapi_test_template
def test_put_with_text_type_markdown_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=markdown and text not specified escapes text
"""
self._test_put_with_text_type_and_not_text(
'markdown',
'`Test` **diff** comment',
r'\`Test\` \*\*diff\*\* comment')
@webapi_test_template
def test_put_with_text_type_plain_and_not_text(self):
"""Testing the PUT <URL> API
with text_type=plain and text not specified
"""
self._test_put_with_text_type_and_not_text(
'plain',
r'\`Test\` \*\*diff\*\* comment',
'`Test` **diff** comment')
@webapi_test_template
def test_put_without_text_type_and_escaping_provided_fields(self):
"""Testing the PUT <URL> API
without changing text_type and with escaping provided fields
"""
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.rich_text = True
reply_comment.save()
if 'text_type' in data:
del data['text_type']
data.update({
'text': '`This` is **text**',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], 'markdown')
self.assertEqual(comment_rsp['text'], '\\`This\\` is \\*\\*text\\*\\*')
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
@webapi_test_template
def test_put_with_multiple_include_text_types(self):
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data.update({
'include_text_types': 'raw,plain,markdown,html',
'text': 'Foo',
})
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
def _test_get_with_force_text_type(self, text, rich_text,
force_text_type, expected_text):
url, mimetype, comment = \
self.setup_basic_get_test(self.user, False, None)
comment.text = text
comment.rich_text = rich_text
comment.save()
rsp = self.api_get(url + '?force-text-type=%s' % force_text_type,
expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text_type'], force_text_type)
self.assertEqual(comment_rsp['text'], expected_text)
self.assertNotIn('raw_text_fields', comment_rsp)
rsp = self.api_get('%s?force-text-type=%s&include-text-types=raw'
% (url, force_text_type),
expected_mimetype=mimetype)
comment_rsp = rsp[self.resource.item_result_key]
self.assertIn('raw_text_fields', comment_rsp)
self.assertEqual(comment_rsp['raw_text_fields']['text'], text)
def _test_put_with_text_type_and_text(self, text_type):
comment_text = '`Test` **diff** comment'
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
data['text_type'] = text_type
data['text'] = comment_text
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], comment_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
def _test_put_with_text_type_and_not_text(self, text_type, text,
expected_text):
self.assertIn(text_type, ('markdown', 'plain'))
rich_text = (text_type == 'markdown')
url, mimetype, data, reply_comment, objs = \
self.setup_basic_put_test(self.user, False, None, True)
reply_comment.text = text
reply_comment.rich_text = not rich_text
reply_comment.save()
data['text_type'] = text_type
if 'text' in data:
del data['text']
rsp = self.api_put(url, data, expected_mimetype=mimetype)
self.assertEqual(rsp['stat'], 'ok')
self.assertIn(self.resource.item_result_key, rsp)
comment_rsp = rsp[self.resource.item_result_key]
self.assertEqual(comment_rsp['text'], expected_text)
self.assertEqual(comment_rsp['text_type'], text_type)
comment = self.resource.model.objects.get(pk=comment_rsp['id'])
self.compare_item(comment_rsp, comment)
class CommentListMixin(ExtraDataListMixin, BaseCommentListMixin):
pass
class CommentItemMixin(ExtraDataItemMixin, BaseCommentItemMixin):
pass
class CommentReplyListMixin(BaseCommentListMixin):
pass
class CommentReplyItemMixin(BaseCommentItemMixin):
pass
|
{
"content_hash": "8511ffc0f0e76517431ca7fc490d25f8",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 36.22857142857143,
"alnum_prop": 0.5910883280757098,
"repo_name": "sgallagher/reviewboard",
"id": "517457ab332eea87c783c27a6c25c50dbb1445f8",
"size": "10144",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/tests/mixins_comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "225650"
},
{
"name": "HTML",
"bytes": "185770"
},
{
"name": "JavaScript",
"bytes": "2121168"
},
{
"name": "Python",
"bytes": "4153859"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
r"""Simple transfer learning with an Inception v3 architecture model.
With support for TensorBoard.
This example shows how to take a Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
corresponding to the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import struct
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(gfile.Glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
print('WARNING: Folder {} has more than {} images. Some images will '
'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, the data set creator has a way of
# grouping photos that are close variations of each other. For example
# this is used in the plant disease data set to group multiple pictures of
# the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_IMAGES_PER_CLASS + 1)) *
(100.0 / MAX_NUM_IMAGES_PER_CLASS))
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Label %s has no images in the category %s.',
label_name, category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
def create_inception_graph():
""""Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Graph().as_default() as graph:
model_filename = os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: String of raw JPEG data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract():
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def write_list_of_floats_to_file(list_of_floats, file_path):
"""Writes a given list of floats to a binary file.
Args:
list_of_floats: List of floats we want to write to a file.
file_path: Path to a file where list of floats will be stored.
"""
s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)
with open(file_path, 'wb') as f:
f.write(s)
def read_list_of_floats_from_file(file_path):
"""Reads list of floats from a given file.
Args:
file_path: Path to a file where list of floats was stored.
Returns:
Array of bottleneck values (list of floats).
"""
with open(file_path, 'rb') as f:
s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())
return list(s)
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
bottleneck_tensor):
"""Create a single bottleneck file."""
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index,
image_dir, category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
did_hit_error = False
try:
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
except ValueError:
print('Invalid float found, recreating bottleneck')
did_hit_error = True
if did_hit_error:
create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
image_dir, category, sess, jpeg_data_tensor,
bottleneck_tensor)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
# Allow exceptions to propagate here, since they shouldn't happen after a
# fresh creation
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: If positive, a random sample of this size will be chosen.
If negative, all bottlenecks will be retrieved.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays, their corresponding ground truths, and the
relevant filenames.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
filenames = []
if how_many >= 0:
# Retrieve a random sample of bottlenecks.
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
else:
# Retrieve all bottlenecks.
for label_index, label_name in enumerate(image_lists.keys()):
for image_index, image_name in enumerate(
image_lists[label_name][category]):
image_name = get_image_path(image_lists, label_name, image_index,
image_dir, category)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
filenames.append(image_name)
return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# sending running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
graph.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.multiply(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count],
stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
variable_summaries(layer_weights)
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases)
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.summary.histogram('pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.summary.histogram('activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=ground_truth_input, logits=logits)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.summary.scalar('cross_entropy', cross_entropy_mean)
with tf.name_scope('train'):
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
train_step = optimizer.minimize(cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(
prediction, tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
def main(_):
# Setup the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
with tf.Session(graph=graph) as sess:
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
(distorted_jpeg_data_tensor,
distorted_image_tensor) = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop,
FLAGS.random_scale, FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
FLAGS.bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step, prediction = add_evaluation_step(
final_tensor, ground_truth_input)
# Merge all the summaries and write them out to the summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.summary.FileWriter(
FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.global_variables_initializer()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every
# time with distortions applied, or from the cache stored on disk.
if do_distort_images:
(train_bottlenecks,
train_ground_truth) = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
(train_bottlenecks,
train_ground_truth, _) = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run(
[merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth, _ = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
(datetime.now(), i, validation_accuracy * 100,
len(validation_bottlenecks)))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth, test_filenames = (
get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,
'testing', FLAGS.bottleneck_dir,
FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
test_accuracy, predictions = sess.run(
[evaluation_step, prediction],
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%% (N=%d)' % (
test_accuracy * 100, len(test_bottlenecks)))
if FLAGS.print_misclassified_test_images:
print('=== MISCLASSIFIED TEST IMAGES ===')
for i, test_filename in enumerate(test_filenames):
if predictions[i] != test_ground_truth[i].argmax():
print('%70s %s' % (test_filename,
list(image_lists.keys())[predictions[i]]))
# Write out the trained graph and labels with the weights stored as
# constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--image_dir',
type=str,
default='',
help='Path to folders of labeled images.'
)
parser.add_argument(
'--output_graph',
type=str,
default='/tmp/output_graph.pb',
help='Where to save the trained graph.'
)
parser.add_argument(
'--output_labels',
type=str,
default='/tmp/output_labels.txt',
help='Where to save the trained graph\'s labels.'
)
parser.add_argument(
'--summaries_dir',
type=str,
default='/tmp/retrain_logs',
help='Where to save summary logs for TensorBoard.'
)
parser.add_argument(
'--how_many_training_steps',
type=int,
default=4000,
help='How many training steps to run before ending.'
)
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='How large a learning rate to use when training.'
)
parser.add_argument(
'--testing_percentage',
type=int,
default=10,
help='What percentage of images to use as a test set.'
)
parser.add_argument(
'--validation_percentage',
type=int,
default=10,
help='What percentage of images to use as a validation set.'
)
parser.add_argument(
'--eval_step_interval',
type=int,
default=10,
help='How often to evaluate the training results.'
)
parser.add_argument(
'--train_batch_size',
type=int,
default=100,
help='How many images to train on at a time.'
)
parser.add_argument(
'--test_batch_size',
type=int,
default=-1,
help="""\
How many images to test on. This test set is only used once, to evaluate
the final accuracy of the model after training completes.
A value of -1 causes the entire test set to be used, which leads to more
stable results across runs.\
"""
)
parser.add_argument(
'--validation_batch_size',
type=int,
default=100,
help="""\
How many images to use in an evaluation batch. This validation set is
used much more often than the test set, and is an early indicator of how
accurate the model is during training.
A value of -1 causes the entire validation set to be used, which leads to
more stable results across training iterations, but may be slower on large
training sets.\
"""
)
parser.add_argument(
'--print_misclassified_test_images',
default=False,
help="""\
Whether to print out a list of all misclassified test images.\
""",
action='store_true'
)
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--bottleneck_dir',
type=str,
default='/tmp/bottleneck',
help='Path to cache bottleneck layer values as files.'
)
parser.add_argument(
'--final_tensor_name',
type=str,
default='final_result',
help="""\
The name of the output classification layer in the retrained graph.\
"""
)
parser.add_argument(
'--flip_left_right',
default=False,
help="""\
Whether to randomly flip half of the training images horizontally.\
""",
action='store_true'
)
parser.add_argument(
'--random_crop',
type=int,
default=0,
help="""\
A percentage determining how much of a margin to randomly crop off the
training images.\
"""
)
parser.add_argument(
'--random_scale',
type=int,
default=0,
help="""\
A percentage determining how much to randomly scale up the size of the
training images by.\
"""
)
parser.add_argument(
'--random_brightness',
type=int,
default=0,
help="""\
A percentage determining how much to randomly multiply the training image
input pixels up or down by.\
"""
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
{
"content_hash": "fe24cb30fd5db202dcbe5eca2dd3b030",
"timestamp": "",
"source": "github",
"line_count": 1089,
"max_line_length": 90,
"avg_line_length": 39.85123966942149,
"alnum_prop": 0.6591317572238352,
"repo_name": "kashim-zcxk/clasificador-mascota",
"id": "59e99b0d61d215879cc30c58c0b437af6ffb94ac",
"size": "44087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/retrain.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46379"
},
{
"name": "Shell",
"bytes": "214"
}
],
"symlink_target": ""
}
|
import os.path
from nova.api.openstack import common
from nova.image import glance
class ViewBuilder(common.ViewBuilder):
_collection_name = "images"
def basic(self, request, image):
"""Return a dictionary with basic image attributes."""
return {
"image": {
"id": image.get("id"),
"name": image.get("name"),
"links": self._get_links(request,
image["id"],
self._collection_name),
},
}
def show(self, request, image):
"""Return a dictionary with image details."""
image_dict = {
"id": image.get("id"),
"name": image.get("name"),
"minRam": int(image.get("min_ram") or 0),
"minDisk": int(image.get("min_disk") or 0),
"metadata": image.get("properties", {}),
"created": self._format_date(image.get("created_at")),
"updated": self._format_date(image.get("updated_at")),
"status": self._get_status(image),
"progress": self._get_progress(image),
"links": self._get_links(request,
image["id"],
self._collection_name),
}
instance_uuid = image.get("properties", {}).get("instance_uuid")
if instance_uuid is not None:
server_ref = self._get_href_link(request, instance_uuid, 'servers')
image_dict["server"] = {
"id": instance_uuid,
"links": [{
"rel": "self",
"href": server_ref,
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
instance_uuid,
'servers'),
}],
}
return dict(image=image_dict)
def detail(self, request, images):
"""Show a list of images with details."""
list_func = self.show
return self._list_view(list_func, request, images)
def index(self, request, images):
"""Show a list of images with basic attributes."""
list_func = self.basic
return self._list_view(list_func, request, images)
def _list_view(self, list_func, request, images):
"""Provide a view for a list of images."""
image_list = [list_func(request, image)["image"] for image in images]
images_links = self._get_collection_links(request,
images,
self._collection_name)
images_dict = dict(images=image_list)
if images_links:
images_dict["images_links"] = images_links
return images_dict
def _get_links(self, request, identifier, collection_name):
"""Return a list of links for this image."""
return [{
"rel": "self",
"href": self._get_href_link(request, identifier, collection_name),
},
{
"rel": "bookmark",
"href": self._get_bookmark_link(request,
identifier,
collection_name),
},
{
"rel": "alternate",
"type": "application/vnd.openstack.image",
"href": self._get_alternate_link(request, identifier),
}]
def _get_alternate_link(self, request, identifier):
"""Create an alternate link for a specific image id."""
glance_url = glance.generate_glance_url()
glance_url = self._update_glance_link_prefix(glance_url)
return os.path.join(glance_url,
request.environ["nova.context"].project_id,
self._collection_name,
str(identifier))
@staticmethod
def _format_date(date_string):
"""Return standard format for given date."""
if date_string is not None:
return date_string.strftime('%Y-%m-%dT%H:%M:%SZ')
@staticmethod
def _get_status(image):
"""Update the status field to standardize format."""
return {
'active': 'ACTIVE',
'queued': 'SAVING',
'saving': 'SAVING',
'deleted': 'DELETED',
'pending_delete': 'DELETED',
'killed': 'ERROR',
}.get(image.get("status"), 'UNKNOWN')
@staticmethod
def _get_progress(image):
return {
"queued": 25,
"saving": 50,
"active": 100,
}.get(image.get("status"), 0)
|
{
"content_hash": "011428600d39399e62c4dcedb26509ff",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 35.73134328358209,
"alnum_prop": 0.4780701754385965,
"repo_name": "DirectXMan12/nova-hacking",
"id": "cc0718a4ebb7b80bd54566c4286d586fa6a226ce",
"size": "5474",
"binary": false,
"copies": "10",
"ref": "refs/heads/feature_novnc_krb",
"path": "nova/api/openstack/compute/views/images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "10361785"
},
{
"name": "Shell",
"bytes": "17485"
}
],
"symlink_target": ""
}
|
import sys
import os
from Crypto.Cipher import AES
from django.views.debug import technical_500_response
from django.template.loader import render_to_string
from django.conf import settings
from .utils import get_decode_key, get_client_key
BLOCK_SIZE = 16
MONKEY_FORCE_ACTIVE = getattr(settings, "MONKEY_FORCE_ACTIVE", False)
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * chr(BLOCK_SIZE - len(s) % BLOCK_SIZE)
unpad = lambda s : s[0:-ord(s[-1])]
class MonkeyTeamMiddleware(object):
@staticmethod
def patch_response(request, response):
iv = os.urandom(16)
response.content = render_to_string("monkey_500.html", {
'client_key': get_client_key(),
'data': (
iv + AES.new(
get_decode_key(),
AES.MODE_CBC,
iv,
).encrypt(
pad(response.content)
)
).encode('base64'),
'extra': ''#MonkeySetup.get_userscript_code(request),
})
def process_exception(self, request, exception):
if not settings.DEBUG or MONKEY_FORCE_ACTIVE:
exc_info = sys.exc_info()
if exc_info:
response = technical_500_response(request, *exc_info)
else:
response = technical_500_response(request, type(exception), exception, None)
self.patch_response(request, response)
return response
|
{
"content_hash": "b63e8550623d2321431e2d2b1944a1e9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 94,
"avg_line_length": 33.43181818181818,
"alnum_prop": 0.583956492182189,
"repo_name": "ionelmc/django-monkey-team",
"id": "261254cadf18e54bdbf76ecaa4201d9d007ce578",
"size": "1471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/monkey_team/middleware.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "1132"
},
{
"name": "Python",
"bytes": "11950"
}
],
"symlink_target": ""
}
|
from math import log
import numpy as np
import copy
from collections import defaultdict
import random
from simple_rl.pomdp.BeliefMDPClass import BeliefMDP
class BeliefSparseSampling(object):
'''
A Sparse Sampling Algorithm for Near-Optimal Planning in Large Markov Decision Processes (Kearns et al)
Assuming that you don't have access to the underlying transition dynamics, but do have access to a naiive generative
model of the underlying MDP, this algorithm performs on-line, near-optimal planning with a per-state running time
that has no dependence on the number of states in the MDP.
'''
def __init__(self, gen_model, gamma, tol, max_reward, state, name="bss"):
'''
Args:
gen_model (BeliefMDP): Model of our MDP -- we tell it what action we are performing from some state s
and it will return what our next state is
gamma (float): MDP discount factor
tol (float): Most expected difference between optimal and computed value function
max_reward (float): Upper bound on the reward you can get for any state, action
state (State): This is the current state, and we need to output the action to take here
'''
self.tol = tol
self.gamma = gamma
self.max_reward = max_reward
self.gen_model = gen_model
self.current_state = state
self.horizon = self._horizon
self.width = self._width
print('BSS Horizon = {} \t Width = {}'.format(self.horizon, self.width))
self.name = name
self.root_level_qvals = defaultdict()
self.nodes_by_horizon = defaultdict(lambda: defaultdict(float))
@property
def _horizon(self):
'''
Returns:
_horizon (int): The planning horizon; depth of the recursive tree created to determined the near-optimal
action to take from a given state
'''
return int(log((self._lam / self._vmax), self.gamma))
@property
def _width(self):
'''
The number of times we ask the generative model to give us a next_state sample for each state, action pair.
Returns:
_width (int)
'''
part1 = (self._vmax ** 2) / (self._lam ** 2)
part2 = 2 * self._horizon * log(self._horizon * (self._vmax ** 2) / (self._lam ** 2))
part3 = log(self.max_reward / self._lam)
return int(part1 * (part2 + part3))
@property
def _lam(self):
return (self.tol * (1.0 - self.gamma) ** 2) / 4.0
@property
def _vmax(self):
return float(self.max_reward) / (1 - self.gamma)
def _get_width_at_height(self, height):
'''
The branching factor of the tree is decayed according to this formula as suggested by the BSS paper.
Args:
height (int): the current depth in the MDP recursive tree measured from top
Returns:
width (int): the decayed branching factor for a state, action pair
'''
c = int(self.width * (self.gamma ** (2 * height)))
return c if c > 1 else 1
def _estimate_qs(self, state, horizon):
qvalues = np.zeros(len(self.gen_model.actions))
for action_idx, action in enumerate(self.gen_model.actions):
if horizon <= 0:
qvalues[action_idx] = 0.0
else:
qvalues[action_idx] = self._sampled_q_estimate(state, action, horizon)
return qvalues
def _sampled_q_estimate(self, state, action, horizon):
'''
Args:
state (State): current state in MDP
action (str): action to take from `state`
horizon (int): planning horizon / depth of recursive tree
Returns:
average_reward (float): measure of how good (s, a) would be
'''
total = 0.0
width = self._get_width_at_height(self.horizon - horizon)
for _ in range(width):
next_state = self.gen_model.transition_func(state, action)
total += self.gen_model.reward_func(state, action) + (self.gamma * self._estimate_v(next_state, horizon-1))
return total / float(width)
def _estimate_v(self, state, horizon):
'''
Args:
state (State): current state
horizon (int): time steps in future you want to use to estimate V*
Returns:
V(s) (float)
'''
if state in self.nodes_by_horizon:
if horizon in self.nodes_by_horizon[state]:
return self.nodes_by_horizon[state][horizon]
if self.gen_model.is_in_goal_state():
self.nodes_by_horizon[state][horizon] = self.gen_model.reward_func(state, random.choice(self.gen_model.actions))
else:
self.nodes_by_horizon[state][horizon] = np.max(self._estimate_qs(state, horizon))
return self.nodes_by_horizon[state][horizon]
def plan_from_state(self, state):
'''
Args:
state (State): the current state in the MDP
Returns:
action (str): near-optimal action to perform from state
'''
if state in self.root_level_qvals:
qvalues = self.root_level_qvals[state]
else:
init_horizon = self.horizon
qvalues = self._estimate_qs(state, init_horizon)
action_idx = np.argmax(qvalues)
self.root_level_qvals[state] = qvalues
return self.gen_model.actions[action_idx]
def run(self, verbose=True):
discounted_sum_rewards = 0.0
num_iter = 0
self.gen_model.reset()
state = self.gen_model.init_state
policy = defaultdict()
while not self.gen_model.is_in_goal_state():
action = self.plan_from_state(state)
reward, next_state = self.gen_model.execute_agent_action(action)
policy[state] = action
discounted_sum_rewards += ((self.gamma ** num_iter) * reward)
if verbose: print('({}, {}, {}) -> {} | {}'.format(state, action, next_state, reward, discounted_sum_rewards))
state = copy.deepcopy(next_state)
num_iter += 1
return discounted_sum_rewards, policy
|
{
"content_hash": "29dcb88f5bdc2f9b907e3f57fccb81c0",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 124,
"avg_line_length": 39.113207547169814,
"alnum_prop": 0.5972021225277376,
"repo_name": "david-abel/simple_rl",
"id": "939259c24dba38ce2fcf1798db2bfa2127379f41",
"size": "6219",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_rl/planning/BeliefSparseSamplingClass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "196326"
},
{
"name": "Python",
"bytes": "433150"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function, absolute_import
import os
import sys
sys.path = [os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")] + sys.path
from crox.core import main
if __name__ == '__main__':
main()
|
{
"content_hash": "3a21402bd66b5c1307d18fe68d88c691",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 86,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.6583333333333333,
"repo_name": "gustavla/crox",
"id": "f6c4ff1e56378a721cff207e0b897179e75df852",
"size": "262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "crox/__main__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7301"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.contrib.gis.db import models
import datetime
from django.utils import timezone
class AlertNotification(models.Model):
INCIDENT, NEARMISS, HAZARD, THEFT, UNDEFINED = range(5)
ACTION_CHOICES = (
(INCIDENT, _("Incident")),
(NEARMISS, _("Near miss")),
(HAZARD, _("Hazard")),
(THEFT, _("Theft")),
(UNDEFINED, _("Undefined"))
)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_("user"))
date = models.DateTimeField(auto_now_add=True)
action = models.IntegerField(choices=ACTION_CHOICES, default=UNDEFINED)
is_read = models.BooleanField(default=False)
emailed = models.BooleanField(default=False)
# objects = AlertNotificationManager()
def get_location(self):
return self.point.geom
@property
def text_action(self):
return ACTION_CHOICES[self.action][1]
@property
def is_incident(self):
return self.action == self.INCIDENT
@property
def is_nearmiss(self):
return self.action == self.NEARMISS
@property
def is_hazard(self):
return self.action == self.HAZARD
@property
def is_theft(self):
return self.action == self.THEFT
def __unicode__(self):
return "%s" % (self.user)
class Meta:
app_label = 'mapApp'
unique_together = ('user', 'point')
ordering = ['-date', ]
verbose_name = _("alert notification")
verbose_name_plural = _("alert notifications")
abstract= True
app_label = 'mapApp'
class IncidentNotification(AlertNotification):
point = models.ForeignKey('mapApp.Incident', on_delete=models.CASCADE, related_name='incidentNotification')
class HazardNotification(AlertNotification):
point = models.ForeignKey('mapApp.Hazard', on_delete=models.CASCADE, related_name='hazardNotification')
class TheftNotification(AlertNotification):
point = models.ForeignKey('mapApp.Theft', on_delete=models.CASCADE, related_name='theftNotification')
|
{
"content_hash": "8aa03b21cb864218e284d287a9770cb0",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 111,
"avg_line_length": 28.573333333333334,
"alnum_prop": 0.6677554829678022,
"repo_name": "SPARLab/BikeMaps",
"id": "8be2a4cdfbdee742f55f242a86ec83c31283c87f",
"size": "2143",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mapApp/models/alert_notification.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15111"
},
{
"name": "HTML",
"bytes": "134960"
},
{
"name": "JavaScript",
"bytes": "73792"
},
{
"name": "Python",
"bytes": "965817"
}
],
"symlink_target": ""
}
|
import sys
import socket
import random
from threading import Thread
from math import gcd
from base64 import b64encode
def lcm(a, b):
"""Return lowest common multiple of a and b"""
return a * b // gcd(a, b)
with open("key.png", "rb") as f:
SECRET = b64encode(f.read())
PAD_LENGTH = 1024 * 100
REPEAT = lcm(PAD_LENGTH, len(SECRET))
def client_thread(clientsocket):
random.seed()
pad = [ random.getrandbits(8) for i in range(PAD_LENGTH) ]
for i in range(REPEAT):
s = SECRET[i % len(SECRET)]
p = pad[i % len(pad)]
b = bytes([s ^ p])
if clientsocket.send(b) == 0:
return
clientsocket.close()
def main():
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((socket.gethostname(), 8888))
serversocket.listen()
print('len(SECRET) = {} Bytes'.format(len(SECRET)))
print('len(pad) = {} Bytes'.format(PAD_LENGTH))
print('shift = {} Bytes'.format(PAD_LENGTH % len(SECRET)))
print('Repeat after {} MiB'.format(REPEAT / 1024 ** 2))
sys.stdout.flush()
while True:
# accept connections on socket
(clientsocket, address) = serversocket.accept()
print('Client connected {}'.format(address))
sys.stdout.flush()
thread = Thread(target = client_thread, args = (clientsocket, ))
thread.start()
if __name__ == "__main__":
main()
|
{
"content_hash": "423e2e0948b2e64f0445c19bad07bb08",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 72,
"avg_line_length": 28.6734693877551,
"alnum_prop": 0.6170818505338078,
"repo_name": "Enteee/Enteee.github.io",
"id": "c6275789d918748049a5ebd918e07f59ee91bada",
"size": "1464",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "static/posts/broken-one-time-pad/challenge.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20940"
},
{
"name": "Gnuplot",
"bytes": "663"
},
{
"name": "HTML",
"bytes": "13180"
},
{
"name": "JavaScript",
"bytes": "5160"
},
{
"name": "Python",
"bytes": "4803"
},
{
"name": "Ruby",
"bytes": "1166"
},
{
"name": "Shell",
"bytes": "1884"
}
],
"symlink_target": ""
}
|
import numpy as np
import tensorflow as tf
from .. import config
from ..distribution import Distribution
def _normal_logp(X, mu, sigma):
return (
tf.log(1 / (tf.constant(np.sqrt(2 * np.pi), dtype=config.dtype) * sigma)) -
(X - mu)**2 / (tf.constant(2, dtype=config.dtype) * sigma**2)
)
def _normal_cdf(lim, mu, sigma):
return 0.5 * tf.erfc((mu - lim) / (tf.constant(np.sqrt(2), config.dtype) * sigma))
@Distribution
def Normal(mu, sigma, name=None):
# TODO(chrisburr) Just use NormalN?
X = tf.placeholder(config.dtype, name=name)
Distribution.logp = _normal_logp(X, mu, sigma)
def integral(lower, upper):
upper_integrand = tf.cond(
tf.is_inf(tf.cast(upper, config.dtype)),
lambda: tf.constant(1, dtype=config.dtype),
lambda: _normal_cdf(upper, mu, sigma)
)
lower_integrand = tf.cond(
tf.is_inf(tf.cast(lower, config.dtype)),
lambda: tf.constant(0, dtype=config.dtype),
lambda: _normal_cdf(lower, mu, sigma)
)
return upper_integrand - lower_integrand
Distribution.integral = integral
return X
# @Distribution
# def NormalN(mus, sigmas, name=None):
# X = tf.placeholder(config.dtype, name=name)
# logps = [_normal_logp(X, mu, sigma) for mu, sigma in zip(mus, sigmas)]
# def cdf(lim):
# raise NotImplementedError
# Distribution.logp = sum(logps)
# Distribution.integral = lambda lower, upper: cdf(upper) - cdf(lower)
# return X
|
{
"content_hash": "b37935607d5a59dc4ac96c2bb5ec6c29",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 86,
"avg_line_length": 26.551724137931036,
"alnum_prop": 0.612987012987013,
"repo_name": "ibab/tensorprob",
"id": "97dd3f5500675cb037704aac2e6a5012ece9d0a8",
"size": "1540",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorprob/distributions/normal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "58561"
}
],
"symlink_target": ""
}
|
"""
============================================
Wifi 802.11 simulation (:mod:`commpy.wifi80211`)
============================================
.. autosummary::
:toctree: generated/
Wifi80211 -- Class to simulate the transmissions and receiving parameters of physical layer 802.11
"""
import math
from typing import List
import numpy as np
import commpy.channelcoding.convcode as cc
import commpy.links as lk
import commpy.modulation as mod
from commpy.channels import _FlatChannel
# =============================================================================
# Convolutional Code
# =============================================================================
class Wifi80211:
"""
This class aims to simulate the transmissions and receiving parameters of physical layer 802.11 (currently till VHT (ac))
First the chunk is coded according to the generator matrix from the standard, having a rate of 1/2.
Then, depending on the Modulation Coding Scheme (MCS) used, puncturing is applied to achieve other coding rates.
For more details of which MCS map to which modulation and each coding the standard is *the* recommended place,
but for a lighter and faster source to check https://mcsindex.com is a good place.
Finally the bits are then mapped to the modulation scheme in conformity to the MCS (BPSK, QPSK, 16-QAM, 64-QAM, 256-QAM).
On the receiving the inverse operations are perform, with depuncture when MCS needs it.
"""
# Build memory and generator matrix
# Number of delay elements in the convolutional encoder
# "The encoder uses a 6-stage shift register."
# (https://pdfs.semanticscholar.org/c63b/71e43dc23b17ca57267f3b769224c64d5e33.pdf p.19)
memory = np.array(6, ndmin=1)
generator_matrix = np.array((133, 171), ndmin=2) # from 802.11 standard, page 2295
def get_modem(self) -> mod.Modem:
"""
Gets the modem that is going to be used for this particular WiFi simulation according to the MCS
"""
bits_per_symbol = [
2,
4,
4,
16,
16,
64,
64,
64,
256,
256
]
if self.mcs <= 2:
# BPSK for mcs 0
# QPSK for mcs 1 a 2
return mod.PSKModem(bits_per_symbol[self.mcs])
else:
# Modem : QAMModem
return mod.QAMModem(bits_per_symbol[self.mcs])
@staticmethod
def _get_puncture_matrix(numerator: int, denominator: int) -> List:
if numerator == 1 and denominator == 2:
return None
# from the standard 802.11 2016
if numerator == 2 and denominator == 3:
# page 2297
return [1, 1, 1, 0]
if numerator == 3 and denominator == 4:
# page 2297
return [1, 1, 1, 0, 0, 1]
if numerator == 5 and denominator == 6:
# page 2378
return [1, 1, 1, 0, 0, 1, 1, 0, 0, 1]
return None
def _get_coding(self):
coding = [
(1, 2),
(1, 2),
(3, 4),
(1, 2),
(3, 4),
(2, 3),
(3, 4),
(5, 6),
(3, 4),
(5, 6),
]
return coding[self.mcs]
@staticmethod
def _get_trellis():
return cc.Trellis(Wifi80211.memory, Wifi80211.generator_matrix)
def __init__(self, mcs: int):
"""
Build WiFi 802.11 simulation class
Parameters
----------
mcs : int
The Modulation Coding Scheme (MCS) to simulate.
A list of MCS and which coding and modulations they correspond to is bellow:
- 0 : BPSK 1/2
- 1 : QPSK 1/2
- 2 : QPSK 3/4
- 3 : 16-QAM 1/2
- 4 : 16-QAM 3/4
- 5 : 64-QAM 2/3
- 6 : 64-QAM 3/4
- 7 : 64-QAM 5/6
- 8 : 256-QAM 3/4
- 9 : 256-QAM 5/6
"""
self.mcs = mcs
self.modem = None
def link_performance(self, channel: _FlatChannel, SNRs, tx_max, err_min, send_chunk=None,
frame_aggregation=1, receiver=None, stop_on_surpass_error=True):
"""
Estimate the BER performance of a link model with Monte Carlo simulation as in commpy.links.link_performance
Parameters
----------
channel : _FlatChannel
The channel to be used for the simulation
SNRs : 1D arraylike
Signal to Noise ratio in dB defined as :math:`SNR_{dB} = (E_b/N_0)_{dB} + 10 \log_{10}(R_cM_c)`
where :math:`Rc` is the code rate and :math:`Mc` the modulation rate.
tx_max : int
Maximum number of transmissions for each SNR.
err_min : int
link_performance send bits until it reach err_min errors (see also send_max).
send_chunk : int
Number of bits to be send at each frame. This is also the frame length of the decoder if available
so it should be large enough regarding the code type.
*Default*: send_chunck = err_min
frame_aggregation : int
Number of frames aggregated per transmission (each frame with size send_chunk)
receiver : function
Specify a custom receiver function to be used in the simulation.
This is particular useful for MIMO simulations.
stop_on_surpass_error : bool
Controls if during simulation of a SNR it should break and move to the next SNR when
the bit error is above the err_min parameter
Returns
-------
BERs : 1d ndarray
Estimated Bit Error Ratio corresponding to each SNRs
"""
trellis1 = Wifi80211._get_trellis()
coding = self._get_coding()
modem = self.get_modem()
def modulate(bits):
res = cc.conv_encode(bits, trellis1, 'cont')
puncture_matrix = Wifi80211._get_puncture_matrix(coding[0], coding[1])
res_p = res
if puncture_matrix:
res_p = cc.puncturing(res, puncture_matrix)
return modem.modulate(res_p)
# Receiver function (no process required as there are no fading)
def _receiver(y, h, constellation, noise_var):
return modem.demodulate(y, 'soft', noise_var)
if not receiver:
receiver = _receiver
# Decoder function
def decoder_soft(msg):
msg_d = msg
puncture_matrix = Wifi80211._get_puncture_matrix(coding[0], coding[1])
if puncture_matrix:
try:
msg_d = cc.depuncturing(msg, puncture_matrix, math.ceil(len(msg) * coding[0] / coding[1] * 2))
except IndexError as e:
print(e)
print("Decoded message size %d" % (math.ceil(len(msg) * coding[0] / coding[1] * 2)))
print("Encoded message size %d" % len(msg))
print("Coding %d/%d" % (coding[0], coding[1]))
return cc.viterbi_decode(msg_d, trellis1, decoding_type='soft')
self.model = lk.LinkModel(modulate, channel, receiver,
modem.num_bits_symbol, modem.constellation, modem.Es,
decoder_soft, coding[0] / coding[1])
return self.model.link_performance_full_metrics(SNRs, tx_max,
err_min=err_min, send_chunk=send_chunk,
code_rate=coding[0] / coding[1],
number_chunks_per_send=frame_aggregation,
stop_on_surpass_error=stop_on_surpass_error
)
|
{
"content_hash": "add79adb39e62b584d2137e0238f7484",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 125,
"avg_line_length": 38.032863849765256,
"alnum_prop": 0.5199358103937786,
"repo_name": "veeresht/CommPy",
"id": "58ee43569929c3f20573c2ba4cfec264437a3d01",
"size": "8157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commpy/wifi80211.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "220593"
}
],
"symlink_target": ""
}
|
from .BasicRNNCell import BasicRNNCell
from .GRU import GRUCell
from .LSTM import BasicLSTMCell
__all__ = ('GRUCell', 'BasicLSTMCell', 'BasicRNNCell')
|
{
"content_hash": "9c01fab41bcc90f3d5030987d514ea15",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 54,
"avg_line_length": 30.4,
"alnum_prop": 0.7631578947368421,
"repo_name": "SwordYork/sequencing",
"id": "efec7698437165137dc5088dbb32300abf777330",
"size": "301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sequencing_np/nn/rnn_cells/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "4826"
},
{
"name": "Python",
"bytes": "282623"
},
{
"name": "Shell",
"bytes": "1746"
}
],
"symlink_target": ""
}
|
import numpy as np
from pandas._libs.tslibs import fields
import pandas.util.testing as tm
def test_fields_readonly():
# https://github.com/vaexio/vaex/issues/357
# fields functions should't raise when we pass read-only data
dtindex = np.arange(5, dtype=np.int64) * 10 ** 9 * 3600 * 24 * 32
dtindex.flags.writeable = False
result = fields.get_date_name_field(dtindex, "month_name")
expected = np.array(
["January", "February", "March", "April", "May"], dtype=np.object
)
tm.assert_numpy_array_equal(result, expected)
result = fields.get_date_field(dtindex, "Y")
expected = np.array([1970, 1970, 1970, 1970, 1970], dtype=np.int32)
tm.assert_numpy_array_equal(result, expected)
result = fields.get_start_end_field(dtindex, "is_month_start", None)
expected = np.array([True, False, False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
# treat dtindex as timedeltas for this next one
result = fields.get_timedelta_field(dtindex, "days")
expected = np.arange(5, dtype=np.int32) * 32
tm.assert_numpy_array_equal(result, expected)
|
{
"content_hash": "a56b2c23b5d3f946fbfb68acc77d5bfe",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 75,
"avg_line_length": 36.903225806451616,
"alnum_prop": 0.6809440559440559,
"repo_name": "toobaz/pandas",
"id": "cd729956a027c025b914a939a4286178af6bc26c",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/tslibs/test_fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394843"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "15031623"
},
{
"name": "Shell",
"bytes": "27585"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
}
|
"""Nikola -- a modular, fast, simple, static website generator."""
import os
import sys
__version__ = '8.0.0.dev0'
DEBUG = bool(os.getenv('NIKOLA_DEBUG'))
if sys.version_info[0] == 2:
raise Exception("Nikola does not support Python 2.")
from .nikola import Nikola # NOQA
from . import plugins # NOQA
|
{
"content_hash": "f1e86fd58cc11bb25004fa87e2972db9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 23.846153846153847,
"alnum_prop": 0.6806451612903226,
"repo_name": "gwax/nikola",
"id": "578564206efa449ca58be39175217e9a229d62c0",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16706"
},
{
"name": "JavaScript",
"bytes": "37423"
},
{
"name": "Python",
"bytes": "1207247"
},
{
"name": "Shell",
"bytes": "10237"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from .views import CarrierCRUDL, Carriers
from django.conf.urls import patterns, url, include
urlpatterns = CarrierCRUDL().as_urlpatterns()
urlpatterns += patterns('carriers.views',
url(r'^carriers', Carriers.as_view(), name='carriers.carriers'))
|
{
"content_hash": "7d2e7d9fee93ee7c5dc28d7ed6331fba",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 88,
"avg_line_length": 39,
"alnum_prop": 0.6996336996336996,
"repo_name": "nyaruka/sigtrac",
"id": "921de5bdc2c6db5fd35c0e713d8f54cc70b8254a",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sigtrac/carriers/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3249"
},
{
"name": "Groovy",
"bytes": "1318"
},
{
"name": "Java",
"bytes": "56803"
},
{
"name": "JavaScript",
"bytes": "31015"
},
{
"name": "Python",
"bytes": "66975"
},
{
"name": "Shell",
"bytes": "7484"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ExpressRouteCircuitPeeringConfig(Model):
"""Specifies the peering configuration.
:param advertised_public_prefixes: The reference of
AdvertisedPublicPrefixes.
:type advertised_public_prefixes: list[str]
:param advertised_communities: The communities of bgp peering. Spepcified
for microsoft peering
:type advertised_communities: list[str]
:param advertised_public_prefixes_state: AdvertisedPublicPrefixState of
the Peering resource. Possible values are 'NotConfigured', 'Configuring',
'Configured', and 'ValidationNeeded'. Possible values include:
'NotConfigured', 'Configuring', 'Configured', 'ValidationNeeded'
:type advertised_public_prefixes_state: str or
~azure.mgmt.network.v2017_06_01.models.ExpressRouteCircuitPeeringAdvertisedPublicPrefixState
:param legacy_mode: The legacy mode of the peering.
:type legacy_mode: int
:param customer_asn: The CustomerASN of the peering.
:type customer_asn: int
:param routing_registry_name: The RoutingRegistryName of the
configuration.
:type routing_registry_name: str
"""
_attribute_map = {
'advertised_public_prefixes': {'key': 'advertisedPublicPrefixes', 'type': '[str]'},
'advertised_communities': {'key': 'advertisedCommunities', 'type': '[str]'},
'advertised_public_prefixes_state': {'key': 'advertisedPublicPrefixesState', 'type': 'str'},
'legacy_mode': {'key': 'legacyMode', 'type': 'int'},
'customer_asn': {'key': 'customerASN', 'type': 'int'},
'routing_registry_name': {'key': 'routingRegistryName', 'type': 'str'},
}
def __init__(self, *, advertised_public_prefixes=None, advertised_communities=None, advertised_public_prefixes_state=None, legacy_mode: int=None, customer_asn: int=None, routing_registry_name: str=None, **kwargs) -> None:
super(ExpressRouteCircuitPeeringConfig, self).__init__(**kwargs)
self.advertised_public_prefixes = advertised_public_prefixes
self.advertised_communities = advertised_communities
self.advertised_public_prefixes_state = advertised_public_prefixes_state
self.legacy_mode = legacy_mode
self.customer_asn = customer_asn
self.routing_registry_name = routing_registry_name
|
{
"content_hash": "0d2fc7f6db868516cf4eacf5a4ff5138",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 225,
"avg_line_length": 52.70454545454545,
"alnum_prop": 0.709357481673135,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "b1ffd9ef70aa45c3585d41eeea351a4623382cdd",
"size": "2793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/express_route_circuit_peering_config_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
from flask import Flask , jsonify , request,abort
from AI import main
import pandas as pd
import numpy as np
import datetime
import twilio_service
from pymongo import MongoClient
app = Flask(__name__)
@app.route('/verify', methods=['POST'])
def get_probability():
if not request.json:
abort(400)
df= pd.DataFrame(columns=["Amount","CardVendorFeature","LoginAtempts","ClientCountryFeature",
'TransactionTypeFeature','Longitude','Latitude','CountryFeature',
'AmountOfSpentMoneyPerDay','CardTypeFeature','MerchantFeature',
'CardStartFeature','CardEndFeature','CardExpiryDateFeature',
'LastTransactionDateFeature','AmountOfSpentMoneyPerMonth','Class'],
data=[[request.json['Amount'],request.json['CardVendorFeature'],request.json['LoginAtempts'],
request.json['ClientCountryFeature'],request.json['TransactionTypeFeature'],
request.json['Longitude'],request.json['Latitude'],request.json['CountryFeature'],
request.json['AmountOfSpentMoneyPerDay'],request.json['CardTypeFeature'],request.json['MerchantFeature'],
request.json['CardStartFeature'],request.json['CardEndFeature'],
request.json['CardExpiryDateFeature'],request.json['LastTransactionDateFeature'],
request.json['AmountOfSpentMoneyPerMonth'],request.json['Class']]])
df['LastTransactionDateFeature'] = datetime.date.today() - datetime.timedelta(days=4)
df['CardExpiryDateFeature'] = df['CardExpiryDateFeature'].map(np.datetime64)
df['TransactionTimeFeature'] = datetime.time()
df['TransactionDateFeature'] = datetime.date.today()
df = df[["Amount","CardVendorFeature","LoginAtempts","ClientCountryFeature",
'TransactionTypeFeature','Longitude','Latitude','CountryFeature',
'AmountOfSpentMoneyPerDay','CardTypeFeature','MerchantFeature',
'CardStartFeature','CardEndFeature','CardExpiryDateFeature',
'TransactionTimeFeature','TransactionDateFeature','LastTransactionDateFeature','AmountOfSpentMoneyPerMonth','Class']]
df= main.return_prediction(df)
df['LastTransactionDateFeature'] = df['LastTransactionDateFeature'].astype('datetime64')
fraudProbability = df['FraudProbability'][0]
client = MongoClient()
db = client.FraudDetection
df['TransactionId'] = db.Transactions.find_one(sort=[("TransactionId", -1)])["TransactionId"] +1
df['SmsCode'] = np.random.randint(100001,999999)
trans = df.to_dict('records')
db.Transactions.insert(trans)
print(fraudProbability)
if fraudProbability > 0.3:
#twilio_service.send_alert_message(df['Merchant'][0],df['Amount'][0],df['SmsCode'][0])
print(df['SmsCode'][0])
return jsonify({'fraudProbability': fraudProbability})
if __name__ == '__main__':
app.run(debug=True)
|
{
"content_hash": "58e6fe3ae81f760e1087a195f049ae75",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 147,
"avg_line_length": 48.484375,
"alnum_prop": 0.6455043506284242,
"repo_name": "imironica/Fraud-Detection-System",
"id": "4d269321d73ca2017002f48fc1b23da035e9acda",
"size": "3103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FraudDetection.ML/python_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "60532"
},
{
"name": "CSS",
"bytes": "413833"
},
{
"name": "HTML",
"bytes": "112184"
},
{
"name": "JavaScript",
"bytes": "8737"
},
{
"name": "Python",
"bytes": "247699"
},
{
"name": "TypeScript",
"bytes": "52443"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
import plotly
n = 50
x, y, z, s, ew = np.random.rand(5, n)
c, ec = np.random.rand(2, n, 4)
area_scale, width_scale = 500, 5
fig, ax = plt.subplots()
sc = ax.scatter(x, y, c=c,
s=np.square(s)*area_scale,
edgecolor=ec,
linewidth=ew*width_scale)
ax.grid()
# Use "iplot_mpl" instead of "plot_mpl" in the next line when working in a Jupyter Notebook
plot_url = plotly.offline.plot_mpl(fig)
|
{
"content_hash": "b72cd79a21e3846515d4446e60f4a029",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 91,
"avg_line_length": 27.055555555555557,
"alnum_prop": 0.6262833675564682,
"repo_name": "jeremiedecock/snippets",
"id": "9fb1b5824da297400d97038e38881dc56661f208",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/plotly/offline_demo_using_matplotlib.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "4294"
},
{
"name": "Batchfile",
"bytes": "6779"
},
{
"name": "C",
"bytes": "102107"
},
{
"name": "C++",
"bytes": "320943"
},
{
"name": "CMake",
"bytes": "11424"
},
{
"name": "CSS",
"bytes": "21121"
},
{
"name": "Cython",
"bytes": "21"
},
{
"name": "Dockerfile",
"bytes": "1818"
},
{
"name": "Fortran",
"bytes": "633"
},
{
"name": "Gnuplot",
"bytes": "39999"
},
{
"name": "Go",
"bytes": "3166"
},
{
"name": "Groovy",
"bytes": "3009"
},
{
"name": "HTML",
"bytes": "138995"
},
{
"name": "IDL",
"bytes": "43"
},
{
"name": "Java",
"bytes": "120221"
},
{
"name": "JavaScript",
"bytes": "32342"
},
{
"name": "Jinja",
"bytes": "206"
},
{
"name": "Jupyter Notebook",
"bytes": "95991"
},
{
"name": "Lua",
"bytes": "200"
},
{
"name": "M4",
"bytes": "111"
},
{
"name": "MATLAB",
"bytes": "31972"
},
{
"name": "Makefile",
"bytes": "81307"
},
{
"name": "OpenSCAD",
"bytes": "14995"
},
{
"name": "PHP",
"bytes": "94"
},
{
"name": "Perl",
"bytes": "46"
},
{
"name": "Processing",
"bytes": "208"
},
{
"name": "Prolog",
"bytes": "454"
},
{
"name": "Python",
"bytes": "1685966"
},
{
"name": "R",
"bytes": "76"
},
{
"name": "Raku",
"bytes": "43"
},
{
"name": "Ruby",
"bytes": "42"
},
{
"name": "Scheme",
"bytes": "649"
},
{
"name": "Shell",
"bytes": "52865"
},
{
"name": "Smalltalk",
"bytes": "55"
},
{
"name": "TeX",
"bytes": "1189"
},
{
"name": "Vue",
"bytes": "49445"
},
{
"name": "XSLT",
"bytes": "1816"
}
],
"symlink_target": ""
}
|
from ClassBuilder import ClassBuilder
def main():
instance = ClassBuilder() \
.with_attribute_1('Attribute 1') \
.with_attribute_2('Attribute 2') \
.with_attribute_3('Attribute 3') \
.build()
print (instance.attribute_1)
print (instance.attribute_2)
print (instance.attribute_3)
main()
|
{
"content_hash": "4c420e90f6b66e94313dc1783190f15e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 42,
"avg_line_length": 24.071428571428573,
"alnum_prop": 0.6261127596439169,
"repo_name": "danieldeveloper001/Learning",
"id": "586b489a0a7c3802ad76670f0d75a78ad5b2d947",
"size": "337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/02_DesignPatterns/Creational/Builder/Program.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "7563"
},
{
"name": "CSS",
"bytes": "24937"
},
{
"name": "HTML",
"bytes": "41307"
},
{
"name": "JavaScript",
"bytes": "64280"
},
{
"name": "PowerShell",
"bytes": "3737"
},
{
"name": "Python",
"bytes": "44282"
},
{
"name": "Ruby",
"bytes": "68363"
},
{
"name": "TypeScript",
"bytes": "16270"
}
],
"symlink_target": ""
}
|
import os, sys, tempfile
from subprocess import Popen
import math, numpy
from common import config, fileutils
class GNUPlot:
def __init__(self, filename, title, group=None):
self.group = group
self.filename = filename.replace(" ", "-")
self.title = title
self.data = {}
self.image_type = config.DEFAULT_IMAGE_TYPE
self.style = None
self.series_values = None
self.series_styles = {}
self.xvalues = []
self.width = 640
self.height = 400
self.axis_specs = None
self.fontsize = 9
self.keyfontsize = 9
self.key_args = []
self.suppressed_titles = []
self.plotscript = None
self.failed = False
self.custom_setup = []
def add_custom_setup(self, command):
self.custom_setup.append(command)
def legend(self, args):
self.key_args.append(args)
def suppress_title(self, series):
self.suppressed_titles.append(series)
def set_series_style(self, series, style):
self.series_styles[series] = style
def set_value(self, series, x, y):
if type(series) is int or type(series) is float:
series = str(series)
if series not in self.data:
self.data[series] = {}
self.data[series][x] = y
if x not in self.xvalues:
self.xvalues.append(x)
def get_image_location(self):
imagename = "%s.%s" % (self.filename, self.image_type)
if self.group is not None:
return fileutils.getimagepath(imagename, self.group)
else:
return fileutils.getimagepath(imagename)
def get_data_location(self):
dataname = "%s.dat" % self.filename
return fileutils.getcache(dataname, "gnuplot")
def get_terminal_args(self):
if self.image_type == "eps":
return [
"postscript eps color enhanced",
'font "Arial"',
]
elif self.image_type == "png":
return [
"png nocrop enhanced",
'font "Arial" %d' % self.fontsize,
" size %d,%d\n" % (self.width, self.height),
]
def get_key_args(self):
key_args = self.key_args[:]
if "off" in key_args or self.image_type == "eps":
return key_args
key_args.append('font "Arial,%d"' % self.keyfontsize)
return key_args
def write_datafile(self):
if self.series_values is None:
self.series_values = sorted(self.data.keys())
datapath = self.get_data_location()
datafile = open(datapath, "w")
# put label at the end since it might be a long string
series_values = ['"' + value + '"' for value in self.series_values]
datafile.write("\t".join(series_values) + "\tlabel\n")
min_y = None
max_y = None
# these will determine where to place the legend
# or if we need extra room across the board
max_y_righthalf = None
max_y_lefthalf = None
# determine how many points are candidates for being covered
# i.e. ignore the middle 25%: starting at 8 points the middle
# 2 are ignored, at 16 the middle 4, etc.
midpoint = len(self.xvalues) / 2
midquarter = round(midpoint / 5)
lefthalf_cutoff = math.ceil(midpoint) - 1 - midquarter
righthalf_cutoff = math.floor(midpoint) + midquarter
for i in range(len(self.xvalues)):
x = self.xvalues[i]
rowdata = []
for series in self.series_values:
if series in self.data and x in self.data[series]:
rowdata.append(self.data[series][x])
else:
rowdata.append("")
func = lambda x: type(x) is int or type(x) is float \
or type(x) is numpy.float64
rowvalues = list(filter(func, rowdata))
if self.style == "histogram rowstacked":
local_min = sum(rowvalues)
local_max = local_min
elif self.style == "points":
local_min = rowvalues[1]
local_max = rowvalues[1]
else:
local_min = min(rowvalues)
local_max = max(rowvalues)
if max_y is None or max_y < local_max:
max_y = local_max
if min_y is None or min_y > local_min:
min_y = local_min
if i <= lefthalf_cutoff:
if max_y_lefthalf is None or max_y_lefthalf < local_max:
max_y_lefthalf = local_max
if i >= righthalf_cutoff:
if max_y_righthalf is None or max_y_righthalf < local_max:
max_y_righthalf = local_max
rowstrdata = [str(y) for y in rowdata]
if type(x) is str:
datafile.write("\t".join(rowstrdata) + '\t"' + x + '"\n')
else:
datafile.write("\t".join(rowstrdata) + "\t" + str(x) + "\n")
datafile.close()
#print(self.filename, max_y_righthalf, max_y_lefthalf, min_y, max_y)
if max_y is None:
self.failed = True
return
self.max_y = max_y * 1.1
self.min_y = min_y
self.max_y_lefthalf = max_y_lefthalf
self.max_y_righthalf = max_y_righthalf
def get_header(self):
specs = [
"reset",
"set terminal %s" % " ".join(self.get_terminal_args()),
"set output '%s'" % self.get_image_location(),
]
return "\n".join(specs)
def get_footer(self):
return ""
def get_appearance_specs(self):
specs = []
for arg in self.get_key_args():
specs.append("set key %s" % arg)
if self.is_histogram():
specs.append("set style data histogram")
specs.append("set palette gray")
specs.append("unset colorbox")
# rotate xtics based on arbitrary length cutoff
xtic_len = max([len(str(series)) for series in self.xvalues])
if self.style != "histogram horizontal" and xtic_len > 8:
specs.append("set xtics nomirror rotate by -45")
if self.style == "histogram rowstacked":
specs.append("set style histogram rowstacked")
specs.append("set boxwidth 0.75 absolute")
elif self.style == "histogram horizontal":
# rotated plot
# http://gnuplot-tricks.blogspot.com/2009/10/turning-of-histogram.html
specs.append("set xtics rotate by 90 scale 0")
specs.append("set xtics offset -2,-%d"
% math.ceil(xtic_len / 2.5))
# TODO make this work like right top, left top
num_series = len(self.series_values)
key_location = 1 - 0.05 * num_series
for i in range(num_series):
series = self.series_values[i]
xtic = self.xvalues[i]
specs.append(
"set label %d '%s' at graph %.2f, 0.7 left rotate by 90"
% (i + 1, str(series), key_location + 0.05 * (i - 1)))
specs.append(
"set key at graph %.2f, 0.7 horizontal samplen 0.1"
% (key_location + 0.01))
specs.append("set y2tics rotate by 90")
specs.append("set y2tics offset 0,-1")
specs.append("unset ytics")
else:
specs.append("set style histogram clustered")
specs.append("set style fill solid 1.00 border -1")
if self.title and not config.SUPPRESS_PLOT_TITLES:
# TODO: sanitize titles
specs.append("set title '%s'" % self.title)
return specs
# TODO: this has a side effect on max_y
def get_axis_specs(self):
if self.axis_specs is not None:
return self.axis_specs
specs = []
adjust = True
for arg in self.key_args:
argparts = arg.split()
if arg == "off" or "out" in argparts or "below" in argparts:
adjust = False
# decide on y range
if adjust:
increase = 1.3 # threshold to add space on top
move = 1.2 # theshold to determine where to place key
increase_factor = 1.3
if self.max_y_righthalf * increase > self.max_y_lefthalf and \
self.max_y_lefthalf * increase > self.max_y_righthalf:
# both left and right are within $increase of each other
self.max_y *= increase_factor
if self.max_y_righthalf > self.max_y_lefthalf * move:
specs.append("set key left top")
elif self.max_y_righthalf * move < self.max_y_lefthalf:
specs.append("set key right top")
if self.max_y < 1:
order = int(round(math.log(self.max_y, 10)))
else:
order = int(math.log(self.max_y, 10))
self.max_y = math.ceil(self.max_y / 10**order) * 10**order
if order < 1:
inner = "%%.%df" % abs(order)
else:
inner = "%d."
if self.min_y < 0:
self.min_y = math.floor(self.min_y / 10**order) * 10**order
template = "set yrange [ %s : %s ] noreverse nowriteback" \
% (inner, inner)
specs.append(template % (self.min_y, self.max_y))
else:
template = "set yrange [ 0 : %s ] noreverse nowriteback" % inner
specs.append(template % self.max_y)
self.axis_specs = specs
return specs
def write_tables(self):
self.write_datafile()
if self.failed:
sys.stderr.write("Invalid data for %s, aborting plot\n"
% self.filename)
return
def get_plotscript_content(self):
content = "\n".join(self.get_appearance_specs()) + "\n" + \
"\n".join(self.get_axis_specs()) + "\n" + \
"\n".join(self.custom_setup) + "\n" + \
"plot '%s' " % self.get_data_location() + \
", ".join(self.get_plot_clauses())
print(content)
return content
def multiplot(nrows, ncols, *plots):
for plot in plots:
plot.write_tables()
plotscript = tempfile.NamedTemporaryFile(mode="w",
suffix=".gnu",
delete=False)
plotscript.write(plots[0].get_header() + "\n")
plotscript.write("set multiplot layout %d,%d\n" % (nrows, ncols))
for plot in plots:
plotscript.write(plot.get_plotscript_content() + "\n")
plotscript.write("unset multiplot\n")
plotscript.close()
filename = plotscript.name
proc = Popen('gnuplot ' + filename, shell=True)
proc.wait()
if config.DEBUG_MODE:
print("new plot created at %s" % plots[0].get_image_location())
os.remove(filename)
def is_histogram(self):
return self.style is not None and self.style.startswith("histogram")
def get_plot_clauses(self):
plot_clauses = []
# keep track of which column the series gets put in
series_positions = {}
num_series = len(self.series_values)
for i in range(num_series):
series = self.series_values[i]
series_positions[series] = i + 1
label_column = num_series + 1
firstclause = True
for i in range(num_series):
series = self.series_values[i]
pos = series_positions[series]
if firstclause:
if self.is_histogram():
clause = "using %d:xtic(%d) lc palette frac %.2f" \
% (pos, label_column, float(pos) / num_series)
else:
clause = "using %d:%d" % (label_column, pos)
firstclause = False
else:
if self.is_histogram():
clause = "'' using %d lc palette frac %.2f" \
% (pos, float(pos) / num_series)
else:
clause = "'' using %d:%d" % (label_column, pos)
if series in self.series_styles:
clause += " " + self.series_styles[series]
if series in self.suppressed_titles:
clause += " notitle"
elif self.style == "histogram horizontal":
clause += " title ' '" # still want key for this series
else:
clause += " title column(%d)" % pos
plot_clauses.append(clause)
return plot_clauses
def generate_plot(self):
if self.failed:
return
with tempfile.NamedTemporaryFile(mode="w", suffix=".gnu",
delete=False) as plotscript:
plotscript.write(self.get_header() + "\n" +
self.get_plotscript_content() + "\n" +
self.get_footer())
filename = plotscript.name
proc = Popen('gnuplot ' + filename, shell=True)
proc.wait()
if config.DEBUG_MODE:
print("new plot created at %s" % self.get_image_location())
os.remove(filename)
class ScatterPlot(GNUPlot):
def get_plot_clauses(self):
clauses = ['using 1:2:3 with labels font "Arial,8"']
return clauses
def __init__(self, filename, title, group=None):
GNUPlot.__init__(self, filename, title, group)
self.style = "points"
self.legend("off")
|
{
"content_hash": "77569ed291b965dac994f7186e7edf91",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 86,
"avg_line_length": 33.89705882352941,
"alnum_prop": 0.5203904555314534,
"repo_name": "sonya/eea",
"id": "fa0bc5a1aeb0db1c4a370630ad3117d5958418b8",
"size": "14410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/common/plotutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "441743"
},
{
"name": "Shell",
"bytes": "31869"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
from lib.core.common import singleTimeWarnMessage
from lib.core.data import kb
from lib.core.enums import DBMS
from lib.core.enums import PRIORITY
from lib.core.settings import IGNORE_SPACE_AFFECTED_KEYWORDS
__priority__ = PRIORITY.HIGHER
def dependencies():
singleTimeWarnMessage("tamper script '%s' is only meant to be run against %s < 5.1" % (os.path.basename(__file__).split(".")[0], DBMS.MYSQL))
def tamper(payload, **kwargs):
"""
Adds versioned MySQL comment before each keyword
Requirement:
* MySQL < 5.1
Tested against:
* MySQL 4.0.18, 5.0.22
Notes:
* Useful to bypass several web application firewalls when the
back-end database management system is MySQL
* Used during the ModSecurity SQL injection challenge,
http://modsecurity.org/demo/challenge.html
>>> tamper("value' UNION ALL SELECT CONCAT(CHAR(58,107,112,113,58),IFNULL(CAST(CURRENT_USER() AS CHAR),CHAR(32)),CHAR(58,97,110,121,58)), NULL, NULL# AND 'QDWa'='QDWa")
"value'/*!0UNION/*!0ALL/*!0SELECT/*!0CONCAT(/*!0CHAR(58,107,112,113,58),/*!0IFNULL(CAST(/*!0CURRENT_USER()/*!0AS/*!0CHAR),/*!0CHAR(32)),/*!0CHAR(58,97,110,121,58)),/*!0NULL,/*!0NULL#/*!0AND 'QDWa'='QDWa"
"""
def process(match):
word = match.group('word')
if word.upper() in kb.keywords and word.upper() not in IGNORE_SPACE_AFFECTED_KEYWORDS:
return match.group().replace(word, "/*!0%s" % word)
else:
return match.group()
retVal = payload
if payload:
retVal = re.sub(r"(?<=\W)(?P<word>[A-Za-z_]+)(?=\W|\Z)", lambda match: process(match), retVal)
retVal = retVal.replace(" /*!0", "/*!0")
return retVal
|
{
"content_hash": "b3df54fef913223b4f4fd90aa122870f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 207,
"avg_line_length": 34.9811320754717,
"alnum_prop": 0.6434735706580367,
"repo_name": "michaelhidalgo/7WCSQ",
"id": "ef9c4ff61ff577f37f72546d0655f967115d77fa",
"size": "1877",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Tools/SQLMap/sqlmap/tamper/halfversionedmorekeywords.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13307"
},
{
"name": "C++",
"bytes": "1641"
},
{
"name": "Objective-C",
"bytes": "516"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "2136"
},
{
"name": "Python",
"bytes": "1630594"
},
{
"name": "Shell",
"bytes": "9683"
}
],
"symlink_target": ""
}
|
import argparse
parser = argparse.ArgumentParser(description="Go through the chunks, and plot comparisons relative to the original files.")
parser.add_argument("--chunk_index", type=int, help="Only run the calibration on a specific chunk.")
args = parser.parse_args()
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import psoap.constants as C
from psoap.data import Chunk
from psoap import covariance
import yaml
try:
f = open("config.yaml")
config = yaml.load(f)
f.close()
except FileNotFoundError as e:
print("You need to copy a config.yaml file to this directory, and then edit the values to your particular case.")
raise
# read in the chunks.dat file
chunks = ascii.read(config["chunk_file"])
print("Optimizing the calibration for the following chunks of data")
print(chunks)
pars = config["parameters"]
# Go through each chunk and optimize the calibration.
for chunk_index,chunk in enumerate(chunks):
if (args.chunk_index is not None) and (chunk_index != args.chunk_index):
continue
order, wl0, wl1 = chunk
chunk = Chunk.open(order, wl0, wl1)
plots_dir = "plots_" + C.chunk_fmt.format(order, wl0, wl1)
# Load the previously corrected flux values.
fl_cor = np.load(plots_dir + "/fl_cor.npy")
# Go through and plot the change in each epoch relative to the original (and relative to the highest S/N epoch.)
wl = chunk.wl
fl_orig = chunk.fl
date = chunk.date
mask = chunk.mask
# print("date", date)
date1D = chunk.date1D
print(wl.shape)
print(chunk.n_epochs)
print(fl_cor.shape)
print("Plotting", order, wl0, wl1)
# Make a figure comparing the optimization
fig, ax = plt.subplots(nrows=3, figsize=(8,6), sharex=True)
for i in range(chunk.n_epochs):
# print(np.allclose(fl_orig[i], chunkSpec.fl[i]))
ax[0].plot(chunk.wl[i], fl_orig[i])
ax[1].plot(chunk.wl[i], fl_cor[i])
ax[2].plot(chunk.wl[i], fl_cor[i]/fl_orig[i])
ax[0].set_ylabel("original")
ax[1].set_ylabel("optimized")
ax[2].set_ylabel("correction")
ax[-1].set_xlabel(r"$\lambda [\AA]$")
fig.savefig(plots_dir + "/optim_cal.png")
# Go through and re-plot the chunks with highlighted mask points.
# plot these relative to the highest S/N flux, so we know what looks suspicious, and what to mask.
for i in range(chunk.n_epochs):
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(10,8))
ax[0].plot(wl[0], fl_orig[0], color="0.5")
ax[0].plot(wl[i], fl_orig[i], color="b")
ax[0].plot(wl[i][~mask[i]], fl_cor[i][~mask[i]], color="r")
ax[1].plot(wl[0], fl_cor[0], color="0.5")
ax[1].plot(wl[i], fl_cor[i], color="b")
ax[1].plot(wl[i][~mask[i]], fl_cor[i][~mask[i]], color="r")
ax[2].plot(wl[i], fl_cor[i]/fl_orig[i], color="0.5")
ax[2].set_xlabel(r"$\lambda\quad[\AA]$")
fig.savefig(plots_dir + "/{:.1f}.png".format(date1D[i]))
plt.close('all')
|
{
"content_hash": "af11409b74da2811f2545d8c29ba85dc",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 123,
"avg_line_length": 33.120879120879124,
"alnum_prop": 0.6420039814200398,
"repo_name": "iancze/PSOAP",
"id": "27971402911b7a20ec228502fd4638db67b8d15c",
"size": "3037",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/psoap_plot_calibration.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "351158"
},
{
"name": "Shell",
"bytes": "566"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import copy
#import logging
import sys
import math
import numpy as np
from sasdata.dataloader.data_info import Data1D
from sasdata.dataloader.data_info import Data2D
_SMALLVALUE = 1.0e-10
class FitHandler(object):
"""
Abstract interface for fit thread handler.
The methods in this class are called by the optimizer as the fit
progresses.
Note that it is up to the optimizer to call the fit handler correctly,
reporting all status changes and maintaining the 'done' flag.
"""
done = False
"""True when the fit job is complete"""
result = None
"""The current best result of the fit"""
def improvement(self):
"""
Called when a result is observed which is better than previous
results from the fit.
result is a FitResult object, with parameters, #calls and fitness.
"""
def error(self, msg):
"""
Model had an error; print traceback
"""
def progress(self, current, expected):
"""
Called each cycle of the fit, reporting the current and the
expected amount of work. The meaning of these values is
optimizer dependent, but they can be converted into a percent
complete using (100*current)//expected.
Progress is updated each iteration of the fit, whatever that
means for the particular optimization algorithm. It is called
after any calls to improvement for the iteration so that the
update handler can control I/O bandwidth by suppressing
intermediate improvements until the fit is complete.
"""
def finalize(self):
"""
Fit is complete; best results are reported
"""
def abort(self):
"""
Fit was aborted.
"""
# TODO: not sure how these are used, but they are needed for running the fit
def update_fit(self, last=False): pass
def set_result(self, result=None): self.result = result
class Model:
"""
Fit wrapper for SAS models.
"""
def __init__(self, sas_model, sas_data=None, **kw):
"""
:param sas_model: the sas model to wrap for fitting
"""
self.model = sas_model
self.name = sas_model.name
self.data = sas_data
def get_params(self, fitparams):
"""
return a list of value of parameter to fit
:param fitparams: list of parameters name to fit
"""
return [self.model.getParam(k) for k in fitparams]
def set_params(self, paramlist, params):
"""
Set value for parameters to fit
:param params: list of value for parameters to fit
"""
for k,v in zip(paramlist, params):
self.model.setParam(k,v)
def set(self, **kw):
self.set_params(*zip(*kw.items()))
def eval(self, x):
"""
Override eval method of model.
:param x: the x value used to compute a function
"""
try:
return self.model.evalDistribution(x)
except:
raise
def eval_derivs(self, x, pars=[]):
"""
Evaluate the model and derivatives wrt pars at x.
pars is a list of the names of the parameters for which derivatives
are desired.
This method needs to be specialized in the model to evaluate the
model function. Alternatively, the model can implement is own
version of residuals which calculates the residuals directly
instead of calling eval.
"""
raise NotImplementedError('no derivatives available')
def __call__(self, x):
return self.eval(x)
class FitData1D(Data1D):
"""
Wrapper class for SAS data
FitData1D inherits from DataLoader.data_info.Data1D. Implements
a way to get residuals from data.
"""
def __init__(self, x, y, dx=None, dy=None, smearer=None, data=None, lam=None, dlam=None):
"""
:param smearer: is an object of class QSmearer or SlitSmearer
that will smear the theory data (slit smearing or resolution
smearing) when set.
The proper way to set the smearing object would be to
do the following: ::
from sas.sascalc.fit.qsmearing import smear_selection
smearer = smear_selection(some_data)
fitdata1d = FitData1D( x= [1,3,..,],
y= [3,4,..,8],
dx=None,
dy=[1,2...], smearer= smearer)
:Note: that some_data _HAS_ to be of
class DataLoader.data_info.Data1D
Setting it back to None will turn smearing off.
"""
Data1D.__init__(self, x=x, y=y, dx=dx, dy=dy, lam=lam, dlam=dlam)
self.num_points = len(x)
self.sas_data = data
self.smearer = smearer
self._first_unsmeared_bin = None
self._last_unsmeared_bin = None
# Check error bar; if no error bar found, set it constant(=1)
# TODO: Should provide an option for users to set it like percent,
# constant, or dy data
if dy is None or dy == [] or dy.all() == 0:
self.dy = np.ones(len(y))
else:
self.dy = np.asarray(dy).copy()
## Min Q-value
#Skip the Q=0 point, especially when y(q=0)=None at x[0].
if min(self.x) == 0.0 and self.x[0] == 0 and\
not np.isfinite(self.y[0]):
self.qmin = min(self.x[self.x != 0])
else:
self.qmin = min(self.x)
## Max Q-value
self.qmax = max(self.x)
# Range used for input to smearing
self._qmin_unsmeared = self.qmin
self._qmax_unsmeared = self.qmax
# Identify the bin range for the unsmeared and smeared spaces
self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
& (self.x <= self._qmax_unsmeared)
def set_fit_range(self, qmin=None, qmax=None):
""" to set the fit range"""
# Skip Q=0 point, (especially for y(q=0)=None at x[0]).
# ToDo: Find better way to do it.
if qmin == 0.0 and not np.isfinite(self.y[qmin]):
self.qmin = min(self.x[self.x != 0])
elif qmin is not None:
self.qmin = qmin
if qmax is not None:
self.qmax = qmax
# Determine the range needed in unsmeared-Q to cover
# the smeared Q range
self._qmin_unsmeared = self.qmin
self._qmax_unsmeared = self.qmax
self._first_unsmeared_bin = 0
self._last_unsmeared_bin = len(self.x) - 1
if self.smearer is not None:
self._first_unsmeared_bin, self._last_unsmeared_bin = \
self.smearer.get_bin_range(self.qmin, self.qmax)
self._qmin_unsmeared = self.x[self._first_unsmeared_bin]
self._qmax_unsmeared = self.x[self._last_unsmeared_bin]
# Identify the bin range for the unsmeared and smeared spaces
self.idx = (self.x >= self.qmin) & (self.x <= self.qmax)
## zero error can not participate for fitting
self.idx = self.idx & (self.dy != 0)
self.idx_unsmeared = (self.x >= self._qmin_unsmeared) \
& (self.x <= self._qmax_unsmeared)
def get_fit_range(self):
"""
Return the range of data.x to fit
"""
return self.qmin, self.qmax
def size(self):
"""
Number of measurement points in data set after masking, etc.
"""
return len(self.x)
def residuals(self, fn):
"""
Compute residuals.
If self.smearer has been set, use if to smear
the data before computing chi squared.
:param fn: function that return model value
:return: residuals
"""
# Compute theory data f(x)
fx = np.zeros(len(self.x))
fx[self.idx_unsmeared] = fn(self.x[self.idx_unsmeared])
## Smear theory data
if self.smearer is not None:
fx = self.smearer(fx, self._first_unsmeared_bin,
self._last_unsmeared_bin)
## Sanity check
if np.size(self.dy) != np.size(fx):
msg = "FitData1D: invalid error array "
msg += "%d <> %d" % (np.shape(self.dy), np.size(fx))
raise RuntimeError(msg)
return (self.y[self.idx] - fx[self.idx]) / self.dy[self.idx], fx[self.idx]
def residuals_deriv(self, model, pars=[]):
"""
:return: residuals derivatives .
:note: in this case just return empty array
"""
return []
class FitData2D(Data2D):
"""
Wrapper class for SAS data
"""
def __init__(self, sas_data2d, data=None, err_data=None):
Data2D.__init__(self, data=data, err_data=err_data)
# Data can be initialized with a sas plottable or with vectors.
self.res_err_image = []
self.num_points = 0 # will be set by set_data
self.idx = []
self.qmin = None
self.qmax = None
self.smearer = None
self.radius = 0
self.res_err_data = []
self.sas_data = sas_data2d
self.set_data(sas_data2d)
def set_data(self, sas_data2d, qmin=None, qmax=None):
"""
Determine the correct qx_data and qy_data within range to fit
"""
self.data = sas_data2d.data
self.err_data = sas_data2d.err_data
self.qx_data = sas_data2d.qx_data
self.qy_data = sas_data2d.qy_data
self.mask = sas_data2d.mask
x_max = max(math.fabs(sas_data2d.xmin), math.fabs(sas_data2d.xmax))
y_max = max(math.fabs(sas_data2d.ymin), math.fabs(sas_data2d.ymax))
## fitting range
if qmin is None:
self.qmin = 1e-16
if qmax is None:
self.qmax = math.sqrt(x_max * x_max + y_max * y_max)
## new error image for fitting purpose
if self.err_data is None or self.err_data == []:
self.res_err_data = np.ones(len(self.data))
else:
self.res_err_data = copy.deepcopy(self.err_data)
#self.res_err_data[self.res_err_data==0]=1
self.radius = np.sqrt(self.qx_data**2 + self.qy_data**2)
# Note: mask = True: for MASK while mask = False for NOT to mask
self.idx = ((self.qmin <= self.radius) &\
(self.radius <= self.qmax))
self.idx = (self.idx) & (self.mask)
self.idx = (self.idx) & (np.isfinite(self.data))
self.num_points = np.sum(self.idx)
def set_smearer(self, smearer):
"""
Set smearer
"""
if smearer is None:
return
self.smearer = smearer
self.smearer.set_index(self.idx)
self.smearer.get_data()
def set_fit_range(self, qmin=None, qmax=None):
"""
To set the fit range
"""
if qmin == 0.0:
self.qmin = 1e-16
elif qmin is not None:
self.qmin = qmin
if qmax is not None:
self.qmax = qmax
self.radius = np.sqrt(self.qx_data**2 + self.qy_data**2)
self.idx = ((self.qmin <= self.radius) &\
(self.radius <= self.qmax))
self.idx = (self.idx) & (self.mask)
self.idx = (self.idx) & (np.isfinite(self.data))
self.idx = (self.idx) & (self.res_err_data != 0)
def get_fit_range(self):
"""
return the range of data.x to fit
"""
return self.qmin, self.qmax
def size(self):
"""
Number of measurement points in data set after masking, etc.
"""
return np.sum(self.idx)
def residuals(self, fn):
"""
return the residuals
"""
if self.smearer is not None:
fn.set_index(self.idx)
gn = fn.get_value()
else:
gn = fn([self.qx_data[self.idx],
self.qy_data[self.idx]])
# use only the data point within ROI range
res = (self.data[self.idx] - gn) / self.res_err_data[self.idx]
return res, gn
def residuals_deriv(self, model, pars=[]):
"""
:return: residuals derivatives .
:note: in this case just return empty array
"""
return []
class FitAbort(Exception):
"""
Exception raise to stop the fit
"""
#pass
#print"Creating fit abort Exception"
class FitEngine:
def __init__(self):
"""
Base class for the fit engine
"""
#Dictionnary of fitArrange element (fit problems)
self.fit_arrange_dict = {}
self.weight_increase = {}
self.fitter_id = None
def set_model(self, model, id, pars=[], constraints=[], data=None):
"""
set a model on a given in the fit engine.
:param model: sas.models type
:param id: is the key of the fitArrange dictionary where model is saved as a value
:param pars: the list of parameters to fit
:param constraints: list of
tuple (name of parameter, value of parameters)
the value of parameter must be a string to constraint 2 different
parameters.
Example:
we want to fit 2 model M1 and M2 both have parameters A and B.
constraints can be ``constraints = [(M1.A, M2.B+2), (M1.B= M2.A *5),...,]``
:note: pars must contains only name of existing model's parameters
"""
if not pars:
raise ValueError("no fitting parameters")
if model is None:
raise ValueError("no model to fit")
if not issubclass(model.__class__, Model):
model = Model(model, data)
sasmodel = model.model
available_parameters = sasmodel.getParamList()
for p in pars:
if p not in available_parameters:
raise ValueError("parameter %s not available in model %s; use one of [%s] instead"
%(p, sasmodel.name, ", ".join(available_parameters)))
if id not in self.fit_arrange_dict:
self.fit_arrange_dict[id] = FitArrange()
self.fit_arrange_dict[id].set_model(model)
self.fit_arrange_dict[id].pars = pars
self.fit_arrange_dict[id].vals = [sasmodel.getParam(name) for name in pars]
self.fit_arrange_dict[id].constraints = constraints
def set_data(self, data, id, smearer=None, qmin=None, qmax=None):
"""
Receives plottable, creates a list of data to fit,set data
in a FitArrange object and adds that object in a dictionary
with key id.
:param data: data added
:param id: unique key corresponding to a fitArrange object with data
"""
if data.__class__.__name__ == 'Data2D':
fitdata = FitData2D(sas_data2d=data, data=data.data,
err_data=data.err_data)
else:
fitdata = FitData1D(x=data.x, y=data.y,
dx=data.dx, dy=data.dy, smearer=smearer)
fitdata.sas_data = data
fitdata.set_fit_range(qmin=qmin, qmax=qmax)
#A fitArrange is already created but contains model only at id
if id in self.fit_arrange_dict:
self.fit_arrange_dict[id].add_data(fitdata)
else:
#no fitArrange object has been create with this id
fitproblem = FitArrange()
fitproblem.add_data(fitdata)
self.fit_arrange_dict[id] = fitproblem
def get_model(self, id):
"""
:param id: id is key in the dictionary containing the model to return
:return: a model at this id or None if no FitArrange element was
created with this id
"""
if id in self.fit_arrange_dict:
return self.fit_arrange_dict[id].get_model()
else:
return None
def remove_fit_problem(self, id):
"""remove fitarrange in id"""
if id in self.fit_arrange_dict:
del self.fit_arrange_dict[id]
def select_problem_for_fit(self, id, value):
"""
select a couple of model and data at the id position in dictionary
and set in self.selected value to value
:param value: the value to allow fitting.
can only have the value one or zero
"""
if id in self.fit_arrange_dict:
self.fit_arrange_dict[id].set_to_fit(value)
def get_problem_to_fit(self, id):
"""
return the self.selected value of the fit problem of id
:param id: the id of the problem
"""
if id in self.fit_arrange_dict:
self.fit_arrange_dict[id].get_to_fit()
def set_weight_increase(self, fit_id, weight_increase):
self.weight_increase[fit_id] = weight_increase
def get_weight_increase(self, fit_id):
return self.weight_increase[fit_id]
class FitArrange:
def __init__(self):
"""
Class FitArrange contains a set of data for a given model
to perform the Fit.FitArrange must contain exactly one model
and at least one data for the fit to be performed.
model: the model selected by the user
Ldata: a list of data what the user wants to fit
"""
self.model = None
self.data_list = []
self.pars = []
self.vals = []
self.selected = 0
def set_model(self, model):
"""
set_model save a copy of the model
:param model: the model being set
"""
self.model = model
def add_data(self, data):
"""
add_data fill a self.data_list with data to fit
:param data: Data to add in the list
"""
if not data in self.data_list:
self.data_list.append(data)
def get_model(self):
"""
:return: saved model
"""
return self.model
def get_data(self):
"""
:return: list of data data_list
"""
return self.data_list[0]
def remove_data(self, data):
"""
Remove one element from the list
:param data: Data to remove from data_list
"""
if data in self.data_list:
self.data_list.remove(data)
def set_to_fit(self, value=0):
"""
set self.selected to 0 or 1 for other values raise an exception
:param value: integer between 0 or 1
"""
self.selected = value
def get_to_fit(self):
"""
return self.selected value
"""
return self.selected
class FResult(object):
"""
Storing fit result
"""
def __init__(self, model=None, param_list=None, data=None):
self.calls = None
self.fitness = None
self.chisqr = None
self.pvec = []
self.cov = []
self.info = None
self.mesg = None
self.success = None
self.stderr = None
self.residuals = []
self.index = []
self.model = model
self.data = data
self.theory = []
self.param_list = param_list
self.iterations = 0
self.inputs = []
self.fitter_id = None
if self.model is not None and self.data is not None:
self.inputs = [(self.model, self.data)]
def set_model(self, model):
"""
"""
self.model = model
def set_fitness(self, fitness):
"""
"""
self.fitness = fitness
def __str__(self):
"""
"""
if self.pvec is None and self.model is None and self.param_list is None:
return "No results"
sasmodel = self.model.model
pars = enumerate(sasmodel.getParamList())
msg1 = "[Iteration #: %s ]" % self.iterations
msg3 = "=== goodness of fit: %s ===" % (str(self.fitness))
msg2 = ["P%-3d %s......|.....%s" % (i, v, sasmodel.getParam(v))
for i,v in pars if v in self.param_list]
msg = [msg1, msg3] + msg2
return "\n".join(msg)
def print_summary(self):
"""
"""
print(str(self))
|
{
"content_hash": "9e4d422e99bf2f440a83414ea20d743d",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 98,
"avg_line_length": 31.921630094043888,
"alnum_prop": 0.5562211529018953,
"repo_name": "SasView/sasview",
"id": "e374f5a7af8fbe8231fc9e47ae64fe6faf51c9c1",
"size": "20366",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/sas/sascalc/fit/AbstractFitEngine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "60240"
},
{
"name": "Batchfile",
"bytes": "1616"
},
{
"name": "C",
"bytes": "11379"
},
{
"name": "C++",
"bytes": "217553"
},
{
"name": "CSS",
"bytes": "340"
},
{
"name": "Gherkin",
"bytes": "565"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "Inno Setup",
"bytes": "6892"
},
{
"name": "JavaScript",
"bytes": "27700"
},
{
"name": "Jupyter Notebook",
"bytes": "28926"
},
{
"name": "Makefile",
"bytes": "28052"
},
{
"name": "Python",
"bytes": "2959880"
},
{
"name": "Shell",
"bytes": "2063"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0015_collection_brands'),
]
operations = [
migrations.AddField(
model_name='product',
name='default',
field=models.ForeignKey(to='products.Category', null=True, blank=True, related_name='default_category'),
),
]
|
{
"content_hash": "8b96da3efe927dd52953569c58f74ef0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 116,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.6193693693693694,
"repo_name": "vanabo/mattress",
"id": "ba912d0efa17da0deb0f0ae3bede02150b284fc2",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/products/migrations/0016_product_default.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1351"
},
{
"name": "Batchfile",
"bytes": "1028"
},
{
"name": "CSS",
"bytes": "47386"
},
{
"name": "HTML",
"bytes": "46190"
},
{
"name": "JavaScript",
"bytes": "79707"
},
{
"name": "PowerShell",
"bytes": "1455"
},
{
"name": "Python",
"bytes": "153539"
}
],
"symlink_target": ""
}
|
"""
This module provides some utils for calculating metrics
"""
import numpy as np
from sklearn.metrics import average_precision_score, confusion_matrix
def softmax(raw_score, T=1):
exp_s = np.exp((raw_score - raw_score.max(axis=-1)[..., None])*T)
sum_s = exp_s.sum(axis=-1)
return exp_s / sum_s[..., None]
def top_k_acc(lb_set, scores, k=3):
idx = np.argsort(scores)[-k:]
return len(lb_set.intersection(idx)), len(lb_set)
def top_k_hit(lb_set, scores, k=3):
idx = np.argsort(scores)[-k:]
return len(lb_set.intersection(idx)) > 0, 1
def top_3_accuracy(score_dict, video_list):
return top_k_accuracy(score_dict, video_list, 3)
def top_k_accuracy(score_dict, video_list, k):
video_labels = [set([i.num_label for i in v.instances]) for v in video_list]
video_top_k_acc = np.array(
[top_k_hit(lb, score_dict[v.id], k=k) for v, lb in zip(video_list, video_labels)
if v.id in score_dict])
tmp = video_top_k_acc.sum(axis=0).astype(float)
top_k_acc = tmp[0] / tmp[1]
return top_k_acc
def video_mean_ap(score_dict, video_list):
avail_video_labels = [set([i.num_label for i in v.instances]) for v in video_list if
v.id in score_dict]
pred_array = np.array([score_dict[v.id] for v in video_list if v.id in score_dict])
gt_array = np.zeros(pred_array.shape)
for i in xrange(pred_array.shape[0]):
gt_array[i, list(avail_video_labels[i])] = 1
mean_ap = average_precision_score(gt_array, pred_array, average='macro')
return mean_ap
def mean_class_accuracy(scores, labels):
pred = np.argmax(scores, axis=1)
cf = confusion_matrix(labels, pred).astype(float)
cls_cnt = cf.sum(axis=1)
cls_hit = np.diag(cf)
return np.mean(cls_hit/cls_cnt)
|
{
"content_hash": "0f6d840a19cfa4d083846b3ccb7a41ee",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 88,
"avg_line_length": 29.766666666666666,
"alnum_prop": 0.6388577827547592,
"repo_name": "ZhanningGao/temporal-segment-networks",
"id": "b9804cc0c12d9da811933b60dbbba8b26c158075",
"size": "1786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyActionRecog/utils/metrics.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Matlab",
"bytes": "45610"
},
{
"name": "Python",
"bytes": "55807"
},
{
"name": "Shell",
"bytes": "5945"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import logging
import os
import re
import shutil
import sys
import tempfile
import zipfile
from distutils.util import change_root
from distutils import sysconfig
from email.parser import FeedParser
from pip._vendor import pkg_resources, six
from pip._vendor.six.moves import configparser
from pip._vendor.six.moves.urllib import parse as urllib_parse
import pip.wheel
from pip.compat import native_str, WINDOWS
from pip.download import is_url, url_to_path, path_to_url, is_archive_file
from pip.exceptions import (
InstallationError, UninstallationError, UnsupportedWheel,
)
from pip.index import Link
from pip.locations import (
bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user,
)
from pip.utils import (
display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir,
dist_in_usersite, dist_in_site_packages, egg_link_path, make_path_relative,
call_subprocess, is_prerelease, read_text_file, FakeFile, _make_build_dir,
)
from pip.utils.logging import indent_log
from pip.req.req_uninstall import UninstallPathSet
from pip.vcs import vcs
from pip.wheel import move_wheel_files, Wheel, wheel_ext
logger = logging.getLogger(__name__)
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
url=None, as_egg=False, update=True, prereleases=None,
editable_options=None, pycompile=True):
self.extras = ()
if isinstance(req, six.string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
if editable_options is None:
editable_options = {}
self.editable_options = editable_options
self.url = url
self.as_egg = as_egg
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This hold the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
self._temp_build_dir = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.pycompile = pycompile
# True if pre-releases are acceptable
if prereleases:
self.prereleases = True
elif self.req is not None:
self.prereleases = any([
is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs
])
else:
self.prereleases = False
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None):
name, url, extras_override = parse_editable(editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir,
editable=True,
url=url,
editable_options=extras_override,
prereleases=True)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(cls, name, comes_from=None, prereleases=None):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
url = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
elif (os.path.isdir(path)
and (os.path.sep in name or name.startswith('.'))):
if not is_installable_dir(path):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not "
"found." % name
)
link = Link(path_to_url(name))
elif is_archive_file(path):
if not os.path.isfile(path):
logger.warning(
'Requirement %r looks like a filename, but the file does '
'not exist',
name
)
link = Link(path_to_url(name))
# it's a local file, dir, or url
if link:
url = link.url
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', url):
url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
# wheel file
if link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel on this platform." %
wheel.filename
)
req = "%s==%s" % (wheel.name, wheel.version)
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req = link.egg_fragment
# a requirement specifier
else:
req = name
return cls(req, comes_from, url=url, prereleases=prereleases)
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, six.string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir, unpack=True):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr
# need this)
if not os.path.exists(build_dir):
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def correct_build_location(self):
"""If the build location was a temporary directory, this will move it
to a new more permanent location"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
old_location = self._temp_build_dir
new_build_dir = self._ideal_build_dir
del self._ideal_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
new_location = os.path.join(new_build_dir, name)
if not os.path.exists(new_build_dir):
logger.debug('Creating directory %s', new_build_dir)
_make_build_dir(new_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug(
'Moving package %s from %s to new location %s',
self, display_path(old_location), display_path(new_location),
)
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return native_str(self.req.project_name)
@property
def url_name(self):
if self.req is None:
return None
return urllib_parse.quote(self.req.project_name.lower())
@property
def setup_py(self):
try:
import setuptools # noqa
except ImportError:
# Setuptools is not available
raise InstallationError(
"setuptools must be installed to install from a source "
"distribution"
)
setup_file = 'setup.py'
if self.editable_options and 'subdirectory' in self.editable_options:
setup_py = os.path.join(self.source_dir,
self.editable_options['subdirectory'],
setup_file)
else:
setup_py = os.path.join(self.source_dir, setup_file)
# Python2 __file__ should not be unicode
if six.PY2 and isinstance(setup_py, six.text_type):
setup_py = setup_py.encode(sys.getfilesystemencoding())
return setup_py
def run_egg_info(self):
assert self.source_dir
if self.name:
logger.info(
'Running setup.py (path:%s) egg_info for package %s',
self.setup_py, self.name,
)
else:
logger.info(
'Running setup.py (path:%s) egg_info for package from %s',
self.setup_py, self.url,
)
with indent_log():
# if it's distribute>=0.7, it won't contain an importable
# setuptools, and having an egg-info dir blocks the ability of
# setup.py to find setuptools plugins, so delete the egg-info dir
# if no setuptools. it will get recreated by the run of egg_info
# NOTE: this self.name check only works when installing from a
# specifier (not archive path/urls)
# TODO: take this out later
if (self.name == 'distribute'
and not os.path.isdir(
os.path.join(self.source_dir, 'setuptools'))):
rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
egg_info_cmd = [sys.executable, '-c', script, 'egg_info']
# We can't put the .egg-info files at the root, because then the
# source code will be mistaken for an installed egg, causing
# problems
if self.editable:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=cwd,
filter_stdout=self._filter_install,
show_stdout=False,
command_level=logging.DEBUG,
command_desc='python setup.py egg_info')
if not self.req:
self.req = pkg_resources.Requirement.parse(
"%(Name)s==%(Version)s" % self.pkg_info())
self.correct_build_location()
# FIXME: This is a lame hack, entirely for PasteScript which has
# a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
import tokenize
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(
getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'),
__file__,
'exec'
))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
data = read_text_file(filename)
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv
# environment
if (
os.path.exists(
os.path.join(root, dir, 'bin', 'python')
)
or os.path.exists(
os.path.join(
root, dir, 'Scripts', 'Python.exe'
)
)):
dirs.remove(dir)
# Also don't search through tests
elif dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError(
'No files/directories in %s (from %s)' % (base, filename)
)
assert filenames, \
"No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This
# can easily be the case if there is a dist folder which contains
# an extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(
key=lambda x: x.count(os.path.sep)
+ (os.path.altsep and x.count(os.path.altsep) or 0)
)
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
data = self.egg_info_data(filename)
if not data:
return []
result = []
for line in data.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
result.append(line)
return result
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warning(
'No PKG-INFO file found in %s',
display_path(self.egg_info_path('PKG-INFO')),
)
p.feed(data or '')
return p.close()
@property
def dependency_links(self):
return self.egg_info_lines('dependency_links.txt')
_requirements_section_re = re.compile(r'\[(.*?)\]')
def requirements(self, extras=()):
if self.satisfied_by:
for r in self.satisfied_by.requires(extras):
yield str(r)
return
in_extra = None
for line in self.egg_info_lines('requires.txt'):
match = self._requirements_section_re.match(line.lower())
if match:
in_extra = match.group(1)
continue
if in_extra and in_extra not in extras:
logger.debug('skipping extra %s', in_extra)
# Skip requirement for an extra we aren't requiring
continue
yield line
@property
def installed_version(self):
return self.pkg_info()['version']
def assert_source_matches_version(self):
assert self.source_dir
version = self.installed_version
if version not in self.req:
logger.warning(
'Requested %s, but installing version %s',
self,
self.installed_version,
)
else:
logger.debug(
'Source in %s has version %s, which satisfies requirement %s',
display_path(self.source_dir),
version,
self,
)
def update_editable(self, obtain=True):
if not self.url:
logger.debug(
"Cannot update repository at %s; repository location is "
"unknown",
self.source_dir,
)
return
assert self.editable
assert self.source_dir
if self.url.startswith('file:'):
# Static paths don't get updated
return
assert '+' in self.url, "bad url: %r" % self.url
if not self.update:
return
vc_type, url = self.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.url, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError(
"Cannot uninstall requirement %s, not installed" % (self.name,)
)
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
pip_egg_info_path = os.path.join(dist.location,
dist.egg_name()) + '.egg-info'
dist_info_path = os.path.join(dist.location,
'-'.join(dist.egg_name().split('-')[:2])
) + '.dist-info'
# Workaround - http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
debian_egg_info_path = pip_egg_info_path.replace(
'-py%s' % pkg_resources.PY_MAJOR, '')
easy_install_egg = dist.egg_name() + '.egg'
develop_egg_link = egg_link_path(dist)
pip_egg_info_exists = os.path.exists(pip_egg_info_path)
debian_egg_info_exists = os.path.exists(debian_egg_info_path)
dist_info_exists = os.path.exists(dist_info_path)
if pip_egg_info_exists or debian_egg_info_exists:
# package installed by pip
if pip_egg_info_exists:
egg_info_path = pip_egg_info_path
else:
egg_info_path = debian_egg_info_path
paths_to_remove.add(egg_info_path)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata(
'installed-files.txt').splitlines():
path = os.path.normpath(
os.path.join(egg_info_path, installed_file)
)
paths_to_remove.add(path)
# FIXME: need a test for this elif block
# occurs with --single-version-externally-managed/--record outside
# of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [
p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif dist.location.endswith(easy_install_egg):
# package installed by easy_install
paths_to_remove.add(dist.location)
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
with open(develop_egg_link, 'r') as fh:
link_pointer = os.path.normcase(fh.readline().strip())
assert (link_pointer == dist.location), (
'Egg-link %s does not match installed location of %s '
'(at %s)' % (link_pointer, self.name, dist.location)
)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif dist_info_exists:
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, script))
if WINDOWS:
paths_to_remove.add(os.path.join(bin_dir, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = configparser.SafeConfigParser()
config.readfp(
FakeFile(dist.get_metadata_lines('entry_points.txt'))
)
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
if dist_in_usersite(dist):
bin_dir = bin_user
else:
bin_dir = bin_py
paths_to_remove.add(os.path.join(bin_dir, name))
if WINDOWS:
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '.exe.manifest'
)
paths_to_remove.add(
os.path.join(bin_dir, name) + '-script.py'
)
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error(
"Can't rollback %s, nothing uninstalled.", self.project_name,
)
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error(
"Can't commit %s, nothing uninstalled.", self.project_name,
)
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.installed_version)
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warning('Deleting %s', display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warning(
'Backing up %s to %s',
display_path(archive_path),
display_path(dest_file),
)
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(
archive_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True
)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.info('Saved %s', display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix + os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix)
)
name = name[len(prefix) + 1:]
name = name.replace(os.path.sep, '/')
return name
def install(self, install_options, global_options=(), root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
version = pip.wheel.wheel_version(self.source_dir)
pip.wheel.check_compatibility(version, self.name)
self.move_wheel_files(self.source_dir, root=root)
self.install_succeeded = True
return
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
)
install_args += list(global_options) + \
['install', '--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if self.pycompile:
install_args += ["--compile"]
else:
install_args += ["--no-compile"]
if running_under_virtualenv():
# FIXME: I'm not sure if this is a reasonable location;
# probably not but we can't put it in the default location, as
# that is a virtualenv symlink that isn't writable
py_ver_str = 'python' + sysconfig.get_python_version()
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
py_ver_str)]
logger.info('Running setup.py install for %s', self.name)
with indent_log():
call_subprocess(
install_args + install_options,
cwd=self.source_dir,
filter_stdout=self._filter_install,
show_stdout=False,
)
if not os.path.exists(record_filename):
logger.debug('Record file %s not found', record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install
# command so we unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
with open(record_filename) as f:
for line in f:
directory = os.path.dirname(line)
if directory.endswith('.egg-info'):
egg_info_dir = prepend_root(directory)
break
else:
logger.warning(
'Could not find .egg-info directory in install record'
' for %s',
self,
)
# FIXME: put the record somewhere
# FIXME: should this be an error?
return
new_lines = []
with open(record_filename) as f:
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(
make_path_relative(
prepend_root(filename), egg_info_dir)
)
inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
with open(inst_files_path, 'w') as f:
f.write('\n'.join(new_lines) + '\n')
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
os.rmdir(temp_location)
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if os.path.exists(self.delete_marker_filename):
logger.debug('Removing source in %s', self.source_dir)
if self.source_dir:
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.info('Running setup.py develop for %s', self.name)
with indent_log():
# FIXME: should we do --install-headers here too?
cwd = self.source_dir
if self.editable_options and \
'subdirectory' in self.editable_options:
cwd = os.path.join(cwd, self.editable_options['subdirectory'])
call_subprocess(
[
sys.executable,
'-c',
"import setuptools, tokenize; __file__=%r; exec(compile("
"getattr(tokenize, 'open', open)(__file__).read().replace"
"('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py
]
+ list(global_options)
+ ['develop', '--no-deps']
+ list(install_options),
cwd=cwd, filter_stdout=self._filter_install,
show_stdout=False)
self.install_succeeded = True
def _filter_install(self, line):
level = logging.INFO
for regex in [
r'^running .*',
r'^writing .*',
'^creating .*',
'^[Cc]opying .*',
r'^reading .*',
r"^removing .*\.egg-info' \(and everything under it\)$",
r'^byte-compiling ',
r'^SyntaxError:',
r'^SyntaxWarning:',
# Not sure what this warning is, but it seems harmless:
r"^warning: manifest_maker: standard file '-c' not found$"]:
if re.search(regex, line.strip()):
level = logging.DEBUG
break
return (level, line)
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately."""
if self.req is None:
return False
try:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# if we've already set distribute as a conflict to setuptools
# then this check has already run before. we don't want it to
# run again, and return False, since it would block the uninstall
# TODO: remove this later
if (self.req.project_name == 'setuptools'
and self.conflicts_with
and self.conflicts_with.project_name == 'distribute'):
return True
else:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(
self.req.project_name
)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif (running_under_virtualenv()
and dist_in_site_packages(existing_dist)):
raise InstallationError(
"Will not install to the user site because it will "
"lack sys.path precedence to %s in %s" %
(existing_dist.project_name, existing_dist.location)
)
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.url and '.whl' in self.url
def move_wheel_files(self, wheeldir, root=None):
move_wheel_files(
self.name, self.req, wheeldir,
user=self.use_user_site,
home=self.target_dir,
root=root,
pycompile=self.pycompile,
)
@property
def delete_marker_filename(self):
assert self.source_dir
return os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)
def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req
def _build_req_from_url(url):
parts = [p for p in url.split('#', 1)[0].split('/') if p]
req = None
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
return req
def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None
def parse_editable(editable_req, default_vcs=None):
"""Parses svn+http://blahblah@rev#egg=Foobar into a requirement
(Foobar) and a URL"""
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError(
"Directory %r is not installable. File 'setup.py' not found." %
url_no_extras
)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
if extras:
return (
None,
url_no_extras,
pkg_resources.Requirement.parse(
'__placeholder__' + extras
).extras,
)
else:
return None, url_no_extras, None
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
break
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
'%s should either be a path to a local project or a VCS url '
'beginning with svn+, git+, hg+, or bzr+' %
editable_req
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
try:
options = _build_editable_options(editable_req)
except Exception as exc:
raise InstallationError(
'--editable=%s error in editable options:%s' % (editable_req, exc)
)
if not options or 'egg' not in options:
req = _build_req_from_url(editable_req)
if not req:
raise InstallationError(
'--editable=%s is not the right format; it must have '
'#egg=Package' % editable_req
)
else:
req = options['egg']
package = _strip_postfix(req)
return package, url, options
|
{
"content_hash": "8a9ba787913cba503acf332b41d80df8",
"timestamp": "",
"source": "github",
"line_count": 1064,
"max_line_length": 79,
"avg_line_length": 38.04981203007519,
"alnum_prop": 0.5233049277510189,
"repo_name": "mattcaldwell/pip",
"id": "1a928579e221906ed11a1ad071e5d7938f3d2312",
"size": "40485",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pip/req/req_install.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from opencv_engine.tiff_support import TiffMixin, TIFF_FORMATS
try:
import cv
except ImportError:
import cv2.cv as cv
from colour import Color
from thumbor.engines import BaseEngine
from pexif import JpegFile, ExifSegment
try:
from thumbor.ext.filters import _composite
FILTERS_AVAILABLE = True
except ImportError:
FILTERS_AVAILABLE = False
FORMATS = {
'.jpg': 'JPEG',
'.jpeg': 'JPEG',
'.gif': 'GIF',
'.png': 'PNG'
}
FORMATS.update(TIFF_FORMATS)
class Engine(BaseEngine, TiffMixin):
@property
def image_depth(self):
if self.image is None:
return 8
return cv.GetImage(self.image).depth
@property
def image_channels(self):
if self.image is None:
return 3
return self.image.channels
@classmethod
def parse_hex_color(cls, color):
try:
color = Color(color).get_rgb()
return tuple(c * 255 for c in reversed(color))
except Exception:
return None
def gen_image(self, size, color_value):
img0 = cv.CreateImage(size, self.image_depth, self.image_channels)
if color_value == 'transparent':
color = (255, 255, 255, 255)
else:
color = self.parse_hex_color(color_value)
if not color:
raise ValueError('Color %s is not valid.' % color_value)
cv.Set(img0, color)
return img0
def create_image(self, buffer, create_alpha=True):
self.extension = self.extension or '.tif'
self.no_data_value = None
# FIXME: opencv doesn't support gifs, even worse, the library
# segfaults when trying to decoding a gif. An exception is a
# less drastic measure.
try:
if FORMATS[self.extension] == 'GIF':
raise ValueError("opencv doesn't support gifs")
except KeyError:
pass
if FORMATS[self.extension] == 'TIFF':
self.buffer = buffer
img0 = self.read_tiff(buffer, create_alpha)
else:
imagefiledata = cv.CreateMatHeader(1, len(buffer), cv.CV_8UC1)
cv.SetData(imagefiledata, buffer, len(buffer))
img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_UNCHANGED)
if FORMATS[self.extension] == 'JPEG':
try:
info = JpegFile.fromString(buffer).get_exif()
if info:
self.exif = info.data
self.exif_marker = info.marker
except Exception:
pass
return img0
@property
def size(self):
return cv.GetSize(self.image)
def normalize(self):
pass
def resize(self, width, height):
dims = (int(round(width, 0)), int(round(height, 0)))
self.image = cv2.resize(numpy.asarray(self.image), dims, interpolation=cv2.INTER_CUBIC)
def crop(self, left, top, right, bottom):
x1, y1 = left, top
x2, y2 = right, bottom
self.image = self.image[y1:y2, x1:x2]
def rotate(self, degrees):
""" rotates the image by specified number of degrees.
Uses more effecient flip and transpose for multiples of 90
Args:
degrees - degrees to rotate image by (CCW)
"""
image = numpy.asarray(self.image)
# number passed to flip corresponds to rotation about: (0) x-axis, (1) y-axis, (-1) both axes
if degrees == 270:
transposed = cv2.transpose(image)
rotated = cv2.flip(transposed, 1)
elif degrees == 180:
rotated = cv2.flip(image, -1)
elif degrees == 90:
transposed = cv2.transpose(image)
rotated = cv2.flip(transposed, 0)
else:
rotated = self._rotate(image, degrees)
self.image = cv.fromarray(rotated)
def _rotate(self, image, degrees):
""" rotate an image about it's center by an arbitrary number of degrees
Args:
image - image to rotate (CvMat array)
degrees - number of degrees to rotate by (CCW)
Returns:
rotated image (numpy array)
"""
(h, w) = image.shape[:2]
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center, degrees, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def flip_vertically(self):
""" flip an image vertically (about x-axis) """
image = numpy.asarray(self.image)
self.image = cv.fromarray(cv2.flip(image, 0))
def flip_horizontally(self):
""" flip an image horizontally (about y-axis) """
image = numpy.asarray(self.image)
self.image = cv.fromarray(cv2.flip(image, 1))
def read(self, extension=None, quality=None):
if not extension and FORMATS[self.extension] == 'TIFF':
# If the image loaded was a tiff, return the buffer created earlier.
return self.buffer
else:
if quality is None:
quality = self.context.config.QUALITY
options = None
self.extension = extension or self.extension
try:
if FORMATS[self.extension] == 'JPEG':
options = [cv2.IMWRITE_JPEG_QUALITY, quality]
except KeyError:
options = [cv2.IMWRITE_JPEG_QUALITY, quality]
if FORMATS[self.extension] == 'TIFF':
channels = cv2.split(numpy.asarray(self.image))
data = self.write_channels_to_tiff_buffer(channels)
else:
success, numpy_data = cv2.imencode(self.extension, numpy.asarray(self.image), options or [])
if success:
data = numpy_data.tostring()
else:
raise Exception("Failed to encode image")
if FORMATS[self.extension] == 'JPEG' and self.context.config.PRESERVE_EXIF_INFO:
if hasattr(self, 'exif'):
img = JpegFile.fromString(data)
img._segments.insert(0, ExifSegment(self.exif_marker, None, self.exif, 'rw'))
data = img.writeString()
return data
def set_image_data(self, data):
cv.SetData(self.image, data)
def image_data_as_rgb(self, update_image=True):
if self.image.channels == 4:
mode = 'BGRA'
elif self.image.channels == 3:
mode = 'BGR'
else:
raise NotImplementedError("Only support fetching image data as RGB for 3/4 channel images")
return mode, self.image.tostring()
|
{
"content_hash": "fe04357e173f61c7f55cca70dd67e252",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 108,
"avg_line_length": 32.92079207920792,
"alnum_prop": 0.5688721804511279,
"repo_name": "dronedeploy/opencv-engine",
"id": "e27e8c2108a5c2cf27d8957891b008605836bd66",
"size": "6921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opencv_engine/engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "326"
},
{
"name": "Python",
"bytes": "31120"
}
],
"symlink_target": ""
}
|
from gpconfig_modules.compare_segment_guc import MultiValueGuc
from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
class GucCollection:
"""
provide an enhanced dict of gucs, with responsibilities to assemble sets of info per segment.
"""
COORDINATOR_KEY = '-1'
def __init__(self):
self.gucs = {}
def update_list(self, guc_list):
for guc in guc_list:
self.update(guc)
def update(self, guc):
existing = self.gucs.get(guc.context)
if existing:
if isinstance(existing, DatabaseSegmentGuc) and type(guc) == type(existing):
pass # discard mirror
else:
self.gucs[guc.context] = MultiValueGuc(existing, guc)
else:
self.gucs[guc.context] = guc
def are_segments_consistent(self):
self.validate()
if not self._check_consistency_within_single_segment():
return False
if not self._check_consistency_across_segments():
return False
return True
def _check_consistency_across_segments(self):
segments_only = [v for k, v in self.gucs.items() if self.COORDINATOR_KEY != k]
segment_values = [guc.get_value() for guc in segments_only]
if len(set(segment_values)) > 1:
return False
return True
def _check_consistency_within_single_segment(self):
for guc in list(self.gucs.values()):
if not guc.is_internally_consistent():
return False
return True
def validate(self):
if len(self.gucs) < 2 or self.COORDINATOR_KEY not in self.gucs:
raise Exception("Collections must have at least a coordinator and segment value")
def values(self):
return sorted(list(self.gucs.values()), key=lambda x: x.context)
def report(self):
self.validate()
if self.are_segments_consistent():
last_seg_key = sorted(list(self.gucs.keys()), reverse=True)[0]
report = [self.gucs[self.COORDINATOR_KEY].report_success_format(),
self.gucs[last_seg_key].report_success_format()]
return "\n".join(report)
else:
sorted_gucs = sorted(list(self.gucs.values()), key=lambda x: x.context)
report = []
for guc in sorted_gucs:
report.extend(guc.report_fail_format())
return "\n".join(report)
|
{
"content_hash": "369b63513dbee80e004994d670f0d429",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 97,
"avg_line_length": 33.52054794520548,
"alnum_prop": 0.6011442582754393,
"repo_name": "greenplum-db/gpdb",
"id": "1f1978e751ecc6e0955d7260522a5ddc8a53bb6f",
"size": "2447",
"binary": false,
"copies": "10",
"ref": "refs/heads/main",
"path": "gpMgmt/bin/gpconfig_modules/guc_collection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3266"
},
{
"name": "Awk",
"bytes": "836"
},
{
"name": "Batchfile",
"bytes": "15613"
},
{
"name": "C",
"bytes": "48211707"
},
{
"name": "C++",
"bytes": "12681024"
},
{
"name": "CMake",
"bytes": "41408"
},
{
"name": "DTrace",
"bytes": "3833"
},
{
"name": "Emacs Lisp",
"bytes": "4164"
},
{
"name": "Fortran",
"bytes": "14873"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "504216"
},
{
"name": "HTML",
"bytes": "215381"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "254578"
},
{
"name": "M4",
"bytes": "133878"
},
{
"name": "Makefile",
"bytes": "511186"
},
{
"name": "PLpgSQL",
"bytes": "9280413"
},
{
"name": "Perl",
"bytes": "1161283"
},
{
"name": "PowerShell",
"bytes": "422"
},
{
"name": "Python",
"bytes": "3404111"
},
{
"name": "Roff",
"bytes": "30385"
},
{
"name": "Ruby",
"bytes": "299639"
},
{
"name": "SCSS",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "403369"
},
{
"name": "XS",
"bytes": "7098"
},
{
"name": "XSLT",
"bytes": "448"
},
{
"name": "Yacc",
"bytes": "748098"
},
{
"name": "sed",
"bytes": "1231"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import time
try:
import simplejson as json
except ImportError:
import json
import logging
from tornado.web import HTTPError
from octopus.core.communication import HttpResponse, Http400, Http404, Http403, HttpConflict, Http500
from octopus.core.enums.rendernode import *
from octopus.core import enums, singletonstats, singletonconfig
from octopus.core.framework import ResourceNotFoundError
from octopus.dispatcher.model import RenderNode
from octopus.dispatcher.model.filter.rendernode import IFilterRenderNode
from octopus.dispatcher.webservice import DispatcherBaseResource
from puliclient.model.renderNode import RenderNode as RenderNodeModel
logger = logging.getLogger("main.dispatcher")
class RenderNodeNotFoundError(ResourceNotFoundError):
"""
Raised when a request is sent for a node that is not a attached to root.
"""
def __init__(self, node, *args, **kwargs):
ResourceNotFoundError.__init__(self, node=node, *args, **kwargs)
class RenderNodesResource(DispatcherBaseResource):
"""
Lists the render nodes known by the dispatcher.
:param: request the HTTP request
"""
def get(self):
rendernodes = self.getDispatchTree().renderNodes.values()
content = {'rendernodes': list(rendernode.to_json() for rendernode in rendernodes)}
content = json.dumps(content)
self.writeCallback(content)
class RenderNodeResource(DispatcherBaseResource):
## Sends the JSON detailed representation of a given render node, url: http://server:8004/rendernodes/<rn:port>
#
# @param request the HTTP request object for this request
# @param computerName the name of the requested render node
#
def get(self, computerName):
computerName = computerName.lower()
try:
rendernode = self.getDispatchTree().renderNodes[computerName]
except KeyError:
return Http404("RenderNode not found")
content = rendernode.to_json()
content = json.dumps(content)
self.writeCallback(content)
def post(self, computerName):
"""
A worker send a request to get registered on the server.
"""
if singletonconfig.get('CORE', 'GET_STATS'):
singletonstats.theStats.cycleCounts['add_rns'] += 1
computerName = computerName.lower()
if computerName.startswith(('1', '2')):
return Http403(message="Cannot register a RenderNode without a name", content="Cannot register a RenderNode without a name")
dct = self.getBodyAsJSON()
if computerName in self.getDispatchTree().renderNodes:
# When the registering worker is already listed in RN list
logger.warning("RenderNode already registered: %s" % computerName)
existingRN = self.getDispatchTree().renderNodes[computerName]
if 'commands' not in dct:
# No commands in current RN, reset command that might be still assigned to this RN
existingRN.reset()
else:
logger.warning("Reset commands that are assigned to this RN: %r" % dct.get('commands', '-'))
for cmdId in dct['commands']:
existingRN.commands[cmdId] = self.getDispatchTree().commands[cmdId]
if 'status' in dct:
existingRN.status = int(dct['status'])
return HttpResponse(304, "RenderNode already registered.")
else:
# Add a new worker (and set infos given in request body)
for key in ('name', 'port', 'status', 'cores', 'speed', 'ram', 'pools', 'caracteristics'):
if not key in dct:
return Http400("Missing key %r" % key, content="Missing key %r" % key)
port = int(dct['port'])
status = int(dct['status'])
if status not in (RN_UNKNOWN, RN_PAUSED, RN_IDLE, RN_BOOTING):
# FIXME: CONFLICT is not a good value maybe
return HttpConflict("Unallowed status for RenderNode registration")
cores = int(dct['cores'])
speed = float(dct['speed'])
ram = int(dct['ram'])
pools = dct['pools']
caracteristics = dct['caracteristics']
name, port = computerName.split(":", 1)
puliversion = dct.get('puliversion', "unknown")
createDate = dct.get('createDate', time.time())
renderNode = RenderNode(None, computerName, cores, speed, name, port, ram, caracteristics, puliversion=puliversion, createDate=createDate)
renderNode.status = status
poolList = []
# check the existence of the pools
for poolName in pools:
try:
pool = self.getDispatchTree().pools[poolName]
poolList.append(pool)
except KeyError:
return HttpConflict("Pool %s is not a registered pool", poolName)
# add the rendernode to the pools
for pool in poolList:
pool.addRenderNode(renderNode)
# add the rendernode to the list of rendernodes
renderNode.pools = poolList
self.getDispatchTree().renderNodes[renderNode.name] = renderNode
self.writeCallback(json.dumps(renderNode.to_json()))
#@queue
def put(self, computerName):
computerName = computerName.lower()
try:
renderNode = self.getDispatchTree().renderNodes[computerName]
except KeyError:
return Http404("RenderNode %s not found" % computerName)
dct = self.getBodyAsJSON()
for key in dct:
if key == "cores":
renderNode.coresNumber = int(dct["cores"])
elif key == "speed":
renderNode.speed = float(dct["speed"])
elif key == "ram":
renderNode.ramSize = int(dct["ram"])
else:
return Http403("Modifying %r attribute is not authorized." % key)
self.writeCallback(json.dumps(renderNode.to_json()))
# Removes a RenderNode from the dispatchTree and all pools.
# Also call RN's reset method to remove assigned commands.
#
# @param request the HTTP request object for this request
# @param computerName the name of the requested render node
#
#@fqdn_request_decorator
#@queue
def delete(self, computerName):
computerName = computerName.lower()
try:
renderNode = self.getDispatchTree().renderNodes[computerName]
except KeyError:
return Http404("RenderNode not found")
if renderNode.status in [RN_ASSIGNED, RN_WORKING]:
renderNode.reset()
for pool in self.getDispatchTree().pools.values():
pool.removeRenderNode(renderNode)
renderNode.remove()
class RenderNodeCommandsResource(DispatcherBaseResource):
#@queue
def put(self, computerName, commandId):
'''Update command `commandId` running on rendernode `renderNodeId`.
Returns "200 OK" on success, or "404 Bad Request" if the provided json data is not valid.
'''
if singletonconfig.get('CORE', 'GET_STATS'):
singletonstats.theStats.cycleCounts['update_commands'] += 1
computerName = computerName.lower()
# try:
# updateDict = self.sanitizeUpdateDict(self.getBodyAsJSON())
# except TypeError, e:
# return Http400(repr(e.args))
updateDict = self.getBodyAsJSON()
updateDict['renderNodeName'] = computerName
try:
self.framework.application.updateCommandApply(updateDict)
except (KeyError, IndexError) as e:
raise Http404(str(e))
except Exception, e:
raise Http500("Exception during command update")
self.writeCallback("Command updated")
#@queue
def delete(self, computerName, commandId):
computerName = computerName.lower()
commandId = int(commandId)
try:
computer = self.framework.application.dispatchTree.renderNodes[computerName]
except KeyError:
return HTTPError(404, "No such RenderNode")
try:
command = computer.commands[commandId]
except KeyError:
return HTTPError(404, "No such command running on this RenderNode")
if command.id not in computer.commands:
return HTTPError(400, "Command %d not running on RenderNode %s" % (command.id, computer.name))
else:
if enums.command.isFinalStatus(command.status):
if enums.command.CMD_DONE == command.status:
command.completion = 1.0
command.finish()
msg = "Command %d removed successfully." % commandId
self.writeCallback(msg)
else:
# command.cancel() ??? dans ce cas c'est pas ce qu'on devrait faire ??? FIXME
message = "Cannot remove a running command from a RenderNode."
return HTTPError(403, message)
class RenderNodeSysInfosResource(DispatcherBaseResource):
#@queue
def put(self, computerName):
computerName = computerName.lower()
rns = self.getDispatchTree().renderNodes
if not computerName in rns:
raise Http404("RenderNode not found")
dct = self.getBodyAsJSON()
renderNode = rns[computerName]
if "puliversion" in dct:
renderNode.puliversion = dct.get('puliversion', "unknown")
if "caracteristics" in dct:
renderNode.caracteristics = eval(str(dct["caracteristics"]))
if "cores" in dct:
renderNode.cores = int(dct["cores"])
if "createDate" in dct:
renderNode.createDate = int(dct["createDate"])
if "ram" in dct:
renderNode.ram = int(dct["ram"])
if "systemFreeRam" in dct:
renderNode.systemFreeRam = int(dct["systemFreeRam"])
if "systemSwapPercentage" in dct:
renderNode.systemSwapPercentage = float(dct["systemSwapPercentage"])
if "speed" in dct:
renderNode.speed = float(dct["speed"])
if "performance" in dct:
renderNode.performance = float(dct["performance"])
if "status" in dct:
if renderNode.status == RN_UNKNOWN:
renderNode.status = int(dct["status"])
logger.info("status reported is %d" % renderNode.status)
# if renderNode.status != int(dct["status"]):
# logger.warning("The status reported by %s = %r is different from the status on dispatcher %r" % (renderNode.name, RN_STATUS_NAMES[dct["status"]],RN_STATUS_NAMES[renderNode.status]))
if "isPaused" in dct and "status" in dct:
logger.debug("reported for %r: remoteStatus=%r remoteIsPaused=%r" % (renderNode.name, RN_STATUS_NAMES[dct["status"]], dct['isPaused']))
renderNode.lastAliveTime = time.time()
renderNode.isRegistered = True
class RenderNodesPerfResource(DispatcherBaseResource):
"""
Sets a performance index (float) for one or several given rendernode names
TOFIX: might not be actually used, need to verify
"""
#@queue
def put(self):
dct = self.getBodyAsJSON()
for computerName, perf in dct.items():
renderNode = self.getDispatchTree().renderNodes[computerName]
renderNode.performance = float(perf)
self.writeCallback("Performance indexes have been set.")
class RenderNodeResetResource(DispatcherBaseResource):
#@queue
def put(self, computerName):
computerName = computerName.lower()
rns = self.getDispatchTree().renderNodes
if not computerName in rns:
return Http404("RenderNode not found")
dct = self.getBodyAsJSON()
renderNode = rns[computerName]
noMoreCmd = int(dct["nomorecmd"])
if noMoreCmd:
renderNode.reset()
class RenderNodeQuarantineResource(DispatcherBaseResource):
def put(self):
"""
Used to set a quarantine on a list of rendernodes. Quarantine rns have a flag "excluded"
that prevent them to be considered in assignement process.
example: curl -d '{"quarantine":true,"rns":["vfxpc64:9005"]}' -X PUT "http://pulitest:8004/rendernodes/quarantine/"
"""
dct = self.getBodyAsJSON()
quarantine = dct["quarantine"]
rns = self.getDispatchTree().renderNodes
for computerName in dct["rns"]:
if computerName not in rns:
logger.warning("following RN '%s' is not referenced, ignoring..." % computerName)
continue
renderNode = rns[computerName]
renderNode.excluded = quarantine
if not quarantine:
renderNode.history.clear()
renderNode.tasksHistory.clear()
logging.getLogger("main.dispatcher.webservice").info("Rendernode quarantine state changed: %s -> quarantine=%s" % (computerName, quarantine))
self.writeCallback("Quarantine attributes set.")
class RenderNodePausedResource(DispatcherBaseResource):
#@queue
def put(self, computerName):
dct = self.getBodyAsJSON()
paused = dct['paused']
killproc = dct['killproc']
computerName = computerName.lower()
rns = self.getDispatchTree().renderNodes
if not computerName in rns:
return Http404("RenderNode not found")
renderNode = rns[computerName]
if paused:
renderNode.status = RN_PAUSED
if killproc:
renderNode.reset(paused=True)
else:
# FIXME maybe set this to RN_FINISHING ?
renderNode.status = RN_IDLE
renderNode.excluded = False
class RenderNodeQueryResource(DispatcherBaseResource, IFilterRenderNode):
def createRenderNodeRepr(self, pNode):
"""
Create a json representation for a given node.
param: render node to explore
return: puliclient.model.rendernode object (which is serializable)
"""
newData = RenderNodeModel()
newData.createFromNode(pNode)
return newData
def post(self):
"""
"""
self.logger = logging.getLogger('main.query')
filters = self.getBodyAsJSON()
self.logger.debug('filters: %s' % filters)
try:
start_time = time.time()
resultData = []
renderNodes = self.getDispatchTree().renderNodes.values()
totalNodes = len(renderNodes)
#
# --- filtering
#
filteredNodes = self.match(filters, renderNodes)
#
# --- Prepare the result json object
#
for currNode in filteredNodes:
tmp = self.createRenderNodeRepr(currNode)
resultData.append(tmp.encode())
content = {
'summary': {
'count': len(filteredNodes),
'totalInDispatcher': totalNodes,
'requestTime': time.time() - start_time,
'requestDate': time.ctime()
},
'items': resultData
}
# Create response and callback
self.writeCallback(json.dumps(content))
except KeyError:
raise Http404('Error unknown key')
except HTTPError, e:
raise e
except Exception, e:
raise HTTPError(500, "Impossible to retrieve render nodes (%s)" % e)
|
{
"content_hash": "03f8867d46a2a578b3c1b249fc55bbd5",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 199,
"avg_line_length": 37.908212560386474,
"alnum_prop": 0.6136102969287626,
"repo_name": "mikrosimage/OpenRenderManagement",
"id": "52f0fb2d5fffcf2cdd3e36c10b68e64c0a382499",
"size": "15694",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/octopus/dispatcher/webservice/rendernodes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "878623"
},
{
"name": "Shell",
"bytes": "5347"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('workflows', '0011_auto_20221116_1028'),
]
operations = [
migrations.AlterField(
model_name='workflow',
name='creation_time',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='workflow',
name='last_modified',
field=models.DateTimeField(auto_now=True, null=True),
),
]
|
{
"content_hash": "323bd9d6e562e170c0087fef2e9b843a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 69,
"avg_line_length": 25.434782608695652,
"alnum_prop": 0.588034188034188,
"repo_name": "xflows/clowdflows-backend",
"id": "634e061d1880221d2946980c713bba6355cb5212",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflows/migrations/0012_auto_20221116_1029.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "484"
},
{
"name": "HTML",
"bytes": "74413"
},
{
"name": "JavaScript",
"bytes": "10945"
},
{
"name": "Python",
"bytes": "372594"
},
{
"name": "Shell",
"bytes": "453"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name='imutils',
packages=['imutils'],
version='0.2.2',
description='A series of convenience functions to make basic image processing functions such as translation, rotation, resizing, skeletonization, displaying Matplotlib images, sorting contours, detecting edges, and much more easier with OpenCV and Python.',
author='Adrian Rosebrock',
author_email='adrian@pyimagesearch.com',
url='https://github.com/jrosebr1/imutils',
download_url='https://github.com/jrosebr1/imutils/tarball/0.1',
keywords=['computer vision', 'image processing', 'opencv', 'matplotlib'],
classifiers=[],
scripts=['bin/range-detector'],
)
|
{
"content_hash": "201a4ca316e2d26886344363f0a0aa64",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 261,
"avg_line_length": 46.46666666666667,
"alnum_prop": 0.7245337159253945,
"repo_name": "PanTomaszRoszczynialski/imutils",
"id": "9d3ee463681f14d5e718d05c4b69c11dc9f84778",
"size": "697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20165"
}
],
"symlink_target": ""
}
|
from google.cloud import dialogflow_v2
def sample_list_conversations():
# Create a client
client = dialogflow_v2.ConversationsClient()
# Initialize request argument(s)
request = dialogflow_v2.ListConversationsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_conversations(request=request)
# Handle the response
for response in page_result:
print(response)
# [END dialogflow_v2_generated_Conversations_ListConversations_sync]
|
{
"content_hash": "9e534b7d4ed174d48966aba99231ff77",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 25.65,
"alnum_prop": 0.7153996101364523,
"repo_name": "googleapis/python-dialogflow",
"id": "ac64eba364b955d41aebf6c827d1da866ad3c43a",
"size": "1912",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v2_generated_conversations_list_conversations_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.views.generic import TemplateView
from django_cradmin import javascriptregistry
from django_cradmin.apps.cradmin_generic_token_with_metadata.models import GenericTokenWithMetadata, \
GenericTokenExpiredError
class ActivateAccountView(TemplateView, javascriptregistry.viewmixin.StandaloneBaseViewMixin):
template_name = 'cradmin_activate_account/activate.django.html'
appname = 'cradmin_activate_account'
#: The template used to render the success message.
#: Default value for :obj:`~.ActivateAccountView.get_success_message_template`
success_message_template = 'cradmin_activate_account/messages/success.django.html'
def get(self, *args, **kwargs):
self.token_does_not_exist = False
self.token_expired = False
try:
token = GenericTokenWithMetadata.objects.get_and_validate(
token=self.kwargs['token'], app=self.appname)
except GenericTokenWithMetadata.DoesNotExist:
self.token_does_not_exist = True
except GenericTokenExpiredError:
self.token_expired = True
else:
return self.token_is_valid(token)
return super(ActivateAccountView, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ActivateAccountView, self).get_context_data(**kwargs)
self.add_javascriptregistry_component_ids_to_context(context=context)
context['token_does_not_exist'] = self.token_does_not_exist
context['token_expired'] = self.token_expired
return context
def get_success_message_template(self):
"""
Get the template used to render the success message.
Defaults to :obj:`~.ActivateAccountView.success_message_template`.
"""
return self.success_message_template
def get_success_message(self, user):
"""
Get the success message added by :meth:`.add_success_message`.
Defaults to rendering the :meth:`.get_success_message_template`
template.
"""
return render_to_string(self.get_success_message_template(), {
'user': user
}).strip()
def add_success_message(self, user):
"""
Add success message.
Defaults to adding the value of :meth:`~.ActivateAccountView.get_success_message`
as a django messages framework success message.
"""
messages.success(self.request, self.get_success_message(user))
def activate_user(self, user):
"""
Activate the user.
You can override this to provide custom user activation code,
but you will most likely want to override the ``activate_user()``
method of your User model if you have a custom user model.
"""
if hasattr(user, 'activate_user'):
user.activate_user()
else:
user.is_active = True
user.save()
def token_is_valid(self, token):
user = token.content_object
next_url = token.metadata['next_url']
self.activate_user(user)
self.add_success_message(user)
return HttpResponseRedirect(next_url)
|
{
"content_hash": "d08a0b0f372f5501219e366f9f0e726c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 102,
"avg_line_length": 38.24418604651163,
"alnum_prop": 0.6670720583764062,
"repo_name": "appressoas/django_cradmin",
"id": "abd2ec3d429a204d1c6483da8545a782e1d0ddc7",
"size": "3289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cradmin/apps/cradmin_activate_account/views/activate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "192105"
},
{
"name": "JavaScript",
"bytes": "1951677"
},
{
"name": "Python",
"bytes": "771868"
},
{
"name": "SCSS",
"bytes": "679114"
}
],
"symlink_target": ""
}
|
"""Module for single entry transformers."""
import logging
from collections import defaultdict
from operator import attrgetter
from ....core.models.base import RemoteInstance
from ....core.exceptions import DoesNotExistException, SkipField
from ....core.models.terminal import (
SshKey, Identity,
)
from .base import Transformer, DeletBadEncrypted
from .utils import id_getter, map_zip_model_fields
def id_getter_wrapper():
"""Generate id getter."""
return id_getter
# pylint: disable=abstract-method
class BulkEntryBaseTransformer(Transformer):
"""Base Transformer for one model."""
def __init__(self, model_class, **kwargs):
"""Create new entry transformer."""
super(BulkEntryBaseTransformer, self).__init__(**kwargs)
assert model_class
self.model_class = model_class
self.sync_keys = (
self.account_manager.get_settings()['synchronize_key']
)
self.skip = (
not self.sync_keys and
self.model_class in (SshKey, Identity)
)
class BulkPrimaryKeyTransformer(BulkEntryBaseTransformer):
"""Transformer for primary key payloads."""
logger = logging.getLogger(__name__)
to_model_mapping = defaultdict(id_getter_wrapper, {int: int, })
def to_model(self, payload):
"""Retrieve model from storage by payload."""
if not payload:
return None
if self.skip:
raise SkipField
remote_instance_id = self.id_from_payload(payload)
model = self.storage.get(
self.model_class,
**{'remote_instance.id': remote_instance_id}
)
return model
def to_payload(self, model):
"""Convert model to primary key or to set/id reference."""
if self.skip:
raise SkipField
if not model:
return None
if model.remote_instance:
return model.remote_instance.id
return '{model.set_name}/{model.id}'.format(model=model)
def id_from_payload(self, payload):
"""Get remote id from payload."""
return self.to_model_mapping[type(payload)](payload)
# pylint: disable=too-few-public-methods
class GetPrimaryKeyTransformerMixin(object):
"""Mixin to get primary get Transformer."""
def get_primary_key_transformer(self, model_class):
"""Create new primary key Transformer."""
return BulkPrimaryKeyTransformer(
storage=self.storage, model_class=model_class,
account_manager=self.account_manager,
)
class BulkEntryTransformer(GetPrimaryKeyTransformerMixin,
BulkPrimaryKeyTransformer):
"""Transformer for complete model."""
def __init__(self, **kwargs):
"""Create new Transformer."""
super(BulkEntryTransformer, self).__init__(**kwargs)
self.attrgetter = attrgetter(*self.model_class.fields)
self.remote_instance_attrgetter = attrgetter(*RemoteInstance.fields)
def to_payload(self, model):
"""Convert model to payload."""
if self.skip:
raise SkipField
payload = dict(map_zip_model_fields(model, self.attrgetter))
if model.remote_instance:
zipped_remote_instance = map_zip_model_fields(
model.remote_instance, self.remote_instance_attrgetter
)
payload.update(zipped_remote_instance)
for field, mapping in model.fields.items():
self.serialize_field(payload, model, field, mapping)
payload['local_id'] = model.id
return payload
def serialize_field(self, payload, model, field, mapping):
"""Transform field to payload or skip."""
try:
if field in model.fk_field_names():
payload[field] = self.serialize_related_field(
model, field, mapping
)
else:
payload[field] = getattr(model, field)
except SkipField:
payload.pop(field, None)
def serialize_related_field(self, model, field, mapping):
"""Transform relation to payload."""
related_transformer = self.get_primary_key_transformer(mapping.model)
fk_payload = related_transformer.to_payload(getattr(model, field))
return fk_payload
def to_model(self, payload):
"""Convert payload to model."""
if self.skip:
raise SkipField
model = self.get_or_initialize_model(payload)
model = self.update_model_fields(model, payload)
return model
def update_model_fields(self, model, payload):
"""Update model's fields with payload."""
fk_fields = model.fk_field_names()
models_fields = {
i: payload[i]
for i, mapping in model.fields.items()
if i not in fk_fields
}
for i, mapping in model.fields.items():
if i in fk_fields:
try:
models_fields[i] = self.render_relation_field(
mapping, payload[i]
)
except SkipField:
models_fields.pop(i, None)
model.update(models_fields)
model.remote_instance = self.create_remote_instance(payload)
return model
def get_or_initialize_model(self, payload):
"""Get existed model or generate new one using payload."""
try:
model = self.get_model(payload)
except DoesNotExistException:
model = self.initialize_model()
model.id = payload.get('local_id', model.id)
return model
def get_model(self, payload):
"""Get model for payload."""
return super(BulkEntryTransformer, self).to_model(payload)
def render_relation_field(self, mapping, value):
"""Convert relation mapping and value to whole model."""
transformer = self.get_primary_key_transformer(mapping.model)
return transformer.to_model(value)
def initialize_model(self):
"""Generate new model using payload."""
model = self.model_class()
return model
# pylint: disable=no-self-use
def create_remote_instance(self, payload):
"""Generate remote instance for payload."""
instance = RemoteInstance()
instance.init_from_payload(payload)
return instance
class CryptoBulkEntryTransformer(BulkEntryTransformer):
"""Entry Transformer that encrypt model and decrypt payload."""
def __init__(self, crypto_controller, **kwargs):
"""Construct new crypto Transformer for bulk entry."""
super(CryptoBulkEntryTransformer, self).__init__(**kwargs)
self.crypto_controller = crypto_controller
def to_model(self, payload):
"""Decrypt model after serialization."""
model = super(CryptoBulkEntryTransformer, self).to_model(payload)
try:
descrypted_model = self.crypto_controller.decrypt(model)
except self.crypto_controller.bad_encrypted_exception:
raise DeletBadEncrypted(model)
return self.storage.save(descrypted_model)
def to_payload(self, model):
"""Encrypt model before deserialization."""
encrypted_model = self.crypto_controller.encrypt(model)
return super(CryptoBulkEntryTransformer, self).to_payload(
encrypted_model
)
class SettingsTransformer(Transformer):
"""Transformer for settings."""
def to_model(self, payload):
"""Convert REST API payload to Application models."""
return payload
def to_payload(self, model):
"""Convert Application models to REST API payload."""
return model
|
{
"content_hash": "73ba903894c3a9dbd4d9edbfc5ec01ff",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 77,
"avg_line_length": 34.52466367713004,
"alnum_prop": 0.6231978178984283,
"repo_name": "Crystalnix/serverauditor-sshconfig",
"id": "0a74c7d9819681bcb31f639ff15c6ef950a95658",
"size": "7723",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "termius/cloud/client/transformers/single.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "197778"
},
{
"name": "Shell",
"bytes": "80519"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from op_test import OpTest
class TestShapeOp(OpTest):
def setUp(self):
self.op_type = "shape"
self.config()
self.shape = [2, 3]
input = np.zeros(self.shape)
self.inputs = {'Input': input}
self.outputs = {'Out': np.array(self.shape)}
def config(self):
self.shape = [2, 3]
def test_check_output(self):
self.check_output()
class case1(TestShapeOp):
def config(self):
self.shape = [2]
class case2(TestShapeOp):
def config(self):
self.shape = [1, 2, 3]
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "ef59c00b5c06eeeb4b2588edd9b8e2ca",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 52,
"avg_line_length": 19.393939393939394,
"alnum_prop": 0.5703125,
"repo_name": "jacquesqiao/Paddle",
"id": "a62ee050075cb8c9f8817c142825a89c24bdfedf",
"size": "1253",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_shape_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274815"
},
{
"name": "C++",
"bytes": "8229685"
},
{
"name": "CMake",
"bytes": "288709"
},
{
"name": "Cuda",
"bytes": "1123627"
},
{
"name": "Dockerfile",
"bytes": "8120"
},
{
"name": "Go",
"bytes": "109508"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "4194414"
},
{
"name": "Shell",
"bytes": "164656"
}
],
"symlink_target": ""
}
|
from JumpScale import j
import sys
import time
try:
import ujson as json
except:
import json
from JumpScale.tools import cmdutils
from JumpScale.legacy.redisworker.RedisWorker import RedisWorkerFactory
from logging import FileHandler
import os
RUNTIME = 24 * 3600
def restart_program():
"""Restarts the current program.
Note: this function does not return. Any cleanup action (like
saving data) must be done before calling this function."""
python = sys.executable
os.execl(python, python, *sys.argv)
class Worker(object):
def __init__(self, opts):
self.opts = opts
self.log = j.logger.get('Worker')
self.actions = {}
self.clients = dict()
self.acclient = None
self.redisw = RedisWorkerFactory()
self.queuename = opts.queuename
self.init()
self.starttime = time.time()
def getClient(self, job):
ipaddr = getattr(job, 'achost', None)
client = self.clients.get(ipaddr)
if not client:
if ipaddr:
client = j.legacy.agentcontroller.get(ipaddr, login='node')
self.clients[ipaddr] = client
else:
if self.acclient is None:
self.acclient = j.legacy.agentcontroller.get(opts.ip, port=opts.port, login='node')
return self.acclient
return client
def init(self):
j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.tmpDir, "jumpscripts"))
self.redisw.redis.delete("workers:action:%s" % self.queuename)
def processAction(self, action):
self.redisw.redis.delete("workers:action:%s" % self.queuename)
if action == "RESTART":
self.log.info("RESTART ASKED")
j.application.stop(0, True)
restart_program()
if action == "RELOAD":
self.log.info("RELOAD ASKED")
self.actions = {}
def run(self):
self.log.info("STARTED")
while True:
self.redisw.redis.hset("workers:heartbeat", self.queuename, int(time.time()))
if self.starttime + RUNTIME < time.time():
self.log.info("Running for %s seconds restarting" % RUNTIME)
restart_program()
try:
self.log.info("check if work %s", self.queuename)
jtype, job = self.redisw._getWork(self.queuename, timeout=10)
except Exception as e:
if str(e).find("Could not find queue to execute job") != -1:
self.log.exception(e)
# create queue
self.log.info("could not find queue")
elif isinstance(e, ValueError):
self.log.warning('invalid json sent in queue')
else:
# TODO: restore the ops error call
# j.events.opserror("Could not get work from redis, is redis running?", "workers.getwork", e)
self.log.exception(e)
self.log.error("Could not get work from redis, is redis running?: %s" % e)
time.sleep(10)
continue
if jtype == "action":
self.processAction(job)
continue
if job:
j.application.jid = job.guid
jskey = job.category, job.cmd
try:
jscript = self.actions.get(jskey)
if jscript is None:
self.log.info("JSCRIPT CACHEMISS")
try:
jscript = self.redisw.getJumpscriptFromName(job.category, job.cmd)
if jscript is None:
# try to get it by id
if job.jscriptid:
jscript = self.redisw.getJumpscriptFromId(job.jscriptid)
if jscript is None:
msg = "cannot find jumpscripts with id:%s cat:%s cmd:%s" % (
job.jscriptid, job.category, job.cmd)
self.log.error(msg)
eco = j.errorconditionhandler.raiseOperationalCritical(msg,
category="worker.jscript.notfound",
die=False)
job.result = eco.dump()
job.state = "ERROR"
self.notifyWorkCompleted(job)
continue
jscript.write()
jscript.load()
self.actions[jskey] = jscript
except Exception as e:
agentid = j.application.getAgentId()
if jscript is not None:
msg = "could not compile jscript:%s %s_%s on agent:%s.\nError:%s" % (
jscript.id, jscript.organization, jscript.name, agentid, e)
else:
msg = "could not compile jscriptid:%s on agent:%s.\nError:%s" % (
job.jscriptid, agentid, e)
eco = j.errorconditionhandler.processPythonExceptionObject(e)
eco.errormessage = msg
if jscript:
eco.code = jscript.source
eco.jid = job.guid
eco.category = 'workers.compilescript'
eco.process()
job.state = "ERROR"
eco.tb = None
job.result = eco.__dict__
# j.events.bug_warning(msg,category="worker.jscript.notcompile")
# self.loghandler.logECO(eco)
self.notifyWorkCompleted(job)
continue
self.actions[job.jscriptid] = jscript
self.log.info("Job started:%s script:%s %s/%s" %
(job.id, jscript.id, jscript.organization, jscript.name))
j.logger.enabled = job.log
job.timeStart = time.time()
status, result = jscript.executeInWorker(**job.args)
self.redisw.redis.hdel("workers:inqueuetest", jscript.getKey())
j.logger.enabled = True
if status:
job.result = result
job.state = "OK"
job.resultcode = 0
else:
if isinstance(result, str):
job.state = result
else:
eco = result
agentid = j.application.getAgentId()
msg = "Could not execute jscript:%s %s_%s on agent:%s\nError: %s" % (
jscript.id, jscript.organization, jscript.name, agentid, eco.errormessage)
eco.errormessage = msg
eco.jid = job.guid
eco.code = jscript.source
eco.category = "workers.executejob"
# out = ""
# tocheck = ["\"worker.py\"", "jscript.executeInWorker", "return self.module.action",
# "JumpscriptFactory.py"]
# for line in eco.backtrace.split("\n"):
# found = False
# for check in tocheck:
# if line.find(check) != -1:
# found = True
# break
# if found is False:
# out += "%s\n" % line
#
# eco.backtrace = out
if job.id < 1000000:
eco.process()
else:
self.log.error(eco)
# j.events.bug_warning(msg,category="worker.jscript.notexecute")
# self.loghandler.logECO(eco)
job.state = "ERROR"
eco.tb = None
job.result = eco.__dict__
job.resultcode = 1
# ok or not ok, need to remove from queue test
# thisin queue test is done to now execute script multiple time
self.notifyWorkCompleted(job)
finally:
j.application.jid = 0
def notifyWorkCompleted(self, job):
job.timeStop = int(time.time())
if hasattr(job, 'internal') and job.internal:
# means is internal job
self.redisw.redis.set("workers:jobs:%s" % job.id, json.dumps(job.__dict__), ex=60)
self.redisw.redis.rpush("workers:return:%s" % job.id, time.time())
self.redisw.redis.expire("workers:return:%s" % job.id, 60)
try:
acclient = self.getClient(job)
except Exception as e:
j.events.error("could not report job in error to agentcontroller: %s" % e)
return
def reportJob():
try:
acclient.notifyWorkCompleted(job.__dict__)
except Exception as e:
j.events.opserror("could not report job in error to agentcontroller", category='workers.errorreporting',
e=e)
return
# jumpscripts coming from AC
if job.state != "OK":
self.redisw.redis.expire("workers:jobs:%s" % job.id, 60)
reportJob()
else:
if job.log or job.wait:
reportJob()
# we don't have to keep status of local job result, has been forwarded to AC
if not hasattr(job, 'internal') or not job.internal:
self.redisw.redis.delete("workers:jobs:%s" % job.id)
if __name__ == '__main__':
parser = cmdutils.ArgumentParser()
parser.add_argument("-q", '--queuename', help='Queue name', required=True)
parser.add_argument("-l", '--logpath', help='Logging file path', required=False, default=None)
parser.add_argument('--controller-ip', dest='ip', default='localhost', help='Agent controller address')
parser.add_argument('--controller-port', dest='port', type=int, default=4444, help='Agent controller port')
opts = parser.parse_args()
j.application.start("jumpscale:worker:%s" % opts.queuename)
if j.application.config.jumpscale['system']['grid'].get("id", False):
j.application.initGrid()
logger = j.logger.get(j.logger.root_logger_name)
handler = FileHandler(opts.logpath, mode='w')
logger.addHandler(handler)
j.logger.consoleloglevel = 2
j.logger.maxlevel = 7
worker = Worker(opts)
worker.run()
|
{
"content_hash": "2a99b7fc37f743fa8069971a23fd9748",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 126,
"avg_line_length": 43.51330798479088,
"alnum_prop": 0.4699405802167074,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "34de79bdb485703c6611b09f8fc00d98a896f05e",
"size": "11466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/jsagent/lib/worker.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
}
|
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class MegacoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = MegacoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7950
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
{
"content_hash": "8369d3f9c6442d4dba7979309e1acca1",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 84,
"avg_line_length": 25.420408163265307,
"alnum_prop": 0.6453114964675658,
"repo_name": "CCPorg/MEC-MegaCoin-Ver-179600-Copy",
"id": "0ef05a332edff146c270c45bcc3c76a0f7cd5768",
"size": "6528",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "contrib/pyminer/pyminer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "98289"
},
{
"name": "C++",
"bytes": "2599724"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C",
"bytes": "5634"
},
{
"name": "Prolog",
"bytes": "28244"
},
{
"name": "Python",
"bytes": "77697"
},
{
"name": "Shell",
"bytes": "12743"
},
{
"name": "TypeScript",
"bytes": "10467824"
}
],
"symlink_target": ""
}
|
"""
Base widget, all widgets inherit from this.
"""
import pygame
from pygame.locals import Rect, SRCALPHA
from pygame import draw
from ._locals import *
from ._locals import (has_focus, is_active, add_widget, remove_widget_order,
set_cursor, remove_cursor)
import collections
class Simple(pygame.sprite.Sprite):
"""
Widget foundations all widgets should inherit from.
This can also be used as a simple widget that does nothing, such as
displaying an image.
Attributes:
image: The current surface that will be drawn to the screen.
rect: The ``pygame.Rect`` used for the widget's position and size.
rect_abs: A ``pygame.Rect`` using the absolute screen position.
pos: The widget's position. Can be retrieved or assigned as a shortcut
for rect.topleft. Also a shortcut for setting pos through config().
pos_abs: The widget's absolute screen position.
"""
# Widget settings
_can_focus = False
_modal = False
_layered = False # Layered updates for dialog windows etc.
_default_size = None
_image_state = "image"
_parent = None
_draw_rect = False
_surf_flags = 0
_available_images = ()
_extra_images = {}
_settings_default = {}
_fade = None # Alpha level when fading
_fade_up = True
_custom_image = False
_custom_extra = ()
_label = None
def __init__(self, surf=None, flags=None, **kwargs):
"""
Args:
surf: The surface that should be drawn to screen, of type:
pygame.Surface: Use an existing surface.
tuple,list: Contains size as (width,height), creates a new surface.
str: Contains file name to load an image.
dict: Contains multiple images to be loaded. The documentation will
specify if a widget uses multiple images and what names to use.
flags: Override the flags passed to `pygame.surface.Surface`.
kwargs: Any number of keyword arguments matching those for config().
"""
pygame.sprite.Sprite.__init__(self)
# Initialise attributes
self._images = {}
self._available_images = ("image",) + self._available_images
self._settings = self._settings_default.copy()
self.rect = Rect((0,0), (0,0))
# Use default size if none specified
if surf is None:
surf = self._default_size
elif isinstance(surf, (tuple, list)) and (isinstance(surf[0], str) or
isinstance(surf[1], str)):
size = get_screen().rect.size
s = list(surf)
for i in (0,1):
if isinstance(surf[i], str):
ratio = float(surf[i].rstrip("%")) / 100.
s[i] = size[i] * ratio
surf = tuple(s)
if flags is not None:
self._surf_flags = flags
# Create base surfaces if not None.
# If None, widget is expected to call this function later.
if surf is not None:
self._create_base_images(surf)
self.config(init=None, **kwargs)
def _create_event(self, gui_type, **kwargs):
"""
Returns a GUI `pygame.event.Event` object. The first argument must be
the value for `gui_type` and should roughly describe the event.
Optional keyword arguments can also be passed with additional
attributes for the event.
"""
return pygame.event.Event(
GUI,
dict(kwargs, **{"gui_type": gui_type, "widget_type": self.__class__,
"widget": self}))
def config(self, **kwargs):
"""
Update widget configuration and redraw the widget.
Keyword Args:
pos: ``tuple`` (x,y) Position to set widget to.
label: ``str`` Text to display next to widget.
label_side: ``str`` `("top", "right", "bottom", "left")`
Which side of the widget to display the label.
"""
if "pos" in kwargs:
self.pos = kwargs["pos"]
if "label" not in self._settings_default:
if "label" in kwargs:
if self._label is None:
self._label = _Label(kwargs["label"], self)
else:
self._label.text = kwargs["label"]
self._label._draw()
if "label_col" in kwargs:
self._label_col = kwargs["label_col"]
if self._label is not None:
self._label._draw()
if "label_font" in kwargs:
self._label_font = kwargs["label_font"]
if self._label is not None:
self._label._draw()
if "label_side" in kwargs:
assert kwargs["label_side"] in ("top", "right",
"bottom", "left"), \
"Must use: 'top', 'right', 'bottom' or 'left'"
self._label_side = kwargs["label_side"]
# Check if any callbacks have been passed in.
callbacks = [x for x in kwargs if x.startswith("on_")]
for f in callbacks:
assert f in dir(self), "Invalid callback name: %s" % f
assert isinstance(kwargs[f], collections.Callable), \
"Callback '%s' must be callable: %s" % (f, kwargs[f])
setattr(self, f, kwargs[f])
self._config(**kwargs)
self._draw()
def _config(self, **kwargs):
"""Widgets should overload for custom widget configuration."""
pass
def add(self, order=None, fade=True, focus=False):
"""
Add widget to screen.
Args:
order: Integer representing the order widget should receive focus
when user presses TAB. The widget with the lowest order will
receive focus first, then moving up with increasing values.
fade: True if widget should fade in, False if not.
focus: To focus widget immediately, use 1 if focused by keyboard,
2 if by mouse, otherwise 0.
"""
added = add_widget(self, order, focus)
# Fade widget in
if fade:
self._fade_up = True
if added and self._fade is None: self._fade = 1
self.image.set_alpha(self._fade)
else:
self._fade = None
self.image.set_alpha(255)
# Add any associated label
if self._label is not None:
self._label.add(fade=fade)
def remove(self, fade=True):
"""
Remove widget from screen.
Args:
fade: True if widget should fade out.
"""
if fade: # Fade widget out
self._fade_up = False
if self._fade is None: self._fade = 250
else: # Remove widget immediately
self.kill()
remove_widget_order(self)
if self.has_focus(): self._focus_exit()
# Remove any associated label
if self._label is not None:
self._label.remove(fade)
def active(self):
"""Return True if widget is active (onscreen)."""
return is_active(self)
def has_focus(self):
"""Return True if this widget has focus."""
return has_focus(self)
def _switch(self, image=None):
"""
Switch image state to the given image name.
Given no arguments will simply refresh the current image.
"""
if image is not None:
assert image in self._images, "Invalid image state %s" % image
self._image_state = image
assert self._images, ("Subclass of %s not initialised properly." %
self.__class__)
self.image = self._images[self._image_state].copy()
if self._draw_rect:
self._dotted_rect()
def update(self, time):
"""
Overload to update the widget per frame.
Args:
time: Milliseconds passed since last frame.
"""
pass
def _event(self, event):
"""Overload to process events received by the widget one at a time."""
pass
def _draw(self):
if not self._custom_image: self._draw_base()
for name in self._custom_extra:
f = getattr(self, "_draw_%s" % name)
f(self._images[name].image, self._images[name].rect.size)
self._draw_final()
self._switch()
def _draw_base(self):
"""
Widgets should overload to draw default images found in self._images.
This method will not be called when the user gives a custom image.
"""
pass
def _draw_final(self):
"""
Widgets should overload to draw final things that should
be drawn regardless of whether a custom image was used or not.
"""
pass
def _create_base_images(self, surf, parent=None):
"""
Creates the base surfaces to draw on, or uses existing images.
If self._default_size is None, widget is expected to call this
function manually when no size is given.
"""
Image = pygame.Surface
def create_image(surf):
"""Return a created surface."""
if isinstance(surf, Image):
return surf
elif isinstance(surf, (tuple,list)):
if isinstance(surf[0], (tuple,list)):
assert (len(surf[0]) > 0 or len(surf[1]) > 0), "Must specify atleast one size"
if len(surf[0]) > 0:
w = self.rect.w * surf[0][0] + surf[0][1]
if len(surf[1]) > 0:
h = self.rect.h * surf[1][0] + surf[1][1]
if len(surf[0]) == 0:
w = h
if len(surf[1]) == 0:
h = w
surf = (w, h)
surf = Image(surf, self._surf_flags)
return surf
elif isinstance(surf, str):
return pygame.image.load(surf).convert_alpha()
else:
raise ValueError("Invalid surface object: %s" % type(surf))
# Create base images
self._custom_image = False
images = False
custom_extra = []
if isinstance(surf, dict):
for img in surf:
assert (img in self._available_images or
img in self._extra_images), "Incorrect image"
if img in self._extra_images:
if not isinstance(surf[img], (tuple,list)):
custom_extra.append(img)
self._images[img] = Simple(create_image(surf[img]))
self._images[img]._parent = self
self._images[img]._show = True
else:
images = True
if not isinstance(surf[img], (tuple,list)):
self._custom_image = True
self._images[img] = create_image(surf[img])
else:
images = True
if not isinstance(surf, (tuple,list)):
self._custom_image = True
self._images["image"] = create_image(surf)
if not images and self._default_size is not None:
images = True
self._images["image"] = create_image(self._default_size)
if images:
# Copy other images, if any have not been supplied.
assert "image" in self._images, "Must supply 'image'"
for count, name in enumerate(self._available_images):
if name not in self._images:
img = self._images[self._available_images[count-1]]
self._images[name] = img.copy()
self.image = self._images["image"].copy()
self.rect.size = self.image.get_size()
# Set up extra images
self._custom_extra = []
for name in self._extra_images:
if name not in self._images:
copy = False
n = name
while isinstance(self._extra_images[n], str):
copy = True
n = self._extra_images[n]
self._images[name] = Simple(create_image(self._extra_images[n]))
self._images[name]._parent = self
self._images[name]._show = True
if copy and n in custom_extra:
self._images[name].image = self._images[n].image.copy()
else:
self._custom_extra.append(name)
def _change_focus(self, forward=True):
"""
Called when focus should be changed. Used primarily by the Container
widget.
Args:
forward: True if toggling focus forwards, False if backwards.
Returns:
True if widget should change focus from this widget.
"""
return True
def _focus_enter(self, focus=0):
"""
Called when the widget gains focus. Overload to customise behaviour.
Args:
focus: 1 if focused by keyboard, 2 if by mouse.
"""
pass
def _focus_exit(self):
"""
Called when the widget loses focus.
Overload to customise behaviour.
"""
pass
def _dotted_rect(self, col=(255,255,255)):
"""Draw a dotted rectangle to show keyboard focus."""
self.image.lock()
for i in range(0, self.rect.w, 3):
# Draw horizontal lines
self.image.set_at((i, 0), col)
self.image.set_at((i, self.rect.h-1), col)
for i in range(0, self.rect.h, 2):
# Draw vertical lines
self.image.set_at((0, i), col)
self.image.set_at((self.rect.w-1, i), col)
self.image.unlock()
def _set_cursor(self, size, hotspot, xormasks, andmasks):
set_cursor(self, size, hotspot, xormasks, andmasks)
def _remove_cursor(self):
remove_cursor(self)
# --PROPERTIES--
@property
def rect_abs(self):
if self._parent is None:
return self.rect
else:
p_abs = self._parent.pos_abs
p = (self.rect.x + p_abs[0], self.rect.y + p_abs[1])
return Rect(p, self.rect.size)
@property
def pos(self):
return self.rect.topleft
@pos.setter
def pos(self, value):
if not isinstance(value[0], str) and not isinstance(value[1], str):
self.rect.topleft = value
else:
if self._parent is not None:
size = self._parent.rect.size
else:
size = get_screen().rect.size
pos = list(value)
for i in (0,1):
if isinstance(value[i], str):
ratio = float(value[i].rstrip("%")) / 100.
pos[i] = size[i] * ratio
self.rect.topleft = pos
@property
def pos_abs(self):
if self._parent is None:
return self.rect.topleft
else:
p_abs = self._parent.pos_abs
return (self.rect.x + p_abs[0], self.rect.y + p_abs[1])
class _Label(Simple):
"""
Simple label that can be displayed next to widgets.
This differs from the normal label widget in that it is attached to a widget
and should not be used standalone. This is automatically attached by the
base widget when the user passes the label argument to config().
"""
_surf_flags = SRCALPHA
text = ""
def __init__(self, text, parent):
"""
Args:
text: Text label should display.
parent: Widget label should be attached to.
"""
pygame.sprite.Sprite.__init__(self)
self.text = text
self.parent = parent
self._rect = Rect(0,0,0,0)
self._draw()
def _draw(self):
"""Redraw label."""
# Split into lines
text = []
for line in self.text.split("\n"):
text.append(self.font.render(line, True, self.col))
# Dynamically set size
h = 0
for line in text:
h += line.get_height()
w = max(text, key=lambda x: x.get_width())
self._rect.size = (w.get_width(), h)
Image = pygame.Surface
self.image = Image((w.get_width(), h), SRCALPHA)
# Blit each line
y = 0
for line in text:
self.image.blit(line, (0,y))
y += self.font.get_linesize()
@property
def col(self):
"""
Colour of label text. Defaults to Font.col. Changed in the
base widget when "label_col" is passed to `self.config()`.
"""
try:
return self.parent._label_col
except AttributeError:
return Font.col
@property
def font(self):
"""
Font used for label text. Defaults to Font["widget"]. Changed in
the base widget when "label_font" is passed to `self.config()`.
"""
try:
return self.parent._label_font
except AttributeError:
return Font["widget"]
@property
def side(self):
"""
Return which side widget should be attached to.
Returns parent._label_side or defaults to "right".
"""
try:
return self.parent._label_side
except AttributeError:
return "right"
@property
def rect(self):
"""Returns the rect aligned to the appropriate side of it's parent."""
if self.side == "left":
self._rect.midright = self.parent.rect.midleft
elif self.side == "right":
self._rect.midleft = self.parent.rect.midright
elif self.side == "top":
self._rect.midbottom = self.parent.rect.midtop
elif self.side == "bottom":
self._rect.midtop = self.parent.rect.midbottom
return self._rect
|
{
"content_hash": "fc72eb4a2599167159c80953ffcb157c",
"timestamp": "",
"source": "github",
"line_count": 545,
"max_line_length": 98,
"avg_line_length": 33.09357798165138,
"alnum_prop": 0.5326014637391883,
"repo_name": "OneOneFour/ICSP_Monte_Carlo",
"id": "5d8b4e77fa56db35c495e5dc4ecdbdb70e951071",
"size": "18212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sgc/sgc/widgets/base_widget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36840"
}
],
"symlink_target": ""
}
|
from io import BytesIO
from .node import (Node, AtomNode, BinaryExpressionNode, BinaryOperatorNode,
ConditionalNode, DataNode, IndexNode, KeyValueNode, ListNode,
NumberNode, StringNode, UnaryExpressionNode,
UnaryOperatorNode, ValueNode, VariableNode)
class ParseError(Exception):
def __init__(self, filename, line, detail):
self.line = line
self.filename = filename
self.detail = detail
self.message = f"{self.detail}: {self.filename} line {self.line}"
Exception.__init__(self, self.message)
eol = object
group_start = object
group_end = object
digits = "0123456789"
open_parens = "[("
close_parens = "])"
parens = open_parens + close_parens
operator_chars = "=!"
unary_operators = ["not"]
binary_operators = ["==", "!=", "and", "or"]
operators = ["==", "!=", "not", "and", "or"]
atoms = {"True": True,
"False": False,
"Reset": object()}
def decode(s):
assert isinstance(s, str)
return s
def precedence(operator_node):
return len(operators) - operators.index(operator_node.data)
class TokenTypes:
def __init__(self) -> None:
for type in ["group_start", "group_end", "paren", "list_start", "list_end", "separator", "ident", "string", "number", "atom", "eof"]:
setattr(self, type, type)
token_types = TokenTypes()
class Tokenizer:
def __init__(self):
self.reset()
def reset(self):
self.indent_levels = [0]
self.state = self.line_start_state
self.next_state = self.data_line_state
self.line_number = 0
self.filename = ""
def tokenize(self, stream):
self.reset()
assert not isinstance(stream, str)
if isinstance(stream, bytes):
stream = BytesIO(stream)
if not hasattr(stream, "name"):
self.filename = ""
else:
self.filename = stream.name
self.next_line_state = self.line_start_state
for i, line in enumerate(stream):
assert isinstance(line, bytes)
self.state = self.next_line_state
assert self.state is not None
states = []
self.next_line_state = None
self.line_number = i + 1
self.index = 0
self.line = line.decode('utf-8').rstrip()
assert isinstance(self.line, str)
while self.state != self.eol_state:
states.append(self.state)
tokens = self.state()
if tokens:
yield from tokens
self.state()
while True:
yield (token_types.eof, None)
def char(self):
if self.index == len(self.line):
return eol
return self.line[self.index]
def consume(self):
if self.index < len(self.line):
self.index += 1
def peek(self, length):
return self.line[self.index:self.index + length]
def skip_whitespace(self):
while self.char() == " ":
self.consume()
def eol_state(self):
if self.next_line_state is None:
self.next_line_state = self.line_start_state
def line_start_state(self):
self.skip_whitespace()
if self.char() == eol:
self.state = self.eol_state
return
if self.index > self.indent_levels[-1]:
self.indent_levels.append(self.index)
yield (token_types.group_start, None)
else:
if self.index < self.indent_levels[-1]:
while self.index < self.indent_levels[-1]:
self.indent_levels.pop()
yield (token_types.group_end, None)
# This is terrible; if we were parsing an expression
# then the next_state will be expr_or_value but when we deindent
# it must always be a heading or key next so we go back to data_line_state
self.next_state = self.data_line_state
if self.index != self.indent_levels[-1]:
raise ParseError(self.filename, self.line_number, "Unexpected indent")
self.state = self.next_state
def data_line_state(self):
if self.char() == "[":
yield (token_types.paren, self.char())
self.consume()
self.state = self.heading_state
else:
self.state = self.key_state
def heading_state(self):
rv = ""
while True:
c = self.char()
if c == "\\":
rv += self.consume_escape()
elif c == "]":
break
elif c == eol:
raise ParseError(self.filename, self.line_number, "EOL in heading")
else:
rv += c
self.consume()
yield (token_types.string, decode(rv))
yield (token_types.paren, "]")
self.consume()
self.state = self.line_end_state
self.next_state = self.data_line_state
def key_state(self):
rv = ""
while True:
c = self.char()
if c == " ":
self.skip_whitespace()
if self.char() != ":":
raise ParseError(self.filename, self.line_number, "Space in key name")
break
elif c == ":":
break
elif c == eol:
raise ParseError(self.filename, self.line_number, "EOL in key name (missing ':'?)")
elif c == "\\":
rv += self.consume_escape()
else:
rv += c
self.consume()
yield (token_types.string, decode(rv))
yield (token_types.separator, ":")
self.consume()
self.state = self.after_key_state
def after_key_state(self):
self.skip_whitespace()
c = self.char()
if c == "#":
self.next_state = self.expr_or_value_state
self.state = self.comment_state
elif c == eol:
self.next_state = self.expr_or_value_state
self.state = self.eol_state
elif c == "[":
self.state = self.list_start_state
else:
self.state = self.value_state
def after_expr_state(self):
self.skip_whitespace()
c = self.char()
if c == "#":
self.next_state = self.after_expr_state
self.state = self.comment_state
elif c == eol:
self.next_state = self.after_expr_state
self.state = self.eol_state
elif c == "[":
self.state = self.list_start_state
else:
self.state = self.value_state
def list_start_state(self):
yield (token_types.list_start, "[")
self.consume()
self.state = self.list_value_start_state
def list_value_start_state(self):
self.skip_whitespace()
if self.char() == "]":
self.state = self.list_end_state
elif self.char() in ("'", '"'):
quote_char = self.char()
self.consume()
yield (token_types.string, self.consume_string(quote_char))
self.skip_whitespace()
if self.char() == "]":
self.state = self.list_end_state
elif self.char() != ",":
raise ParseError(self.filename, self.line_number, "Junk after quoted string")
self.consume()
elif self.char() == "#":
self.state = self.comment_state
self.next_line_state = self.list_value_start_state
elif self.char() == eol:
self.next_line_state = self.list_value_start_state
self.state = self.eol_state
elif self.char() == ",":
raise ParseError(self.filename, self.line_number, "List item started with separator")
elif self.char() == "@":
self.state = self.list_value_atom_state
else:
self.state = self.list_value_state
def list_value_state(self):
rv = ""
spaces = 0
while True:
c = self.char()
if c == "\\":
escape = self.consume_escape()
rv += escape
elif c == eol:
raise ParseError(self.filename, self.line_number, "EOL in list value")
elif c == "#":
raise ParseError(self.filename, self.line_number, "EOL in list value (comment)")
elif c == ",":
self.state = self.list_value_start_state
self.consume()
break
elif c == " ":
spaces += 1
self.consume()
elif c == "]":
self.state = self.list_end_state
self.consume()
break
else:
rv += " " * spaces
spaces = 0
rv += c
self.consume()
if rv:
yield (token_types.string, decode(rv))
def list_value_atom_state(self):
self.consume()
for _, value in self.list_value_state():
yield token_types.atom, value
def list_end_state(self):
self.consume()
yield (token_types.list_end, "]")
self.state = self.line_end_state
def value_state(self):
self.skip_whitespace()
c = self.char()
if c in ("'", '"'):
quote_char = self.char()
self.consume()
yield (token_types.string, self.consume_string(quote_char))
if self.char() == "#":
self.state = self.comment_state
else:
self.state = self.line_end_state
elif c == "@":
self.consume()
for _, value in self.value_inner_state():
yield token_types.atom, value
elif c == "[":
self.state = self.list_start_state
else:
self.state = self.value_inner_state
def value_inner_state(self):
rv = ""
spaces = 0
while True:
c = self.char()
if c == "\\":
rv += self.consume_escape()
elif c == "#":
self.state = self.comment_state
break
elif c == " ":
# prevent whitespace before comments from being included in the value
spaces += 1
self.consume()
elif c == eol:
self.state = self.line_end_state
break
else:
rv += " " * spaces
spaces = 0
rv += c
self.consume()
rv = decode(rv)
if rv.startswith("if "):
# Hack to avoid a problem where people write
# disabled: if foo
# and expect that to disable conditionally
raise ParseError(self.filename, self.line_number, "Strings starting 'if ' must be quoted "
"(expressions must start on a newline and be indented)")
yield (token_types.string, rv)
def comment_state(self):
while self.char() is not eol:
self.consume()
self.state = self.eol_state
def line_end_state(self):
self.skip_whitespace()
c = self.char()
if c == "#":
self.state = self.comment_state
elif c == eol:
self.state = self.eol_state
else:
raise ParseError(self.filename, self.line_number, "Junk before EOL %s" % c)
def consume_string(self, quote_char):
rv = ""
while True:
c = self.char()
if c == "\\":
rv += self.consume_escape()
elif c == quote_char:
self.consume()
break
elif c == eol:
raise ParseError(self.filename, self.line_number, "EOL in quoted string")
else:
rv += c
self.consume()
return decode(rv)
def expr_or_value_state(self):
if self.peek(3) == "if ":
self.state = self.expr_state
else:
self.state = self.value_state
def expr_state(self):
self.skip_whitespace()
c = self.char()
if c == eol:
raise ParseError(self.filename, self.line_number, "EOL in expression")
elif c in "'\"":
self.consume()
yield (token_types.string, self.consume_string(c))
elif c == "#":
raise ParseError(self.filename, self.line_number, "Comment before end of expression")
elif c == ":":
yield (token_types.separator, c)
self.consume()
self.state = self.after_expr_state
elif c in parens:
self.consume()
yield (token_types.paren, c)
elif c in ("!", "="):
self.state = self.operator_state
elif c in digits:
self.state = self.digit_state
else:
self.state = self.ident_state
def operator_state(self):
# Only symbolic operators
index_0 = self.index
while True:
c = self.char()
if c == eol:
break
elif c in operator_chars:
self.consume()
else:
self.state = self.expr_state
break
yield (token_types.ident, self.line[index_0:self.index])
def digit_state(self):
index_0 = self.index
seen_dot = False
while True:
c = self.char()
if c == eol:
break
elif c in digits:
self.consume()
elif c == ".":
if seen_dot:
raise ParseError(self.filename, self.line_number, "Invalid number")
self.consume()
seen_dot = True
elif c in parens:
break
elif c in operator_chars:
break
elif c == " ":
break
elif c == ":":
break
else:
raise ParseError(self.filename, self.line_number, "Invalid character in number")
self.state = self.expr_state
yield (token_types.number, self.line[index_0:self.index])
def ident_state(self):
index_0 = self.index
while True:
c = self.char()
if c == eol:
break
elif c == ".":
break
elif c in parens:
break
elif c in operator_chars:
break
elif c == " ":
break
elif c == ":":
break
else:
self.consume()
self.state = self.expr_state
yield (token_types.ident, self.line[index_0:self.index])
def consume_escape(self):
assert self.char() == "\\"
self.consume()
c = self.char()
self.consume()
if c == "x":
return self.decode_escape(2)
elif c == "u":
return self.decode_escape(4)
elif c == "U":
return self.decode_escape(6)
elif c in ["a", "b", "f", "n", "r", "t", "v"]:
return eval(r"'\%s'" % c)
elif c is eol:
raise ParseError(self.filename, self.line_number, "EOL in escape")
else:
return c
def decode_escape(self, length):
value = 0
for i in range(length):
c = self.char()
value *= 16
value += self.escape_value(c)
self.consume()
return chr(value)
def escape_value(self, c):
if '0' <= c <= '9':
return ord(c) - ord('0')
elif 'a' <= c <= 'f':
return ord(c) - ord('a') + 10
elif 'A' <= c <= 'F':
return ord(c) - ord('A') + 10
else:
raise ParseError(self.filename, self.line_number, "Invalid character escape")
class Parser:
def __init__(self):
self.reset()
def reset(self):
self.token = None
self.unary_operators = "!"
self.binary_operators = frozenset(["&&", "||", "=="])
self.tokenizer = Tokenizer()
self.token_generator = None
self.tree = Treebuilder(DataNode(None))
self.expr_builder = None
self.expr_builders = []
def parse(self, input):
try:
self.reset()
self.token_generator = self.tokenizer.tokenize(input)
self.consume()
self.manifest()
return self.tree.node
except Exception as e:
if not isinstance(e, ParseError):
raise ParseError(self.tokenizer.filename,
self.tokenizer.line_number,
str(e))
raise
def consume(self):
self.token = next(self.token_generator)
def expect(self, type, value=None):
if self.token[0] != type:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
f"Token '{self.token[0]}' doesn't equal expected type '{type}'")
if value is not None:
if self.token[1] != value:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
f"Token '{self.token[1]}' doesn't equal expected value '{value}'")
self.consume()
def manifest(self):
self.data_block()
self.expect(token_types.eof)
def data_block(self):
while self.token[0] == token_types.string:
self.tree.append(KeyValueNode(self.token[1]))
self.consume()
self.expect(token_types.separator)
self.value_block()
self.tree.pop()
while self.token == (token_types.paren, "["):
self.consume()
if self.token[0] != token_types.string:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
f"Token '{self.token[0]}' is not a string")
self.tree.append(DataNode(self.token[1]))
self.consume()
self.expect(token_types.paren, "]")
if self.token[0] == token_types.group_start:
self.consume()
self.data_block()
self.eof_or_end_group()
self.tree.pop()
def eof_or_end_group(self):
if self.token[0] != token_types.eof:
self.expect(token_types.group_end)
def value_block(self):
if self.token[0] == token_types.list_start:
self.consume()
self.list_value()
elif self.token[0] == token_types.string:
self.value()
elif self.token[0] == token_types.group_start:
self.consume()
self.expression_values()
if self.token[0] == token_types.string:
self.value()
elif self.token[0] == token_types.list_start:
self.consume()
self.list_value()
self.eof_or_end_group()
elif self.token[0] == token_types.atom:
self.atom()
else:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number,
f"Token '{self.token[0]}' is not a known type")
def list_value(self):
self.tree.append(ListNode())
while self.token[0] in (token_types.atom, token_types.string):
if self.token[0] == token_types.atom:
self.atom()
else:
self.value()
self.expect(token_types.list_end)
self.tree.pop()
def expression_values(self):
while self.token == (token_types.ident, "if"):
self.consume()
self.tree.append(ConditionalNode())
self.expr_start()
self.expect(token_types.separator)
self.value_block()
self.tree.pop()
def value(self):
self.tree.append(ValueNode(self.token[1]))
self.consume()
self.tree.pop()
def atom(self):
if self.token[1] not in atoms:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised symbol @%s" % self.token[1])
self.tree.append(AtomNode(atoms[self.token[1]]))
self.consume()
self.tree.pop()
def expr_start(self):
self.expr_builder = ExpressionBuilder(self.tokenizer)
self.expr_builders.append(self.expr_builder)
self.expr()
expression = self.expr_builder.finish()
self.expr_builders.pop()
self.expr_builder = self.expr_builders[-1] if self.expr_builders else None
if self.expr_builder:
self.expr_builder.operands[-1].children[-1].append(expression)
else:
self.tree.append(expression)
self.tree.pop()
def expr(self):
self.expr_operand()
while (self.token[0] == token_types.ident and self.token[1] in binary_operators):
self.expr_bin_op()
self.expr_operand()
def expr_operand(self):
if self.token == (token_types.paren, "("):
self.consume()
self.expr_builder.left_paren()
self.expr()
self.expect(token_types.paren, ")")
self.expr_builder.right_paren()
elif self.token[0] == token_types.ident and self.token[1] in unary_operators:
self.expr_unary_op()
self.expr_operand()
elif self.token[0] in [token_types.string, token_types.ident]:
self.expr_value()
elif self.token[0] == token_types.number:
self.expr_number()
else:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Unrecognised operand")
def expr_unary_op(self):
if self.token[1] in unary_operators:
self.expr_builder.push_operator(UnaryOperatorNode(self.token[1]))
self.consume()
else:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected unary operator")
def expr_bin_op(self):
if self.token[1] in binary_operators:
self.expr_builder.push_operator(BinaryOperatorNode(self.token[1]))
self.consume()
else:
raise ParseError(self.tokenizer.filename, self.tokenizer.line_number, "Expected binary operator")
def expr_value(self):
node_type = {token_types.string: StringNode,
token_types.ident: VariableNode}[self.token[0]]
self.expr_builder.push_operand(node_type(self.token[1]))
self.consume()
if self.token == (token_types.paren, "["):
self.consume()
self.expr_builder.operands[-1].append(IndexNode())
self.expr_start()
self.expect(token_types.paren, "]")
def expr_number(self):
self.expr_builder.push_operand(NumberNode(self.token[1]))
self.consume()
class Treebuilder:
def __init__(self, root):
self.root = root
self.node = root
def append(self, node):
assert isinstance(node, Node)
self.node.append(node)
self.node = node
assert self.node is not None
return node
def pop(self):
node = self.node
self.node = self.node.parent
assert self.node is not None
return node
class ExpressionBuilder:
def __init__(self, tokenizer):
self.operands = []
self.operators = [None]
self.tokenizer = tokenizer
def finish(self):
while self.operators[-1] is not None:
self.pop_operator()
rv = self.pop_operand()
assert self.is_empty()
return rv
def left_paren(self):
self.operators.append(None)
def right_paren(self):
while self.operators[-1] is not None:
self.pop_operator()
if not self.operators:
raise ParseError(self.tokenizer.filename, self.tokenizer.line,
"Unbalanced parens")
assert self.operators.pop() is None
def push_operator(self, operator):
assert operator is not None
while self.precedence(self.operators[-1]) > self.precedence(operator):
self.pop_operator()
self.operators.append(operator)
def pop_operator(self):
operator = self.operators.pop()
if isinstance(operator, BinaryOperatorNode):
operand_1 = self.operands.pop()
operand_0 = self.operands.pop()
self.operands.append(BinaryExpressionNode(operator, operand_0, operand_1))
else:
operand_0 = self.operands.pop()
self.operands.append(UnaryExpressionNode(operator, operand_0))
def push_operand(self, node):
self.operands.append(node)
def pop_operand(self):
return self.operands.pop()
def is_empty(self):
return len(self.operands) == 0 and all(item is None for item in self.operators)
def precedence(self, operator):
if operator is None:
return 0
return precedence(operator)
def parse(stream):
p = Parser()
return p.parse(stream)
|
{
"content_hash": "16a7aed153b0561664c5dad87b868ae2",
"timestamp": "",
"source": "github",
"line_count": 771,
"max_line_length": 141,
"avg_line_length": 32.82101167315175,
"alnum_prop": 0.5191859316340645,
"repo_name": "nwjs/chromium.src",
"id": "c0ef9b4dd33d43722c3ea8b8d5f755605f7672af",
"size": "25538",
"binary": false,
"copies": "3",
"ref": "refs/heads/nw70",
"path": "third_party/wpt_tools/wpt/tools/wptrunner/wptrunner/wptmanifest/parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import print_function
import logging
logging.basicConfig()
import av
from av.codec import CodecContext, CodecParser
from av.video import VideoFrame
from av.packet import Packet
cc = CodecContext.create('mpeg4', 'r')
print(cc)
fh = open('test.mp4', 'r')
frame_count = 0
while True:
chunk = fh.read(819200)
for packet in cc.parse(chunk or None, allow_stream=True):
print(packet)
for frame in cc.decode(packet) or ():
print(frame)
img = frame.to_image()
img.save('sandbox/test.%04d.jpg' % frame_count)
frame_count += 1
if not chunk:
break # EOF!
|
{
"content_hash": "e42fc4374a0542f1a5ac79e65796d18b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 61,
"avg_line_length": 19.818181818181817,
"alnum_prop": 0.6299694189602446,
"repo_name": "mikeboers/PyAV",
"id": "67d70855e2a910e81e541adaf3e90343e903009f",
"size": "654",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "scratchpad/cctx_decode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1819"
},
{
"name": "Python",
"bytes": "517404"
},
{
"name": "Shell",
"bytes": "7128"
}
],
"symlink_target": ""
}
|
class IrProjectPipeline(object):
def process_item(self, item, spider):
return item
|
{
"content_hash": "18e5eb608325c2b6a611383cc253e4eb",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.6947368421052632,
"repo_name": "EliHar/InfoRetrievalSearch",
"id": "41befd2dcf5ff47f8546659e22a7adf1a555a0c0",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ir_project/ir_project/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36163"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('content', '0025_auto_20160426_0848'),
]
operations = [
migrations.RenameModel(
old_name='CustomPage1',
new_name='CustomPage',
),
]
|
{
"content_hash": "49fca9f34b4412aaf6e1d9785a707dd4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 47,
"avg_line_length": 19.941176470588236,
"alnum_prop": 0.5988200589970502,
"repo_name": "RachellCalhoun/lightandleadership",
"id": "ecc3771c95856663dda9f6e6439f94c04dbfb184",
"size": "363",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "content/migrations/0026_auto_20160428_0436.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13054"
},
{
"name": "HTML",
"bytes": "65394"
},
{
"name": "Python",
"bytes": "117216"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import random as rand
import numpy as np
data = np.reshape(rand.sample(range(10000), 1000), (1000,1))
label = data
#Setting configurations
n_nodes_hl1 = 3 #nodes in hidden layer 1
n_nodes_hl2 = 5 #nodes in hidden layer 2
n_nodes_hl3 = 3 #nodes in hidden layer 3
n_classes = 1 #number of classes = 1. Regression
batch_size = 100
x = tf.placeholder('float', [batch_size, None], name = 'input')
y = tf.placeholder('float') #the size is not specified (it can be anything)
#Defining the computation graph - the neural network model
def neural_network_model(data):
hidden_1_layer = {'weights':tf.Variable(tf.random_normal([1, n_nodes_hl1])), #randomly (Normal dist) initialized weights of size 784 x n_nodes_hl1
'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))} #randomly initialized weights (Normal distribution) of length n_nodes_hl1
hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'biases':tf.Variable(tf.random_normal([n_classes]))}
#forward pass
z1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
a1 = tf.nn.relu(z1)
z2 = tf.add(tf.matmul(a1, hidden_2_layer['weights']), hidden_2_layer['biases'])
a2 = tf.nn.relu(z2)
z3 = tf.add(tf.matmul(a2, hidden_3_layer['weights']), hidden_3_layer['biases'])
a3 = tf.nn.relu(z3)
yhat = tf.add(tf.matmul(a3, output_layer['weights']), output_layer['biases'], name = 'output')
return yhat
#defining the training
def train_neural_network(x):
prediction = neural_network_model(x)
cost = tf.reduce_mean(tf.square(prediction - y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
nEpochs = 100
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(nEpochs):
epoch_loss = 0
for batch in range(int(len(data)/batch_size)):
start = 0 + (batch) * batch_size
end = 100 + (batch) * batch_size
epoch_x = data[range(start, end)]
epoch_y = epoch_x
_, c = sess.run([optimizer, cost], feed_dict = {x: epoch_x, y: epoch_y})
epoch_loss += c
print('Epoch', epoch, 'completed out of', nEpochs, 'loss:', epoch_loss)
sess.run(init)
save_model(sess)
error = tf.reduce_mean(tf.square(prediction - y))
#accuracy = tf.reduce_mean(tf.cast(error, 'float'))
print('Error:', error)
#saving the trained model
def save_model(session):
saver = tf.train.Saver()
tf.global_variables_initializer().run()
#Saving as Protocol Buffer (pb)
saver.save(session, '/home/szi/Eclipse/Java/Tensorflow/Tensorflow_Java/Tensorflow_Load/src/resources/identity/model/chkpt', global_step = 0)
tf.train.write_graph(session.graph.as_graph_def(), '/home/szi/Eclipse/Java/Tensorflow/Tensorflow_Java/Tensorflow_Load/src/resources/identity/model/', 'model.pb', False)
#Saving as readable file
#saver.save(session, '/home/szi/Python/SublimeText/Tensorflow/tf_java2/model/trained_model.sd')
#tf.train.write_graph(session.graph_def, '.', '/home/szi/Python/SublimeText/Tensorflow/tf_java2/model/trained_model.proto', as_text = False)
#tf.train.write_graph(session.graph_def, '.', '/home/szi/Python/SublimeText/Tensorflow/tf_java2/model/trained_model.txt', as_text = True)
#Printing node names
#[print(n.name) for n in tf.get_default_graph().as_graph_def().node]
train_neural_network(x)
|
{
"content_hash": "f0498b48b37ccb9d4d67f8c9115eea12",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 169,
"avg_line_length": 39.82222222222222,
"alnum_prop": 0.705078125,
"repo_name": "derdav3/tf-sparql",
"id": "b0116e76a91e791f629e9a2a0ef01a0b84f9e384",
"size": "3584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ml_models/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "1854"
},
{
"name": "Python",
"bytes": "323046"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
}
|
import itertools
import json
import os
import subprocess as sp
import time
from nltk.corpus import treebank
from nltk.metrics import accuracy
from nltk.tag.api import TaggerI
from nltk.tag.perceptron import PerceptronTagger
from nltk.tag.util import untag
from tabulate import tabulate
AP_TIME = []
def pipe_through_prog(prog, text):
global AP_TIME
cmd = ['go', 'run'] + prog.split()
for _ in range(5):
p1 = sp.Popen(cmd, stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.PIPE)
now = time.time()
[result, err] = p1.communicate(input=text.encode('utf-8'))
AP_TIME.append(time.time() - now)
tags = [(t['Text'], t['Tag']) for t in json.loads(result.decode('utf-8'))]
return [p1.returncode, tags, err]
class APTagger(TaggerI):
"""A wrapper around the aptag Go library.
"""
def tag(self, tokens):
prog = os.path.join('scripts', 'main.go')
_, tags, _ = pipe_through_prog(prog, ' '.join(tokens))
return tags
def tag_sents(self, sentences):
text = []
for s in sentences:
text.append(' '.join(s))
return self.tag(text)
def evaluate(self, gold):
tagged_sents = self.tag_sents(untag(sent) for sent in gold)
gold_tokens = list(itertools.chain(*gold))
print(json.dumps(gold_tokens))
print(len(tagged_sents), len(gold_tokens))
return accuracy(gold_tokens, tagged_sents)
if __name__ == '__main__':
sents = treebank.tagged_sents()
PT = PerceptronTagger()
print("Timing NLTK ...")
pt_times = []
for _ in range(5):
now = time.time()
PT.tag_sents(untag(sent) for sent in sents)
pt_times.append(time.time() - now)
pt_time = round(sum(pt_times) / len(pt_times), 3)
'''NOTE: Moved to tag_test.go
print("Timing prose ...")
acc = round(APTagger().evaluate(sents), 3)
ap_time = round(sum(AP_TIME) / len(AP_TIME), 3)
'''
print("Evaluating accuracy ...")
headers = ['Library', 'Accuracy', '5-Run Average (sec)']
table = [
['NLTK', round(PT.evaluate(sents), 3), pt_time],
# ['`prose`', acc, ap_time]
]
print(tabulate(table, headers, tablefmt='pipe'))
|
{
"content_hash": "ee710bc03acad3675f134ddaded9b41c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 29,
"alnum_prop": 0.6039019963702359,
"repo_name": "jdkato/prose",
"id": "ef8b30e8d0062bc6763444409b5d576ea1f95616",
"size": "2204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/test_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "88397"
},
{
"name": "Makefile",
"bytes": "482"
},
{
"name": "Python",
"bytes": "4175"
},
{
"name": "Shell",
"bytes": "1432"
}
],
"symlink_target": ""
}
|
__version__='3.3.0'
__doc__="""Collection of axes for charts.
The current collection comprises axes for charts using cartesian
coordinate systems. All axes might have tick marks and labels.
There are two dichotomies for axes: one of X and Y flavours and
another of category and value flavours.
Category axes have an ordering but no metric. They are divided
into a number of equal-sized buckets. Their tick marks or labels,
if available, go BETWEEN the buckets, and the labels are placed
below to/left of the X/Y-axis, respectively.
Value axes have an ordering AND metric. They correspond to a nu-
meric quantity. Value axis have a real number quantity associated
with it. The chart tells it where to go.
The most basic axis divides the number line into equal spaces
and has tickmarks and labels associated with each; later we
will add variants where you can specify the sampling
interval.
The charts using axis tell them where the labels should be placed.
Axes of complementary X/Y flavours can be connected to each other
in various ways, i.e. with a specific reference point, like an
x/value axis to a y/value (or category) axis. In this case the
connection can be either at the top or bottom of the former or
at any absolute value (specified in points) or at some value of
the former axes in its own coordinate system.
"""
from reportlab.lib.validators import isNumber, isNumberOrNone, isListOfStringsOrNone, isListOfNumbers, \
isListOfNumbersOrNone, isColorOrNone, OneOf, isBoolean, SequenceOf, \
isString, EitherOr, Validator, NoneOr, isInstanceOf, \
isNormalDate, isNoneOrCallable
from reportlab.lib.attrmap import *
from reportlab.lib import normalDate
from reportlab.graphics.shapes import Drawing, Line, PolyLine, Rect, Group, STATE_DEFAULTS, _textBoxLimits, _rotatedBoxLimits
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection
from reportlab.graphics.charts.textlabels import Label, PMVLabel
from reportlab.graphics.charts.utils import nextRoundNumber
from reportlab.graphics.widgets.grids import ShadedRect
from reportlab.lib.colors import Color
from reportlab.lib.utils import isSeq
import copy
try:
reduce # Python 2.x
except NameError:
from functools import reduce
# Helpers.
def _findMinMaxValue(V, x, default, func, special=None):
if isSeq(V[0][0]):
if special:
f=lambda T,x=x,special=special,func=func: special(T,x,func)
else:
f=lambda T,x=x: T[x]
V=list(map(lambda e,f=f: list(map(f,e)),V))
V = list(filter(len,[[x for x in x if x is not None] for x in V]))
if len(V)==0: return default
return func(list(map(func,V)))
def _findMin(V, x, default,special=None):
'''find minimum over V[i][x]'''
return _findMinMaxValue(V,x,default,min,special=special)
def _findMax(V, x, default,special=None):
'''find maximum over V[i][x]'''
return _findMinMaxValue(V,x,default,max,special=special)
def _allInt(values):
'''true if all values are int'''
for v in values:
try:
if int(v)!=v: return 0
except:
return 0
return 1
class AxisLabelAnnotation:
'''Create a grid like line using the given user value to draw the line
v value to use
kwds may contain
scaleValue True/not given --> scale the value
otherwise use the absolute value
labelClass the label class to use default Label
all Label keywords are acceptable (including say _text)
'''
def __init__(self,v,**kwds):
self._v = v
self._kwds = kwds
def __call__(self,axis):
kwds = self._kwds.copy()
labelClass = kwds.pop('labelClass',Label)
scaleValue = kwds.pop('scaleValue',True)
if not hasattr(axis,'_tickValues'):
axis._pseudo_configure()
sv = (axis.scale if scaleValue else lambda x: x)(self._v)
if axis.isYAxis:
y = axis._x
x = sv
else:
x = sv
y = axis._y
kwds['x'] = x
kwds['y'] = y
return labelClass(**kwds)
class AxisLineAnnotation:
'''Create a grid like line using the given user value to draw the line
kwds may contain
startOffset if true v is offset from the default grid start position
endOffset if true v is offset from the default grid end position
scaleValue True/not given --> scale the value
otherwise use the absolute value
lo lowest coordinate to draw default 0
hi highest coordinate to draw at default = length
drawAtLimit True draw line at appropriate limit if its coordinate exceeds the lo, hi range
False ignore if it's outside the range
all Line keywords are acceptable
'''
def __init__(self,v,**kwds):
self._v = v
self._kwds = kwds
def __call__(self,axis):
kwds = self._kwds.copy()
scaleValue = kwds.pop('scaleValue',True)
endOffset = kwds.pop('endOffset',False)
startOffset = kwds.pop('startOffset',False)
if axis.isYAxis:
offs = axis._x
d0 = axis._y
else:
offs = axis._y
d0 = axis._x
s = kwds.pop('start',None)
e = kwds.pop('end',None)
if s is None or e is None:
dim = getattr(getattr(axis,'joinAxis',None),'getGridDims',None)
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
hi = kwds.pop('hi',axis._length)+d0
lo = kwds.pop('lo',0)+d0
lo,hi=min(lo,hi),max(lo,hi)
drawAtLimit = kwds.pop('drawAtLimit',False)
oaglp = axis._get_line_pos
if not scaleValue:
axis._get_line_pos = lambda x: x
try:
v = self._v
if endOffset:
v = v + hi
elif startOffset:
v = v + lo
func = axis._getLineFunc(s-offs,e-offs,kwds.pop('parent',None))
if not hasattr(axis,'_tickValues'):
axis._pseudo_configure()
d = axis._get_line_pos(v)
if d<lo or d>hi:
if not drawAtLimit: return None
if d<lo:
d = lo
else:
d = hi
axis._get_line_pos = lambda x: d
L = func(v)
for k,v in kwds.items():
setattr(L,k,v)
finally:
axis._get_line_pos = oaglp
return L
class AxisBackgroundAnnotation:
'''Create a set of coloured bars on the background of a chart using axis ticks as the bar borders
colors is a set of colors to use for the background bars. A colour of None is just a skip.
Special effects if you pass a rect or Shaded rect instead.
'''
def __init__(self,colors,**kwds):
self._colors = colors
self._kwds = kwds
def __call__(self,axis):
colors = self._colors
if not colors: return
kwds = self._kwds.copy()
isYAxis = axis.isYAxis
if isYAxis:
offs = axis._x
d0 = axis._y
else:
offs = axis._y
d0 = axis._x
s = kwds.pop('start',None)
e = kwds.pop('end',None)
if s is None or e is None:
dim = getattr(getattr(axis,'joinAxis',None),'getGridDims',None)
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if not hasattr(axis,'_tickValues'):
axis._pseudo_configure()
tv = getattr(axis,'_tickValues',None)
if not tv: return
G = Group()
ncolors = len(colors)
v0 = axis._get_line_pos(tv[0])
for i in range(1,len(tv)):
v1 = axis._get_line_pos(tv[i])
c = colors[(i-1)%ncolors]
if c:
if isYAxis:
y = v0
x = s
height = v1-v0
width = e-s
else:
x = v0
y = s
width = v1-v0
height = e-s
if isinstance(c,Color):
r = Rect(x,y,width,height,fillColor=c,strokeColor=None)
elif isinstance(c,Rect):
r = Rect(x,y,width,height)
for k in c.__dict__:
if k not in ('x','y','width','height'):
setattr(r,k,getattr(c,k))
elif isinstance(c,ShadedRect):
r = ShadedRect(x=x,y=y,width=width,height=height)
for k in c.__dict__:
if k not in ('x','y','width','height'):
setattr(r,k,getattr(c,k))
G.add(r)
v0 = v1
return G
class TickLU:
'''lookup special cases for tick values'''
def __init__(self,*T,**kwds):
self.accuracy = kwds.pop('accuracy',1e-8)
self.T = T
def __contains__(self,t):
accuracy = self.accuracy
for x,v in self.T:
if abs(x-t)<accuracy:
return True
return False
def __getitem__(self,t):
accuracy = self.accuracy
for x,v in self.T:
if abs(x-t)<self.accuracy:
return v
raise IndexError('cannot locate index %r' % t)
class _AxisG(Widget):
def _get_line_pos(self,v):
v = self.scale(v)
try:
v = v[0]
except:
pass
return v
def _cxLine(self,x,start,end):
x = self._get_line_pos(x)
return Line(x, self._y + start, x, self._y + end)
def _cyLine(self,y,start,end):
y = self._get_line_pos(y)
return Line(self._x + start, y, self._x + end, y)
def _cxLine3d(self,x,start,end,_3d_dx,_3d_dy):
x = self._get_line_pos(x)
y0 = self._y + start
y1 = self._y + end
y0, y1 = min(y0,y1),max(y0,y1)
x1 = x + _3d_dx
return PolyLine([x,y0,x1,y0+_3d_dy,x1,y1+_3d_dy],strokeLineJoin=1)
def _cyLine3d(self,y,start,end,_3d_dx,_3d_dy):
y = self._get_line_pos(y)
x0 = self._x + start
x1 = self._x + end
x0, x1 = min(x0,x1),max(x0,x1)
y1 = y + _3d_dy
return PolyLine([x0,y,x0+_3d_dx,y1,x1+_3d_dx,y1],strokeLineJoin=1)
def _getLineFunc(self, start, end, parent=None):
_3d_dx = getattr(parent,'_3d_dx',None)
if _3d_dx is not None:
_3d_dy = getattr(parent,'_3d_dy',None)
f = self.isYAxis and self._cyLine3d or self._cxLine3d
return lambda v, s=start, e=end, f=f,_3d_dx=_3d_dx,_3d_dy=_3d_dy: f(v,s,e,_3d_dx=_3d_dx,_3d_dy=_3d_dy)
else:
f = self.isYAxis and self._cyLine or self._cxLine
return lambda v, s=start, e=end, f=f: f(v,s,e)
def _makeLines(self,g,start,end,strokeColor,strokeWidth,strokeDashArray,strokeLineJoin,strokeLineCap,strokeMiterLimit,parent=None,exclude=[],specials={}):
func = self._getLineFunc(start,end,parent)
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
if exclude:
exf = self.isYAxis and (lambda l: l.y1 in exclude) or (lambda l: l.x1 in exclude)
else:
exf = None
for t in self._tickValues:
L = func(t)
if exf and exf(L): continue
L.strokeColor = strokeColor
L.strokeWidth = strokeWidth
L.strokeDashArray = strokeDashArray
L.strokeLineJoin = strokeLineJoin
L.strokeLineCap = strokeLineCap
L.strokeMiterLimit = strokeMiterLimit
if t in specials:
for a,v in specials[t].items():
setattr(L,a,v)
g.add(L)
def makeGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
c = self.gridStrokeColor
w = self.gridStrokeWidth or 0
if w and c and self.visibleGrid:
s = self.gridStart
e = self.gridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
self._makeLines(g,s-offs,e-offs,c,w,self.gridStrokeDashArray,self.gridStrokeLineJoin,self.gridStrokeLineCap,self.gridStrokeMiterLimit,parent=parent,exclude=exclude,specials=getattr(self,'_gridSpecials',{}))
self._makeSubGrid(g,dim,parent,exclude=[])
def _makeSubGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
if not (getattr(self,'visibleSubGrid',0) and self.subTickNum>0): return
c = self.subGridStrokeColor
w = self.subGridStrokeWidth or 0
if not(w and c): return
s = self.subGridStart
e = self.subGridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
otv = self._calcSubTicks()
try:
self._makeLines(g,s-offs,e-offs,c,w,self.subGridStrokeDashArray,self.subGridStrokeLineJoin,self.subGridStrokeLineCap,self.subGridStrokeMiterLimit,parent=parent,exclude=exclude)
finally:
self._tickValues = otv
def getGridDims(self,start=None,end=None):
if start is None: start = (self._x,self._y)[self.isYAxis]
if end is None: end = start+self._length
return start,end
def isYAxis(self):
if getattr(self,'_dataIndex',None)==1: return True
acn = self.__class__.__name__
return acn[0]=='Y' or acn[:4]=='AdjY'
isYAxis = property(isYAxis)
def isXAxis(self):
if getattr(self,'_dataIndex',None)==0: return True
acn = self.__class__.__name__
return acn[0]=='X' or acn[:11]=='NormalDateX'
isXAxis = property(isXAxis)
def addAnnotations(self,g,A=None):
if A is None: getattr(self,'annotations',[])
for x in A:
g.add(x(self))
def _splitAnnotations(self):
A = getattr(self,'annotations',[])[:]
D = {}
for v in ('early','beforeAxis','afterAxis','beforeTicks',
'afterTicks','beforeTickLabels',
'afterTickLabels','late'):
R = [].append
P = [].append
for a in A:
if getattr(a,v,0):
R(a)
else:
P(a)
D[v] = R.__self__
A[:] = P.__self__
D['late'] += A
return D
def draw(self):
g = Group()
A = self._splitAnnotations()
self.addAnnotations(g,A['early'])
if self.visible:
self.addAnnotations(g,A['beforeAxis'])
g.add(self.makeAxis())
self.addAnnotations(g,A['afterAxis'])
self.addAnnotations(g,A['beforeTicks'])
g.add(self.makeTicks())
self.addAnnotations(g,A['afterTicks'])
self.addAnnotations(g,A['beforeTickLabels'])
g.add(self.makeTickLabels())
self.addAnnotations(g,A['afterTickLabels'])
self.addAnnotations(g,A['late'])
return g
class CALabel(PMVLabel):
_attrMap = AttrMap(BASE=PMVLabel,
labelPosFrac = AttrMapValue(isNumber, desc='where in the category range [0,1] the labels should be anchored'),
)
def __init__(self,**kw):
PMVLabel.__init__(self,**kw)
self._setKeywords(
labelPosFrac = 0.5,
)
# Category axes.
class CategoryAxis(_AxisG):
"Abstract category axis, unusable in itself."
_nodoc = 1
_attrMap = AttrMap(
visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
labels = AttrMapValue(None, desc='Handle of the axis labels.'),
categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
joinAxis = AttrMapValue(None, desc='Join both axes if true.'),
joinAxisPos = AttrMapValue(isNumberOrNone, desc='Position at which to join with other axis.'),
reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
style = AttrMapValue(OneOf('parallel','stacked','parallel_3d'),"How common category bars are plotted"),
labelAxisMode = AttrMapValue(OneOf('high','low','axis', 'axispmv'), desc="Like joinAxisMode, but for the axis labels"),
tickShift = AttrMapValue(isBoolean, desc='Tick shift typically'),
loPad = AttrMapValue(isNumber, desc='extra inner space before start of the axis'),
hiPad = AttrMapValue(isNumber, desc='extra inner space after end of the axis'),
annotations = AttrMapValue(None,desc='list of annotations'),
loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
skipGrid = AttrMapValue(OneOf('none','top','both','bottom'),"grid lines to skip top bottom both none"),
innerTickDraw = AttrMapValue(isNoneOrCallable, desc="Callable to replace _drawInnerTicks"),
)
def __init__(self):
assert self.__class__.__name__!='CategoryAxis', "Abstract Class CategoryAxis Instantiated"
# private properties set by methods. The initial values
# here are to make demos easy; they would always be
# overridden in real life.
self._x = 50
self._y = 50
self._length = 100
self._catCount = 0
# public properties
self.visible = 1
self.visibleAxis = 1
self.visibleTicks = 1
self.visibleLabels = 1
self.visibleGrid = 0
self.drawGridLast = False
self.strokeWidth = 1
self.strokeColor = STATE_DEFAULTS['strokeColor']
self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
self.gridStrokeLineJoin = self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
self.gridStrokeLineCap = self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
self.gridStrokeMiterLimit = self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
self.gridStrokeWidth = 0.25
self.gridStrokeColor = STATE_DEFAULTS['strokeColor']
self.gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray']
self.gridStart = self.gridEnd = None
self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
self.labels = TypedPropertyCollection(CALabel)
# if None, they don't get labels. If provided,
# you need one name per data point and they are
# used for label text.
self.categoryNames = None
self.joinAxis = None
self.joinAxisPos = None
self.joinAxisMode = None
self.labelAxisMode = 'axis'
self.reverseDirection = 0
self.style = 'parallel'
#various private things which need to be initialized
self._labelTextFormat = None
self.tickShift = 0
self.loPad = 0
self.hiPad = 0
self.loLLen = 0
self.hiLLen = 0
def setPosition(self, x, y, length):
# ensure floating point
self._x = float(x)
self._y = float(y)
self._length = float(length)
def configure(self, multiSeries,barWidth=None):
self._catCount = max(list(map(len,multiSeries)))
self._barWidth = barWidth or ((self._length-self.loPad-self.hiPad)/float(self._catCount or 1))
self._calcTickmarkPositions()
if self.labelAxisMode == 'axispmv':
self._pmv = [sum([series[i] for series in multiSeries]) for i in xrange(self._catCount)]
def _calcTickmarkPositions(self):
n = self._catCount
if self.tickShift:
self._tickValues = [t+0.5 for t in range(n)]
else:
if self.reverseDirection:
self._tickValues = list(range(-1,n))
else:
self._tickValues = list(range(n+1))
def _scale(self,idx):
if self.reverseDirection: idx = self._catCount-idx-1
return idx
def _assertYAxis(axis):
assert axis.isYAxis, "Cannot connect to other axes (%s), but Y- ones." % axis.__class__.__name__
def _assertXAxis(axis):
assert axis.isXAxis, "Cannot connect to other axes (%s), but X- ones." % axis.__class__.__name__
class _XTicks:
_tickTweaks = 0 #try 0.25-0.5
def _drawTicksInner(self,tU,tD,g):
itd = getattr(self,'innerTickDraw',None)
if itd:
itd(self,tU,tD,g)
elif tU or tD:
sW = self.strokeWidth
tW = self._tickTweaks
if tW:
if tU and not tD:
tD = tW*sW
elif tD and not tU:
tU = tW*sW
self._makeLines(g,tU,-tD,self.strokeColor,sW,self.strokeDashArray,self.strokeLineJoin,self.strokeLineCap,self.strokeMiterLimit)
def _drawTicks(self,tU,tD,g=None):
g = g or Group()
if self.visibleTicks:
self._drawTicksInner(tU,tD,g)
return g
def _calcSubTicks(self):
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
otv = self._tickValues
if not hasattr(self,'_subTickValues'):
acn = self.__class__.__name__
if acn[:11]=='NormalDateX':
iFuzz = 0
dCnv = int
else:
iFuzz = 1e-8
dCnv = lambda x:x
OTV = [tv for tv in otv if getattr(tv,'_doSubTicks',1)]
T = [].append
nst = int(self.subTickNum)
i = len(OTV)
if i<2:
self._subTickValues = []
else:
if i==2:
dst = OTV[1]-OTV[0]
elif i==3:
dst = max(OTV[1]-OTV[0],OTV[2]-OTV[1])
else:
i >>= 1
dst = OTV[i+1] - OTV[i]
fuzz = dst*iFuzz
vn = self._valueMin+fuzz
vx = self._valueMax-fuzz
if OTV[0]>vn: OTV.insert(0,OTV[0]-dst)
if OTV[-1]<vx: OTV.append(OTV[-1]+dst)
dst /= float(nst+1)
for i,x in enumerate(OTV[:-1]):
for j in range(nst):
t = x+dCnv((j+1)*dst)
if t<=vn or t>=vx: continue
T(t)
self._subTickValues = T.__self__
self._tickValues = self._subTickValues
return otv
def _drawSubTicks(self,tU,tD,g):
if getattr(self,'visibleSubTicks',0) and self.subTickNum>0:
otv = self._calcSubTicks()
try:
self._subTicking = 1
self._drawTicksInner(tU,tD,g)
finally:
del self._subTicking
self._tickValues = otv
def makeTicks(self):
yold=self._y
try:
self._y = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
g = self._drawTicks(self.tickUp,self.tickDown)
self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
return g
finally:
self._y = yold
def _labelAxisPos(self,mode=None):
axis = self.joinAxis
if axis:
mode = mode or self.labelAxisMode
if mode == 'low':
return axis._y
elif mode == 'high':
return axis._y + axis._length
return self._y
class _YTicks(_XTicks):
def _labelAxisPos(self,mode=None):
axis = self.joinAxis
if axis:
mode = mode or self.labelAxisMode
if mode == 'low':
return axis._x
elif mode == 'high':
return axis._x + axis._length
return self._x
def makeTicks(self):
xold=self._x
try:
self._x = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
g = self._drawTicks(self.tickRight,self.tickLeft)
self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
return g
finally:
self._x = xold
class XCategoryAxis(_XTicks,CategoryAxis):
"X/category axis"
_attrMap = AttrMap(BASE=CategoryAxis,
tickUp = AttrMapValue(isNumber,
desc='Tick length up the axis.'),
tickDown = AttrMapValue(isNumber,
desc='Tick length down the axis.'),
joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
)
_dataIndex = 0
def __init__(self):
CategoryAxis.__init__(self)
self.labels.boxAnchor = 'n' #north - top edge
self.labels.dy = -5
# ultra-simple tick marks for now go between categories
# and have same line style as axis - need more
self.tickUp = 0 # how far into chart does tick go?
self.tickDown = 5 # how far below axis does tick go?
def demo(self):
self.setPosition(30, 70, 140)
self.configure([(10,20,30,40,50)])
self.categoryNames = ['One','Two','Three','Four','Five']
# all labels top-centre aligned apart from the last
self.labels.boxAnchor = 'n'
self.labels[4].boxAnchor = 'e'
self.labels[4].angle = 90
d = Drawing(200, 100)
d.add(self)
return d
def joinToAxis(self, yAxis, mode='bottom', pos=None):
"Join with y-axis using some mode."
_assertYAxis(yAxis)
if mode == 'bottom':
self._y = yAxis._y
elif mode == 'top':
self._y = yAxis._y + yAxis._length
elif mode == 'value':
self._y = yAxis.scale(pos)
elif mode == 'points':
self._y = pos
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
if jam in ('bottom', 'top'):
self.joinToAxis(ja, mode=jam)
elif jam in ('value', 'points'):
self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)
def scale(self, idx):
"""returns the x position and width in drawing units of the slice"""
return (self._x + self.loPad + self._scale(idx)*self._barWidth, self._barWidth)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
def makeTickLabels(self):
g = Group()
if not self.visibleLabels: return g
categoryNames = self.categoryNames
if categoryNames is not None:
catCount = self._catCount
n = len(categoryNames)
reverseDirection = self.reverseDirection
barWidth = self._barWidth
_y = self._labelAxisPos()
_x = self._x
pmv = self._pmv if self.labelAxisMode=='axispmv' else None
for i in range(catCount):
if reverseDirection: ic = catCount-i-1
else: ic = i
if ic>=n: continue
label=i-catCount
if label in self.labels:
label = self.labels[label]
else:
label = self.labels[i]
if pmv:
_dy = label.dy
v = label._pmv = pmv[ic]
if v<0: _dy *= -2
else:
_dy = 0
lpf = label.labelPosFrac
x = _x + (i+lpf) * barWidth
label.setOrigin(x,_y+_dy)
label.setText(categoryNames[ic] or '')
g.add(label)
return g
class YCategoryAxis(_YTicks,CategoryAxis):
"Y/category axis"
_attrMap = AttrMap(BASE=CategoryAxis,
tickLeft = AttrMapValue(isNumber,
desc='Tick length left of the axis.'),
tickRight = AttrMapValue(isNumber,
desc='Tick length right of the axis.'),
joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
)
_dataIndex = 1
def __init__(self):
CategoryAxis.__init__(self)
self.labels.boxAnchor = 'e' #east - right edge
self.labels.dx = -5
# ultra-simple tick marks for now go between categories
# and have same line style as axis - need more
self.tickLeft = 5 # how far left of axis does tick go?
self.tickRight = 0 # how far right of axis does tick go?
def demo(self):
self.setPosition(50, 10, 80)
self.configure([(10,20,30)])
self.categoryNames = ['One','Two','Three']
# all labels top-centre aligned apart from the last
self.labels.boxAnchor = 'e'
self.labels[2].boxAnchor = 's'
self.labels[2].angle = 90
d = Drawing(200, 100)
d.add(self)
return d
def joinToAxis(self, xAxis, mode='left', pos=None):
"Join with x-axis using some mode."
_assertXAxis(xAxis)
if mode == 'left':
self._x = xAxis._x * 1.0
elif mode == 'right':
self._x = (xAxis._x + xAxis._length) * 1.0
elif mode == 'value':
self._x = xAxis.scale(pos) * 1.0
elif mode == 'points':
self._x = pos * 1.0
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
if jam in ('left', 'right'):
self.joinToAxis(ja, mode=jam)
elif jam in ('value', 'points'):
self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)
def scale(self, idx):
"Returns the y position and width in drawing units of the slice."
return (self._y + self._scale(idx)*self._barWidth, self._barWidth)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
def makeTickLabels(self):
g = Group()
if not self.visibleLabels: return g
categoryNames = self.categoryNames
if categoryNames is not None:
catCount = self._catCount
n = len(categoryNames)
reverseDirection = self.reverseDirection
barWidth = self._barWidth
labels = self.labels
_x = self._labelAxisPos()
_y = self._y
pmv = self._pmv if self.labelAxisMode=='axispmv' else None
for i in range(catCount):
if reverseDirection: ic = catCount-i-1
else: ic = i
if ic>=n: continue
label=i-catCount
if label in self.labels:
label = self.labels[label]
else:
label = self.labels[i]
lpf = label.labelPosFrac
y = _y + (i+lpf) * barWidth
if pmv:
_dx = label.dx
v = label._pmv = pmv[ic]
if v<0: _dx *= -2
else:
_dx = 0
label.setOrigin(_x+_dx, y)
label.setText(categoryNames[ic] or '')
g.add(label)
return g
class TickLabeller:
'''Abstract base class which may be used to indicate a change
in the call signature for callable label formats
'''
def __call__(self,axis,value):
return 'Abstract class instance called'
#this matches the old python str behaviour
_defaultLabelFormatter = lambda x: '%.12g' % x
# Value axes.
class ValueAxis(_AxisG):
"Abstract value axis, unusable in itself."
_attrMap = AttrMap(
forceZero = AttrMapValue(EitherOr((isBoolean,OneOf('near'))), desc='Ensure zero in range if true.'),
visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
minimumTickSpacing = AttrMapValue(isNumber, desc='Minimum value for distance between ticks.'),
maximumTicks = AttrMapValue(isNumber, desc='Maximum number of ticks.'),
labels = AttrMapValue(None, desc='Handle of the axis labels.'),
labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
labelTextFormat = AttrMapValue(None, desc='Formatting string or function used for axis labels.'),
labelTextPostFormat = AttrMapValue(None, desc='Extra Formatting string.'),
labelTextScale = AttrMapValue(isNumberOrNone, desc='Scaling for label tick values.'),
valueMin = AttrMapValue(isNumberOrNone, desc='Minimum value on axis.'),
valueMax = AttrMapValue(isNumberOrNone, desc='Maximum value on axis.'),
valueStep = AttrMapValue(isNumberOrNone, desc='Step size used between ticks.'),
valueSteps = AttrMapValue(isListOfNumbersOrNone, desc='List of step sizes used between ticks.'),
avoidBoundFrac = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Fraction of interval to allow above and below.'),
avoidBoundSpace = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Space to allow above and below.'),
abf_ignore_zero = AttrMapValue(EitherOr((NoneOr(isBoolean),SequenceOf(isBoolean,emptyOK=0,lo=2,hi=2))), desc='Set to True to make the avoidBoundFrac calculations treat zero as non-special'),
rangeRound=AttrMapValue(OneOf('none','both','ceiling','floor'),'How to round the axis limits'),
zrangePref = AttrMapValue(isNumberOrNone, desc='Zero range axis limit preference.'),
style = AttrMapValue(OneOf('normal','stacked','parallel_3d'),"How values are plotted!"),
skipEndL = AttrMapValue(OneOf('none','start','end','both'), desc='Skip high/low tick labels'),
origShiftIPC = AttrMapValue(isNumberOrNone, desc='Lowest label shift interval ratio.'),
origShiftMin = AttrMapValue(isNumberOrNone, desc='Minimum amount to shift.'),
origShiftSpecialValue = AttrMapValue(isNumberOrNone, desc='special value for shift'),
tickAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the ticks"),
reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
annotations = AttrMapValue(None,desc='list of annotations'),
loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
subTickNum = AttrMapValue(isNumber, desc='Number of axis sub ticks, if >0'),
subTickLo = AttrMapValue(isNumber, desc='sub tick down or left'),
subTickHi = AttrMapValue(isNumber, desc='sub tick up or right'),
visibleSubTicks = AttrMapValue(isBoolean, desc='Display axis sub ticks, if true.'),
visibleSubGrid = AttrMapValue(isBoolean, desc='Display axis sub grid, if true.'),
subGridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
subGridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
subGridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
subGridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
subGridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
subGridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
subGridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
subGridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
keepTickLabelsInside = AttrMapValue(isBoolean, desc='Ensure tick labels do not project beyond bounds of axis if true'),
skipGrid = AttrMapValue(OneOf('none','top','both','bottom'),"grid lines to skip top bottom both none"),
requiredRange = AttrMapValue(isNumberOrNone, desc='Minimum required value range.'),
innerTickDraw = AttrMapValue(isNoneOrCallable, desc="Callable to replace _drawInnerTicks"),
)
def __init__(self,**kw):
assert self.__class__.__name__!='ValueAxis', 'Abstract Class ValueAxis Instantiated'
self._setKeywords(**kw)
self._setKeywords(
_configured = 0,
# private properties set by methods. The initial values
# here are to make demos easy; they would always be
# overridden in real life.
_x = 50,
_y = 50,
_length = 100,
# public properties
visible = 1,
visibleAxis = 1,
visibleLabels = 1,
visibleTicks = 1,
visibleGrid = 0,
forceZero = 0,
strokeWidth = 1,
strokeColor = STATE_DEFAULTS['strokeColor'],
strokeDashArray = STATE_DEFAULTS['strokeDashArray'],
strokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
strokeLineCap = STATE_DEFAULTS['strokeLineCap'],
strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
gridStrokeWidth = 0.25,
gridStrokeColor = STATE_DEFAULTS['strokeColor'],
gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
gridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
gridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
gridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
gridStart = None,
gridEnd = None,
drawGridLast = False,
visibleSubGrid = 0,
visibleSubTicks = 0,
subTickNum = 0,
subTickLo = 0,
subTickHi = 0,
subGridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
subGridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
subGridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
subGridStrokeWidth = 0.25,
subGridStrokeColor = STATE_DEFAULTS['strokeColor'],
subGridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
subGridStart = None,
subGridEnd = None,
labels = TypedPropertyCollection(Label),
keepTickLabelsInside = 0,
# how close can the ticks be?
minimumTickSpacing = 10,
maximumTicks = 7,
# a format string like '%0.2f'
# or a function which takes the value as an argument and returns a string
_labelTextFormat = None,
labelAxisMode = 'axis',
labelTextFormat = None,
labelTextPostFormat = None,
labelTextScale = None,
# if set to None, these will be worked out for you.
# if you override any or all of them, your values
# will be used.
valueMin = None,
valueMax = None,
valueStep = None,
avoidBoundFrac = None,
avoidBoundSpace = None,
abf_ignore_zero = False,
rangeRound = 'none',
zrangePref = 0,
style = 'normal',
skipEndL='none',
origShiftIPC = None,
origShiftMin = None,
origShiftSpecialValue = None,
tickAxisMode = 'axis',
reverseDirection=0,
loLLen=0,
hiLLen=0,
requiredRange=0,
)
self.labels.angle = 0
def setPosition(self, x, y, length):
# ensure floating point
self._x = float(x)
self._y = float(y)
self._length = float(length)
def configure(self, dataSeries):
"""Let the axis configure its scale and range based on the data.
Called after setPosition. Let it look at a list of lists of
numbers determine the tick mark intervals. If valueMin,
valueMax and valueStep are configured then it
will use them; if any of them are set to None it
will look at the data and make some sensible decision.
You may override this to build custom axes with
irregular intervals. It creates an internal
variable self._values, which is a list of numbers
to use in plotting.
"""
self._setRange(dataSeries)
self._configure_end()
def _configure_end(self):
self._calcTickmarkPositions()
self._calcScaleFactor()
self._configured = 1
def _getValueStepAndTicks(self, valueMin, valueMax,cache={}):
try:
K = (valueMin,valueMax)
r = cache[K]
except:
self._valueMin = valueMin
self._valueMax = valueMax
valueStep,T = self._calcStepAndTickPositions()
r = cache[K] = valueStep, T, valueStep*1e-8
return r
def _preRangeAdjust(self,valueMin,valueMax):
rr = self.requiredRange
if rr>0:
r = valueMax - valueMin
if r<rr:
m = 0.5*(valueMax+valueMin)
rr *= 0.5
y1 = min(m-rr,valueMin)
y2 = max(m+rr,valueMax)
if valueMin>=100 and y1<100:
y2 = y2 + 100 - y1
y1 = 100
elif valueMin>=0 and y1<0:
y2 = y2 - y1
y1 = 0
valueMin = self._cValueMin = y1
valueMax = self._cValueMax = y2
return valueMin,valueMax
def _setRange(self, dataSeries):
"""Set minimum and maximum axis values.
The dataSeries argument is assumed to be a list of data
vectors. Each vector is itself a list or tuple of numbers.
Returns a min, max tuple.
"""
oMin = valueMin = self.valueMin
oMax = valueMax = self.valueMax
if valueMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0)
if valueMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0)
if valueMin == valueMax:
if valueMax==0:
if oMin is None and oMax is None:
zrp = getattr(self,'zrangePref',0)
if zrp>0:
valueMax = zrp
valueMin = 0
elif zrp<0:
valueMax = 0
valueMin = zrp
else:
valueMax = 0.01
valueMin = -0.01
elif self.valueMin is None:
valueMin = -0.01
else:
valueMax = 0.01
else:
if valueMax>0:
valueMax = 1.2*valueMax
valueMin = 0.0
else:
valueMax = 0.0
valueMin = 1.2*valueMin
if getattr(self,'_bubblePlot',None):
bubbleMax = float(_findMax(dataSeries,2,0))
frac=.25
bubbleV=frac*(valueMax-valueMin)
self._bubbleV = bubbleV
self._bubbleMax = bubbleMax
self._bubbleRadius = frac*self._length
def special(T,x,func,bubbleV=bubbleV,bubbleMax=bubbleMax):
try:
v = T[2]
except IndexError:
v = bubbleMAx*0.1
bubbleV *= (v/bubbleMax)**0.5
return func(T[x]+bubbleV,T[x]-bubbleV)
if oMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0,special=special)
if oMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0,special=special)
valueMin, valueMax = self._preRangeAdjust(valueMin,valueMax)
rangeRound = self.rangeRound
cMin = valueMin
cMax = valueMax
forceZero = self.forceZero
if forceZero:
if forceZero=='near':
forceZero = min(abs(valueMin),abs(valueMax)) <= 5*(valueMax-valueMin)
if forceZero:
if valueMax<0: valueMax=0
elif valueMin>0: valueMin = 0
abf = self.avoidBoundFrac
do_rr = not getattr(self,'valueSteps',None)
do_abf = abf and do_rr
if not isSeq(abf):
abf = abf, abf
abfiz = getattr(self,'abf_ignore_zero', False)
if not isSeq(abfiz):
abfiz = abfiz, abfiz
do_rr = rangeRound is not 'none' and do_rr
if do_rr:
rrn = rangeRound in ['both','floor']
rrx = rangeRound in ['both','ceiling']
else:
rrn = rrx = 0
abS = self.avoidBoundSpace
do_abs = abS
if do_abs:
if not isSeq(abS):
abS = abS, abS
aL = float(self._length)
go = do_rr or do_abf or do_abs
cache = {}
iter = 0
while go and iter<=10:
iter += 1
go = 0
if do_abf or do_abs:
valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
if do_abf:
i0 = valueStep*abf[0]
i1 = valueStep*abf[1]
else:
i0 = i1 = 0
if do_abs:
sf = (valueMax-valueMin)/aL
i0 = max(i0,abS[0]*sf)
i1 = max(i1,abS[1]*sf)
if rrn: v = T[0]
else: v = valueMin
u = cMin-i0
if (abfiz[0] or abs(v)>fuzz) and v>=u+fuzz:
valueMin = u
go = 1
if rrx: v = T[-1]
else: v = valueMax
u = cMax+i1
if (abfiz[1] or abs(v)>fuzz) and v<=u-fuzz:
valueMax = u
go = 1
if do_rr:
valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
if rrn:
if valueMin<T[0]-fuzz:
valueMin = T[0]-valueStep
go = 1
else:
go = valueMin>=T[0]+fuzz
valueMin = T[0]
if rrx:
if valueMax>T[-1]+fuzz:
valueMax = T[-1]+valueStep
go = 1
else:
go = valueMax<=T[-1]-fuzz
valueMax = T[-1]
if iter and not go:
self._computedValueStep = valueStep
else:
self._computedValueStep = None
self._valueMin = valueMin
self._valueMax = valueMax
origShiftIPC = self.origShiftIPC
origShiftMin = self.origShiftMin
if origShiftMin is not None or origShiftIPC is not None:
origShiftSpecialValue = self.origShiftSpecialValue
self._calcValueStep()
valueMax, valueMin = self._valueMax, self._valueMin
if origShiftSpecialValue is None or abs(origShiftSpecialValue-valueMin)<1e-6:
if origShiftIPC:
m = origShiftIPC*self._valueStep
else:
m = 0
if origShiftMin:
m = max(m,(valueMax-valueMin)*origShiftMin/self._length)
self._valueMin -= m
self._rangeAdjust()
def _pseudo_configure(self):
self._valueMin = self.valueMin
self._valueMax = self.valueMax
self._configure_end()
def _rangeAdjust(self):
"""Override this if you want to alter the calculated range.
E.g. if want a minumamum range of 30% or don't want 100%
as the first point.
"""
pass
def _adjustAxisTicks(self):
'''Override if you want to put slack at the ends of the axis
eg if you don't want the last tick to be at the bottom etc
'''
pass
def _calcScaleFactor(self):
"""Calculate the axis' scale factor.
This should be called only *after* the axis' range is set.
Returns a number.
"""
self._scaleFactor = self._length / float(self._valueMax - self._valueMin)
return self._scaleFactor
def _calcStepAndTickPositions(self):
valueStep = getattr(self,'_computedValueStep',None)
if valueStep:
del self._computedValueStep
self._valueStep = valueStep
else:
self._calcValueStep()
valueStep = self._valueStep
valueMin = self._valueMin
valueMax = self._valueMax
fuzz = 1e-8*valueStep
rangeRound = self.rangeRound
i0 = int(float(valueMin)/valueStep)
v = i0*valueStep
if rangeRound in ('both','floor'):
if v>valueMin+fuzz: i0 -= 1
elif v<valueMin-fuzz: i0 += 1
i1 = int(float(valueMax)/valueStep)
v = i1*valueStep
if rangeRound in ('both','ceiling'):
if v<valueMax-fuzz: i1 += 1
elif v>valueMax+fuzz: i1 -= 1
return valueStep,[i*valueStep for i in range(i0,i1+1)]
def _calcTickPositions(self):
return self._calcStepAndTickPositions()[1]
def _calcTickmarkPositions(self):
"""Calculate a list of tick positions on the axis. Returns a list of numbers."""
self._tickValues = getattr(self,'valueSteps',None)
if self._tickValues: return self._tickValues
self._tickValues = self._calcTickPositions()
self._adjustAxisTicks()
return self._tickValues
def _calcValueStep(self):
'''Calculate _valueStep for the axis or get from valueStep.'''
if self.valueStep is None:
rawRange = self._valueMax - self._valueMin
rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing))
self._valueStep = nextRoundNumber(rawInterval)
else:
self._valueStep = self.valueStep
def _allIntTicks(self):
return _allInt(self._tickValues)
def makeTickLabels(self):
g = Group()
if not self.visibleLabels: return g
f = self._labelTextFormat # perhaps someone already set it
if f is None:
f = self.labelTextFormat or (self._allIntTicks() and '%.0f' or _defaultLabelFormatter)
elif f is str and self._allIntTicks(): f = '%.0f'
elif hasattr(f,'calcPlaces'):
f.calcPlaces(self._tickValues)
post = self.labelTextPostFormat
scl = self.labelTextScale
pos = [self._x, self._y]
d = self._dataIndex
pos[1-d] = self._labelAxisPos()
labels = self.labels
if self.skipEndL!='none':
if self.isXAxis:
sk = self._x
else:
sk = self._y
if self.skipEndL=='start':
sk = [sk]
else:
sk = [sk,sk+self._length]
if self.skipEndL=='end':
del sk[0]
else:
sk = []
nticks = len(self._tickValues)
nticks1 = nticks - 1
for i,tick in enumerate(self._tickValues):
label = i-nticks
if label in labels:
label = labels[label]
else:
label = labels[i]
if f and label.visible:
v = self.scale(tick)
if sk:
for skv in sk:
if abs(skv-v)<1e-6:
v = None
break
if v is not None:
if scl is not None:
t = tick*scl
else:
t = tick
if isinstance(f, str): txt = f % t
elif isSeq(f):
#it's a list, use as many items as we get
if i < len(f):
txt = f[i]
else:
txt = ''
elif hasattr(f,'__call__'):
if isinstance(f,TickLabeller):
txt = f(self,t)
else:
txt = f(t)
else:
raise ValueError('Invalid labelTextFormat %s' % f)
if post: txt = post % txt
pos[d] = v
label.setOrigin(*pos)
label.setText(txt)
#special property to ensure a label doesn't project beyond the bounds of an x-axis
if self.keepTickLabelsInside:
if isinstance(self, XValueAxis): #not done yet for y axes
a_x = self._x
if not i: #first one
x0, y0, x1, y1 = label.getBounds()
if x0 < a_x:
label = label.clone(dx=label.dx + a_x - x0)
if i==nticks1: #final one
a_x1 = a_x +self._length
x0, y0, x1, y1 = label.getBounds()
if x1 > a_x1:
label=label.clone(dx=label.dx-x1+a_x1)
g.add(label)
return g
def scale(self, value):
"""Converts a numeric value to a plotarea position.
The chart first configures the axis, then asks it to
"""
assert self._configured, "Axis cannot scale numbers before it is configured"
if value is None: value = 0
#this could be made more efficient by moving the definition of org and sf into the configuration
org = (self._x, self._y)[self._dataIndex]
sf = self._scaleFactor
if self.reverseDirection:
sf = -sf
org += self._length
return org + sf*(value - self._valueMin)
class XValueAxis(_XTicks,ValueAxis):
"X/value axis"
_attrMap = AttrMap(BASE=ValueAxis,
tickUp = AttrMapValue(isNumber,
desc='Tick length up the axis.'),
tickDown = AttrMapValue(isNumber,
desc='Tick length down the axis.'),
joinAxis = AttrMapValue(None,
desc='Join both axes if true.'),
joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
joinAxisPos = AttrMapValue(isNumberOrNone,
desc='Position at which to join with other axis.'),
)
# Indicate the dimension of the data we're interested in.
_dataIndex = 0
def __init__(self,**kw):
ValueAxis.__init__(self,**kw)
self.labels.boxAnchor = 'n'
self.labels.dx = 0
self.labels.dy = -5
self.tickUp = 0
self.tickDown = 5
self.joinAxis = None
self.joinAxisMode = None
self.joinAxisPos = None
def demo(self):
self.setPosition(20, 50, 150)
self.configure([(10,20,30,40,50)])
d = Drawing(200, 100)
d.add(self)
return d
def joinToAxis(self, yAxis, mode='bottom', pos=None):
"Join with y-axis using some mode."
_assertYAxis(yAxis)
if mode == 'bottom':
self._y = yAxis._y * 1.0
elif mode == 'top':
self._y = (yAxis._y + yAxis._length) * 1.0
elif mode == 'value':
self._y = yAxis.scale(pos) * 1.0
elif mode == 'points':
self._y = pos * 1.0
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode or 'bottom'
if jam in ('bottom', 'top'):
self.joinToAxis(ja, mode=jam)
elif jam in ('value', 'points'):
self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
#additional utilities to help specify calendar dates on which tick marks
#are to be plotted. After some thought, when the magic algorithm fails,
#we can let them specify a number of days-of-the-year to tick in any given
#year.
#################################################################################
#
# Preliminary support objects/functions for the axis used in time series charts
#
#################################################################################
_months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
_maxDays = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def parseDayAndMonth(dmstr):
"""This accepts and validates strings like "31-Dec" i.e. dates
of no particular year. 29 Feb is allowed. These can be used
for recurring dates. It returns a (dd, mm) pair where mm is the
month integer. If the text is not valid it raises an error.
"""
dstr, mstr = dmstr.split('-')
dd = int(dstr)
mstr = mstr.lower()
mm = _months.index(mstr) + 1
assert dd <= _maxDays[mm-1]
return (dd, mm)
class _isListOfDaysAndMonths(Validator):
"""This accepts and validates lists of strings like "31-Dec" i.e. dates
of no particular year. 29 Feb is allowed. These can be used
for recurring dates.
"""
def test(self,x):
if isSeq(x):
answer = True
for element in x:
try:
dd, mm = parseDayAndMonth(element)
except:
answer = False
return answer
else:
return False
def normalize(self,x):
#we store them as presented, it's the most presentable way
return x
isListOfDaysAndMonths = _isListOfDaysAndMonths()
_NDINTM = 1,2,3,6,12,24,60,120,180,240,300,360,420,480,540,600,720,840,960,1080,1200,2400
class NormalDateXValueAxis(XValueAxis):
"""An X axis applying additional rules.
Depending on the data and some built-in rules, the axis
displays normalDate values as nicely formatted dates.
The client chart should have NormalDate X values.
"""
_attrMap = AttrMap(BASE = XValueAxis,
bottomAxisLabelSlack = AttrMapValue(isNumber, desc="Fractional amount used to adjust label spacing"),
niceMonth = AttrMapValue(isBoolean, desc="Flag for displaying months 'nicely'."),
forceEndDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of last date value.'),
forceFirstDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of first date value.'),
forceDatesEachYear = AttrMapValue(isListOfDaysAndMonths, desc='List of dates in format "31-Dec",' +
'"1-Jan". If present they will always be used for tick marks in the current year, rather ' +
'than the dates chosen by the automatic algorithm. Hyphen compulsory, case of month optional.'),
xLabelFormat = AttrMapValue(None, desc="Label format string (e.g. '{mm}/{yy}') or function."),
dayOfWeekName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=7,hi=7), desc='Weekday names.'),
monthName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=12,hi=12), desc='Month names.'),
dailyFreq = AttrMapValue(isBoolean, desc='True if we are to assume daily data to be ticked at end of month.'),
specifiedTickDates = AttrMapValue(NoneOr(SequenceOf(isNormalDate)), desc='Actual tick values to use; no calculations done'),
specialTickClear = AttrMapValue(isBoolean, desc='clear rather than delete close ticks when forced first/end dates'),
skipGrid = AttrMapValue(OneOf('none','top','both','bottom'),"grid lines to skip top bottom both none"),
)
_valueClass = normalDate.ND
def __init__(self,**kw):
XValueAxis.__init__(self,**kw)
# some global variables still used...
self.bottomAxisLabelSlack = 0.1
self.niceMonth = 1
self.forceEndDate = 0
self.forceFirstDate = 0
self.forceDatesEachYear = []
self.dailyFreq = 0
self.xLabelFormat = "{mm}/{yy}"
self.dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
self.monthName = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
self.specialTickClear = 0
self.valueSteps = self.specifiedTickDates = None
def _scalar2ND(self, x):
"Convert a scalar to a NormalDate value."
d = self._valueClass()
d.normalize(x)
return d
def _dateFormatter(self, v):
"Create a formatted label for some value."
if not isinstance(v,normalDate.NormalDate):
v = self._scalar2ND(v)
d, m = normalDate._dayOfWeekName, normalDate._monthName
try:
normalDate._dayOfWeekName, normalDate._monthName = self.dayOfWeekName, self.monthName
return v.formatMS(self.xLabelFormat)
finally:
normalDate._dayOfWeekName, normalDate._monthName = d, m
def _xAxisTicker(self, xVals):
"""Complex stuff...
Needs explanation...
Yes please says Andy :-(. Modified on 19 June 2006 to attempt to allow
a mode where one can specify recurring days and months.
"""
VC = self._valueClass
axisLength = self._length
formatter = self._dateFormatter
if isinstance(formatter,TickLabeller):
def formatter(tick):
return self._dateFormatter(self,tick)
firstDate = xVals[0] if not self.valueMin else VC(self.valueMin)
endDate = xVals[-1] if not self.valueMax else VC(self.valueMax)
labels = self.labels
fontName, fontSize, leading = labels.fontName, labels.fontSize, labels.leading
textAnchor, boxAnchor, angle = labels.textAnchor, labels.boxAnchor, labels.angle
RBL = _textBoxLimits(formatter(firstDate).split('\n'),fontName,
fontSize,leading or 1.2*fontSize,textAnchor,boxAnchor)
RBL = _rotatedBoxLimits(RBL[0],RBL[1],RBL[2],RBL[3], angle)
xLabelW = RBL[1]-RBL[0]
xLabelH = RBL[3]-RBL[2]
w = max(xLabelW,labels.width or 0,self.minimumTickSpacing)
W = w+w*self.bottomAxisLabelSlack
ticks = []
labels = []
maximumTicks = self.maximumTicks
if self.specifiedTickDates:
ticks = [VC(x) for x in self.specifiedTickDates]
labels = [formatter(d) for d in ticks]
if self.forceFirstDate and firstDate==ticks[0] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and endDate==ticks[-1] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
return ticks, labels
#AR 20060619 - first we try the approach where the user has explicitly
#specified the days of year to be ticked. Other explicit routes may
#be added.
if self.forceDatesEachYear:
forcedPartialDates = list(map(parseDayAndMonth, self.forceDatesEachYear))
#generate the list of dates in the range.
#print 'dates range from %s to %s' % (firstDate, endDate)
firstYear = firstDate.year()
lastYear = endDate.year()
ticks = []
labels = []
yyyy = firstYear
#generate all forced dates between the year it starts and the year it
#ends, adding them if within range.
while yyyy <= lastYear:
for (dd, mm) in forcedPartialDates:
theDate = normalDate.ND((yyyy, mm, dd))
if theDate >= firstDate and theDate <= endDate:
ticks.append(theDate)
labels.append(formatter(theDate))
yyyy += 1
#first and last may still be forced in.
if self.forceFirstDate and firstDate!=ticks[0]:
ticks.insert(0, firstDate)
labels.insert(0,formatter(firstDate))
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and endDate!=ticks[-1]:
ticks.append(endDate)
labels.append(formatter(endDate))
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
#print 'xVals found on forced dates =', ticks
return ticks, labels
def addTick(i, xVals=xVals, formatter=formatter, ticks=ticks, labels=labels):
ticks.insert(0,xVals[i])
labels.insert(0,formatter(xVals[i]))
n = len(xVals)
#otherwise, we apply the 'magic algorithm...' which looks for nice spacing
#based on the size and separation of the labels.
for d in _NDINTM:
k = n/d
if k<=maximumTicks and k*W <= axisLength:
i = n-1
if self.niceMonth:
j = endDate.month() % (d<=12 and d or 12)
if j:
if self.forceEndDate:
addTick(i)
ticks[0]._doSubTicks=0
i -= j
#weird first date ie not at end of month
try:
wfd = firstDate.month() == xVals[1].month()
except:
wfd = 0
while i>=wfd:
addTick(i)
i -= d
if self.forceFirstDate and ticks[0]!=firstDate:
addTick(0)
ticks[0]._doSubTicks=0
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
if self.specialTickClear:
labels[1] = ''
else:
del ticks[1], labels[1]
if self.forceEndDate and self.niceMonth and j:
if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
if self.specialTickClear:
labels[-2] = ''
else:
del ticks[-2], labels[-2]
try:
if labels[0] and labels[0]==labels[1]:
del ticks[1], labels[1]
except IndexError:
pass
return ticks, labels
raise ValueError('Problem selecting NormalDate value axis tick positions')
def _convertXV(self,data):
'''Convert all XValues to a standard normalDate type'''
VC = self._valueClass
for D in data:
for i in range(len(D)):
x, y = D[i]
if not isinstance(x,VC):
D[i] = (VC(x),y)
def _getStepsAndLabels(self,xVals):
if self.dailyFreq:
xEOM = []
pm = 0
px = xVals[0]
for x in xVals:
m = x.month()
if pm!=m:
if pm: xEOM.append(px)
pm = m
px = x
px = xVals[-1]
if xEOM[-1]!=x: xEOM.append(px)
steps, labels = self._xAxisTicker(xEOM)
else:
steps, labels = self._xAxisTicker(xVals)
return steps, labels
def configure(self, data):
self._convertXV(data)
xVals = set()
for x in data:
for dv in x:
xVals.add(dv[0])
xVals = list(xVals)
xVals.sort()
VC = self._valueClass
steps,labels = self._getStepsAndLabels(xVals)
valueMin, valueMax = self.valueMin, self.valueMax
valueMin = xVals[0] if valueMin is None else VC(valueMin)
valueMax = xVals[-1] if valueMax is None else VC(valueMax)
self._valueMin, self._valueMax = valueMin, valueMax
self._tickValues = steps
self._labelTextFormat = labels
self._scaleFactor = self._length / float(valueMax - valueMin)
self._tickValues = steps
self._configured = 1
class YValueAxis(_YTicks,ValueAxis):
"Y/value axis"
_attrMap = AttrMap(BASE=ValueAxis,
tickLeft = AttrMapValue(isNumber,
desc='Tick length left of the axis.'),
tickRight = AttrMapValue(isNumber,
desc='Tick length right of the axis.'),
joinAxis = AttrMapValue(None,
desc='Join both axes if true.'),
joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
joinAxisPos = AttrMapValue(isNumberOrNone,
desc='Position at which to join with other axis.'),
)
# Indicate the dimension of the data we're interested in.
_dataIndex = 1
def __init__(self):
ValueAxis.__init__(self)
self.labels.boxAnchor = 'e'
self.labels.dx = -5
self.labels.dy = 0
self.tickRight = 0
self.tickLeft = 5
self.joinAxis = None
self.joinAxisMode = None
self.joinAxisPos = None
def demo(self):
data = [(10, 20, 30, 42)]
self.setPosition(100, 10, 80)
self.configure(data)
drawing = Drawing(200, 100)
drawing.add(self)
return drawing
def joinToAxis(self, xAxis, mode='left', pos=None):
"Join with x-axis using some mode."
_assertXAxis(xAxis)
if mode == 'left':
self._x = xAxis._x * 1.0
elif mode == 'right':
self._x = (xAxis._x + xAxis._length) * 1.0
elif mode == 'value':
self._x = xAxis.scale(pos) * 1.0
elif mode == 'points':
self._x = pos * 1.0
def _joinToAxis(self):
ja = self.joinAxis
if ja:
jam = self.joinAxisMode
if jam in ('left', 'right'):
self.joinToAxis(ja, mode=jam)
elif jam in ('value', 'points'):
self.joinToAxis(ja, mode=jam, pos=self.joinAxisPos)
def makeAxis(self):
g = Group()
self._joinToAxis()
if not self.visibleAxis: return g
axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
axis.strokeColor = self.strokeColor
axis.strokeWidth = self.strokeWidth
axis.strokeDashArray = self.strokeDashArray
g.add(axis)
return g
class TimeValueAxis:
_mc = 60
_hc = 60*_mc
_dc = 24*_hc
def __init__(self,*args,**kwds):
if not self.labelTextFormat:
self.labelTextFormat = self.timeLabelTextFormatter
self._saved_tickInfo = {}
def _calcValueStep(self):
'''Calculate _valueStep for the axis or get from valueStep.'''
if self.valueStep is None:
rawRange = self._valueMax - self._valueMin
rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing))
#here's where we try to choose the correct value for the unit
if rawInterval >= self._dc:
d = self._dc
self._unit = 'days'
elif rawInterval >= self._hc:
d = self._hc
self._unit = 'hours'
elif rawInterval >= self._mc:
d = self._mc
self._unit = 'minutes'
else:
d = 1
self._unit = 'seconds'
self._unitd = d
if d>1:
rawInterval = int(rawInterval/d)
self._valueStep = nextRoundNumber(rawInterval) * d
else:
self._valueStep = self.valueStep
def timeLabelTextFormatter(self,val):
u = self._unitd
k = (u,tuple(self._tickValues))
if k in self._saved_tickInfo:
fmt = self._saved_tickInfo[k]
else:
uf = float(u)
tv = [v/uf for v in self._tickValues]
s = self._unit[0]
if _allInt(tv):
fmt = lambda x, uf=uf, s=s: '%.0f%s' % (x/uf,s)
else:
stv = ['%.10f' % v for v in tv]
stvl = max((len(v.rstrip('0'))-v.index('.')-1) for v in stv)
if u==1:
fmt = lambda x,uf=uf,fmt='%%.%dfs' % stvl: fmt % (x/uf)
else:
#see if we can represent fractions
fm = 24 if u==self._dc else 60
fv = [(v - int(v))*fm for v in tv]
if _allInt(fv):
s1 = 'h' if u==self._dc else ('m' if u==self._mc else 's')
fmt = lambda x,uf=uf,fm=fm, fmt='%%d%s%%d%%s' % (s,s1): fmt % (int(x/uf),int((x/uf - int(x/uf))*fm))
else:
fmt = lambda x,uf=uf,fmt='%%.%df%s' % (stvl,s): fmt % (x/uf)
self._saved_tickInfo[k] = fmt
return fmt(val)
class XTimeValueAxis(TimeValueAxis,XValueAxis):
def __init__(self,*args,**kwds):
XValueAxis.__init__(self,*args,**kwds)
TimeValueAxis.__init__(self,*args,**kwds)
class AdjYValueAxis(YValueAxis):
"""A Y-axis applying additional rules.
Depending on the data and some built-in rules, the axis
may choose to adjust its range and origin.
"""
_attrMap = AttrMap(BASE = YValueAxis,
leftAxisPercent = AttrMapValue(isBoolean, desc='When true add percent sign to label values.'),
leftAxisOrigShiftIPC = AttrMapValue(isNumber, desc='Lowest label shift interval ratio.'),
leftAxisOrigShiftMin = AttrMapValue(isNumber, desc='Minimum amount to shift.'),
leftAxisSkipLL0 = AttrMapValue(EitherOr((isBoolean,isListOfNumbers)), desc='Skip/Keep lowest tick label when true/false.\nOr skiplist'),
labelVOffset = AttrMapValue(isNumber, desc='add this to the labels'),
)
def __init__(self,**kw):
YValueAxis.__init__(self,**kw)
self.requiredRange = 30
self.leftAxisPercent = 1
self.leftAxisOrigShiftIPC = 0.15
self.leftAxisOrigShiftMin = 12
self.leftAxisSkipLL0 = self.labelVOffset = 0
self.valueSteps = None
def _rangeAdjust(self):
"Adjusts the value range of the axis."
from reportlab.graphics.charts.utils import find_good_grid, ticks
y_min, y_max = self._valueMin, self._valueMax
m = self.maximumTicks
n = list(filter(lambda x,m=m: x<=m,[4,5,6,7,8,9]))
if not n: n = [m]
valueStep, requiredRange = self.valueStep, self.requiredRange
if requiredRange and y_max - y_min < requiredRange:
y1, y2 = find_good_grid(y_min, y_max,n=n,grid=valueStep)[:2]
if y2 - y1 < requiredRange:
ym = (y1+y2)*0.5
y1 = min(ym-requiredRange*0.5,y_min)
y2 = max(ym+requiredRange*0.5,y_max)
if y_min>=100 and y1<100:
y2 = y2 + 100 - y1
y1 = 100
elif y_min>=0 and y1<0:
y2 = y2 - y1
y1 = 0
self._valueMin, self._valueMax = y1, y2
T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
abf = self.avoidBoundFrac
if abf:
i1 = (T[1]-T[0])
if not isSeq(abf):
i0 = i1 = i1*abf
else:
i0 = i1*abf[0]
i1 = i1*abf[1]
_n = getattr(self,'_cValueMin',T[0])
_x = getattr(self,'_cValueMax',T[-1])
if _n - T[0] < i0: self._valueMin = self._valueMin - i0
if T[-1]-_x < i1: self._valueMax = self._valueMax + i1
T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
self._valueMin = T[0]
self._valueMax = T[-1]
self._tickValues = T
if self.labelTextFormat is None:
self._labelTextFormat = L
else:
self._labelTextFormat = self.labelTextFormat
if abs(self._valueMin-100)<1e-6:
self._calcValueStep()
vMax, vMin = self._valueMax, self._valueMin
m = max(self.leftAxisOrigShiftIPC*self._valueStep,
(vMax-vMin)*self.leftAxisOrigShiftMin/self._length)
self._valueMin = self._valueMin - m
if self.leftAxisSkipLL0:
if isSeq(self.leftAxisSkipLL0):
for x in self.leftAxisSkipLL0:
try:
L[x] = ''
except IndexError:
pass
L[0] = ''
# Sample functions.
def sample0a():
"Sample drawing with one xcat axis and two buckets."
drawing = Drawing(400, 200)
data = [(10, 20)]
xAxis = XCategoryAxis()
xAxis.setPosition(75, 75, 300)
xAxis.configure(data)
xAxis.categoryNames = ['Ying', 'Yang']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
return drawing
def sample0b():
"Sample drawing with one xcat axis and one bucket only."
drawing = Drawing(400, 200)
data = [(10,)]
xAxis = XCategoryAxis()
xAxis.setPosition(75, 75, 300)
xAxis.configure(data)
xAxis.categoryNames = ['Ying']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
return drawing
def sample1():
"Sample drawing containing two unconnected axes."
from reportlab.graphics.shapes import _baseGFontNameB
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XCategoryAxis()
xAxis.setPosition(75, 75, 300)
xAxis.configure(data)
xAxis.categoryNames = ['Beer','Wine','Meat','Cannelloni']
xAxis.labels.boxAnchor = 'n'
xAxis.labels[3].dy = -15
xAxis.labels[3].angle = 30
xAxis.labels[3].fontName = _baseGFontNameB
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4a():
"Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'points'
xAxis.joinAxisPos = 100
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4b():
"Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'value'
xAxis.joinAxisPos = 35
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4c():
"Sample drawing, xvalue/yvalue axes, y connected to bottom of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4c1():
"xvalue/yvalue axes, without drawing axis lines/ticks."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
yAxis.visibleAxis = 0
yAxis.visibleTicks = 0
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.configure(data)
xAxis.visibleAxis = 0
xAxis.visibleTicks = 0
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample4d():
"Sample drawing, xvalue/yvalue axes, y connected to top of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XValueAxis()
xAxis._length = 300
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'top'
xAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5a():
"Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'points'
yAxis.joinAxisPos = 100
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5b():
"Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'value'
yAxis.joinAxisPos = 35
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5c():
"Sample drawing, xvalue/yvalue axes, y connected at right of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'right'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample5d():
"Sample drawing, xvalue/yvalue axes, y connected at left of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis.setPosition(50, 50, 300)
xAxis.configure(data)
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'left'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6a():
"Sample drawing, xcat/yvalue axes, x connected at top of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'top'
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6b():
"Sample drawing, xcat/yvalue axes, x connected at bottom of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'bottom'
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6c():
"Sample drawing, xcat/yvalue axes, x connected at 100 pts to y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'points'
xAxis.joinAxisPos = 100
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample6d():
"Sample drawing, xcat/yvalue axes, x connected at value 20 of y."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
yAxis = YValueAxis()
yAxis.setPosition(50, 50, 125)
yAxis.configure(data)
xAxis = XCategoryAxis()
xAxis._length = 300
xAxis.configure(data)
xAxis.joinAxis = yAxis
xAxis.joinAxisMode = 'value'
xAxis.joinAxisPos = 20
xAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
xAxis.labels.boxAnchor = 'n'
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7a():
"Sample drawing, xvalue/ycat axes, y connected at right of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'right'
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7b():
"Sample drawing, xvalue/ycat axes, y connected at left of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'left'
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7c():
"Sample drawing, xvalue/ycat axes, y connected at value 30 of x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'value'
yAxis.joinAxisPos = 30
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
def sample7d():
"Sample drawing, xvalue/ycat axes, y connected at 200 pts to x."
drawing = Drawing(400, 200)
data = [(10, 20, 30, 42)]
xAxis = XValueAxis()
xAxis._length = 300
xAxis.configure(data)
yAxis = YCategoryAxis()
yAxis.setPosition(50, 50, 125)
yAxis.joinAxis = xAxis
yAxis.joinAxisMode = 'points'
yAxis.joinAxisPos = 200
yAxis.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
yAxis.labels.boxAnchor = 'e'
yAxis.configure(data)
drawing.add(xAxis)
drawing.add(yAxis)
return drawing
|
{
"content_hash": "2c4da4902a8c29944e28f69953f82ed8",
"timestamp": "",
"source": "github",
"line_count": 2442,
"max_line_length": 222,
"avg_line_length": 38.45864045864046,
"alnum_prop": 0.5556454704203757,
"repo_name": "sandeepkoduri/GAE-html-to-pdf",
"id": "b91ef4f421d93fa202b2c32d7b026304d8d27bb5",
"size": "93996",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libs/reportlab/graphics/charts/axes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5936"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Python",
"bytes": "8032247"
}
],
"symlink_target": ""
}
|
import os
os.system('sudo sh /home/sivsushruth/Cyberpass/add_ip.sh')
|
{
"content_hash": "94f48af166670f756d16e2e1ec4b756c",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 58,
"avg_line_length": 34,
"alnum_prop": 0.7794117647058824,
"repo_name": "GingerNinja23/CyberBoost",
"id": "323cd251474d65c7ad976d5f1585964cddc46a00",
"size": "68",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "add_ip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5901"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
}
|
""" ProgressBar
Example:
.. UIExample:: 100
from flexx import app, event, ui
class Example(ui.Widget):
def init(self):
with ui.HBox():
self.b1 = ui.Button(flex=0, text='Less')
self.b2 = ui.Button(flex=0, text='More')
self.prog = ui.ProgressBar(flex=1, value=0.1, text='{percent} done')
@event.reaction('b1.pointer_down', 'b2.pointer_down')
def _change_progress(self, *events):
for ev in events:
if ev.source is self.b1:
self.prog.set_value(self.prog.value - 0.1)
else:
self.prog.set_value(self.prog.value + 0.1)
"""
from ... import event
from .._widget import Widget, create_element
class ProgressBar(Widget):
""" A widget to show progress.
The ``node`` of this widget is a
`<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_
containing a few HTML elements for rendering.
"""
DEFAULT_MIN_SIZE = 40, 16
CSS = """
.flx-ProgressBar {
border: 1px solid #ddd;
border-radius: 6px;
background: #eee;
}
.flx-ProgressBar > .progress-bar {
/* Use flexbox to vertically align label text */
display: -webkit-flex;
display: -ms-flexbox;
display: -ms-flex;
display: -moz-flex;
display: flex;
-webkit-flex-flow: column;
-ms-flex-flow: column;
-moz-flex-flow: column;
flex-flow: column;
-webkit-justify-content: center;
-ms-justify-content: center;
-moz-justify-content: center;
justify-content: center;
white-space: nowrap;
align-self: stretch;
position: absolute; /* need this on Chrome when in a VBox */
background: #8be;
text-align: center;
/*transition: width 0.2s ease; behaves silly on Chrome */
}
"""
value = event.FloatProp(0, settable=True, doc="""
The progress value.
""")
min = event.FloatProp(0, settable=True, doc="""
The minimum progress value.
""")
max = event.FloatProp(1, settable=True, doc="""
The maximum progress value.
""")
text = event.StringProp('', settable=True, doc="""
The label to display on the progress bar. Occurances of
"{percent}" are replaced with the current percentage, and
"{value}" with the current value.
""")
@event.action
def set_value(self, value):
value = max(self.min, value)
value = min(self.max, value)
self._mutate_value(value)
@event.reaction('min', 'max')
def __keep_value_constrained(self, *events):
self.set_value(self.value)
def _render_dom(self):
global Math
value = self.value
mi, ma = self.min, self.max
perc = 100 * (value - mi) / (ma - mi)
label = self.text
label = label.replace('{value}', str(value))
label = label.replace('{percent}', Math.round(perc) + '%')
attr = {'style__width': perc+'%',
'style__height': '100%',
'className': 'progress-bar',
}
return [create_element('div', attr, label)]
|
{
"content_hash": "c046b7c2d05e7d473abee03507b0d893",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 84,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5480416156670747,
"repo_name": "jrversteegh/flexx",
"id": "a6a793025133a5877707168c1401fa84f4c57327",
"size": "3268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flexx/ui/widgets/_progressbar.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "150"
},
{
"name": "JavaScript",
"bytes": "26636"
},
{
"name": "Python",
"bytes": "978605"
}
],
"symlink_target": ""
}
|
"""Wrapper script to use Pywikibot in 'directory' mode.
Run scripts using:
python pwb.py <name_of_script> <options>
and it will use the package directory to store all user files, will fix up
search paths so the package does not need to be installed, etc.
"""
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
__version__ = '$Id$'
# The following snippet was developed by Ned Batchelder (and others)
# for coverage [1], with python 3 support [2] added later,
# and is available under the BSD license (see [3])
# [1] https://bitbucket.org/ned/coveragepy/src/b5abcee50dbe/coverage/execfile.py
# [2] https://bitbucket.org/ned/coveragepy/src/fd5363090034/coverage/execfile.py
# [3] https://bitbucket.org/ned/coveragepy/src/2c5fb3a8b81c/setup.py?at=default#cl-31
import os
import sys
import types
from warnings import warn
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
PY26 = (PYTHON_VERSION < (2, 7))
versions_required_message = """
Pywikibot not available on:
%s
Pywikibot is only supported under Python 2.6.5+, 2.7.2+ or 3.3+
"""
def python_is_supported():
"""Check that Python is supported."""
# Any change to this must be copied to setup.py
return (PYTHON_VERSION >= (3, 3, 0) or
(PY2 and PYTHON_VERSION >= (2, 7, 2)) or
(PY26 and PYTHON_VERSION >= (2, 6, 5)))
if not python_is_supported():
print(versions_required_message % sys.version)
sys.exit(1)
pwb = None
def remove_modules():
"""Remove pywikibot modules."""
for name in list(sys.modules):
if name.startswith('pywikibot'):
del sys.modules[name]
def tryimport_pwb():
"""Try to import pywikibot.
If so, we need to patch pwb.argvu, too.
If pywikibot is not available, we create a mock object to remove the
need for if statements further on.
"""
global pwb
try:
import pywikibot # noqa
pwb = pywikibot
except RuntimeError:
remove_modules()
os.environ['PYWIKIBOT2_NO_USER_CONFIG'] = '2'
import pywikibot # noqa
pwb = pywikibot
def run_python_file(filename, argv, argvu, package=None):
"""Run a python file as if it were the main program on the command line.
`filename` is the path to the file to execute, it need not be a .py file.
`args` is the argument array to present as sys.argv, as unicode strings.
"""
tryimport_pwb()
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
# it's explicitly using str() to bypass unicode_literals in Python 2
main_mod = types.ModuleType(str('__main__'))
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
if sys.version_info[0] > 2:
main_mod.__builtins__ = sys.modules['builtins']
else:
main_mod.__builtins__ = sys.modules['__builtin__']
if package:
# it's explicitly using str() to bypass unicode_literals in Python 2
main_mod.__package__ = str(package)
# Set sys.argv and the first path element properly.
old_argv = sys.argv
old_argvu = pwb.argvu
old_path0 = sys.path[0]
sys.argv = argv
pwb.argvu = argvu
sys.path[0] = os.path.dirname(filename)
try:
with open(filename, 'rb') as f:
source = f.read()
exec(compile(source, filename, "exec", dont_inherit=True),
main_mod.__dict__)
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
sys.path[0] = old_path0
pwb.argvu = old_argvu
# end of snippet from coverage
def abspath(path):
"""Convert path to absolute path, with uppercase drive letter on win32."""
path = os.path.abspath(path)
if path[0] != '/':
# normalise Windows drive letter
# TODO: use pywikibot.tools.first_upper
path = path[0].upper() + path[1:]
return path
# Establish a normalised path for the directory containing pwb.py.
# Either it is '.' if the user's current working directory is the same,
# or it is the absolute path for the directory of pwb.py
absolute_path = abspath(os.path.dirname(sys.argv[0]))
rewrite_path = absolute_path
sys.path = [sys.path[0], rewrite_path,
os.path.join(rewrite_path, 'pywikibot', 'compat'),
] + sys.path[1:]
try:
import requests
if not hasattr(requests, '__version__'):
print("requests import problem: requests.__version__ does not exist.")
requests = None
except ImportError as e:
print("ImportError: %s" % e)
requests = None
if not requests:
print("Python module requests is required.")
print("Try running 'pip install requests'.")
sys.exit(1)
del requests
if len(sys.argv) > 1 and sys.argv[1][0] != '-':
filename = sys.argv[1]
else:
filename = None
# Skip the filename if one was given
args = sys.argv[(2 if filename else 1):]
# Search for user-config.py before creating one.
try:
# If successful, user-config.py already exists in one of the candidate
# directories. See config2.py for details on search order.
# Use env var to communicate to config2.py pwb.py location (bug T74918).
_pwb_dir = os.path.split(__file__)[0]
if sys.platform == 'win32' and sys.version_info[0] < 3:
_pwb_dir = str(_pwb_dir)
os.environ[str('PYWIKIBOT2_DIR_PWB')] = _pwb_dir
import pywikibot # noqa
except RuntimeError as err:
# user-config.py to be created
print("NOTE: 'user-config.py' was not found!")
if filename is not None and not filename.startswith('generate_'):
print("Please follow the prompts to create it:")
run_python_file('generate_user_files.py',
['generate_user_files.py'],
['generate_user_files.py'])
# because we have loaded pywikibot without user-config.py loaded, we need to re-start
# the entire process. Ask the user to do so.
sys.exit(1)
def main():
"""Command line entry point."""
global filename
if filename:
file_package = None
tryimport_pwb()
argvu = pwb.argvu[1:]
if not filename.endswith('.py'):
filename += '.py'
if not os.path.exists(filename):
testpath = os.path.join(os.path.split(__file__)[0],
'scripts',
filename)
file_package = 'scripts'
if not os.path.exists(testpath):
testpath = os.path.join(
os.path.split(__file__)[0], 'scripts/archive', filename)
file_package = 'scripts.archive'
if os.path.exists(testpath):
filename = testpath
else:
raise OSError("%s not found!" % filename)
# When both pwb.py and the filename to run are within the current
# working directory:
# a) set __package__ as if called using python -m scripts.blah.foo
# b) set __file__ to be relative, so it can be relative in backtraces,
# and __file__ *appears* to be an unstable path to load data from.
# This is a rough (and quick!) emulation of 'package name' detection.
# a much more detailed implementation is in coverage's find_module.
# https://bitbucket.org/ned/coveragepy/src/default/coverage/execfile.py
cwd = abspath(os.getcwd())
if absolute_path == cwd:
absolute_filename = abspath(filename)[:len(cwd)]
if absolute_filename == cwd:
relative_filename = os.path.relpath(filename)
# remove the filename, and use '.' instead of path separator.
file_package = os.path.dirname(
relative_filename).replace(os.sep, '.')
filename = os.path.join(os.curdir, relative_filename)
if file_package and file_package not in sys.modules:
try:
__import__(file_package)
except ImportError as e:
warn('Parent module %s not found: %s'
% (file_package, e), ImportWarning)
run_python_file(filename, [filename] + args, argvu, file_package)
return True
else:
return False
if __name__ == '__main__':
if not main():
print(__doc__)
|
{
"content_hash": "c0770b1be36c500fb13f4efcd061c4fa",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 93,
"avg_line_length": 33.11764705882353,
"alnum_prop": 0.6132622853759621,
"repo_name": "h4ck3rm1k3/pywikibot-core",
"id": "395019e49898ba6fe1a835cf383515d39d3609eb",
"size": "8492",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pwb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "4210758"
},
{
"name": "Shell",
"bytes": "659"
}
],
"symlink_target": ""
}
|
import unittest
def number_digits(n):
if n == 0:
return 1
if n < 0:
return number_digits(-n)
digits = 0
while n != 0:
digits += 1
n //= 10
return digits
def triangle(n):
return n * (n+1) // 2
def square(n):
return n * n
def pentagonal(n):
return n * (3*n-1) // 2
def hexagonal(n):
return n * (2*n-1)
def heptagonal(n):
return n * (5*n-3) // 2
def octagonal(n):
return n * (3*n-2)
class EulerNumbersTest(unittest.TestCase):
def test_number_digits(self):
self.assertEqual(number_digits(1), 1)
self.assertEqual(number_digits(9), 1)
self.assertEqual(number_digits(10), 2)
self.assertEqual(number_digits(15), 2)
self.assertEqual(number_digits(19), 2)
self.assertEqual(number_digits(1234567890123), 13)
self.assertEqual(number_digits(-1), 1)
self.assertEqual(number_digits(-9), 1)
self.assertEqual(number_digits(-15), 2)
self.assertEqual(number_digits(0), 1)
def test_triangle(self):
self.assertEqual(triangle(1), 1)
self.assertEqual(triangle(2), 3)
self.assertEqual(triangle(3), 6)
self.assertEqual(triangle(4), 10)
self.assertEqual(triangle(5), 15)
def test_square(self):
self.assertEqual(square(1), 1)
self.assertEqual(square(2), 4)
self.assertEqual(square(3), 9)
self.assertEqual(square(4), 16)
self.assertEqual(square(5), 25)
def test_pentagonal(self):
self.assertEqual(pentagonal(1), 1)
self.assertEqual(pentagonal(2), 5)
self.assertEqual(pentagonal(3), 12)
self.assertEqual(pentagonal(4), 22)
self.assertEqual(pentagonal(5), 35)
def test_hexagonal(self):
self.assertEqual(hexagonal(1), 1)
self.assertEqual(hexagonal(2), 6)
self.assertEqual(hexagonal(3), 15)
self.assertEqual(hexagonal(4), 28)
self.assertEqual(hexagonal(5), 45)
def test_heptagonal(self):
self.assertEqual(heptagonal(1), 1)
self.assertEqual(heptagonal(2), 7)
self.assertEqual(heptagonal(3), 18)
self.assertEqual(heptagonal(4), 34)
self.assertEqual(heptagonal(5), 55)
def test_octagonal(self):
self.assertEqual(octagonal(1), 1)
self.assertEqual(octagonal(2), 8)
self.assertEqual(octagonal(3), 21)
self.assertEqual(octagonal(4), 40)
self.assertEqual(octagonal(5), 65)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3555be6740cbd0b3bae6cde842efdefc",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 54,
"avg_line_length": 25.95505617977528,
"alnum_prop": 0.6619047619047619,
"repo_name": "brunorijsman/euler-problems-python",
"id": "f949997fe5de94d4cf77f96735cdfd940c534fea",
"size": "2310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "euler/euler_numbers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "365"
},
{
"name": "Python",
"bytes": "51436"
}
],
"symlink_target": ""
}
|
import graphene
from ...core.permissions import ShippingPermissions
from ...shipping import models
from ..channel.types import ChannelContext
from ..core.connection import create_connection_slice, filter_connection_queryset
from ..core.fields import FilterConnectionField, PermissionsField
from ..core.utils import from_global_id_or_error
from ..translations.mutations import ShippingPriceTranslate
from .bulk_mutations import ShippingPriceBulkDelete, ShippingZoneBulkDelete
from .filters import ShippingZoneFilterInput
from .mutations.channels import ShippingMethodChannelListingUpdate
from .mutations.shippings import (
ShippingPriceCreate,
ShippingPriceDelete,
ShippingPriceExcludeProducts,
ShippingPriceRemoveProductFromExclude,
ShippingPriceUpdate,
ShippingZoneCreate,
ShippingZoneDelete,
ShippingZoneUpdate,
)
from .resolvers import resolve_shipping_zones
from .types import ShippingZone, ShippingZoneCountableConnection
class ShippingQueries(graphene.ObjectType):
shipping_zone = PermissionsField(
ShippingZone,
id=graphene.Argument(
graphene.ID, description="ID of the shipping zone.", required=True
),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="Look up a shipping zone by ID.",
permissions=[ShippingPermissions.MANAGE_SHIPPING],
)
shipping_zones = FilterConnectionField(
ShippingZoneCountableConnection,
filter=ShippingZoneFilterInput(
description="Filtering options for shipping zones."
),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="List of the shop's shipping zones.",
permissions=[ShippingPermissions.MANAGE_SHIPPING],
)
@staticmethod
def resolve_shipping_zone(_root, _info, *, id, channel=None):
_, id = from_global_id_or_error(id, ShippingZone)
instance = models.ShippingZone.objects.filter(id=id).first()
return ChannelContext(node=instance, channel_slug=channel) if instance else None
@staticmethod
def resolve_shipping_zones(_root, info, *, channel=None, **kwargs):
qs = resolve_shipping_zones(channel)
qs = filter_connection_queryset(qs, kwargs)
return create_connection_slice(
qs, info, kwargs, ShippingZoneCountableConnection
)
class ShippingMutations(graphene.ObjectType):
shipping_method_channel_listing_update = ShippingMethodChannelListingUpdate.Field()
shipping_price_create = ShippingPriceCreate.Field()
shipping_price_delete = ShippingPriceDelete.Field()
shipping_price_bulk_delete = ShippingPriceBulkDelete.Field()
shipping_price_update = ShippingPriceUpdate.Field()
shipping_price_translate = ShippingPriceTranslate.Field()
shipping_price_exclude_products = ShippingPriceExcludeProducts.Field()
shipping_price_remove_product_from_exclude = (
ShippingPriceRemoveProductFromExclude.Field()
)
shipping_zone_create = ShippingZoneCreate.Field()
shipping_zone_delete = ShippingZoneDelete.Field()
shipping_zone_bulk_delete = ShippingZoneBulkDelete.Field()
shipping_zone_update = ShippingZoneUpdate.Field()
|
{
"content_hash": "309940508d065055b056cfa93b0befa8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 88,
"avg_line_length": 41.06172839506173,
"alnum_prop": 0.7387251954299459,
"repo_name": "mociepka/saleor",
"id": "6bf101b0007466b1edfecb165385475f08e29023",
"size": "3326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/graphql/shipping/schema.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
"""
Infobip Client API Libraries OpenAPI Specification
OpenAPI specification containing public endpoints supported in client API libraries. # noqa: E501
The version of the OpenAPI document: 1.0.172
Contact: support@infobip.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from infobip_api_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class TfaVerifyPinResponse(ModelNormal):
"""
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"attempts_remaining": (int,), # noqa: E501
"msisdn": (str,), # noqa: E501
"pin_error": (str,), # noqa: E501
"pin_id": (str,), # noqa: E501
"verified": (bool,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
"attempts_remaining": "attemptsRemaining", # noqa: E501
"msisdn": "msisdn", # noqa: E501
"pin_error": "pinError", # noqa: E501
"pin_id": "pinId", # noqa: E501
"verified": "verified", # noqa: E501
}
_composed_schemas = {}
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""TfaVerifyPinResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
attempts_remaining (int): Number of remaining PIN attempts.. [optional] # noqa: E501
msisdn (str): Phone number (`MSISDN`) to which the 2FA message was sent.. [optional] # noqa: E501
pin_error (str): Indicates if any error occurs during PIN verification.. [optional] # noqa: E501
pin_id (str): Sent PIN code ID.. [optional] # noqa: E501
verified (bool): Indicates if the phone number (`MSISDN`) was successfully verified.. [optional] # noqa: E501
"""
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_path_to_item = kwargs.pop("_path_to_item", ())
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
{
"content_hash": "52e475ed393d08e0a91dacad82d82bd7",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 122,
"avg_line_length": 41.451977401129945,
"alnum_prop": 0.555267820635137,
"repo_name": "infobip/infobip-api-python-client",
"id": "6e626a5cbb13b095d48ed79cfce29391c9ac2720",
"size": "7337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infobip_api_client/model/tfa_verify_pin_response.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "215859"
}
],
"symlink_target": ""
}
|
from unicodedata import category
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
from optparse import make_option
import uuid
import os
import sys
from django.conf import settings
import json
from .export_package import export_package
class Command(BaseCommand):
args = 'package_name'
help = 'Exports all packages.'
def handle(self, *args, **options):
packages = []
for app in settings.INSTALLED_APPS:
if 'workflows.' in app:
packages.append(app)
for package in packages:
package_name = package.split('workflows.')[1]
self.stdout.write("Exporting package "+package_name+"\n")
export_package(package_name,self.stdout)
#temporary fix
#self.stdout.write("Exporting cf_nlp package \n")
#export_package('nlp',self.stdout, dest_folder='/home/matej/platforms/clowdflows-env/cf_nlp/nlp')
|
{
"content_hash": "59f1dc6b06295a3a8f24449de02c6d31",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 105,
"avg_line_length": 36.36666666666667,
"alnum_prop": 0.6801099908340972,
"repo_name": "xflows/clowdflows",
"id": "708eb5f43b048aa0e23984e9f02b2d5cda2756c0",
"size": "1091",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workflows/management/commands/export_all.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "192012"
},
{
"name": "CSS",
"bytes": "106522"
},
{
"name": "HTML",
"bytes": "376832"
},
{
"name": "JavaScript",
"bytes": "793427"
},
{
"name": "Makefile",
"bytes": "385"
},
{
"name": "Prolog",
"bytes": "146760"
},
{
"name": "Python",
"bytes": "31014268"
},
{
"name": "Roff",
"bytes": "58306446"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
import base64
import json
import os
import re
import dill
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.contrib.operators.mlengine_operator import MLEngineBatchPredictionOperator
from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator
from airflow.exceptions import AirflowException
from airflow.operators.python_operator import PythonOperator
from six.moves.urllib.parse import urlsplit
def create_evaluate_ops(task_prefix,
data_format,
input_paths,
prediction_path,
metric_fn_and_keys,
validate_fn,
batch_prediction_job_id=None,
project_id=None,
region=None,
dataflow_options=None,
model_uri=None,
model_name=None,
version_name=None,
dag=None):
"""
Creates Operators needed for model evaluation and returns.
It gets prediction over inputs via Cloud ML Engine BatchPrediction API by
calling MLEngineBatchPredictionOperator, then summarize and validate
the result via Cloud Dataflow using DataFlowPythonOperator.
For details and pricing about Batch prediction, please refer to the website
https://cloud.google.com/ml-engine/docs/how-tos/batch-predict
and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/
It returns three chained operators for prediction, summary, and validation,
named as <prefix>-prediction, <prefix>-summary, and <prefix>-validation,
respectively.
(<prefix> should contain only alphanumeric characters or hyphen.)
The upstream and downstream can be set accordingly like:
pred, _, val = create_evaluate_ops(...)
pred.set_upstream(upstream_op)
...
downstream_op.set_upstream(val)
Callers will provide two python callables, metric_fn and validate_fn, in
order to customize the evaluation behavior as they wish.
- metric_fn receives a dictionary per instance derived from json in the
batch prediction result. The keys might vary depending on the model.
It should return a tuple of metrics.
- validation_fn receives a dictionary of the averaged metrics that metric_fn
generated over all instances.
The key/value of the dictionary matches to what's given by
metric_fn_and_keys arg.
The dictionary contains an additional metric, 'count' to represent the
total number of instances received for evaluation.
The function would raise an exception to mark the task as failed, in a
case the validation result is not okay to proceed (i.e. to set the trained
version as default).
Typical examples are like this:
def get_metric_fn_and_keys():
import math # imports should be outside of the metric_fn below.
def error_and_squared_error(inst):
label = float(inst['input_label'])
classes = float(inst['classes']) # 0 or 1
err = abs(classes-label)
squared_err = math.pow(classes-label, 2)
return (err, squared_err) # returns a tuple.
return error_and_squared_error, ['err', 'mse'] # key order must match.
def validate_err_and_count(summary):
if summary['err'] > 0.2:
raise ValueError('Too high err>0.2; summary=%s' % summary)
if summary['mse'] > 0.05:
raise ValueError('Too high mse>0.05; summary=%s' % summary)
if summary['count'] < 1000:
raise ValueError('Too few instances<1000; summary=%s' % summary)
return summary
For the details on the other BatchPrediction-related arguments (project_id,
job_id, region, data_format, input_paths, prediction_path, model_uri),
please refer to MLEngineBatchPredictionOperator too.
:param task_prefix: a prefix for the tasks. Only alphanumeric characters and
hyphen are allowed (no underscores), since this will be used as dataflow
job name, which doesn't allow other characters.
:type task_prefix: string
:param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
:type data_format: string
:param input_paths: a list of input paths to be sent to BatchPrediction.
:type input_paths: list of strings
:param prediction_path: GCS path to put the prediction results in.
:type prediction_path: string
:param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
- metric_fn is a function that accepts a dictionary (for an instance),
and returns a tuple of metric(s) that it calculates.
- metric_keys is a list of strings to denote the key of each metric.
:type metric_fn_and_keys: tuple of a function and a list of strings
:param validate_fn: a function to validate whether the averaged metric(s) is
good enough to push the model.
:type validate_fn: function
:param batch_prediction_job_id: the id to use for the Cloud ML Batch
prediction job. Passed directly to the MLEngineBatchPredictionOperator as
the job_id argument.
:type batch_prediction_job_id: string
:param project_id: the Google Cloud Platform project id in which to execute
Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['project_id']` will be used.
:type project_id: string
:param region: the Google Cloud Platform region in which to execute Cloud ML
Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['region']` will be used.
:type region: string
:param dataflow_options: options to run Dataflow jobs. If None, then the
`dag`'s `default_args['dataflow_default_options']` will be used.
:type dataflow_options: dictionary
:param model_uri: GCS path of the model exported by Tensorflow using
tensorflow.estimator.export_savedmodel(). It cannot be used with
model_name or version_name below. See MLEngineBatchPredictionOperator for
more detail.
:type model_uri: string
:param model_name: Used to indicate a model to use for prediction. Can be
used in combination with version_name, but cannot be used together with
model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
then the `dag`'s `default_args['model_name']` will be used.
:type model_name: string
:param version_name: Used to indicate a model version to use for prediciton,
in combination with model_name. Cannot be used together with model_uri.
See MLEngineBatchPredictionOperator for more detail. If None, then the
`dag`'s `default_args['version_name']` will be used.
:type version_name: string
:param dag: The `DAG` to use for all Operators.
:type dag: airflow.DAG
:returns: a tuple of three operators, (prediction, summary, validation)
:rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,
PythonOperator)
"""
# Verify that task_prefix doesn't have any special characters except hyphen
# '-', which is the only allowed non-alphanumeric character by Dataflow.
if not re.match(r"^[a-zA-Z][-A-Za-z0-9]*$", task_prefix):
raise AirflowException(
"Malformed task_id for DataFlowPythonOperator (only alphanumeric "
"and hyphens are allowed but got: " + task_prefix)
metric_fn, metric_keys = metric_fn_and_keys
if not callable(metric_fn):
raise AirflowException("`metric_fn` param must be callable.")
if not callable(validate_fn):
raise AirflowException("`validate_fn` param must be callable.")
if dag is not None and dag.default_args is not None:
default_args = dag.default_args
project_id = project_id or default_args.get('project_id')
region = region or default_args.get('region')
model_name = model_name or default_args.get('model_name')
version_name = version_name or default_args.get('version_name')
dataflow_options = dataflow_options or \
default_args.get('dataflow_default_options')
evaluate_prediction = MLEngineBatchPredictionOperator(
task_id=(task_prefix + "-prediction"),
project_id=project_id,
job_id=batch_prediction_job_id,
region=region,
data_format=data_format,
input_paths=input_paths,
output_path=prediction_path,
uri=model_uri,
model_name=model_name,
version_name=version_name,
dag=dag)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True))
evaluate_summary = DataFlowPythonOperator(
task_id=(task_prefix + "-summary"),
py_options=["-m"],
py_file="airflow.contrib.operators.mlengine_prediction_summary",
dataflow_default_options=dataflow_options,
options={
"prediction_path": prediction_path,
"metric_fn_encoded": metric_fn_encoded,
"metric_keys": ','.join(metric_keys)
},
dag=dag)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, **kwargs):
prediction_path = kwargs["templates_dict"]["prediction_path"]
scheme, bucket, obj, _, _ = urlsplit(prediction_path)
if scheme != "gs" or not bucket or not obj:
raise ValueError("Wrong format prediction_path: %s",
prediction_path)
summary = os.path.join(obj.strip("/"),
"prediction.summary.json")
gcs_hook = GoogleCloudStorageHook()
summary = json.loads(gcs_hook.download(bucket, summary))
return validate_fn(summary)
evaluate_validation = PythonOperator(
task_id=(task_prefix + "-validation"),
python_callable=apply_validate_fn,
provide_context=True,
templates_dict={"prediction_path": prediction_path},
dag=dag)
evaluate_validation.set_upstream(evaluate_summary)
return evaluate_prediction, evaluate_summary, evaluate_validation
|
{
"content_hash": "a5ab636704b38f1eae77aba58aaf61a9",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 87,
"avg_line_length": 44.24782608695652,
"alnum_prop": 0.6653237692836789,
"repo_name": "sergiohgz/incubator-airflow",
"id": "7ce784ebb4ae336a6c3ecf65bd6bf08278ec616b",
"size": "10960",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/mlengine_operator_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57054"
},
{
"name": "HTML",
"bytes": "152458"
},
{
"name": "JavaScript",
"bytes": "1364816"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "3040105"
},
{
"name": "Shell",
"bytes": "34457"
}
],
"symlink_target": ""
}
|
import abc
import os
import json
import time
import logging
from urllib.parse import urlencode
import jwt
from hailtop.utils import request_retry_transient_errors
log = logging.getLogger(__name__)
class Credentials(abc.ABC):
@staticmethod
def from_file(credentials_file):
with open(credentials_file) as f:
credentials = json.load(f)
credentials_type = credentials['type']
if credentials_type == 'service_account':
return ServiceAccountCredentials(credentials)
if credentials_type == 'authorized_user':
return ApplicationDefaultCredentials(credentials)
raise ValueError(f'unknown Google Cloud credentials type {credentials_type}')
@staticmethod
def default_credentials():
credentials_file = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
if credentials_file is None:
application_default_credentials_file = f'{os.environ["HOME"]}/.config/gcloud/application_default_credentials.json'
if os.path.exists(application_default_credentials_file):
credentials_file = application_default_credentials_file
if credentials_file is None:
raise ValueError('unable to locate Google Cloud credentials')
log.info(f'using credentials file {credentials_file}')
return Credentials.from_file(credentials_file)
async def get_access_token(self, session):
pass
# protocol documented here:
# https://developers.google.com/identity/protocols/oauth2/web-server#offline
# studying `gcloud --log-http print-access-token` was also useful
class ApplicationDefaultCredentials(Credentials):
def __init__(self, credentials):
self.credentials = credentials
async def get_access_token(self, session):
async with await request_retry_transient_errors(
session, 'POST',
'https://www.googleapis.com/oauth2/v4/token',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=urlencode({
'grant_type': 'refresh_token',
'client_id': self.credentials['client_id'],
'client_secret': self.credentials['client_secret'],
'refresh_token': self.credentials['refresh_token']
})) as resp:
return await resp.json()
# protocol documented here:
# https://developers.google.com/identity/protocols/oauth2/service-account
# studying `gcloud --log-http print-access-token` was also useful
class ServiceAccountCredentials(Credentials):
def __init__(self, key):
self.key = key
async def get_access_token(self, session):
now = int(time.time())
scope = 'openid https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/compute'
assertion = {
"aud": "https://www.googleapis.com/oauth2/v4/token",
"iat": now,
"scope": scope,
"exp": now + 300, # 5m
"iss": self.key['client_email']
}
encoded_assertion = jwt.encode(assertion, self.key['private_key'], algorithm='RS256')
async with await request_retry_transient_errors(
session, 'POST',
'https://www.googleapis.com/oauth2/v4/token',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=urlencode({
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': encoded_assertion
})) as resp:
return await resp.json()
|
{
"content_hash": "6f774fd7ec2cd1b641121e6f5c94f989",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 206,
"avg_line_length": 38.333333333333336,
"alnum_prop": 0.6239789196310935,
"repo_name": "cseed/hail",
"id": "60792b1927c77cf22a515ae02a49fc9e5368aea7",
"size": "3795",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hail/python/hailtop/aiogoogle/auth/credentials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "289"
},
{
"name": "C++",
"bytes": "170210"
},
{
"name": "CSS",
"bytes": "20423"
},
{
"name": "Dockerfile",
"bytes": "7426"
},
{
"name": "HTML",
"bytes": "43106"
},
{
"name": "Java",
"bytes": "22564"
},
{
"name": "JavaScript",
"bytes": "730"
},
{
"name": "Jupyter Notebook",
"bytes": "162397"
},
{
"name": "Makefile",
"bytes": "58348"
},
{
"name": "PLpgSQL",
"bytes": "23163"
},
{
"name": "Python",
"bytes": "3477764"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "Scala",
"bytes": "3496240"
},
{
"name": "Shell",
"bytes": "41254"
},
{
"name": "TSQL",
"bytes": "10385"
},
{
"name": "TeX",
"bytes": "7125"
},
{
"name": "XSLT",
"bytes": "9787"
}
],
"symlink_target": ""
}
|
import logging
log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)
import collections
class Model(collections.MutableMapping):
model = {}
validate = {}
def __init__(self, *args, **kwargs):
self.update(dict(*args, **kwargs))
self.__dict__.update(self.model)
def __getitem__(self, key):
log.debug("Calling __getitem__ for %s", key)
return self.__dict__[key]
def __setitem__(self, key, value):
log.debug("Calling __setitem__ for %s", value)
if key in self.model:
self.__dict__[key] = self.__validate__(key, value)
else:
self.__dict__[key] = value
def __delitem__(self, key):
del self.__dict__[key]
def __iter__(self):
return iter(self.__dict__)
def __len__(self):
return len(self.__dict__)
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return '{}, Model({})'.format(super(Model, self).__repr__(),
self.__dict__)
def items(self):
return self.model.keys()
def __validate__(self, key, value):
valid = self.validate.get(key) or []
if valid:
try:
if value in valid:
return value
except Exception:
raise
else:
return value
class Value(Model):
model = { 'key' : '' , 'value' : '' }
class Tag(Model):
validate = { 'tag' : [ 'symlink',
'registry_key',
'user',
'group',
'directory',
'file',
'modify'
]}
model = { 'tag': '' }
class Action(Model):
validate = { 'action' : [ 'create', 'delete', 'modify' ] }
model = { 'action' : '' , 'tag' : '' , 'values' : [] }
class Plan(Model):
pass
|
{
"content_hash": "89f4c0e629c9afebc949b4499a941045",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 68,
"avg_line_length": 23.860759493670887,
"alnum_prop": 0.47320954907161805,
"repo_name": "xbcsmith/frell",
"id": "a54e4eea3dba758f6838913cfbb1b6fa91e73f76",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/model5.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1178"
},
{
"name": "Perl",
"bytes": "186"
},
{
"name": "Python",
"bytes": "350852"
},
{
"name": "Shell",
"bytes": "2605"
}
],
"symlink_target": ""
}
|
"""
K-Means
"""
# Standard Library Dependencies
from __future__ import division, print_function, absolute_import
from warnings import warn
from time import time
import os
from operator import itemgetter
# External Dependencies
import numpy as np
from scipy.io import loadmat
from numpy.linalg import norm as norm2
# %matplotlib inline # use when in JUPYTER NOTEBOOK (or risk hang)
# plt.ion() # allow ipython %run to terminate without closing figure
# Internal Dependencies
# from classifiers import KMEANS
class KMeans:
def train(self, X_train, y_train):
distinct_labels = np.array(list(distinct_labels))
label_means = []
sds = []
label_variances = []
for l in distinct_labels:
Xl = [x for x, y in zip(X_train, y_train) if y == l] # examples labeled l
mean_l = np.mean(Xl, axis=0)
label_means.append(mean_l)
# Compute S.D. for each examples of each label
s_l = np.std([norm2(x - mean_l) for x in Xl])
label_variances.append(s_l)
# Check how far away means are from each other.
for i, mi in enumerate(label_means):
d2i = [(j, norm2(mi - mj)) for j, mj in enumerate(label_means) if i!=j]
j, dij = min(d2i, key=itemgetter(1))
si, sj = np.sqrt(label_variances[i]), np.sqrt(label_variances[j])
print("label {} :: nearest label = {} :: dist = {}) :: "
"sd_{} = {} :: sd_{} = {}"
"".format(i, j, dij, i, si, j, sj))
self.means = np.array(label_means)
self.sds = np.sqrt(np.array(label_variances))
self.ordered_labels = distinct_labels
def predict(X):
pred_indices = np.argmin([norm2(X - m, axis=1) for m in self.means], axis=0)
return ordered_labels[pred_indices]
########################################################################
# User Parameters
DATASET = 'USPS'
VALIDATION_PERCENTAGE = 0
TESTING_PERCENTAGE = .3
NORMALIZE = False
assert 0 < VALIDATION_PERCENTAGE + TESTING_PERCENTAGE < 1
# Load dataset
try:
rootdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
except: # in case I'm trying to paste into ipython
rootdir = os.getcwd()
datadir = os.path.join(rootdir, 'data')
if DATASET.lower() == 'usps':
dataset_dict = loadmat(os.path.join(datadir, 'usps', 'USPS.mat'))
elif DATASET.lower() == 'mnist':
dataset_dict = loadmat(os.path.join(datadir, 'mnist', 'MNIST.mat'))
elif DATASET.lower() == 'notmnist_small':
dataset_dict = loadmat(os.path.join(datadir, 'notMNIST',
'notMNIST_small_no_duplicates.mat'))
elif DATASET.lower() == 'notmnist_large':
dataset_dict = loadmat(os.path.join(datadir, 'notMNIST',
'notMNIST_large_no_duplicates.mat'))
else:
dataset_dict = loadmat(DATASET)
X = dataset_dict['X'].astype('float32')
y = dataset_dict['y'].ravel()
distinct_labels = set(y)
# Flatten images
m, h, w = X.shape
n = w*h
X = X.reshape((m, n))
# shuffle data
from random import shuffle
permutation = range(m)
shuffle(permutation)
X = X[permutation]
y = y[permutation]
# Convert labels to one-hot format
# def onehot(data, ordered_label_set):
# return [[(1 if l == y else 0) for l in ordered_label_set] for y in data]
# y = np.array(onehot(y, distinct_labels)).astype('float32')
# print(y.shape)
# normalize data
if NORMALIZE:
# X = X - mean(X)
# X = X./mean(sum((X-mean(X)).^2))
raise NotImplementedError
m_train = int(0.6*m)
X_train = X[:m_train]
y_train = y[:m_train]
m_valid = int(0.2*m)
X_valid = X[m_train : m_train + m_valid + 1]
y_valid = y[m_train : m_train + m_valid + 1]
m_test = m - m_valid - m_train
X_test = X[m_train + m_valid : len(X) + 1]
y_test = y[m_train + m_valid : len(y) + 1]
# if BATCH_LIMIT:
# limit = BATCH_LIMIT*BATCH_SIZE
# X_train = X_train[:limit, :]
# Y_train = Y_train[:limit]
### Classification by distance from mean
print("Training Set Shape:", X_train.shape)
print("Testing Set Shape:", X_train.shape)
print("Data type:", X_train.dtype)
# Training set predictions and score
predictions_train = predict(X_train, label_means, distinct_labels)
print("Training Set Accuracy:", sum(y_train == predictions_train)/m_train)
# Test set predictions and score
predictions_test = predict(X_test, label_means, distinct_labels)
print("Test Set Accuracy:", sum(y_test == predictions_test)/m_test)
|
{
"content_hash": "e4d0246142a42bc6316ff09108cad857",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 86,
"avg_line_length": 32.369565217391305,
"alnum_prop": 0.6183120662637117,
"repo_name": "mathandy/Classifiers2LearnWith",
"id": "dd95cebd58690999c9f7eee35db83beb637a0688",
"size": "4467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/kmeans.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "8364"
},
{
"name": "Python",
"bytes": "112028"
},
{
"name": "Shell",
"bytes": "1484"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.