commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
0ee4afce4ba81cff6d13152ab082157afc4718f1 | Create pyspark.py | pyspark.py | pyspark.py | from pyspark import SparkConf, SparkContext
conf = SparkConf().setMaster("local").setAppName("MinTemperatures")
sc = SparkContext(conf = conf)
lines = sc.textFile("file:///Users/Spark/1800.csv")
parsedLines = lines.map(parseLine)
| Python | 0.000009 | |
af495c7a69611f2c1fa744dce000c49033eb2dd7 | Add test for sys.intern(). | tests/basics/sys_intern.py | tests/basics/sys_intern.py | # test sys.intern() function
import sys
try:
sys.intern
except AttributeError:
print('SKIP')
raise SystemExit
s1 = "long long long long long long"
s2 = "long long long" + " long long long"
print(id(s1) == id(s2))
i1 = sys.intern(s1)
i2 = sys.intern(s2)
print(id(i1) == id(i2))
i2_ = sys.intern(i2)
print(id(i2_) == id(i2))
try:
sys.intern(1)
except TypeError:
print("TypeError")
| Python | 0 | |
7323565bdc2a290e97617857da475e8f41d5a43f | Add tee plugin | plugins/tee.py | plugins/tee.py | class Plugin:
def on_command(self, bot, msg, stdin, stdout, reply):
text = stdin.read().strip()
reply(text)
print(text, file=stdout)
def on_help(self):
return "Copy standard input to reply, and also to standard output."
| Python | 0 | |
b8ea356af5121ffd612ccf708fe2372fbae3cc3d | add custom hasher for crypt_sha512 | daiquiri/core/hashers.py | daiquiri/core/hashers.py | # inspired by https://djangosnippets.org/snippets/10572/
from collections import OrderedDict
from django.contrib.auth.hashers import CryptPasswordHasher, mask_hash
from django.utils.encoding import force_str
from django.utils.crypto import get_random_string, constant_time_compare
from django.utils.translation import ugettext_noop as _
class CrypdSHA512PasswordHasher(CryptPasswordHasher):
algorithm = 'crypt_sha512'
def salt(self):
return '$6$' + get_random_string(16)
def encode(self, password, salt):
crypt = self._load_library()
data = crypt.crypt(force_str(password), salt)
return "%s%s" % (self.algorithm, data)
def verify(self, password, encoded):
crypt = self._load_library()
algorithm, rest = encoded.split('$', 1)
salt, hash = rest.rsplit('$', 1)
salt = '$' + salt
assert algorithm == self.algorithm
return constant_time_compare('%s$%s' % (salt, hash), crypt.crypt(force_str(password), salt))
def safe_summary(self, encoded):
algorithm, prefix, salt, hash = encoded.split('$')
assert algorithm == self.algorithm
return OrderedDict([
(_('algorithm'), algorithm),
(_('prefix'), prefix),
(_('salt'), mask_hash(salt)),
(_('hash'), mask_hash(hash)),
])
| Python | 0.000001 | |
ede8d4db9f0a8b3331b299019ec67c92261b2a56 | Make sure we don't blow up if BROKER_URL is None | src/sentry/monitoring/queues.py | src/sentry/monitoring/queues.py | """
sentry.monitoring.queues
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from urlparse import urlparse
from django.conf import settings
from django.utils.functional import cached_property
class RedisBackend(object):
def __init__(self, broker_url):
self.broker_url = broker_url
@cached_property
def client(self):
from redis import StrictRedis
return StrictRedis.from_url(self.broker_url)
def bulk_get_sizes(self, queues):
return [(queue, self.get_size(queue)) for queue in queues]
def get_size(self, queue):
return self.client.llen(queue)
def purge_queue(self, queue):
# This is slightly inaccurate since things could be queued between calling
# LLEN and DEL, but it's close enough for this use case.
size = self.get_size(queue)
self.client.delete(queue)
return size
class AmqpBackend(object):
def __init__(self, broker_url):
dsn = urlparse(broker_url)
self.conn_info = dict(
host=dsn.hostname,
port=dsn.port,
userid=dsn.username,
password=dsn.password,
virtual_host=dsn.path[1:],
)
def get_conn(self):
from librabbitmq import Connection
return Connection(**self.conn_info)
def _get_size_from_channel(self, channel, queue):
# In AMQP, the way to do this is to attempt to create a queue passively.
# which is basically checking for it's existence (passive=True), this also
# returns back the queue size.
try:
_, size, _ = channel.queue_declare(queue, passive=True)
except Exception:
return 0
return size
def bulk_get_sizes(self, queues):
sizes = []
with self.get_conn() as conn:
with conn.channel() as channel:
for queue in queues:
sizes.append((queue, self._get_size_from_channel(channel, queue)))
print(sizes)
return sizes
def get_size(self, queue):
with self.get_conn() as conn:
with conn.channel() as channel:
return self._get_size_from_channel(channel, queue)
def purge_queue(self, queue):
with self.get_conn() as conn:
with conn.channel() as channel:
return channel.queue_purge(queue)
def get_backend_for_broker(broker_url):
if broker_url is None:
raise KeyError
return backends[urlparse(broker_url).scheme](broker_url)
def get_queue_by_name(name):
"Lookup a celery Queue object by it's name"
for queue in settings.CELERY_QUEUES:
if queue.name == name:
return queue
backends = {
'redis': RedisBackend,
'amqp': AmqpBackend,
'librabbitmq': AmqpBackend,
}
try:
backend = get_backend_for_broker(settings.BROKER_URL)
except KeyError:
backend = None
| """
sentry.monitoring.queues
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2016 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from urlparse import urlparse
from django.conf import settings
from django.utils.functional import cached_property
class RedisBackend(object):
def __init__(self, broker_url):
self.broker_url = broker_url
@cached_property
def client(self):
from redis import StrictRedis
return StrictRedis.from_url(self.broker_url)
def bulk_get_sizes(self, queues):
return [(queue, self.get_size(queue)) for queue in queues]
def get_size(self, queue):
return self.client.llen(queue)
def purge_queue(self, queue):
# This is slightly inaccurate since things could be queued between calling
# LLEN and DEL, but it's close enough for this use case.
size = self.get_size(queue)
self.client.delete(queue)
return size
class AmqpBackend(object):
def __init__(self, broker_url):
dsn = urlparse(broker_url)
self.conn_info = dict(
host=dsn.hostname,
port=dsn.port,
userid=dsn.username,
password=dsn.password,
virtual_host=dsn.path[1:],
)
def get_conn(self):
from librabbitmq import Connection
return Connection(**self.conn_info)
def _get_size_from_channel(self, channel, queue):
# In AMQP, the way to do this is to attempt to create a queue passively.
# which is basically checking for it's existence (passive=True), this also
# returns back the queue size.
try:
_, size, _ = channel.queue_declare(queue, passive=True)
except Exception:
return 0
return size
def bulk_get_sizes(self, queues):
sizes = []
with self.get_conn() as conn:
with conn.channel() as channel:
for queue in queues:
sizes.append((queue, self._get_size_from_channel(channel, queue)))
print(sizes)
return sizes
def get_size(self, queue):
with self.get_conn() as conn:
with conn.channel() as channel:
return self._get_size_from_channel(channel, queue)
def purge_queue(self, queue):
with self.get_conn() as conn:
with conn.channel() as channel:
return channel.queue_purge(queue)
def get_backend_for_broker(broker_url):
return backends[urlparse(broker_url).scheme](broker_url)
def get_queue_by_name(name):
"Lookup a celery Queue object by it's name"
for queue in settings.CELERY_QUEUES:
if queue.name == name:
return queue
backends = {
'redis': RedisBackend,
'amqp': AmqpBackend,
'librabbitmq': AmqpBackend,
}
try:
backend = get_backend_for_broker(settings.BROKER_URL)
except KeyError:
backend = None
| Python | 0.999414 |
042d0d899896342e9f2e7b083f8543e8077bf19a | Add tests.py to app skeleton. | lib/rapidsms/skeleton/app/tests.py | lib/rapidsms/skeleton/app/tests.py | from rapidsms.tests.scripted import TestScript
from app import App
class TestApp (TestScript):
apps = (App,)
# define your test scripts here.
# e.g.:
#
# testRegister = """
# 8005551212 > register as someuser
# 8005551212 < Registered new user 'someuser' for 8005551212!
# 8005551212 > tell anotheruser what's up??
# 8005550000 < someuser said "what's up??"
# """
#
# You can also do normal unittest.TestCase methods:
#
# def testMyModel (self):
# self.assertEquals(...)
| Python | 0 | |
27a7079f9edf01abce7912eb52c5091279bc85a1 | ADD | 添加pygal path的变量设定源码 | src/lib/PygalPath.py | src/lib/PygalPath.py | #-*- coding:UTF-8 -*-
import socket
import os
__all__ = ['PYGAL_TOOLTIPS_PATH', 'SVG_JQUERY_PATH']
SERVER_WUHAN = '192.168.60.60'
SERVER_WUHAN_PRE = '192.168.6'
SERVER_BEIJING = '192.168.50.193'
SERVER_BEIJING_PRE = '192.168.5'
SERVER = '10.1.145.70'
#根据本机IP获取pygal模块生成svg文件所需的js文件路径
sname=socket.gethostname()
ipList = socket.gethostbyname_ex(sname)[2]
for ip in ipList:
if SERVER_BEIJING_PRE in ip:
path = SERVER_BEIJING
break
elif SERVER_WUHAN_PRE in ip:
path = SERVER_WUHAN
break
else:
path = SERVER
break
PYGAL_TOOLTIPS_PATH = 'http://%s/pygal-tooltips.js' % path
SVG_JQUERY_PATH = 'http://%s/svg.jquery.js' % path | Python | 0 | |
a3db0306133bc3da1cc00d3c745396539a152839 | Add release script | release.py | release.py | #!/usr/bin/env python
from collections import OrderedDict
from itertools import zip_longest
import json
import os
import re
from subprocess import check_output, CalledProcessError
import sys
from zipfile import ZipFile
def sh(command, v=False):
if v:
print(command)
return check_output(command, text=True).strip()
def parse_version(v):
return [int(s) for s in v.split(".")]
def dereference(link):
try:
return sh(f"git rev-parse --verify -q {link}^0")
except CalledProcessError:
return ""
version_string = sys.argv[1]
prefixed_version = f"v{version_string}"
version = parse_version(version_string)
os.chdir(os.path.dirname(os.path.realpath(__file__)))
assert not sh("git status --porcelain")
assert sh("git branch") == "* master"
with open("manifest.json") as f:
manifest = json.load(f, object_pairs_hook=OrderedDict)
manifest_version = parse_version(manifest["version"])
if version != manifest_version:
delta = list(
vs[0] - vs[1]
for vs in zip_longest(version, manifest_version, fillvalue=0)
)
increment = delta.index(1)
assert all(i == 0 for i in delta[0:increment])
assert all(i <= 0 for i in delta[increment + 1 :])
manifest["version"] = version_string
with open("manifest.json", "w", newline="\n") as f:
json.dump(manifest, f, indent=2)
print("", file=f)
sh(f"git commit -a -m {prefixed_version}", v=True)
tag_commit = dereference(prefixed_version)
if tag_commit:
assert tag_commit == dereference("HEAD")
else:
sh(f"git tag {prefixed_version} -m {prefixed_version}", v=True)
sh("git merge-base --is-ancestor origin/master master")
if dereference("master") != dereference("origin/master"):
sh("git push --follow-tags", v=True)
files = ["manifest.json", "config.js"]
for file in sh("git ls-files").splitlines():
m = lambda p: re.search(p, file)
if m(r"\.(html|js)$") and not m(r"\btest\b"):
files.append(file)
with ZipFile("ergometer.zip", "w") as zip:
for file in files:
print(f"zipping {file}")
zip.write(file)
| Python | 0.000001 | |
9ad9808b9bf7c202bc6dbbe8abd74e1c642982ae | structure migration | structure/migrations/0002_structure_refined.py | structure/migrations/0002_structure_refined.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-09-20 07:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='structure',
name='refined',
field=models.BooleanField(default=False),
),
]
| Python | 0.000002 | |
ca1cb845f312ba295718084fa6c8a0d4e68d49e3 | fix boolean parameter in config file | core/config.py | core/config.py | import argparse
import appdirs
import logging
import os
import configparser
import sys
def defaultBackendRpcPort(config):
if config.TESTNET:
return 14000
else:
return 4000
def defaultBackendRpc(config):
protocol = 'https' if config.BACKEND_RPC_SSL else 'http'
return '{}://{}:{}@{}:{}'.format(protocol, config.BACKEND_RPC_USER, config.BACKEND_RPC_PASSWORD, config.BACKEND_RPC_CONNECT, config.BACKEND_RPC_PORT)
ARGS = [
{'name': 'data-dir', 'params': {'help': 'the directory in which to keep the config file and log file, by default'}},
{'name': 'config-file', 'params': {'help': 'the location of the configuration file'}},
{'name': 'testnet', 'params': {'action': 'store_true', 'help': 'use BTC testnet addresses and block numbers'}},
{'name': 'backend-rpc-connect', 'params': {'help': 'the hostname or IP of the backend bitcoind JSON-RPC server'}, 'default': 'localhost'},
{'name': 'backend-rpc-port', 'params': {'type': int, 'help': 'the backend JSON-RPC port to connect to'}, 'default': defaultBackendRpcPort},
{'name': 'backend-rpc-user', 'params': {'help': 'the username used to communicate with backend over JSON-RPC'}, 'default': 'rpc'},
{'name': 'backend-rpc-password', 'params': {'help': 'the password used to communicate with backend over JSON-RPC'}},
{'name': 'backend-rpc-ssl', 'params': {'action': 'store_true', 'help': 'use SSL to connect to backend (default: false)'}},
{'name': 'backend-rpc-ssl-verify', 'params': {'action': 'store_true', 'help':'verify SSL certificate of backend; disallow use of self‐signed certificates (default: false)'}},
{'name': 'backend-rpc', 'params': {'help': 'the complete RPC url used to communicate with backend over JSON-RPC'}, 'default': defaultBackendRpc},
{'name': 'plugins', 'params': {'action': 'append', 'help': 'active plugins'}, 'default': ['send', 'test']},
]
class Config:
def __init__(self):
# get args
parser = argparse.ArgumentParser(prog="Conterpartyd GUI", description='the GUI for Counterpartyd')
for arg in ARGS:
parser.add_argument('--{}'.format(arg['name']), **arg['params'])
self.args = vars(parser.parse_args())
# Data directory
if self.args['data_dir']:
dataDir = self.args.pop('data_dir')
else:
dataDir = appdirs.user_config_dir(appauthor='Counterparty', appname='counterpartygui', roaming=True)
if not os.path.isdir(dataDir): os.mkdir(dataDir)
# Configuration file
if self.args['config_file']:
configPath = self.args.pop('config_file')
else:
configPath = os.path.join(dataDir, 'counterpartygui.conf')
configFile = configparser.ConfigParser()
configFile.read(configPath)
hasConfig = 'Default' in configFile
# if `key` not in config file, return the default value evenually defined in ARGS.
def getDefaultValue(key):
if hasConfig and key in configFile['Default'] and configFile['Default'][key]:
return configFile['Default'][key]
else:
for arg in ARGS:
if arg['name'] == key and 'default' in arg:
if callable(arg['default']):
return arg['default'](self)
else:
return arg['default']
# Todo: `required` field and exception
return None
# set variables
self.DATA_DIR = dataDir
for arg in ARGS:
argName = arg['name'].replace('-', '_')
if self.args[argName] is None or (isinstance(self.args[argName], bool) and '--{}'.format(arg['name']) not in sys.argv and '-{}'.format(arg['name']) not in sys.argv):
self.args[argName] = getDefaultValue(arg['name'])
setattr(self, argName.upper(), self.args[argName])
| import argparse
import appdirs
import logging
import os
import configparser
def defaultBackendRpcPort(config):
if config.TESTNET:
return 14000
else:
return 4000
def defaultBackendRpc(config):
protocol = 'https' if config.BACKEND_RPC_SSL else 'http'
return '{}://{}:{}@{}:{}'.format(protocol, config.BACKEND_RPC_USER, config.BACKEND_RPC_PASSWORD, config.BACKEND_RPC_CONNECT, config.BACKEND_RPC_PORT)
ARGS = [
{'name': 'data-dir', 'params': {'help': 'the directory in which to keep the config file and log file, by default'}},
{'name': 'config-file', 'params': {'help': 'the location of the configuration file'}},
{'name': 'testnet', 'params': {'action': 'store_true', 'help': 'use BTC testnet addresses and block numbers'}},
{'name': 'backend-rpc-connect', 'params': {'help': 'the hostname or IP of the backend bitcoind JSON-RPC server'}, 'default': 'localhost'},
{'name': 'backend-rpc-port', 'params': {'type': int, 'help': 'the backend JSON-RPC port to connect to'}, 'default': defaultBackendRpcPort},
{'name': 'backend-rpc-user', 'params': {'help': 'the username used to communicate with backend over JSON-RPC'}, 'default': 'rpc'},
{'name': 'backend-rpc-password', 'params': {'help': 'the password used to communicate with backend over JSON-RPC'}},
{'name': 'backend-rpc-ssl', 'params': {'action': 'store_true', 'help': 'use SSL to connect to backend (default: false)'}},
{'name': 'backend-rpc-ssl-verify', 'params': {'action': 'store_true', 'help':'verify SSL certificate of backend; disallow use of self‐signed certificates (default: false)'}},
{'name': 'backend-rpc', 'params': {'help': 'the complete RPC url used to communicate with backend over JSON-RPC'}, 'default': defaultBackendRpc},
{'name': 'plugins', 'params': {'action': 'append', 'help': 'active plugins'}, 'default': ['send', 'test']},
]
class Config:
def __init__(self):
# get args
parser = argparse.ArgumentParser(prog="Conterpartyd GUI", description='the GUI for Counterpartyd')
for arg in ARGS:
parser.add_argument('--{}'.format(arg['name']), **arg['params'])
self.args = vars(parser.parse_args())
# Data directory
if self.args['data_dir']:
dataDir = self.args.pop('data_dir')
else:
dataDir = appdirs.user_config_dir(appauthor='Counterparty', appname='counterpartygui', roaming=True)
if not os.path.isdir(dataDir): os.mkdir(dataDir)
# Configuration file
if self.args['config_file']:
configPath = self.args.pop('config_file')
else:
configPath = os.path.join(dataDir, 'counterpartygui.conf')
configFile = configparser.ConfigParser()
configFile.read(configPath)
hasConfig = 'Default' in configFile
# if `key` not in config file, return the default value evenually defined in ARGS.
def getDefaultValue(key):
if hasConfig and key in configFile['Default'] and configFile['Default'][key]:
return configFile['Default'][key]
else:
for arg in ARGS:
if arg['name'] == key and 'default' in arg:
if callable(arg['default']):
return arg['default'](self)
else:
return arg['default']
# Todo: `required` field and exception
return None
# set variables
self.DATA_DIR = dataDir
for arg in ARGS:
argName = arg['name'].replace('-', '_')
if self.args[argName] is None:
self.args[argName] = getDefaultValue(arg['name'])
setattr(self, argName.upper(), self.args[argName])
| Python | 0.000002 |
be01980afe4b1dbd5a1d5b07651cd7a54c771d01 | Add unit tests for disk module | tests/modules/test_disk.py | tests/modules/test_disk.py | # pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.disk import Module
class MockVFS(object):
def __init__(self, perc):
self.f_blocks = 1024*1024
self.f_frsize = 1
self.f_bavail = self.f_blocks - self.f_blocks*(perc/100.0)
class TestDiskModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self._os = mock.patch("bumblebee.modules.disk.os")
self.os = self._os.start()
self.config.set("disk.path", "somepath")
def tearDown(self):
self._os.stop()
mocks.teardown_test(self)
def test_leftclick(self):
module = Module(engine=self.engine, config={"config":self.config})
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=module)
self.popen.assert_call("nautilus {}".format(self.module.parameter("path")))
def test_warning(self):
self.config.set("disk.critical", "80")
self.config.set("disk.warning", "70")
self.os.statvfs.return_value = MockVFS(75.0)
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("disk.critical", "80")
self.config.set("disk.warning", "70")
self.os.statvfs.return_value = MockVFS(85.0)
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0 | |
da52f7c8c9280fe644a01a324eaad5512870dccb | add models.py | core/models.py | core/models.py | #!/usr/bin/env python
# coding = utf-8
"""
core.models
"""
__author__ = 'Rnd495'
import datetime
import hashlib
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, Float, String, DateTime, Text, Index
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from configs import Configs
configs = Configs.instance()
Base = declarative_base()
class User(Base):
__tablename__ = 'T_User'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(length=64), nullable=False, unique=True, index=Index('User_index_name'))
pwd = Column(String(length=128), nullable=False)
role_id = Column(Integer, nullable=False, index=Index('User_index_role_id'))
register_time = Column(DateTime, nullable=False)
header_url = Column(String(length=256), nullable=True)
def __init__(self, name, pwd,
role_id=0,
header_url=None):
self.name = name
self.pwd = User.password_hash(pwd)
self.register_time = datetime.datetime.now()
self.role_id = role_id
self.header_url = header_url
def __repr__(self):
return "<%s[%s]: %s>" % (type(self).__name__, self.id, self.name)
def get_is_same_password(self, password):
return User.password_hash(password) == self.pwd
def set_password(self, password):
self.pwd = hashlib.sha256(self.name + password)
class Role(Base):
__tablename__ = 'T_Role'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(length=64), nullable=False)
def __init__(self, name, id=None):
self.name = name
if id is not None:
self.id = id
def __repr__(self):
return "<%s[%s]: %s>" % (type(self).__name__, self.id, self.name)
_engine = None
_session_maker = None
_session = None
def get_engine():
global _engine
if not _engine:
_engine = create_engine(configs.database_url, echo=False)
Base.metadata.create_all(_engine)
return _engine
def get_session_maker():
global _session_maker
if not _session_maker:
_session_maker = sessionmaker(bind=get_engine())
return _session_maker
def get_global_session():
global _session
if not _session:
_session = get_session_maker()()
return _session
def get_new_session():
return get_session_maker()()
| Python | 0.000001 | |
ecacda9bab83cd66f25a3ce85f36646c13a61f3f | put full example in its own module | docs/coordinates/sgr-example.py | docs/coordinates/sgr-example.py | # coding: utf-8
""" Astropy coordinate class for the Sagittarius coordinate system """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
# Third-party
import numpy as np
from numpy import radians, degrees, cos, sin
import astropy.coordinates as coord
import astropy.units as u
from astropy.coordinates import transformations
from astropy.coordinates.angles import rotation_matrix
__all__ = ["SgrCoordinates"]
class SgrCoordinates(coord.SphericalCoordinatesBase):
""" A spherical coordinate system defined by the orbit of the Sagittarius
dwarf galaxy, as described in
http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
http://www.astro.virginia.edu/~srm4n/Sgr/.
"""
__doc__ = __doc__.format(params=coord.SphericalCoordinatesBase. \
_init_docstring_param_templ. \
format(lonnm='Lambda', latnm='Beta'))
def __init__(self, *args, **kwargs):
super(SgrCoordinates, self).__init__()
if len(args) == 1 and len(kwargs) == 0 and
isinstance(args[0], coord.SphericalCoordinatesBase):
newcoord = args[0].transform_to(self.__class__)
self.Lambda = newcoord.Lambda
self.Beta = newcoord.Beta
self._distance = newcoord._distance
else:
super(SgrCoordinates, self).
_initialize_latlon('Lambda', 'Beta', False, args, kwargs,
anglebounds=((0, 360), (-90,90)))
def __repr__(self):
if self.distance is not None:
diststr = ', Distance={0:.2g} {1!s}'.format(self.distance._value,
self.distance._unit)
else:
diststr = ''
msg = "<{0} Lambda={1:.5f} deg, Beta={2:.5f} deg{3}>"
return msg.format(self.__class__.__name__, self.Lambda.degrees,
self.Beta.degrees, diststr)
@property
def lonangle(self):
return self.Lambda
@property
def latangle(self):
return self.Beta
# Define the Euler angles
phi = radians(180+3.75)
theta = radians(90-13.46)
psi = radians(180+14.111534)
rot11 = cos(psi)*cos(phi)-cos(theta)*sin(phi)*sin(psi)
rot12 = cos(psi)*sin(phi)+cos(theta)*cos(phi)*sin(psi)
rot13 = sin(psi)*sin(theta)
rot21 = -sin(psi)*cos(phi)-cos(theta)*sin(phi)*cos(psi)
rot22 = -sin(psi)*sin(phi)+cos(theta)*cos(phi)*cos(psi)
rot23 = cos(psi)*sin(theta)
rot31 = sin(theta)*sin(phi)
rot32 = -sin(theta)*cos(phi)
rot33 = cos(theta)
rotation_matrix = np.array([[rot11, rot12, rot13],
[rot21, rot22, rot23],
[rot31, rot32, rot33]])
# Galactic to Sgr coordinates
@transformations.transform_function(coord.GalacticCoordinates, SgrCoordinates)
def galactic_to_sgr(galactic_coord):
""" Compute the transformation from Galactic spherical to Sgr coordinates.
"""
l = galactic_coord.l.radians
b = galactic_coord.b.radians
X = cos(b)*cos(l)
Y = cos(b)*sin(l)
Z = sin(b)
# Calculate X,Y,Z,distance in the Sgr system
Xs, Ys, Zs = rotation_matrix.dot(np.array([X, Y, Z]))
Zs = -Zs
# Calculate the angular coordinates lambda,beta
Lambda = degrees(np.arctan2(Ys,Xs))
if Lambda<0:
Lambda += 360
Beta = degrees(np.arcsin(Zs/np.sqrt(Xs*Xs+Ys*Ys+Zs*Zs)))
return SgrCoordinates(Lambda, Beta, distance=galactic_coord.distance,
unit=(u.degree, u.degree))
@transformations.transform_function(SgrCoordinates, coord.GalacticCoordinates)
def sgr_to_galactic(sgr_coord):
L = sgr_coord.Lambda.radians
B = sgr_coord.Beta.radians
Xs = cos(B)*cos(L)
Ys = cos(B)*sin(L)
Zs = sin(B)
Zs = -Zs
X, Y, Z = rotation_matrix.T.dot(np.array([Xs, Ys, Zs]))
l = degrees(np.arctan2(Y,X))
b = degrees(np.arcsin(Z/np.sqrt(X*X+Y*Y+Z*Z)))
if l<0:
l += 360
return coord.GalacticCoordinates(l, b, distance=sgr_coord.distance,
unit=(u.degree, u.degree)) | Python | 0 | |
4bf1a14f6b6d3b30f30732bb45f4e8a501dfcbf6 | test github backup | tests/pkcli/github_test.py | tests/pkcli/github_test.py | # -*- coding: utf-8 -*-
u"""test github
:copyright: Copyright (c) 2019 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_backup():
from pykern import pkconfig
pkconfig.reset_state_for_testing({
'PYKERN_PKCLI_GITHUB_TEST_MODE': '1',
'PYKERN_PKCLI_GITHUB_API_PAUSE_SECONDS': '0',
})
from pykern.pkcli import github
from pykern import pkunit
from pykern import pkio
with pkunit.save_chdir_work():
github.backup()
github.backup()
| Python | 0.000003 | |
8fdaeea43e31a1c429703cd4f441a748bcfa8197 | Create create_mask.py | create_mask.py | create_mask.py |
from __future__ import print_function
import argparse
from PIL import ImageFont, ImageDraw, Image
def create_mask(text, font_type, font_size=84):
'''
Creates an image with the given text in it.
'''
# initialize fond with given size
font = ImageFont.truetype(font_type, font_size)
dx, dy = font.getsize(text)
# draw the text to the image
mask = Image.new('RGB', (dx, max(dy, font_size)))
draw = ImageDraw.Draw(mask)
draw.text((0,-int(0.15*font_size)), text, font=font)
return mask
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a mask for char repeat')
parser.add_argument('text', help='the text to print on the image')
parser.add_argument('font', help='select a font (e.g. SourceCodePro-Black.ttf)')
parser.add_argument('-s', dest='font_size', default=84, type=int,
help='size of the font')
parser.add_argument('-p', action='store_true', dest='plot', help='show image')
param = parser.parse_args()
mask = create_mask(param.text, param.font, param.font_size)
mask.save('mask_%s.png' % param.text)
if param.plot:
dx, dy = mask.size
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(mask)
plt.title('text: %s (mask size = %i x %i)' % (param.text, dx, dy))
plt.show()
| Python | 0.000017 | |
79e55030736572608841bbdd3a6022fc2420be45 | implement recursive permutation generation algorithm with switch | switch.py | switch.py | ###
# A permutation generation algorithm.
# this algorithm is implemented by switching neighboring number.
#
# Under here is two implementation recursive and iterative
###
def recursive_PGA_with_switch(width):
'''
Recursive permutation generation algorithm with switch
'''
# condition
assert width > 0
assert type(width) is int
# boundary
if width == 1:
yield '1'
return
# recursion
left = True # direction to move
for sub_permutation in recursive_PGA_with_switch(width-1):
# positions to insert new number
positions = reversed(range(width)) if left else range(width)
left = not left
for i in positions:
perm = [sub_permutation[:i], str(width), sub_permutation[i:]]
#print(perm)
yield(''.join(perm))
if __name__ == '__main__':
for permutation in recursive_PGA_with_switch(3):
print(permutation)
| Python | 0.000002 | |
dfab68c27b3f25f448c0a2a6d0cee347bae2b08f | Add tests for canonicalize | tests/test_canonicalize.py | tests/test_canonicalize.py | from intervals import Interval, canonicalize
def test_canonicalize():
assert canonicalize(Interval([1, 4])).normalized == '[1, 5)'
assert canonicalize(
Interval((1, 7)), lower_inc=True, upper_inc=True
).normalized == '[2, 6]'
assert canonicalize(
Interval([1, 7]), lower_inc=False, upper_inc=True
).normalized == '(0, 7]'
| Python | 0 | |
76468926c1efe4d18477a70d767f91d4c6e38768 | Add test for dotted circles in sample text | tests/test_dottedcircle.py | tests/test_dottedcircle.py | import uharfbuzz as hb
import gflanguages
import pytest
langs = gflanguages.LoadLanguages()
@pytest.fixture
def hb_font():
# Persuade Harfbuzz we have a font that supports
# every codepoint.
face = hb.Face(b"")
font = hb.Font(face)
funcs = hb.FontFuncs.create()
funcs.set_nominal_glyph_func((lambda font,cp,data: cp), None)
font.funcs = funcs
return font
@pytest.mark.parametrize("lang", langs.keys())
def test_dotted_circle(lang, hb_font):
item = langs[lang]
samples = [x for (_,x) in item.sample_text.ListFields()]
for sample in sorted(samples, key=lambda x:len(x)):
buf = hb.Buffer()
buf.add_str(sample)
buf.guess_segment_properties()
hb.shape(hb_font, buf)
ok = not any(info.codepoint == 0x25CC for info in buf.glyph_infos)
assert ok, f"Dotted circle found in {sample} ({lang})"
| Python | 0 | |
7c69ec08967dc38463cfc5e1323d69fd5f261333 | Create config.py | config.py | config.py | #!/usr/bin/env python
import pyvty
user = 'admin'
password = 'password'
host = '10.36.65.227'
config_file = 'config.txt' # name of text file containing config commands.
logfile = 'config_' + host + '.log' # terminal output will be saved in this file.
try:
input_file = open(config_file)
commands = input_file.readlines()
input_file.close()
except IOError as e:
print(e)
exit()
term = pyvty.Terminal(host=host, username=user, password=password, logfile=logfile)
term.send('config term')
for command in commands:
results = term.send(command.rstrip())
for line in results:
print(line.rstrip())
# term.send('write mem') ''' save configuration to disk '''
term.send('end')
term.write('exit')
| Python | 0 | |
7864c8e8591d1de14f18ecfaf880e79de6c7702e | add tests for placeholder class | tests/test_placeholders.py | tests/test_placeholders.py | import re
import pytest
from notifications_utils.field import Placeholder
# A placeholder name is the body with any surrounding (( )) stripped.
@pytest.mark.parametrize('body, expected', [
    ('((with-brackets))', 'with-brackets'),
    ('without-brackets', 'without-brackets'),
])
def test_placeholder_returns_name(body, expected):
    assert Placeholder(body).name == expected
# '??' marks a conditional placeholder; a lone '?' does not.
@pytest.mark.parametrize('body, is_conditional', [
    ('not a conditional', False),
    ('not? a conditional', False),
    ('a?? conditional', True),
])
def test_placeholder_identifies_conditional(body, is_conditional):
    assert Placeholder(body).is_conditional() == is_conditional
# Everything after the first '??' (whitespace preserved) is the
# conditional text, including any further '??' sequences.
@pytest.mark.parametrize('body, conditional_text', [
    ('a??b', 'b'),
    ('a?? b ', ' b '),
    ('a??b??c', 'b??c'),
])
def test_placeholder_gets_conditional_text(body, conditional_text):
    assert Placeholder(body).conditional_text == conditional_text
def test_placeholder_raises_if_accessing_conditional_text_on_non_conditional():
    with pytest.raises(ValueError):
        Placeholder('hello').conditional_text
# 'Yes' keeps the conditional text; 'No' collapses it to an empty string.
@pytest.mark.parametrize('body, value, result', [
    ('a??b', 'Yes', 'b'),
    ('a??b', 'No', ''),
])
def test_placeholder_gets_conditional_body(body, value, result):
    assert Placeholder(body).get_conditional_body(value) == result
def test_placeholder_raises_if_getting_conditional_body_on_non_conditional():
    with pytest.raises(ValueError):
        Placeholder('hello').get_conditional_body('Yes')
def test_placeholder_can_be_constructed_from_regex_match():
    match = re.search(r'\(\(.*\)\)', 'foo ((bar)) baz')
    assert Placeholder.from_match(match).name == 'bar'
| Python | 0 | |
9b1f9e9abd2890bc3e4d9f38109f12de8b488b66 | Create config.py | config.py | config.py | username = "facility_ai"
# Reddit OAuth2 application credentials for the bot account above.
# NOTE(review): secrets are committed in plain text -- consider loading
# them from environment variables or an untracked config file instead.
password = "UncloakIsADick"
client_id = "GORfUXQGjNIveA"
client_secret = "SzPFXaqgVbRxm_V9-IfGL05npPE"
| Python | 0.000002 | |
17f71bfb81393241759e38fb9dce01561aeca3d5 | Add tests to product tags | tests/test_product_tags.py | tests/test_product_tags.py | from mock import Mock
from saleor.product.templatetags.product_images import get_thumbnail, product_first_image
def test_get_thumbnail():
    # get_thumbnail should pick the URL from the matching size bucket of
    # either the 'crop' or 'thumbnail' mapping, depending on method=.
    instance = Mock()
    cropped_value = Mock(url='crop.jpg')
    thumbnail_value = Mock(url='thumb.jpg')
    instance.crop = {'10x10': cropped_value}
    instance.thumbnail = {'10x10': thumbnail_value}
    cropped = get_thumbnail(instance, '10x10', method='crop')
    assert cropped == cropped_value.url
    thumb = get_thumbnail(instance, '10x10', method='thumbnail')
    assert thumb == thumbnail_value.url
def test_get_thumbnail_no_instance():
    # With no image instance the placeholder asset URL is returned.
    output = get_thumbnail(instance=None, size='10x10', method='crop')
    assert output == '/static/images/product-image-placeholder.png'
def test_product_first_image():
    # product_first_image should use the first image of product.images.all().
    mock_product_image = Mock()
    mock_product_image.image = Mock()
    mock_product_image.image.crop = {'10x10': Mock(url='crop.jpg')}
    mock_queryset = Mock()
    mock_queryset.all.return_value = [mock_product_image]
    mock_product = Mock(images=mock_queryset)
    out = product_first_image(mock_product, '10x10', method='crop')
    assert out == 'crop.jpg'
| Python | 0 | |
27444dfefa70759694b755185c2eb6f25216d326 | Remove Node - migration. | devilry/apps/core/migrations/0037_auto_20170620_1515.py | devilry/apps/core/migrations/0037_auto_20170620_1515.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-06-20 15:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the legacy Node model. Constraints and relations referencing
    # Node are dropped first so DeleteModel can run cleanly.
    dependencies = [
        ('core', '0036_auto_20170523_1748'),
    ]
    operations = [
        # Drop the unique constraint before removing the fields it covers.
        migrations.AlterUniqueTogether(
            name='node',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='node',
            name='admins',
        ),
        # Self-referential parent link on Node.
        migrations.RemoveField(
            model_name='node',
            name='parentnode',
        ),
        # Subject's FK to Node must go before the model itself.
        migrations.RemoveField(
            model_name='subject',
            name='parentnode',
        ),
        migrations.DeleteModel(
            name='Node',
        ),
    ]
| Python | 0 | |
f1cb5af4f42ccf437cdd6ef06a2056993e54b604 | Create sum13.py | Python/CodingBat/sum13.py | Python/CodingBat/sum13.py | # http://codingbat.com/prob/p167025
def sum13(nums):
    """Sum the numbers, treating 13 as unlucky.

    A 13 contributes nothing to the total and also cancels the value that
    immediately follows it.
    """
    total = 0
    skip_next = False
    for value in nums:
        if skip_next:
            # This element directly follows a 13 -- drop it.
            skip_next = False
            continue
        if value == 13:
            skip_next = True
            continue
        total += value
    return total
| Python | 0.000132 | |
f8f38fde96ab166af6b899d3e841e6a46b7dddd1 | switch to comparing sets of neighbors rather than lists | pysal/weights/tests/test_Wsets.py | pysal/weights/tests/test_Wsets.py | """Unit test for Wsets module."""
import unittest
import pysal
class TestWsets(unittest.TestCase):
    """Unit tests for the Wsets (set-theoretic weights operations) module."""

    def test_w_union(self):
        """w_union keeps every neighbor pair found in either input."""
        w1 = pysal.lat2W(4, 4)
        w2 = pysal.lat2W(6, 4)
        w3 = pysal.weights.Wsets.w_union(w1, w2)
        self.assertEqual(w1[0], w3[0])
        self.assertEqual(set(w1.neighbors[15]), set([11, 14]))
        self.assertEqual(set(w2.neighbors[15]), set([11, 14, 19]))
        self.assertEqual(set(w3.neighbors[15]), set([19, 11, 14]))

    def test_w_intersection(self):
        """w_intersection keeps only neighbor pairs found in both inputs.

        Bug fix: this test previously called w_union (copy/paste error),
        so w_intersection had no coverage at all.
        """
        w1 = pysal.lat2W(4, 4)
        w2 = pysal.lat2W(6, 4)
        w3 = pysal.weights.Wsets.w_intersection(w1, w2)
        self.assertEqual(w1[0], w3[0])
        self.assertEqual(set(w1.neighbors[15]), set([11, 14]))
        self.assertEqual(set(w2.neighbors[15]), set([11, 14, 19]))
        # Only the pairs present in both lattices survive.
        self.assertEqual(set(w3.neighbors[15]), set([11, 14]))

    def test_w_difference(self):
        """Queen minus rook leaves only the diagonal neighbors."""
        w1 = pysal.lat2W(4, 4, rook=False)
        w2 = pysal.lat2W(4, 4, rook=True)
        w3 = pysal.weights.Wsets.w_difference(w1, w2, constrained=False)
        self.assertNotEqual(w1[0], w3[0])
        self.assertEqual(set(w1.neighbors[15]), set([10, 11, 14]))
        self.assertEqual(set(w2.neighbors[15]), set([11, 14]))
        self.assertEqual(set(w3.neighbors[15]), set([10]))

    def test_w_symmetric_difference(self):
        """Pairs in exactly one of the two weights objects are kept."""
        w1 = pysal.lat2W(4, 4, rook=False)
        w2 = pysal.lat2W(6, 4, rook=True)
        w3 = pysal.weights.Wsets.w_symmetric_difference(
            w1, w2, constrained=False)
        self.assertNotEqual(w1[0], w3[0])
        self.assertEqual(set(w1.neighbors[15]), set([10, 11, 14]))
        self.assertEqual(set(w2.neighbors[15]), set([11, 14, 19]))
        self.assertEqual(set(w3.neighbors[15]), set([10, 19]))

    def test_w_subset(self):
        """Restricting to ids 0..15 trims neighbors outside the subset."""
        w1 = pysal.lat2W(6, 4)
        ids = range(16)
        w2 = pysal.weights.Wsets.w_subset(w1, ids)
        self.assertEqual(w1[0], w2[0])
        self.assertEqual(set(w1.neighbors[15]), set([11, 14, 19]))
        self.assertEqual(set(w2.neighbors[15]), set([11, 14]))
# Module-level suite so the file can also be run directly with a plain
# TextTestRunner instead of a test discoverer.
suite = unittest.TestLoader().loadTestsFromTestCase(TestWsets)

if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
| """Unit test for Wsets module."""
import unittest
import pysal
class TestWsets(unittest.TestCase):
"""Unit test for Wsets module."""
def test_w_union(self):
"""Unit test"""
w1 = pysal.lat2W(4, 4)
w2 = pysal.lat2W(6, 4)
w3 = pysal.weights.Wsets.w_union(w1, w2)
self.assertEqual(w1[0], w3[0])
self.assertEqual(w1.neighbors[15], [11, 14])
self.assertEqual(w2.neighbors[15], [11, 14, 19])
self.assertEqual(w3.neighbors[15], [19, 11, 14])
def test_w_intersection(self):
"""Unit test"""
w1 = pysal.lat2W(4, 4)
w2 = pysal.lat2W(6, 4)
w3 = pysal.weights.Wsets.w_union(w1, w2)
self.assertEqual(w1[0], w3[0])
self.assertEqual(w1.neighbors[15], [11, 14])
self.assertEqual(w2.neighbors[15], [11, 14, 19])
self.assertEqual(w3.neighbors[15], [19, 11, 14])
def test_w_difference(self):
"""Unit test"""
w1 = pysal.lat2W(4, 4, rook=False)
w2 = pysal.lat2W(4, 4, rook=True)
w3 = pysal.weights.Wsets.w_difference(w1, w2, constrained=False)
self.assertNotEqual(w1[0], w3[0])
self.assertEqual(w1.neighbors[15], [10, 11, 14])
self.assertEqual(w2.neighbors[15], [11, 14])
self.assertEqual(w3.neighbors[15], [10])
def test_w_symmetric_difference(self):
"""Unit test"""
w1 = pysal.lat2W(4, 4, rook=False)
w2 = pysal.lat2W(6, 4, rook=True)
w3 = pysal.weights.Wsets.w_symmetric_difference(
w1, w2, constrained=False)
self.assertNotEqual(w1[0], w3[0])
self.assertEqual(w1.neighbors[15], [10, 11, 14])
self.assertEqual(w2.neighbors[15], [11, 14, 19])
self.assertEqual(w3.neighbors[15], [10, 19])
def test_w_subset(self):
"""Unit test"""
w1 = pysal.lat2W(6, 4)
ids = range(16)
w2 = pysal.weights.Wsets.w_subset(w1, ids)
self.assertEqual(w1[0], w2[0])
self.assertEqual(w1.neighbors[15], [11, 14, 19])
self.assertEqual(w2.neighbors[15], [11, 14])
suite = unittest.TestLoader().loadTestsFromTestCase(TestWsets)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| Python | 0.000001 |
d408ae00d2e5c0a0e7c5e90e98088d52815c5e49 | Create WikiScrape.py | Python/WikiScrape.py | Python/WikiScrape.py | ## This script was tested using Python 2.7.9
## The MWClient library was used to access the api. It can be found at:
## https://github.com/mwclient/mwclient
import mwclient
# Connect over HTTPS and authenticate; saving the page below requires a
# logged-in session.
site = mwclient.Site(('https', 'en.wikipedia.org'))
site.login('$user', '$pass') # credentials are sanitized from the script.
# Append a wiki-linked bullet for every page in the category to the
# user's existing list page, then save with an edit summary.
listpage = site.Pages['User:jebaile7964/World_of_Warcraft']
text = listpage.text()
for page in site.Categories['World_of_Warcraft']:
    text += "* [[:" + page.name + "]]\n"
listpage.save(text, summary='Creating list from [[Category:World_of_Warcraft]]')
## results are found at:
## https://en.wikipedia.org/wiki/User:Jebaile7964/World_of_Warcraft
| Python | 0 | |
e455d459590a4f2b16b9a9360b6e33640f5ec7bf | Add a script to check that __all__ in __init__.py is correct | python/allcheck.py | python/allcheck.py | #!/usr/bin/env python
import sys
import re
import glob

import phonenumbers

# Modules that are implementation details and deliberately not re-exported
# through phonenumbers.__all__.
INTERNAL_FILES = ['phonenumbers/util.py',
                  'phonenumbers/re_util.py',
                  'phonenumbers/unicode_util.py']

# Top-level definitions we expect to be re-exported: classes, functions and
# UPPER_CASE constants. Raw strings avoid the invalid "\(" escape that the
# original non-raw patterns relied on.
CLASS_RE = re.compile(r"^class +([A-Za-z][_A-Za-z0-9]+)[ \(:]")
FUNCTION_RE = re.compile(r"^def +([A-Za-z][_A-Za-z0-9]+)[ \(]")
CONSTANT_RE = re.compile(r"^([A-Z][_A-Z0-9]+) *= *")


def _report(title, identifiers):
    """Write a heading plus one indented identifier per line to stderr."""
    # sys.stderr.write works on both Python 2 and 3, unlike the original
    # py2-only "print >> sys.stderr" statements.
    sys.stderr.write(title + "\n")
    for identifier in sorted(identifiers):
        sys.stderr.write("  %s\n" % identifier)


# Grep every public module for top-level definitions.
grepped_all = set()
for filename in glob.glob('phonenumbers/*.py'):
    if filename in INTERNAL_FILES:
        continue
    # open() works on both Python 2 and 3; the py2-only file() builtin
    # broke this script under Python 3.
    with open(filename, "r") as infile:
        for line in infile:
            for regex in (CLASS_RE, FUNCTION_RE, CONSTANT_RE):
                m = regex.match(line)
                if m:
                    grepped_all.add(m.group(1))

# Compare the declared public API against what the grep found.
code_all = set(phonenumbers.__all__)
code_not_grepped = (code_all - grepped_all)
grepped_not_code = (grepped_all - code_all)

if code_not_grepped:
    _report("Found the following in __all__ but not in grepped code:",
            code_not_grepped)
if grepped_not_code:
    # Bug fix: the original message read "not in__all__" (missing space).
    _report("Found the following in grepped code but not in __all__:",
            grepped_not_code)
| Python | 0.003175 | |
0deec6fecb527f12ff6851c47820b76db8196a34 | Add files via upload | rosette.py | rosette.py | # This is a basic program showing the functionality of the turtle module.
# It generates a very pretty spiraling pattern.
# NOTE(review): requires a display -- turtle opens a Tk window when run.
import turtle # import the turtle module so we can draw
import math # import the math module, we need this for e and pi
t = turtle.Pen() # set a variable to draw with
t.reset() # clear the screen just in case of leftovers
x = 0 # set some variables to play with
y = 5
z = 10
# Each iteration draws three arcs whose radii drift apart: x grows
# linearly while y and z shrink geometrically by e and pi.
while x <= 999: # the more repeats, the larger the pattern
    t.circle(x,13,2) # this basically sets up 3 arcs which can be varied
    t.circle(y,17,3)
    t.circle(z,19,5)
    x = x + 1 # increment or decrement the radius of each arc by an interesting value
    y = y / math.e
    z = z / math.pi
| Python | 0 | |
48c844e602eaa182c4efaaa0b977765f4248d0a0 | Add a data migration tool | tools/network_migration.py | tools/network_migration.py | import argparse, shelve
def renameDictKeys(storageDict):
    """Recursively rename keys equal to options.oldnetwork to options.newnetwork.

    Mutates *storageDict* (and any nested dicts) in place. Relies on the
    module-level ``options`` namespace parsed under ``__main__``.
    """
    # Bug fix: iterate over a snapshot of the keys. The loop body inserts
    # and deletes entries, and mutating a dict while iterating over it (the
    # old iterkeys() approach) raises RuntimeError.
    for key in list(storageDict.keys()):
        if isinstance(storageDict[key], dict):
            renameDictKeys(storageDict[key])
        if key == options.oldnetwork:
            storageDict[options.newnetwork] = storageDict[options.oldnetwork]
            del storageDict[options.oldnetwork]
if __name__ == "__main__":
    # Parse the command line arguments
    parser = argparse.ArgumentParser(description="A tool for PyHeufyBot to migrate all storage data from one network "
                                                 "to another.")
    parser.add_argument("-s", "--storage", help="The storage file to use", type=str, default="../heufybot.db")
    parser.add_argument("-o", "--oldnetwork", help="The name of the old network that the data should be migrated "
                                                   "from.", type=str, required=True)
    parser.add_argument("-n", "--newnetwork", help="The name of the new network that the data should be migrated to.",
                        type=str, required=True)
    options = parser.parse_args()

    # Work on a plain-dict copy of the shelf, rename in memory, then write
    # the result back wholesale and persist it.
    storage = shelve.open(options.storage)
    d = dict(storage)
    renameDictKeys(d)
    storage.clear()
    storage.update(d)
    storage.close()
    print "Data has been migrated from '{}' to '{}'.".format(options.oldnetwork, options.newnetwork)
| Python | 0.000006 | |
20269212705bdfd8748be468a50567ba290ad4a1 | Bump PROVISION_VERSION for new APNS. | version.py | version.py | ZULIP_VERSION = "1.6.0+git"
# Bumped so provisioning re-runs after the APNS (push notification) change.
PROVISION_VERSION = '9.4'
| ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '9.3'
| Python | 0 |
bd2cfd63ce51c55c695a12dc13e9ac52872cea5a | Add ternary_axes_subplot.py | ternary/ternary_axes_subplot.py | ternary/ternary_axes_subplot.py | """
Wrapper class for all ternary plotting functions.
"""
from matplotlib import pyplot
import heatmapping
import lines
import plotting
def figure(ax=None, scale=None):
    """
    Wraps a Matplotlib AxesSubplot or generates a new one. Emulates matplotlib's
    > figure, ax = pyplot.subplot()
    Parameters
    ----------
    ax: AxesSubplot, None
        The AxesSubplot to wrap
    scale: float, None
        The scale factor of the ternary plot
    Returns
    -------
    (Figure, TernaryAxesSubplot)
        The underlying matplotlib Figure and the ternary wrapper, mirroring
        the pyplot.subplots() return convention.
    """
    ternary_ax = TernaryAxesSubplot(ax=ax, scale=scale)
    return ternary_ax.get_figure(), ternary_ax
class TernaryAxesSubplot(object):
    """Wrapper tying python-ternary helpers to one matplotlib AxesSubplot.

    Exposes the module-level plotting, heatmap, line and annotation helpers
    as methods bound to a single axes, mirroring the pyplot-style API.
    """

    def __init__(self, ax=None, scale=None):
        if not scale:
            scale = 1.0
        if ax:
            self.ax = ax
            self.figure = ax.get_figure()
        else:
            self.figure, self.ax = pyplot.subplots()
        self.set_scale(scale=scale)

    def __repr__(self):
        return "TernaryAxesSubplot: %s" % self.ax.__hash__()

    def resize_drawing_canvas(self):
        """Resize the drawing canvas to fit the current plot scale."""
        plotting.resize_drawing_canvas(self.ax, scale=self.get_scale())

    def get_figure(self):
        return self.figure

    def set_scale(self, scale=None):
        # Changing scale requires resizing the canvas to match.
        self._scale = scale
        self.resize_drawing_canvas()

    def get_scale(self):
        return self._scale

    def get_axes(self):
        return self.ax

    def scatter(self, points, **kwargs):
        """Scatter-plot ternary *points*; returns the matplotlib artist."""
        plot_ = plotting.scatter(points, ax=self.ax, **kwargs)
        return plot_

    def plot(self, points, **kwargs):
        plotting.plot(points, ax=self.ax, **kwargs)

    def clear_matplotlib_ticks(self, axis="both"):
        plotting.clear_matplotlib_ticks(ax=self.ax,
                                        axis=axis)

    def left_axis_label(self, label, **kwargs):
        plotting.left_axis_label(self.ax, label, **kwargs)

    def right_axis_label(self, label, **kwargs):
        plotting.right_axis_label(self.ax, label, **kwargs)

    def bottom_axis_label(self, label, **kwargs):
        plotting.bottom_axis_label(self.ax, label, **kwargs)

    def heatmap(self, data, scale=None, cmap_name=None, scientific=False,
                style='triangular', colorbar=True):
        """Heatmap from a dict of simplex points; defaults to plot scale."""
        if not scale:
            scale = self._scale
        heatmapping.heatmap(data, scale, cmap_name=cmap_name, style=style,
                            ax=self.ax, scientific=scientific,
                            colorbar=colorbar)

    def heatmapf(self, func, scale=None, cmap_name=None,
                 boundary=True, style='triangular', colorbar=True,
                 scientific=True):
        """Heatmap of *func* evaluated over the simplex at plot scale."""
        if not scale:
            scale = self._scale
        heatmapping.heatmapf(func, scale, cmap_name=cmap_name,
                             style=style, boundary=boundary,
                             ax=self.ax, scientific=scientific,
                             colorbar=colorbar)

    def line(self, p1, p2, **kwargs):
        lines.line(self.ax, p1, p2, **kwargs)

    def horizontal_line(self, i, **kwargs):
        lines.horizontal_line(self.ax, self.get_scale(), i, **kwargs)

    def left_parallel_line(self, i, **kwargs):
        lines.left_parallel_line(self.ax, self.get_scale(), i, **kwargs)

    def right_parallel_line(self, i, **kwargs):
        lines.right_parallel_line(self.ax, self.get_scale(), i, **kwargs)

    def boundary(self, scale=None, **kwargs):
        # Sometimes you want to draw a bigger boundary
        if not scale:
            scale = self.get_scale()
        lines.boundary(scale=scale, ax=self.ax, **kwargs)

    def gridlines(self, multiple=None, **kwargs):
        lines.gridlines(scale=self.get_scale(), multiple=multiple, ax=self.ax,
                        **kwargs)

    def set_title(self, title, **kwargs):
        self.ax.set_title(title, **kwargs)

    def save_fig(self, filename, dpi=200, format=None):
        """Save the underlying figure to *filename*.

        Bug fix: matplotlib Figure objects expose ``savefig``; the original
        call to ``self.figure.save_fig`` raised AttributeError.
        """
        self.figure.savefig(filename, format=format, dpi=dpi)

    def legend(self):
        self.ax.legend()

    def show(self):
        pyplot.show()
| Python | 0.999982 | |
426f9c0b63adf7d7085a45bfe3520eb36a2ad6f7 | Fix indents. | evelink/parsing/assets.py | evelink/parsing/assets.py | from evelink import api
from evelink import constants
def parse_assets(api_result):
    """Parse an XML asset-list result into a dict keyed by location ID.

    Each item dict carries id / item_type_id / location_id / location_flag /
    quantity / packaged, plus a 'contents' list for containers. Nested rows
    without an explicit locationID inherit the parent item's location.
    """

    def handle_rowset(rowset, parent_location):
        # Build the item list for one <rowset>, recursing into containers.
        results = []
        for row in rowset.findall('row'):
            item = {'id': int(row.attrib['itemID']),
                'item_type_id': int(row.attrib['typeID']),
                'location_id': int(row.attrib.get('locationID', parent_location)),
                'location_flag': int(row.attrib['flag']),
                'quantity': int(row.attrib['quantity']),
                # singleton == '0' means the item is still packaged.
                'packaged': row.attrib['singleton'] == '0',
            }
            contents = row.find('rowset')
            # Bug fix: relying on Element truthiness is deprecated (an
            # element with no children evaluates falsy). Test presence and
            # non-emptiness explicitly; observable behavior is unchanged.
            if contents is not None and len(contents):
                item['contents'] = handle_rowset(contents, item['location_id'])
            results.append(item)
        return results

    result_list = handle_rowset(api_result.find('rowset'), None)

    # For convenience, key the result by top-level location ID.
    result_dict = {}
    for item in result_list:
        location = item['location_id']
        result_dict.setdefault(location, {})
        result_dict[location]['location_id'] = location
        result_dict[location].setdefault('contents', [])
        result_dict[location]['contents'].append(item)

    return result_dict
| from evelink import api
from evelink import constants
def parse_assets(api_result):
def handle_rowset(rowset, parent_location):
results = []
for row in rowset.findall('row'):
item = {'id': int(row.attrib['itemID']),
'item_type_id': int(row.attrib['typeID']),
'location_id': int(row.attrib.get('locationID', parent_location)),
'location_flag': int(row.attrib['flag']),
'quantity': int(row.attrib['quantity']),
'packaged': row.attrib['singleton'] == '0',
}
contents = row.find('rowset')
if contents:
item['contents'] = handle_rowset(contents, item['location_id'])
results.append(item)
return results
result_list = handle_rowset(api_result.find('rowset'), None)
# For convenience, key the result by top-level location ID.
result_dict = {}
for item in result_list:
location = item['location_id']
result_dict.setdefault(location, {})
result_dict[location]['location_id'] = location
result_dict[location].setdefault('contents', [])
result_dict[location]['contents'].append(item)
return result_dict
| Python | 0.000001 |
35076b373913381a90aa65e8052036eb51eece46 | add unicode_literals in utils | simiki/utils.py | simiki/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os
import shutil
import errno
from os import path as osp
RESET_COLOR = "\033[0m"
COLOR_CODES = {
"debug" : "\033[1;34m", # blue
"info" : "\033[1;32m", # green
"warning" : "\033[1;33m", # yellow
"error" : "\033[1;31m", # red
"critical" : "\033[1;41m", # background red
}
def color_msg(level, msg):
    """Wrap *msg* between the ANSI color for *level* and the reset code."""
    return "".join((COLOR_CODES[level], msg, RESET_COLOR))
def check_path_exists(path):
    """Check if the path(include file and directory) exists"""
    # osp.exists already returns a bool; no if/else ladder needed.
    return osp.exists(path)
def check_extension(filename):
    """Return True when *filename* carries a Markdown suffix.

    Only .md / .mkd / .mdown / .markdown files are treated as wiki sources
    (same effect as fnmatch-filtering on the corresponding "*.md"-style
    patterns).
    """
    markdown_suffixes = (".md", ".mkd", ".mdown", ".markdown")
    _, suffix = osp.splitext(filename)
    return suffix in markdown_suffixes
#def copytree(src, dst):
# try:
# shutil.copytree(src, dst)
# except OSError as exc: # python >2.5
# if exc.errno == errno.ENOTDIR:
# shutil.copy(src, dst)
# else: raise
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy *src* into *dst*, tolerating an existing *dst*.

    Unlike shutil.copytree, this does not fail when the destination tree
    already exists, so repeated site builds can overwrite a previous
    output directory in place.
    """
    # OSError: [Errno 17] File exists: '/home/tankywoo/simiki/html/css'
    if not osp.exists(dst):
        os.makedirs(dst)
    for item in os.listdir(src):
        s = osp.join(src, item)
        d = osp.join(dst, item)
        if osp.isdir(s):
            # Bug fix: recurse via this tolerant copytree. The original
            # delegated to shutil.copytree, which raises when a nested
            # destination directory already exists -- exactly the failure
            # this wrapper was written to avoid.
            copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
if __name__ == "__main__":
    # Visual smoke test: print one sample message per severity level.
    print(color_msg("debug", "DEBUG"))
    # NOTE(review): the "info" sample prints "DEBUG" -- likely a
    # copy/paste slip; harmless since this is demo output only.
    print(color_msg("info", "DEBUG"))
    print(color_msg("warning", "WARNING"))
    print(color_msg("error", "ERROR"))
    print(color_msg("critical", "CRITICAL"))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import shutil
import errno
from os import path as osp
RESET_COLOR = "\033[0m"
COLOR_CODES = {
"debug" : "\033[1;34m", # blue
"info" : "\033[1;32m", # green
"warning" : "\033[1;33m", # yellow
"error" : "\033[1;31m", # red
"critical" : "\033[1;41m", # background red
}
def color_msg(level, msg):
return COLOR_CODES[level] + msg + RESET_COLOR
def check_path_exists(path):
"""Check if the path(include file and directory) exists"""
if osp.exists(path):
return True
return False
def check_extension(filename):
"""Filter file by suffix
If the file suffix not in the allowed suffixes, the return true and filter.
The `fnmatch` module can also get the suffix:
patterns = ["*.md", "*.mkd", "*.markdown"]
fnmatch.filter(files, pattern)
"""
# Allowed suffixes ( aka "extensions" )
exts = {".md", ".mkd", ".mdown", ".markdown"}
return osp.splitext(filename)[1] in exts
#def copytree(src, dst):
# try:
# shutil.copytree(src, dst)
# except OSError as exc: # python >2.5
# if exc.errno == errno.ENOTDIR:
# shutil.copy(src, dst)
# else: raise
def copytree(src, dst, symlinks=False, ignore=None):
# OSError: [Errno 17] File exists: '/home/tankywoo/simiki/html/css'
if not osp.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
s = osp.join(src, item)
d = osp.join(dst, item)
if osp.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
if __name__ == "__main__":
print(color_msg("debug", "DEBUG"))
print(color_msg("info", "DEBUG"))
print(color_msg("warning", "WARNING"))
print(color_msg("error", "ERROR"))
print(color_msg("critical", "CRITICAL"))
| Python | 0.001148 |
d72d2e38d177476470b22ded061dd06b2be3ee88 | Add the quantity-safe allclose from spectral-cube | turbustat/tests/helpers.py | turbustat/tests/helpers.py | from __future__ import print_function, absolute_import, division
from astropy import units as u
from numpy.testing import assert_allclose as assert_allclose_numpy, assert_array_equal
def assert_allclose(q1, q2, **kwargs):
"""
Quantity-safe version of Numpy's assert_allclose
Copyright (c) 2014, spectral-cube developers
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
if isinstance(q1, u.Quantity) and isinstance(q2, u.Quantity):
assert_allclose_numpy(q1.to(q2.unit).value, q2.value, **kwargs)
elif isinstance(q1, u.Quantity):
assert_allclose_numpy(q1.value, q2, **kwargs)
elif isinstance(q2, u.Quantity):
assert_allclose_numpy(q1, q2.value, **kwargs)
else:
assert_allclose_numpy(q1, q2, **kwargs) | Python | 0.000261 | |
6c3d92a2d3eb043e466e3d6ab8e303c025cc7e0a | add ColorPrinter.py | ColorPrinter.py | ColorPrinter.py | class ColorPrinter:
"""
print message to terminal with colored header
"""
    def __init__(self, header=''):
        """Create a printer whose messages are prefixed with *header*."""
        self.__header = header
        # ANSI escape sequence per severity level; 'log' is the terminal's
        # default color and doubles as the reset suffix after the header.
        self.__levels = {
            'log': '\033[0m', # terminal color header
            'info': '\033[1;32;40m', # green header
            'warn': '\033[1;33;40m', # yellow header
            'error': '\033[1;31;40m', # red header
        }
    def setHeader(self, header=''):
        """Replace the header text shown before subsequent messages."""
        self.__header = header
def __format(self, message, level='log'):
header = self.__levels.get(level, self.__levels['log']) + self.__header + self.__levels['log'] if self.__header else ''
body = ' ' + message if message else ''
return header + body
    # Each method below joins the message parts, prints them with the
    # level's color, and returns self so calls can be chained.
    # `log` prints the message with the terminal's default color header.
    def log(self, message='', *others):
        print self.__format(' '.join((message,) + others), 'log')
        return self
    # `info` print the message with green header
    def info(self, message='', *others):
        print self.__format(' '.join((message,) + others), 'info')
        return self
    # `warn` print the message with yellow header
    def warn(self, message='', *others):
        print self.__format(' '.join((message,) + others), 'warn')
        return self
    # `error` print the message with red header
    def error(self, message='', *others):
        print self.__format(' '.join((message,) + others), 'error')
        return self
| Python | 0 | |
b5972a5651ba1ace28ae54d5a1a4f31a07e97670 | add server_costs table | migrations/versions/1e27c434bb14_create_server_costs.py | migrations/versions/1e27c434bb14_create_server_costs.py | """create server_costs table
Revision ID: 1e27c434bb14
Revises: fa0f07475596
Create Date: 2016-03-14 15:57:19.945327
"""
# revision identifiers, used by Alembic.
revision = '1e27c434bb14'
down_revision = 'fa0f07475596'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    # server_costs: one row per project, keyed by the project's URL.
    op.create_table(
        'server_costs',
        sa.Column('project_url', sa.String(length=255), sa.ForeignKey('projects.url'), nullable=False, primary_key=True),
        # Monetary amount; NUMERIC(10, 2) keeps exact cents.
        sa.Column('value', sa.Numeric(precision=10, scale=2), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True)
    )
def downgrade():
    # Reverse of upgrade(): drop the table created above.
    op.drop_table('server_costs')
| Python | 0.000001 | |
7334b87e6d7a9ef495919a2b13cf926692619fcd | fix min/max typos | modularodm/tests/validators/test_iterable_validators.py | modularodm/tests/validators/test_iterable_validators.py | from modularodm import StoredObject
from modularodm.exceptions import ValidationValueError
from modularodm.fields import IntegerField, StringField
from modularodm.tests import ModularOdmTestCase
from modularodm.validators import MaxLengthValidator, MinLengthValidator
class StringValidatorTestCase(ModularOdmTestCase):
    """Min/max length validators applied to scalar StringFields."""
    def define_test_objects(self):
        class Foo(StoredObject):
            _id = IntegerField()
            # Valid only when at most 5 characters long.
            test_field_max = StringField(
                list=False,
                validate=[MaxLengthValidator(5), ]
            )
            # Valid only when at least 5 characters long.
            test_field_min = StringField(
                list=False,
                validate=[MinLengthValidator(5), ]
            )
        self.test_object = Foo(_id=0)
        return Foo,
    def test_max_length_string_validator(self):
        self.test_object.test_field_max = 'abc'
        self.test_object.save()
        self.test_object.test_field_max = 'abcdefg'
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
    def test_min_length_string_validator(self):
        self.test_object.test_field_min = 'abc'
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
        self.test_object.test_field_min = 'abcdefg'
        self.test_object.save()
class ListValidatorTestCase(ModularOdmTestCase):
    """list_validate constraints on the *length of the list* itself."""
    def define_test_objects(self):
        class Foo(StoredObject):
            _id = IntegerField()
            # List valid only when it holds at most 5 elements.
            test_field_max = IntegerField(
                list=True,
                list_validate=[MaxLengthValidator(5), ]
            )
            # List valid only when it holds at least 3 elements.
            test_field_min = IntegerField(
                list=True,
                list_validate=[MinLengthValidator(3), ]
            )
        self.test_object = Foo(_id=0)
        return Foo,
    def test_min_length_list_validator(self):
        # This test fails.
        self.test_object.test_field_min = [1, 2]
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
        self.test_object.test_field_min = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        self.test_object.save()
    def test_max_length_list_validator(self):
        # This test fails.
        # test_field_min is set to a valid length first so its validator
        # does not mask the max-length behavior under test.
        self.test_object.test_field_min = [1, 2, 3]
        self.test_object.test_field_max = [1, 2, 3]
        self.test_object.save()
        self.test_object.test_field_max = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
class IterableValidatorCombinationTestCase(ModularOdmTestCase):
    """validate applies to each element; list_validate to the list length.

    test_field: each string must be <= 3 characters AND the list must hold
    at least 3 elements -- saving fails when either constraint is broken.
    """
    def define_test_objects(self):
        class Foo(StoredObject):
            _id = IntegerField()
            test_field = StringField(
                list=True,
                validate=MaxLengthValidator(3),
                list_validate=MinLengthValidator(3)
            )
        self.test_object = Foo(_id=0)
        return Foo,
    def test_child_pass_list_fail(self):
        # Elements are short enough, but the list has only 2 entries.
        self.test_object.test_field = ['ab', 'abc']
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
    def test_child_fail_list_pass(self):
        # List is long enough, but 'abcd' exceeds the per-element limit.
        self.test_object.test_field = ['ab', 'abcd', 'adc']
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
    def test_child_fail_list_fail(self):
        # Both constraints violated at once.
        self.test_object.test_field = ['ab', 'abdc']
        with self.assertRaises(ValidationValueError):
            self.test_object.save()
| from modularodm import StoredObject
from modularodm.exceptions import ValidationValueError
from modularodm.fields import IntegerField, StringField
from modularodm.tests import ModularOdmTestCase
from modularodm.validators import MaxLengthValidator, MinLengthValidator
class StringValidatorTestCase(ModularOdmTestCase):
def define_test_objects(self):
class Foo(StoredObject):
_id = IntegerField()
test_field = StringField(
list=False,
validate=[MaxLengthValidator(5), ]
)
self.test_object = Foo(_id=0)
return Foo,
def test_max_length_string_validator(self):
self.test_object.test_field = 'abc'
self.test_object.save()
self.test_object.test_field = 'abcdefg'
with self.assertRaises(ValidationValueError):
self.test_object.save()
def test_min_length_string_validator(self):
self.test_object.test_field = 'abc'
with self.assertRaises(ValidationValueError):
self.test_object.save()
self.test_object.test_field = 'abcdefg'
self.test_object.save()
class ListValidatorTestCase(ModularOdmTestCase):
def define_test_objects(self):
class Foo(StoredObject):
_id = IntegerField()
test_field = IntegerField(
list=True,
list_validate=[MaxLengthValidator(5), ]
)
self.test_object = Foo(_id=0)
return Foo,
def test_min_length_list_validator(self):
# This test fails.
self.test_object.test_field = [1, 2, 3]
with self.assertRaises(ValidationValueError):
self.test_object.save()
self.test_object.test_field = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
self.test_object.save()
def test_max_length_list_validator(self):
# This test fails.
self.test_object.test_field = [1, 2, 3]
self.test_object.save()
self.test_object.test_field = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
with self.assertRaises(ValidationValueError):
self.test_object.save()
class IterableValidatorCombinationTestCase(ModularOdmTestCase):
def define_test_objects(self):
class Foo(StoredObject):
_id = IntegerField()
test_field = StringField(
list=True,
validate=MaxLengthValidator(3),
list_validate=MinLengthValidator(3)
)
self.test_object = Foo(_id=0)
return Foo,
def test_child_pass_list_fail(self):
self.test_object.test_field = ['ab', 'abc']
with self.assertRaises(ValidationValueError):
self.test_object.save()
def test_child_fail_list_pass(self):
self.test_object.test_field = ['ab', 'abcd', 'adc']
with self.assertRaises(ValidationValueError):
self.test_object.save()
def test_child_fail_list_fail(self):
self.test_object.test_field = ['ab', 'abdc']
with self.assertRaises(ValidationValueError):
self.test_object.save()
| Python | 0.999987 |
ff519b5145accbc10fcb7baa955bc1fe44774c27 | Add browser/websocket.py | src/Lib/browser/websocket.py | src/Lib/browser/websocket.py | from browser import window
import javascript
WebSocket = javascript.JSConstructor(window.WebSocket) | Python | 0.000001 | |
126491288a532da08fb3923eae2635a84736798d | Add new package: libsamplerate (#16143) | var/spack/repos/builtin/packages/libsamplerate/package.py | var/spack/repos/builtin/packages/libsamplerate/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libsamplerate(AutotoolsPackage):
    """libsamplerate (also known as Secret Rabbit Code) is a library for
    performing sample rate conversion of audio data."""
    homepage = "http://www.mega-nerd.com/libsamplerate/history.html"
    url = "http://www.mega-nerd.com/libsamplerate/libsamplerate-0.1.9.tar.gz"
    version('0.1.9', sha256='0a7eb168e2f21353fb6d84da152e4512126f7dc48ccb0be80578c565413444c1')
    version('0.1.8', sha256='93b54bdf46d5e6d2354b7034395fe329c222a966790de34520702bb9642f1c06')
    # Full autotools toolchain is needed to (re)generate and run configure.
    depends_on('m4', type='build')
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
| Python | 0 | |
4e6a6e4f2758bd616f0c2c2703160cbb9c539b63 | add new package (#23843) | var/spack/repos/builtin/packages/py-kubernetes/package.py | var/spack/repos/builtin/packages/py-kubernetes/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyKubernetes(PythonPackage):
    """Official Python client library for kubernetes. """
    homepage = "https://kubernetes.io"
    git = "https://github.com/kubernetes-client/python.git"
    pypi = "kubernetes/kubernetes-17.17.0.tar.gz"
    maintainers = ['vvolkl']
    # Released versions, pinned by sdist checksum.
    version('17.17.0', sha256='c69b318696ba797dcf63eb928a8d4370c52319f4140023c502d7dfdf2080eb79')
    version('12.0.1', sha256='ec52ea01d52e2ec3da255992f7e859f3a76f2bdb51cf65ba8cd71dfc309d8daa')
    version('12.0.0', sha256='72f095a1cd593401ff26b3b8d71749340394ca6d8413770ea28ce18efd5bcf4c')
    version('11.0.0', sha256='1a2472f8b01bc6aa87e3a34781f859bded5a5c8ff791a53d889a8bd6cc550430')
    version('10.1.0', sha256='85a767d04f17d6d317374b6c35e09eb168a6bfd9276f0b3177cc206376bad968')
    version('10.0.1', sha256='3770a496663396ad1def665eeadb947b3f45217a08b64b10c01a57e981ac8592')
    version('9.0.0', sha256='a8b0aed55ba946faea660712595a52ae53a8854df773d96f47a63fa0c9d4e3bf')
    # Runtime (and build-time) Python dependencies.
    depends_on('py-certifi@14.05.14:', type=('build', 'run'))
    depends_on('py-six@1.9.0:', type=('build', 'run'))
    depends_on('py-python-dateutil@2.5.3:', type=('build', 'run'))
    depends_on('py-setuptools@21.0.0:', type=('build'))
    depends_on('py-pyyaml@3.12:', type=('build', 'run'))
    depends_on('py-google-auth@1.0.1:', type=('build', 'run'))
    # Backport needed only on Python 2.
    depends_on('py-ipaddress@1.0.17:', when='^python@:2.8', type=('build', 'run'))
    depends_on('py-websocket-client@0.32:0.39,0.43:', type=('build', 'run'))
    depends_on('py-requests', type=('build', 'run'))
    depends_on('py-requests-oauthlib', type=('build', 'run'))
    depends_on('py-urllib3@1.24.2:', type=('build', 'run'))
| Python | 0 | |
04d6fd6ceabf71f5f38fd7cf25cd4ac2bcb6b57f | Add simple web server to display measurements | server.py | server.py | import sqlite3
from flask import Flask, g, render_template_string
app = Flask(__name__)
def get_db():
    """Return the cached SQLite connection, opening sensors.db on first use.

    The connection is stored on flask.g so repeated calls within a request
    share a single handle.
    """
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect('sensors.db')
    return g._database
# Inline Jinja2 template for the status page rendered by index().
# (Fixed: the <h1> previously read "Senors", disagreeing with <title>.)
index_tmpl = """
<!doctype html>
<title>Sensors</title>
<body>
<h1>Sensors</h1>
<dl>
<dt>Outside Temperature</dt>
<dd>{{ sensors['ot'] }}</dd>
<dt>Outside Humidity</dt>
<dd>{{ sensors['oh'] }}</dd>
<dt>Inside Temperature</dt>
<dd>{{ sensors['it'] }}</dd>
<dt>Inside Humidity</dt>
<dd>{{ sensors['ih'] }}</dd>
<dt>Barometric Pressure</dt>
<dd>{{ sensors['bp'] }}</dd>
<dt>Time</dt>
<dd>{{ ts }} UTC</dd>
</dl>
"""
@app.route('/')
def index():
    """Render the newest reading of every sensor as a simple HTML page."""
    rows = list(get_db().execute('SELECT * FROM measurements GROUP BY sensor'))
    ts = rows[0][0]
    readings = {row[1]: row[2] for row in rows}
    return render_template_string(index_tmpl, sensors=readings, ts=ts)
if __name__ == '__main__':
app.debug = False
app.run(host='0.0.0.0')
| Python | 0 | |
acc23fe67231f8b556b2de7bd19f0050cbe379e6 | Add total calculation script | total_prices.py | total_prices.py | prices = {
"banana" : 4,
"apple" : 2,
"orange" : 1.5,
"pear" : 3
}
# Units of each item currently on hand.
stock = {
    "banana" : 6,
    "apple" : 0,
    "orange" : 32,
    "pear" : 15,
}
# Print a per-item report and accumulate the total inventory value
# (price * stock summed over all items). NOTE: Python 2 print statements.
total = 0
for key in prices:
    print key
    print "price: %s" % prices[key]
    print "stock: %s" % stock[key]
    print prices[key] * stock[key]
    total += prices[key] * stock[key]
print total | Python | 0.000001 | |
31af4f92e97c83c42baff4e902cddf8184d84e4d | allow to run tox as 'python -m tox', which is handy on Windoze | tox/__main__.py | tox/__main__.py | from tox._cmdline import main
main()
| Python | 0 | |
f86d07998a2a80fcf9e69cca9d89c2ca4d982e02 | Fix windows dist script | src/etc/copy-runtime-deps.py | src/etc/copy-runtime-deps.py | #!/usr/bin/env python
# xfail-license
# Copies Rust runtime dependencies to the specified directory
import snapshot, sys, os, shutil
def copy_runtime_deps(dest_dir):
    """Copy the winnt runtime DLLs and third-party license tree into dest_dir."""
    for path in snapshot.get_winnt_runtime_deps():
        shutil.copy(path, dest_dir)
    lic_dest = os.path.join(dest_dir, "third-party")
    # Remove any stale copy first, since copytree refuses an existing dest.
    if os.path.exists(lic_dest):
        shutil.rmtree(lic_dest) # copytree() won't overwrite existing files
    shutil.copytree(os.path.join(os.path.dirname(__file__), "third-party"), lic_dest)
copy_runtime_deps(sys.argv[1])
| #!/usr/bin/env python
# xfail-license
# Copies Rust runtime dependencies to the specified directory
import snapshot, sys, os, shutil
def copy_runtime_deps(dest_dir):
    """Copy the winnt runtime DLLs and third-party license tree into dest_dir."""
    for path in snapshot.get_winnt_runtime_deps():
        shutil.copy(path, dest_dir)
    lic_dest = os.path.join(dest_dir, "third-party")
    # copytree() won't overwrite existing files, so clear a stale copy first.
    # Guard the rmtree: on a fresh dest_dir the directory does not exist yet
    # and an unconditional rmtree would raise.
    if os.path.exists(lic_dest):
        shutil.rmtree(lic_dest)
    shutil.copytree(os.path.join(os.path.dirname(__file__), "third-party"), lic_dest)
copy_runtime_deps(sys.argv[1])
| Python | 0 |
d206b02e12cf7f5418cd02987313bd7ddd807901 | add geom_tile layer. | ggplot/geoms/geom_tile.py | ggplot/geoms/geom_tile.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from geom import geom
class geom_tile(geom):
    """Heat-map layer: draws a grid of tiles colored by 'fill'."""
    # Aesthetics this geom understands; anything else in the layer is dropped.
    VALID_AES = ['x', 'y', 'fill']
    def plot_layer(self, layer):
        # NOTE: dict.iteritems() means this code targets Python 2.
        layer = {k: v for k, v in layer.iteritems() if k in self.VALID_AES}
        layer.update(self.manual_aes)
        x = layer.pop('x')
        y = layer.pop('y')
        fill = layer.pop('fill')
        # Pivot (x, y, fill) triples into a 2-D grid indexed by y with one
        # column per x, suitable for imshow.
        X = pd.DataFrame({'x': x,
                          'y': y,
                          'fill': fill}).set_index(['x', 'y']).unstack(0)
        x_ticks = range(0, len(set(x)))
        y_ticks = range(0, len(set(y)))
        plt.imshow(X, interpolation='nearest', **layer)
        # Deferred axis tweaks, applied later by the plotting framework.
        return [
            {'function': 'set_xticklabels', 'args': [x]},
            {'function': 'set_xticks', 'args': [x_ticks]},
            {'function': 'set_yticklabels', 'args': [y]},
            {'function': 'set_yticks', 'args': [y_ticks]}
        ]
| Python | 0 | |
d940ce7cbd92c0e886139eaec3faa75aabbce16a | add test models | singleactiveobject/tests/models.py | singleactiveobject/tests/models.py | from singleactiveobject.models import SingleActiveObjectMixin
class SingleActiveObject(SingleActiveObjectMixin):
    """Minimal concrete model used by the test suite to exercise the mixin."""
    pass
| Python | 0 | |
71d3375c4ca1acb106f8825d2f39ca602fa47e94 | Test astroid trajectory implementation | src/test/trajectory/test_astroid_trajectory.py | src/test/trajectory/test_astroid_trajectory.py | #!/usr/bin/env python
import unittest
from geometry_msgs.msg import Point
from trajectory.astroid_trajectory import AstroidTrajectory
class AstroidTrajectoryTest(unittest.TestCase):
    """Unit tests for AstroidTrajectory positions at characteristic times."""
    def setUp(self):
        # Tolerance for float comparisons (currently unused by the asserts).
        self.delta = 0.000001
        self.radius = 5
        self.period = 4
        self.expected_position = Point()
        self.trajectory = AstroidTrajectory(self.radius, self.period)
    def test_when_creating_trajectory_the_radius_and_period_are_set(self):
        self.assertEqual(self.radius, self.trajectory.radius)
        self.assertEqual(self.period, self.trajectory.period)
    def test_when_getting_position_after_1s_then_position_at_1s_is_returned(self):
        # t=1 is a quarter of the 4 s period: expected point (0, radius).
        self.expected_position.x = 0
        self.expected_position.y = self.radius
        # NOTE(review): assertAlmostEqual on Point objects relies on Point
        # supporting subtraction/rounding -- confirm it compares as intended.
        self.assertAlmostEqual(self.expected_position, self.trajectory.get_position_at(1))
    def test_when_getting_position_after_2s_then_position_at_2s_is_returned(self):
        # t=2 is half the period: expected point (-radius, 0).
        self.expected_position.x = -self.radius
        self.expected_position.y = 0
        self.assertAlmostEqual(self.expected_position, self.trajectory.get_position_at(2))
| Python | 0.000001 | |
835b5f20061033b6fcf2a8b86203a42c5d4835ee | Add initial unit tests for parameter.py (List) | spotpy/unittests/test_parameter.py | spotpy/unittests/test_parameter.py | import unittest
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
from spotpy import parameter
import numpy as np
#https://docs.python.org/3/library/unittest.html
class TestListParameterDistribution(unittest.TestCase):
    """Tests for spotpy's parameter.List distribution object."""
    def setUp(self):
        self.values = [1, 2, 3, 4, 5]
        self.list_param = parameter.List('test', self.values)
        self.list_param_repeat = parameter.List('test2', self.values, repeat=True)
    def test_list_is_callable(self):
        self.assertTrue(callable(self.list_param), "List instance should be callable")
    def test_list_gives_throwaway_value_on_first_call(self):
        # The first call is a throwaway and must not return the first value.
        v = self.list_param()
        self.assertNotEqual(self.values[0], v)
    def test_list_gives_1_value_when_size_is_not_specified(self):
        throwaway = self.list_param()
        v = self.list_param()
        self.assertEqual(self.values[0], v)
    def test_list_gives_n_values_when_size_is_n(self):
        throwaway = self.list_param()
        v = self.list_param(len(self.values))
        self.assertEqual(self.values, list(v))
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
a759fee7b1cca1d3966100b480cac80ad4c9ece7 | Iterate the stackexchange corpuses term vectors | termVectors.py | termVectors.py |
from elasticsearch import Elasticsearch
es = Elasticsearch('http://localhost:9200')
def scoredFingerprint(terms):
    """Score every term by term_freq / doc_freq (a tf-idf-like weight).

    `terms` maps term -> stats dict with at least 'term_freq' and 'doc_freq'
    (the shape returned by Elasticsearch term vectors). Terms frequent in
    this document but rare across the corpus score highest.
    """
    return {term: float(stats['term_freq']) / float(stats['doc_freq'])
            for term, stats in terms.items()}
def allCorpusDocs(index='stackexchange', doc_type='post', fields='Body.bigramed'):
    """Yield the _id of every document in the index via the scroll API.

    NOTE(review): the `fields` parameter is accepted but never used below --
    confirm whether it was meant to restrict the search.
    """
    # Page through the index 500 hits at a time, keeping a 1-minute scroll.
    query = {
        "sort": ["_doc"],
        "size": 500
    }
    resp = es.search(index=index, doc_type=doc_type, scroll='1m', body=query)
    while len(resp['hits']['hits']) > 0:
        for doc in resp['hits']['hits']:
            yield doc['_id']
        scrollId = resp['_scroll_id']
        resp = es.scroll(scroll_id=scrollId, scroll='1m')
def termVectors(docIds, index='stackexchange', doc_type='post', field='Body.bigramed'):
    """Yield (doc_id, scored fingerprint) for each requested document.

    Documents lacking a term vector for `field` trigger a KeyError on lookup
    and are silently skipped.
    """
    tvs = es.mtermvectors(ids=docIds, index=index, doc_type=doc_type, fields=field, term_statistics=True)
    for tv in tvs['docs']:
        try:
            yield (tv['_id'], scoredFingerprint(tv['term_vectors'][field]['terms']))
        except KeyError:
            pass
def groupEveryN(l, n=10):
for i in range(0, len(l), n):
yield l[i:i+n]
def allTermVectors(docIds):
    """Yield (doc_id, fingerprint) for all docIds, fetched in batches of 10."""
    for docIdGroup in groupEveryN(docIds):
        for tv in termVectors(docIds=docIdGroup):
            yield tv
if __name__ == "__main__":
    # Collect every document id up front, then stream and print fingerprints.
    docIds = [docId for docId in allCorpusDocs()]
    print("Fetching %s Term Vectors" % len(docIds))
    for tv in allTermVectors(docIds):
        print(tv)
| Python | 0.999848 | |
e9a5a0c22de92f3b5eb5df567475736b72c5067c | Add pa300_calc_coord.py | pa300_calc_coord.py | pa300_calc_coord.py | # Std libs
from itertools import product
import sqlite3
# My libs
import constants as c
# Open the parameters database (path comes from the project constants module).
conn_params = sqlite3.connect(c.sql_params_dropbox)
cur_params = conn_params.cursor()
# Per-mesa reference offsets within a die.
dats = cur_params.execute('''SELECT mask, mesa, xm_mesa, ym_mesa, xm_pad, ym_pad
                             FROM mesas''').fetchall()
for dat in dats:
    mask, mesa, xm_mesa, ym_mesa, xm_pad, ym_pad = dat
    # Die pitch for this mask.
    dX, dY = cur_params.execute('SELECT d_X, d_Y FROM masks WHERE mask=?',\
                                (mask,)).fetchone()
    # Enumerate the 20x20 die grid and compute absolute coordinates as
    # (die index - 1) * pitch + per-mesa offset.
    for (X, Y) in product(range(1,21), range(1,21)):
        x_mesa = (X-1)*dX + xm_mesa
        y_mesa = (Y-1)*dY + ym_mesa
        x_pad = (X-1)*dX + xm_pad
        y_pad = (Y-1)*dY + ym_pad
        print(mask, mesa, X, Y)
        cur_params.execute('''INSERT OR REPLACE INTO
            coord(mask, mesa, X, Y, xmesa, ymesa, xpad, ypad)
            VALUES(?, ?, ?, ?, ?, ?, ?, ?)''',
            (mask, mesa, X, Y,
             x_mesa, y_mesa, x_pad, y_pad,))
conn_params.commit()
| Python | 0.001616 | |
a52ae7a34b9ec1dd03653c6c735b3930033ac830 | add a sample of visitor pattern for resolving recursion limit problem using generator. | patterns/visitor.py | patterns/visitor.py | """
from python cookbook 3rd edition. PY3 only.
Resolve the recursion limit problem.
"""
import types
class Node:
    """Base class for AST nodes; NodeVisitor dispatches on the subclass name."""
    pass
class NodeVisitor:
    """Iterative visitor driver that sidesteps Python's recursion limit.

    visit_* methods may return a value directly, or be generators that
    yield child Nodes; yielded children are evaluated here and their
    results sent back into the generator (see Evaluator2).
    """
    def visit(self, node):
        # Explicit stack replaces the call stack. Entries are Nodes still
        # to visit, generators awaiting child results, or finished values.
        stack = [node]
        last_result = None
        while stack:
            try:
                last = stack[-1]
                if isinstance(last, types.GeneratorType):
                    # Feed the latest child result in; the generator yields
                    # its next child node (or its final value).
                    stack.append(last.send(last_result))
                    last_result = None
                elif isinstance(last, Node):
                    # Dispatch: result may be a plain value or a generator.
                    stack.append(self._visit(stack.pop()))
                else:
                    # Plain value: a completed sub-result.
                    last_result = stack.pop()
            except StopIteration:
                # The generator on top finished producing values.
                stack.pop()
        return last_result
    def _visit(self, node):
        # Name-based dispatch: visit_<ClassName>, else generic_visit.
        methname = 'visit_' + type(node).__name__
        meth = getattr(self, methname, None)
        if meth is None:
            meth = self.generic_visit
        return meth(node)
    def generic_visit(self, node):
        """Fallback for node types with no matching visit_* method."""
        raise RuntimeError('No {} method'.format('visit_' +
            type(node).__name__))
# Expression-tree node types. Operator semantics live in the visitors.
class UnaryOperator(Node):
    def __init__(self, operand):
        self.operand = operand
class BinaryOperator(Node):
    def __init__(self, left, right):
        self.left = left
        self.right = right
class Add(BinaryOperator):
    pass
class Sub(BinaryOperator):
    pass
class Mul(BinaryOperator):
    pass
class Div(BinaryOperator):
    pass
class Negate(UnaryOperator):
    pass
class Number(Node):
    # Leaf node wrapping a numeric literal.
    def __init__(self, value):
        self.value = value
# A sample visitor class that evaluates expressions
class Evaluator(NodeVisitor):
    """
    Example for calculating 1 + 2*(3-4) / 5 (=> 0.6)
    >>> t1 = Sub(Number(3), Number(4))
    >>> t2 = Mul(Number(2), t1)
    >>> t3 = Div(t2, Number(5))
    >>> t4 = Add(Number(1), t3)
    >>> e = Evaluator()
    >>> print(e.visit(t4))
    0.6
    """
    # Classic recursive evaluation: each visit_* calls self.visit on its
    # children, so deep trees can still hit the recursion limit
    # (Evaluator2 is the non-recursive variant).
    def visit_Number(self, node):
        return node.value
    def visit_Add(self, node):
        return self.visit(node.left) + self.visit(node.right)
    def visit_Sub(self, node):
        return self.visit(node.left) - self.visit(node.right)
    def visit_Mul(self, node):
        return self.visit(node.left) * self.visit(node.right)
    def visit_Div(self, node):
        return self.visit(node.left) / self.visit(node.right)
    def visit_Negate(self, node):
        return -self.visit(node.operand)
class Evaluator2(NodeVisitor):
    """
    Resolve the problem of recursion limit.
    Example:
    >>> a = Number(0)
    >>> for n in range(1, 100000):
    ...     a = Add(a, Number(n))
    ...
    >>> e = Evaluator()
    >>> e.visit(a)
    Traceback (most recent call last):
    ...
    RuntimeError: maximum recursion depth exceeded in __instancecheck__
    >>> e = Evaluator2()
    >>> e.visit(a)
    4999950000
    """
    # Generator-based evaluation: `yield node.left` hands the child to the
    # NodeVisitor.visit trampoline, which sends back its computed value, so
    # no Python call-stack frames pile up for deep trees. The final `yield`
    # delivers the node's own result.
    def visit_Number(self, node):
        return node.value
    def visit_Add(self, node):
        yield (yield node.left) + (yield node.right)
    def visit_Sub(self, node):
        yield (yield node.left) - (yield node.right)
    def visit_Mul(self, node):
        yield (yield node.left) * (yield node.right)
    def visit_Div(self, node):
        yield (yield node.left) / (yield node.right)
    def visit_Negate(self, node):
        yield -(yield node.operand)
| Python | 0 | |
b3c89917895786bfab5d4fae9ce086767575a506 | Add a deployment script | deploy.py | deploy.py | """ This is a script that deploys Dogbot. """
import os
from pathlib import Path
from ruamel.yaml import YAML
import requests
# load the webhook url from the configuration
with open('config.yml') as f:
webhook_url = YAML(typ='safe').load(f)['monitoring']['health_webhook']
def info(text):
    """Print `text` prefixed with a green "[info]" tag (ANSI escape codes)."""
    print('\033[32m[info]\033[0m', text)
def post(content=None, *, embed=None, wait_for_server=True):
    """POST a message and/or embed to the configured health webhook.

    `wait_for_server` appends ?wait=true so the request blocks until the
    message has been created server-side.
    """
    info('POSTing to {}: {}'.format(webhook_url, content or embed))
    # NOTE(review): when embed is None this still sends embeds=[None] --
    # confirm the webhook endpoint tolerates a null embed entry.
    payload = {'content': content, 'embeds': [embed]}
    requests.post(webhook_url + ('?wait=true' if wait_for_server else ''), json=payload)
def deploy():
    """ Deploys Dogbot: announce start, run the Ansible playbook, announce result. """
    # resolve path to playbook
    playbook = (Path.cwd() / 'deployment' / 'playbook.yml').resolve()
    info('Path to Ansible playbook: {}'.format(playbook))
    # Announce the start via the webhook (orange embed).
    post(embed={'title': 'Deployment starting.', 'description': 'This shouldn\'t take too long.', 'color': 0xe67e22})
    # run the playbook
    info('Running Ansible playbook.')
    # NOTE(review): os.system returns the raw wait status, not the exit
    # code; nonzero still reliably indicates failure.
    exit_code = os.system('ansible-playbook {}'.format(playbook))
    if exit_code != 0:
        # Bail out without announcing success.
        info('Deployment failed.')
        return
    info('Finished running playbook.')
    # Announce completion (green embed).
    post(embed={'title': 'Deployment finished.', 'description': 'The bot is restarting. This can take a bit.',
                'color': 0x2ecc71})
if __name__ == '__main__':
deploy()
| Python | 0.000002 | |
10b3ae6ab5009fe0c43b744dc655bd6512cec041 | Include basic version of contract object | db/contract.py | db/contract.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .common import Base, session_scope
class Contract(Base):
__tablename__ = 'contracts'
__autoload__ = True
def __init__(self, player_id, contract_data):
| Python | 0 | |
aef7c25964883bae893913524bc9ff3dc0bdcde3 | Add a docker helper script (#18) | docker.py | docker.py | #!/usr/bin/env python3
import argparse
import subprocess
IMAGE_NAME = 'cargo-sphinx'
def has_image(name):
    """Return True if a local docker image named `name` exists.

    Shells out to `docker images` and greps the repository column; the
    pipeline's exit status (grep's) is 0 only on an exact match.
    """
    # {{...}} escapes awk's braces from str.format. The original line ended
    # with a stray comma, which made `cmd` a 1-tuple instead of a string.
    cmd = "docker images | awk '{{print $1}}' | grep '^{name}$' > /dev/null".format(
        name=name)
    proc = subprocess.run(cmd, shell=True)
    return proc.returncode == 0
def main():
    """CLI entry point: build the image when needed, then run the action."""
    parser = argparse.ArgumentParser()
    parser.add_argument('action', nargs='?',
                        help="Either 'build', 'shell', or 'docs'")
    parser.add_argument('--nocache', action='store_true',
                        help="When building containers, don't use cached images.")
    args = parser.parse_args()
    action = args.action
    # Build implicitly when the image is missing, or explicitly on 'build'.
    if not has_image(IMAGE_NAME) or action == 'build':
        run_build(IMAGE_NAME, nocache=args.nocache)
    if action == 'build':
        return
    if action == 'shell':
        run_shell(IMAGE_NAME)
    elif action == 'docs':
        run_docs(IMAGE_NAME)
    elif action is not None:
        # Fixed: the placeholder was never filled in (the literal braces
        # were printed). Also stay quiet when no action was given at all.
        print("Unknown action '{}' specified.".format(action))
def run_build(image, nocache=False):
    """Build the docker image from the current directory; raises on failure."""
    nocache_arg = "--no-cache" if nocache else ""
    cmd = "docker build --rm=true -t {name} {nocache} .".format(
        name=image, nocache=nocache_arg)
    # check=True: a failed build aborts the script with CalledProcessError.
    subprocess.run(cmd, shell=True, check=True)
def run_shell(image):
    """Open an interactive bash shell in the container, mounting the cwd."""
    cmd = """docker run -it \\
    -v "$(pwd):/{name}" \\
    --workdir=/{name} \\
    {name} \\
    /bin/bash""".format(name=image)
    subprocess.run(cmd, shell=True)
def run_docs(image):
    """Build the Sphinx docs inside the container (make clean html in docs/)."""
    cmd = """docker run -it \\
    -v "$(pwd):/{name}" \\
    --workdir=/{name}/docs \\
    {name} \\
    make clean html""".format(name=image)
    subprocess.run(cmd, shell=True)
if __name__ == "__main__":
main()
| Python | 0.000022 | |
647fd44f829a308dc16eb86a663dc1a3719476ab | add solution for Search a 2D Matrix II | algorithms/searchA2DMatrixII/searchA2DMatrixII.py | algorithms/searchA2DMatrixII/searchA2DMatrixII.py | class Solution:
# @param {integer[][]} matrix
# @param {integer} target
# @return {boolean}
def searchMatrix(self, matrix, target):
n = len(matrix)
m = len(matrix[0])
x = n-1
y = 0
while x >= 0 and y < m:
if matrix[x][y] == target:
return True
if matrix[x][y] > target:
x -= 1
else:
y += 1
return False
| Python | 0.000005 | |
00e68a20d4691ae3172ae0bb11b8440387acc0d6 | Add the server based on oslo.service. | pycom/server.py | pycom/server.py | # encoding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import os
import socket
import logging
import functools
import greenlet
import eventlet
from oslo_service import service
LOG = logging.getLogger(__name__)
def listen_socket(host, port, backlog=1024, reuse=True):
    """Create, bind and listen a TCP server socket; return it.

    When `reuse` is true, SO_REUSEADDR is set so the address can be
    rebound immediately after a restart.
    """
    server_sock = socket.socket()
    if reuse:
        server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_sock.bind((host, port))
    server_sock.listen(backlog)
    return server_sock
def wrap_exc(f):
    """Decorator: call `f`, returning None instead of letting any
    Exception propagate. Metadata is preserved via functools.wraps."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            result = f(*args, **kwargs)
        except Exception:
            result = None
        return result
    return wrapper
class ServerBase(service.ServiceBase):
    """oslo.service base: runs serve() on an eventlet green pool."""
    def __init__(self, pool_size=None):
        self.pool_size = pool_size
        self._pool = eventlet.GreenPool(self.pool_size)
        # Greenthread running serve(); None until start() is called.
        self._server = None
    def serve(self, pool):
        """Subclasses must implement the actual serving loop."""
        raise NotImplementedError("The method of serve MUST be implemented")
    def _spawn(self, pool):
        # NOTE(review): appears unused by the visible code (start() spawns
        # self.serve directly) -- confirm before relying on it.
        pid = os.getpid()
        try:
            self.serve(pool)
        finally:
            pool.waitall()
            LOG.info("[Process{0}] the server exited".format(pid))
    def start(self):
        """Launch serve() in a background greenthread."""
        self._server = eventlet.spawn(self.serve, pool=self._pool)
    def stop(self):
        """Kill the serving greenthread and shrink the pool to zero."""
        if self._server is not None:
            # let eventlet close socket
            self._pool.resize(0)
            self._server.kill()
    def wait(self):
        """Block until in-flight requests on the pool have drained."""
        try:
            if self._server is not None:
                num = self._pool.running()
                LOG.debug("Waiting server to finish %d requests.", num)
                self._pool.waitall()
        except greenlet.GreenletExit:
            LOG.info("Server has stopped.")
    def reset(self):
        """Restore the pool to its configured size (after stop())."""
        self._pool.resize(self.pool_size)
class SocketServer(ServerBase):
    """TCP accept loop that hands each connection to `handler(conn, addr)`
    on the green pool."""
    def __init__(self, handler, host, port, pool_size=None, backlog=1024, timeout=None):
        self.host = host
        self.port = port
        # Bind and listen immediately, in the constructor.
        self.sock = listen_socket(self.host, self.port, backlog)
        LOG.info("Listen %s:%s" % (self.host, self.port))
        self.handler = handler
        # Per-connection socket timeout (None = blocking forever).
        self.timeout = timeout
        super(SocketServer, self).__init__(pool_size)
    def handle(self, conn, addr):
        """Run the handler for one connection, always closing the socket."""
        try:
            self.handler(conn, addr)
        except socket.timeout:
            LOG.info("socket from {0} time out".format(addr))
        finally:
            try:
                conn.close()
            except socket.error:
                pass
    def serve(self, pool):
        """Accept connections forever, spawning handle() per connection."""
        pid = os.getpid()
        try:
            while True:
                try:
                    conn, addr = self.sock.accept()
                    conn.settimeout(self.timeout)
                    LOG.debug("[Process{0}] accepted {1}".format(pid, addr))
                    pool.spawn_n(self.handle, conn, addr)
                except socket.error as e:
                    # NOTE(review): if accept() itself raised, `addr` is
                    # unbound here and this format call raises NameError.
                    LOG.error("[Process{0}] can not handle the request from {1}: {2}".format(pid, addr, e))
                except (KeyboardInterrupt, SystemExit):
                    LOG.info("[Process{0}] the server is exiting".format(pid))
                    break
        finally:
            # Best-effort close of the listening socket on exit.
            try:
                self.sock.close()
            except socket.error as e:
                pass
class TaskServer(ServerBase):
    """Runs `task_num` copies of `task_fn` on the green pool as a service."""
    def __init__(self, task_fn, task_num=1, pool_size=None, *args, **kwargs):
        super(TaskServer, self).__init__(pool_size)
        self.task_fn = task_fn
        self.task_num = task_num
        # Arguments forwarded to every task invocation.
        self.args = args
        self.kwargs = kwargs
    def _wrap_exc(self):
        # Best-effort execution: a failing task must not kill the pool.
        try:
            self.task_fn(*self.args, **self.kwargs)
        except Exception:
            pass
    def serve(self, pool):
        """Spawn the tasks and wait for all of them to finish.

        ServerBase.start() spawns self.serve; this method was previously
        named `server`, which left the abstract ServerBase.serve in place
        and made start() raise NotImplementedError.
        """
        for _ in range(self.task_num):
            pool.spawn_n(self._wrap_exc)
        pool.waitall()
    # Backward-compatible alias for the old name.
    server = serve
| Python | 0.000001 | |
ada6128817769886e2869944fac3a8cea0b5b109 | Add a missing module | pykmer/timer.py | pykmer/timer.py | """
This module provides a simple timer class for instrumenting code.
"""
import time
class timer(object):
    """Lightweight stopwatch with pause/resume and event-rate tracking."""
    def __init__(self):
        # Timestamp of the most recent (re)start of the running segment.
        self.start = time.time()
        # Seconds accumulated from previously completed segments.
        self.sofar = 0.0
        self.paused = False
        # Count of events recorded via tick().
        self.events = 0
    def pause(self):
        """Suspend timing, banking the current segment's elapsed time."""
        self.sofar += time.time() - self.start
        self.paused = True
    def resume(self):
        """Restart timing after a pause()."""
        self.start = time.time()
        self.paused = False
    def stop(self):
        """Finalize the current segment; a no-op while paused."""
        if not self.paused:
            self.sofar += time.time() - self.start
    def tick(self, n=1):
        """Record `n` occurrences of the measured event."""
        self.events += n
    def reset(self):
        """Discard accumulated time and restart (event count is kept)."""
        self.start = time.time()
        self.sofar = 0
        self.paused = False
    def time(self):
        """Return total elapsed seconds, including the running segment."""
        total = self.sofar
        if not self.paused:
            total += time.time() - self.start
        return total
    def rate(self, n=None):
        """Return events per second; `n` overrides the internal count."""
        count = self.events if n is None else n
        return count / self.time()
| Python | 0.000048 | |
6e767e8f5b219d9883fb1a16846830efabac7d5b | Add python | python/hello.py | python/hello.py | print("Hello, World!")
| Python | 0.998925 | |
01472504fc42137a05a85ae5ad6d4b7956865680 | Add autosolver for regex. | quiz/5-regex.py | quiz/5-regex.py | #!/usr/bin/env python3
def make_array(text):
    """Extract every 'src->dst' transition in `text` as a list of int pairs."""
    import re
    matches = re.findall(r'(\d+)->(\d+)', text)
    return [(int(a), int(b)) for a, b in matches]
def make_transitive_closure(states, eps_trans):
    """Expand `states` (in place) to its closure under epsilon transitions.

    `eps_trans[i]` is the set of states reachable from state i by one
    epsilon move. Repeats until a fixed point is reached; returns `states`.

    Fixed: the loop previously iterated `range(len(states))`, i.e. the
    indices 0..|states|-1, which both missed members >= len(states) and
    followed transitions out of states not actually in the set.
    """
    while True:
        changed = False
        # Snapshot the set, since it may grow during the pass.
        for src in list(states):
            for dst in eps_trans[src]:
                if dst not in states:
                    states.add(dst)
                    changed = True
        if not changed:
            return states
def make_epsilon_transition(regex):
    """Build per-position epsilon-transition sets for a parenthesized regex.

    trans[i] holds the positions reachable from position i without
    consuming input. Handles '(', ')', '|' and '*' in the style of the
    Sedgewick NFA construction (one '|' per group).
    """
    trans = list(map(lambda x: set(), range(len(regex))))
    # Stack of indices of unmatched '(' and pending '|' metacharacters.
    stack = []
    for i in range(len(regex)):
        c = regex[i]
        # Default group start is the current character itself.
        group_begin = i
        if c == '(':
            trans[i].add(i + 1)
            stack.append(i)
        elif c == '|':
            stack.append(i)
        elif c == ')':
            trans[i].add(i + 1)
            top = stack.pop()
            if regex[top] == '(':
                group_begin = top
            elif regex[top] == '|':
                # Alternation: '(' can skip to just after '|', and '|'
                # can jump to the closing ')'.
                group_begin = stack.pop()
                trans[group_begin].add(top + 1)
                trans[top].add(i)
        elif c == '*':
            trans[i].add(i + 1)
            # NOTE(review): this lookahead only wires the closure loop when
            # the NEXT char is also '*'; a lone "X*" gets no back-edge here.
            # Looks suspicious -- verify against the expected quiz answers.
            if i + 1 < len(regex) and regex[i + 1] == '*':
                trans[group_begin].add(i + 1)
                trans[i + 1].add(group_begin)
    return trans
def solve_q1(regex, query):
    """Simulate the regex NFA on `query` and print the reachable states."""
    eps_trans = make_epsilon_transition(regex)
    # Start from state 0 plus everything epsilon-reachable from it.
    states = set()
    states.add(0)
    make_transitive_closure(states, eps_trans)
    for i in query:
        # Match transitions: a state advances when its character equals i.
        new_states = set()
        for st in states:
            if st == len(regex):
                continue
            if i == regex[st]:
                new_states.add(st + 1)
        states = make_transitive_closure(new_states, eps_trans)
    # Print the final reachable-state set, space-separated.
    for i in list(states):
        print(i, end=' ')
    print()
def solve_q2(regex, queries):
    """For each (src, dst) pair, print 'y' if dst is a direct epsilon
    transition of src in the regex's NFA, else 'n'."""
    eps_trans = make_epsilon_transition(regex)
    for q in queries:
        if q[1] in eps_trans[q[0]]:
            print('y', end=' ')
        else:
            print('n', end=' ')
    print()
# Quiz inputs; the spaces are layout only and are stripped before solving.
q1_regex = ' ( ( A | ( C * B ) ) * A ) '
q1_query = ' A B B A B C '
q2_regex = ' ( A ( ( C D * ) * | B ) ) '
q2_query = '''
8->3
10->12
7->5
4->9
0->1
2->10
3->8
'''
solve_q1(q1_regex.replace(' ', ''),
         q1_query.replace(' ', ''))
solve_q2(q2_regex.replace(' ', ''), make_array(q2_query))
| Python | 0 | |
2c1b5aedc5f4503a738ef7e9ffa0a7f969fecfef | add argparse example | Python/calculator_argp.py | Python/calculator_argp.py | import argparse
def main():
    """Parse <first> <oper> <second> from argv and print the result.

    NOTE: Python 2 code (statement-form print below).
    """
    parser = argparse.ArgumentParser(description='Calculate two input numbers')
    parser.add_argument(
        'first', metavar='int', type=int,
        help='first number')
    parser.add_argument(
        'oper', metavar='oper', type=str,
        help='operator +, - or * ')
    parser.add_argument(
        'second', metavar='int', type=int,
        help='second number')
    args = parser.parse_args()
    # These int() casts are redundant: argparse already coerced via type=int.
    first = int(args.first)
    second = int(args.second)
    oper = args.oper
    # res stays '' for unsupported operators, printing an empty line below.
    res = ''
    if oper == '+':
        res = first + second
    elif oper == '-':
        res = first - second
    elif oper == '*':
        res = first * second
    else:
        print "Not supported"
    print res
if __name__ == "__main__":
main()
| Python | 0.000032 | |
ed5f68211e93df983a5e15c7f1ce812b810b49c0 | Add ANTs package (#7717) | var/spack/repos/builtin/packages/ants/package.py | var/spack/repos/builtin/packages/ants/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ants(CMakePackage):
    """ANTs extracts information from complex datasets that include imaging.
       Paired with ANTsR (answer), ANTs is useful for managing, interpreting
       and visualizing multidimensional data. ANTs is popularly considered a
       state-of-the-art medical image registration and segmentation toolkit.
       ANTs depends on the Insight ToolKit (ITK), a widely used medical image
       processing library to which ANTs developers contribute.
    """
    homepage = "http://stnava.github.io/ANTs/"
    url = "https://github.com/ANTsX/ANTs/archive/v2.2.0.tar.gz"
    version('2.2.0', '5661b949268100ac0f7baf6d2702b4dd')
    def install(self, spec, prefix):
        # The ANTS targets are built in a nested ANTS-build directory.
        with working_dir(join_path('spack-build', 'ANTS-build'), create=False):
            make("install")
        # The helper scripts are not installed by make; copy them into bin.
        install_tree('Scripts', prefix.bin)
    def setup_environment(self, spack_env, run_env):
        # ANTs tools locate each other via ANTSPATH at run time.
        run_env.set('ANTSPATH', self.prefix.bin)
| Python | 0 | |
744cedc7e999f96aa0646bb43c039882991228ae | Add Asio package (#24485) | var/spack/repos/builtin/packages/asio/package.py | var/spack/repos/builtin/packages/asio/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os.path
class Asio(AutotoolsPackage):
    """C++ library for network and low-level I/O programming."""
    homepage = "http://think-async.com/Asio/"
    url = "https://github.com/chriskohlhoff/asio/archive/1.18.2.tar.gz"
    git = "https://github.com/chriskohlhoff/asio.git"
    maintainers = ["msimberg"]
    version(
        "1.18.2",
        sha256="8d67133b89e0f8b212e9f82fdcf1c7b21a978d453811e2cd941c680e72c2ca32",
    )
    # Autotools toolchain is needed at build time.
    depends_on("autoconf", type="build")
    depends_on("automake", type="build")
    depends_on("m4", type="build")
    depends_on("libtool", type="build")
    stds = ("11", "14", "17")
    variant(
        "cxxstd",
        default="11",
        values=stds,
        multi=False,
        description="Use the specified C++ standard when building.",
    )
    variant(
        "separate_compilation",
        default=False,
        description="Compile Asio sources separately",
    )
    variant(
        "boost_coroutine",
        default=False,
        description="Enable support for Boost.Coroutine.",
    )
    depends_on("boost +context +coroutine", when="+boost_coroutine")
    variant("boost_regex", default=False, description="Enable support for Boost.Regex.")
    depends_on("boost +regex", when="+boost_regex")
    # Keep Boost's C++ standard in sync with the one Asio is built with.
    for std in stds:
        depends_on("boost cxxstd=" + std, when="cxxstd={0} ^boost".format(std))
    def configure_args(self):
        """Assemble ./configure flags from the selected variants."""
        variants = self.spec.variants
        args = [
            "CXXFLAGS=-std=c++{0}".format(variants["cxxstd"].value),
        ]
        if variants["separate_compilation"].value:
            args.append("--enable-separate-compilation")
        if variants["boost_coroutine"].value:
            args.append("--enable-boost-coroutine")
        if variants["boost_coroutine"].value or variants["boost_regex"].value:
            # Fixed: this string had no f-prefix/format call, so the literal
            # text "{self.spec['boost'].prefix}" was passed to configure.
            args.append("--with-boost={0}".format(self.spec["boost"].prefix))
        return args
    def url_for_version(self, version):
        """Release tarballs on GitHub are tagged asio-x-y-z (dashed)."""
        return "https://github.com/chriskohlhoff/asio/archive/asio-{0}.tar.gz".format(
            version.dashed
        )
    @property
    def configure_directory(self):
        # The autotools project lives in the asio/ subdirectory of the repo.
        return os.path.join(self.stage.source_path, "asio")
| Python | 0 | |
b1feed0ced6d1328cc39bc9bba36331ec6da7803 | Add ban for pgp/gpg private key blocks | pre_commit_hooks/detect_private_key.py | pre_commit_hooks/detect_private_key.py | from __future__ import print_function
import argparse
import sys
# Byte markers that identify the common private-key file formats.
BLACKLIST = [
    b'BEGIN RSA PRIVATE KEY',
    b'BEGIN DSA PRIVATE KEY',
    b'BEGIN EC PRIVATE KEY',
    b'BEGIN OPENSSH PRIVATE KEY',
    b'BEGIN PRIVATE KEY',
    b'PuTTY-User-Key-File-2',
    b'BEGIN SSH2 ENCRYPTED PRIVATE KEY',
    b'BEGIN PGP PRIVATE KEY BLOCK',
]
def detect_private_key(argv=None):
    """Scan the given files for private-key markers.

    Prints one line per offending file and returns 1 if any was found,
    otherwise 0 (pre-commit hook convention).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to check')
    args = parser.parse_args(argv)
    offenders = []
    for filename in args.filenames:
        with open(filename, 'rb') as f:
            contents = f.read()
        if any(marker in contents for marker in BLACKLIST):
            offenders.append(filename)
    if not offenders:
        return 0
    for offender in offenders:
        print('Private key found: {}'.format(offender))
    return 1
if __name__ == '__main__':
sys.exit(detect_private_key())
| from __future__ import print_function
import argparse
import sys
# Byte markers that identify common private-key file formats.
# (Older revision: no PGP/GPG private-key block entry yet.)
BLACKLIST = [
    b'BEGIN RSA PRIVATE KEY',
    b'BEGIN DSA PRIVATE KEY',
    b'BEGIN EC PRIVATE KEY',
    b'BEGIN OPENSSH PRIVATE KEY',
    b'BEGIN PRIVATE KEY',
    b'PuTTY-User-Key-File-2',
    b'BEGIN SSH2 ENCRYPTED PRIVATE KEY',
]
def detect_private_key(argv=None):
    """Scan files for private-key markers; return 1 if any found, else 0."""
    parser = argparse.ArgumentParser()
    parser.add_argument('filenames', nargs='*', help='Filenames to check')
    args = parser.parse_args(argv)
    private_key_files = []
    for filename in args.filenames:
        with open(filename, 'rb') as f:
            content = f.read()
            if any(line in content for line in BLACKLIST):
                private_key_files.append(filename)
    if private_key_files:
        # One report line per offending file (pre-commit hook convention).
        for private_key_file in private_key_files:
            print('Private key found: {}'.format(private_key_file))
        return 1
    else:
        return 0
if __name__ == '__main__':
sys.exit(detect_private_key())
| Python | 0 |
6b9b9642ca09f3b33bdf61bb5dacbaa7c29de8fc | Create __main__.py | src/__main__.py | src/__main__.py | Python | 0.000164 | ||
a58a7f3206168ae98b952e804404c46b89e81640 | Add a snippet (Pillow). | python/pil/python3_pillow_fork/show.py | python/pil/python3_pillow_fork/show.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import PIL.Image as pil_img # PIL.Image is a module not a class...
def main():
    """Load the demo image with Pillow and open it in the default viewer."""
    img = pil_img.open("lenna.png") # It works also with .jpg, ...
    img.show()
if __name__ == '__main__':
main()
| Python | 0.000001 | |
21f789eb05788fcaf0be1960b3c1171437d8a299 | Replace Dict by Mapping. | zerver/lib/session_user.py | zerver/lib/session_user.py | from __future__ import absolute_import
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from typing import Mapping, Optional
from six import text_type
def get_session_dict_user(session_dict):
    # type: (Mapping[text_type, int]) -> Optional[int]
    """Return the logged-in user's primary key from a decoded session dict,
    or None when no user is stored in it.

    Mirrors django.contrib.auth._get_user_session_key.
    """
    try:
        raw_pk = session_dict[SESSION_KEY]
    except KeyError:
        return None
    return get_user_model()._meta.pk.to_python(raw_pk)
def get_session_user(session):
    # type: (Session) -> int
    """Decode *session* and return the user id stored in it."""
    decoded = session.get_decoded()
    return get_session_dict_user(decoded)
| from __future__ import absolute_import
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from typing import Dict, Optional
from six import text_type
def get_session_dict_user(session_dict):
# type: (Dict[text_type, int]) -> Optional[int]
# Compare django.contrib.auth._get_user_session_key
try:
return get_user_model()._meta.pk.to_python(session_dict[SESSION_KEY])
except KeyError:
return None
def get_session_user(session):
# type: (Session) -> int
return get_session_dict_user(session.get_decoded())
| Python | 0 |
7037762247bd40455eb1944dc21684561c5f97ba | add a __init__ file | dataScraping/__init__.py | dataScraping/__init__.py | #!/usr/bin/env python
| Python | 0.000342 | |
2b4544820bf6549bc172f8d5b3532a9103190920 | add utility I used to generate random ph and temp readings | utils/generate_random_values.py | utils/generate_random_values.py | import random
# 30 000 synthetic sensor readings: pH on the 0-14 scale, temperature in °F.
ph = [random.uniform(0, 14) for _ in range(30000)]
temp = [random.uniform(55, 90) for _ in range(30000)]

# Context managers guarantee both files are flushed and closed even if a
# write fails; the original leaked the handles on any exception between
# open() and close().  Each value is written as "%.2f," (trailing comma
# preserved to match the original output format).
with open('temp.csv', 'w+') as temp_file, open('ph.csv', 'w+') as ph_file:
    for t, p in zip(temp, ph):
        temp_file.write("%.2f," % t)
        ph_file.write("%.2f," % p)
| Python | 0 | |
4883bd13c6e07a0568c29fd26a141888b52292b7 | Add retriever object for player draft information | utils/player_draft_retriever.py | utils/player_draft_retriever.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import requests
from lxml import html
from db.team import Team
from db.player_draft import PlayerDraft
class PlayerDraftRetriever():
NHL_PLAYER_DRAFT_PREFIX = "https://www.nhl.com/player"
DRAFT_INFO_REGEX = re.compile(
"(\d{4})\s(.+),\s(\d+).+\srd,.+\((\d+).+\soverall\)")
def __init__(self):
pass
def retrieve_draft_information(self, player_id):
url = "/".join((self.NHL_PLAYER_DRAFT_PREFIX, str(player_id)))
r = requests.get(url)
doc = html.fromstring(r.text)
raw_draft_info = doc.xpath(
"//li[@class='player-bio__item']/span[text() = " +
"'Draft:']/parent::li/text()")
if not raw_draft_info:
print("No draft information found")
return
raw_draft_info = raw_draft_info.pop()
print(raw_draft_info)
match = re.search(self.DRAFT_INFO_REGEX, raw_draft_info)
if match:
year = int(match.group(1))
team = Team.find_by_orig_abbr(match.group(2))
round = int(match.group(3))
overall = int(match.group(4))
draft_info = PlayerDraft(
player_id, team.team_id, year, round, overall)
draft_info_db = PlayerDraft.find_by_player_id(player_id)
if draft_info_db:
if draft_info_db != draft_info:
draft_info_db.update(draft_info)
| Python | 0 | |
a5e18330ac84a93b9a3ffe7d8493c401d3ade11e | Create version.py | nilmtk/version.py | nilmtk/version.py | version = '0.1.0'
| Python | 0.000001 | |
776350aaaed8a8e3f00a492c1a1735c24f595d89 | add config_dialog.py | dialogs/config_dialog.py | dialogs/config_dialog.py | #-*- coding: utf-8 -*-
from win32ui import IDD_SET_TABSTOPS
from win32ui import IDC_EDIT_TABS
from win32ui import IDC_PROMPT_TABS
from win32con import IDOK
from win32con import IDCANCEL
import win32ui
import win32con
from pywin.mfc import dialog
IDC_EDIT_USERNAME = 2000
IDC_EDIT_PASSWORD = 2001
def ConfigDialogTemplate():
    """Build the win32 dialog-template list for the credentials dialog:
    caption, two labelled edit boxes (account / password) and OK/Cancel.
    """
    # Dialog frame style: fixed, modal, captioned popup with a system menu.
    style = (win32con.DS_SETFONT | win32con.DS_MODALFRAME | win32con.DS_FIXEDSYS
             | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION
             | win32con.WS_SYSMENU)
    cs = win32con.WS_CHILD | win32con.WS_VISIBLE
    listCs = cs | win32con.LBS_NOINTEGRALHEIGHT | win32con.WS_VSCROLL | win32con.WS_TABSTOP
    # Header entry: title, bounding rect, style, menu, font.
    dlg = [[u'输入用户名密码', (0, 0, 200, 75), style, None, (8, "MS Sans Serif")]]
    s = cs | win32con.CBS_DROPDOWN | win32con.WS_VSCROLL | win32con.WS_TABSTOP
    # Static labels (control class 130) and the two edit controls.
    dlg.extend([
        [130, u"账号:", -1, (30, 10, 50, 10), cs | win32con.SS_LEFT],
        ["EDIT", "", IDC_EDIT_USERNAME, (70, 8, 100, 12), cs],
        [130, u"密码:", -1, (30, 30, 50, 30), cs | win32con.SS_LEFT],
        ["EDIT", "", IDC_EDIT_PASSWORD, (70, 30, 100, 12), cs],
    ])
    # Buttons (control class 128): OK is the default push button.
    s = cs | win32con.WS_TABSTOP
    dlg.append([128, u"确认", win32con.IDOK, (30, 50, 50, 15), s | win32con.BS_DEFPUSHBUTTON])
    s = win32con.BS_PUSHBUTTON | s
    dlg.append([128, u"取消", win32con.IDCANCEL, (120, 50, 50, 15), s])
    return dlg
class ConfigDialog(dialog.Dialog):
# Modal credentials dialog; shows itself immediately on construction.
# On acceptance the entered values are exposed as self.username /
# self.password.
def __init__(self):
dialog.Dialog.__init__(self, ConfigDialogTemplate())
self.DoModal()
def OnInitDialog(self):
# Cache the two edit controls so OnOK can read them later.
self.username_control = self.GetDlgItem(IDC_EDIT_USERNAME)
self.password_control = self.GetDlgItem(IDC_EDIT_PASSWORD)
def OnDestroy(self, msg):
# Drop control references to avoid keeping dead window wrappers alive.
del self.username_control
del self.password_control
def OnOK(self):
# Only accept and close the dialog when both fields are non-empty.
# NOTE(review): GetLine() presumably returns the first line of the edit
# control's text -- confirm against the pywin/MFC documentation.
if self.username_control.GetLine() and self.password_control.GetLine():
self.username = self.username_control.GetLine()
self.password = self.password_control.GetLine()
self._obj_.OnOK()
| Python | 0.000003 | |
42faf76ffe421802e628dd2a79f518765d43284b | Create recordsCheck.py | recordsCheck.py | recordsCheck.py | import tensorflow as tf
import glob as glob
import getopt
import sys
import cPickle as pkl
import numpy as np
import time
opts, _ = getopt.getopt(sys.argv[1:],"",["input_dir=", "input_file=", "output_file="])
input_dir = "/data/video_level_feat_v3/"
input_file = ""
output_file = ""
print(opts)
for opt, arg in opts:
if opt in ("--input_dir"):
input_dir = arg
if opt in ("--input_file"):
input_file = arg
if opt in ("--output_file"):
output_file = arg
f = open(input_dir, 'rb')
filepaths = pkl.load(f)
f.close()
filepaths = [input_dir+x for x in filepaths]
features_format = {}
feature_names = []
for x in ['q0', 'q1', 'q2', 'q3', 'q4', 'mean', 'stddv', 'skew', 'kurt', 'iqr', 'rng', 'coeffvar', 'efficiency']:
features_format[x + '_rgb_frame'] = tf.FixedLenFeature([1024], tf.float32)
features_format[x + '_audio_frame'] = tf.FixedLenFeature([128], tf.float32)
feature_names.append(str(x + '_rgb_frame'))
feature_names.append(str(x + '_audio_frame'))
features_format['video_id'] = tf.FixedLenFeature([], tf.string)
features_format['labels'] = tf.VarLenFeature(tf.int64)
features_format['video_length'] = tf.FixedLenFeature([], tf.float32)
start_time = time.time()
errors = []
counter = 0
for filepath in filepaths:
print(counter)
counter += 1
filepaths_queue = tf.train.string_input_producer([filepath], num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filepaths_queue)
features = tf.parse_single_example(serialized_example,features=features_format)
with tf.Session() as sess:
init_op = tf.group(tf.global_variables_initializer(),tf.local_variables_initializer())
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
try:
while True:
proc_features, = sess.run([features])
except tf.errors.OutOfRangeError, e:
coord.request_stop(e)
except:
print("ERROR : "+filepath)
errors.append(filepath)
finally:
print(time.time() - start_time)
coord.request_stop()
coord.join(threads)
f = open(output_file, 'wb')
pkl.dump(errors, f, protocol=pkl.HIGHEST_PROTOCOL)
pkl.dump(counter, f, protocol=pkl.HIGHEST_PROTOCOL)
f.close()
| Python | 0 | |
f4bd76b7ebe376a2a0cea0ac1a44be4d741ce5c5 | Create LeetCode-541.py | LeetCode-541.py | LeetCode-541.py | import math
class Solution(object):
    def ReverseStr(self, str, k):
        """LeetCode 541 -- reverse the first k characters of every 2k-char
        chunk of *str*; a final partial chunk shorter than k is reversed whole.

        Note: the parameter is named ``str`` (shadowing the builtin) to keep
        the original call signature; only slicing is used, so this is safe.
        The debug ``print`` statements from the original were removed.
        """
        ans = ''
        # Number of 2k-sized chunks, rounding the last partial chunk up.
        n = int(math.ceil(len(str) / (2.0 * k)))
        for i in range(n):
            # First k characters of the chunk, reversed ...
            ans += str[2 * i * k:(2 * i + 1) * k][::-1]
            # ... followed by the next k characters unchanged.
            ans += str[(2 * i + 1) * k:(2 * i + 2) * k]
        return ans
# Ad-hoc manual test plus slicing experiments (Python 2 print statements).
rs=Solution()
print rs.ReverseStr('sjodfjoig',3)
s='sjodfjoig'
print s[0:1]
a=''
# Slicing past the end is clamped, so s[8:20] is just the final character.
a += s[8:20]
# s has only 9 characters (valid indexes 0-8), so *indexing* at 10 raises
# IndexError -- that is the answer to "why???" below.
print s[10] #why???
# ... whereas an out-of-range *slice* silently yields the empty string.
print s[10:12] #
print 'a=',a
| Python | 0 | |
class WordDictionary:
    """Trie over lowercase a-z words.

    Fixes over the original: ``isTerminal`` existed but was never consulted,
    so ``contains`` reported True for any stored *prefix*; it was also never
    set when inserting a word that is a prefix of an existing one.  Nodes now
    hold a single character (the original stored the whole remaining suffix,
    which ``get_node`` could never match beyond its first character).
    """

    class Node:
        """One trie node: a character, 26 child slots and an end-of-word flag."""

        def __init__(self, char):
            self.char = char
            # One child slot per lowercase letter ('a' maps to index 0).
            self.children_nodes = [None] * 26
            # True iff an inserted word ends at this node.
            self.isTerminal = False

        def get_char(self):
            return self.char

        def add_node(self, node):
            index = ord(node.char[0]) - 97
            self.children_nodes[index] = node

        def get_node(self, char):
            index = ord(char) - 97
            return self.children_nodes[index]

        def __repr__(self):
            return self.char

    def __init__(self):
        self.root = WordDictionary.Node('')

    def insert(self, string):
        """Add *string* to the trie; returns the root (kept for compatibility)."""
        current_node = self.root
        for char in string:
            next_node = current_node.get_node(char)
            if next_node is None:
                next_node = WordDictionary.Node(char)
                current_node.add_node(next_node)
            current_node = next_node
        # Mark end-of-word even when the whole path already existed.
        current_node.isTerminal = True
        return self.root

    def contains(self, word):
        """Return True iff *word* was inserted (prefixes alone do not count)."""
        current_node = self.root
        for char in str(word):
            current_node = current_node.get_node(char)
            if current_node is None:
                return False
        return current_node.isTerminal
def main():
    """Drive a WordDictionary from stdin.

    The first line gives the number of commands; each following line is
    either ``insert <word>`` or ``contains <word>`` (the latter prints the
    lookup result).
    """
    dictionary = WordDictionary()
    remaining = int(input())
    while remaining != 0:
        parts = input().split()
        action = parts[0]
        if action == 'insert':
            dictionary.insert(parts[1])
        elif action == 'contains':
            print(dictionary.contains(parts[1]))
        remaining -= 1


if __name__ == '__main__':
    main()
| Python | 0.999988 | |
a71b50f4b6a3bc1e760e3796f8c14f6c3e865a34 | replace identity translators with None | modularodm/translators/__init__.py | modularodm/translators/__init__.py | from dateutil import parser as dateparser
from bson import ObjectId
class DefaultTranslator(object):
    """Base translator: no null sentinel, passthrough hooks left undefined."""
    null_value = None
    to_default = None
    from_default = None


class JSONTranslator(DefaultTranslator):
    """Translate datetimes and ObjectIds to/from JSON-safe strings."""

    def to_datetime(self, value):
        return str(value)

    def from_datetime(self, value):
        return dateparser.parse(value)

    def to_ObjectId(self, value):
        return str(value)

    def from_ObjectId(self, value):
        return ObjectId(value)


class StringTranslator(JSONTranslator):
    """Translate every stored value to and from plain strings."""
    null_value = 'none'

    def to_default(self, value):
        return str(value)

    def from_int(self, value):
        return int(value)

    def from_float(self, value):
        return float(value)

    def from_bool(self, value):
        return bool(value)
from bson import ObjectId
class DefaultTranslator(object):
null_value = None
def to_default(self, value):
return value
def from_default(self, value):
return value
def to_ObjectId(self, value):
return str(value)
def from_ObjectId(self, value):
return ObjectId(value)
class JSONTranslator(DefaultTranslator):
def to_datetime(self, value):
return str(value)
def from_datetime(self, value):
return dateparser.parse(value)
class StringTranslator(JSONTranslator):
null_value = 'none'
def to_default(self, value):
return str(value)
def from_int(self, value):
return int(value)
def from_float(self, value):
return float(value)
def from_bool(self, value):
return bool(value) | Python | 0.999999 |
4cdeb6987910d4d5b33d37486ddeaafcde54bb2f | add classify_neuralnet script | hvc/classify_neuralnet.py | hvc/classify_neuralnet.py | #from standard library
import glob
import sys
import os
import shelve
#from third-party
import numpy as np
import scipy.io as scio # to load matlab files
import numpy as np
from scipy.io import wavfile
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import StratifiedKFold
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint, CSVLogger
#from hvc
import hvc.utils.utils
import hvc.neuralnet.models
from hvc.utils import sequences
from hvc.audio.evfuncs import load_cbin,load_notmat
# get command line arguments
args = sys.argv
if len(args) != 2: # (first element, args[0], is the name of this script)
raise ValueError('Script requires one command line arguments, TRAIN_DIR')
TRAIN_DIR = args[1]
os.chdir(TRAIN_DIR)
try:
classify_dict = scio.loadmat('.\\classify\\to_classify.mat')
except FileNotFoundError:
print("Did not find required files in the directory supplied as command-line
" argument.\nPlease double check directory name.")
classify_dirs = classify_dict['classify_dirs']
clf_file = classify_dict['clf_file'][0] #[0] because string stored in np array
extension_id = clf_file.find('.dat')
# need to get rid of '.dat' extension before calling shelve with filename
clf_file = clf_file[:extension_id]
clf_file = '.\\train\\svmrbf_knn_results\\' + clf_file
clf_type = classify_dict['clf_type']
#need to get full directory path
with shelve.open(clf_file, 'r') as shlv:
if clf_type=='knn':
clf = shlv['knn_clf']
scaler = shlv['knn_scaler']
elif clf_type=='svm':
clf = shlv['svm_clf']
scaler = shlv['svm_scaler']
# used in loop below, see there for explanation
SHOULD_BE_DOUBLE = ['Fs',
'min_dur',
'min_int',
'offsets',
'onsets',
'sm_win',
'threshold']
#loop through dirs
for classify_dir in classify_dirs:
os.chdir(classify_dir)
notmats = glob.glob('*.not.mat')
if type(clf)==neighbors.classification.KNeighborsClassifier:
ftr_files = glob.glob('*knn_ftr.to_classify*')
elif type(clf)==SVC:
ftr_files = glob.glob('*svm_ftr.to_classify*')
for ftr_file,notmat in zip(ftr_files,notmats):
if type(clf)==neighbors.classification.KNeighborsClassifier:
samples = load_from_mat(ftr_file,'knn','classify')
elif type(clf)==SVC:
samples = load_from_mat(ftr_file,'svm','classify')
samples_scaled = scaler.transform(samples)
pred_labels = clf.predict(samples_scaled)
#chr() to convert back to character from uint32
pred_labels = [chr(val) for val in pred_labels]
# convert into one long string, what evsonganalty expects
pred_labels = ''.join(pred_labels)
notmat_dict = scio.loadmat(notmat)
notmat_dict['predicted_labels'] = pred_labels
notmat_dict['classifier_type'] = clf_type
notmat_dict['classifier_file'] = clf_file
print('saving ' + notmat)
# evsonganaly/Matlab expects all vars as double
for key, val in notmat_dict.items():
if key in SHOULD_BE_DOUBLE:
notmat_dict[key] = val.astype('d')
scio.savemat(notmat,notmat_dict)
| Python | 0.000782 | |
bb785321cbb9d372f2009a4577404ae75fbd889a | exclude TestApp from cppcheck script | Scripts/cppcheck/cppcheck.py | Scripts/cppcheck/cppcheck.py | # run from root sources directory: python Scripts/cppcheck/cppcheck.py
import os
# Warning suffixes and substrings that should be suppressed in the output.
ignoredEndings = ["is never used", "It is safe to deallocate a NULL pointer", "Throwing exception in destructor"]
ignoredContent = ["MyGUI_UString"]


def isIgnoredWarning(warning):
    """Return True when *warning* ends with an ignored suffix or contains an
    ignored fragment (both loops replaced with idiomatic one-liners)."""
    # str.endswith accepts a tuple of suffixes, replacing the manual loop.
    if warning.endswith(tuple(ignoredEndings)):
        return True
    # Substring test via 'in' instead of find() != -1.
    return any(fragment in warning for fragment in ignoredContent)
def parseOutput():
# Echo every cppcheck warning from temp.cppcheck that is not ignored
# (Python 2 print statement).
file = open("temp.cppcheck", 'r')
line = file.readline()
while line != "":
# Strip the trailing newline before matching against the ignore lists.
line = line[0:len(line)-1]
if (not isIgnoredWarning(line)):
print line
line = file.readline()
# NOTE(review): 'file' shadows the py2 builtin and leaks if an exception
# occurs above; a with-statement would be safer.
file.close ()
def checkFolderSources(folder) :
# Run cppcheck over *folder*, redirect its stderr (the warnings) to
# temp.cppcheck, then print the non-ignored ones.
os.system("cppcheck --enable=all -I Scripts/cppcheck " + folder + " 2>temp.cppcheck")
parseOutput()
#checkFolderSources('MyGUIEngine')
os.system("cppcheck --enable=all -I Scripts/cppcheck -I MyGUIEngine/include MyGUIEngine/src 2>temp.cppcheck")
parseOutput()
checkFolderSources('Demos')
checkFolderSources('Tools')
checkFolderSources('UnitTests/UnitTest_*')
checkFolderSources('Common')
#checkFolderSources('Platforms/OpenGL')
# include temporary disabled due to cppcheck bug
#os.system("cppcheck --enable=all -I Scripts/cppcheck -I Platforms/OpenGL/OpenGLPlatform/include Platforms/OpenGL/OpenGLPlatform/src 2>temp.cppcheck")
os.system("cppcheck --enable=all -I Scripts/cppcheck Platforms/OpenGL/OpenGLPlatform/src 2>temp.cppcheck")
parseOutput()
#checkFolderSources('Platforms/Ogre')
os.system("cppcheck --enable=all -I Scripts/cppcheck -I Platforms/Ogre/OgrePlatform/include Platforms/Ogre/OgrePlatform/src 2>temp.cppcheck")
parseOutput()
#checkFolderSources('Platforms/DirectX')
os.system("cppcheck --enable=all -I Scripts/cppcheck -I Platforms/DirectX/DirectXPlatform/include Platforms/DirectX/DirectXPlatform/src 2>temp.cppcheck")
parseOutput()
checkFolderSources('Plugins')
checkFolderSources('Wrapper')
| # run from root sources directory: python Scripts/cppcheck/cppcheck.py
import os
ignoredEndings = ["is never used", "It is safe to deallocate a NULL pointer", "Throwing exception in destructor"]
ignoredContent = ["MyGUI_UString"]
def isIgnoredWarning(warning):
for ignore in ignoredEndings:
if warning.endswith(ignore):
return True
for ignore in ignoredContent:
if warning.find(ignore) != -1:
return True
return False
def parseOutput():
file = open("temp.cppcheck", 'r')
line = file.readline()
while line != "":
line = line[0:len(line)-1]
if (not isIgnoredWarning(line)):
print line
line = file.readline()
file.close ()
def checkFolderSources(folder) :
os.system("cppcheck --enable=all -I Scripts/cppcheck " + folder + " 2>temp.cppcheck")
parseOutput()
#checkFolderSources('MyGUIEngine')
os.system("cppcheck --enable=all -I Scripts/cppcheck -I MyGUIEngine/include MyGUIEngine/src 2>temp.cppcheck")
parseOutput()
checkFolderSources('Demos')
checkFolderSources('Tools')
checkFolderSources('UnitTests')
checkFolderSources('Common')
#checkFolderSources('Platforms/OpenGL')
# include temporary disabled due to cppcheck bug
#os.system("cppcheck --enable=all -I Scripts/cppcheck -I Platforms/OpenGL/OpenGLPlatform/include Platforms/OpenGL/OpenGLPlatform/src 2>temp.cppcheck")
os.system("cppcheck --enable=all -I Scripts/cppcheck Platforms/OpenGL/OpenGLPlatform/src 2>temp.cppcheck")
parseOutput()
#checkFolderSources('Platforms/Ogre')
os.system("cppcheck --enable=all -I Scripts/cppcheck -I Platforms/Ogre/OgrePlatform/include Platforms/Ogre/OgrePlatform/src 2>temp.cppcheck")
parseOutput()
#checkFolderSources('Platforms/DirectX')
os.system("cppcheck --enable=all -I Scripts/cppcheck -I Platforms/DirectX/DirectXPlatform/include Platforms/DirectX/DirectXPlatform/src 2>temp.cppcheck")
parseOutput()
checkFolderSources('Plugins')
checkFolderSources('Wrapper')
| Python | 0.000001 |
bc34d530f4a21b5f06228d626f446c617b9c8876 | Add example that mirrors defconfig and oldconfig. | examples/defconfig_oldconfig.py | examples/defconfig_oldconfig.py | # Produces exactly the same output as the following script:
#
# make defconfig
# echo CONFIG_ETHERNET=n >> .config
# make oldconfig
# echo CONFIG_ETHERNET=y >> .config
# yes n | make oldconfig
#
# This came up in https://github.com/ulfalizer/Kconfiglib/issues/15.
import kconfiglib
import sys
# sys.argv[1] is the path to the top-level Kconfig file.
conf = kconfiglib.Config(sys.argv[1])
# Mirrors defconfig
conf.load_config("arch/x86/configs/x86_64_defconfig")
conf.write_config(".config")
# Mirrors the first oldconfig
conf.load_config(".config")
conf["ETHERNET"].set_user_value('n')
conf.write_config(".config")
# Mirrors the second oldconfig
conf.load_config(".config")
conf["ETHERNET"].set_user_value('y')
# "yes n | make oldconfig": answer 'n' to every symbol that would prompt,
# i.e. every symbol with no user value that can currently be set to 'n'.
for s in conf:
if s.get_user_value() is None and 'n' in s.get_assignable_values():
s.set_user_value('n')
# Write the final configuration
conf.write_config(".config")
| Python | 0 | |
7c270e2fb5e3169f179e045cc58fdd4d58672859 | add fixCAs.py to master | fixCAs.py | fixCAs.py | import sys
from valuenetwork.valueaccounting.models import *
# One-off data-migration script (Python 2 / Django): backfill the
# denormalised is_context flag on every agent from its agent_type.
agents = EconomicAgent.objects.all()
#import pdb; pdb.set_trace()
count = 0
for agent in agents:
agent.is_context = agent.agent_type.is_context
try:
agent.save()
count = count + 1
except:
# NOTE(review): bare 'except:' also swallows KeyboardInterrupt/SystemExit;
# narrow to 'except Exception' and consider printing the full traceback
# instead of only the exception type.
print "Unexpected error:", sys.exc_info()[0]
# Number of agents successfully saved.
print "count = " + str(count)
| Python | 0 | |
171d573082e528b1f103db7ea22022fdcb24d629 | Create count-depth_true_false_unknown.py | binning/count-depth_true_false_unknown.py | binning/count-depth_true_false_unknown.py | #!/usr/bin/env python
from sys import argv, stdout, stderr, exit
from numpy import mean
# simple dummy weight function counting each sequences as one
class oneweight:
__getitem__ = lambda self,key: 1
def usage():
print >> stderr, 'Usage: ', argv[0], '--labels lab.racol --predictions pred.racol [--with-unknown-labels --weights sequences.weights --scale .001]'
if __name__ == "__main__":
import getopt
# parse command line options
try:
opts, args = getopt.getopt( argv[1:], 'hl:p:w:s:u', ['help', 'labels=','predictions=','weights=','scale=','with-unknown-labels'] )
except getopt.GetoptError, err:
print str( err ) # will print something like "option -a not recognized"
usage()
exit(2)
# default parameters
reffile = None
predfile = None
weightfile = None
unknown_labels = False
scale = 1
for o, a in opts:
if o in ("-h", "--help"):
usage()
exit()
elif o in ("-l", "--labels"):
reffile = a
elif o in ("-p", "--predictions"):
predfile = a
elif o in ("-w", "--weights"):
weightfile = a
elif o in ("-u", "--with-unknown-labels"):
unknown_labels = True
elif o in ("-s", "--scale"):
scale = float( a )
else:
assert False, "unhandled option"
if not reffile:
print >>stderr, "you must specify a file for taxonomic labels"
usage()
exit( 3 )
if not predfile:
print >>stderr, "you must specify a file for taxonomic predictions"
usage()
exit( 4 )
# read ref assignments
ref={}
with open( reffile, "r" ) as f:
for line in f:
if line[0] != "#":
line = line.rstrip( "\n" ).split( "\t" )
ref[line[0]] = line[1:]
# read predictions
pred={}
with open( predfile, "r" ) as f:
for line in f:
if line[0] != "#":
line = line.rstrip( "\n" ).split( "\t" )
pred[line[0]] = line[1:]
# read weights if given
if weightfile:
weight = {}
with open( weightfile, "r" ) as f:
for line in f:
name, w = line.strip().split( "\t", 2 )[:2]
weight[name] = int( w )
else:
weight = oneweight()
# output only false lines in modified format
correct = {}
incorrect = {}
unknowns = {}
depth = 0
counter = 0
for seq, path in ref.items():
try:
counter += 1
#print path, pred[seq]
plen = min( len( path ), len( pred[seq]) )
for otax, ptax in zip( path, pred[seq] ):
if ptax == "":
plen -= 1
elif unknown_labels and otax == "":
try:
unknowns[plen] += weight[seq]
except KeyError:
unknowns[plen] = weight[seq]
break
elif ptax == otax:
try:
correct[plen] += weight[seq]
except KeyError:
correct[plen] = weight[seq]
break
else:
try:
incorrect[plen] += weight[seq]
except KeyError:
incorrect[plen] = weight[seq]
break
if not plen:
try:
correct[plen] += weight[seq]
except KeyError:
correct[plen] = weight[seq]
depth = max( depth, plen )
del pred[seq] #remove entry from predictions
except KeyError: #handle no prediction as root assignment
try:
correct[0] += weight[seq]
except KeyError:
correct[0] = weight[seq]
stderr.write( "%s not found in prediction file\n" % (seq) )
for seq, path in pred.items():
counter += 1
plen = len( path )
for tax in path:
if tax != "":
try:
unknowns[plen] += weight[seq]
except KeyError:
unknowns[plen] = weight[seq]
break
else:
plen -= 1
if not plen:
try:
unknowns[0] += weight[seq]
except KeyError:
unknowns[0] = weight[seq]
depth = max( depth, plen )
if type( weight ) == dict:
assert sum( correct.values() + incorrect.values() + unknowns.values() ) == sum( weight.values() )
else:
assert counter == sum( correct.values() + incorrect.values() + unknowns.values() )
print "depth\ttrue\tfalse\tunknown"
if scale == 1:
for l in range( depth + 1 ):
print "%d\t%d\t%d\t%d" % (l, correct.get( l, 0 ), incorrect.get( l, 0 ), unknowns.get( l, 0 ))
else:
for l in range( depth + 1 ):
print "%d\t%.2f\t%.2f\t%.2f" % (l, scale*correct.get( l, 0 ), scale*incorrect.get( l, 0 ), scale*unknowns.get( l, 0 ))
| Python | 0.999989 | |
42d1ec95b69c80a5d4e60af6e088f8d30f95f160 | Create documentation of DataSource Settings | ibmcnx/test/objsExport.py | ibmcnx/test/objsExport.py | #-----------------------------------------------------------------
# Configuration Objects Export Script
# Exploits the AdminConfig component of wsadmin to export WAS configuration
# objects (ie. DataSource, J2CConnetionFactory, etc) to an EXISTING folder.
# You must specify the folder, the type of objects you want to export
# and at least the cell they belong to. Optionally, you can further
# refine the scope by adding a node and a server name.
# It will produce two files for every exported object:
#
# 1) ObjectID.props, containing header-like information
# 2) ObjectID, containing the actual configuration attributes
#
# All of the configuration objects matching the imposed criteria
# will be exported to files. Please don't touch the header info,
# unless you know what you're doing and/or it's necessary (see
# objsImport.py), but feel free to modify the attributes as required.
# Deleting the .props file (or changing its extension) will prevent
# the object from being imported by the objsImport.py wsadmin script.
# This is intended to let you easily select which objects to import.
#
# -------------
# NOTE: specifying a certain scope will export every object belonging to it
# and its hierarchically dependent entities. For example, if you only specify
# a cell, objects defined on its nodes and servers will be exported as well.
# This behaviour is somehow different w.r.t. what you observe on WAS webconsole
# (i.e., when you list objects of a certain type, defined on a certain scope).
# -------------
#
# Usage:
# wsadmin -lang jython [other wsadmin params] -f objExport.py <export_path> <obj_type> <cell_name> [<node_name> [<server_name>]]
#
# eg. wsadmin -lang jython -conntype none -f objsExport.py /u/myuserid/export DataSource myCell
# wsadmin -lang jython -conntype none -f objsExport.py /u/myuserid/export DataSource myCell myNodeA
# wsadmin -lang jython -conntype none -f objsExport.py /u/myuserid/export DataSource myCell myNodeA myServer1
#
# wsadmin -conntype SOAP -host 9.123.45.67 -port 8880 ...
# wsadmin -conntype RMI -host AIX61 -port 2809 ...
#
# Parameters:
# export_path: path to the local folder to receive your exported objects
# obj_type: type of objects to be exported, as defined by AdminConfig.types()
# cell_name: cell name as it appears in WAS webconsole
# node_name: node name as it appears in WAS webconsole
# server_name: server name as it appears in WAS webconsole
#
#-----------------------------------------------------------------
import os, string
from java.io import FileOutputStream
from java.util import Properties
from java.lang import System
from sys import argv, exit
def exportObjectsAttributes(saveDir, objType, cellName, nodeName, srvName):
lineSeparator = System.getProperty("line.separator")
if saveDir[-1] != "/":
saveDir = saveDir + "/"
#-------------------------------------------------------------
# get the object ID corresponding to specified scope
#-------------------------------------------------------------
if srvName != "":
scopeID = AdminConfig.getid("/Cell:" + cellName + "/Node:" + nodeName + "/Server:" + srvName + "/")
elif nodeName != "":
scopeID = AdminConfig.getid("/Cell:" + cellName + "/Node:" + nodeName + "/")
else:
scopeID = AdminConfig.getid("/Cell:" + cellName + "/")
if scopeID == "" or scopeID == None:
print "Failed: Could not find the Cell, Node or Server you asked for!"
return
#-------------------------------------------------------------
# check if to-be-exported object type requires a provider
#-------------------------------------------------------------
typeAttributes = AdminConfig.attributes(objType).split(lineSeparator)
needsProvider = 0
for typeAttribute in typeAttributes:
if typeAttribute.startswith("provider "):
needsProvider = 1
#-------------------------------------------------------------
# iterate on all configuration objects of the specified type
# belonging to the specified scope
#-------------------------------------------------------------
for objItem in AdminConfig.list(objType, scopeID).split(lineSeparator):
if objItem == "" or objItem == None:
print "Failed: Could not locate any Configuration Object of type " + objType + " in the scope you specified!"
else:
#-------------------------------------------------------------
# get current configuration object attributes
#-------------------------------------------------------------
props = Properties()
objItemName = AdminConfig.showAttribute(objItem, "name")
print "Exporting Item: " + objItemName
objItemAttributes = AdminConfig.showall(objItem)
#-------------------------------------------------------------
# build the string needed to obtain parent object ID,
# this NEED NOT be the same as the specified scope
#-------------------------------------------------------------
verticalSlash = objItem.rfind("|")
openBracket = objItem.rfind("(", 0, verticalSlash)
parentPath = objItem[openBracket+1:verticalSlash]
objParentPath = ""
serversInd = parentPath.find("servers")
if serversInd != -1:
objParentPath = "Server:" + parentPath[serversInd+8:] + "/"
else:
serversInd = len(parentPath)+1
nodesInd = parentPath.find("nodes")
if nodesInd != -1:
objParentPath = "Node:" + parentPath[nodesInd+6:serversInd-1] + "/" + objParentPath
else:
nodesInd = len(parentPath)+1
cellsInd = parentPath.find("cells")
if cellsInd != -1:
objParentPath = "Cell:" + parentPath[cellsInd+6:nodesInd-1] + "/" + objParentPath
objParentPath = "/" + objParentPath
#-------------------------------------------------------------
# get parent object ID or provider object ID if needed
#-------------------------------------------------------------
objItemParent = AdminConfig.getid(objParentPath)
if needsProvider:
objItemParent = AdminConfig.showAttribute(objItem, "provider")
#-------------------------------------------------------------
# write .props file and attributes file
#-------------------------------------------------------------
props.setProperty("ObjectType", objType)
props.setProperty("ObjectParent", objItemParent)
tmp = objItem
if tmp[0] == "\"":
tmp = tmp[1:]
if tmp[-1] == "\"":
tmp = tmp[:-1]
ind = tmp.rfind(".xml#")
objItemFileName = tmp[ind+5:-1]
props.setProperty("ObjectFileName", objItemFileName)
savePropertiesToFile(props, saveDir, objItem, objItemFileName)
objItemFileOutputStream = FileOutputStream(saveDir + objItemFileName)
objItemFileOutputStream.write(objItemAttributes)
objItemFileOutputStream.close()
def savePropertiesToFile(props, saveDir, header, objItemRootFileName):
fileOutStream = FileOutputStream(saveDir + objItemRootFileName + ".props")
props.store(fileOutStream, header)
fileOutStream.close()
#-----------------------------------------------------------------
# Main
#-----------------------------------------------------------------
if len(argv) == 5:
saveDir = argv[0]
objType = argv[1]
cellName = argv[2]
nodeName = argv[3]
srvName = argv[4]
elif len(argv) == 4:
saveDir = argv[0]
objType = argv[1]
cellName = argv[2]
nodeName = argv[3]
srvName = ""
elif len(argv) == 3:
saveDir = argv[0]
objType = argv[1]
cellName = argv[2]
nodeName = ""
srvName = ""
else:
print "\n" + "Failed: Incorrect number of parameters!"
exit()
print ""
exportObjectsAttributes(saveDir, objType, cellName, nodeName, srvName)
| Python | 0 | |
38f90c31f6a0f4459a8ba2f96205d80588b384c5 | Add CollectDict (incomplete dicts) | calvin/actorstore/systemactors/json/CollectDict.py | calvin/actorstore/systemactors/json/CollectDict.py | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, condition, manage
from calvin.utilities.calvinlogger import get_actor_logger
_log = get_actor_logger(__name__)
class CollectDict(Actor):
"""
Collect tokens from token port, forming a dict according to mapping. May produce
a partial dictionary.
Inputs:
token(routing="collect-any-tagged"): token
Outputs:
dict : Collected dictionary according to 'mapping'
"""
@manage(['mapping'])
def init(self, mapping):
self.mapping = mapping
def will_start(self):
self.inports['token'].set_config({'port-mapping':self.mapping})
@condition(['token'], ['dict'])
def collect_tokens(self, token):
_log.info("token: %r" % (token,))
return ActionResult(production=(token,))
action_priority = (collect_tokens, )
test_args = []
test_kwargs = {'select':{}}
| Python | 0 | |
625139f9d3e5c06f4e5b355eaa070389f9a81954 | Add utils module | website/addons/dropbox/utils.py | website/addons/dropbox/utils.py | # -*- coding: utf-8 -*-
import os
from website.project.utils import get_cache_content
from website.addons.dropbox.client import get_node_addon_client
def get_file_name(path):
    """Return the final component of a Dropbox path, ignoring surrounding slashes."""
    trimmed = path.strip('/')
    _, tail = os.path.split(trimmed)
    return tail
# TODO(sloria): TEST ME
def render_dropbox_file(file_obj, client=None):
    """Return rendered (MFR) HTML for a Dropbox file, using the cache when possible.

    :param file_obj: Dropbox file model; must expose ``get_cache_filename``,
        ``node``, ``path`` and ``download_url``.
    :param client: Optional Dropbox client; when None, one is built from the
        node's addon settings only if a cache miss forces a fetch.
    """
    # Filename for the cached MFR HTML file
    cache_name = file_obj.get_cache_filename(client=client)
    node_settings = file_obj.node.get_addon('dropbox')
    rendered = get_cache_content(node_settings, cache_name)
    if rendered is None:  # not in MFR cache
        dropbox_client = client or get_node_addon_client(node_settings)
        # metadata is unused here; the API returns file and metadata in one call.
        file_response, metadata = dropbox_client.get_file_and_metadata(file_obj.path)
        # start_render=True makes get_cache_content render and cache the content.
        rendered = get_cache_content(
            node_settings=node_settings,
            cache_file=cache_name,
            start_render=True,
            file_path=get_file_name(file_obj.path),
            file_content=file_response.read(),
            download_path=file_obj.download_url
        )
    return rendered
| Python | 0.000001 | |
9ed712af79934be25de4b8166ab6c23d1111d024 | Add MV powerflow db tables creation script | calc_ego_powerflow/setup_schema_tables.py | calc_ego_powerflow/setup_schema_tables.py | #!/usr/bin/env python3
# coding: utf-8
from oemof import db
from oemof.db import tools
def create_powerflow_schema(engine, schema, tables):
    """Creates new powerflow schema in database

    Parameters
    ----------
    engine: SQLalchemy engine
    schema: str
        Name of the schema to create
    tables: dict
        Keys are table names, values are lists of column/constraint DDL
        fragments that are joined verbatim into a CREATE TABLE statement
    """
    conn = engine.connect()

    group = 'oeuser'

    try:
        # create schema
        sql_create_schema = '''CREATE SCHEMA {0} AUTHORIZATION {1};
            '''.format(schema, group)
        conn.execute(sql_create_schema)

        # grant access rights for schema
        tools.grant_schema(conn, schema, group)

        # alter default privileges for objects later created in the schema
        sql_privileges_tables = '''ALTER DEFAULT PRIVILEGES IN SCHEMA {0}
            GRANT INSERT, SELECT, UPDATE, DELETE, TRUNCATE, REFERENCES, TRIGGER
            ON TABLES TO {1};'''.format(schema, group)
        conn.execute(sql_privileges_tables)

        sql_privileges_sequences = '''ALTER DEFAULT PRIVILEGES IN SCHEMA {0}
            GRANT SELECT, UPDATE, USAGE ON SEQUENCES TO {1};
            '''.format(schema, group)
        conn.execute(sql_privileges_sequences)

        # BUG FIX: this statement was previously executed WITHOUT .format(),
        # sending the literal placeholders '{0}'/'{1}' to the database.
        sql_privileges_functions = '''ALTER DEFAULT PRIVILEGES IN SCHEMA {0}
            GRANT EXECUTE ON FUNCTIONS TO {1};'''.format(schema, group)
        conn.execute(sql_privileges_functions)
    except Exception:
        # Narrowed from a bare 'except' (which also swallows KeyboardInterrupt).
        print('Schema {} already existed and is not created newly...'\
            .format(schema))

    # iterate over tables and create them
    sql_create = 'CREATE TABLE'

    for table in tables.keys():
        sql_create_table = sql_create + ' {0}.{1} ('.format(schema, table) + \
            ', '.join(tables[table]) + ') WITH(OIDS = FALSE);'
        conn.execute(sql_create_table)
        tools.grant_db_access(conn, schema, table, group)
# Bus.metadata.drop_all(engine)
if __name__ == '__main__':
    """Create or delete powerflow schema"""
    # Build the MV powerflow schema on the 'oedb' database connection.
    engine = db.engine(section='oedb')
    schema = 'calc_ego_mv_powerflow'
    # tables = [Bus, Line, Generator, Load, Storage, Source]
    # Per-table column/constraint DDL fragments; joined verbatim into
    # CREATE TABLE by create_powerflow_schema().
    tables = {
        'bus': ['bus_id bigint NOT NULL', 'v_nom double precision',
                'v_mag_pu_min double precision DEFAULT 0',
                'v_mag_pu_max double precision',
                'geom geometry(Point,4326)',
                "scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                'CONSTRAINT bus_data_pkey PRIMARY KEY (bus_id, scn_name)'],
        'generator': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                      'generator_id bigint NOT NULL', 'bus bigint',
                      "control text DEFAULT 'PQ'::text",
                      'p_nom double precision DEFAULT 0',
                      'p_min_pu_fixed double precision DEFAULT 0',
                      'p_max_pu_fixed double precision DEFAULT 1',
                      'sign double precision DEFAULT 1',
                      'CONSTRAINT generator_data_pkey PRIMARY KEY (generator_id, scn_name)'],
        'line': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                 'line_id bigint NOT NULL', 'bus0 bigint', 'bus1 bigint',
                 'x numeric DEFAULT 0',
                 'r numeric DEFAULT 0',
                 'g numeric DEFAULT 0',
                 'b numeric DEFAULT 0',
                 's_nom numeric DEFAULT 0',
                 'length double precision',
                 'cables integer',
                 'geom geometry(MultiLineString,4326)',
                 'CONSTRAINT line_data_pkey PRIMARY KEY (line_id, scn_name)'],
        'load': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                 "load_id bigint NOT NULL",
                 'bus bigint',
                 'sign double precision DEFAULT (-1)',
                 'CONSTRAINT load_data_pkey PRIMARY KEY (load_id, scn_name)'],
        'temp_resolution': ['temp_id bigint NOT NULL',
                            'timesteps bigint NOT NULL',
                            'resolution text',
                            'start_time timestamp without time zone', # style: YYYY-MM-DD HH:MM:SS
                            'CONSTRAINT temp_resolution_pkey PRIMARY KEY (temp_id)'],
        'transformer': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                        'trafo_id bigint NOT NULL',
                        'bus0 bigint',
                        'bus1 bigint',
                        'x numeric DEFAULT 0',
                        'r numeric DEFAULT 0',
                        'g numeric DEFAULT 0',
                        'b numeric DEFAULT 0',
                        's_nom double precision DEFAULT 0',
                        'tap_ratio double precision',
                        'geom geometry(MultiLineString,4326)',
                        'CONSTRAINT transformer_data_pkey PRIMARY KEY (trafo_id, scn_name)'],
        'scenario_settings': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                              'bus character varying',
                              'bus_v_mag_set character varying',
                              'generator character varying',
                              'generator_pq_set character varying',
                              'line character varying',
                              'load character varying',
                              'load_pq_set character varying',
                              'storage character varying',
                              'storage_pq_set character varying',
                              'temp_resolution character varying',
                              'transformer character varying',
                              'CONSTRAINT scenario_settings_pkey PRIMARY KEY (scn_name)'],
        'bus_v_mag_set': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                          'bus_id bigint NOT NULL',
                          'temp_id integer NOT NULL',
                          'v_mag_pu_set double precision[]',
                          'CONSTRAINT bus_v_mag_set_pkey PRIMARY KEY (bus_id, temp_id, scn_name)'],
                          # 'CONSTRAINT bus_v_mag_set_temp_fkey FOREIGN KEY (temp_id) REFERENCES calc_ego_hv_powerflow.temp_resolution (temp_id)'],
        'generator_pq_set': ["scn_name character varying NOT NULL DEFAULT 'Status Quo'::character varying",
                             'generator_id bigint NOT NULL',
                             'temp_id integer NOT NULL',
                             'p_set double precision[]',
                             'q_set double precision[]',
                             'p_min_pu double precision[]',
                             'p_max_pu double precision[]',
                             'CONSTRAINT generator_pq_set_pkey PRIMARY KEY (generator_id, temp_id, scn_name)'],
                             # 'CONSTRAINT generator_pq_set_temp_fkey FOREIGN KEY (temp_id) REFERENCES calc_ego_hv_powerflow.temp_resolution (temp_id)'],
        'load_pq_set': ["scn_name character varying NOT NULL "
                        "DEFAULT 'Status Quo'::character varying",
                        'load_id bigint NOT NULL',
                        'temp_id integer NOT NULL',
                        'p_set double precision[]',
                        'q_set double precision[]',
                        'CONSTRAINT load_pq_set_pkey '
                        'PRIMARY KEY (load_id, temp_id, scn_name)'],
                        # 'CONSTRAINT load_pq_set_temp_fkey FOREIGN KEY (temp_id) '
                        # 'REFERENCES calc_ego_hv_powerflow.temp_resolution (temp_id)']
        }
create_powerflow_schema(engine, schema, tables) | Python | 0 | |
1cf4de645dd44269b01b7f57322a3edca8334fc8 | Add another example script for MIDI output: a minimal drum pattern sequencer | mididrumbox.py | mididrumbox.py | from microbit import button_a, display
from microbit import uart
from microbit import running_time, sleep
# MIDI status-byte prefixes (upper nibble of the status byte).
NOTE_ON = 0x90
CONTROLLER_CHANGE = 0xB0
PROGRAM_CHANGE = 0xC0


class MidiOut:
    """Minimal MIDI output over any byte-oriented device exposing write().

    Channel numbers are 1-based (1..16), as in user-facing MIDI documentation.
    """

    def __init__(self, device, channel=1):
        if not 1 <= channel <= 16:
            raise ValueError('channel must be an integer between 1..16.')
        self.channel = channel
        self.device = device

    def channel_message(self, command, *data, ch=None):
        """Send one channel message; all data bytes are masked to 7 bits."""
        channel = ch if ch else self.channel
        status = (command & 0xf0) | ((channel - 1) & 0xf)
        payload = bytearray([status])
        for value in data:
            payload.append(value & 0x7f)
        self.device.write(bytes(payload))

    def note_on(self, note, velocity=127, ch=None):
        self.channel_message(NOTE_ON, note, velocity, ch=ch)

    def control_change(self, control, value, lsb=False, ch=None):
        coarse = value >> 7 if lsb else value
        self.channel_message(CONTROLLER_CHANGE, control, coarse, ch=ch)
        if lsb and control < 20:
            # Controllers 0..19 have a matching fine-resolution controller at +32.
            self.channel_message(CONTROLLER_CHANGE, control + 32, value, ch=ch)

    def program_change(self, program, ch=None):
        self.channel_message(PROGRAM_CHANGE, program, ch=ch)
class Pattern:
    """A drum pattern parsed from a simple text notation.

    Each non-comment line reads "<note> <steps> [description]"; every
    character of <steps> selects a velocity from ``velocities``.
    """

    # Step characters mapped to MIDI velocities (None = let the note ring).
    velocities = {
        "-": None,  # continue note
        ".": 0,     # off
        "+": 10,    # ghost
        "s": 60,    # soft
        "m": 100,   # medium
        "x": 120,   # hard
    }

    def __init__(self, src):
        self.step = 0
        self.instruments = []
        self._active_notes = {}

        for raw in src.split('\n'):
            line = raw.strip()
            if not line or line.startswith('#'):
                continue
            fields = line.split(" ", 2)
            if len(fields) < 2:
                continue
            # fields[2], when present, is a free-form description and is ignored.
            self.instruments.append((int(fields[0]), fields[1]))

        self.steps = max(len(hits) for _, hits in self.instruments)

    def playstep(self, midiout, channel=10):
        """Emit note events for the current step, then advance the step pointer."""
        for note, hits in self.instruments:
            velocity = self.velocities.get(hits[self.step])
            if velocity is None:
                continue
            if self._active_notes.get(note):
                # A sounding note must be released first (velocity 0 == note off).
                midiout.note_on(note, 0, ch=channel)
                self._active_notes[note] = 0
            if velocity > 0:
                midiout.note_on(note, max(1, velocity), ch=channel)
                self._active_notes[note] = velocity
        self.step = (self.step + 1) % self.steps
class Sequencer:
    """Drives a Pattern against a MidiOut at a fixed tempo."""

    def __init__(self, midiout, bpm=120, channel=10):
        self.midiout = midiout
        # Milliseconds per pattern step: 60000 / bpm / 4 (a sixteenth note),
        # with bpm clamped to the 20..400 range.
        self.mpq = 15000. / max(20, min(bpm, 400))
        self.channel = channel

    def play(self, pattern, kit=None):
        """Loop forever, playing one pattern step per self.mpq milliseconds.

        kit: optional program number sent first (e.g. to select a drum kit).
        """
        if kit:
            self.midiout.program_change(kit, ch=self.channel)
            # give MIDI instrument some time to load drumkit
            sleep(300)

        while True:
            last_tick = running_time()
            pattern.playstep(self.midiout, self.channel)
            # Sleep only for whatever is left of the step after playstep's work.
            timetowait = max(0, self.mpq - (running_time() - last_tick))
            if timetowait > 0:
                sleep(timetowait)
# Drum groove: one line per note number, sixteen sixteenth-note steps each.
# (36/38/42/46 are presumably GM kick/snare/hi-hats -- confirm against the
# connected sound module.)
FUNKYDRUMMER = """\
36 x.x.......x..x..
38 ....x..m.m.mx..m
42 xxxxx.x.xxxxx.xx
46 .....x.x.....x..
"""

# Blink pixel (0, 0) until button A is pressed, then start playing.
while True:
    if button_a.is_pressed():
        display.set_pixel(0, 0, 0)
        break
    display.set_pixel(0, 0, 5)
    sleep(100)
    display.set_pixel(0, 0, 0)
    sleep(100)

# Initialize UART for MIDI (31250 baud is the standard MIDI rate)
uart.init(baudrate=31250)

midi = MidiOut(uart)
seq = Sequencer(midi, bpm=90)
seq.play(Pattern(FUNKYDRUMMER), kit=9)
| Python | 0.000034 | |
821a3826110ecfc64ab431b7028af3aae8aa80db | Add 20150522 question. | LeetCode/house_robbers.py | LeetCode/house_robbers.py | """
You are a professional robber planning to rob houses along a street. Each house
has a certain amount of money stashed, the only constraint stopping you from
robbing each of them is that adjacent houses have security system connected and
it will automatically contact the police if two adjacent houses were broken
into on the same night.
Given a list of non-negative integers representing the amount of money of each
house, determine the maximum amount of money you can rob tonight without
alerting the police.
"""
class Solution:
    # @param {integer[]} nums
    # @return {integer}
    def rob(self, nums):
        """Return the maximum loot obtainable without robbing two adjacent houses.

        Dynamic programming in O(n) time / O(1) space: ``current`` is the best
        total over the houses seen so far, ``previous`` the best total that
        excludes the most recent house.
        """
        if not nums:
            return 0
        # Dropped an unused 'result' accumulator from the original.
        current, previous = 0, 0
        for amount in nums:
            current, previous = max(current, amount + previous), current
        return current
| Python | 0.000001 | |
06cd9e8e5006d68d7656b7f147442e54aaf9d7a1 | Add female Public Health College and Club | clubs/migrations/0035_add_public_health_college.py | clubs/migrations/0035_add_public_health_college.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_college(apps, schema_editor):
    """Forward migration: create the female Public Health college and its club."""
    Club = apps.get_model('clubs', 'Club')
    College = apps.get_model('clubs', 'College')
    StudentClubYear = apps.get_model('core', 'StudentClubYear')
    year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015,
                                                 end_date__year=2016)
    # The new club hangs under the Riyadh female presidency for 2015/2016.
    female_presidency = Club.objects.get(english_name="Presidency (Riyadh/Female)",
                                         year=year_2015_2016)
    r_i_f = College.objects.create(city='R', section='NG', name='I',
                                   gender='F')
    Club.objects.create(name="كلية الصحة العامة والمعلوماتية الصحية",
                        english_name="College of Public Health and Health Informatics",
                        description="",
                        email="pending@ksau-hs.edu.sa",
                        parent=female_presidency,
                        gender="F",
                        year=year_2015_2016,
                        city="R",
                        college=r_i_f)
def remove_college(apps, schema_editor):
    """Reverse migration: delete the college and club created by add_college."""
    Club = apps.get_model('clubs', 'Club')
    College = apps.get_model('clubs', 'College')
    StudentClubYear = apps.get_model('core', 'StudentClubYear')
    year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015,
                                                 end_date__year=2016)
    College.objects.get(city='R', section='NG', name='I',
                        gender='F').delete()
    # BUG FIX: the fetched Club was never deleted, leaving the reverse
    # migration incomplete.
    Club.objects.get(english_name="College of Public Health and Health Informatics",
                     city='R', gender='F', year=year_2015_2016).delete()
class Migration(migrations.Migration):
    dependencies = [
        ('clubs', '0034_club_media_assessor'),
    ]
    operations = [
        # Data migration: add_college forward, remove_college on rollback.
        migrations.RunPython(
            add_college,
            reverse_code=remove_college),
    ]
| Python | 0 | |
c2257a268662c4ea220c6c4d869d38c9f9ab55de | Create hcsr04.py | hcsr04.py | hcsr04.py | !/usr/bin/env python
#
# HC-SR04 interface code for the Raspberry Pi
#
# William Henning @ http://Mikronauts.com
#
# uses joan's excellent pigpio library
#
# Does not quite work in one pin mode, will be updated in the future
#
import time
import pigpio
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
def _echo1(gpio, level, tick):
    # pigpio rising-edge callback: record the tick at which ECHO went high.
    global _high
    _high = tick
def _echo0(gpio, level, tick):
    # pigpio falling-edge callback: echo pulse width = fall tick - rise tick;
    # signal readDistance2's wait loop that a measurement is ready.
    global _done, _high, _time
    _time = tick - _high
    _done = True
def readDistance2(_trig, _echo):
    """Trigger one HC-SR04 measurement and return the echo pulse width.

    Returns the sentinel 99999 when no echo arrives within roughly 50 ms.
    (Callers divide the result by 58, the usual microseconds-to-cm factor.)
    """
    global pi, _done, _time
    _done = False
    pi.set_mode(_trig, pigpio.OUTPUT)
    # Fire the trigger pulse (pigpio trigger: pulse length 50, level high).
    pi.gpio_trigger(_trig,50,1)
    pi.set_mode(_echo, pigpio.INPUT)
    time.sleep(0.0001)
    tim = 0
    # Poll in 1 ms steps until the falling-edge callback sets _done.
    while not _done:
        time.sleep(0.001)
        tim = tim+1
        if tim > 50:
            return 99999
    return _time
# Connect to the pigpio daemon on this host.
pi = pigpio.pi('localhost',1234)
if __name__ == "__main__":
    # Register rise/fall callbacks on each sensor's ECHO pin.
    # NOTE(review): my_echo1/my_echo0 are overwritten on every assignment, so
    # only the last pair of handles stays cancellable -- confirm intended.
    my_echo1 = pi.callback(10, pigpio.RISING_EDGE, _echo1)
    my_echo0 = pi.callback(10, pigpio.FALLING_EDGE, _echo0)
    my_echo1 = pi.callback(25, pigpio.RISING_EDGE, _echo1)
    my_echo0 = pi.callback(25, pigpio.FALLING_EDGE, _echo0)
    my_echo1 = pi.callback(8, pigpio.RISING_EDGE, _echo1)
    my_echo0 = pi.callback(8, pigpio.FALLING_EDGE, _echo0)
    my_echo1 = pi.callback(5, pigpio.RISING_EDGE, _echo1)
    my_echo0 = pi.callback(5, pigpio.FALLING_EDGE, _echo0)
    my_echo1 = pi.callback(12, pigpio.RISING_EDGE, _echo1)
    my_echo0 = pi.callback(12, pigpio.FALLING_EDGE, _echo0)
    my_echo1 = pi.callback(16, pigpio.RISING_EDGE, _echo1)
    my_echo0 = pi.callback(16, pigpio.FALLING_EDGE, _echo0)
    while 1:
        #print "DISTANCE 1: ",(readDistance2(24,10)/58),"\tDISTANCE 2: ",(readDistance2(9,25)/58),"\tDI$
        #print "DISTANCE 2: ",(readDistance2(9,25)/58)
        #print "DISTANCE 3: ",(readDistance2(11,8)/58)
        #print "DISTANCE 4: ",(readDistance2(7,5)/58)
        #print "DISTANCE 5: ",(readDistance2(6,12)/58)
        #print "DISTANCE 6: ",(readDistance2(19,16)/58)
        # Publish the six readings (scaled by 1/58) to memcached keys d1..d6.
        mc.set("d1",(readDistance2(24,10)/58))
        mc.set("d2",(readDistance2(9,25)/58))
        mc.set("d3",(readDistance2(11,8)/58))
        mc.set("d4",(readDistance2(7,5)/58))
        mc.set("d5",(readDistance2(6,12)/58))
        mc.set("d6",(readDistance2(19,16)/58))
        time.sleep(0.1)
#    my_echo1.cancel()
#    my_echo0.cancel()
| Python | 0 | |
f4357343df1d13f5828c233e84d14586a1f786d0 | add functools03.py | trypython/stdlib/functools03.py | trypython/stdlib/functools03.py | # coding: utf-8
"""
functoolsモジュールについて
singledispatch関数についてのサンプルです.
"""
import functools
import html
import numbers
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
# ---------------------------------------------
# singledispatch化したい関数に対して
# @functools.singledispatch デコレータを適用する
# 同じ呼び出しで呼び先を分岐させたい関数に対して
# @関数名.register(型) を付与すると登録される。
# ---------------------------------------------
@functools.singledispatch
def htmlescape(obj):
content = html.escape(repr(obj))
return f'<pre>{content}</pre>'
@htmlescape.register(str)
def _(text):
return f'<p>{text}</p>'
@htmlescape.register(numbers.Integral)
def _(n):
return f'<pre>0x{n}</pre>'
class Sample(SampleBase):
    """Demonstrates htmlescape()'s type-based dispatch on a tuple, a str and an int."""

    def exec(self):
        pr('singledispatch(obj)', htmlescape((1, 2, 3)))
        pr('singledispatch(str)', htmlescape('hello world'))
        pr('singledispatch(int)', htmlescape(100))
def go():
    """Instantiate the sample and run its demonstration."""
    Sample().exec()
# Run the demonstration when executed as a script.
if __name__ == '__main__':
    go()
| Python | 0.000009 | |
17cdae7f50a7ed15c4e8a84cdb0000a32f824c5f | Add an oauth example script. | examples/outh/getaccesstoken.py | examples/outh/getaccesstoken.py | import webbrowser
import tweepy
"""
Query the user for their consumer key/secret
then attempt to fetch a valid access token.
"""
if __name__ == "__main__":
consumer_key = raw_input('Consumer key: ').strip()
consumer_secret = raw_input('Consumer secret: ').strip()
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
# Open authorization URL in browser
webbrowser.open(auth.get_authorization_url())
# Ask user for verifier pin
pin = raw_input('Verification pin number from twitter.com: ').strip()
# Get access token
token = auth.get_access_token(verifier=pin)
# Give user the access token
print 'Access token:'
print ' Key: %s' % token.key
print ' Secret: %s' % token.secret
| Python | 0 | |
62f44daaf325d94c7374836f3bb50fd5694c62c0 | Add utilities/extract_scores.py | wikiclass/utilities/extract_scores.py | wikiclass/utilities/extract_scores.py |
r"""
Gathers the scores for a set of revisions and
prints a TSV to stdout of the format:
<page_id>\t<title>\n<rev_id>\t<prediction>\t<weighted_sum>
See https://phabricator.wikimedia.org/T135684 for more information.
Usage:
extract_scores -h | --help
extract_scores --dump=<dump-file>... --model=<model-file>
[--verbose]
[--rev-scores=<path>]
Options:
-h --help Prints out this documentation.
--dump=<dump-file> Path to dump file.
--model=<model-file> Path to the model file.
--verbose Prints dots and stuff to stderr
--rev-scores=<path> The location to write output to.
[default: <stdout>]
"""
from revscoring import ScorerModel
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
import logging
import sys
import docopt
import mwxml
import mysqltsv
logger = logging.getLogger(__name__)

# Datasource handle for a revision's text -- the only feature input cached
# when solving the model's features in run().
r_text = revision_oriented.revision.text

# Ordinal weight per quality class, lowest (Stub) to highest (FA); used both
# as the numeric "prediction" column and for the weighted sum.
CLASS_WEIGHTS = {
    'Stub': 0,
    'Start': 1,
    'C': 2,
    'B': 3,
    'GA': 4,
    'FA': 5
}
def main(argv=None):
    """CLI entry point: parse docopt args, load the model, then delegate to run()."""
    args = docopt.docopt(__doc__, argv=argv)
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
    )
    dumps = args['--dump']
    with open(args['--model']) as f:
        model = ScorerModel.load(f)

    # "<stdout>" is the documented default sentinel for --rev-scores.
    headers=["page_id", "title", "rev_id", "prediction", "weighted_sum"]
    if args['--rev-scores'] == "<stdout>":
        rev_scores = mysqltsv.Writer(sys.stdout, headers=headers)
    else:
        rev_scores = mysqltsv.Writer(open(args['--rev-scores'], "w"),
                                     headers=headers)

    verbose = args['--verbose']

    run(dumps, model, rev_scores, verbose=verbose)
def run(paths, model, rev_scores, verbose=False):
    """Score every main-namespace revision in *paths* and write one TSV row each.

    :param paths: XML dump file paths, processed in parallel by mwxml.map().
    :param model: Loaded revscoring ScorerModel.
    :param rev_scores: mysqltsv Writer receiving the output rows.
    :param verbose: When True, echo each numeric prediction to stderr.
    """

    def process_dump(dump, path):
        # Yield (rev_id, score, title, page_id) for namespace-0 revisions.
        for page in dump:
            if int(page.namespace) != 0:
                continue
            for revision in page:
                feature_values = list(solve(model.features,
                                            cache={r_text: revision.text}))
                yield (revision.id, model.score(feature_values), page.title, page.id)

    for rev_id, score, title, page_id in mwxml.map(process_dump, paths):
        # Expectation of the class ordinal under the predicted distribution.
        weighted_sum = sum(CLASS_WEIGHTS[cls] * score['probability'][cls]
                           for cls in score['probability'])
        rev_scores.write(
            [page_id,
             title,
             rev_id,
             CLASS_WEIGHTS[score['prediction']],
             weighted_sum]
        )
        if verbose:
            # BUG FIX: CLASS_WEIGHTS values are ints and sys.stderr.write()
            # requires a string; this previously raised TypeError.
            sys.stderr.write(str(CLASS_WEIGHTS[score['prediction']]))
            sys.stderr.flush()

    if verbose:
        sys.stderr.write("\n")
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of dumping a traceback.
        sys.stderr.write("\n^C Caught. Exiting...")
| Python | 0 | |
c4e8e6d73e70e568d4c4386d7c1bab07ade2b8f0 | use DFS instead of BFS to do checking, it's simpler and results in more obvious message ordering | tc/checker.py | tc/checker.py | import logging
from django.template import loader, base, loader_tags, defaulttags
class TemplateChecker(object):
    """Loads a Django template and runs every registered Rule over its node tree."""

    # Populated automatically by RuleMeta as Rule subclasses are defined.
    registered_rules = []

    def __init__(self):
        self.warnings = []
        self.errors = []

    def check_template(self, path):
        """
        Checks the given template for badness.
        """
        try:
            template = loader.get_template(path)
        except (base.TemplateSyntaxError, base.TemplateDoesNotExist), e:
            self.errors.append(e)
            return

        # One fresh rule instance per check, so per-template state is isolated.
        rules = [r(self, template) for r in self.registered_rules]

        # depth-first search of the template nodes
        nodes = template.nodelist
        self._recursive_check(nodes, [], rules)

    def _recursive_check(self, nodes, ancestors, rules):
        # Walk the node tree depth-first; 'ancestors' is the path from the root.
        for node in nodes:
            node.parent = ancestors[-1] if ancestors else None

            children = None
            if isinstance(node, base.TextNode):
                if not node.s.strip():
                    # skip further processing for blank text nodes
                    continue
            elif getattr(node, 'nodelist', None):
                children = node.nodelist

            valid = True
            for rule in rules:
                if rule.visit_node(node) is False:
                    valid = False
                    rule.log(node)

            # Only recurse into the children of nodes every rule accepted.
            if valid and children:
                self._recursive_check(children, ancestors+[node], rules)

    def _log(self, level, node, message):
        # TODO get line number of node in template somehow
        logging.log(level, message)

    def info(self, node, message):
        self._log(logging.INFO, node, message)

    def warn(self, node, message):
        # Warnings are collected on the checker as well as logged.
        self.warnings.append(message)
        self._log(logging.WARN, node, message)

    def error(self, node, message):
        self.errors.append(message)
        self._log(logging.ERROR, node, message)
### RULES - base classes
class RuleMeta(type):
    """
    Automatically register rule classes with TemplateChecker
    """
    def __new__(meta, className, bases, classDict):
        cls = type.__new__(meta, className, bases, classDict)
        try:
            # The abstract Rule base is defined after this metaclass; until the
            # name exists, the NameError keeps Rule from registering itself.
            Rule
        except NameError:
            pass
        else:
            TemplateChecker.registered_rules.append(cls)
        return cls
class Rule(object):
    """
    Determines when a node is legal and when it isn't.
    Nodes are visited in a depth-first fashion.
    """
    # Subclasses are auto-registered with TemplateChecker by this metaclass.
    __metaclass__ = RuleMeta

    def __init__(self, checker, template):
        """
        Create a Rule for the given checker and template.
        """
        # Scratch space for subclasses to carry state between visits.
        self._info = {}
        self.checker = checker
        self.template = template

    def visit_node(self, node):
        """
        Returns whether a node is valid in the current context.
        If False, the node's children will not be processed.
        """
        return None

    def log(self, node):
        """
        Must be implemented to log an error or warning for the node.
        This is only called if visit_node() returns False.
        """
        raise NotImplementedError
### RULES - actual rules
class TextOutsideBlocksInExtended(Rule):
    """
    No point having text nodes outside of blocks in extended templates.
    """
    def visit_node(self, node):
        if isinstance(node, loader_tags.ExtendsNode):
            # Remember the {% extends %} node; nodes after it are checked.
            self._info['extends_node'] = node
        elif self._info.get('extends_node'):
            # Only block/load/comment tags are meaningful directly under the
            # extends node of an extended template.
            if not isinstance(node, (loader_tags.BlockNode, defaulttags.LoadNode, defaulttags.CommentNode)):
                if node.parent == self._info['extends_node']:
                    return False

    def log(self, node):
        self.checker.warn(node, 'Text outside of blocks in extended template')
| import logging
from django.template import loader, base, loader_tags, defaulttags
class TemplateChecker(object):
    """Loads a Django template and runs every registered Rule over its nodes."""

    # Populated automatically by RuleMeta as Rule subclasses are defined.
    registered_rules = []

    def __init__(self):
        self.warnings = []
        self.errors = []

    def check_template(self, path):
        """
        Checks the given template for badness.
        """
        try:
            template = loader.get_template(path)
        except (base.TemplateSyntaxError, base.TemplateDoesNotExist), e:
            self.errors.append(e)
            return

        # One fresh rule instance per check, so per-template state is isolated.
        rules = [r(self, template) for r in self.registered_rules]

        # breadth-first search of the template nodes
        #TODO should probably use deque, since we're doing popleft() a lot?
        nodes = template.nodelist[:]

        # set parent nodes, so rules can traverse up the hierarchy if they want
        for n in nodes:
            n.parent = None

        while nodes:
            node = nodes.pop(0)
            children = []
            if isinstance(node, base.TextNode):
                if not node.s.strip():
                    # skip further processing for blank text nodes
                    continue
            elif getattr(node, 'nodelist', None):
                children = node.nodelist[:]
                for child in children:
                    child.parent = node

            valid = True
            for rule in rules:
                if rule.visit_node(node) is False:
                    valid = False
                    rule.log(node)

            # Enqueue children only when every rule accepted the node.
            if valid:
                nodes.extend(children)

    def _log(self, level, node, message):
        # TODO get line number of node in template somehow
        logging.log(level, message)

    def info(self, node, message):
        self._log(logging.INFO, node, message)

    def warn(self, node, message):
        # Warnings are collected on the checker as well as logged.
        self.warnings.append(message)
        self._log(logging.WARN, node, message)

    def error(self, node, message):
        self.errors.append(message)
        self._log(logging.ERROR, node, message)
### RULES - base classes
class RuleMeta(type):
    """
    Automatically register rule classes with TemplateChecker
    """
    def __new__(meta, className, bases, classDict):
        cls = type.__new__(meta, className, bases, classDict)
        try:
            # The abstract Rule base is defined after this metaclass; until the
            # name exists, the NameError keeps Rule from registering itself.
            Rule
        except NameError:
            pass
        else:
            TemplateChecker.registered_rules.append(cls)
        return cls
class Rule(object):
    """
    Determines when a node is legal and when it isn't.
    Nodes are visited in a breadth-first fashion.
    """
    # Subclasses are auto-registered with TemplateChecker by this metaclass.
    __metaclass__ = RuleMeta

    def __init__(self, checker, template):
        """
        Create a Rule for the given checker and template.
        """
        # Scratch space for subclasses to carry state between visits.
        self._info = {}
        self.checker = checker
        self.template = template

    def visit_node(self, node):
        """
        Returns whether a node is valid in the current context.
        If False, the node's children will not be processed.
        """
        return None

    def log(self, node):
        """
        Must be implemented to log an error or warning for the node.
        This is only called if visit_node() returns False.
        """
        raise NotImplementedError
### RULES - actual rules
class TextOutsideBlocksInExtended(Rule):
    """
    No point having text nodes outside of blocks in extended templates.
    """
    def visit_node(self, node):
        if isinstance(node, loader_tags.ExtendsNode):
            # Remember the {% extends %} node; nodes after it are checked.
            self._info['extends_node'] = node
        elif self._info.get('extends_node'):
            # Only block/load/comment tags are meaningful directly under the
            # extends node of an extended template.
            if not isinstance(node, (loader_tags.BlockNode, defaulttags.LoadNode, defaulttags.CommentNode)):
                if node.parent == self._info['extends_node']:
                    return False

    def log(self, node):
        self.checker.warn(node, 'Non-empty text node outside of blocks in extended template')
| Python | 0 |
3898bec1a5470c79f93e7c69f6700a4af1801670 | Create love6.py | Python/CodingBat/love6.py | Python/CodingBat/love6.py | # http://codingbat.com/prob/p100958
def love6(a, b):
    """True when a or b is 6, or when their sum or absolute difference is 6."""
    return 6 in (a, b) or (a + b) == 6 or abs(a - b) == 6
| Python | 0 | |
d27a9e09659a8d990b7b07963fb72fe2d25572c2 | test. nothing important | shutdowntimer.py | shutdowntimer.py | #!/usr/bin/python3
# use python 3.x
# simple shutdown timer script for windows
import os
print("-----SHUTDOWN TIMER-----")
# Prompt until the user either schedules (S) or cancels (C) a shutdown.
while(True):
    a = input("Press S to schedule shutdown.\nPress C to cancel shutdown.\n")
    if(a == 's' or a == 'S'):
        # Non-numeric or empty input falls back to 0.
        try:
            hours = int(input("\n\nEnter hours: "))
        except ValueError:
            hours = 0
        try:
            minutes = int(input("Enter minutes: "))
        except ValueError:
            minutes=0
        seconds = hours * 60 * 60 + minutes * 60
        # Windows: 'shutdown -s -t <seconds>' schedules a shutdown.
        os.system('shutdown -s -t {}'.format(seconds))
        print("Your system will shutdown in {} hours and {} minutes".format(hours,minutes))
        break
    elif(a=='C' or a=='c'):
        # 'shutdown -a' aborts a previously scheduled shutdown.
        os.system('shutdown -a')
        break
    else:
        print("Sorry. Try again.")
| Python | 0.999986 | |
be095fdb2163575803020cefcfa0d86cff1d990f | Create new package (#6453) | var/spack/repos/builtin/packages/r-lars/package.py | var/spack/repos/builtin/packages/r-lars/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLars(RPackage):
    """Efficient procedures for fitting an entire lasso sequence with the cost
    of a single least squares fit."""

    homepage = "https://cran.r-project.org/web/packages/lars/index.html"
    url = "https://cran.r-project.org/src/contrib/lars_1.2.tar.gz"
    # Older releases are fetched from the CRAN archive listing.
    list_url = "https://cran.rstudio.com/src/contrib/Archive/lars"

    depends_on('r@3.4.0:3.4.9')

    # Each version pairs the release number with its archive checksum.
    version('1.2', '2571bae325f6cba1ad0202ea61695b8c')
    version('1.1', 'e94f6902aade09b13ec25ba2381384e5')
    version('0.9-8', 'e6f9fffab2d83898f6d3d811f04d177f')
| Python | 0 | |
4acf6d76bf7ec982573331835f7bcddd8487b18b | Add package for unison | var/spack/repos/builtin/packages/unison/package.py | var/spack/repos/builtin/packages/unison/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Unison(Package):
    """Unison is a file-synchronization tool for OSX, Unix, and
    Windows. It allows two replicas of a collection of files and
    directories to be stored on different hosts (or different disks
    on the same host), modified separately, and then brought up to
    date by propagating the changes in each replica to the
    other."""

    homepage = "https://www.cis.upenn.edu/~bcpierce/unison/"
    url = "https://www.seas.upenn.edu/~bcpierce/unison//download/releases/stable/unison-2.48.3.tar.gz"

    version('2.48.4', '5334b78c7e68169df7de95f4c6c4b60f')

    depends_on('ocaml', type='build')

    # Serial build -- presumably the makefile is not parallel-safe; confirm.
    parallel = False

    def install(self, spec, prefix):
        # Generate the project metadata, then build the text (no-GUI) UI.
        make('./mkProjectInfo')
        make('UISTYLE=text')

        # Install by hand: copy the built binary into the prefix.
        mkdirp(prefix.bin)
        install('unison', prefix.bin)
        set_executable(join_path(prefix.bin, 'unison'))
| Python | 0 | |
77d3756d27758276c084cf20693202cfa645df3e | Add fptool.py that will replace flash_fp_mcu | util/fptool.py | util/fptool.py | #!/usr/bin/env python3
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to manage the fingerprint system on Chrome OS."""
import argparse
import os
import shutil
import subprocess
import sys
def cmd_flash(args: argparse.Namespace) -> int:
    """
    Flash the entire firmware FPMCU using the native bootloader.

    This requires the Chromebook to be in dev mode with hardware write protect
    disabled.
    """
    # flash_fp_mcu does the actual flashing; bail out early if it is missing.
    if shutil.which('flash_fp_mcu') is None:
        print('Error - The flash_fp_mcu utility does not exist.')
        return 1

    command = ['flash_fp_mcu']
    if args.image:
        # An explicit image path was given; it must be an existing file.
        if not os.path.isfile(args.image):
            print(f'Error - image {args.image} is not a file.')
            return 1
        command.append(args.image)

    print(f'Running {" ".join(command)}.')
    # Flush so our banner appears before the subprocess's own output.
    sys.stdout.flush()
    completed = subprocess.run(command)
    return completed.returncode
def main(argv: list) -> int:
    """Parse *argv*, dispatch to the chosen subcommand, and return its exit code."""
    parser = argparse.ArgumentParser(description=__doc__)
    subparsers = parser.add_subparsers(dest='subcommand', title='subcommands')
    # This method of setting required is more compatible with older python.
    subparsers.required = True

    # Parser for "flash" subcommand.
    flash_parser = subparsers.add_parser('flash', help=cmd_flash.__doc__)
    flash_parser.add_argument(
        'image', nargs='?', help='Path to the firmware image')
    flash_parser.set_defaults(func=cmd_flash)

    opts = parser.parse_args(argv)
    return opts.func(opts)
if __name__ == '__main__':
    # Propagate the subcommand's exit code to the shell.
    sys.exit(main(sys.argv[1:]))
| Python | 0.999999 | |
bc32b2bccc82caecea0cf936e13c3ae70d0e9486 | Add script to remove broken images. | utils/check.py | utils/check.py | from pathlib import Path
from PIL import Image
from concurrent.futures import ProcessPoolExecutor
import os
import sys
def verify_or_delete(filename):
    """Return True when *filename* decodes as an image, False when PIL fails.

    NOTE(review): despite the name, this function never deletes anything;
    the caller is responsible for removing files that fail verification.
    """
    try:
        image = Image.open(filename)
        image.load()  # force a full decode, not just the header read
    except OSError:
        return False
    return True
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print('Remove Broken Images\nUsage: python check.py <dir>')
        sys.exit(1)

    # BUG FIX: was `sys.args[1]` (no such attribute); the directory to scan
    # comes from sys.argv.
    filenames = list(Path(sys.argv[1]).rglob('*.*'))
    with ProcessPoolExecutor() as executor:
        broken, total = 0, len(filenames)
        # Workers only verify; deletion happens here in the parent process.
        jobs = executor.map(verify_or_delete, filenames)
        for i, (filename, verified) in enumerate(zip(filenames, jobs)):
            if not verified:
                broken += 1
                # os.remove is quote-safe and portable, unlike shelling out
                # to `rm "%s"`, which broke on filenames containing quotes.
                os.remove(filename)
            print('Checking %d/%d, %d deleted...' %
                  (i + 1, total, broken), end='\r')
        print('\nDone.')
| Python | 0 | |
388bbd915a5e40a2e096eb22ab294ffcbd3db936 | Add a gmm, currently wrapping sklearn | bananas/model.py | bananas/model.py | import numpy
# FIXME: copy the functions here
from sklearn.mixture.gmm import log_multivariate_normal_density, logsumexp
class GMM(object):
    """Gaussian mixture density with fixed parameters.

    Parameters
    ----------
    weights : array_like, shape (nc,)
        Mixture weights; assumed to sum to 1.
    means : array_like, shape (nc, nd)
        Component means.
    covs : array_like, shape (nc, nd, nd)
        Full covariance matrix of each component.
    """
    def __init__(self, weights, means, covs):
        self.weights = numpy.array(weights)
        self.means = numpy.array(means)
        self.covs = numpy.array(covs)

    def score(self, X, return_responsibilities=False):
        """Return the per-sample log density log p(x) under the mixture.

        X is (n, nd), or (n,) for 1-d data.  When
        ``return_responsibilities`` is True, also return the (n, nc)
        posterior probability of each component per sample.
        """
        nc = len(self.weights)
        X = numpy.array(X)
        if X.ndim == 1:
            X = X[:, None]
        if X.size == 0:
            # BUG FIX: the empty-input path used to return a tuple even
            # when responsibilities were not requested.
            logprob = numpy.array([])
            if return_responsibilities:
                return logprob, numpy.empty((0, nc))
            return logprob
        if X.shape[1] != self.means.shape[1]:
            raise ValueError('The shape of X is not compatible with self')

        # Per-component log densities, computed with numpy only.  This
        # resolves the module's FIXME about copying the sklearn helpers
        # (log_multivariate_normal_density / logsumexp) into this file.
        # Also fixes the unbalanced parenthesis in the original expression.
        nd = X.shape[1]
        lpr = numpy.empty((len(X), nc))
        for k in range(nc):
            diff = X - self.means[k]
            _, logdet = numpy.linalg.slogdet(self.covs[k])
            # Mahalanobis distance via a linear solve (no explicit inverse).
            sol = numpy.linalg.solve(self.covs[k], diff.T).T
            maha = numpy.einsum('ij,ij->i', diff, sol)
            lpr[:, k] = -0.5 * (nd * numpy.log(2 * numpy.pi) + logdet + maha)
        lpr = lpr + numpy.log(self.weights)

        # Numerically stable logsumexp over the component axis.
        vmax = lpr.max(axis=1)
        logprob = vmax + numpy.log(numpy.exp(lpr - vmax[:, None]).sum(axis=1))

        if return_responsibilities:
            responsibilities = numpy.exp(lpr - logprob[:, None])
            return logprob, responsibilities
        return logprob

    @classmethod
    def fit(kls, nc, X):
        """Fit an nc-component mixture to X and return a GMM instance.

        Raises ValueError when EM fails to converge.
        """
        # FIXME: get rid of this and add weights support
        # NOTE(review): sklearn.mixture.GMM is the legacy API; modern
        # sklearn uses GaussianMixture — verify against the pinned version.
        from sklearn import mixture
        model = mixture.GMM(nc, covariance_type='full', n_iter=100)
        model.fit(X)
        if not model.converged_:
            raise ValueError("Your data is strange. Gaussian mixture failed to converge")
        return kls(model.weights_, model.means_, model.covars_)
class Confidence(object):
    """Map raw model scores to confidence levels via an interpolation table.

    ``confidence_table`` is a (2, nb) array: row 0 holds score thresholds in
    increasing order, row 1 the confidence level at each threshold.
    """
    # BUG FIX: the original __init__ definition was missing its colon.
    def __init__(self, model, confidence_table):
        self.model = model
        self.confidence_table = confidence_table

    def score(self, X):
        """Return the confidence level of each sample in X.

        Scores below the table range map to 1 (high confidence), above it to 0.
        """
        x, y = self.confidence_table
        sc = self.model.score(X)
        return numpy.interp(sc, x, y, left=1., right=0.)

    @classmethod
    def fit(kls, model, X, vmin=-5, vmax=0, nb=100):
        """Build a confidence table from the score distribution of X.

        vmin/vmax give the log10 range of (1 - confidence) sampled at nb points.
        """
        sc = model.score(X)
        confidence_levels = 1 - numpy.logspace(vmin, vmax, num=nb)
        # FIXME: add weight support here
        # Percentile arguments increase with k, so the resulting score
        # thresholds are increasing — as numpy.interp requires.
        sc_cl = numpy.percentile(sc, 100. - confidence_levels * 100.)
        confidence_table = numpy.array([sc_cl, confidence_levels])
        return kls(model, confidence_table)
| Python | 0 | |
b1bea70df1f62e4c0447a406b77266b804eec5df | add new Package (#15894) | var/spack/repos/builtin/packages/nanomsg/package.py | var/spack/repos/builtin/packages/nanomsg/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Nanomsg(CMakePackage):
    """The nanomsg library is a simple high-performance
    implementation of several 'scalability protocols'"""
    homepage = "https://nanomsg.org/"
    url = "https://github.com/nanomsg/nanomsg/archive/1.0.0.tar.gz"
    # Checksums correspond to the GitHub release tarballs above.
    version('1.1.5', sha256='218b31ae1534ab897cb5c419973603de9ca1a5f54df2e724ab4a188eb416df5a')
    version('1.0.0', sha256='24afdeb71b2e362e8a003a7ecc906e1b84fd9f56ce15ec567481d1bb33132cc7')
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.