commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
2321603eb706745e20e70d156a3894a7f3ac38eb | Add the Gamerscore and Tier of the account (#12867) | homeassistant/components/sensor/xbox_live.py | homeassistant/components/sensor/xbox_live.py | """
Sensor for Xbox Live account status.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.xbox_live/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_API_KEY, STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['xboxapi==0.1.1']
_LOGGER = logging.getLogger(__name__)
CONF_XUID = 'xuid'
ICON = 'mdi:xbox'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_XUID): vol.All(cv.ensure_list, [cv.string])
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Xbox platform."""
from xboxapi import xbox_api
api = xbox_api.XboxApi(config.get(CONF_API_KEY))
devices = []
# request personal profile to check api connection
profile = api.get_profile()
if profile.get('error_code') is not None:
_LOGGER.error("Can't setup XboxAPI connection. Check your account or "
" api key on xboxapi.com. Code: %s Description: %s ",
profile.get('error_code', STATE_UNKNOWN),
profile.get('error_message', STATE_UNKNOWN))
return
for xuid in config.get(CONF_XUID):
new_device = XboxSensor(hass, api, xuid)
if new_device.success_init:
devices.append(new_device)
if devices:
add_devices(devices, True)
class XboxSensor(Entity):
"""A class for the Xbox account."""
def __init__(self, hass, api, xuid):
"""Initialize the sensor."""
self._hass = hass
self._state = STATE_UNKNOWN
self._presence = {}
self._xuid = xuid
self._api = api
# get profile info
profile = self._api.get_user_gamercard(self._xuid)
if profile.get('success', True) and profile.get('code') is None:
self.success_init = True
self._gamertag = profile.get('gamertag')
self._gamerscore = profile.get('gamerscore')
self._picture = profile.get('gamerpicSmallSslImagePath')
self._tier = profile.get('tier')
else:
_LOGGER.error("Can't get user profile %s. "
"Error Code: %s Description: %s",
self._xuid,
profile.get('code', STATE_UNKNOWN),
profile.get('description', STATE_UNKNOWN))
self.success_init = False
@property
def name(self):
"""Return the name of the sensor."""
return self._gamertag
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {}
attributes['gamerscore'] = self._gamerscore
attributes['tier'] = self._tier
for device in self._presence:
for title in device.get('titles'):
attributes[
'{} {}'.format(device.get('type'), title.get('placement'))
] = title.get('name')
return attributes
@property
def entity_picture(self):
"""Avatar of the account."""
return self._picture
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
def update(self):
"""Update state data from Xbox API."""
presence = self._api.get_user_presence(self._xuid)
self._state = presence.get('state', STATE_UNKNOWN)
self._presence = presence.get('devices', {})
| """
Sensor for Xbox Live account status.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.xbox_live/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_API_KEY, STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['xboxapi==0.1.1']
_LOGGER = logging.getLogger(__name__)
CONF_XUID = 'xuid'
ICON = 'mdi:xbox'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_XUID): vol.All(cv.ensure_list, [cv.string])
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Xbox platform."""
from xboxapi import xbox_api
api = xbox_api.XboxApi(config.get(CONF_API_KEY))
devices = []
# request personal profile to check api connection
profile = api.get_profile()
if profile.get('error_code') is not None:
_LOGGER.error("Can't setup XboxAPI connection. Check your account or "
" api key on xboxapi.com. Code: %s Description: %s ",
profile.get('error_code', STATE_UNKNOWN),
profile.get('error_message', STATE_UNKNOWN))
return
for xuid in config.get(CONF_XUID):
new_device = XboxSensor(hass, api, xuid)
if new_device.success_init:
devices.append(new_device)
if devices:
add_devices(devices, True)
class XboxSensor(Entity):
"""A class for the Xbox account."""
def __init__(self, hass, api, xuid):
"""Initialize the sensor."""
self._hass = hass
self._state = STATE_UNKNOWN
self._presence = {}
self._xuid = xuid
self._api = api
# get profile info
profile = self._api.get_user_gamercard(self._xuid)
if profile.get('success', True) and profile.get('code') is None:
self.success_init = True
self._gamertag = profile.get('gamertag')
self._picture = profile.get('gamerpicSmallSslImagePath')
else:
_LOGGER.error("Can't get user profile %s. "
"Error Code: %s Description: %s",
self._xuid,
profile.get('code', STATE_UNKNOWN),
profile.get('description', STATE_UNKNOWN))
self.success_init = False
@property
def name(self):
"""Return the name of the sensor."""
return self._gamertag
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {}
for device in self._presence:
for title in device.get('titles'):
attributes[
'{} {}'.format(device.get('type'), title.get('placement'))
] = title.get('name')
return attributes
@property
def entity_picture(self):
"""Avatar of the account."""
return self._picture
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
def update(self):
"""Update state data from Xbox API."""
presence = self._api.get_user_presence(self._xuid)
self._state = presence.get('state', STATE_UNKNOWN)
self._presence = presence.get('devices', {})
| Python | 0 |
0f5c0168b257436882f837e5d521cce46a740ad6 | Add symbol translator to make utf-8 variables compilable | finat/greek_alphabet.py | finat/greek_alphabet.py | """Translation table from utf-8 to greek variable names, taken from:
https://gist.github.com/piquadrat/765262#file-greek_alphabet-py
"""
def translate_symbol(symbol):
"""Translates utf-8 sub-strings into compilable variable names"""
name = symbol.decode("utf-8")
for k, v in greek_alphabet.iteritems():
name = name.replace(k, v)
return name
greek_alphabet = {
u'\u0391': 'Alpha',
u'\u0392': 'Beta',
u'\u0393': 'Gamma',
u'\u0394': 'Delta',
u'\u0395': 'Epsilon',
u'\u0396': 'Zeta',
u'\u0397': 'Eta',
u'\u0398': 'Theta',
u'\u0399': 'Iota',
u'\u039A': 'Kappa',
u'\u039B': 'Lamda',
u'\u039C': 'Mu',
u'\u039D': 'Nu',
u'\u039E': 'Xi',
u'\u039F': 'Omicron',
u'\u03A0': 'Pi',
u'\u03A1': 'Rho',
u'\u03A3': 'Sigma',
u'\u03A4': 'Tau',
u'\u03A5': 'Upsilon',
u'\u03A6': 'Phi',
u'\u03A7': 'Chi',
u'\u03A8': 'Psi',
u'\u03A9': 'Omega',
u'\u03B1': 'alpha',
u'\u03B2': 'beta',
u'\u03B3': 'gamma',
u'\u03B4': 'delta',
u'\u03B5': 'epsilon',
u'\u03B6': 'zeta',
u'\u03B7': 'eta',
u'\u03B8': 'theta',
u'\u03B9': 'iota',
u'\u03BA': 'kappa',
u'\u03BB': 'lamda',
u'\u03BC': 'mu',
u'\u03BD': 'nu',
u'\u03BE': 'xi',
u'\u03BF': 'omicron',
u'\u03C0': 'pi',
u'\u03C1': 'rho',
u'\u03C3': 'sigma',
u'\u03C4': 'tau',
u'\u03C5': 'upsilon',
u'\u03C6': 'phi',
u'\u03C7': 'chi',
u'\u03C8': 'psi',
u'\u03C9': 'omega',
}
| Python | 0 | |
03951a227bfafb0b1017354bdbf3a1247322fc9b | Fix cycler tests | axelrod/tests/unit/test_cycler.py | axelrod/tests/unit/test_cycler.py | """Test for the Cycler strategies."""
import itertools
import axelrod
from .test_player import TestPlayer, test_four_vector
C, D = 'C', 'D'
class TestAntiCycler(TestPlayer):
name = "AntiCycler"
player = axelrod.AntiCycler
expected_classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating"""
responses = [C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C]
self.responses_test([], [], responses)
def test_cycler_factory(cycle):
class TestCycler(TestPlayer):
name = "Cycler %s" % cycle
player = getattr(axelrod, 'Cycler%s' % cycle)
expected_classifier = {
'memory_depth': len(cycle),
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating"""
for i in range(20):
responses = itertools.islice(itertools.cycle(cycle), i)
self.responses_test([], [], responses)
return TestCycler
TestCyclerCCD = test_cycler_factory("CCD")
TestCyclerCCCD = test_cycler_factory("CCCD")
TestCyclerCCCCCD = test_cycler_factory("CCCCCD")
| """Test for the Cycler strategies."""
import itertools
import axelrod
from .test_player import TestPlayer, test_four_vector
C, D = 'C', 'D'
class TestAntiCycler(TestPlayer):
name = "AntiCycler"
player = axelrod.AntiCycler
expected_classifier = {
'memory_depth': float('inf'),
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating"""
responses = [C, D, C, C, D, C, C, C, D, C, C, C, C, D, C, C, C]
self.responses_test([], [], responses)
def test_cycler_factory(cycle):
class TestCycler(TestPlayer):
name = "Cycler %s" % cycle
player = getattr(axelrod, 'Cycler%s' % cycle)
expected_classifier = {
'memory_depth': 1,
'stochastic': False,
'inspects_source': False,
'manipulates_source': False,
'manipulates_state': False
}
def test_strategy(self):
"""Starts by cooperating"""
for i in range(20):
responses = itertools.islice(itertools.cycle(cycle), i)
self.responses_test([], [], responses)
return TestCycler
TestCyclerCCD = test_cycler_factory("CCD")
TestCyclerCCCD = test_cycler_factory("CCCD")
TestCyclerCCCCCD = test_cycler_factory("CCCCCD")
| Python | 0 |
ca2269c5ae568cd63253af7bc614a79d26f7f8ac | Add ns_drop_indexes command. | needlestack/management/commands/ns_drop_indexes.py | needlestack/management/commands/ns_drop_indexes.py | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, unicode_literals
from django.core.management.base import BaseCommand, CommandError
from needlestack import commands
class Command(BaseCommand):
help = 'Sync all defined indexes with a current backend'
option_list = BaseCommand.option_list + (
make_option('--backend',
action='store',
dest='backend',
default='default'),)
def handle(self, *args, **options):
commands.drop_indexes(options["backend"], options["verbosity"])
| Python | 0 | |
b8a84e612d67f7948d6dec8c202ac8a73390f9dc | make sure all protein ids are unique in a genbank file | proteins/unique_protein_ids.py | proteins/unique_protein_ids.py | """
Test a genbank file and make sure all the protein_ids are unique
"""
import os
import sys
import argparse
from Bio import SeqIO
__author__ = 'Rob Edwards'
__copyright__ = 'Copyright 2020, Rob Edwards'
__credits__ = ['Rob Edwards']
__license__ = 'MIT'
__maintainer__ = 'Rob Edwards'
__email__ = 'raedwards@gmail.com'
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=" ")
parser.add_argument('-f', help='genbank file', required=True)
args = parser.parse_args()
pids = set()
rc = 0
for seq in SeqIO.parse(args.f, "genbank"):
rc+=1;
print(f"record {rc}: {seq.id}")
for feat in seq.features:
if feat.type != "CDS":
continue
if 'protein_id' not in feat.qualifiers:
thisid = " ".join(feat.qualifiers.get('locus_tag', [str(feat.location)]))
print(f"No protein id in {thisid}")
continue
pid = "|".join(feat.qualifiers["protein_id"])
if pid in pids:
print(f"{pid} is not unique")
pids.add(pid) | Python | 0 | |
61fa404da3eeb3b695b12f398c27f641e1e681e2 | add codegen script for fname.pyf.src -> _fnamemodule.c | tools/generate_f2pymod.py | tools/generate_f2pymod.py | """
Process f2py template files (`filename.pyf.src` -> `filename.pyf`)
Usage: python generate_pyf.py filename.pyf.src -o filename.pyf
"""
import os
import sys
import subprocess
import argparse
from numpy.distutils.from_template import process_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str,
help="Path to the input file")
parser.add_argument("-o", "--outfile", type=str,
help="Path to the output file")
args = parser.parse_args()
# Read .pyf.src file
code = process_file(args.infile)
# Write out the .pyf file
outdir = os.path.split(args.outfile)[0]
outdir_abs = os.path.join(os.getcwd(), outdir)
fname_pyf = os.path.join(outdir,
os.path.splitext(os.path.split(args.infile)[1])[0])
with open(fname_pyf, 'w') as f:
f.write(code)
# Now invoke f2py to generate the C API module file
p = subprocess.Popen([sys.executable, '-m', 'numpy.f2py', fname_pyf,
'--build-dir', outdir_abs], #'--quiet'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.getcwd())
out, err = p.communicate()
if not (p.returncode == 0):
raise RuntimeError(f"Writing {args.outfile} with f2py failed!\n"
f"{out}\n"
r"{err}")
if __name__ == "__main__":
main()
| Python | 0 | |
0f94251c7cc844042c9e3ce160d78e4d81d895ea | add log module | src/log.py | src/log.py | import logging
import os
from datetime import datetime
class LOG(object):
logger = None
def __init__(self, log_dir):
if log_dir:
if not os.path.exists(log_dir):
os.makedirs(log_dir)
self.logger = logging.getLogger('simple-db-migrate')
now = datetime.now()
filename = "%s/%s.log" %(os.path.abspath(log_dir), now.strftime("%Y%m%d%H%M%S"))
hdlr = logging.FileHandler(filename)
formatter = logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.DEBUG)
def debug(self, msg):
if self.logger:
self.logger.debug(msg)
def info(self, msg):
if self.logger:
self.logger.info(msg)
def error(self, msg):
if self.logger:
self.logger.error(msg)
def warn(self, msg):
if self.logger:
self.logger.warn(msg)
| Python | 0.000002 | |
bc567eda01abcaf23717f5da5f494c1be46f47da | Create ValAnagram_001.py | leetcode/242-Valid-Anagram/ValAnagram_001.py | leetcode/242-Valid-Anagram/ValAnagram_001.py | class Solution:
# @param {string} s
# @param {string} t
# @return {boolean}
def anaRepresentation(self, s):
p = {}
for c in s:
if c in p:
p[c] += 1
else:
p[c] = 1
return p
def isAnagram(self, s, t):
if len(s) != len(t):
return False
p = self.anaRepresentation(s)
q = self.anaRepresentation(t)
for c in p:
if c not in q or (c in q and p[c] != q[c]):
return False
return True
| Python | 0.000002 | |
682d6b3ca9c4a0dd49f9762ddd20ac746971e3eb | Create solution.py | leetcode/easy/find_the_difference/py/solution.py | leetcode/easy/find_the_difference/py/solution.py | class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
import collections
import itertools
c1 = collections.Counter(s)
c2 = collections.Counter(t)
for char in set(itertools.chain(s, t)):
if c1[char] != c2[char]:
return char
return None
| Python | 0.000018 | |
9e128fdd5af0598a233416de5a1e8f2d3a74fdc0 | Enforce unique paths and names | spaces/migrations/0006_unique_space_document.py | spaces/migrations/0006_unique_space_document.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-15 02:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spaces', '0005_document_space_doc'),
]
operations = [
migrations.AlterField(
model_name='space',
name='name',
field=models.CharField(max_length=100, unique=True),
),
migrations.AlterField(
model_name='space',
name='path',
field=models.CharField(max_length=40, unique=True),
),
migrations.AlterUniqueTogether(
name='document',
unique_together=set([('path', 'parent')]),
),
]
| Python | 0.00002 | |
0256868a3b261e598689eebdf5ac5f939ea20a0d | add test cases for mni module | lib/neuroimaging/reference/tests/test_mni.py | lib/neuroimaging/reference/tests/test_mni.py | import unittest
import numpy as N
import neuroimaging.reference.mni as mni
class MNITest(unittest.TestCase):
def testMNI(self):
""" ensure all elementes of the interface exist """
m = mni.MNI
g = mni.generic
m_v = mni.MNI_voxel
m_w = mni.MNI_world
m_m = mni.MNI_mapping
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
d91adef072e2150edde62a49bea4eecb6a26a6ac | add sns_notify script | sns_notify.py | sns_notify.py | #!/usr/bin/env python
from datetime import datetime, date
from dateutil import parser
from google_sheets import get_service
SHEET_ID = "1lpa9p_dCyTckREf09-oA2C6ZAMACCrgD9W3HQSKeoSI"
def is_valid_period(start, end):
"""
今日が start, end の範囲内かどうかを返す
:params start: 通知開始日の文字列または空文字
:params end: 通知終了日の文字列または空文字
:return: True: 通知範囲内、False: 通知範囲外
"""
# 文字列を date 型にする
try:
start = parser.parse(start).date()
except ValueError:
start = date(2000, 1, 1) # 過去の日付にする
try:
end = parser.parse(end).date()
except ValueError:
end = date(3000, 1, 1) # 未来の日付にする
today = date.today()
# 今日が範囲内かどうかを返す
return start <= today <= end
def sns_notify(row, now):
"""
スプレッドシートのデータ1行分をSNSに通知する。
データは以下の形式。
1. 通知日(YYYY/MM/DD または曜日指定)
2. 通知時刻
3. 送信メッセージ
4. 送信するURL
5. 通知開始日
6. 通知終了日
7. twitter通知フラグ(1なら通知)
8. facebook通知フラグ(1なら通知)
:param row: スプレッドシートの1行分のデータ
:param now: 現在時刻(datetime)
"""
# データの件数が少なかったらなにもしない
if len(row) < 7:
return
# 通知期間の範囲外ならなにもしない
if not is_valid_period(row[4], row[5]):
return
# 通知対象日時じゃなかったらなにもしない
# メッセージ送信する
if row[6] == '1':
pass
if row[7] == '1':
pass
def main():
"""
PyCon JP Twitter/Facebook通知シートからデータを読み込んで通知する
"""
now = datetime.now()
service = get_service()
# シートから全データを読み込む
result = service.spreadsheets().values().get(
spreadsheetId=SHEET_ID, range='messages!A4:H').execute()
for row in result.get('values', []):
# 1行のデータを元にSNSへの通知を実行
sns_notify(row, now)
if __name__ == '__main__':
main()
| Python | 0.000003 | |
a0789a4bad7747073257d8976534b33ab9862ec4 | Add unit test for IssueRegister view | feed/tests/test_issueregisterview.py | feed/tests/test_issueregisterview.py | from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from feed.views import IssueRegisterViewSet
from workflow.models import IssueRegister, Organization, TolaUser
class IssueRegisterViewsTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('john', 'lennon@thebeatles.com', 'johnpassword')
self.user.is_superuser = True
self.user.is_staff = True
self.user.save()
IssueRegister.objects.bulk_create([
IssueRegister(name='IssueRegister1'),
IssueRegister(name='IssueRegister2'),
])
factory = APIRequestFactory()
self.request_get = factory.get('/api/issueregister/')
self.request_post = factory.post('/api/issueregister/')
def test_list_issueregister_superuser(self):
self.request_get.user = self.user
view = IssueRegisterViewSet.as_view({'get': 'list'})
response = view(self.request_get)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 2)
def test_list_issueregister_normaluser(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request_get.user = self.user
view = IssueRegisterViewSet.as_view({'get': 'list'})
response = view(self.request_get)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
def test_list_issueregister_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
IssueRegister.objects.create(name='IssueRegister0', organization=organization)
self.request_get.user = self.user
view = IssueRegisterViewSet.as_view({'get': 'list'})
response = view(self.request_get)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
def test_create_issueregister_normaluser_one_result(self):
self.user.is_superuser = False
self.user.is_staff = False
self.user.save()
organization = Organization.objects.create(name="TestOrg")
TolaUser.objects.create(user=self.user, organization=organization)
self.request_post.user = self.user
view = IssueRegisterViewSet.as_view({'post': 'create'})
response = view(self.request_post)
self.assertEqual(response.status_code, 201)
# check if the obj created has the user organization
self.request_get.user = self.user
view = IssueRegisterViewSet.as_view({'get': 'list'})
response = view(self.request_get)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
| Python | 0 | |
05e8f84356c63ab953f5c2a3d3d06ee1760008d0 | Add list_queue plugin | flexget/plugins/filter/list_queue.py | flexget/plugins/filter/list_queue.py | import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('list_queue')
class ListQueue(object):
schema = {
'type': 'array',
'items': {
'allOf': [
{'$ref': '/schema/plugins?group=list'},
{
'maxProperties': 1,
'error_maxProperties': 'Plugin options within list_queue plugin must be indented 2 more spaces '
'than the first letter of the plugin name.',
'minProperties': 1
}
]
}
}
def on_task_filter(self, task, config):
for item in config:
for plugin_name, plugin_config in item.iteritems():
thelist = plugin.get_plugin_by_name(plugin_name).instance.get_list(plugin_config)
for entry in task.all_entries:
if entry in thelist:
entry.accept()
def on_task_learn(self, task, config):
for item in config:
for plugin_name, plugin_config in item.iteritems():
thelist = plugin.get_plugin_by_name(plugin_name).instance.get_list(plugin_config)
thelist -= task.accepted
@event('plugin.register')
def register_plugin():
plugin.register(ListQueue, 'list_queue', api_ver=2)
| Python | 0.000002 | |
f7a69e24912c3b9ed52201b52c79be4407884c3a | add module util for trying to resolve an ipv6 netmask to cidr. not perfect, but not meant to be either. | library/module_utils/network/f5/ipaddress.py | library/module_utils/network/f5/ipaddress.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
def ipv6_netmask_to_cidr(mask):
"""converts an IPv6 netmask to CIDR form
According to the link below, CIDR is the only official way to specify
a subset of IPv6. With that said, the same link provides a way to
loosely convert an netmask to a CIDR.
Arguments:
mask (string): The IPv6 netmask to convert to CIDR
Returns:
int: The CIDR representation of the netmask
References:
https://stackoverflow.com/a/33533007
http://v6decode.com/
"""
bit_masks = [
0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800,
0xfc00, 0xfe00, 0xff00, 0xff80, 0xffc0,
0xffe0, 0xfff0, 0xfff8, 0xfffc, 0xfffe,
0xffff
]
count = 0
try:
for w in mask.split(':'):
if not w or int(w, 16) == 0:
break
count += bit_masks.index(int(w, 16))
return count
except:
return -1
| Python | 0 | |
8922f9430ec2844a3a14621ad0625aa45999c92a | fix args order | lixian_hash.py | lixian_hash.py | #!/usr/bin/env python
import hashlib
import lixian_hash_ed2k
import lixian_hash_bt
import os
def lib_hash_file(h, path):
with open(path, 'rb') as stream:
while True:
bytes = stream.read(1024*1024)
if not bytes:
break
h.update(bytes)
return h.hexdigest()
def sha1_hash_file(path):
return lib_hash_file(hashlib.sha1(), path)
def verify_sha1(path, sha1):
return sha1_hash_file(path).lower() == sha1.lower()
def md5_hash_file(path):
return lib_hash_file(hashlib.md5(), path)
def verify_md5(path, md5):
return md5_hash_file(path).lower() == md5.lower()
def md4_hash_file(path):
return lib_hash_file(hashlib.new('md4'), path)
def verify_md4(path, md4):
return md4_hash_file(path).lower() == md4.lower()
def dcid_hash_file(path):
h = hashlib.sha1()
size = os.path.getsize(path)
with open(path, 'rb') as stream:
if size < 0xF000:
h.update(stream.read())
else:
h.update(stream.read(0x5000))
stream.seek(size/3)
h.update(stream.read(0x5000))
stream.seek(size-0x5000)
h.update(stream.read(0x5000))
return h.hexdigest()
def verify_dcid(path, dcid):
return dcid_hash_file(path).lower() == dcid.lower()
def main(args):
option = args.pop(0)
if option.startswith('--verify'):
hash_fun = {'--verify-sha1':verify_sha1,
'--verify-md5':verify_md5,
'--verify-md4':verify_md4,
'--verify-dcid':verify_dcid,
'--verify-ed2k':lixian_hash_ed2k.verify_ed2k_link,
'--verify-bt': lambda f, t: lixian_hash_bt.verify_bt_file(t, f),
}[option]
assert len(args) == 2
hash, path = args
if hash_fun(path, hash):
print 'looks good...'
else:
print 'failed...'
else:
hash_fun = {'--sha1':sha1_hash_file,
'--md5':md5_hash_file,
'--md4':md4_hash_file,
'--dcid':dcid_hash_file,
'--ed2k':lixian_hash_ed2k.generate_ed2k_link,
'--info-hash':lixian_hash_bt.info_hash,
}[option]
for f in args:
h = hash_fun(f)
print '%s *%s' % (h, f)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
main(args)
| #!/usr/bin/env python
import hashlib
import lixian_hash_ed2k
import lixian_hash_bt
import os
def lib_hash_file(h, path):
with open(path, 'rb') as stream:
while True:
bytes = stream.read(1024*1024)
if not bytes:
break
h.update(bytes)
return h.hexdigest()
def sha1_hash_file(path):
return lib_hash_file(hashlib.sha1(), path)
def verify_sha1(path, sha1):
return sha1_hash_file(path).lower() == sha1.lower()
def md5_hash_file(path):
return lib_hash_file(hashlib.md5(), path)
def verify_md5(path, md5):
return md5_hash_file(path).lower() == md5.lower()
def md4_hash_file(path):
return lib_hash_file(hashlib.new('md4'), path)
def verify_md4(path, md4):
return md4_hash_file(path).lower() == md4.lower()
def dcid_hash_file(path):
h = hashlib.sha1()
size = os.path.getsize(path)
with open(path, 'rb') as stream:
if size < 0xF000:
h.update(stream.read())
else:
h.update(stream.read(0x5000))
stream.seek(size/3)
h.update(stream.read(0x5000))
stream.seek(size-0x5000)
h.update(stream.read(0x5000))
return h.hexdigest()
def verify_dcid(path, dcid):
return dcid_hash_file(path).lower() == dcid.lower()
def main(args):
option = args.pop(0)
if option.startswith('--verify'):
hash_fun = {'--verify-sha1':verify_sha1,
'--verify-md5':verify_md5,
'--verify-md4':verify_md4,
'--verify-dcid':verify_dcid,
'--verify-ed2k':lixian_hash_ed2k.verify_ed2k_link,
'--verify-bt':lixian_hash_bt.verify_bt_file,
}[option]
assert len(args) == 2
hash, path = args
if hash_fun(path, hash):
print 'looks good...'
else:
print 'failed...'
else:
hash_fun = {'--sha1':sha1_hash_file,
'--md5':md5_hash_file,
'--md4':md4_hash_file,
'--dcid':dcid_hash_file,
'--ed2k':lixian_hash_ed2k.generate_ed2k_link,
'--info-hash':lixian_hash_bt.info_hash,
}[option]
for f in args:
h = hash_fun(f)
print '%s *%s' % (h, f)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
main(args)
| Python | 0.999403 |
0814bbf6867a4bdd9d92c63e467f237b6129ee28 | add solution for palindrome number | leetcode/palindrome-number/sol.py | leetcode/palindrome-number/sol.py | #!/usr/bin/env python
class Solution:
# @return a boolean
def isPalindrome(self, x):
if x == -1:
return True
def ll(x):
return 0 if x == 0 or x == -1 else ll(x/10)+1
p = x >= 0
l = ll(x)
print "x is %d l is %d" % (x, l)
t = x
for a in range(l/2):
mark = 10**(a)+10**(l-1-a)
b = (t / (10**(a))) % 10
b = b if p else 10-b
t = (t - b * mark) if p else (t+b*mark)
# print "t=%d" % (t)
if l % 2:
b = (t/(10**(l/2))) % 10
b = b if p else 10-b
t = (t - b * (10**(l/2))) if p else (t+b*(10**(l/2)))
return t == 0
if __name__ == "__main__":
sol = Solution()
print sol.isPalindrome(-2147483648)
print sol.isPalindrome(1234321)
print sol.isPalindrome(-1234321)
print sol.isPalindrome(1)
print sol.isPalindrome(-1)
print sol.isPalindrome(-11)
| Python | 0.999935 | |
d2a92c5d628f426c26374dea6cb37bd35ba18812 | print variables | bin/basenji_variables.py | bin/basenji_variables.py | #!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import os
import sys
import time
import h5py
import tensorflow as tf
from basenji import params
from basenji import seqnn
"""
basenji_variables.py
Print a model's variables, typically for debugging purposes.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file>'
parser = OptionParser(usage)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error('Must provide parameters, model, and test data HDF5')
else:
params_file = args[0]
model_file = args[1]
#######################################################
# model parameters and placeholders
job = params.read_job_params(params_file)
model = seqnn.SeqNN()
model.build(job)
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
print(v.name, v.shape)
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| Python | 0.000103 | |
517a326b2869190bb1c0a676f467e1529e119259 | Create enigma.py | enigma.py | enigma.py | from EnigmaMachine import Plugboard
from EnigmaMachine import Rotor
from EnigmaMachine import Reflector
from EnigmaMachine import Machine
import ConfigParser
def configureRotor(n):
# Opens the Rotor configurations file.
config_file = ConfigParser.RawConfigParser()
config_file.read('Config/rotor_config.cfg')
# Prints instructions to the user along with a list of the valid
# rotor configurations.
print "-" * 65
if n == 1:
print "Choose the first rotor and its starting position."
if n == 2:
print "Choose the second rotor and its starting position."
if n == 3:
print "Choose the third rotor and its starting position."
print "Select the rotor you wish to use. Valid choices are:"
print config_file.sections()
# Gets the rotor configuration from the user and ensures it's valid.
while True:
rotor_id = raw_input("Choose Rotor: ")
if config_file.has_section(rotor_id):
break
else:
print "No such rotor exists."
# Gets the starting position from the user and ensures it's valid.
print "Starting position should be a number between 0 and 25."
while True:
try:
rotor_starting_position = int(raw_input("Choose Starting Position: "))
# If user doesn't enter an integer, the resulting exception
# will be handled here.
except:
print 'You must enter a number.'
# If the integer entered by the user is not between 0 and 25,
# then the user will be informed their input is invalid and
# will be re-prompted.
else:
if rotor_starting_position < 0 or rotor_starting_position > 25:
print 'You must enter a number between 0 and 25.'
else: # If input is valid, the while loop is broken.
break
# Initializes the rotor and returns it to main().
rotor = Rotor.Rotor(rotor_id, rotor_starting_position, config_file)
return rotor
def configureReflector():
    """Interactively choose a reflector from Config/reflector_config.cfg.

    Loops until the user names a section that exists in the config file
    and returns a configured ``Reflector``.  (Python 2: ``raw_input``.)
    """
    # Opens the Reflector configurations file.
    config_file = ConfigParser.RawConfigParser()
    config_file.read('Config/reflector_config.cfg')
    # Prints the reflectors in the reflector configurations file.
    print "-" * 65
    print "Select the reflector you wish to use. Valid choices are:"
    print config_file.sections()
    # While loop ensures user's input is valid.
    while True:
        # Gets the reflector name from the user.
        reflector_id = raw_input("Choose reflector: ")
        # If reflector_id is not a section in the config file, the while loop
        # repeats. If reflector_id is valid, the while loop is broken.
        if config_file.has_section(reflector_id):
            break
        else:
            print "No such reflector exists."
    # Initializes the reflector and returns it to main().
    reflector = Reflector.Reflector(reflector_id, config_file)
    return reflector
def configurePlugboard():
    """Prompt for plugboard letter pairs and return a ``Plugboard``.

    Expected input format: comma-separated two-letter pairs,
    e.g. ``ab, cd, ef``.
    """
    # Explains how to configure the plugboard.
    print "-" * 65
    print "Choose the plugboard settings. The plugboard allows you to swap"
    print "one letter for another before and after it runs through the rotors."
    print "Input should take the form:"
    print "ab, cd, ef, gh"
    print "You can choose as many pairs as you like, but you cannot"
    print "repeat letters."
    # Gets the plugboard settings from the user.
    # NOTE(review): the string is not validated here; Plugboard is assumed
    # to handle malformed input -- confirm.
    pairs = raw_input('> ')
    # Configures the plugboard.
    plugboard = Plugboard.Plugboard(pairs)
    # Returns the plugboard to main().
    return plugboard
def main():
    """Build an Enigma machine from interactive choices, encrypt one
    message, and write the result to output.txt."""
    # Configures the machine: plugboard, three rotors, one reflector.
    enigma_machine = Machine.EnigmaMachine(
        configurePlugboard(),
        configureRotor(1),
        configureRotor(2),
        configureRotor(3),
        configureReflector()
    )
    # Gets the user's message.
    message = raw_input('Input Message: ')
    # Put's the message in the Enigma Machine.
    enigma_machine.inputMessage(message)
    # Encrypts the message
    converted_message = enigma_machine.convertMessage()
    # Opens an output file and writes new_message to it.
    output_file = open('output.txt', 'w')
    output_file.write(converted_message)
    output_file.close()
    # Prints a message to the user letting them know their output is ready
    print '-' * 65
    print "Your encrypted message is available in output.txt"
    print "Remember your plugboard settings, the rotors you chose, their"
    print "starting positions, and the reflector you used. You will need"
    print "these to decrypt the message. To decrypt, rerun the program"
    print "with the same settings and enter the encrypted message.\n"


if __name__ == "__main__":
    main()
| Python | 0.018018 | |
0598e61d9bcef2217f22cce2deeec08ed6868575 | Add rmd.py | scripts/rmd.py | scripts/rmd.py | #!/usr/bin/env python
import argparse
import sys
import logging
import os
import os.path as pt
import shutil
class App(object):
    """Command-line wrapper that renders an R Markdown file via Rscript."""

    def run(self, args):
        """Parse an argv-style list and render; returns the exit code."""
        name = pt.basename(args[0])
        parser = self.create_parser(name)
        opts = parser.parse_args(args[1:])
        return self.main(name, opts)

    def create_parser(self, name):
        """Build the argparse parser for the program named *name*."""
        p = argparse.ArgumentParser(
            prog=name,
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description='Run rmd script')
        p.add_argument(
            'rmd_file',
            help='RMD file')
        p.add_argument(
            '-o', '--out_file',
            help='Output file')
        p.add_argument(
            '-f', '--format',
            help='Output format',
            default='html',
            choices=['html', 'pdf', 'word'])
        p.add_argument(
            '--cmd',
            help='R command')
        p.add_argument(
            '--copy',
            help='Copy to file')
        p.add_argument(
            '--test',
            help='Print command without executing',
            action='store_true')
        p.add_argument(
            '--verbose',
            help='More detailed log messages',
            action='store_true')
        p.add_argument(
            '--log_file',
            help='Write log messages to file')
        return p

    def main(self, name, opts):
        """Render the RMD file described by *opts*; always returns 0."""
        logging.basicConfig(filename=opts.log_file,
                            format='%(levelname)s (%(asctime)s): %(message)s')
        log = logging.getLogger(name)
        if opts.verbose:
            log.setLevel(logging.DEBUG)
        else:
            log.setLevel(logging.INFO)
        log.debug(opts)
        rmd_file = opts.rmd_file
        # Optionally render a copy so the original file stays untouched.
        if opts.copy:
            shutil.copyfile(rmd_file, opts.copy)
            rmd_file = opts.copy
        _format = opts.format
        out_file = opts.out_file
        if out_file is None:
            out_file = '%s.%s' % (pt.splitext(rmd_file)[0], opts.format)
        else:
            # Explicit output file: derive the format from its extension.
            _format = pt.splitext(out_file)[1][1:]
        Rcmd = ''
        if opts.cmd is not None:
            Rcmd = '%s;' % (opts.cmd)
        # Assemble the Rscript invocation of rmarkdown::render().
        cmd = "library(rmarkdown); {c} render('{r}', output_file='{o}', output_format='{f}_document')"
        cmd = cmd.format(c=Rcmd, r=rmd_file, o=out_file, f=_format)
        cmd = 'Rscript -e "%s"' % (cmd)
        print(cmd)
        if not opts.test:
            # NOTE(review): cmd embeds unquoted user input and runs through
            # the shell -- shell-injection risk if inputs are untrusted.
            os.system(cmd)
        return 0


if __name__ == '__main__':
    app = App()
    app.run(sys.argv)
| Python | 0.000002 | |
30a8e40efee241dd6aa3b534814655b9f70cfffe | Add 020-valid-parentheses.py, but missed case "([])", the description is confused | 020-valid-parentheses.py | 020-valid-parentheses.py | """
Question:
Valid Parentheses My Submissions Question Solution
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
Performance:
1. Total Accepted: 71155 Total Submissions: 265078 Difficulty: Easy
"""
class Solution(object):
    def isValid(self, s):
        """
        Return True if every bracket in ``s`` is closed by the matching
        bracket type in the correct, properly nested order.

        :type s: str
        :rtype: bool

        BUGFIX: the previous implementation checked consecutive
        two-character chunks against {"()", "[]", "{}"}, which rejects
        valid nested input such as "([])".  A stack matcher handles
        arbitrary nesting in O(n).
        """
        # Map each closing bracket to the opener it must pair with; the
        # stack holds openers still waiting to be closed.
        closers = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in closers:
                # A closer must match the most recent unmatched opener.
                if not stack or stack.pop() != closers[ch]:
                    return False
            else:
                stack.append(ch)
        # Valid only when no opener is left unmatched.
        return not stack


assert Solution().isValid("()") is True
assert Solution().isValid("()[]{}") is True
assert Solution().isValid("([])") is True
assert Solution().isValid("(]") is False
assert Solution().isValid("([)]") is False
assert Solution().isValid("[") is False
| Python | 0.998825 | |
8249d33898500d9d39e8bee3d44d39c2a6034659 | Add script to create overlays | scripts/create_overlays.py | scripts/create_overlays.py | """Varcan smart tool."""
import click
from dtoolcore import DataSet
@click.command()
@click.argument('dataset_uri')
@click.option('--config-path', type=click.Path(exists=True))
def main(dataset_uri, config_path=None):
dataset = DataSet.from_uri(dataset_uri, config_path=config_path)
def name_from_identifier(identifier):
item_properties = dataset.item_properties(identifier)
name = item_properties['relpath'].rsplit('.', 1)[0]
return name
useful_name_overlay = {
identifier: name_from_identifier(identifier)
for identifier in dataset.identifiers
}
dataset.put_overlay("useful_name", useful_name_overlay)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
5510f90565809471e545584419b22980b63a1864 | Add metadata | bids_writer/_metadata.py | bids_writer/_metadata.py | # -*- coding: utf-8 -*-
# Package metadata constants (version, authorship, license, project URL).
version = "0.1.0"
author = "Nathan Vack"
author_email = "njvack@wisc.edu"
license = "MIT"
copyright = "Copyright 2015 Boards of Regent of the University of Wisconsin System"
url = "https://github.com/njvack/bids-json-writer"
| Python | 0.000044 | |
65f574973bbde545c1c815d0ad21e4a8d3f3b59d | Add initial cbio client | bioagents/cbio_client.py | bioagents/cbio_client.py | import os
import json
import logging
import requests
from collections import defaultdict
logger = logging.getLogger(__name__)
base_url = 'https://www.cbioportal.org/api'
resources_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), os.pardir, 'resources')
patient_list_cache = os.path.join(resources_dir, 'cbio_patients.json')
def get_patient_list():
    """Return ``(patients_by_id, patients_by_study)`` index dicts.

    Loads the full cBioPortal patient list from the local JSON cache when
    it exists; otherwise queries the REST API and writes the cache.
    Values are lists because a patient ID can occur in several studies.
    """
    if os.path.exists(patient_list_cache):
        logger.info('Loading patient list from cache at %s' %
                    patient_list_cache)
        with open(patient_list_cache, 'r') as fh:
            patient_list = json.load(fh)
    else:
        logger.info('Querying patient list from cBioPortal')
        url = base_url + '/patients'
        res = requests.get(url)
        patient_list = res.json()
        # Cache the raw response for subsequent runs.
        with open(patient_list_cache, 'w') as fh:
            json.dump(patient_list, fh, indent=1)
    patients_by_id = defaultdict(list)
    patients_by_study = defaultdict(list)
    for patient in patient_list:
        patients_by_id[patient['patientId']].append(patient)
        patients_by_study[patient['studyId']].append(patient)
    return dict(patients_by_id), dict(patients_by_study)


# Module-level indexes built at import time (may hit the network on a
# cold cache).
patients_by_id, patients_by_study = get_patient_list()
| Python | 0 | |
b38b9e62c174ff55d496bec2fb6599bee8262a3c | Add plot_compare_methods from scikit-learn | manifold/plot_compare_methods.py | manifold/plot_compare_methods.py | # Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
def compare():
    """Compare manifold-learning methods on the S-curve toy dataset.

    Runs four LLE variants, Isomap, MDS, spectral embedding and t-SNE on
    the same 1000-point S-curve, timing each, and draws every embedding
    in a 2x5 grid of subplots (the first cell shows the 3-D input).
    Adapted from scikit-learn's "Comparison of Manifold Learning
    methods" example.
    """
    # Next line to silence pyflakes. This import is needed.
    Axes3D

    n_points = 1000
    X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
    n_neighbors = 10
    n_components = 2

    fig = plt.figure(figsize=(15, 8))
    plt.suptitle("Manifold Learning with %i points, %i neighbors"
                 % (1000, n_neighbors), fontsize=14)

    try:
        # compatibility matplotlib < 1.0
        ax = fig.add_subplot(251, projection='3d')
        ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
        ax.view_init(4, -72)
    except Exception:
        # Fall back to a flat 2-D projection of the input data.
        ax = fig.add_subplot(251, projection='3d')
        plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)

    methods = ['standard', 'ltsa', 'hessian', 'modified']
    labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']

    for i, method in enumerate(methods):
        t0 = time()
        Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
                                            eigen_solver='auto',
                                            method=method).fit_transform(X)
        t1 = time()
        print("%s: %.2g sec" % (methods[i], t1 - t0))

        ax = fig.add_subplot(252 + i)
        plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
        plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
        ax.xaxis.set_major_formatter(NullFormatter())
        ax.yaxis.set_major_formatter(NullFormatter())
        plt.axis('tight')

    t0 = time()
    Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
    t1 = time()
    print("Isomap: %.2g sec" % (t1 - t0))
    ax = fig.add_subplot(257)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("Isomap (%.2g sec)" % (t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')

    t0 = time()
    mds = manifold.MDS(n_components, max_iter=100, n_init=1)
    Y = mds.fit_transform(X)
    t1 = time()
    print("MDS: %.2g sec" % (t1 - t0))
    ax = fig.add_subplot(258)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("MDS (%.2g sec)" % (t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')

    t0 = time()
    se = manifold.SpectralEmbedding(n_components=n_components,
                                    n_neighbors=n_neighbors)
    Y = se.fit_transform(X)
    t1 = time()
    print("SpectralEmbedding: %.2g sec" % (t1 - t0))
    ax = fig.add_subplot(259)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')

    t0 = time()
    tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
    Y = tsne.fit_transform(X)
    t1 = time()
    print("t-SNE: %.2g sec" % (t1 - t0))
    # BUGFIX: was fig.add_subplot(250).  The single-integer shorthand
    # encodes (rows=2, cols=5, index=0), and subplot indices are 1-based,
    # so the t-SNE panel raised instead of drawing.  The tenth cell of
    # the 2x5 grid is add_subplot(2, 5, 10), matching the upstream
    # scikit-learn example.
    ax = fig.add_subplot(2, 5, 10)
    plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
    plt.title("t-SNE (%.2g sec)" % (t1 - t0))
    ax.xaxis.set_major_formatter(NullFormatter())
    ax.yaxis.set_major_formatter(NullFormatter())
    plt.axis('tight')

    plt.show()


if __name__ == '__main__':
    compare()
| Python | 0.000001 | |
bf60d3c48a30863571a8700fa5a843be48e7646b | add vat_reckoner | components/vat_reckoner/vat_reckoner.py | components/vat_reckoner/vat_reckoner.py | #! /usr/bin/env python
from json import loads, dumps
from pika import BlockingConnection, ConnectionParameters
RABBIT_MQ_HOST = '54.76.183.35'
RABBIT_MQ_PORT = 5672
def vat(ch, method, properties, body):
    """Consume one new-product message, compute 20% VAT on its price and
    publish the result as a 'vat' fact on the same exchange."""
    product = loads(body)
    sku, price = product['sku'], product['price']
    vat = price * 0.20
    vat_fact = {'sku': sku, 'vat': vat}
    print 'Calculated vat %s' % (vat_fact,)
    # NOTE(review): uses the module-level `channel` defined below; this is
    # fine at runtime because the callback only fires inside
    # start_consuming(), after the channel exists.
    channel.basic_publish(exchange='alex2',
                          routing_key='vat',
                          body=dumps(vat_fact))


# Wire up RabbitMQ: exclusive auto-named queue bound to 'new_products'
# on the 'alex2' topic exchange, consumed forever by vat() above.
connection = BlockingConnection(ConnectionParameters(host=RABBIT_MQ_HOST,
                                                     port=RABBIT_MQ_PORT))
channel = connection.channel()
channel.exchange_declare(exchange='alex2', type='topic')
result = channel.queue_declare(exclusive=True)
queue = result.method.queue
channel.queue_bind(exchange='alex2', queue=queue, routing_key='new_products')
# no_ack=True: messages are not re-delivered if processing fails.
channel.basic_consume(vat, queue=queue, no_ack=True)
channel.start_consuming()
| Python | 0.000284 | |
77d90ec03eff1946a422e5471cc1a64708eff0f4 | Test dramatis personae | shakespearelang/tests/unit/test_dramatis_personae.py | shakespearelang/tests/unit/test_dramatis_personae.py | from shakespearelang import Shakespeare
from shakespearelang.errors import ShakespeareRuntimeError
import pytest
# Play header exercising many dramatis personae, including multi-word
# character names.
MANY_CHARACTERS_PLAY = """
A lot of people.
Achilles, a test.
Christopher Sly, a test.
Demetrius, a test.
John of Lancaster, a test.
Juliet, a test.
Mistress Overdone, a test.
Romeo, a test.
Stephano, a test.
The Abbot of Westminster, a test.
The Ghost, a test.
Titania, a test.
Vincentio, a test.
"""


def test_correct_characters():
    # The dramatis personae should yield exactly the declared characters.
    s = Shakespeare('Foo. Juliet, a test. Romeo, a test. The Ghost, a test.')
    assert sorted([c.name for c in s.characters]) == ['Juliet', 'Romeo', 'The Ghost']


def test_no_characters():
    # A play without a dramatis personae has an empty character list.
    s = Shakespeare('Foo. Act I: The beginning.')
    assert s.characters == []


def test_many_characters():
    # Multi-word names such as "John of Lancaster" must parse correctly.
    s = Shakespeare(MANY_CHARACTERS_PLAY)
    assert sorted([c.name for c in s.characters]) == [
        'Achilles',
        'Christopher Sly',
        'Demetrius',
        'John of Lancaster',
        'Juliet',
        'Mistress Overdone',
        'Romeo',
        'Stephano',
        'The Abbot of Westminster',
        'The Ghost',
        'Titania',
        'Vincentio',
    ]
| Python | 0.000007 | |
0ba11dd47dac04f3f7a314cf320558ccbc9eb148 | Add test for water polygon name dropping. | integration-test/1477-water-layer-too-big.py | integration-test/1477-water-layer-too-big.py | # -*- encoding: utf-8 -*-
from . import FixtureTest
class WaterLayerTooBigTest(FixtureTest):
    """Water polygons below a pixel-area threshold should lose their name."""

    def test_drop_label(self):
        from tilequeue.tile import calc_meters_per_pixel_area
        from shapely.ops import transform
        from tilequeue.tile import reproject_mercator_to_lnglat
        import math
        import dsl

        for zoom in range(5, 16):
            # The cutoff corresponds to 270 px^2 at this zoom; build
            # circles just above/below the equivalent radius to straddle it.
            area = 270.0 * calc_meters_per_pixel_area(zoom)
            radius = math.sqrt(area / math.pi)

            # Tile coordinate at the centre of this zoom level's grid.
            coord = 2 ** (zoom - 1)

            # larger feature should retain name
            shape = dsl.tile_centre_shape(
                zoom, coord, coord).buffer(radius * 1.1)
            shape_lnglat = transform(
                reproject_mercator_to_lnglat, shape)
            self.generate_fixtures(
                dsl.way(1, shape_lnglat, {
                    'natural': 'water',
                    'name': 'Foo',
                }),
            )
            self.assert_has_feature(
                zoom, coord, coord, 'water', {
                    'kind': 'water',
                    'name': 'Foo',
                })

            # smaller shape should drop it
            shape = dsl.tile_centre_shape(
                zoom, coord, coord).buffer(radius / 1.1)
            shape_lnglat = transform(
                reproject_mercator_to_lnglat, shape)
            self.generate_fixtures(
                dsl.way(1, shape_lnglat, {
                    'natural': 'water',
                    'name': 'Foo',
                }),
            )
            self.assert_has_feature(
                zoom, coord, coord, 'water', {
                    'kind': 'water',
                    # A dropped name is asserted by matching NoneType.
                    'name': type(None),
                })
| Python | 0 | |
a3e538830305d8a6651c5ed46e2dfdffe41c28e6 | Add a module for ssh 'console' API | confluent_server/confluent/plugins/shell/ssh.py | confluent_server/confluent/plugins/shell/ssh.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'jjohnson2'
# This plugin provides an ssh implementation comforming to the 'console'
# specification. consoleserver or shellserver would be equally likely
# to use this.
import confluent.interface.console as conapi
import eventlet
paramiko = eventlet.import_patched('paramiko')
class SshShell(conapi.Console):
    """'console' implementation backed by an interactive ssh session.

    Until credentials are known the object runs a small login state
    machine over the console itself: inputmode 0 collects the username,
    1 collects the password, and 2 means the remote shell is live and
    keystrokes are forwarded to it.
    """

    def __init__(self, node, config, username='', password=''):
        self.node = node
        self.nodeconfig = config
        self.username = username
        self.password = password
        self.inputmode = 0  # 0 = username, 1 = password...
        # Not connected until logon() succeeds; recvdata() loops on this
        # (previously the attribute did not exist before logon()).
        self.connected = False

    def recvdata(self):
        """Pump output from the remote shell to the console until EOF."""
        while self.connected:
            pendingdata = self.shell.recv(8192)
            if pendingdata == '':
                # Zero-length read: the remote side closed the channel.
                self.datacallback(conapi.ConsoleEvent.Disconnect)
                return
            self.datacallback(pendingdata)

    def connect(self, callback):
        # for now, we just use the nodename as the presumptive ssh destination
        #TODO(jjohnson2): use a 'nodeipget' utility function for architectures
        # that would rather not use the nodename as anything but an opaque
        # identifier
        self.datacallback = callback
        # BUGFIX: was "is not ''", an identity comparison against a string
        # literal that only worked via CPython small-string interning (and
        # warns on modern interpreters).
        if self.username != '':
            self.logon()
        else:
            # No credentials yet: prompt interactively over the console.
            self.inputmode = 0
            callback('\r\nlogin as: ')
        return

    def logon(self):
        """Attempt the actual ssh connection with the collected credentials."""
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.client.AutoAddPolicy())
        try:
            self.ssh.connect(self.node, username=self.username,
                             password=self.password, allow_agent=False,
                             look_for_keys=False)
        except paramiko.AuthenticationException:
            # Bad credentials: reset the login state machine and re-prompt.
            self.inputmode = 0
            self.username = ''
            self.password = ''
            self.datacallback('\r\nlogin as: ')
            return
        self.inputmode = 2
        self.connected = True
        self.shell = self.ssh.invoke_shell()
        self.rxthread = eventlet.spawn(self.recvdata)

    def write(self, data):
        """Route console input according to the current input mode."""
        if self.inputmode == 0:
            self.username += data
            if '\r' in self.username:
                # Username complete; anything after the CR seeds the password.
                self.username, self.password = self.username.split('\r')
                lastdata = data.split('\r')[0]
                if lastdata != '':
                    self.datacallback(lastdata)
                self.datacallback('\r\nEnter password: ')
                self.inputmode = 1
            else:
                # echo back typed data
                self.datacallback(data)
        elif self.inputmode == 1:
            # Password input is deliberately not echoed.
            self.password += data
            if '\r' in self.password:
                self.password = self.password.split('\r')[0]
                self.datacallback('\r\n')
                self.logon()
        else:
            self.shell.sendall(data)

    def close(self):
        # Stop the receive loop, then close the client if one was ever
        # created (previously this raised AttributeError when close()
        # arrived before logon()).
        self.connected = False
        if getattr(self, 'ssh', None) is not None:
            self.ssh.close()
def create(nodes, element, configmanager, inputdata):
    # Plugin entry point: build a console for a single node.  Implicitly
    # returns None when more than one node is requested, which callers
    # must treat as "unsupported".
    if len(nodes) == 1:
        return SshShell(nodes[0], configmanager)
865dc29421c1e9ef4bf340bf32164863cc5f2006 | Add management command to list installed spiders | app/raw/management/commands/list_spiders.py | app/raw/management/commands/list_spiders.py | from django.core.management import BaseCommand
from raw.utils import list_spiders
class Command(BaseCommand):
    """Django management command that prints each installed spider's name."""
    help = 'List installed spiders'

    def handle(self, *args, **options):
        # One spider per line (Python 2 print statement).
        for spider in list_spiders():
            print spider
| Python | 0 | |
77966f7f993e526467b2e54e0d12241354efec16 | add spec for re2 | build/fbcode_builder/specs/re2.py | build/fbcode_builder/specs/re2.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def fbcode_builder_spec(builder):
    """Build spec for fbcode_builder: clone google/re2 and install via CMake."""
    return {
        'steps': [
            builder.github_project_workdir('google/re2', 'build'),
            builder.cmake_install('google/re2'),
        ],
    }
| Python | 0 | |
15388e09ab537d3731891353c54f53105c4a7ee4 | add files | weixin_pay.py | weixin_pay.py | #!/usr/bin/env python
# coding=utf-8
__author__ = 'youqingkui'
| Python | 0.000002 | |
b7360d6ba397f8654f4e051227aa86a1ebe693f7 | Add main program | follow.py | follow.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from github import Github
# usage
def usage():
    """Print command-line usage for the follow script (Python 2)."""
    print 'Follow GitHub user\'s starred, watching and following.'
    print
    print 'Usage: python follow.py <token> <user>'
    print
    print 'token: Go to https://github.com/settings/tokens and `Generate new token` with scope `public_repo`.'
    print
    print 'user: GitHub user ID you want to follow.'
# args
# Exactly two arguments are required: a personal access token and the
# GitHub user whose lists should be mirrored.
if len(sys.argv) != 3:
    usage()
    exit(1)

# variable
# `me` is authenticated with the token; `namedUser` is read anonymously.
me = Github(sys.argv[1])
namedUser = Github().get_user(sys.argv[2])

# action
# .reversed iterates each paginated list in reverse order; every item is
# added to the authenticated user's corresponding list.
for starred in namedUser.get_starred().reversed:
    me.get_user().add_to_starred(starred)
for subscription in namedUser.get_subscriptions().reversed:
    me.get_user().add_to_subscriptions(subscription)
for watched in namedUser.get_watched().reversed:
    me.get_user().add_to_watched(watched)
for following in namedUser.get_following().reversed:
    me.get_user().add_to_following(following)
| Python | 0.000002 | |
9080d20bd61ac66a534c834a17a9825808416512 | Add pre-stage hook for FrostNumberModel | metadata/FrostNumberModel/hooks/pre-stage.py | metadata/FrostNumberModel/hooks/pre-stage.py | """A hook for modifying parameter values read from the WMT client."""
import os
import shutil
from wmt.utils.hook import find_simulation_input_file
from topoflow_utils.hook import assign_parameters
file_list = []
def execute(env):
"""Perform pre-stage tasks for running a component.
Parameters
----------
env : dict
A dict of component parameter values from WMT.
"""
assign_parameters(env, file_list)
env['fn_out_filename'] = 'frostnumber_output.dat'
for fname in file_list:
src = find_simulation_input_file(env[fname])
shutil.copy(src, os.curdir)
| Python | 0 | |
70849edc52acc1c559b35a55c7f1925c1cbf57ad | add new tagcount plugin for yawt rewrite | yawtext/tagcount.py | yawtext/tagcount.py | from flask import current_app, g, Blueprint
import jsonpickle
from yawt.utils import save_file, load_file
import os
tagcountsbp = Blueprint('tagcounts', __name__)
@tagcountsbp.app_context_processor
def tagcounts():
    """Flask context processor exposing tag counts to all templates.

    Returns {'tagcounts': ..., 'tagbase': ...} when the persisted count
    file exists, otherwise an empty dict.
    """
    tagcountfile = current_app.config['YAWT_TAGCOUNT_FILE']
    tvars = {}
    if os.path.isfile(tagcountfile):
        tagbase = current_app.config['YAWT_TAGCOUNT_BASE']
        # Normalize the base so tag links can be built by concatenation.
        if not tagbase.endswith('/'):
            tagbase += '/'
        tvars = {'tagcounts': jsonpickle.decode(load_file(tagcountfile)),
                 'tagbase': tagbase}
    return tvars
class YawtTagCount(object):
    """Yawt plugin maintaining a tag -> article-count mapping.

    Counts are rebuilt during a full site walk and adjusted incrementally
    when files change; they are persisted (jsonpickle) to
    YAWT_TAGCOUNT_FILE and exposed to templates by the blueprint above.
    """

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)
        # In-memory map of tag name -> number of articles carrying it.
        self.tagcounts = {}

    def init_app(self, app):
        app.config.setdefault('YAWT_TAGCOUNT_BASE', '')
        app.config.setdefault('YAWT_TAGCOUNT_FILE', '/tmp/tagcounts')
        app.register_blueprint(tagcountsbp)

    def on_pre_walk(self):
        # A full rebuild starts from scratch.
        self.tagcounts = {}

    def on_visit_article(self, article):
        if hasattr(article.info, 'taglist'):
            for tag in article.info.taglist:
                if tag in self.tagcounts:
                    self.tagcounts[tag] += 1
                else:
                    self.tagcounts[tag] = 1

    def on_post_walk(self):
        # Persist the counts so tagcounts() can serve them cheaply.
        pickled_counts = jsonpickle.encode(self.tagcounts)
        save_file(current_app.config['YAWT_TAGCOUNT_FILE'], pickled_counts)

    def on_files_changed(self, files_modified, files_added, files_removed):
        # Incremental update: subtract counts for removed/old versions,
        # then re-add counts for the new/modified versions.
        pickled_counts = load_file(current_app.config['YAWT_TAGCOUNT_FILE'])
        self.tagcounts = jsonpickle.decode(pickled_counts)
        for f in files_removed + files_modified:
            # NOTE(review): assumes removed files are still fetchable from
            # the store and that every fetched article has a taglist --
            # confirm against the store implementation.
            article = g.store.fetch_article_by_repofile(f)
            for tag in article.info.taglist:
                self.tagcounts[tag] -= 1
        for f in files_modified + files_added:
            article = g.store.fetch_article_by_repofile(f)
            self.on_visit_article(article)
        self.on_post_walk()
| Python | 0 | |
d19aaf0fd3c88c08b2b8563030dd38c0cea3631b | Add unit test for `parse_cluster_info` (#22205) | dashboard/modules/job/tests/test_sdk.py | dashboard/modules/job/tests/test_sdk.py | import pytest
from typing import Dict, Optional, Tuple
from unittest.mock import Mock, patch
from ray.dashboard.modules.job.sdk import parse_cluster_info
@pytest.mark.parametrize(
    "address_param",
    [
        # (address, expected dispatch module, expected inner address)
        ("ray://1.2.3.4:10001", "ray", "1.2.3.4:10001"),
        ("other_module://", "other_module", ""),
        ("other_module://address", "other_module", "address"),
    ],
)
@pytest.mark.parametrize("create_cluster_if_needed", [True, False])
@pytest.mark.parametrize("cookies", [None, {"test_cookie_key": "test_cookie_val"}])
@pytest.mark.parametrize("metadata", [None, {"test_metadata_key": "test_metadata_val"}])
@pytest.mark.parametrize("headers", [None, {"test_headers_key": "test_headers_val"}])
def test_parse_cluster_info(
    address_param: Tuple[str, str, str],
    create_cluster_if_needed: bool,
    cookies: Optional[Dict[str, str]],
    metadata: Optional[Dict[str, str]],
    headers: Optional[Dict[str, str]],
):
    """
    Test ray.dashboard.modules.job.sdk.parse_cluster_info for different
    format of addresses.

    "ray://" addresses must be routed to Ray's own
    get_job_submission_client_cluster_info; any other scheme must be
    dispatched via importlib to <scheme>.get_job_submission_client_cluster_info.
    Both targets are mocked, so no cluster is ever contacted.
    """
    mock_get_job_submission_client_cluster = Mock(return_value="Ray ClusterInfo")
    mock_module = Mock()
    mock_module.get_job_submission_client_cluster_info = Mock(
        return_value="Other module ClusterInfo"
    )
    mock_import_module = Mock(return_value=mock_module)

    address, module_string, inner_address = address_param

    with patch.multiple(
        "ray.dashboard.modules.job.sdk",
        get_job_submission_client_cluster_info=mock_get_job_submission_client_cluster,
    ), patch.multiple("importlib", import_module=mock_import_module):
        if module_string == "ray":
            assert (
                parse_cluster_info(
                    address,
                    create_cluster_if_needed=create_cluster_if_needed,
                    cookies=cookies,
                    metadata=metadata,
                    headers=headers,
                )
                == "Ray ClusterInfo"
            )
            # The Ray path must receive the address with the scheme stripped
            # and all keyword options forwarded untouched.
            mock_get_job_submission_client_cluster.assert_called_once_with(
                inner_address,
                create_cluster_if_needed=create_cluster_if_needed,
                cookies=cookies,
                metadata=metadata,
                headers=headers,
            )
        elif module_string == "other_module":
            assert (
                parse_cluster_info(
                    address,
                    create_cluster_if_needed=create_cluster_if_needed,
                    cookies=cookies,
                    metadata=metadata,
                    headers=headers,
                )
                == "Other module ClusterInfo"
            )
            # Non-ray schemes are resolved by importing the scheme name as
            # a module and delegating to its cluster-info hook.
            mock_import_module.assert_called_once_with(module_string)
            mock_module.get_job_submission_client_cluster_info.assert_called_once_with(
                inner_address,
                create_cluster_if_needed=create_cluster_if_needed,
                cookies=cookies,
                metadata=metadata,
                headers=headers,
            )
| Python | 0 | |
c47966819a2bd8450ee731533c50f9c8d92b86a2 | Create smtp_sender.py | smtp_sender.py | smtp_sender.py | #!/usr/bin/env python
'''SMTP sender'''
from __future__ import print_function
from optparse import OptionParser
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email import utils
import smtplib
import os
import sys
import mimetypes
# shut up about constants pylint!
# pylint: disable-msg=C0103
def detect_filetype(fn, bodyFile=False):
    '''Detect mime type information about a file.

    Returns ``[maintype, subtype]``.  Unknown types (or files with a
    transport encoding such as gzip) fall back to
    ``application/octet-stream``; when *bodyFile* is true, an unknown
    file is sniffed for HTML markup and treated as text/html or
    text/plain instead.

    Fixes: the file handle used for sniffing was never closed, and the
    ``'<html>' in fdata`` membership test compared str against the bytes
    read in 'rb' mode, a TypeError on Python 3.  The content is now read
    via a context manager and decoded leniently before sniffing.
    '''
    ct, enc = mimetypes.guess_type(fn)
    if ct is None and bodyFile:
        # Crude but sufficient sniff for HTML body content.
        with open(fn, 'rb') as fobj:
            fdata = fobj.read().decode('ascii', 'ignore')
        ct = 'text/html' if '<html>' in fdata else 'text/plain'
    else:
        if ct is None or enc is not None:
            # Unknown type, or an encoding (gzip etc.) that a plain
            # content type cannot express: send as opaque bytes.
            ct = 'application/octet-stream'
    return ct.split('/', 1)  # [maintype, subtype]
if __name__ == '__main__':
    # ---- command-line definition -------------------------------------
    parser = OptionParser(usage='%prog [options]')
    parser.add_option('-d', '--debug', dest='debug',
                      action='store_true', help='show debugging messages')
    parser.add_option('-s', '--subject', dest='subject', help='email subject [required]')
    parser.add_option('-f', '--from', dest='sender', help='email from address [required]')
    parser.add_option('-b', '--body', dest='body', help='email body')
    parser.add_option('-t', '--bodyfile', dest='bodyfile', help='email body from file')
    parser.add_option('-z', '--host', dest='host', help='SMTP host [required]')
    parser.add_option('-p', '--port', dest='port', type='int', help='SMTP port [required]')
    #parser.add_option('-e', '--base64', dest='b64', action='store_true',
    #                  help='Base64 encoding (default)')
    parser.add_option('-o', '--bodyencode', dest='bodyencode',
                      action='store_true', help='Use selected encoder to encode body content')
    parser.add_option('-r', '--recipients',
                      dest='recipients',
                      help='comma separated list of email recipients [required]')
    parser.add_option('-a', '--attachments',
                      dest='attachments',
                      help='comma separated list of email attachments')
    parser.add_option('-i', '--binary', dest='binary',
                      help='Dont detect attachment filetype, send binary')
    # NOTE(review): -d/--debug and -i/--binary are parsed but never used.
    opts, args = parser.parse_args()
    # ---- message envelope --------------------------------------------
    msg = MIMEMultipart()
    msg['Message-ID'] = utils.make_msgid()
    # Required-option validation is done by letting the failing accesses
    # below raise and catching them wholesale.
    try:
        recipients = opts.recipients.replace(' ', '').split(',')
        msg['Subject'] = opts.subject
        # NOTE(review): header name 'from' is lowercase; headers are
        # case-insensitive on the wire but 'From' is conventional.
        msg['from'] = opts.sender
        msg['To'] = ', '.join(recipients)
        host = opts.host
        port = int(opts.port)
    except (ValueError, NameError, IndexError, AttributeError, TypeError):
        print('Required options missing')
        print(parser.format_help())
        sys.exit(1)
    # giving myself the option to add other encoders later on if desired
    # NOTE(review): str.encode('base64') is Python 2 only; on Python 3 use
    # base64.b64encode / email.encoders instead.
    encoder = lambda x: x.encode('base64')
    encoder.__doc__ = 'base64'
    #charset = 'UTF-8'
    charset = 'us-ascii'
    # ---- body: from file, or inline, else error ----------------------
    if opts.bodyfile:
        if os.path.exists(opts.bodyfile):
            fd = open(opts.bodyfile, 'rb').read()
            maintype, subtype = detect_filetype(opts.bodyfile)
            # A body that cannot be represented as ASCII forces base64.
            needEncode = False
            try:
                tdata = fd.encode('ascii')
            except:
                needEncode = True
            if opts.bodyencode or needEncode:
                body = MIMEBase(maintype, subtype, charset=charset)
                body.set_payload(encoder(fd))
                body.add_header('Content-Transfer-Encoding', encoder.__doc__)
            else:
                body = MIMEText(tdata, _subtype=subtype, _charset=charset)
            msg.attach(body)
        else:
            print('Provided body file does not exist')
            sys.exit(1)
    elif opts.body:
        if opts.bodyencode:
            # Sniff inline body for HTML, then base64-encode it.
            dd = 'text/html' if '<html>' in opts.body else 'text/plain'
            maintype, subtype = dd.split('/', 1)
            body = MIMEBase(maintype, subtype, charset=charset)
            body.set_payload(encoder(opts.body))
            body.add_header('Content-Transfer-Encoding', encoder.__doc__)
            msg.attach(body)
        else:
            msg.preamble = opts.body
    else:
        print('Provide either body text or body file')
        print(parser.format_help())
        sys.exit(1)
    # ---- attachments (always sent as opaque base64 octet-streams) ----
    if opts.attachments:
        # check for option to not detect filetype
        for filename in opts.attachments.replace(' ', '').split(','):
            attachment = MIMEBase('application', 'octet-stream', charset=charset)
            attachment.set_payload(open(filename, 'rb').read())
            attachment.add_header('Content-Transfer-Encoding', encoder.__doc__)
            #encoders.encode_base64(attachment)
            base_filename = filename.split(os.path.sep)[-1]
            attachment.add_header('Content-Disposition', 'attachment', filename=base_filename)
            msg.attach(attachment)
    # ---- send --------------------------------------------------------
    smtpi = smtplib.SMTP(host, port)
    smtpi.sendmail(opts.sender, recipients, msg.as_string())
    smtpi.quit()
| Python | 0.000026 | |
4d570475d22cc85dd55c4b68bd7321cec7be8e7e | Add bytecode patcher to change the snooper URL and interval (see wiki.vg/Session#Snoop) | snoop_patch.py | snoop_patch.py | from os import chdir
from tempfile import mkdtemp
from shutil import rmtree
from struct import pack
from subprocess import check_output
def jar_contents(j_path):
    """Return the entry names of the jar at *j_path*, one per list item."""
    listing = check_output(['jar', 'tf', j_path])
    return listing.split("\n")
def jar_extract(j_path):
    """Extract the jar at *j_path* into the current working directory."""
    extract_cmd = ['jar', 'xf', j_path]
    return check_output(extract_cmd)
def jar_update(j_path, t_path, c_path):
    """Replace one jar entry: the file *c_path*, relative to tree *t_path*."""
    relative = c_path[len(t_path) + 1:]  # strip the "<t_path>/" prefix
    return check_output(['jar', 'uf', j_path, '-C', t_path, relative])
def jlong(v):
    """Encode *v* as a big-endian 64-bit signed integer (Java ``long``)."""
    encoded = pack('>q', v)
    return encoded
def jstring(v):
return "%s%s" % (pack('>h', len(v)), v)
def jurl(h, p):
    """Build the length-prefixed "http://host[:port]/" URL; port 80 is implicit."""
    suffix = "" if p == 80 else ":%d" % p
    return jstring("http://%s%s/" % (h, suffix))
def patch(j_path, host, port, interval):
    """Rewrite the Minecraft snooper URL/interval inside the jar *j_path*.

    Scans the jar's .class files for the current URL and interval byte
    patterns (taken from a marker entry if the jar was patched before,
    otherwise the stock snoop.minecraft.net defaults), rewrites the first
    class containing both, and records the new settings in the marker.
    Returns True if a class was patched.
    """
    #Marker file to put in jar
    m_name = '.snooper-patched'
    #Get jar contents
    j_contents = jar_contents(j_path)
    #Make a temporary directory
    t_path = mkdtemp(prefix='mark2-patch-')
    # NOTE(review): chdir is process-global and is never restored.
    chdir(t_path)
    #Extract the jar
    jar_extract(j_path)
    #Figure out what we need to replace: previously-patched values from
    #the marker entry, or the stock defaults.
    if m_name in j_contents:
        f = open("%s/%s" % (t_path, m_name), "r")
        old_host, old_port, old_interval = f.read().split("\n")
        old_port = int(old_port)
        old_interval = int(old_interval)
        f.close()
    else:
        old_host, old_port, old_interval = 'snoop.minecraft.net', 80, 900000
    replace = {
        jlong(old_interval): jlong(interval),
        jurl(old_host, old_port): jurl(host, port)}
    #Find the relevant class: the first one containing BOTH patterns.
    c_path = None
    c_data = None
    for name in j_contents:
        name = "%s/%s" % (t_path, name)
        if not name.endswith(".class"):
            continue
        # NOTE(review): class bytes are read in text mode here but written
        # back in binary mode below; on Python 2 both are byte strings.
        f = open(name, 'r')
        data = f.read()
        f.close()
        found = True
        for k in replace.keys():
            found &= data.find(k) != -1
        if found:
            c_path = name
            c_data = data
            break
    #Patch if found
    if c_path != None:
        #Update file contents
        # NOTE(review): Python 2 iteritems(); the loop variable shadows the
        # `replace` dict, harmless only because the dict is not used again.
        for find, replace in replace.iteritems():
            c_data = c_data.replace(find, replace)
        #Write to file
        f = open(c_path, 'wb')
        f.write(c_data)
        f.close()
        #Update jar
        jar_update(j_path, t_path, c_path)
        #Add marker that it's been patched
        m_path = "%s/%s" % (t_path, m_name)
        f = open(m_path, "w")
        f.write("%s\n%d\n%d" % (host, port, interval))
        f.close()
        jar_update(j_path, t_path, m_path)
    rmtree(t_path)
    return c_path != None
| Python | 0 | |
3c685922756a582030980f319014ba308735ee2c | add nextlaunch command | src/tenyksscripts/scripts/rockets.py | src/tenyksscripts/scripts/rockets.py | import datetime
import requests
import time
def run(data, settings):
    """Tenyks script handler: reply to a "nextlaunch" payload with the
    next scheduled rocket launch from the Launch Library API; any other
    payload is ignored (implicit None return)."""
    if data["payload"] != "nextlaunch":
        return
    launches = requests.get("https://launchlibrary.net/1.2/launch", params={"next": 1, "mode": "verbose"}).json()
    if not launches["count"]:
        return "No launches scheduled"
    launch = launches["launches"][0]
    # Countdown from now to the launch's NET (no-earlier-than) timestamp.
    delta = datetime.timedelta(seconds=launch["netstamp"] - int(time.time()))
    return "Next launch: {name}. When: {time} (in {delta})".format(
        name=launch["name"],
        time=launch["net"],
        delta=delta
    )
| Python | 0.000001 | |
ba3e2a81a5e89c010473820732835d8bf7ccc39a | Create morningstar.py | morningstar.py | morningstar.py | import os
import sys
import threading
import thread
import time
import settings
import subprocess
import psutil
class watchman(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
badwinprocs = ['taskmgr', 'regedit', 'mbam', 'cmd', 'command']
if 'lucifer' in sys.argv[0]:
exe = "morningstar"
else:
exe = "lucifer"
while 1:
#
processlist = psutil.pids()
x = False
for process in processlist:
try:
proc = psutil.Process(process)
print proc.name()
if exe in proc.name():
x = True
elif proc.name() in badwinprocs:
proc.stop()
except: print 'psutil error'
if x == False:
print exe + ' not running...'
os.popen('Shutdown -s -f -t 000')
sys.exit()
#break
#
def startup():
time.sleep(5)
try:
startupshit = glob.glob("*.exe")
for nigger in startupshit:
try:
if nigger in sys.argv[0]:
pass
else:
os.popen(killcmd + ' ' + nigger)
except:
print prefix + "couldn't kill the " + nigger # HA!
subprocess.check_call("attrib +R +S +H " + sys.argv[0], shell=True)
except:
pass
if 'lucifer' in sys.argv[0]:
print "[ > ] Morningstar loaded"
else:
thread.start_new_thread(startup, ())
print "[ > ] Startup loaded"
time.sleep(5)
watchman().start()
print "[ > ] Watchman loaded"
| Python | 0.000007 | |
7d546ca0ce8e2e8ef4f71abda50764817ce83c0b | add mouse_click.py | mouse_click.py | mouse_click.py | from pymouse import PyMouse
from time import sleep

# Simple fixed-position auto-clicker: clicks (x, y) repeatedly until the user
# moves the pointer out of a horizontal band, which acts as the kill switch.
m = PyMouse()
sleep(5)  # five-second delay so the user can switch to the target window
x=969  # fixed screen coordinates of the click target
y=581
a = 1
while a == 1:  # loops until the break below fires
    m.click(x,y)  # move the pointer to (x, y) and click
    sleep(0.1)
    p = m.position()  # read the current pointer position
    if not 900<p[0]<1000:  # exit the loop once the x coordinate leaves 900-1000
        break
92d3667471c3e1bfc91c2c925ef7cd33eed477e4 | Add run up problem | proteus/SWFlows/tests/run_up_problem/SWFlow.py | proteus/SWFlows/tests/run_up_problem/SWFlow.py | from __future__ import division
from builtins import object
from past.utils import old_div
from proteus import *
from proteus.default_p import *
from proteus.mprans import SW2D
from proteus.mprans import SW2DCV
from proteus.Domain import RectangularDomain
import numpy as np
from proteus import (Domain, Context,
MeshTools as mt)
from proteus.Profiling import logEvent
import proteus.SWFlows.SWFlowProblem as SWFlowProblem
from proteus import WaveTools as wt
# *************************** #
# ***** GENERAL OPTIONS ***** #
# *************************** #
# Runtime-tunable context options (may be overridden on the command line).
opts= Context.Options([
    ('sw_model',0,"sw_model = {0,1} for {SWEs,DSWEs}"),
    ("final_time",9.0,"Final time for simulation"),
    ("dt_output",0.1,"Time interval to output solution"),
    ("cfl",0.33,"Desired CFL restriction"),
    ("refinement",4,"Refinement level")
])
###################
# DOMAIN AND MESH #
###################
L=(45.0,4.5)  # domain extents (length, width)
refinement = opts.refinement
# Rectangle of size L with its lower-left corner shifted to x = -35.
domain = RectangularDomain(L=L,x=[-35,0,0])
# CREATE REFINEMENT #
nnx0=6
nnx = (nnx0-1)*(2**refinement)+1  # number of nodes along x
nny = old_div((nnx-1),10)+1       # nodes along y (domain aspect ratio is 10:1)
he = old_div(L[0],float(nnx-1))   # element size
triangleOptions="pAq30Dena%f" % (0.5*he**2,)
#################
# SOLITARY WAVE #
#################
g = SWFlowProblem.default_physical_parameters['gravity']
h0 = 1.0  # still-water depth
a = 0.30 # amplitude
slope = 1.0 / 19.850  # beach slope (the classical 1:19.85 run-up geometry)
k_wavenumber = np.sqrt(3.0 * a/(4.0 * h0**3)) # wavenumber
z = np.sqrt(3.0 * a * h0) / (2.0 * h0 * np.sqrt(h0 * (1.0 + a)))
wavelength = 2.0 / k_wavenumber * np.arccosh(np.sqrt(1.0 / 0.050)) # wavelength of solitary wave
c = np.sqrt(g * (1.0 + a) * h0)  # wave celerity
x0 = - h0/slope - wavelength/2.0 # location of the toe of the beach
def soliton(x, t):
    """Free-surface elevation of the solitary wave at position x, time t."""
    # sech^2 profile travelling at speed c, centred at x0 when t = 0.
    sech_sq = (1.00 / np.cosh(z * (x - x0 - c * t))) ** 2.00
    return a * h0 * sech_sq
def u(x, t):
    """Depth-averaged velocity of the solitary wave at position x, time t.

    Bug fix: the original evaluated ``soliton(X[0], t)`` — ``X`` is undefined
    here, so any call raised NameError.  The scalar argument ``x`` is used
    directly, matching ``soliton``'s signature.
    """
    eta = soliton(x, t)
    return c * eta / (h0 + eta)
def bathymetry(X):
    """Bottom elevation: plane beach of gradient ``slope`` clipped at depth -h0.

    Fix: use ``np.maximum`` — this module explicitly imports numpy as ``np``;
    the original referenced the bare name ``numpy``, which only works if one
    of the wildcard imports happens to leak it in.
    """
    x = X[0]
    return np.maximum(slope * x, -h0)
###############################
##### BOUNDARY CONDITIONS #####
###############################
def water_height_DBC(X,flag):
    """Dirichlet boundary condition for the water height.

    Returns an (x, t) -> value function on the matched x-boundaries and
    None (no Dirichlet condition) everywhere else.
    """
    if X[0]==-35:  # offshore boundary (matches the domain's x offset)
        return lambda x,t: h0  # hold the still-water depth
    elif X[0]==L[0]:
        # NOTE(review): the domain spans [-35, -35 + L[0]] = [-35, 10], so a
        # boundary point with X[0] == L[0] (= 45) may never occur — confirm
        # the intended right-boundary coordinate.
        return lambda X,t: 0.
def x_mom_DBC(X, flag):
    """Dirichlet BC for the x-momentum: zero on both matched x-boundaries,
    no condition (None) elsewhere."""
    # Both branches of the original returned the same zero function, so the
    # two conditions are merged into one test.
    if X[0] == -35 or X[0] == L[0]:
        return lambda X, t: 0.0
##############################
##### INITIAL CONDITIONS #####
##############################
class water_height_at_t0(object):
    """Initial water height: solitary wave surface over the bathymetry."""

    def uOfXT(self, X, t):
        # Depth = surface elevation minus bottom, clipped at zero where dry.
        surface = soliton(X[0], 0)
        return max(surface - bathymetry(X), 0.)
class x_mom_at_t0(object):
    """Initial x-momentum: depth times the wave's depth-averaged velocity."""

    def uOfXT(self, X, t):
        surface = soliton(X[0], 0)
        depth = max(surface - bathymetry(X), 0.)
        # h * u, with u = c * eta / (h0 + eta); zero wherever the bed is dry.
        return depth * c * surface / (h0 + surface)
class Zero(object):
    """Identically-zero initial condition.

    Consistency fix: inherit from ``object`` like the sibling initial-condition
    classes — this file targets Python 2 as well (``from builtins import
    object``), where a bare ``class Zero():`` would be an old-style class.
    """

    def uOfXT(self, X, t):
        # Zero everywhere, for all times.
        return 0.0
# ********************************** #
# ***** Create mySWFlowProblem ***** #
# ********************************** #
outputStepping = SWFlowProblem.OutputStepping(opts.final_time, dt_output=opts.dt_output)
initialConditions = {'water_height': water_height_at_t0(),
                     'x_mom': x_mom_at_t0(),
                     'y_mom': Zero()}
boundaryConditions = {'water_height': water_height_DBC,
                      'x_mom': x_mom_DBC,
                      'y_mom': lambda x, flag: lambda x, t: 0.0}
# Bug fix: honour the command-line context options.  The original hard-coded
# sw_model=0 and cfl=0.33 here, silently ignoring the 'sw_model' and 'cfl'
# entries declared in `opts`; the option defaults (0 and 0.33) are unchanged,
# so default behaviour is identical.
mySWFlowProblem = SWFlowProblem.SWFlowProblem(sw_model=opts.sw_model,
                                              cfl=opts.cfl,
                                              outputStepping=outputStepping,
                                              structured=True,
                                              he=he,
                                              nnx=nnx,
                                              nny=nny,
                                              domain=domain,
                                              initialConditions=initialConditions,
                                              boundaryConditions=boundaryConditions,
                                              bathymetry=bathymetry)
# No linear friction and no Manning roughness for this idealised run-up case.
mySWFlowProblem.physical_parameters['LINEAR_FRICTION'] = 0
mySWFlowProblem.physical_parameters['mannings'] = 0
| Python | 0.000029 | |
def permutationToInteger(perm):
    """Return the Lehmer-code rank of `perm`, a permutation of 0..n-1.

    The rank is the 0-based index of `perm` in the lexicographic ordering of
    all permutations of the same length.

    Bug fixes vs. the original:
      * ``del elts(digit)`` used call parentheses (a SyntaxError); index
        brackets are required.
      * ``elts`` must be a mutable list — elements cannot be deleted from a
        bare ``range`` object on Python 3.
      * ``math`` is imported, which the original omitted.
    """
    import math

    perm_len = len(perm)
    elts = list(range(perm_len))  # symbols not yet consumed, in sorted order
    num = 0
    for i in range(perm_len):
        # The position of the current symbol among the remaining symbols is
        # the i-th Lehmer digit, weighted by (n - i - 1)!.
        digit = elts.index(perm[i])
        num += digit * math.factorial(perm_len - i - 1)
        del elts[digit]
    return num
| Python | 0.000001 | |
6f36d99229bfc33783511c7efb2f90fb50dda570 | Add importing for rhino pre.rhino | sectionproperties/pre/rhino.py | sectionproperties/pre/rhino.py | import pathlib
from typing import List, Union
from shapely.geometry.polygon import Polygon
from rhino_shapely_interop.importers import RhImporter
def load_3dm(r3dm_filepath: Union[pathlib.Path, str], **kwargs) -> List[Polygon]:
    r"""Load a Rhino `.3dm` file and import the single surface planer breps.

    :param r3dm_filepath:
        File path to the rhino `.3dm` file.
    :type r3dm_filepath: pathlib.Path or string
    :param \**kwargs:
        See below.
    :raises RuntimeError:
        A RuntimeError is raised if no polygons are found in the file.
        This is dependent on the keyword arguments.
        Try adjusting the keyword arguments if this error is raised.
    :return:
        List of Polygons found in the file.
    :rtype: List[shapely.geometry.Polygon]

    :Keyword Arguments:
        * *refine_num* (``int, optional``) --
            Bézier curve interpolation number. In Rhino a surface's edges are nurb based curves.
            Shapely does not support nurbs, so the individual Bézier curves are interpolated using straight lines.
            This parameter sets the number of straight lines used in the interpolation.
            Default is 1.
        * *vec1* (``numpy.ndarray, optional``) --
            A 3d vector in the Shapely plane. Rhino is a 3D geometry environment.
            Shapely is a 2D geometric library.
            Thus a 2D plane needs to be defined in Rhino that represents the Shapely coordinate system.
            `vec1` represents the 1st vector of this plane. It will be used as Shapely's x direction.
            Default is [1,0,0].
        * *vec2* (``numpy.ndarray, optional``) --
            Continuing from `vec1`, `vec2` is another vector to define the Shapely plane.
            It must not be [0,0,0] and it's only requirement is that it is any vector in the Shapely plane (but not equal to `vec1`).
            Default is [0,1,0].
        * *plane_distance* (``float, optional``) --
            The distance to the Shapely plane.
            Default is 0.
        * *project* (``boolean, optional``) --
            Controls if the breps are projected onto the plane in the direction of the Shapley plane's normal.
            Default is True.
        * *parallel* (``boolean, optional``) --
            Controls if only the rhino surfaces that have the same normal as the Shapely plane are yielded.
            If true, all non parallel surfaces are filtered out.
            Default is False.
    """
    # (Docstring is now a raw string so the ``\**kwargs`` marker is not an
    # invalid escape sequence.)
    rhi = RhImporter.from_file(str(r3dm_filepath))
    list_polygons = list(rhi.get_planer_brep(**kwargs))
    if not list_polygons:  # idiomatic emptiness test (was `len(...) == 0`)
        raise RuntimeError(
            "No shapely.Polygon objects found. "
            "Consider adjusting the keyword arguments. "
            f"File name: {r3dm_filepath}. "
        )
    return list_polygons
def load_brep_encoding(brep: str, **kwargs) -> Polygon:
    r"""Load an encoded single surface planer brep.

    :param brep:
        Rhino3dm.Brep encoded as a string.
    :type brep: str
    :param \**kwargs:
        See below.
    :raises RuntimeError:
        A RuntimeError is raised if no polygons are found in the encoding.
        This is dependent on the keyword arguments.
        Try adjusting the keyword arguments if this error is raised.
    :return:
        The Polygons found in the encoding string.
    :rtype: shapely.geometry.Polygon

    :Keyword Arguments:
        * *refine_num* (``int, optional``) --
            Bézier curve interpolation number. In Rhino a surface's edges are nurb based curves.
            Shapely does not support nurbs, so the individual Bézier curves are interpolated using straight lines.
            This parameter sets the number of straight lines used in the interpolation.
            Default is 1.
        * *vec1* (``numpy.ndarray, optional``) --
            A 3d vector in the Shapely plane. Rhino is a 3D geometry environment.
            Shapely is a 2D geometric library.
            Thus a 2D plane needs to be defined in Rhino that represents the Shapely coordinate system.
            `vec1` represents the 1st vector of this plane. It will be used as Shapely's x direction.
            Default is [1,0,0].
        * *vec2* (``numpy.ndarray, optional``) --
            Continuing from `vec1`, `vec2` is another vector to define the Shapely plane.
            It must not be [0,0,0] and it's only requirement is that it is any vector in the Shapely plane (but not equal to `vec1`).
            Default is [0,1,0].
        * *plane_distance* (``float, optional``) --
            The distance to the Shapely plane.
            Default is 0.
        * *project* (``boolean, optional``) --
            Controls if the breps are projected onto the plane in the direction of the Shapley plane's normal.
            Default is True.
        * *parallel* (``boolean, optional``) --
            Controls if only the rhino surfaces that have the same normal as the Shapely plane are yielded.
            If true, all non parallel surfaces are filtered out.
            Default is False.
    """
    # NOTE: `from_serialzed_brep` is the upstream library's own (misspelled)
    # method name — do not "correct" it here.
    rhi = RhImporter.from_serialzed_brep(brep)
    geom = list(rhi.get_planer_brep(**kwargs))
    if not geom:  # idiomatic emptiness test; also dropped the placeholder-less f-string below
        raise RuntimeError("No shapely.Polygon objects found for encoded object")
    # NOTE(review): the annotation promises a single Polygon but a list is
    # returned — confirm the intended contract with callers before changing it.
    return geom
dafa0060460a2d4e820fbdafd33e51363bac0259 | Create 01.Mean.py | 01.Python/01.Mean.py | 01.Python/01.Mean.py | import numpy as np
# 4 observations (rows) of 7 values each.
A = np.array([[10,14,11,7,9.5,15,19],[8,9,17,14.5,12,18,15.5],
              [15,7.5,11.5,10,10.5,7,11],[11.5,11,9,12,14,12,7.5]])
B = A.T  # transpose: shape (7, 4)
# Fix: `print B` is Python-2-only statement syntax; the call form prints the
# same thing on Python 2 and also runs on Python 3, matching every other
# print in this script.
print(B)
print(np.mean(B))          # grand mean over all elements
print(np.mean(B,axis=0))   # column means of B (== per-row means of A)
print(np.mean(A,axis=1))   # per-row means of A
| Python | 0 | |
9570da3427121628d4e144c1092da155583a496d | Add Python benchmark | lib/node_modules/@stdlib/math/base/special/asinh/benchmark/python/benchmark.py | lib/node_modules/@stdlib/math/base/special/asinh/benchmark/python/benchmark.py | #!/usr/bin/env python
"""Benchmark asinh."""
import timeit
# Benchmark identification and sizing: `name` labels the TAP output,
# `repeats` is the number of repetitions reported in the summary, and
# `iterations` is the number of timed calls per repetition.
name = "asinh"
repeats = 3
iterations = 1000000
def print_version():
    """Print the TAP version."""
    # TAP (Test Anything Protocol) streams start with a version line.
    tap_version_line = "TAP version 13"
    print(tap_version_line)
def print_summary(total, passing):
    """Print the benchmark summary.
    # Arguments
    * `total`: total number of tests
    * `passing`: number of passing tests
    """
    summary_lines = (
        "#",
        "1.." + str(total),  # TAP plan
        "# total " + str(total),
        "# pass " + str(passing),
        "#",
        "# ok",
    )
    for row in summary_lines:
        print(row)
def print_results(elapsed):
    """Print benchmark results.
    # Arguments
    * `elapsed`: elapsed time (in seconds)
    # Examples
    ``` python
    python> print_results(0.131009101868)
    ```
    """
    rate = iterations / elapsed  # iterations per second
    for row in (
        " ---",
        " iterations: " + str(iterations),
        " elapsed: " + str(elapsed),
        " rate: " + str(rate),
        " ...",
    ):
        print(row)
def benchmark():
    """Run the benchmark and print benchmark results."""
    setup = "from math import asinh; from random import random;"
    stmt = "y = asinh(200.0*random() - 100.0)"
    t = timeit.Timer(stmt, setup=setup)
    print_version()
    # Fix: iterate `repeats` times instead of a hard-coded 3 (which could
    # silently disagree with the TAP plan printed by print_summary), and use
    # range() so the script also runs on Python 3 (xrange is Python-2 only;
    # behaviour on Python 2 is unchanged).
    for i in range(repeats):
        print("# python::" + name)
        elapsed = t.timeit(number=iterations)
        print_results(elapsed)
        print("ok " + str(i+1) + " benchmark finished")
    print_summary(repeats, repeats)
def main():
    """Run the benchmark.

    Entry point used by the ``if __name__ == "__main__"`` guard below.
    """
    benchmark()
if __name__ == "__main__":
main()
| Python | 0.000138 | |
d1ba1a02385581375831fd4b394f68ade4cbb101 | Create RX_TX.py | home/hairygael/RX_TX.py | home/hairygael/RX_TX.py | arduino = Runtime.createAndStart("arduino","Arduino")
arduino.setBoardMega()
arduino.connect("COM7")
arduino1 = Runtime.createAndStart("arduino1","Arduino")
arduino1.setBoardAtmega328()
#connecting arduino1 to arduino Serial1 instead to a COMX
arduino1.connect(arduino,"Serial1")
servo = Runtime.createAndStart("servo","Servo")
servo.attach(arduino1,5)
#attaching procedure take a bit more time to do, wait a little before using it
sleep(1)
servo.moveTo(90)
| Python | 0.000295 | |
c760c3387b6dcf5bd171960a3e64306c7f2519d0 | add a rotating colored triangle | pynodegl-utils/pynodegl_utils/examples/misc.py | pynodegl-utils/pynodegl_utils/examples/misc.py | import math
from pynodegl import Texture, Shader, TexturedShape, Rotate, AnimKeyFrameScalar, Triangle
from pynodegl_utils.misc import scene
@scene()
def triangle(cfg):
    """Scene: an RGB-gradient equilateral triangle spinning about the z-axis."""
    # GLSL ES fragment shader: derive the fragment colour from its texture
    # coordinates (three linear ramps), giving a smooth colour gradient.
    frag_data = '''
#version 100
precision mediump float;
varying vec2 var_tex0_coords;
void main(void)
{
    vec2 c = var_tex0_coords;
    gl_FragColor = vec4(c.y-c.x, 1.0-c.y, c.x, 1.0);
}'''
    # Equilateral triangle inscribed in a circle of radius `a`, centred on
    # the origin (b, c are the x/y offsets of the two lower vertices).
    a = 0.5
    b = a * math.sqrt(3) / 2.0
    c = a * 1/2.
    triangle = Triangle((0, a, 0), (b, -c, 0), (-b, -c, 0))
    s = Shader(fragment_data=frag_data)
    node = TexturedShape(triangle, s, Texture())
    # Animate two full turns (negative direction) about z over the scene's
    # duration.
    node = Rotate(node, axis=(0,0,1))
    node.add_animkf(AnimKeyFrameScalar(0, 0),
                    AnimKeyFrameScalar(cfg.duration, -360*2))
    return node
| Python | 0.000003 | |
3e9fc08e096ddb212cf40a285887b7ed5dd8897b | Fix running coverage for nose tests (PY-14869) | python/helpers/coverage_runner/run_coverage.py | python/helpers/coverage_runner/run_coverage.py | """Coverage.py's main entrypoint."""
import os
import sys

bundled_coverage_path = os.getenv('BUNDLED_COVERAGE_PATH')
if bundled_coverage_path:
    # Temporarily drop the bundled coverage directory from sys.path so the
    # `coverage` import resolves against the remaining entries, then restore
    # the original path list.
    sys_path_backup = sys.path
    sys.path = [p for p in sys.path if p != bundled_coverage_path]
    from coverage.cmdline import main
    sys.path = sys_path_backup
else:
    from coverage.cmdline import main

coverage_file = os.getenv('PYCHARM_COVERAGE_FILE')
# Strip the trailing ".coverage" suffix; the bare prefix is used below to
# derive the .coverage / .syspath.txt / .xml companion file names.
coverage_file = coverage_file[0:-len(".coverage")]
run_cov = os.getenv('PYCHARM_RUN_COVERAGE')
if os.getenv('CREATE_TEMP_COVERAGE_FILE'):
    # Redirect all output files to a fresh temp location and tell the IDE
    # about the old->new mapping via specially formatted LOG lines on stdout.
    line = 'LOG: PyCharm: File mapping:%s\t%s\n'
    import tempfile
    (h, new_cov_file) = tempfile.mkstemp(prefix='pycharm-coverage')
    print(line%(coverage_file + ".coverage", new_cov_file + ".coverage"))
    print(line%(coverage_file + '.syspath.txt', new_cov_file + '.syspath.txt'))
    print(line%(coverage_file + '.xml', new_cov_file + '.xml'))
    coverage_file = new_cov_file + ".cov"
if coverage_file:
    # coverage.py reads its data-file location from this env var.
    os.environ['COVERAGE_FILE'] = coverage_file + ".coverage"
if run_cov:
    # Record cwd and sys.path so the IDE can map measured files back to
    # project sources.
    a_file = open(coverage_file + '.syspath.txt', mode='w')
    a_file.write(os.getcwd()+"\n")
    for path in sys.path: a_file.write(path + "\n")
    a_file.close()
# Split fused "-mmodule" arguments into separate "-m", "module" tokens for
# coverage's command line.
argv = []
for arg in sys.argv:
    if arg.startswith('-m'):
        argv.append('-m')
        argv.append(arg[2:])
    else:
        argv.append(arg)
sys.argv = argv
cwd = os.getcwd()
try:
    main()
finally:
    # Always produce the XML report (restoring cwd first), even if the
    # measured run raised or called sys.exit() — this is the nose-tests fix.
    if run_cov:
        os.chdir(cwd)
        main(["xml", "-o", coverage_file + ".xml", "--ignore-errors"])
# --- Previous revision of the same script (dataset "old_contents" column);
# --- kept verbatim below: no suffix handling and no try/finally around main().
"""Coverage.py's main entrypoint."""
import os
import sys
bundled_coverage_path = os.getenv('BUNDLED_COVERAGE_PATH')
if bundled_coverage_path:
    sys_path_backup = sys.path
    sys.path = [p for p in sys.path if p != bundled_coverage_path]
    from coverage.cmdline import main
    sys.path = sys_path_backup
else:
    from coverage.cmdline import main
coverage_file = os.getenv('PYCHARM_COVERAGE_FILE')
run_cov = os.getenv('PYCHARM_RUN_COVERAGE')
if os.getenv('CREATE_TEMP_COVERAGE_FILE'):
    line = 'LOG: PyCharm: File mapping:%s\t%s\n'
    import tempfile
    (h, new_cov_file) = tempfile.mkstemp(prefix='pycharm-coverage')
    print(line%(coverage_file, new_cov_file))
    print(line%(coverage_file + '.syspath.txt', new_cov_file + '.syspath.txt'))
    print(line%(coverage_file + '.xml', new_cov_file + '.xml'))
    coverage_file = new_cov_file
if coverage_file:
    os.environ['COVERAGE_FILE'] = coverage_file
if run_cov:
    a_file = open(coverage_file + '.syspath.txt', mode='w')
    a_file.write(os.getcwd()+"\n")
    for path in sys.path: a_file.write(path + "\n")
    a_file.close()
argv = []
for arg in sys.argv:
    if arg.startswith('-m'):
        argv.append('-m')
        argv.append(arg[2:])
    else:
        argv.append(arg)
sys.argv = argv
cwd = os.getcwd()
main()
if run_cov:
    os.chdir(cwd)
    main(["xml", "-o", coverage_file + ".xml", "--ignore-errors"])
fd5ba7ad61a8c7c9aad6b3f1404d819ae21085d1 | Add 'calc_pb_flux.py' to calculate the particle background | bin/calc_pb_flux.py | bin/calc_pb_flux.py | #!/usr/bin/env python3
#
# Copyright (c) 2017 Weitian LI <liweitianux@live.com>
# MIT license
"""
Calculate the particle background flux (e.g., 9.5-12.0 keV) of the spectra.
flux = counts / exposure / area
where 'counts' is the total photon counts within the specified energy range;
'area' is the value of the ``BACKSCAL`` stored in the spectrum.
therefore, the output flux has arbitrary unit.
"""
import argparse
from _context import acispy
from acispy.spectrum import Spectrum
def main():
    """Command-line driver: print the particle-background flux of each input
    spectrum."""
    args = _build_parser().parse_args()
    for filepath in args.infile:
        # One banner plus one flux line per input spectrum.
        print("=== %s ===" % filepath)
        spectrum = Spectrum(filepath)
        pb_flux = spectrum.calc_pb_flux(elow=args.elow, ehigh=args.ehigh,
                                        verbose=args.verbose)
        print("flux = %.5g" % pb_flux)


def _build_parser():
    """Build the argument parser for this script."""
    parser = argparse.ArgumentParser(
        description="Calculate the particle background for spectra")
    parser.add_argument("-L", "--energy-low", dest="elow",
                        type=int, default=9500,
                        help="lower energy limit of the particle " +
                             "background [eV] (default: 9500 eV)")
    parser.add_argument("-H", "--energy-high", dest="ehigh",
                        type=int, default=12000,
                        help="upper energy limit of the particle " +
                             "background [eV] (default: 12000 eV)")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
                        help="show verbose information")
    parser.add_argument("infile", nargs="+",
                        help="input spectra")
    return parser
if __name__ == "__main__":
main()
| Python | 0.000024 | |
d03e19c2bbd16e31bbf5a8f893a3ecaa16283b45 | Add tests for soc.modules.gsoc.logic.duplicates. | tests/app/soc/modules/gsoc/logic/test_duplicates.py | tests/app/soc/modules/gsoc/logic/test_duplicates.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for soc.modules.gsoc.logic.duplicates.
"""
__authors__ = [
'"Praveen Kumar" <praveen97uma@gmail.com>',
]
import unittest
from soc.modules.gsoc.logic import duplicates as duplicate_logic
from soc.modules.gsoc.models.proposal_duplicates import GSoCProposalDuplicate
from soc.modules.gsoc.models.proposal_duplicates_status import \
GSoCProposalDuplicatesStatus
from soc.modules.gsoc.models.program import GSoCProgram
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from tests.profile_utils import GSoCProfileHelper
class DuplicatesTest(unittest.TestCase):
  """Tests duplicate detection functions in GSoC.

  Exercises getOrCreateStatusForProgram and deleteAllForProgram from
  soc.modules.gsoc.logic.duplicates against two seeded programs.
  """
  def createGSoCProposalDuplicate(self, student, program):
    """Creates and returns a seeded GSoCProposalDuplicate entity for
    a given student in a given program (is_duplicate defaults to False).
    """
    properties = {'program': program, 'student': student, 'is_duplicate': False}
    proposal_duplicate = seeder_logic.seed(GSoCProposalDuplicate, properties)
    return proposal_duplicate

  def createStudent(self, email, program):
    """Creates and returns a student profile with the given email in program."""
    profile_helper = GSoCProfileHelper(program, dev_test=False)
    profile_helper.createOtherUser(email)
    student = profile_helper.createStudent()
    return student

  def setUp(self):
    """Setup required to test the functions.

    Two different program entities are created with their own set of students
    assigned to them. Some of the students in each program have duplicate
    proposals and some not.
    """
    self.program1 = seeder_logic.seed(GSoCProgram)
    # Create GSoCStudents in program1.
    self.gsoc_students = []
    for i in xrange(5):
      email = 'test%s@example.com' % str(i)
      student = self.createStudent(email, self.program1)
      self.gsoc_students.append(student)
    # Create a GSoCProposalDuplicate entity for all the students
    # in self.gsoc_students for program1.
    self.proposal_duplicates = []
    for student in self.gsoc_students:
      proposal_duplicate = self.createGSoCProposalDuplicate(student,
                                                            self.program1)
      self.proposal_duplicates.append(proposal_duplicate)
    # Create the other program entity.
    self.program2 = seeder_logic.seed(GSoCProgram)
    # Create students in program2.
    self.other_gsoc_students = []
    for i in xrange(5):
      email = 'othertest%s@example.com' % str(i)
      student = self.createStudent(email, self.program2)
      self.other_gsoc_students.append(student)
    # Create a GSoCProposalDuplicate entity for all the students
    # in self.other_gsoc_students for program2.
    self.other_proposal_duplicates = []
    for student in self.other_gsoc_students:
      proposal_duplicate = self.createGSoCProposalDuplicate(student,
                                                            self.program2)
      self.other_proposal_duplicates.append(proposal_duplicate)
    # Create a GSoCProposalDuplicatesStatus entity for program2 only, so the
    # get-or-create test below sees one program with and one without it.
    properties = {'program': self.program2}
    self.gpds = seeder_logic.seed(GSoCProposalDuplicatesStatus, properties)

  def testGetOrCreateStatusForProgram(self):
    """Tests if a ProposalDuplicateStatus entity for a program is created or set.
    """
    # program1 has no ProposalDuplicateStatus entity; check that the entity
    # is created for the program.
    program_entity = self.program1
    actual_pds = duplicate_logic.getOrCreateStatusForProgram(program_entity)
    self.assertEqual(actual_pds.program, program_entity)
    # program2 already has a ProposalDuplicateStatus entity; the existing one
    # must be returned (compared by datastore key).
    program_entity = self.program2
    expected_pds = self.gpds
    actual_pds = duplicate_logic.getOrCreateStatusForProgram(program_entity)
    self.assertEqual(actual_pds.key(), expected_pds.key())

  def testDeleteAllForProgram(self):
    """Tests if all proposal duplicates for a program are deleted.
    """
    # Before deleting: all 5 seeded entities are present (limit=10 > 5).
    q = GSoCProposalDuplicate.all()
    q.filter('program', self.program1)
    q_result = q.fetch(limit=10)
    actual = [entity.key() for entity in q_result]
    expected = [entity.key() for entity in self.proposal_duplicates]
    self.assertEqual(actual, expected)
    # Delete duplicate proposals for program1.
    duplicate_logic.deleteAllForProgram(self.program1)
    q = GSoCProposalDuplicate.all()
    q.filter('program', self.program1)
    actual = q.fetch(limit=10)
    expected = []
    self.assertEqual(actual, expected)
    # Test that duplicate proposals for the other program were not deleted.
    expected = [entity.key() for entity in self.other_proposal_duplicates]
    q = GSoCProposalDuplicate.all()
    q.filter('program', self.program2)
    q_result = q.fetch(limit=10)
    actual = [entity.key() for entity in q_result]
    self.assertEqual(actual, expected)

  def testDeleteAllForProgramNonDupesOnlyIsTrue(self):
    """Tests if only those proposals are deleted which have is_duplicate set
    to false.
    """
    # is_duplicate is set to False by default for all the GSoCProposalDuplicate
    # entities, so all the entities in program1 must be deleted.
    duplicate_logic.deleteAllForProgram(self.program1, non_dupes_only=True)
    q = GSoCProposalDuplicate.all()
    q.filter('program', self.program1)
    q.filter('is_duplicate', False)
    q_result = q.fetch(limit=10)
    expected = []
    actual = [entity.key() for entity in q_result]
    self.assertEqual(actual, expected)
    # Set is_duplicate = True for the first 3 entities in program2 and test
    # that those survive a non_dupes_only deletion.
    for i in xrange(3):
      self.other_proposal_duplicates[i].is_duplicate = True
      self.other_proposal_duplicates[i].put()
    duplicate_logic.deleteAllForProgram(self.program2, non_dupes_only=True)
    q = GSoCProposalDuplicate.all()
    q.filter('program', self.program2)
    q.filter('is_duplicate', False)
    q_result = q.fetch(limit=10)
    expected = []
    actual = [entity.key() for entity in q_result]
    self.assertEqual(actual, expected)
    # Check that entities with is_duplicate=True were not deleted.
    q = GSoCProposalDuplicate.all()
    q.filter('program', self.program2)
    q.filter('is_duplicate', True)
    q_result = q.fetch(limit=10)
    expected = [entity.key() for entity in self.other_proposal_duplicates[:3]]
    actual = [entity.key() for entity in q_result]
    self.assertEqual(actual, expected)
| Python | 0 | |
6fb6e67792085b6ee910f1d0b8ed3e89f15dd60d | add script to datamine the reports via nltk | smelly_london/all_reports_smell_search_final.py | smelly_london/all_reports_smell_search_final.py |
from map import mapping
# walk through the os and get all files
# read each file in tern and go through line by line
# print lines that contain smell and the report name
from os import listdir
import nltk.data
import json
# Terms whose presence marks a sentence as smell-related.
SMELL_WORDS = ['smell', 'stench', 'stink', 'odour', 'sniff', 'effluvium']
# Absolute path to the directory containing the full-text report files.
REPORTS_DIR = '/Users/deborah/Documents/scripts/python_work/project2016/Full Text Online'
# NOTE: a `global` statement at module level is a no-op; kept as-is.
global finalResult
# region -> {year: [matching sentences]} accumulator filled by performAnalysis.
finalResult = {}
def addToDic(d, report, rDate, val):
    """Append `val` to the list stored under `report` in dict `d`, creating
    the list if absent, and return `d`.

    Bug fix: the original called the non-existent method ``setDefault`` —
    Python's dict method is ``setdefault`` — so every call raised
    AttributeError.  (`rDate` is accepted but unused; kept for interface
    compatibility.)
    """
    d.setdefault(report, []).append(val)
    return d
def getFileNames():
    '''Retrieve the names of the report files (entries ending in "txt") found
    in REPORTS_DIR.'''
    return [name for name in listdir(REPORTS_DIR) if name.endswith('txt')]
def processFile(fileName):
    """Return every sentence in the named report that mentions a smell word.

    Bug fix: the original appended a sentence once per matching smell word,
    so a sentence containing e.g. both "smell" and "stench" was recorded
    (and later counted) twice.  Each sentence now appears at most once.
    """
    path = REPORTS_DIR + '/' + fileName
    references = []
    with open(path) as f:
        for line in f:
            for sentence in tokenize(line):
                lowered = sentence.lower()
                if any(word in lowered for word in SMELL_WORDS):
                    references.append(sentence)
    return references
def tokenize(sentence):
    """Split the given text into sentences with NLTK's English Punkt model."""
    # nltk.data.load caches loaded resources by default, so the repeated
    # lookup here is not a fresh unpickle on every call.
    punkt = nltk.data.load('tokenizers/punkt/english.pickle')
    return punkt.tokenize(sentence.strip())
def saveObject(results):
    '''Serialise the results dictionary to processed_results.txt as JSON.'''
    with open('processed_results.txt', 'w') as outfile:
        json.dump(results, outfile)
def performAnalysis(fileName, references):
    '''Record the smelly sentences for one report in `finalResult`.

    File names look like "Acton.1900.b19783358.txt": index 1 is the year and
    index 2 is the report's b-number, which maps to a region via `mapping`.

    Fixes vs. the original: the bare ``except:`` (which would also have
    swallowed KeyboardInterrupt/SystemExit) is narrowed to the KeyError
    raised for unknown b-numbers, and a long run of commented-out dead code
    has been removed.
    '''
    splitReport = fileName.split('.')
    bID = splitReport[2]
    year = splitReport[1]
    try:
        region = mapping[bID]
    except KeyError:
        # Reports whose b-number has no region mapping are skipped silently.
        return
    # One nested {year: references} dict per region.
    finalResult.setdefault(region, {})[year] = references
def main():
    """Scan the report files for smell references and persist the results."""
    fileNames = getFileNames()[:100]  # cap the batch at the first 100 reports
    for name in fileNames:
        references = processFile(name)
        if references:
            performAnalysis(name, references)
    saveObject(finalResult)
if __name__ == '__main__':
main() | Python | 0 | |
def fibonacci_number(n, m, count):
    """Recursively print Fibonacci terms starting from n, m.

    A term is printed while count <= 10 (eleven terms for the initial call
    below); once count exceeds 10 the recursion stops by returning None.
    """
    if count > 10:
        return None
    print(n, end=" ")
    return fibonacci_number(m, n + m, count + 1)

fibonacci_number(0, 1, 0)
| Python | 0.000019 | |
9e546d3c2e2f77f9345947da5c12bc65b936e576 | Add attention layer | models/attention/decoders/attention_layer.py | models/attention/decoders/attention_layer.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from collections import namedtuple
import tensorflow as tf
# template
AttentionDecoderOutput = namedtuple(
"DecoderOutput",
"logits predicted_ids cell_output attention_scores attention_context")
class AttentionLayer(object):
    """Attention layer. This implementation is based on
    https://arxiv.org/abs/1409.0473.
        Bahdanau, Dzmitry, Kyunghyun Cho, and Yoshua Bengio.
        "Neural machine translation by jointly learning to align and translate."
        arXiv preprint arXiv:1409.0473 (2014).
    Args:
        num_units: Number of units used in the attention layer
        attention_type: 'bahdanau' or 'layer_dot'
    """

    def __init__(self, num_units, attention_type='bahdanau'):
        self.num_units = num_units
        self.attention_type = attention_type

    def define(self, encoder_states, current_decoder_state, values, values_length):
        """Computes attention scores and outputs.
        Args:
            encoder_states: The outputs of the encoder and equivalent to `values`.
                This is used to calculate attention scores.
                A tensor of shape `[batch, time, encoder_num_units]` where each
                element in the `time` dimension corresponds to the decoder
                states for that value.
            current_decoder_state: The current state of the decoder.
                This is used to calculate attention scores.
                A tensor of shape `[batch, ...]`
            values: The sequence of encoder outputs to compute attention over.
                A tensor of shape `[batch, time, encoder_num_units]`.
            values_length: An int32 tensor of shape `[batch]` defining
                the sequence length of the attention values.
        Returns:
            A tuple `(attention_weights, attention_context)`.
            `attention_weights` is a vector of length `time` where each element
            is the normalized "score" of the corresponding `inputs` element,
            ex.) [α_{0,j}, α_{1,j}, ..., α_{T_i,j}]
            (T_i: input length, j: time index of output).
            `attention_context` is the final attention layer output
            corresponding to the weighted inputs,
            a tensor of shape `[batch, encoder_num_units]`.
        """
        # Fully connected layers to transform both encoder_states and
        # current_decoder_state into a tensor with `num_units` units.
        # h_j (j: time index of output)
        att_encoder_states = tf.contrib.layers.fully_connected(
            inputs=encoder_states,
            num_outputs=self.num_units,
            activation_fn=None,
            scope="att_encoder_states")
        # s_{i-1} (i: time index of input)
        att_decoder_state = tf.contrib.layers.fully_connected(
            inputs=current_decoder_state,
            num_outputs=self.num_units,
            activation_fn=None,
            scope="att_decoder_state")
        # TODO: Divide self.num_units into encoder_num_units and
        # decoder_num_units
        # NOTE: when the encoder is bidirectional, remember that its number of
        # output units is doubled.

        # Compute attention scores over encoder outputs (energy: e_ij).
        # Bug fix: the original called the non-existent method
        # `self.attention_score(self.num_units, ...)`; the scoring method is
        # `attention_score_func` and it takes only the two projected states.
        scores = self.attention_score_func(att_encoder_states,
                                           att_decoder_state)

        # Replace all scores for padded inputs with tf.float32.min
        num_scores = tf.shape(scores)[1]  # input length
        scores_mask = tf.sequence_mask(
            lengths=tf.to_int32(values_length),
            maxlen=tf.to_int32(num_scores),
            dtype=tf.float32)
        # ex.)
        # tf.sequence_mask([1, 3, 2], 5) = [[True, False, False, False, False],
        #                                   [True, True, True, False, False],
        #                                   [True, True, False, False, False]]
        scores = scores * scores_mask + ((1.0 - scores_mask) * tf.float32.min)

        # Normalize the scores (attention_weights: α_ij (j=0,1,...))
        attention_weights = tf.nn.softmax(scores, name="attention_weights")
        # TODO: Add beta (temperature) in order to smooth output probabilities

        # Calculate the weighted average of the attention inputs
        # according to the scores: c_i = sigma_{j}(α_ij * h_j)
        attention_context = tf.expand_dims(attention_weights, axis=2) * values
        attention_context = tf.reduce_sum(
            attention_context, axis=1, name="attention_context")
        values_depth = values.get_shape().as_list()[-1]  # = encoder_num_units
        attention_context.set_shape([None, values_depth])

        return (attention_weights, attention_context)

    def attention_score_func(self, encoder_states, current_decoder_state):
        """An attention layer that calculates attention scores.
        Args:
            encoder_states: The sequence of encoder outputs,
                a tensor of shape `[batch, time, encoder_num_units]`
            current_decoder_state: The current state of the decoder,
                a tensor of shape `[batch, decoder_num_units]`
        Returns:
            attention_sum: The summation of attention scores (energy: e_ij),
                a tensor of shape `[batch, time, ?]`
        """
        if self.attention_type == 'bahdanau':
            # TODO: tf.variable_scope()
            v_att = tf.get_variable("v_att",
                                    shape=[self.num_units],
                                    dtype=tf.float32)
            # Batch- and time-wise dot product with a learned vector:
            # e_ij = v_a . tanh(W_a * s_{i-1} + U_a * h_j)
            # ([2] is the reduction axis: sum over the unit dimension).
            attention_sum = tf.reduce_sum(
                v_att * tf.tanh(tf.expand_dims(current_decoder_state, axis=1) +
                                encoder_states), [2])
        elif self.attention_type == 'layer_dot':
            # Batch- and time-wise dot product (no learned parameters).
            attention_sum = tf.reduce_sum(
                tf.expand_dims(current_decoder_state, axis=1) *
                encoder_states, [2])
        else:
            # TODO: Add other versions
            raise ValueError('attention_type is "bahdanau" or "layer_dot".')
        return attention_sum
| Python | 0.000001 | |
0383796cb681404e6c4794f1321ad62a9945b572 | add script to output all leagues of users | checkLeagues.py | checkLeagues.py | import settings as settings
import funcs
accountMaps = funcs.readAccountsFile("accounts.txt")
def getLeagueForAccountMap(accountMap):
    """Look up the ladder league for one account mapping.

    Returns a (reddit name, league) pair.
    """
    region = settings.regions[accountMap['region']]
    return (accountMap['redditName'], funcs.getLeague(region, accountMap['bnet']))
newLeagues = map(getLeagueForAccountMap, accountMaps)
print newLeagues
| Python | 0 | |
c20cde04d1a5a2939e7f5c0953725fd043c5b849 | add media migration | molo/core/migrations/0067_media_migration.py | molo/core/migrations/0067_media_migration.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def convert_media_to_molo_media(apps, schema_editor):
    """Copy each wagtailmedia ``Media`` object into a ``MoloMedia`` object
    and repoint ``media`` blocks in article bodies at the new objects.

    NOTE(review): this imports the live models rather than using
    ``apps.get_model()``, so the migration depends on the models as they
    are defined today -- confirm that is acceptable before they change.
    """
    from molo.core.models import MoloMedia, ArticlePage
    from wagtailmedia.models import Media
    for media in Media.objects.all():
        new_media = MoloMedia.objects.create(
            title=media.title, file=media.file, duration=media.duration,
            type=media.type, width=media.width,
            height=media.height, thumbnail=media.thumbnail)
        for article in ArticlePage.objects.all():
            for block in article.body:
                # Compare with == rather than "is": block_type is a str and
                # the ids are ints, so the original identity checks only
                # matched by accident of CPython interning and silently
                # skipped valid blocks.
                if block.block_type == 'media' and block.value == media.id:
                    block.value = new_media.id
            article.save()
class Migration(migrations.Migration):
    # Must run after the custom media model exists so MoloMedia is
    # available to the data migration.
    dependencies = [
        ('core', '0066_add_custom_media_model'),
    ]
    # Data migration only; no reverse function is supplied, so this
    # migration cannot be unapplied.
    operations = [
        migrations.RunPython(convert_media_to_molo_media),
    ]
| Python | 0.000001 | |
77b6c86359376af5eb8de63ae89d9316776b26bc | Add missing migration | tracpro/polls/migrations/0034_auto_20170323_1315.py | tracpro/polls/migrations/0034_auto_20170323_1315.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    dependencies = [
        ('polls', '0033_auto_20170307_1338'),
    ]
    # Auto-generated AlterField: relabels PollRun.region as "panel" and
    # updates its help text. Appears to be metadata-only (no column
    # change) -- confirm with the previous field definition.
    operations = [
        migrations.AlterField(
            model_name='pollrun',
            name='region',
            field=models.ForeignKey(blank=True, to='groups.Region', help_text='Panel where the poll was conducted.', null=True, verbose_name='panel'),
        ),
    ]
| Python | 0.000008 | |
255c7ff91bc4918ce13d32cba2b871e3d0befad8 | revert that url change | polio/urls.py | polio/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # Site root.
    url(r'^$', 'polio.views.home', name='home'),
    # Datapoints app plus its indicator/region sub-URLconfs, each under
    # its own namespace.
    url(r'^datapoints/', include('datapoints.app_urls.urls', namespace="datapoints")),
    url(r'^datapoints/indicators/', include('datapoints.app_urls.indicator_urls', namespace="indicators")),
    url(r'^datapoints/regions/', include('datapoints.app_urls.region_urls', namespace="regions")),
    # Django admin site.
    url(r'^admin/', include(admin.site.urls)),
)
| from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'polio.views.home', name='home'),
url(r'^uf04/datapoints/', include('datapoints.app_urls.urls', namespace="datapoints")),
url(r'^uf04/datapoints/indicators/', include('datapoints.app_urls.indicator_urls', namespace="indicators")),
url(r'^uf04/datapoints/regions/', include('datapoints.app_urls.region_urls', namespace="regions")),
url(r'^uf04/admin/', include(admin.site.urls)),
)
| Python | 0.000005 |
5f12ada7fe0ddb44274e18decbaea0d05ab4471f | Solve Code Fights lineup problem | CodeFights/lineUp.py | CodeFights/lineUp.py | #!/usr/local/bin/python
# Code Fights Lineup Problem
def lineUp(commands):
    """Count the commands after which the whole line faces the same way.

    Each 'L' or 'R' flips the parity of half-turns made so far ('A' keeps
    it); everyone is aligned exactly when that parity is even.
    """
    offsets = {"L": 1, "A": 0, "R": -1}
    parity = 0
    aligned_count = 0
    for command in commands:
        parity = (parity + offsets[command]) % 2
        if parity == 0:
            aligned_count += 1
    return aligned_count
def main():
    """Run lineUp against the sample cases and report each outcome."""
    cases = [
        ("LLARL", 3),
        ("RLR", 1),
        ("", 0),
        ("L", 0),
        ("A", 1),
        ("AAAAAAAAAAAAAAA", 15),
        ("RRRRRRRRRRLLLLLLLLLRRRRLLLLLLLLLL", 16),
        ("AALAAALARAR", 5),
    ]
    for commands, expected in cases:
        actual = lineUp(commands)
        if actual == expected:
            print("PASSED: lineUp({}) returned {}"
                  .format(commands, actual))
        else:
            print("FAILED: lineUp({}) returned {}, answer: {}"
                  .format(commands, actual, expected))
if __name__ == '__main__':
main()
| Python | 0.0007 | |
8deb0dc2743d1d85899cb636b88ed831c05838a9 | Make machine action button translatable | DiscoverUM3Action.py | DiscoverUM3Action.py | from cura.MachineAction import MachineAction
from UM.Application import Application
from PyQt5.QtCore import pyqtSignal, pyqtProperty, pyqtSlot
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
class DiscoverUM3Action(MachineAction):
    """Machine action that discovers networked printers via the
    JediWifiPrintingPlugin and stores the chosen printer's key on the
    active machine."""
    def __init__(self):
        # Second argument is the translated label shown on the
        # machine-action button.
        super().__init__("DiscoverUM3Action", catalog.i18nc("@action","Connect via Network"))
        self._qml_url = "DiscoverUM3Action.qml"
        self._network_plugin = None
    # Emitted whenever the set of discovered printers may have changed;
    # also notifies the foundDevices property below.
    printerDetected = pyqtSignal()
    @pyqtSlot()
    def startDiscovery(self):
        # Lazily resolve the network output-device plugin the first time
        # discovery is requested.
        if not self._network_plugin:
            self._network_plugin = Application.getInstance().getOutputDeviceManager().getOutputDevicePlugin("JediWifiPrintingPlugin")
            self._network_plugin.addPrinterSignal.connect(self._onPrinterAdded)
            self.printerDetected.emit()
    def _onPrinterAdded(self, *args):
        # Forward the plugin's add-printer signal to QML as printerDetected.
        self.printerDetected.emit()
    @pyqtProperty("QVariantList", notify = printerDetected)
    def foundDevices(self):
        # Printers discovered so far; empty until discovery has started.
        if self._network_plugin:
            printers = self._network_plugin.getPrinters()
            return [printers[printer] for printer in printers]
        else:
            return []
    @pyqtSlot(str)
    def setKey(self, key):
        # Persist the selected printer's key in the active machine's
        # metadata (update if present, otherwise add).
        global_container_stack = Application.getInstance().getGlobalContainerStack()
        if global_container_stack:
            if "key" in global_container_stack.getMetaData():
                global_container_stack.setMetaDataEntry("key", key)
            else:
                global_container_stack.addMetaDataEntry("key", key)
        if self._network_plugin:
            # Ensure that the connection states are refreshed.
            self._network_plugin.reCheckConnections()
| from cura.MachineAction import MachineAction
from UM.Application import Application
from PyQt5.QtCore import pyqtSignal, pyqtProperty, pyqtSlot
class DiscoverUM3Action(MachineAction):
def __init__(self):
super().__init__("DiscoverUM3Action", "Discover printers")
self._qml_url = "DiscoverUM3Action.qml"
self._network_plugin = None
printerDetected = pyqtSignal()
@pyqtSlot()
def startDiscovery(self):
if not self._network_plugin:
self._network_plugin = Application.getInstance().getOutputDeviceManager().getOutputDevicePlugin("JediWifiPrintingPlugin")
self._network_plugin.addPrinterSignal.connect(self._onPrinterAdded)
self.printerDetected.emit()
def _onPrinterAdded(self, *args):
self.printerDetected.emit()
@pyqtProperty("QVariantList", notify = printerDetected)
def foundDevices(self):
if self._network_plugin:
printers = self._network_plugin.getPrinters()
return [printers[printer] for printer in printers]
else:
return []
@pyqtSlot(str)
def setKey(self, key):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
if "key" in global_container_stack.getMetaData():
global_container_stack.setMetaDataEntry("key", key)
else:
global_container_stack.addMetaDataEntry("key", key)
if self._network_plugin:
# Ensure that the connection states are refreshed.
self._network_plugin.reCheckConnections()
| Python | 0.000016 |
c486b8df5861fd883b49ea8118d40d73f5b4e7b8 | Add download apikey test case | tardis/tardis_portal/tests/test_download_apikey.py | tardis/tardis_portal/tests/test_download_apikey.py | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test import TestCase
from tastypie.test import ResourceTestCase
from django.test.client import Client
from django.conf import settings
from django.contrib.auth.models import User
class ApiKeyDownloadTestCase(ResourceTestCase):
    """Tests the API-key download view: anonymous requests are redirected
    to login, and authenticated users receive their key as a download."""
    def setUp(self):
        # create a test user
        self.username = 'test'
        self.email = 'test@example.com'
        self.password = 'passw0rd'
        self.user = User.objects.create_user(username=self.username,
                                             email=self.email,
                                             password=self.password)
    def tearDown(self):
        self.user.delete()
    def testView(self):
        download_api_key_url = reverse('tardis.tardis_portal.views.download_api_key')
        client = Client()
        # Expect redirect to login
        response = client.get(download_api_key_url)
        self.assertEqual(response.status_code, 302)
        # Login as user
        login = client.login(username=self.username, password=self.password)
        self.assertTrue(login)
        response = client.get(download_api_key_url)
        self.assertEqual(response['Content-Disposition'],
                         'inline; filename="{0}.key"'.format(self.username))
        self.assertEqual(response.status_code, 200)
        # Collect the streamed body with join instead of repeated += .
        response_content = "".join(response.streaming_content)
        self.assertEqual(response_content,
                         # BUG FIX: was `user.api_key.key` -- `user` is not
                         # defined in this scope and raised a NameError.
                         self.create_apikey(username=self.username,
                                            api_key=self.user.api_key.key))
| Python | 0 | |
601636b75595031ef9478297f9a52132a9bff9eb | Add herwig3 (#19406) | var/spack/repos/builtin/packages/herwig3/package.py | var/spack/repos/builtin/packages/herwig3/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import shutil
class Herwig3(AutotoolsPackage):
    """Herwig is a multi-purpose particle physics event generator."""
    homepage = "https://herwig.hepforge.org"
    url = "https://herwig.hepforge.org/downloads/Herwig-7.2.1.tar.bz2"
    version('7.2.1', sha256='d4fff32f21c5c08a4b2e563c476b079859c2c8e3b78d853a8a60da96d5eea686')
    # Autotools toolchain, needed because force_autoreconf is set below.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    # Physics libraries linked into the build.
    depends_on('lhapdf', type='link')
    depends_on('thepeg@2.2.1', when='@7.2.1', type='link')
    depends_on('boost', type='link')
    depends_on('python', type=('build', 'run'))
    depends_on('gsl', type='link')
    depends_on('fastjet', type='link')
    depends_on('vbfnlo@3:', type='link')
    depends_on('madgraph5amc', type='link')
    depends_on('njet', type='link')
    # py-gosam only supports Python 2.7 (matches the configure_args guard).
    depends_on('py-gosam', type='link', when='^python@2.7:2.7.99')
    depends_on('gosam-contrib', type='link')
    depends_on('openloops', type='link')
    force_autoreconf = True
    def autoreconf(self, spec, prefix):
        # Regenerate the build system from the shipped autotools sources.
        autoreconf('--install', '--verbose', '--force')
    @run_before('build')
    def install_lhapdfsets(self):
        # Download the PDF sets the build needs into a temporary directory
        # inside the prefix; removed again after install (see below).
        # NOTE(review): "MHT2014lo68cl" looks like a typo for
        # "MMHT2014lo68cl" -- verify against the LHAPDF set index.
        mkdirp(self.prefix.tmppdfsets)
        lhapdf = which('lhapdf')
        if self.spec.satisfies('@7.2.0:'):
            lhapdf("--pdfdir=" + self.prefix.tmppdfsets,
                   # "--source=/cvmfs/sft.cern.ch/lcg/external/lhapdfsets/current",
                   # "--listdir=/cvmfs/sft.cern.ch/lcg/external/lhapdfsets/current",
                   "install", "MHT2014lo68cl", "MMHT2014nlo68cl",
                   "CT14lo", "CT14nlo")
    def configure_args(self):
        # Point configure at every linked dependency's install prefix.
        args = ['--with-gsl=' + self.spec['gsl'].prefix,
                '--with-thepeg=' + self.spec['thepeg'].prefix,
                '--with-thepeg-headers=' + self.spec['thepeg'].prefix.include,
                '--with-fastjet=' + self.spec['fastjet'].prefix,
                '--with-boost=' + self.spec['boost'].prefix,
                '--with-madgraph=' + self.spec['madgraph5amc'].prefix,
                '--with-openloops=' + self.spec['openloops'].prefix,
                '--with-gosam-contrib=' + self.spec['gosam-contrib'].prefix,
                '--with-njet=' + self.spec['njet'].prefix,
                '--with-vbfnlo=' + self.spec['vbfnlo'].prefix]
        if self.spec.satisfies('^python@2.7:2.7.99'):
            args.append('--with-gosam=' + self.spec['gosam'].prefix)
        return args
    def flag_handler(self, name, flags):
        # Inject Fortran/C/C++ flags via the build environment (middle slot
        # of the returned triple); other flags stay on the command line.
        if name == 'fcflags':
            flags.append('-std=legacy')
            return (None, flags, None)
        elif name in ['cflags', 'cxxflags', 'cppflags']:
            return (None, flags, None)
        return (flags, None, None)
    def setup_build_environment(self, env):
        thepeg_home = self.spec['thepeg'].prefix
        env.prepend_path('LD_LIBRARY_PATH', thepeg_home.lib.ThePEG)
        # Let the build find the PDF sets staged by install_lhapdfsets.
        env.set('LHAPDF_DATA_PATH', self.prefix.tmppdfsets)
        env.set('HERWIGINCLUDE', '-I' + self.prefix.include)
        env.set('BOOSTINCLUDE', '-I' + self.spec['boost'].prefix.include)
        env.set('HERWIGINSTALL', self.prefix)
    def build(self, spec, prefix):
        # MatrixElement/FxFx is built (and installed) as a separate target.
        make()
        with working_dir('MatrixElement/FxFx'):
            make()
    def install(self, spec, prefix):
        make('install')
        with working_dir('MatrixElement/FxFx'):
            make('install')
    @run_after('install')
    def remove_lhapdfsets(self):
        # The staged PDF sets were only needed at build time.
        shutil.rmtree(self.prefix.tmppdfsets)
| Python | 0 | |
af67d052fc78e56ac7f934f4c90f00d2eb097bb3 | Add StarFinder tests | photutils/detection/tests/test_starfinder.py | photutils/detection/tests/test_starfinder.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for StarFinder.
"""
from astropy.modeling.models import Gaussian2D
from astropy.tests.helper import catch_warnings
import numpy as np
import pytest
from ..starfinder import StarFinder
from ...datasets import make_100gaussians_image
from ...utils.exceptions import NoDetectionsWarning
try:
import scipy # noqa
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
DATA = make_100gaussians_image()
y, x = np.mgrid[0:25, 0:25]
g = Gaussian2D(1, 12, 12, 3, 2, theta=np.pi / 6.)
PSF = g(x, y)
@pytest.mark.skipif('not HAS_SCIPY')
class TestStarFinder:
    """Behavioral tests for StarFinder against the synthetic 100-Gaussians
    image (module-level DATA) and a Gaussian PSF kernel (PSF)."""
    def test_starfind(self):
        # A higher threshold must detect no more sources than a lower one.
        finder1 = StarFinder(10, PSF)
        finder2 = StarFinder(30, PSF)
        tbl1 = finder1(DATA)
        tbl2 = finder2(DATA)
        assert len(tbl1) > len(tbl2)
    def test_inputs(self):
        # Invalid constructor arguments are rejected up front.
        with pytest.raises(ValueError):
            StarFinder(10, PSF, min_separation=-1)
        with pytest.raises(ValueError):
            StarFinder(10, PSF, brightest=-1)
        with pytest.raises(ValueError):
            StarFinder(10, PSF, brightest=3.1)
    def test_nosources(self):
        # An impossibly high threshold returns None and warns.
        with catch_warnings(NoDetectionsWarning) as warning_lines:
            finder = StarFinder(100, PSF)
            tbl = finder(DATA)
            assert tbl is None
            assert 'No sources were found.' in str(warning_lines[0].message)
    def test_min_separation(self):
        # A large minimum separation prunes close neighbors.
        finder1 = StarFinder(10, PSF, min_separation=0)
        finder2 = StarFinder(10, PSF, min_separation=50)
        tbl1 = finder1(DATA)
        tbl2 = finder2(DATA)
        assert len(tbl1) > len(tbl2)
    def test_peakmax(self):
        # peakmax caps the accepted peak value; an unreachable cap warns
        # with a different message than the no-detection case.
        finder1 = StarFinder(10, PSF, peakmax=None)
        finder2 = StarFinder(10, PSF, peakmax=50)
        tbl1 = finder1(DATA)
        tbl2 = finder2(DATA)
        assert len(tbl1) > len(tbl2)
        with catch_warnings(NoDetectionsWarning) as warning_lines:
            starfinder = StarFinder(10, PSF, peakmax=5)
            tbl = starfinder(DATA)
            assert tbl is None
            assert ('Sources were found, but none pass'
                    in str(warning_lines[0].message))
    def test_brightest(self):
        # brightest=k keeps the k brightest sources, sorted by flux.
        finder = StarFinder(10, PSF, brightest=10)
        tbl = finder(DATA)
        assert len(tbl) == 10
        fluxes = tbl['flux']
        assert fluxes[0] == np.max(fluxes)
        finder = StarFinder(40, PSF, peakmax=120)
        tbl = finder(DATA)
        assert len(tbl) == 1
    def test_mask(self):
        # Masked rows (y < 100) must contribute no detections.
        starfinder = StarFinder(10, PSF)
        mask = np.zeros(DATA.shape, dtype=bool)
        mask[0:100] = True
        tbl1 = starfinder(DATA)
        tbl2 = starfinder(DATA, mask=mask)
        assert len(tbl1) > len(tbl2)
        assert min(tbl2['ycentroid']) > 100
| Python | 0 | |
72203e529f083cbc9427b02348cc178e4443031c | Add new package: libuser (#18916) | var/spack/repos/builtin/packages/libuser/package.py | var/spack/repos/builtin/packages/libuser/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libuser(AutotoolsPackage):
    """A user and group account administration library."""
    homepage = "https://pagure.io/libuser"
    url = "http://releases.pagure.org/libuser/libuser-0.62.tar.xz"
    # sha256 checksums of the release tarballs.
    version('0.62', sha256='a58ff4fabb01a25043b142185a33eeea961109dd60d4b40b6a9df4fa3cace20b')
    version('0.61', sha256='0a114a52446e12781e2ffdf26f59df0d14e7809c7db5e551d3cf61c4e398751d')
    version('0.60', sha256='b1f73408ebfee79eb01a47c5879a2cdef6a00b75ee24870de7df1b816ff483eb')
    depends_on('glib')
    depends_on('linux-pam')
    depends_on('popt')
    def setup_run_environment(self, env):
        # The package's administration tools live under sbin/, which is
        # not on PATH by default.
        env.prepend_path('PATH', self.prefix.sbin)
| Python | 0 | |
bef4f7e01e048280680287ed3ac27ec3b2464859 | Add thin setup.py | thin.setup.py | thin.setup.py | #!/usr/bin/env python
'''
The setup script for salt
'''
# For Python 2.5. A no-op on 2.6 and above.
from __future__ import with_statement
import os
import sys
from distutils.cmd import Command
from distutils.command.clean import clean
from distutils.sysconfig import get_python_lib, PREFIX
from distutils.core import setup
# Change to salt source's directory prior to running any command
try:
setup_dirname = os.path.dirname(__file__)
except NameError:
# We're most likely being frozen and __file__ triggered this NameError
# Let's work around that
setup_dirname = os.path.dirname(sys.argv[0])
if setup_dirname != '':
os.chdir(setup_dirname)
salt_version = os.path.join(
os.path.abspath(setup_dirname), 'salt', 'version.py'
)
salt_reqs = os.path.join(
os.path.abspath(setup_dirname), 'requirements.thin.txt'
)
exec(compile(open(salt_version).read(), salt_version, 'exec'))
class TestCommand(Command):
    """distutils command that builds the package and then runs the test
    suite (tests/runtests.py) against the freshly built tree."""
    description = 'Run tests'
    user_options = [
        ('runtests-opts=', 'R', 'Command line options to pass to runtests.py')
    ]
    def initialize_options(self):
        # No extra runtests.py arguments unless -R/--runtests-opts given.
        self.runtests_opts = None
    def finalize_options(self):
        pass
    def run(self):
        from subprocess import Popen
        self.run_command('build')
        build_cmd = self.get_finalized_command('build_ext')
        runner = os.path.abspath('tests/runtests.py')
        test_cmd = sys.executable + ' {0}'.format(runner)
        if self.runtests_opts:
            test_cmd += ' {0}'.format(self.runtests_opts)
        print('running test')
        # Run from the build directory so the built package is imported.
        test_process = Popen(
            test_cmd, shell=True,
            stdout=sys.stdout, stderr=sys.stderr,
            cwd=build_cmd.build_lib
        )
        test_process.communicate()
        # Propagate the suite's exit status to the caller.
        sys.exit(test_process.returncode)
class Clean(clean):
    """Extends distutils' clean to also delete compiled bytecode files
    left in the salt/ and tests/ source trees."""
    def run(self):
        clean.run(self)
        for subdir in ('salt', 'tests'):
            root = os.path.join(os.path.dirname(__file__), subdir)
            for dirpath, _subdirs, filenames in os.walk(root):
                for name in filenames:
                    if name.endswith(('.pyc', '.pyo')):
                        os.remove(os.path.join(dirpath, name))
install_version_template = '''\
# This file was auto-generated by salt's setup on \
{date:%A, %d %B %Y @ %H:%m:%S UTC}.
__version__ = {version!r}
__version_info__ = {version_info!r}
'''
NAME = 'salt-thin'
VER = __version__
DESC = ('Portable, distributed, remote execution and '
'configuration management system')
mod_path = os.path.join(get_python_lib(), 'salt/modules')
if 'SYSCONFDIR' in os.environ:
etc_path = os.environ['SYSCONFDIR']
else:
etc_path = os.path.join(os.path.dirname(PREFIX), 'etc')
with open(salt_reqs) as f:
lines = f.read().split('\n')
requirements = [line for line in lines if line]
setup_kwargs = {'name': NAME,
'version': VER,
'description': DESC,
'author': 'Thomas S Hatch',
'author_email': 'thatch45@gmail.com',
'url': 'http://saltstack.org',
'cmdclass': {
'test': TestCommand,
'clean': Clean,
},
'classifiers': ['Programming Language :: Python',
'Programming Language :: Cython',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
('License :: OSI Approved ::'
' Apache Software License'),
'Operating System :: POSIX :: Linux',
'Topic :: System :: Clustering',
'Topic :: System :: Distributed Computing',
],
'packages': ['salt',
'salt.cli',
'salt.ext',
'salt.auth',
'salt.wheel',
'salt.tops',
'salt.grains',
'salt.modules',
'salt.pillar',
'salt.renderers',
'salt.returners',
'salt.runners',
'salt.states',
'salt.fileserver',
'salt.search',
'salt.output',
'salt.utils',
],
'package_data': {'salt.modules': ['rh_ip/*.jinja']},
# Required for esky builds
'install_requires': requirements,
# The dynamic module loading in salt.modules makes this
# package zip unsafe. Required for esky builds
'zip_safe': False
}
setup_kwargs['scripts'] = ['scripts/salt-call',]
if __name__ == '__main__':
setup(**setup_kwargs)
| Python | 0.000001 | |
ac09970129df9c5292344287b04a1be143fac681 | add diag openmp | tests/examples/openmp/diagnostics.py | tests/examples/openmp/diagnostics.py | # coding: utf-8
import numpy as np
from matplotlib import pyplot as plt
def _plot_bar_chart(values, labels, ylabel, title, filename):
    """Render one single-series 'OpenMP' bar chart and save it to *filename*.

    Factored out of matrix_product(), which previously duplicated this
    plotting code verbatim for each figure.
    """
    fig, ax = plt.subplots()
    index = np.arange(len(values))
    bar_width = 0.2
    opacity = 0.4
    plt.bar(index, values, bar_width,
            alpha=opacity,
            color='b',
            label='OpenMP')
    plt.xlabel('Number of Processors')
    plt.ylabel(ylabel)
    plt.title(title)
    plt.xticks(index + bar_width / 2, labels)
    plt.legend()
    plt.tight_layout()
    plt.savefig(filename)
    plt.clf()


def matrix_product():
    """Plot measured CPU times and the derived speedups for the OpenMP
    matrix-product benchmark, saving both charts as PNG files.

    NOTE(review): the first chart is titled 'Weak scaling' but the data
    looks like one problem size over varying processor counts (strong
    scaling) -- confirm the intended label.
    """
    procs = [1, 4, 8, 16, 28]
    times = [1194.849, 305.231, 69.174, 37.145, 22.731]
    _plot_bar_chart(times, [str(p) for p in procs],
                    'CPU time', 'Weak scaling',
                    "matrix_product_scalability.png")
    # Speedup relative to the single-processor run.
    speedup = [times[0] / t for t in times[1:]]
    _plot_bar_chart(speedup, [str(p) for p in procs[1:]],
                    'Speedup', 'Speedup',
                    "matrix_product_speedup.png")
matrix_product()
| Python | 0.000146 | |
3da13d9597b49a7d929dd84806d1c10b99cf8bea | Create yadisk.py | cogs/utils/api/yadisk.py | cogs/utils/api/yadisk.py | import json
import requests
__version__ = '0.1.2-dev'
USER_AGENT = 'pycopy/{}'.format(__version__)
BASE_URL = 'https://api.copy.com'
AUTH_URL = BASE_URL + '/auth_user' # TODO: should use /rest
OBJECTS_URL = BASE_URL + '/list_objects' # TODO: should use /rest
DOWNLOAD_URL = BASE_URL + '/download_object' # TODO: should use /rest
class Copy(object):
    """Minimal Copy.com API client: authenticates a shared session and
    exposes file listing / download helpers."""
    def __init__(self, username, password):
        # A single requests session carries the API headers (and, after
        # authenticate(), the auth token) on every call.
        self.session = requests.session()
        self.session.headers.update({'X-Client-Type': 'api',
                                     'X-Api-Version': '1',
                                     'User-Agent': USER_AGENT, })
        self.authenticate(username, password)
    def _get(self, url, *args, **kwargs):
        # Thin wrapper over session.get.
        return self.session.get(url, *args, **kwargs)
    def _post(self, url, data, *args, **kwargs):
        # The API expects the JSON payload form-encoded under a 'data' key.
        return self.session.post(url, {'data': json.dumps(data), }, *args,
                                 **kwargs)
    def authenticate(self, username, password):
        """Log in and attach the auth token to the session headers.

        Raises ValueError if the response carries no auth token.
        """
        response = self._post(AUTH_URL,
                              {'username': username, 'password': password, })
        json_response = response.json()
        if 'auth_token' not in json_response:
            raise ValueError("Error while authenticating")
        self.user_data = json_response
        self.auth_token = json_response['auth_token']
        self.session.headers.update({'X-Authorization': self.auth_token, })
    def list_files(self, dir_path):
        """Return the base names of all plain files under *dir_path*,
        following the paginated listing via 'list_watermark'."""
        file_list = []
        list_wtrmark = False
        while (True):
            response = self._post(OBJECTS_URL, {'path': dir_path, 'list_watermark': list_wtrmark, })
            for file in response.json()['children']:
                if file['type'] == 'file':
                    file_list.append(file['path'].split("/")[-1])
                    #print(file_list[-1])
            # The watermark returned with this page requests the next page.
            list_wtrmark = response.json()['list_watermark']
            #print(list_wtrmark)
            #print(response.json())
            if (response.json()['more_items'] == '0'):
                #print('break')
                break
        return file_list
    def direct_link(self, file_path):
        # Resolve a file path to its direct download URL via the metadata
        # endpoint.
        object_url = BASE_URL + '/rest/meta/copy/' + file_path
        response = self.session.get(object_url)
        return response.json()['url']
    def get_file(self, file_path):
        """Return a streaming file-like object for *file_path*.

        NOTE(review): the direct_link() result is unused here -- the
        download goes through DOWNLOAD_URL instead. Confirm whether the
        extra metadata round-trip is needed at all.
        """
        url = self.direct_link(file_path)
        r = self._post(DOWNLOAD_URL, {'path': file_path}, stream=True)
        r.raw.decode_content = True
        return r.raw
    def dwnload_file(self, file_path):
        """Download *file_path* into tmp_uploads/ and return the local
        file name. (Misspelled name kept -- callers use this spelling.)"""
        url = self.direct_link(file_path)
        local_filename = "tmp_uploads/" + url.split('/')[-1]
        r = self._get(url, stream=True)
        with open(local_filename, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
                    #f.flush() #commented by recommendation from J.F.Sebastian
        return local_filename
    def get_headers_str(self):
        # Render the current session headers as "Key: value" CRLF lines.
        headers_str = ""
        for key, value in self.session.headers.items():
            headers_str += "{}: {}\r\n".format(key, value)
        return headers_str
| Python | 0.000002 | |
52b4faf9ea670cf6c73fffa2e5f6430f57c79a74 | Create GDAL_NDVIoriginal.py | GDAL_NDVIoriginal.py | GDAL_NDVIoriginal.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 3 11:56:48 2011
The main program. The program takes 3 files as input:
#. The red band (any GDAL-compatible format will do)
#. The near-infrared band (again, as above)
#. An output file name
Additionally, one can specify the output format. By default, we use GeoTIFF.
@author: Jose Gómez-Dans (UCL/NCEO) - j.gomez-dans@ucl.ac.uk
"""
import os
import sys
import optparse
import numpy as np
from osgeo import gdal
# this allows GDAL to throw Python Exceptions
gdal.UseExceptions()
try:
src_ds = gdal.Open( '/home//leandro//GINF//2226813_2011-07-16T143934_RE5_3A-NAC_7033379_139849.tif' )
except RuntimeError, e:
print 'Unable to open .tif'
print e
sys.exit(1)
bandcollect = []
print "[ RASTER BAND COUNT ]: ", src_ds.RasterCount
for band in range( src_ds.RasterCount ):
band += 1
print "[ GETTING BAND ]: ", band
srcband = src_ds.GetRasterBand(band)
if srcband is None:
continue
stats = srcband.GetStatistics( True, True )
if stats is None:
continue
print "[ STATS ] = Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] )
bandcollect.append(srcband)
print bandcollect
c=0
for corre in bandcollect:
print "corre:"
print corre
srcband = bandcollect[c]
stats = srcband.GetStatistics( True, True )
if stats is None:
continue
print "[ STATS ] = Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] )
print c
c+=1
print "bandcolect"
print bandcollect
bred = bandcollect[3]
bnir = bandcollect[5]
ndvicalc = calculate_ndvi(bred,bnir)
print ndvicalc
def calculate_ndvi ( red_filename, nir_filename ):
"""
A function to calculate the Normalised Difference Vegetation Index
from red and near infrarred reflectances. The reflectance data ought to
be present on two different files, specified by the varaibles
`red_filename` and `nir_filename`. The file format ought to be
recognised by GDAL
"""
g_red = gdal.Open ( red_filename )
red = g_red.ReadAsArray()
g_nir = gdal.Open ( nir_filename )
nir = g_nir.ReadAsArray()
if ( g_red.RasterXSize != g_nir.RasterXSize ) or \
( g_red.RasterYSize != g_nir.RasterYSize ):
print "ERROR: Input datasets do't match!"
print "\t Red data shape is %dx%d" % ( red.shape )
print "\t NIR data shape is %dx%d" % ( nir.shape )
sys.exit ( -1 )
passer = np.logical_and ( red > 1, nir > 1 )
ndvi = np.where ( passer, (1.*nir - 1.*red ) / ( 1.*nir + 1.*red ), -999 )
return ndvi
#def save_raster ( output_name, raster_data, dataset, driver="GTiff" ):
#"""
#A function to save a 1-band raster using GDAL to the file indicated
#by ``output_name``. It requires a GDAL-accesible dataset to collect
#the projection and geotransform.
#"""
## Open the reference dataset
#g_input = gdal.Open ( dataset )
## Get the Geotransform vector
#geo_transform = g_input.GetGeoTransform ()
#x_size = g_input.RasterXSize # Raster xsize
#y_size = g_input.RasterYSize # Raster ysize
#srs = g_input.GetProjectionRef () # Projection
## Need a driver object. By default, we use GeoTIFF
#if driver == "GTiff":
#driver = gdal.GetDriverByName ( driver )
#dataset_out = driver.Create ( output_name, x_size, y_size, 1, \
#gdal.GDT_Float32, ['TFW=YES', \
#'COMPRESS=LZW', 'TILED=YES'] )
#else:
#driver = gdal.GetDriverByName ( driver )
#dataset_out = driver.Create ( output_name, x_size, y_size, 1, \
#gdal.GDT_Float32 )
#
#dataset_out.SetGeoTransform ( geo_transform )
#dataset_out.SetProjection ( srs )
#dataset_out.GetRasterBand ( 1 ).WriteArray ( \
#raster_data.astype(np.float32) )
#dataset_out.GetRasterBand ( 1 ).SetNoDataValue ( float(-999) )
#dataset_out = None
#if __name__ == "__main__":
#
#arg_parser = optparse.OptionParser()
#arg_parser.add_option( '-r', '--red', dest="red_fname", \
#help="The RED data" )
#arg_parser.add_option( '-n', '--nir', dest="nir_fname", \
#help="The NIR data" )
#arg_parser.add_option( '-o', '--output', dest="out_fname", \
#help="The output dataset" )
#arg_parser.add_option( '-f', '--format', dest="out_format", \
#default="GTiff", help="Output format" )
#options, extra_junk = arg_parser.parse_args ()
#
#if not os.path.exists ( options.red_fname ):
#print "ERROR: The red filename %s does not exist" % options.red_fname
#sys.exit ( -1 )
#if not os.path.exists ( options.nir_fname ):
#print "ERROR: The nir filename %s does not exist" % options.nir_fname
#sys.exit ( -1 )
#if os.path.exists ( options.out_fname):
#print "ERROR: The output filename %s does already exist" % \
#options.out_fname
#print "\t Select a different one, or delete the file."
#sys.exit ( -1 )
#
#
#c_ndvi = calculate_ndvi ( options.red_fname, options.nir_fname )
#save_raster ( options.out_fname, c_ndvi, options.red_fname, \
#driver=options.out_format )
| Python | 0 | |
65f6f78008d4f961c9ebe5d8047b0f2c742fe15f | Add unittest for QInputDialog.getXXX() methods | tests/qtgui/qinputdialog_get_test.py | tests/qtgui/qinputdialog_get_test.py | import unittest
from PySide import QtCore, QtGui
from helper import UsesQApplication, TimedQApplication
class TestInputDialog(TimedQApplication):
    """Smoke tests for the static QInputDialog.getXXX() helpers.

    NOTE(review): each call opens a modal dialog; presumably the
    TimedQApplication base closes the event loop on a timeout -- confirm.
    """
    def testGetDouble(self):
        QtGui.QInputDialog.getDouble(None, "title", "label")
    def testGetInt(self):
        QtGui.QInputDialog.getInt(None, "title", "label")
    def testGetInteger(self):
        QtGui.QInputDialog.getInteger(None, "title", "label")
    def testGetItem(self):
        QtGui.QInputDialog.getItem(None, "title", "label", QtCore.QStringList(["1", "2", "3"]))
    def testGetText(self):
        QtGui.QInputDialog.getText(None, "title", "label")
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
52189e2161e92b36df47a04c2150dff38f81f5e9 | Add mocked tests for activation | tests/unit/tests/test_activations.py | tests/unit/tests/test_activations.py | from unittest import mock
from django.test import TestCase
from viewflow import activation, flow
from viewflow.models import Task
class TestActivations(TestCase):
    """Unit tests for viewflow activations with the flow tasks mocked."""
    def test_start_activation_lifecycle(self):
        # prepare()/done() on a StartActivation drives the task and
        # process, then activates the next flow task.
        flow_task_mock = mock.Mock(spec=flow.Start())
        act = activation.StartActivation()
        act.initialize(flow_task_mock)
        act.prepare()
        act.done()
        act.task.prepare.assert_called_once_with()
        act.task.done.assert_called_once_with()
        act.process.start.assert_called_once_with()
        flow_task_mock.activate_next.assert_any_call(act)
    def test_view_activation_activate(self):
        # ViewActivation.activate() creates and saves a task for the view.
        flow_task_mock = mock.Mock(spec=flow.View(lambda *args, **kwargs: None))
        prev_activation_mock = mock.Mock(spec=activation.StartActivation())
        act = activation.ViewActivation.activate(flow_task_mock, prev_activation_mock)
        act.task.save.assert_has_calls(())
    def test_view_activation_lifecycle(self):
        # Same lifecycle checks as the start activation, but initialized
        # with an existing task instance.
        flow_task_mock = mock.Mock(spec=flow.View(lambda *args, **kwargs: None))
        task_mock = mock.Mock(spec=Task())
        act = activation.ViewActivation()
        act.initialize(flow_task_mock, task_mock)
        act.prepare()
        act.done()
        act.task.prepare.assert_called_once_with()
        act.task.done.assert_called_once_with()
        flow_task_mock.activate_next.assert_any_call(act)
| Python | 0 | |
e0df929e07e30c514b2b39f515bfd3102d1ebfe7 | Add annotate experiment | Source/Git/Experiments/git_annotate.py | Source/Git/Experiments/git_annotate.py | #!/usr/bin/python3
import sys
import git
repo = git.Repo( sys.argv[1] )
line_no = 0
for commit, hunk in repo.blame( 'HEAD', sys.argv[2] ):
    # The first line of each blame hunk is prefixed with its commit id.
    line_no += 1
    print( '%s %6d:%s' % (commit, line_no, hunk[0]) )
    # Subsequent lines of the same hunk are padded to line up underneath.
    for text in hunk[1:]:
        line_no += 1
        print( '%*s %6d:%s' % (40, '', line_no, text) )
| Python | 0.000007 | |
51d581c7bca0fcacf8604b898f96394847865e15 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/e1e64a45b138980a6d8c125bacc81f22142d2b53. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "e1e64a45b138980a6d8c125bacc81f22142d2b53"
    TFRT_SHA256 = "5afd4500e88c75188e29e68273438b849d57d800ed982bbe292325148ad3e016"
    # Fetch the pinned TFRT commit (with mirror fallback) as @tf_runtime.
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "0dcdcc3f57a96bc354e66f3805dff4f619e2b93f"
    TFRT_SHA256 = "940edcaf656cbbfee314689fd7e52aaa02bd07197bd4139f24aec64eee74c7a8"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| Python | 0.000001 |
5dd31aa3cfacb6bd157d50ac3d310b8064a46b80 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/5f6e52142a3592d0cfa058dbfd140cad49ed451a. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "5f6e52142a3592d0cfa058dbfd140cad49ed451a"
    TFRT_SHA256 = "8e1efbd7df0fdeb5186b178d7c8b90c33ba80cef54999e988097bd1ff0f4e8fe"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "736eeebfb56c6d0de138f4a29286140d8c26d927"
    TFRT_SHA256 = "b584ee5ce5ecaadf289b0997987dfb5eec6cf3623f30b83028923cad20914e61"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| Python | 0 |
0c13207eeda65754532bab5888cc33693fb06834 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/b87ea071c60db54775b92da8e0eed8477ab96a6a. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "b87ea071c60db54775b92da8e0eed8477ab96a6a"
    TFRT_SHA256 = "61b8951d9236a82c54be8db871cd427013ec24ae17b0e681829a634e4f0388b3"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; GitHub is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "75318fbce7817886508abd18dd5ea3b35d552372"
    TFRT_SHA256 = "233d123e6287e105acb2b464db68b753624dfe5c27f299ff6b2dbe29ef40e9e3"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; GitHub is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| Python | 0.000003 |
1eb980caefcbaaa4b29f7c3d92f27e490003e208 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/9562f24de39c95b4a076f7e0a0eb79cb980a9c72. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "9562f24de39c95b4a076f7e0a0eb79cb980a9c72"
    TFRT_SHA256 = "6fda4b556e5100e83ba292b8907c82f152740bb9eb157dc64e9c01ed2c4536e8"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; GitHub is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
    """Imports TFRT."""
    # Attention: tools parse and update these lines.
    # (Automation bumps TFRT_COMMIT and TFRT_SHA256 in lockstep; keep the format.)
    TFRT_COMMIT = "de22adc4126843c3cf142e0a829d153dc94cdd73"
    TFRT_SHA256 = "e345d2ae1d385ebaf41531c831bb1025cab260fe20daa5b6024c1d07c1ebfd0c"
    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # Mirror is listed first; GitHub is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| Python | 0.000006 |
a73671995e1c5b920f5f93226c7bf3e7501a7448 | Add test for GoogLeNet | tests/test_googlenet.py | tests/test_googlenet.py | import unittest
import numpy
from chainer import cuda
from chainer import testing
from chainer.testing import attr
from chainer.variable import Variable
from .. import googlenet
@unittest.skipUnless(googlenet.available, 'Pillow is required')
@attr.slow
class TestGoogLeNet(unittest.TestCase):
def setUp(self):
self.link = googlenet.GoogLeNet(pretrained_model=None)
def test_available_layers(self):
result = self.link.available_layers
self.assertIsInstance(result, list)
self.assertEqual(len(result), 19)
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(numpy.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(numpy.float32)))
y1 = cuda.to_cpu(self.link(x1)['prob'].data)
self.assertEqual(y1.shape, (1, 1000))
def test_call_cpu(self):
self.check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
def test_prepare(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
x3 = numpy.random.uniform(0, 255, (160, 120, 3)).astype(numpy.float32)
x4 = numpy.random.uniform(0, 255, (1, 160, 120)).astype(numpy.float32)
x5 = numpy.random.uniform(0, 255, (3, 160, 120)).astype(numpy.uint8)
y1 = googlenet.prepare(x1)
self.assertEqual(y1.shape, (3, 224, 224))
self.assertEqual(y1.dtype, numpy.float32)
y2 = googlenet.prepare(x2)
self.assertEqual(y2.shape, (3, 224, 224))
self.assertEqual(y2.dtype, numpy.float32)
y3 = googlenet.prepare(x3, size=None)
self.assertEqual(y3.shape, (3, 160, 120))
self.assertEqual(y3.dtype, numpy.float32)
y4 = googlenet.prepare(x4)
self.assertEqual(y4.shape, (3, 224, 224))
self.assertEqual(y4.dtype, numpy.float32)
y5 = googlenet.prepare(x5, size=None)
self.assertEqual(y5.shape, (3, 160, 120))
self.assertEqual(y5.dtype, numpy.float32)
def check_extract(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
result = self.link.extract([x1, x2], layers=['pool5', 'loss3_fc'])
self.assertEqual(len(result), 2)
y1 = cuda.to_cpu(result['pool5'].data)
self.assertEqual(y1.shape, (2, 1024, 1, 1))
self.assertEqual(y1.dtype, numpy.float32)
y2 = cuda.to_cpu(result['loss3_fc'].data)
self.assertEqual(y2.shape, (2, 1000))
self.assertEqual(y2.dtype, numpy.float32)
x3 = numpy.random.uniform(0, 255, (80, 60)).astype(numpy.uint8)
result = self.link.extract([x3], layers=['pool1'], size=None)
self.assertEqual(len(result), 1)
y3 = cuda.to_cpu(result['pool1'].data)
self.assertEqual(y3.shape, (1, 64, 20, 15))
self.assertEqual(y3.dtype, numpy.float32)
def test_extract_cpu(self):
self.check_extract()
@attr.gpu
def test_extract_gpu(self):
self.link.to_gpu()
self.check_extract()
def check_predict(self):
x1 = numpy.random.uniform(0, 255, (320, 240, 3)).astype(numpy.uint8)
x2 = numpy.random.uniform(0, 255, (320, 240)).astype(numpy.uint8)
result = self.link.predict([x1, x2], oversample=False)
y = cuda.to_cpu(result.data)
self.assertEqual(y.shape, (2, 1000))
self.assertEqual(y.dtype, numpy.float32)
result = self.link.predict([x1, x2], oversample=True)
y = cuda.to_cpu(result.data)
self.assertEqual(y.shape, (2, 1000))
self.assertEqual(y.dtype, numpy.float32)
def test_predict_cpu(self):
self.check_predict()
@attr.gpu
def test_predict_gpu(self):
self.link.to_gpu()
self.check_predict()
testing.run_module(__name__, __file__)
| Python | 0 | |
fd01a25c0f5cb9ba75b2a659d47d1d3902242c5e | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/c3e082762b7664bbc7ffd2c39e86464928e27c0c. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "c3e082762b7664bbc7ffd2c39e86464928e27c0c"
TFRT_SHA256 = "9b7fabe6e786e6437bb7cd1a4bed8416da6f08969266e57945805017092900c6"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "5a604f55b0d725eb537fd1a7cb6a88fcc6fd9b73"
TFRT_SHA256 = "004f312a2c65165e301b101add213013603c8822e479b4be63e2f95a3f972ebd"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0.000003 |
06cfa4c7055ec997dcb3aec11732ee1be5330b75 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/285e48bc47db23a479637fd1e2767b9a35dc2c9b. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "285e48bc47db23a479637fd1e2767b9a35dc2c9b"
TFRT_SHA256 = "6f0067d0cb7bb407caeef060603b6e33f1231cddf1ce4ce2ebce027dc418764f"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "962d1c7a123f01ccdb39e0d1959794f432b0ffeb"
TFRT_SHA256 = "ce0f2f86d19850e8951514b0e3f76950d07a8dc79d053de3d7a4cf402389351a"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| Python | 0.000004 |
27cb9279670bd513a1559f4865500d84869bb9f0 | Test module for Predictor class. | tests/test_predictor.py | tests/test_predictor.py | #! /usr/env/bin python
import numpy as np
from pyboas import predictor, models
# Build random 3-parameter normal posterior.
posterior = np.random.randn(100, 3)
def toy_model(param, time):
    """Evaluate a quadratic model a*t**2 + b*t + c for each posterior draw.

    ``param`` has one row per draw with columns (a, b, c); ``time`` is a
    scalar or 1-d array.  Broadcasting yields a (len(time), n_draws) array.
    """
    t = np.atleast_1d(time)[:, np.newaxis]
    a, b, c = param[:, 0], param[:, 1], param[:, 2]
    return a * t ** 2 + b * t + c
def test_basic_shape():
    """Test basic shape conditions on output of predictions."""
    times = np.random.rand(4, )
    gp = predictor.GaussPredictor(posterior, toy_model)
    gp.make_prediction(times)
    # Abscissae and predictive densities must be shaped alike ...
    assert gp.x.shape == gp.predictives.shape
    # ... and there is one predictive distribution per requested time.
    assert len(gp.predictives) == len(times)
def test_time_concatenation():
    """
    Test feature to concatenate prediction times over make_prediction calls.
    """
    times = np.random.rand(4,)
    one_shot = predictor.GaussPredictor(posterior, toy_model)
    two_step = predictor.GaussPredictor(posterior, toy_model)
    # Predict all times in a single call ...
    one_shot.make_prediction(times)
    # ... versus the same times split across two successive calls.
    two_step.make_prediction(times[:2])
    two_step.make_prediction(times[2:])
    # Both predictors must end up in identical states.
    assert np.allclose(one_shot.predictives, two_step.predictives)
    assert np.allclose(one_shot.x, two_step.x)
def test_sample_draw():
    """Smoke test: drawing samples from the predictive must not raise."""
    times = np.random.rand(4, )
    gp = predictor.GaussPredictor(posterior, toy_model)
    gp.samplepredictive(times, 100)
def ok():
    # Print "OK" in green using ANSI escape codes.
    print('\033[92mOK\033[0m')
def failed():
    # Print "FAILED" in red using ANSI escape codes.
    print('\033[91mFAILED\033[0m')
def test_all():
    # Run each test, reporting OK/FAILED instead of letting asserts propagate.
    # NOTE(review): the trailing comma after print(...) is a Python 2
    # "suppress newline" idiom; under Python 3 it merely wraps the result in
    # a tuple and the newline is printed anyway -- confirm the target version.
    print('Testing basic functioning....\t'),
    try:
        test_basic_shape()
        ok()
    except AssertionError:
        failed()
    print('Testing time concatenation....\t'),
    try:
        test_time_concatenation()
        ok()
    except AssertionError:
        failed()
    # NOTE(review): test_sample_draw is never exercised here.
    return
if __name__ == '__main__':
    test_all()
| Python | 0 | |
0b443cab974a0d0ce58a2cb4fdd68c7992377eb8 | add chisquare test comparing random sample with cdf (first try of commit) | scipy/stats/tests/test_discrete_chisquare.py | scipy/stats/tests/test_discrete_chisquare.py |
import numpy as np
from scipy import stats
debug = False
def check_discrete_chisquare(distname, arg, alpha = 0.01):
'''perform chisquare test for random sample of a discrete distribution
Parameters
----------
distname : string
name of distribution function
arg : sequence
parameters of distribution
alpha : float
significance level, threshold for p-value
Returns
-------
result : bool
0 if test passes, 1 if test fails
uses global variable debug for printing results
'''
# define parameters for test
n=50000
nsupp = 20
wsupp = 1.0/nsupp
distfn = getattr(stats, distname)
rvs = distfn.rvs(size=n,*arg)
# construct intervals with minimum mass 1/nsupp
# intervalls are left-half-open as in a cdf difference
distsupport = xrange(max(distfn.a, -1000), min(distfn.b, 1000) + 1)
last = 0
distsupp = [max(distfn.a, -1000)]
distmass = []
for ii in distsupport:
current = distfn.cdf(ii,*arg)
if current - last >= wsupp-1e-14:
distsupp.append(ii)
distmass.append(current - last)
last = current
if current > (1-wsupp):
break
if distsupp[-1] < distfn.b:
distsupp.append(distfn.b)
distmass.append(1-last)
distsupp = np.array(distsupp)
distmass = np.array(distmass)
# convert intervals to right-half-open as required by histogram
histsupp = distsupp+1e-8
histsupp[0] = distfn.a
# find sample frequencies and perform chisquare test
freq,hsupp = np.histogram(rvs,histsupp,new=True)
cdfs = distfn.cdf(distsupp,*arg)
(chis,pval) = stats.chisquare(np.array(freq),n*distmass)
# print and return results
if debug:
print 'chis,pval:', chis, pval
print 'len(distsupp), len(distmass), len(hsupp), len(freq)'
print len(distsupp), len(distmass), len(hsupp), len(freq)
print 'distsupp', distsupp
print 'distmass', n*np.array(distmass)
print 'freq', freq
print 'itemfreq', stats.itemfreq(rvs)
print 'n*pmf', n*distfn.pmf(list(distsupport)[:10],*arg)
assert (pval > alpha), 'chisquare - test for %s' \
'at arg = %s' % (distname,str(arg))
def test_discrete_rvs_cdf():
    # nose-style test generator: yields one chisquare check per
    # (distribution name, shape parameters) pair below.
    distdiscrete = [
        ['bernoulli',(0.3,)],
        ['binom', (5, 0.4)],
        ['boltzmann',(1.4, 19)],
        ['dlaplace', (0.8,)],
        ['geom', (0.5,)],
        ['hypergeom',(30, 12, 6)],
        ['logser', (0.6,)],
        ['nbinom', (5, 0.5)],
        ['planck', (4.1,)],
        ['poisson', (0.6,)],
        ['randint', (7, 31)],
        ['zipf', (2,)] ]
    for distname, arg in distdiscrete:
        if debug:
            print distname
        yield check_discrete_chisquare, distname, arg
if __name__ == '__main__':
    import nose
    nose.run(argv=['', __file__])
| Python | 0 | |
7ac3540c2b49bcfd933fe1167f92a9b3c0cdf438 | Add a stub for matching boss catalogue. | py/legacyproduct/bin/match-boss-catalogue.py | py/legacyproduct/bin/match-boss-catalogue.py | #!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
from legacyproduct.internal import sharedmem
import argparse
import os, sys
from time import time
from scipy.spatial import cKDTree as KDTree
import fitsio
def main():
    """Entry point: match every tractor brick against the BOSS catalogue.

    Fixes vs. the original: argparse stores the tolerance as ``ns.tolerance``
    (``ns.tol`` raised AttributeError), the module imports ``numpy as np``
    (bare ``numpy`` raised NameError), and the source root is ``ns.src``
    (``os.src`` does not exist).
    """
    ns = parse_args()
    bricks = list_bricks(ns)
    tree, boss = read_boss(ns.boss)
    # Convert the match tolerance from arc-seconds to radians; for small
    # angles this approximates the chord length used by the KD-tree query.
    tol = ns.tolerance / (60. * 60. * 180) * np.pi
    for brickname, path in bricks:
        data = process(brickname, path, tree, boss, tol, ns)
        # Mirror the source directory layout under the destination root.
        destpath = os.path.join(ns.dest, os.path.relpath(path, ns.src))
        save_file(destpath, data, {}, ns.format)
def process(brickname, path, tree, boss, tol, ns=None):
    """Match one brick's tractor objects against the BOSS KD-tree.

    Returns a record array aligned row-for-row with the brick's objects:
    matched rows carry the corresponding BOSS entry, unmatched rows get
    SURVEY='N/A'.  ``ns`` is unused; it defaults to None so existing
    5-argument call sites keep working.
    """
    objects = fitsio.read(path, 1, upper=True)
    pos = radec2pos(objects['RA'], objects['DEC'])
    # cKDTree.query returns (distances, indices); the original unpacked them
    # in the wrong order and then compared the distance against the *index*
    # instead of the tolerance.
    d, i = tree.query(pos, 1)
    mask = d < tol
    result = np.empty(len(objects), boss.dtype)
    result[mask] = boss[i[mask]]
    # Index the field first: ``result[~mask]['SURVEY'] = ...`` assigns into a
    # temporary copy and is silently a no-op on structured arrays.
    result['SURVEY'][~mask] = 'N/A'
    return result
def save_file(filename, data, header, format):
    """Write the matched catalogue to *filename* as FITS or HDF5.

    ``header`` is a mapping of extra metadata; it becomes FITS header cards
    or HDF5 dataset attributes.  Raises ValueError for an unknown format,
    naming the offending value to ease debugging.
    """
    if format == 'fits':
        fitsio.write(filename, data, extname='DECALS-BOSS', header=header, clobber=True)
    elif format == 'hdf5':
        import h5py
        with h5py.File(filename, 'w') as ff:
            dset = ff.create_dataset('DECALS-BOSS', data=data)
            for key in header:
                dset.attrs[key] = header[key]
    else:
        raise ValueError("Unknown format: %r" % (format,))
def radec2pos(ra, dec):
    """Convert RA/Dec in degrees to unit vectors on the sphere.

    Returns a float32 array of shape (len(ra), 3) holding (x, y, z) with
    z = sin(dec), x = cos(dec)*sin(ra), y = cos(dec)*cos(ra).

    Fix vs. the original: the module imports ``numpy as np``, so the bare
    ``numpy.*`` calls raised NameError.
    """
    pos = np.empty(len(ra), ('f4', 3))
    pos[:, 2] = np.sin(dec / 180. * np.pi)
    pos[:, 1] = np.cos(dec / 180. * np.pi)
    pos[:, 0] = pos[:, 1]
    pos[:, 0] *= np.sin(ra / 180. * np.pi)
    pos[:, 1] *= np.cos(ra / 180. * np.pi)
    return pos
def read_boss(filename):
    """Load the BOSS spectro catalogue and build a KD-tree over its
    fibre plug positions (PLUG_RA / PLUG_DEC), for fast angular matching."""
    catalogue = fitsio.FITS(filename, upper=True)[1][:]
    positions = radec2pos(catalogue['PLUG_RA'], catalogue['PLUG_DEC'])
    return KDTree(positions), catalogue
def list_bricks(ns):
    """Enumerate (brickname, path) pairs for the tractor bricks to process.

    Uses ``ns.filelist`` when given (avoids walking the tree), otherwise
    walks ``ns.src``; optionally restricts to the bricknames in
    ``ns.bricklist``.  Returns the pairs sorted by brickname.
    """
    t0 = time()
    if ns.filelist is not None:
        # NOTE(review): parse_filename is not defined in this file -- it is
        # presumably provided alongside legacyproduct.internal; confirm.
        # Use a context manager so the filelist handle is closed promptly.
        with open(ns.filelist, 'r') as fl:
            d = dict((parse_filename(fn.strip()), fn.strip())
                     for fn in fl)
    else:
        # NOTE(review): iter_tractor is likewise not defined in this file.
        d = dict(iter_tractor(ns.src))
    if ns.verbose:
        print('enumerated %d bricks in %g seconds' % (
            len(d), time() - t0))
    #- Load list of bricknames to use
    if ns.bricklist is not None:
        bricklist = np.loadtxt(ns.bricklist, dtype='S8')
        # TODO: skip unknown bricks?
        d = dict([(brickname, d[brickname])
                  for brickname in bricklist])
    # (dropped a dead trailing `t0 = time()` from the original)
    return sorted(d.items())
def parse_args():
    """Build and parse the command line.

    Fix vs. the original: ``--tolerance`` now has ``type=float`` -- without
    it a value given on the command line stayed a string and broke the
    arc-second-to-radian arithmetic in main().
    """
    ap = argparse.ArgumentParser(
        description="""Match Boss Catalogue for DECALS.
        This will create a mirror of tractor catalogue directories, but each file would only contains
        The corresponding object in BOSS DR12.
        """
    )
    ap.add_argument("boss", help="BOSS DR12 catalogue. e.g. /global/project/projectdirs/cosmo/work/sdss/cats/specObj-dr12.fits")
    ap.add_argument("src", help="Path to the root directory of all tractor files")
    ap.add_argument("dest", help="Path to the root directory of output matched catalogue")
    ap.add_argument('-f', "--format", choices=['fits', 'hdf5'], default="fits",
                    help="Format of the output sweep files")
    ap.add_argument('-t', "--tolerance", type=float, default=0.01,
                    help="Tolerance of the angular distance for a match, in arc-seconds")
    ap.add_argument('-F', "--filelist", default=None,
                    help="list of tractor brickfiles to use; this will avoid expensive walking of the path.")
    ap.add_argument('-b', "--bricklist",
                    help="""Filename with list of bricknames to include.
                If not set, all bricks in src are included, sorted by brickname.
            """)
    ap.add_argument('-v', "--verbose", action='store_true')
    ap.add_argument("--numproc", type=int, default=None,
                    help="""Number of concurrent processes to use. 0 for sequential execution.
            Default is to use OMP_NUM_THREADS, or the number of cores on the node.""")
    return ap.parse_args()
| Python | 0 | |
34d5b5cdc058f1c9055b82151b518251fa3b4f74 | Add tool to create combined smart contract files | tools/join-contracts.py | tools/join-contracts.py | import os
import click
import re
from click.types import File
IMPORT_RE = re.compile(r'^import +["\'](?P<contract>[^"\']+.sol)["\'];$')

"""
Utility to join solidity contracts into a single output file by recursively
resolving imports.

example usage:

$ cd raiden/smart_contracts
$ python ../../tools/join-contracts.py SomeContractWithImports.sol joined.sol

"""


class ContractJoiner(object):
    """Recursively inlines solidity ``import`` statements into one flat file.

    Keeps only the first ``pragma`` line seen and skips files already
    visited, so diamond-shaped import graphs are flattened once.
    """

    def __init__(self):
        self.have_pragma = False
        self.seen = set()

    def join(self, contract_file):
        """Return the joined lines for *contract_file* (an open file)."""
        if contract_file.name in self.seen:
            print('Skipping duplicate {}'.format(contract_file.name))
            return []

        self.seen.add(contract_file.name)
        print('Reading {}'.format(contract_file.name))

        merged = []
        for raw_line in contract_file:
            line = raw_line.strip('\r\n')
            stripped = line.strip()

            if stripped.startswith('pragma'):
                # Emit the pragma only once, from the first file that has it.
                if not self.have_pragma:
                    self.have_pragma = True
                    merged.append(line)
                continue

            if stripped.startswith('import'):
                # Import lines are never emitted verbatim: either the target
                # resolves and its content is inlined, or the line is dropped.
                match = IMPORT_RE.match(stripped)
                if match:
                    target = match.groupdict().get('contract')
                    if target and os.path.exists(target):
                        with open(target) as imported:
                            merged.extend(self.join(imported))
                continue

            merged.append(line)
        return merged
@click.command()
@click.argument('contract', type=File())
@click.argument('output', type=File('w'))
def main(contract, output):
    # Flatten the root contract and its transitive imports into one file.
    output.write("\n".join(ContractJoiner().join(contract)))
if __name__ == '__main__':
    main()
| Python | 0 | |
e06416a61826229ebd0cccdc519b6dc39d8a0fd9 | Add migration to remove models. | server/migrations/0088_auto_20190304_1313.py | server/migrations/0088_auto_20190304_1313.py | # Generated by Django 2.1.4 on 2019-03-04 18:13
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the update-history models (InstalledUpdate, PendingAppleUpdate,
    # UpdateHistory, UpdateHistoryItem): constraints and foreign keys are
    # removed first so the DeleteModel operations can run cleanly.
    dependencies = [
        ('server', '0087_auto_20190301_1424'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='installedupdate',
            unique_together=set(),
        ),
        migrations.RemoveField(
            model_name='installedupdate',
            name='machine',
        ),
        migrations.RemoveField(
            model_name='pendingappleupdate',
            name='machine',
        ),
        migrations.AlterUniqueTogether(
            name='updatehistory',
            unique_together=set(),
        ),
        migrations.RemoveField(
            model_name='updatehistory',
            name='machine',
        ),
        migrations.AlterUniqueTogether(
            name='updatehistoryitem',
            unique_together=set(),
        ),
        migrations.RemoveField(
            model_name='updatehistoryitem',
            name='update_history',
        ),
        migrations.DeleteModel(
            name='InstalledUpdate',
        ),
        migrations.DeleteModel(
            name='PendingAppleUpdate',
        ),
        migrations.DeleteModel(
            name='UpdateHistory',
        ),
        migrations.DeleteModel(
            name='UpdateHistoryItem',
        ),
    ]
| Python | 0 | |
722c823f8af13d53d5068f1581614f09a042f8d9 | Create techpaisascrap.py | techpaisascrap.py | techpaisascrap.py | import urllib
import re
import msvcrt as m
import json
import datetime as DT
import numpy as np
import csv
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
import operator
def getColumn(filename, column):
    """Return column *column* (0-based) of CSV file *filename* as a list of strings.

    Fix vs. the original: the file handle was opened and never closed; a
    context manager closes it promptly.  Works on both Python 2 and 3.
    """
    with open(filename) as fh:
        return [row[column] for row in csv.reader(fh)]
# --- Fetch raw chart data (Python 2: urllib.urlopen, print statements). ---
total =0
data=[0]
dates=[]
#while True:
htmlfile = urllib.urlopen("http://techpaisa.com/chart/acc/volume/?xhr")
htmltext= htmlfile.read()
print "one"
htmlfile2 = urllib.urlopen("http://techpaisa.com/chart/acc/adx/?xhr")
htmltext2=htmlfile2.read()
htmlfile3=urllib.urlopen("http://techpaisa.com/supres/acc")
htmltext3=htmlfile3.read()
# Choose which bracketed field (w) to print for each of the first 40 tokens.
# NOTE(review): `g/2==0` is only true for g in {0, 1} under integer division;
# this was probably meant to be `g%2==0` -- the explicit odd-g cases below
# compensate for it.  Confirm before simplifying this chain.
g=0
while (g<40):
    w=1
    if (g/2==0):
        w=1
    if (g==9):
        w=0
    if (g==11):
        w=0
    if (g==13):
        w=0
    if (g==17):
        w=0
    if (g==15):
        w=0
    if (g==19):
        w=0
    if (g==21):
        w=0
    if (g==23):
        w=0
    if (g==25):
        w=0
    if (g==27):
        w=0
    if (g==29):
        w=0
    if (g==31):
        w=0
    if (g==33):
        w=0
    if (g==35):
        w=0
    if (g==37):
        w=0
    if (g==39):
        w=0
    if (g==0):
        w=2
    if (g==1):
        w=0
    if (g==3):
        w=0
    if (g==5):
        w=0
    if (g==7):
        w=0
    print htmltext3.split(':')[5].split(',')[g].split('[')[w]
    g=g+1
i=4000 #for simple average calc
k=5 #for SMA calc
SMA=0.0000
print "pop"
#_______________________________________________________________________________________#
#calculating the average of the term
# NOTE(review): the csv.writer file handles opened below are never closed.
writer = csv.writer(open('prices.csv', 'wb'))
while i <int(len(htmltext.split("#"))-1):
    #Since printing the values will kill the speed!!!!!!!!!!!!!!!!
    #print "date", htmltext.split("#")[i].split(",")[0],"price",htmltext.split("#")[i].split(",")[1],"volume",htmltext.split("#")[i].split(",")[2]
    i+=1
    dates=DT.datetime.strptime(htmltext.split("#")[i].split(",")[0].split("0:00")[0], "%Y/%m/%d")
    dates2=date2num(DT.datetime.strptime(htmltext.split("#")[i].split(",")[0], "%Y/%m/%d"))
    val = float(htmltext.split("#")[i].split(",")[1])
    writer.writerow(([dates2][0],[val][0]))
    total=total+val
average =total/(i-1)
print "average price ", average
#_______________________________________________________________________________________#
#calculating the SMA( simple moving average over five days )
writer = csv.writer(open('smadates.csv', 'wb'))
while k <=int(len(htmltext.split("#"))-1):
    #Since printing the values will kill the speed!!!!!!!!!!!!!!!!
    #print "date", htmltext.split("#")[i].split(",")[0],"price",htmltext.split("#")[i].split(",")[1],"volume",htmltext.split("#")[i].split(",")[2]
    # Inner while runs at most once (note the break): it just guards the
    # 5-term sum against running past the end of the data.
    while (k<int(len(htmltext.split("#"))-1)):
        total=float(htmltext.split("#")[k].split(",")[1])+float(htmltext.split("#")[k-1].split(",")[1])+float(htmltext.split("#")[k-2].split(",")[1])+float(htmltext.split("#")[k-3].split(",")[1])+float(htmltext.split("#")[k-4].split(",")[1])
        SMA=total/5
        break
    dates=DT.datetime.strptime(htmltext.split("#")[k].split(",")[0].split("0:00")[0], "%Y/%m/%d")
    dates2=date2num(DT.datetime.strptime(htmltext.split("#")[k].split(",")[0], "%Y/%m/%d"))
    #Since printing the values will kill the speed!!!!!!!!!!!!!!!!
    #print k," date",dates," AMount ",SMA
    writer.writerow(([dates2][0],[SMA][0]))
    k+=1
print "blah"
# NOTE(review): `time` shadows any `time` module import.
time = getColumn("smadates.csv",0)
volt = getColumn("smadates.csv",1)
#### OLD CODE FOR THE CHART...CAN BE REMOVED LATER!
#data1=np.genfromtxt('smadata.csv', skip_header=1) #suppose it is in the current working directory
#data2=np.genfromtxt('smadates.csv', skip_header=1)
#plt.plot(data1,'o-')
#plt.savefig('mpl_font_testA.png')
#plt.plot(data2, '.b', label='aErr < bErr')
plt.figure("SMA over the entire period")
plt.xlabel("Time(ms)")
plt.ylabel("Volt(mV)")
plt.plot(time,volt)
plt.show('else')
#_______________________________________________________________________________________#
#calculate the ATR as chart
####### THRESHOLD: 25!
k=14
plusdi14=0
minusdi14=0
writer3 = csv.writer(open('atrchart.csv', 'wb'))
# NOTE(review): several apparent bugs in this loop, flagged for confirmation:
#  - k is never incremented, so the loop never terminates;
#  - `while (m<0)` with m=14 never executes, so plusdi14/minusdi14 stay 0;
#  - rows are written via `writer` (the SMA writer above), not `writer3`.
while k <int(len(htmltext2.split("#"))-1):
    #collecting all the values for the ATR representation :
    dates2=date2num(DT.datetime.strptime(htmltext2.split("#")[k].split(",")[0], "%Y/%m/%d"))
    ADX= htmltext2.split("#")[k].split(",")[1]
    plusdi= htmltext2.split("#")[k].split(",")[2]
    minusdi=htmltext2.split("#")[k].split(",")[3]
    i+=1
    m=14
    print 'working'
    while (m<0):
        plusdi14=float(htmltext.split("#")[k-m].split(",")[2])+plusdi14
        minusdi14=float(htmltext.split("#")[k-m].split(",")[3])+minusdi14
        m=m-1
    diffdi=operator.abs(plusdi14-minusdi14)
    summdi=plusdi14+minusdi14
    print diffdi
    print summdi
    writer.writerow(([dates2][0],plusdi14,minusdi14))
| Python | 0.000001 | |
04a4d7887664753f87d6ccd0921c87160d8ced26 | Create 002_gen.py | 002/002_gen.py | 002/002_gen.py | #!/usr/bin/env python
def fibonatti(n_max=4000000):
    """Yield Fibonacci numbers (1, 1, 2, 3, 5, ...) not exceeding *n_max*.

    (Name kept as in the original script.  The generator starts from the
    second 1 of the sequence, which does not affect the even-valued sum.)
    """
    prev, cur = 1, 1
    while cur <= n_max:
        yield cur
        prev, cur = cur, prev + cur


# Project Euler #2: sum of the even-valued Fibonacci terms <= 4,000,000.
answer = sum(f for f in fibonatti() if f % 2 == 0)
print(answer)
| Python | 0.000005 | |
92f88fb9021094f1429f5175d01a354c4ad35880 | add initial gyp to build freetype lib (problems with cflags not showing up in xcode) | gyp/freetype.gyp | gyp/freetype.gyp | {
#   'includes': [
#     'common.gypi',
#   ],
  # Static FreeType2 library target built from the vendored sources; the
  # modules listed under 'sources' are FreeType's per-module single-file
  # amalgamations (base, rasterizer, font-format drivers, compression).
  'targets': [
    {
      'target_name': 'skfreetype',
      'type': 'static_library',
      'sources': [
        '../third_party/freetype/src/base/ftbbox.c',
        '../third_party/freetype/src/base/ftbitmap.c',
        '../third_party/freetype/src/base/ftglyph.c',
        '../third_party/freetype/src/base/ftlcdfil.c',
        '../third_party/freetype/src/base/ftstroke.c',
        '../third_party/freetype/src/base/ftxf86.c',
        '../third_party/freetype/src/base/ftbase.c',
        '../third_party/freetype/src/base/ftsystem.c',
        '../third_party/freetype/src/base/ftinit.c',
        '../third_party/freetype/src/base/ftgasp.c',
        '../third_party/freetype/src/base/ftfstype.c',
        '../third_party/freetype/src/raster/raster.c',
        '../third_party/freetype/src/sfnt/sfnt.c',
        '../third_party/freetype/src/smooth/smooth.c',
        '../third_party/freetype/src/autofit/autofit.c',
        '../third_party/freetype/src/truetype/truetype.c',
        '../third_party/freetype/src/cff/cff.c',
        '../third_party/freetype/src/psnames/psnames.c',
        '../third_party/freetype/src/pshinter/pshinter.c',

        # added for linker
        '../third_party/freetype/src/lzw/ftlzw.c',
        '../third_party/freetype/src/gzip/ftgzip.c',
        '../third_party/freetype/src/cid/type1cid.c',
        '../third_party/freetype/src/bdf/bdf.c',
        '../third_party/freetype/src/psaux/psaux.c',
        '../third_party/freetype/src/pcf/pcf.c',
        '../third_party/freetype/src/pfr/pfr.c',
        '../third_party/freetype/src/type1/type1.c',
        '../third_party/freetype/src/type42/type42.c',
        '../third_party/freetype/src/winfonts/winfnt.c',
      ],
      'include_dirs': [
        '../third_party/freetype/internal',
        '../third_party/freetype/builds',
        '../third_party/freetype/include',
        '../third_party/freetype',
      ],
      # NOTE(review): per the commit message, these cflags may not propagate
      # to Xcode builds -- verify on that generator.
      'cflags': [
        '-W',
        '-Wall',
        '-fPIC',
        '-DPIC',
        '-DDARWIN_NO_CARBON',
        '-DFT2_BUILD_LIBRARY',
      ],
      'direct_dependent_settings': {
        'include_dirs': [
          '../third_party/freetype/include', # For ft2build.h
        ],
      },
    },
  ],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| Python | 0.000002 | |
e8efe8de59b32e7b78fcf801dccce36e7ec53768 | implement regular Kmeans | models/Kmeans_2.py | models/Kmeans_2.py | # -*- coding: utf-8 -*-
# My Model
from utils.ops import ops
import tensorflow as tf
import numpy as np
from sklearn.datasets import make_blobs
#############################################
# Deep Adaptive Separator Model #
#############################################
class KMeans:
    """K-means clustering expressed as a TensorFlow (v1) graph.

    Operates on embeddings shaped [batch, T*F, E] and refines
    ``nb_clusters`` centroids per batch item with ``tf.while_loop``.
    Fix vs. the original: ``input_tensor == None`` replaced with the
    correct identity test ``input_tensor is None``.
    """
    def __init__(self, nb_clusters, nb_iterations=50, graph=None, input_tensor=None):
        # NOTE(review): the `graph` parameter is accepted but never used.
        self.nb_clusters = nb_clusters
        self.nb_iterations = nb_iterations
        if input_tensor is None:
            self.graph = tf.Graph()
            with self.graph.as_default():
                # Spectrogram, embeddings
                # shape = [batch, T*F , E ]
                self.X = tf.placeholder("float", [None, None, None])
                self.input_dim = tf.shape(self.X)[1]
                # Initialize centroids from a random contiguous slice of X.
                begin = tf.random_uniform([], minval=0, maxval=self.input_dim-self.nb_clusters, dtype=tf.int32)
                self.centroids = tf.identity(self.X[: , begin:begin+nb_clusters, :])
                # Accessing the ops.scope-wrapped property builds the graph.
                self.network
            # Create a session for this model based on the constructed graph
            self.sess = tf.Session(graph = self.graph)
        else:
            # Caller-supplied tensor: build into the caller's default graph.
            # NOTE(review): no self.sess is created on this path, so fit()
            # would raise AttributeError -- presumably the caller runs the
            # returned ops itself; confirm intended usage.
            self.X = input_tensor
            self.input_dim = tf.shape(self.X)[1]
            begin = tf.random_uniform([], minval=0, maxval=self.input_dim-self.nb_clusters, dtype=tf.int32)
            self.centroids = tf.identity(self.X[: , begin:begin+nb_clusters, :])
            self.network
    def init(self):
        """Run global variable initialization in this model's graph."""
        with self.graph.as_default():
            self.sess.run(tf.global_variables_initializer())
    @ops.scope
    def network(self):
        """Build nb_iterations Lloyd steps; returns (centroids, labels)."""
        i = tf.constant(0)
        cond = lambda i, m: tf.less(i, self.nb_iterations)
        _ , self.centroids = tf.while_loop(cond, self.body,[i, self.centroids])
        return self.centroids, self.get_labels(self.centroids, self.X)
    def body(self ,i, centroids):
        """One Lloyd iteration: assign points, then recompute centroids."""
        with tf.name_scope('iteration'):
            # Checking the closest clusters
            labels = self.get_labels(centroids, self.X)
            # Creating the matrix equality [ B , S , TF], equality[: , s, :] = [labels == s](float32)
            cluster_range = tf.range(0, tf.shape(centroids)[1])
            equality = tf.map_fn(lambda r: tf.cast(tf.equal(labels, r), tf.float32), cluster_range, dtype=tf.float32)
            equality = tf.transpose(equality, [1 , 0, 2])
            # Mean of the points assigned to each cluster.
            new_centroids = tf.matmul(equality, self.X)/tf.reduce_sum(equality, axis=2, keep_dims=True)
        return [i+1, new_centroids]
    def get_labels(self, centroids, X):
        """Index of the nearest centroid (Euclidean) for every point in X."""
        centroids_ = tf.expand_dims(centroids, 1)
        X_ = tf.expand_dims(X, 2)
        return tf.argmin(tf.norm(X_ - centroids_, axis=3), axis=2, output_type=tf.int32)
    def fit(self, X_train):
        """Run the clustering graph on X_train; returns (centroids, labels)."""
        return self.sess.run(self.network, {self.X: X_train})
if __name__ == "__main__":
nb_samples = 10000
E = 2
nb_clusters = 2
# X1 = np.random.random_sample((nb_samples/2, E))
# X2 = np.random.random_sample((nb_samples/2, E)) + 2
# X = np.reshape(np.concatenate((X1,X2), axis=0), (1, nb_samples ,E))
# X = np.reshape(np.concatenate((X, X), axis=0), (2, nb_samples ,E))
# print X.shape
X, y = make_blobs(n_samples=nb_samples, centers=nb_clusters, n_features=E)
X = X[np.newaxis,:]
y = y[np.newaxis,:]
print y
kmean = KMeans(nb_clusters)
kmean.init()
centroids, labels = kmean.fit(X)
print labels
print y
if np.all((y-labels) == 0) or np.all((y+labels) == 1):
print 'OK'
| Python | 0.000102 | |
66137a8710bf3b778c860af8d6278ee0c97bbab4 | Add script to delete unused users on JupyterHub | scripts/delete-unused-users.py | scripts/delete-unused-users.py | #!/usr/bin/env python3
"""
Delete unused users from a JupyterHub.
JupyterHub performance sometimes scales with *total* number
of users, rather than running number of users. While that should
be fixed, we can work around it by deleting unused users once in
a while. This script will delete anyone who hasn't registered
any activity in a given period of time, double checking to
make sure they aren't active right now. This will require users to
log in again the next time they use the hub, but that's probably
ok.
"""
import argparse
from jhub_client.api import JupyterHubAPI
from dateutil.parser import parse
import asyncio
from datetime import timedelta, datetime
async def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'hub_url',
help='Fully qualified URL to the JupyterHub'
)
args = argparser.parse_args()
to_delete = []
async with JupyterHubAPI(hub_url=args.hub_url) as hub:
users = await hub.list_users()
for user in users:
last_activity_str = user.get('last_activity', False)
if last_activity_str:
try:
last_activity = parse(user['last_activity'])
except:
print(user['last_activity'])
raise
if last_activity and datetime.now().astimezone() - last_activity < timedelta(hours=24) and user['server'] is not None:
print(f"Not deleting {user['name']}")
else:
to_delete.append(user['name'])
print(f"Deleting {user['name']}")
for i, username in enumerate(to_delete):
print(f'{i+1} of {len(to_delete)}: deleting {username}')
await hub.delete_user(username)
if __name__ == '__main__':
asyncio.run(main())
| Python | 0 | |
ad6aa623bbd8f316ab7fb8c389d1c9c74b17ae8c | add util module for converting an update job into xml | rpath_repeater/utils/update_job_formatter.py | rpath_repeater/utils/update_job_formatter.py | #!/usr/bin/python
#
# Copyright (c) 2012 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
from xml.etree import cElementTree as etree
class Formatter(object):
__slots__ = [ 'jobs', 'root', 'changes' ]
def __init__(self, updateJob):
self.jobs = []
if updateJob is not None:
self.jobs = updateJob.getJobs()
self.root = None
self.changes = None
def format(self):
self.root = etree.Element('preview')
self.changes = etree.SubElement(self.root, 'conary_package_changes')
for oneJob in self.jobs:
for j in oneJob:
self._formatJob(j)
def toxml(self):
return etree.tostring(self.root)
def _formatJob(self, job):
(name, (oldVer, oldFla), (newVer, newFla)) = job[:3]
if oldVer is None:
self._formatInstall(name, newVer, newFla)
elif newVer is None:
self._formatErase(name, oldVer, oldFla)
else:
self._formatUpdate(name, oldVer, oldFla, newVer, newFla)
def _formatInstall(self, name, version, flavor):
node = self._newPackageChange('added')
self._packageSpec(node, 'added_conary_package', name, version, flavor)
def _formatErase(self, name, version, flavor):
node = self._newPackageChange('removed')
self._packageSpec(node, 'removed_conary_package', name, version, flavor)
def _formatUpdate(self, name, oldVersion, oldFlavor, newVersion, newFlavor):
node = self._newPackageChange('changed')
self._packageSpec(node, 'from', name, oldVersion, oldFlavor)
self._packageSpec(node, 'to', name, newVersion, newFlavor)
diff = etree.SubElement(node, 'conary_package_diff')
self._fieldDiff(diff, 'version', oldVersion, newVersion)
self._fieldDiff(diff, 'flavor', oldFlavor, newFlavor)
def _newPackageChange(self, type):
node = etree.SubElement(self.changes, 'conary_package_change')
etree.SubElement(node, 'type').text = type
return node
def _packageSpec(self, parent, tag, name, version, flavor):
node = etree.SubElement(parent, tag)
etree.SubElement(node, 'name').text = str(name)
etree.SubElement(node, 'version').text = str(version)
etree.SubElement(node, 'flavor').text = str(flavor)
return node
def _fieldDiff(self, parent, tag, oldValue, newValue):
if oldValue == newValue:
return
node = etree.SubElement(parent, tag)
etree.SubElement(node, 'from').text = str(oldValue)
etree.SubElement(node, 'to').text = str(newValue)
| Python | 0 | |
7b14028f3796981974b6d01b98277326123c0395 | add get_flatpage template tag | core/templatetags/get_flatpage.py | core/templatetags/get_flatpage.py | from django import template
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import get_current_site
register = template.Library()
class FlatpageNode(template.Node):
def __init__(self, context_name, url):
self.context_name = context_name
self.url = template.Variable(url)
def render(self, context):
if 'request' in context:
site_pk = get_current_site(context['request']).pk
else:
site_pk = settings.SITE_ID
try:
flatpage = FlatPage.objects.get(sites__id=site_pk, url=self.url.resolve(context))
except ObjectDoesNotExist:
flatpage = FlatPage(url=self.url.resolve(context))
context[self.context_name] = flatpage
return ''
@register.tag
def get_flatpage(parser, token):
"""
Retrieves the flatpage object for the specified url
Syntax::
{% get_flatpages ['url'] as context_name %}
Example usage::
{% get_flatpages '/about/' as about_page %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url'] as context_name" %
dict(tag_name=bits[0]))
# Must have at 3-6 bits in the tag
if len(bits) == 4:
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
url = bits[1]
return FlatpageNode(context_name, url)
else:
raise template.TemplateSyntaxError(syntax_message)
| Python | 0 | |
b8a07ce36cfeb2679ace05b26d6adc1e525d6044 | Add feature computation module | husc/features.py | husc/features.py | import functools as fun
import numpy as np
from scipy.stats.mstats import mquantiles
from scipy import ndimage as nd
from skimage import feature, color, io as imio, img_as_float, \
morphology as skmorph
from skimage import filter as imfilter, measure
def lab_hist(rgb_image, **kwargs):
return np.histogram(color.rgb2lab(rgb_image), **kwargs)
# threshold and labeling number of objects, statistics about object size and
# shape
def intensity_object_features(im, adaptive_t_radius=51):
"""Segment objects based on intensity threshold and compute properties.
Parameters
----------
im : 2D np.ndarray of float or uint8.
The input image.
adaptive_t_radius : int, optional
The radius to calculate background with adaptive threshold.
Returns
-------
f : 1D np.ndarray of float
The feature vector.
"""
tim1 = im > imfilter.threshold_otsu(im)
f1 = object_features(tim1, im)
tim2 = imfilter.threshold_adaptive(im, adaptive_t_radius)
f2 = object_features(tim2, im)
f = np.concatenate([f1, f2])
return f
def object_features(bin_im, im, erode=2):
"""Compute features about objects in a binary image.
Parameters
----------
bin_im : 2D np.ndarray of bool
The image of objects.
im : 2D np.ndarray of float or uint8
The actual image.
erode : int, optional
Radius of erosion of objects.
Returns
-------
f : 1D np.ndarray of float
The feature vector.
"""
selem = skmorph.disk(erode)
if erode > 0:
bin_im = nd.binary_erosion(bin_im, selem)
lab_im, n_objs = nd.label(bin_im)
if erode > 0:
lab_im = nd.grey_dilate(lab_im, footprint=selem)
feats = measure.regionprops(lab_im,
['Area', 'Eccentricity', 'EulerNumber',
'Extent', 'MinIntensity', 'MeanIntensity',
'MaxIntensity', 'Solidity'],
intensity_image=im)
feats = np.array([props.values() for props in feats], np.float)
feature_quantiles = mquantiles(feats, [0.05, 0.25, 0.5, 0.75, 0.95],
axis=0)
f = np.concatenate([np.array([n_objs], np.float),
feature_quantiles.ravel()])
return f
full_feature_list = \
[fun.partial(np.histogram, bins=16, range=(0.0, 1.0)),
fun.partial(lab_hist, bins=16, range=(0.0, 1.0)),
feature.hog
]
# TO-DO: add segmentation features
def image_feature_vector(im, feature_list=None):
if type(im) == str:
im = img_as_float(imio.imread(im))
if feature_list is None:
feature_list = full_feature_list
features = np.concatenate([f(im) for f in feature_list])
return features
| Python | 0.000001 | |
f16a7e43ce4d9dc82fd4bfca34d80f0447bd57db | add isStaffOrReadOnly permissions | treeherder/webapp/api/permissions.py | treeherder/webapp/api/permissions.py | from rest_framework.permissions import BasePermission
from rest_framework.permissions import SAFE_METHODS
class IsStaffOrReadOnly(BasePermission):
"""
The request is authenticated as an admin staff (eg. sheriffs), or is a read-only request.
"""
def has_permission(self, request, view):
return (request.method in SAFE_METHODS or
request.user and
request.user.is_authenticated() and
request.user.is_staff) | Python | 0.00003 | |
5a77678a44ec9838e943b514a586dbd96b8bdfdc | Add migration for license change | modelview/migrations/0042_auto_20171215_0953.py | modelview/migrations/0042_auto_20171215_0953.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-12-15 08:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('modelview', '0041_merge_20171211_1420'),
]
operations = [
migrations.AlterField(
model_name='basicfactsheet',
name='license',
field=models.CharField(choices=[('MIT Licence', 'MIT Licence'), ('Apache Licence', 'Apache Licence'), ('GNU GPL Licence', 'GNU GPL Licence'), ('Other', 'Other'), ('Unknown', 'Unknown')], default='Unknown', max_length=20, verbose_name='License'))
]
| Python | 0 | |
5522285af9179441e56f65405037bb3a4c1c1274 | Revert "Important fixes" | soccer/gameplay/plays/testing/triangle_pass.py | soccer/gameplay/plays/testing/triangle_pass.py | import robocup
import play
import behavior
import skills.move
import skills.capture
import tactics.coordinated_pass
import constants
import main
import enum
## A demo play written during a teaching session to demonstrate play-writing
# Three robots form a triangle on the field and pass the ball A->B->C->A and so on.
class TrianglePass(play.Play):
class State(enum.Enum):
## 2 robots get on the corners of a triangle,
# while a third fetches the ball
setup = 1
## The robots continually pass to each other
passing = 2
def __init__(self):
super().__init__(continuous=True)
# register states - they're both substates of "running"
self.add_state(TrianglePass.State.setup,
behavior.Behavior.State.running)
self.add_state(TrianglePass.State.passing,
behavior.Behavior.State.running)
self.add_transition(behavior.Behavior.State.start,
TrianglePass.State.setup, lambda: True,
'immediately')
self.add_transition(TrianglePass.State.setup,
TrianglePass.State.passing,
lambda: self.all_subbehaviors_completed(),
'all subbehaviors completed')
self.triangle_points = [
robocup.Point(0, constants.Field.Length / 2.0),
robocup.Point(constants.Field.Width / 4,
constants.Field.Length / 4),
robocup.Point(-constants.Field.Width / 4,
constants.Field.Length / 4),
]
def on_enter_setup(self):
closestPt = min(self.triangle_points,
key=lambda pt: pt.dist_to(main.ball().pos))
otherPts = list(self.triangle_points)
otherPts.remove(closestPt)
self.add_subbehavior(skills.move.Move(otherPts[0]), 'move1')
self.add_subbehavior(skills.move.Move(otherPts[1]), 'move2')
self.add_subbehavior(skills.capture.Capture(), 'capture')
def on_exit_setup(self):
self.remove_all_subbehaviors()
def execute_passing(self):
# If we had a pass in progress before and it finished, remove it
if self.has_subbehaviors():
if self.all_subbehaviors()[0].is_done_running():
self.remove_all_subbehaviors()
# if we're not currently passing, start a new pass
if not self.has_subbehaviors():
# pick pass from and to points
kickFrom = min(self.triangle_points,
key=lambda pt: pt.dist_to(main.ball().pos))
kickFromIdx = self.triangle_points.index(kickFrom)
kickToIdx = (kickFromIdx + 1) % len(self.triangle_points)
kickToPt = self.triangle_points[kickToIdx]
# add the pass subbehavior
self.add_subbehavior(
tactics.coordinated_pass.CoordinatedPass(kickToPt), 'pass')
def on_exit_passing(self):
self.remove_all_subbehaviors()
| Python | 0 | |
0a4c100f9fb6e7540320fb7c55aeebdffe91c6d1 | add primenumber.py | primenumber.py | primenumber.py | lower = int(input("Enter lower range: "))
upper = int(input("Enter upper range: "))
for num in range(lower,upper + 1):
if num > 1:
for i in range(2,num):
if (num % i) == 0:
break
else:
print(num)
| Python | 0.998844 | |
64ced324f05de20f839782913cfb13d147d49dd6 | create a scheduler example file to test on live | code-samples/web_scraper/jared/scheduling_script.py | code-samples/web_scraper/jared/scheduling_script.py | from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler as Scheduler
import logging
import datetime
# create a scheduler
s = Scheduler()
# This is what I want to happen
def job():
logging.basicConfig(filename='scheduled_task.log',level=logging.INFO,
format='%(asctime)s %(message)s line: %(lineno)d')
try:
logging.info( "scheduled event")
except Exception as e:
print("open file failed")
def main():
newTime = datetime.datetime.now() + datetime.timedelta(seconds = 2)
s.add_job(job, 'cron', hour='0-23')
s.start()
try:
# This is here to simulate application activity (which keeps the main thread alive).
while True:
sleep(2)
except (KeyboardInterrupt, SystemExit):
# Not strictly necessary if daemonic mode is enabled but should be done if possible
scheduler.shutdown()
if __name__ == "__main__":
main()
# Running a python script with python script & will fork that process immediately, so you can close the terminal. | Python | 0 | |
a9a6a3dafc8901ffeeb89862fdc79f7099ba311a | Add UTF-8 test | test/test_utf8.py | test/test_utf8.py | # -*- coding: utf-8 -*-
# Monary - Copyright 2011-2014 David J. C. Beach
# Please see the included LICENSE.TXT and NOTICE.TXT for licensing information.
import pymongo
import monary
def setup():
with pymongo.Connection("127.0.0.1") as c:
c.drop_database("monary_test")
c.monary_test.data.insert({"test" : u"aあ"})
c.monary_test.data.insert({"test" : u"âéÇ"})
c.monary_test.data.insert({"test" : u"αλΩ"})
def teardown():
with pymongo.Connection("127.0.0.1") as c:
c.drop_database("monary_test")
def test_utf8():
with monary.Monary("127.0.0.1") as m:
[data] = m.query("monary_test",
"data",
{},
["test"],
["string:8"],
sort="sequence")
expected = ["aあ", "âéÇ", "αλΩ"]
for x, y in zip(data, expected):
assert x == y
| Python | 0.000569 | |
6740c6192ab9bf37767230981b86e446486d4c43 | implement basic plugin loader for laser | mythril/laser/ethereum/plugins/plugin_loader.py | mythril/laser/ethereum/plugins/plugin_loader.py | from mythril.laser.ethereum.svm import LaserEVM
from mythril.laser.ethereum.plugins.plugin import LaserPlugin
class LaserPluginLoader:
"""
The LaserPluginLoader is used to abstract the logic relating to plugins.
Components outside of laser thus don't have to be aware of the interface that plugins provide
"""
def __init__(self, symbolic_vm: LaserEVM):
""" Initializes the plugin loader
:param symbolic_vm: symbolic virtual machine to load plugins for
"""
self.symbolic_vm = symbolic_vm
self.laser_plugins = []
def load(self, laser_plugin: LaserPlugin):
""" Loads the plugin
:param laser_plugin: plugin that will be loaded in the symbolic virtual machine
"""
laser_plugin.initialize(self.symbolic_vm)
self.laser_plugins.append(laser_plugin)
def is_enabled(self, laser_plugin: LaserPlugin):
""" Returns whether the plugin is loaded in the symbolic_vm
:param laser_plugin: plugin that will be checked
"""
return laser_plugin in self.laser_plugins
| Python | 0 | |
a01f4d47410ee1bf164d8b962f6337f8c39f0d16 | add quicksort recursive | sort/quick_sort/python/quicksort-recusive.py | sort/quick_sort/python/quicksort-recusive.py |
def quickSort(arr):
sort(arr,0,len(arr)-1)
def sort(arr, low, high):
if (low < high):
p = partition(arr, low, high)
sort(arr, low, p - 1)
sort(arr, p + 1, high)
def partition(arr, low, high):
pivot = arr[high]
i = (low - 1)
for j in range(low,high):
if (arr[j] <= pivot):
i+= 1
arr[i],arr[j] = arr[j],arr[i]
arr[i+1],arr[high] = arr[high],arr[i+1]
return i + 1 | Python | 0.000114 | |
e6b381a617808c500e115d5e3715dc2ae454e896 | Add command line tool | src/psd_tools2/__main__.py | src/psd_tools2/__main__.py | from __future__ import unicode_literals
import logging
import docopt
from psd_tools2 import PSDImage
from psd_tools2.version import __version__
try:
from IPython.lib.pretty import pprint
except ImportError:
from pprint import pprint
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
def main():
"""
psd-tools command line utility.
Usage:
psd-tools export <input_file> <output_file> [options]
psd-tools show <input_file> [options]
psd-tools debug <input_file> [options]
psd-tools -h | --help
psd-tools --version
Options:
-v --verbose Be more verbose.
Example:
psd-tools show example.psd # Show the file content
psd-tools export example.psd example.png # Export as PNG
psd-tools export example.psd[0] example-0.png # Export layer as PNG
"""
args = docopt.docopt(main.__doc__, version=__version__)
if args['--verbose']:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if args['export']:
input_parts = args['<input_file>'].split('[')
input_file = input_parts[0]
if len(input_parts) > 1:
indices = [int(x.rstrip(']')) for x in input_parts[1:]]
else:
indices = []
layer = PSDImage.open(input_file)
for index in indices:
layer = layer[index]
if isinstance(layer, PSDImage) and layer.has_preview():
image = layer.topil()
else:
image = layer.compose()
image.save(args['<output_file>'])
elif args['show']:
psd = PSDImage.open(args['<input_file>'])
pprint(psd)
elif args['debug']:
psd = PSDImage.open(args['<input_file>'])
pprint(psd._psd)
if __name__ == "__main__":
main()
| Python | 0.00002 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.