commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
3020472569a49f01331ebb150f004e2684196b8e | add expression to improve the domain | bin/tools/expression.py | bin/tools/expression.py | #!/usr/bin/env python
def _is_operator( element ):
return isinstance( element, str ) and element in ['&','|']
def _is_leaf( element ):
return isinstance( element, tuple ) and len( element ) == 3 and element[1] in ['=', '<>', '!=', '<=', '<', '>', '>=', 'like', 'not like', 'ilike', 'not ilike']
def _is_expression( element ):
return isinstance( element, tuple ) and len( element ) > 2 and _is_operator( element[0] )
class expression_leaf( object ):
def __init__(self, operator, left, right ):
self.operator = operator
self.left = left
self.right = right
def parse( self ):
return self
def to_sql( self ):
return "%s %s %s" % ( self.left, self.operator, self.right )
class expression( object ):
def __init__( self, exp ):
if isinstance( exp, tuple ):
if not _is_leaf( exp ) and not _is_operator( exp[0] ):
exp = list( exp )
if isinstance( exp, list ):
if len( exp ) == 1 and _is_leaf( exp[0] ):
exp = exp[0]
else:
if not _is_operator( exp[0][0] ):
exp.insert( 0, '&' )
exp = tuple( exp )
else:
exp = exp[0]
self.exp = exp
self.operator = '&'
self.children = []
def parse( self ):
if _is_leaf( self.exp ):
self.children.append( expression_leaf( self.exp[1], self.exp[0], self.exp[2] ).parse() )
elif _is_expression( self.exp ):
self.operator = self.exp[0]
for element in self.exp[1:]:
if not _is_operator( element ) and not _is_leaf(element):
self.children.append( expression(element).parse() )
else:
if _is_leaf(element):
self.children.append( expression_leaf( element[1], element[0], element[2] ).parse() )
return self
def to_sql( self ):
return "( %s )" % ((" %s " % {'&' : 'AND', '|' : 'OR' }[self.operator]).join([child.to_sql() for child in self.children]))
| Python | 0.000012 | |
0226bec54c30a31c0005e7318b69c58a379cfbc9 | refactor output function | mystarspilot/view.py | mystarspilot/view.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from colorama import Fore, Back, Style
class SearchResultView(object):
def print_search_result(self, search_result, keywords=None):
if search_result is not None:
for repo in search_result:
self.print_repo_name(repo)
self.print_repo_url(repo)
self.print_repo_language(repo)
self.print_repo_description(repo)
self.print_summary(search_result)
def print_summary(self, search_result):
self._print('', end='\n')
count = len(search_result)
fore_color = Fore.GREEN if count else Fore.YELLOW
text = "({} star{} found)".format(count if count else "No", 's' if count > 1 else '')
self._print(text, fore_color, end='\n')
def print_repo_name(self, repo):
self._print(repo.full_name, Fore.GREEN)
def print_repo_url(self, repo):
self._print("[{}]".format(repo.html_url), Fore.YELLOW)
def print_repo_language(self, repo):
if repo.language:
self._print(repo.language, Fore.BLUE, end='\n')
def print_repo_description(self, repo):
if repo.description:
self._print(repo.description, end='\n')
def _print(self, text='', fore_color=Fore.RESET, end=' '):
print(fore_color + text, end='')
print(Fore.RESET + Back.RESET + Style.RESET_ALL, end=end)
| Python | 0.998671 | |
245e661c50df41942ca3f0c8ee794532e3c02c4c | Translate PowerShell sample NCM.ExecuteScript.ps1 to python | samples/ncm_execute_script.py | samples/ncm_execute_script.py | from __future__ import print_function
import re
import requests
from orionsdk import SwisClient
from time import sleep
def main():
npm_server = 'localhost'
username = 'admin'
password = ''
swis = SwisClient(npm_server, username, password)
ip = '10.199.252.6'
data = swis.query('SELECT NodeID FROM Cirrus.Nodes WHERE AgentIP = @ip', ip=ip)['results']
nodeId = data[0]['NodeID']
script = 'show clock'
swis.invoke('Cirrus.ConfigArchive', 'Execute', [nodeId], script, username)
transferId = '{{{0}}}:{1}:ExecuteScript'.format(nodeId, username)
status = 'Queued'
while status != 'Complete' and status != 'Error':
sleep(1)
data = swis.query('SELECT T.Status, T.Error FROM Cirrus.TransferQueue T WHERE T.TransferID=@transfer', transfer=transferId)['results']
status = data[0]['Status']
data = swis.query('SELECT T.Log FROM Cirrus.TransferQueue T WHERE T.TransferID=@transfer', transfer=transferId)['results']
output = data[0]['Log']
print(output)
requests.packages.urllib3.disable_warnings()
if __name__ == '__main__':
main()
| Python | 0.999999 | |
7e91549abc8d185deb231c937d7740606f9454ec | add pmi element unit test | test_pmi_element.py | test_pmi_element.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# The unit test case for pmi.TopkHeap and PMIElement
#
# @author: Jason Wu (Jasonwbw@yahoo.com)
from pmi import PMIElement
import sys
import unittest
class PMIElementTestCase(unittest.TestCase):
def setUp(self):
pass
def tearGt(self):
f = PMIElement('f', 12)
e = PMIElement('e', 11)
self.assertEqual(True, e < f)
self.assertEqual(True, f > e)
def testEq(self):
f = PMIElement('f', 11)
e = PMIElement('e', 11)
g = PMIElement('e', 11)
self.assertEqual(False, e == f)
self.assertEqual(True, e == g)
def testPrintSomething(self):
pass
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
3c52683e759f146ad247c6e397d5d49dd1cc9966 | Create __init__.py | testing/__init__.py | testing/__init__.py | Python | 0.000429 | ||
a770c91ea6761d890387b4b6e130cb495817eea0 | Improve the sc2parse debugging script. | sc2reader/scripts/sc2parse.py | sc2reader/scripts/sc2parse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import sc2reader
import traceback
def main():
for argument in sys.argv[1:]:
for path in sc2reader.utils.get_files(argument):
try:
replay = sc2reader.load_replay(path, debug=True)
except sc2reader.exceptions.ReadError as e:
print e.replay.filename
print '{build} - {real_type} on {map_name} - Played {start_time}'.format(**e.replay.__dict__)
print '[ERROR]', e.message
for event in e.game_events[-5:]:
print '{0} - {1}'.format(hex(event.type),event.bytes.encode('hex'))
e.buffer.seek(e.location)
print e.buffer.peek(50).encode('hex')
print
except Exception as e:
print path
replay = sc2reader.load_replay(path, debug=True, load_level=1)
print '{build} - {real_type} on {map_name} - Played {start_time}'.format(**replay.__dict__)
print '[ERROR]', e
traceback.print_exc()
print
if __name__ == '__main__':
main() | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import sc2reader
def main():
for replay in sc2reader.load_replays(sys.argv[1:], verbose=True):
pass
if __name__ == '__main__':
main() | Python | 0 |
98295608a2ba4519d12212532380253bba4372ed | Add script that recommends scrape task schedule based on recent run timings | scripts/frequency_analysis.py | scripts/frequency_analysis.py | import asyncio
import attr
import pprint
import dateutil.parser
from datetime import timedelta
from bobsled.core import bobsled
from bobsled.base import Status
def recommend_frequency_for_task(runs):
total_duration = timedelta(seconds=0)
longest_duration = timedelta(seconds=0)
for run in runs:
start = dateutil.parser.parse(run.start)
end = dateutil.parser.parse(run.end)
duration = end - start
total_duration += duration
if duration > longest_duration:
longest_duration = duration
average = total_duration / len(runs)
if longest_duration.seconds <= 60*10:
return '0 */2 * * ?'
elif longest_duration.seconds <= 60*60:
return '0 */6 * * ?'
else:
return 'daily'
async def analyze_frequency():
await bobsled.initialize()
tasks = [attr.asdict(t) for t in await bobsled.storage.get_tasks()]
results = await asyncio.gather(
*[bobsled.run.get_runs(task_name=t["name"], latest=4) for t in tasks]
)
recommendations = []
for task, latest_runs in zip(tasks, results):
# make recommendations for scrape tasks that have runs
if latest_runs and '-scrape' in task['name']:
if all(run.status is Status.Success for run in latest_runs):
recommendation = recommend_frequency_for_task(latest_runs)
else:
# a recent run failed, made a note of that
recommendation = 'n/a - at least one recent task failed'
recommendations.append({
'task': task['name'],
'current_schedule': task['triggers'][0]['cron'],
'recommended': recommendation
})
changed_recommendations = []
for recommendation in recommendations:
if recommendation['recommended'] != 'daily' and 'n/a' not in recommendation['recommended']\
and recommendation['current_schedule'] != recommendation['recommended']:
changed_recommendations.append(recommendation)
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(changed_recommendations)
def main():
# asyncio.run(bobsled.initialize()) # this makes a threading problem if it's here
asyncio.run(analyze_frequency())
if __name__ == "__main__":
main()
| Python | 0 | |
dd22ea800dbbeffaace1927804d50da60ee77a30 | Fix TestPages.test_homepage_with_anonymous_giver. | tests/test_pages.py | tests/test_pages.py | from __future__ import print_function, unicode_literals
import datetime
from mock import patch
import pytz
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import GITHUB_USER_UNREGISTERED_LGTEST, Harness
from gittip.testing.client import TestClient
from gittip.utils import update_homepage_queries_once
from aspen.http.request import UnicodeWithParams
class TestPages(Harness):
def setUp(self):
super(Harness, self).setUp()
self.client = TestClient()
def get(self, url, returning='body'):
request = self.client.get(url)
return getattr(request, returning)
def test_homepage(self):
actual = self.client.get('/').body
expected = "Sustainable Crowdfunding"
assert expected in actual
def test_homepage_with_anonymous_giver(self):
self.platforms.twitter.get_account(UnicodeWithParams('bob', {})).opt_in("bob")
alice = self.make_participant('alice', anonymous=True, last_bill_result='')
alice.set_tip_to('bob', 1)
update_homepage_queries_once(self.db)
actual = self.client.get('/').body
expected = "Anonymous"
assert expected in actual
def test_profile(self):
self.make_participant('cheese',
claimed_time=datetime.datetime.now(pytz.utc))
expected = "I'm grateful for gifts"
actual = self.get('/cheese/').decode('utf8') # deal with cent sign
assert expected in actual
def test_widget(self):
self.make_participant('cheese',
claimed_time=datetime.datetime.now(pytz.utc))
expected = "javascript: window.open"
actual = self.get('/cheese/widget.html')
assert expected in actual
def test_bank_account(self):
expected = "add<br> or change your bank account"
actual = self.get('/bank-account.html')
assert expected in actual
def test_credit_card(self):
expected = "add<br> or change your credit card"
actual = self.get('/credit-card.html')
assert expected in actual
def test_github_associate(self):
expected = "Forbidden, program!"
actual = self.get('/on/github/associate')
assert expected in actual
def test_twitter_associate(self):
expected = "Forbidden, program!"
actual = self.get('/on/twitter/associate')
assert expected in actual
def test_about(self):
expected = "small weekly cash gifts"
actual = self.get('/about/')
assert expected in actual
def test_about_stats(self):
expected = "have joined Gittip"
actual = self.get('/about/stats.html')
assert expected in actual
def test_about_charts(self):
expected = "Money transferred"
actual = self.get('/about/charts.html')
assert expected in actual
@patch('gittip.elsewhere.github.requests')
def test_github_proxy(self, requests):
requests.get().status_code = 200
requests.get().text = GITHUB_USER_UNREGISTERED_LGTEST
expected = "lgtest has not joined"
actual = self.get('/on/github/lgtest/').decode('utf8')
assert expected in actual
# This hits the network. XXX add a knob to skip this
def test_twitter_proxy(self):
expected = "twitter has not joined"
actual = self.get('/on/twitter/twitter/').decode('utf8')
assert expected in actual
def test_404(self):
actual = self.get('/about/four-oh-four.html')
assert "Page Not Found" in actual
assert "{%" not in actual
def test_bank_account_complete(self):
expected = "Page Not Found"
actual = self.get('/bank-account-complete.html')
assert expected in actual
def test_bank_account_json(self):
expected = "Page Not Found"
actual = self.get('/bank-account.json')
assert expected in actual
def test_credit_card_json(self):
expected = "Page Not Found"
actual = self.get('/credit-card.json')
assert expected in actual
| from __future__ import print_function, unicode_literals
import datetime
from mock import patch
import pytz
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import GITHUB_USER_UNREGISTERED_LGTEST, Harness
from gittip.testing.client import TestClient
from gittip.utils import update_homepage_queries_once
class TestPages(Harness):
def setUp(self):
super(Harness, self).setUp()
self.client = TestClient()
def get(self, url, returning='body'):
request = self.client.get(url)
return getattr(request, returning)
def test_homepage(self):
actual = self.client.get('/').body
expected = "Sustainable Crowdfunding"
assert expected in actual
def test_homepage_with_anonymous_giver(self):
TwitterAccount("bob", {}).opt_in("bob")
alice = self.make_participant('alice', anonymous=True, last_bill_result='')
alice.set_tip_to('bob', 1)
update_homepage_queries_once(self.db)
actual = self.client.get('/').body
expected = "Anonymous"
assert expected in actual
def test_profile(self):
self.make_participant('cheese',
claimed_time=datetime.datetime.now(pytz.utc))
expected = "I'm grateful for gifts"
actual = self.get('/cheese/').decode('utf8') # deal with cent sign
assert expected in actual
def test_widget(self):
self.make_participant('cheese',
claimed_time=datetime.datetime.now(pytz.utc))
expected = "javascript: window.open"
actual = self.get('/cheese/widget.html')
assert expected in actual
def test_bank_account(self):
expected = "add<br> or change your bank account"
actual = self.get('/bank-account.html')
assert expected in actual
def test_credit_card(self):
expected = "add<br> or change your credit card"
actual = self.get('/credit-card.html')
assert expected in actual
def test_github_associate(self):
expected = "Forbidden, program!"
actual = self.get('/on/github/associate')
assert expected in actual
def test_twitter_associate(self):
expected = "Forbidden, program!"
actual = self.get('/on/twitter/associate')
assert expected in actual
def test_about(self):
expected = "small weekly cash gifts"
actual = self.get('/about/')
assert expected in actual
def test_about_stats(self):
expected = "have joined Gittip"
actual = self.get('/about/stats.html')
assert expected in actual
def test_about_charts(self):
expected = "Money transferred"
actual = self.get('/about/charts.html')
assert expected in actual
@patch('gittip.elsewhere.github.requests')
def test_github_proxy(self, requests):
requests.get().status_code = 200
requests.get().text = GITHUB_USER_UNREGISTERED_LGTEST
expected = "lgtest has not joined"
actual = self.get('/on/github/lgtest/').decode('utf8')
assert expected in actual
# This hits the network. XXX add a knob to skip this
def test_twitter_proxy(self):
expected = "twitter has not joined"
actual = self.get('/on/twitter/twitter/').decode('utf8')
assert expected in actual
def test_404(self):
actual = self.get('/about/four-oh-four.html')
assert "Page Not Found" in actual
assert "{%" not in actual
def test_bank_account_complete(self):
expected = "Page Not Found"
actual = self.get('/bank-account-complete.html')
assert expected in actual
def test_bank_account_json(self):
expected = "Page Not Found"
actual = self.get('/bank-account.json')
assert expected in actual
def test_credit_card_json(self):
expected = "Page Not Found"
actual = self.get('/credit-card.json')
assert expected in actual
| Python | 0 |
e6642dd9c9cad6aca3cb70e4cca53afe51494d4b | Add a test for checking setup.py | tests/test_setup.py | tests/test_setup.py | r""" Testspectra_gen functions"""
def test_setup():
import os
cmd = "python3 setup.py check"
os.system(cmd) | Python | 0.000002 | |
0424eb7dd8e55e2f88f088c3a84c8e962d89f06e | build perf from source | tools/perf_build.py | tools/perf_build.py | #!/usr/bin/env python
import platform
import subprocess
if subprocess.call('which sudo', shell=True) == 0:
with_sudo = 'sudo '
else:
with_sudo = ''
major = int(platform.release().split('.')[0])
minor = int(platform.release().split('.')[1])
revision = int(platform.release().split('.')[2].split('-')[0])
url_kernel = 'https://cdn.kernel.org/pub/linux/kernel/v%d.x/linux-%d.%d.tar.gz' % (major, major, minor)
tarfile = 'linux-%d.%d.tar.gz' % (major, minor)
source_dir = 'linux-%d.%d' % (major, minor)
print('URL: ', url_kernel)
print('TarFile: ', tarfile)
subprocess.call('rm -r %s' % (source_dir), shell=True)
subprocess.call('rm %s' % (tarfile), shell=True)
subprocess.call('wget %s' % (url_kernel) , shell=True)
subprocess.call('tar xf %s && make -j -C %s/tools/perf' % (tarfile, source_dir) , shell=True)
subprocess.call(with_sudo + 'cp %s/tools/perf/perf /usr/bin/' % (source_dir) , shell=True)
subprocess.call('rm -r %s' % (source_dir), shell=True)
subprocess.call('rm %s' % (tarfile), shell=True)
subprocess.call('ls -lah /usr/bin/perf', shell=True)
#get kernelversion
#wget http://www.kernel.org/pub/linux/kernel/v2.6/testing/linux-2.6.33-rc3.tar.bz2
| Python | 0 | |
ef3e07794d4245b9d4a1d0007a0b9099d5bafaf9 | Add asteval wrapper | project/asteval_wrapper.py | project/asteval_wrapper.py | from asteval import Interpreter
import functools
import re
class Script(object):
def __init__(self):
"""
Sets up an interpreter.
"""
self.interpreter = Interpreter()
self.symtable['re'] = re
@property
def symtable(self):
"""
Expose the internal symbol table.
"""
return self.interpreter.symtable
@symtable.setter
def symtable(self, symtable):
"""
Apply changes to the internal symbol table.
"""
self.interpreter.symtable = symtable
def add_file(self, path):
"""
Adds and loads code from a script file.
"""
with open(path, 'rb') as f:
self.interpreter(f.read())
def invoke(self, name, *args, **kwargs):
"""
Invokes a function in the script with the appropriate arguments.
"""
f = self.interpreter.symtable.get(name, None)
if not callable(f):
return
return f(*args, **kwargs)
def __getattr__(self, name):
"""
Returns the function to invoke a function in the script, if a function
with that name exists within the symbol table. Otherwise, an attribute
error is being raised (default behaviour).
"""
if name in ['symtable', 'interpreter']:
raise AttributeError("{} instance has no attribute '{}'".format(
self.__class__.__name__, name))
if not callable(self.symtable.get(name, None)):
raise AttributeError("{} instance has no attribute '{}'".format(
self.__class__.__name__, name))
return functools.partial(self.invoke, name)
| Python | 0.000001 | |
126863fd6c2a13491b92d546d3e886d0e0da492b | Add experiment for nodejs. | swig/node/binding.gyp | swig/node/binding.gyp | {
"targets": [
{
"target_name": "velocypack",
"sources": [ "../../src/asm-functions.cpp",
"../../src/AttributeTranslator.cpp",
"../../src/Builder.cpp",
"../../src/Collection.cpp",
"../../src/Dumper.cpp",
"../../src/Exception.cpp",
"../../src/fasthash.cpp",
"../../src/fpconv.cpp",
"../../src/HexDump.cpp",
"../../src/Iterator.cpp",
"../../src/Options.cpp",
"../../src/Parser.cpp",
"../../src/Slice.cpp",
"../../src/ValueType.cpp",
"../../src/velocypack-common.cpp",
"../../src/Version.cpp",
"velocypack_wrap.cxx" ],
"include_dirs": [ "../../include", "../../src", "/usr/local/node-v5.0.0-linux-x64/include/node" ],
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions" ]
}
]
}
| Python | 0 | |
ad0a1bf70dc2776c88115389400fd6958e49ecc8 | Add rsync package | var/spack/packages/rsync/package.py | var/spack/packages/rsync/package.py | from spack import *
class Rsync(Package):
"""rsync is an open source utility that provides fast incremental file transfer."""
homepage = "https://rsync.samba.org"
url = "https://download.samba.org/pub/rsync/rsync-3.1.1.tar.gz"
version('3.1.1', '43bd6676f0b404326eee2d63be3cdcfe')
# depends_on("foo")
def install(self, spec, prefix):
configure('--prefix=%s' % prefix)
make()
make("install")
| Python | 0.000001 | |
5869091cc63afbe9c8bde2bf6e9f934c46d3c3f5 | Create generate_api_error.py | vnpy/api/tap/generator/generate_api_error.py | vnpy/api/tap/generator/generate_api_error.py | """"""
class DataTypeGenerator:
"""DataType生成器"""
def __init__(self, filename: str, prefix: str, name: str) -> None:
"""Constructor"""
self.filename: str = filename
self.prefix: str = prefix
self.name: str = name
def run(self) -> None:
"""主函数"""
self.f_cpp = open(self.filename, "r", encoding="UTF-8")
self.f_define = open(f"{self.prefix}_{self.name}_error_constant.py", "w", encoding="UTF-8")
for line in self.f_cpp:
self.process_line(line)
self.f_cpp.close()
self.f_define.close()
print(f"{self.name}_DataType生成完毕")
def process_line(self, line: str) -> None:
"""处理每行"""
line = line.replace("\n", "")
line = line.replace(";", "")
# print(line)
# MD
if self.name == "md":
if line.startswith("const int"):
self.process_int(line)
elif self.name == "td":
if line.startswith(" const int"):
self.process_int(line)
def process_int(self, line: str) -> None:
"""处理类型定义"""
sectors = line.split("=")
value = sectors[1].strip()
words = sectors[0].split(" ")
words = [word for word in words if word != ""]
name = words[-1].strip()
new_line = f"{name} = {value}\n"
self.f_define.write(new_line)
# def process_char_td(self, line: str) -> None:
# words = line.split(" ")
# words = [word for word in words if word != ""]
# name = words[-1]
# if "[" in name:
# name = name.split("[")[0]
# new_line = f"{name} = \"string\"\n"
# else:
# new_line = f"{name} = \"char\"\n"
# self.f_typedef.write(new_line)
# def process_const_md(self, line: str) -> None:
# """"""
# sectors = line.split("=")
# value = sectors[1].strip()
# words = sectors[0].split(" ")
# words = [word for word in words if word != ""]
# # name = words[1].strip()
# print(value, words)
# # new_line = f"{name} = {value}\n"
# # self.f_define.write(new_line)
# def process_const_td(self, line: str):
# sectors = line.split("=")
# value = sectors[1].replace("\'", "\"").strip()
# words = sectors[0].split(" ")
# words = [word for word in words if word != ""]
# name = words[-1].strip()
# new_line = f"{name} = {value}\n"
# self.f_define.write(new_line)
if __name__ == "__main__":
# md_generator = DataTypeGenerator("../include/tap/TapAPIError.h", "tap", "md")
# md_generator.run()
td_generator = DataTypeGenerator("../include/tap/iTapAPIError.h", "tap", "td")
td_generator.run()
| Python | 0.000126 | |
d205284e21f5fad8195d796ad356042cb5c47894 | add log test | py_logging/test_logging.py | py_logging/test_logging.py | #!/usr/bin/env python
# encoding: utf-8
import logging
import os
import time
from unittest import TestCase
class TestLogging(TestCase):
def setUp(self):
dir_path = os.path.dirname(__file__)
self.logfile = os.path.join(dir_path, "tmp.log")
self.logger = logging.getLogger(
"test_logger_%s" % int(time.time() * 1000))
def tearDown(self):
if os.path.exists(self.logfile):
os.remove(self.logfile)
def log_lines(self):
with open(self.logfile, "rt") as fp:
return [l.strip() for l in fp]
def test_logger(self):
self.assertEqual(self.logger.level, logging.NOTSET)
def test_filehandler(self):
filehdr = logging.FileHandler(self.logfile)
self.logger.addHandler(filehdr)
self.logger.setLevel(logging.INFO)
self.logger.debug("debug")
self.logger.info("info")
self.logger.warning("warning")
self.logger.error("error")
self.logger.critical("critical")
self.assertListEqual(self.log_lines(), [
"info", "warning", "error", "critical"])
def test_format(self):
filehdr = logging.FileHandler(self.logfile)
logfmt = logging.Formatter("test: %(name)s %(levelname)-8s %(message)s")
filehdr.setFormatter(logfmt)
self.logger.addHandler(filehdr)
self.logger.setLevel(logging.INFO)
self.logger.info("info")
self.assertListEqual(self.log_lines(), [
"test: %s INFO info" % (self.logger.name,)])
| Python | 0.000001 | |
9b2e0396f1121f94d6b66daa26c83bb85bc1a79a | format string tests | pychecker2/utest/format.py | pychecker2/utest/format.py | from pychecker2 import TestSupport
from pychecker2 import FormatStringChecks
class FormatTestCase(TestSupport.WarningTester):
def testGoodFormats(self):
self.silent('def f(x):\n'
' return "%s" % x\n')
self.silent('def f(x):\n'
" return ('%s' + '%s') % (x, x)\n")
self.silent("def f(x):\n"
" return (('%s' + '%s') * 8) % ((x,) * 16)\n")
self.silent("def f(x):\n"
" y = 2\n"
" return '%(x)f %(y)s' % locals()\n")
self.silent("y = 1\n"
"def f():\n"
" return '%(y)s' % globals()\n")
self.silent("def f():\n"
" return '%*.s %*.*s %*f' % locals()\n")
self.silent("def f():\n"
" return '%s %%' % ('',)\n")
self.silent("def f(t):\n"
" return '%s %f' % t\n")
self.silent("def f(t):\n"
" return ('%s %f' + t) % (1, 2)\n")
self.silent("def f(t):\n"
" return '%s' % `t`\n")
self.silent("def f(t):\n"
" return '%s' * ((7 - 1) / 2) % (t,t,t)\n")
def testBadFormats(self):
w = FormatStringChecks.FormatStringCheck.badFormat
self.warning("def f():\n"
" return '%' % locals()\n", 2, w, 0, '%')
self.warning("def f():\n"
" return '%z a kookie format, yah' % locals()\n",
2, w, 0, '%z a kooki...')
self.warning("def f():\n"
" return '%(foo)*.*s' % {'foo': 'bar'}\n",
2, w, 0, '%(foo)*.*s')
def testMixed(self):
w = FormatStringChecks.FormatStringCheck.mixedFormat
self.warning("def f():\n"
" return '%(mi)x %up' % locals()\n", 2, w, '(mi)')
self.warning("def f():\n"
" return '%up %(mi)x' % (1, 2)\n", 2, w, '(mi)')
def testFormatCount(self):
w = FormatStringChecks.FormatStringCheck.formatCount
self.warning("def f():\n"
" return '%s %d %f' % ('', 2)\n",
2, w, 2, 3)
def testUselessModifier(self):
w = FormatStringChecks.FormatStringCheck.uselessModifier
self.warning("def f(t):\n"
" return '%s %lf' % (t, t)\n",
2, w, 'l')
def testFormatConstants(self):
w = FormatStringChecks.FormatStringCheck.badConstant
self.warning("def f():\n"
" return ('%s' * 6) % ((1, 2) + 3 * 7)\n",
2, w, 'can only concatenate tuple (not "int") to tuple')
self.warning("def f():\n"
" return ('%s' + 6) % ((1, 2) * 3)\n",
2, w, "cannot concatenate 'str' and 'int' objects")
def testUnknownName(self):
w = FormatStringChecks.FormatStringCheck.unknownFormatName
self.warning("def f():\n"
" return '%(unknown)s' % globals()\n",
2, w, "unknown", "globals")
self.warning("def f():\n"
" return '%(unknown)s' % locals()\n",
2, w, "unknown", "locals")
| Python | 0.000005 | |
2e2bae00f7b098e5fd20f2901b4f70554e250d2d | add program to plot offset distribution | python/plot_offset_dist.py | python/plot_offset_dist.py | #!/usr/bin/env python
import argparse
import numpy as np
import glob
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams.update({'font.size': 10})
import matplotlib.pyplot as plt
def add_stat_legend(x):
textstr = '$\mathrm{N}=%d$\n$\mathrm{mean}=%.2f$\n$\mathrm{median}=%.2f$\n$\mathrm{std}=%.2f$' % (
len(x), np.nanmean(x), np.nanmedian(x), np.nanstd(x))
props = dict(boxstyle='round', facecolor='white')
plt.text(0.95, 0.95, textstr, transform=plt.gca().transAxes, va='top', ha='right', bbox=props)
def main():
# parse command-line arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--verbose", action="store_true",
help="print verbose output")
parser.add_argument("-o", "--output", type=str, default=None,
help="output file base name")
parser.add_argument("-i", "--input", type=str, default=None,
help="required input file")
args = parser.parse_args()
nfiles = len(filenames)
# the first is the fiberid and the next two columns are xfocal and yfocal positions of the target
nidtokens = 3
# the rest are the tabulated throughput correction values
npoints = 71
# the throughput correction vectors span the range 3500A to 10500A
xvalues = np.linspace(3500, 10500, npoints, endpoint=True)
offset_dict = {}
for x in xvalues:
offset_dict[x] = []
offsets = []
for i,filename in enumerate(filenames):
plate, mjd = filename.split('.')[0].split('-')[-2:]
data = np.loadtxt(filename, ndmin=2)
nentries, ntokens = data.shape
assert ntokens == 3*npoints + nidtokens
for row in data:
fiberid, xfocal, yfocal = row[0:nidtokens]
offset = row[nidtokens+0::3]
fiber_fraction = row[nidtokens+1::3]
tpcorr = row[nidtokens+2::3]
offsets.append(offsets)
offsets_array = np.vstack(offsets)
for i,x in enumerate(xvalues):
offsets_wave_slice = offsets_array[:,i]
fig = plt.figure(figsize=(8,6))
plt.hist(offsets_wave_slice, bins=50, histtype='stepfilled', alpha=0.5)
plt.xlabel('Centroid offset (arcseconds)')
plt.ylabel('Counts')
plt.title(r%'$\lambda = %s$' % x)
plt.xlim([0, 2])
add_stat_legend(offsets_wave_slice)
plt.grid(True)
fig.savefig(args.output+'-%s.png'%x, bbox_inches='tight')
if __name__ == '__main__':
main()
| Python | 0 | |
091432b795e3b5571887eb924fb831060d2fd53b | Add logging setup | turbinia/config/logger.py | turbinia/config/logger.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets up logging."""
import logging
from turbinia import config
def setup(root=False):
"""Set up logging parameters."""
config.LoadConfig()
log = logging.getLogger('turbinia')
fh = logging.FileHandler(config.LOG_FILE)
formatter = logging.Formatter(u'%(asctime)s:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter(u'[%(levelname)s] %(message)s')
ch.setFormatter(formatter)
log.addHandler(fh)
log.addHandler(ch)
# Optionally configure the root logger because other modules like PSQ use
# this, and we want to see log messages from it when executing from CLI.
if root:
root_log = logging.getLogger()
root_log.addHandler(ch)
root_log.setLevel(logging.DEBUG)
| Python | 0.000001 | |
99866bc8e47e1457bb12730e0190d6690ad024e2 | Create pj_cpp_petlje.py | pj_cpp_petlje.py | pj_cpp_petlje.py | from pj import *
class CPP(enum.Enum):
FOR = 'for'
COUT = 'cout'
ENDL = 'endl'
OOTV, OZATV, VOTV, VZATV = '(){}'
MANJE, JEDNAKO, TOČKAZ = '<=;'
PLUSP = '++'
PLUSJ = '+='
IZLAZ = '<<'
BROJ = 34545
IME = 'varijabla'
def cpp_lex(source):
lex = Tokenizer(source)
for znak in iter(lex.čitaj, ''):
if znak.isspace(): lex.token(E.PRAZNO)
elif znak == '+':
sljedeći = lex.čitaj()
if sljedeći == '+': yield lex.token(CPP.PLUSP)
elif sljedeći == '=': yield lex.token(CPP.PLUSJ)
else: lex.greška('nema samostalnog +')
elif znak == '<':
if lex.čitaj() == '<': yield lex.token(CPP.IZLAZ)
else:
lex.vrati()
yield lex.token(CPP.MANJE)
elif znak.isalpha():
lex.zvijezda(identifikator)
yield lex.token(ključna_riječ(CPP, lex.sadržaj) or CPP.IME)
elif znak.isdigit():
lex.zvijezda(str.isdigit)
if lex.sadržaj == '0': yield lex.token(CPP.BROJ)
elif lex.sadržaj[0] != '0': yield lex.token(CPP.BROJ)
else: lex.greška('druge baze nisu podržane')
else: yield lex.token(operator(CPP, znak) or lex.greška())
# start -> naredba naredbe
# naredbe -> '' | naredba naredbe
# naredba -> petlja | izlaz TOČKAZ
# for -> FOR OOTV IME JEDNAKO BROJ TOČKAZ IME MANJE BROJ
# TOČKAZ IME inkrement OZATV
# petlja -> for naredba | for VOTV naredbe VZATV
# inkrement -> PLUSPLUS | PLUSJEDNAKO BROJ
# izlaz -> COUT varijable | COUT varijable IZLAZ ENDL
# varijable -> '' | IZLAZ IME varijable
class Program(AST('naredbe')):
def izvrši(self):
memorija = {}
for naredba in self.naredbe:
naredba.izvrši(memorija)
class Petlja(AST('varijabla početak granica inkrement blok')):
    """AST node for a `for` loop: var = start; var < limit; var += step."""
    def izvrši(self, mem):
        mem[self.varijabla.sadržaj] = int(self.početak.sadržaj)
        while mem[self.varijabla.sadržaj] < int(self.granica.sadržaj):
            for naredba in self.blok:
                naredba.izvrši(mem)
            # `nenavedeno` marks the `++` form, which increments by 1;
            # `+= N` stores the N token in self.inkrement.
            inkr = self.inkrement
            if inkr is nenavedeno: inkr = 1
            else: inkr = int(inkr.sadržaj)
            mem[self.varijabla.sadržaj] += inkr
class Izlaz(AST('varijable novired')):
    """AST node for `cout << var << ... [<< endl];`."""
    def izvrši(self, mem):
        for varijabla in self.varijable:
            if varijabla.sadržaj in mem:
                # Values are separated by a space, no newline.
                print(mem[varijabla.sadržaj], end=' ')
            else:
                # Report use of an undeclared variable via the token.
                varijabla.nedeklaracija()
        if self.novired:
            print()
class CPPParser(Parser):
    """Recursive-descent parser for the mini C++ grammar documented above."""
    def start(self):
        """start -> naredba naredbe : parse statements until end of input."""
        naredbe = []
        while not self >> E.KRAJ:
            naredbe.append(self.naredba())
        return Program(naredbe)
    def naredba(self):
        """naredba -> petlja | izlaz : a statement is a loop or an output."""
        if self >> CPP.FOR:
            return self.petlja()
        elif self >> CPP.COUT:
            return self.izlaz()
        else:
            self.greška()
    def petlja(self):
        """Parse `for ( i = N ; i < M ; i ++|+= K ) body`.

        All three positions must use the same loop variable.
        """
        self.pročitaj(CPP.OOTV)
        i = self.pročitaj(CPP.IME)
        self.pročitaj(CPP.JEDNAKO)
        početak = self.pročitaj(CPP.BROJ)
        self.pročitaj(CPP.TOČKAZ)
        i2 = self.pročitaj(CPP.IME)
        if i != i2: raise SemantičkaGreška('nisu podržane različite varijable')
        self.pročitaj(CPP.MANJE)
        granica = self.pročitaj(CPP.BROJ)
        self.pročitaj(CPP.TOČKAZ)
        i3 = self.pročitaj(CPP.IME)
        if i != i3: raise SemantičkaGreška('nisu podržane različite varijable')
        # NOTE(review): if the input has neither '++' nor '+=' here,
        # `inkrement` is never bound and building Petlja below raises a
        # NameError instead of a parse error — confirm intended.
        if self >> CPP.PLUSP:
            inkrement = nenavedeno
        elif self >> CPP.PLUSJ:
            inkrement = self.pročitaj(CPP.BROJ)
        self.pročitaj(CPP.OZATV)
        # Body is either a braced block or a single statement.
        if self >> CPP.VOTV:
            blok = []
            while not self >> CPP.VZATV:
                blok.append(self.naredba())
        else:
            blok = [self.naredba()]
        return Petlja(i, početak, granica, inkrement, blok)
    def izlaz(self):
        """Parse `cout << v1 << v2 ... [<< endl] ;`."""
        varijable = []
        novired = False
        while self >> CPP.IZLAZ:
            if self >> CPP.IME:
                varijable.append(self.zadnji)
            elif self >> CPP.ENDL:
                # endl terminates the chain and requests a newline.
                novired = True
                break
        self.pročitaj(CPP.TOČKAZ)
        return Izlaz(varijable, novired)
if __name__ == '__main__':
    # Demo: lex, parse and execute a small nested-for program.
    CPPParser.parsiraj(cpp_lex('''
for ( i = 8 ; i < 23 ; i += 2 )
    for(j=0; j<3; j++) cout<<i<<j<<endl;
''')).izvrši()
| Python | 0.000125 | |
f9998701bafa24fce25156751fefdfa97074c801 | Add protocol conformance map | utils/gyb_syntax_support/protocolsMap.py | utils/gyb_syntax_support/protocolsMap.py | SYNTAX_BUILDABLE_EXPRESSIBLE_AS_CONFORMANCES = {
'ExpressibleAsConditionElement': [
'ExpressibleAsConditionElementList'
],
'ExpressibleAsDeclBuildable': [
'ExpressibleAsCodeBlockItem',
'ExpressibleAsMemberDeclListItem',
'ExpressibleAsSyntaxBuildable'
],
'ExpressibleAsStmtBuildable': [
'ExpressibleAsCodeBlockItem',
'ExpressibleAsSyntaxBuildable'
],
'ExpressibleAsExprList': [
'ExpressibleAsConditionElement',
'ExpressibleAsSyntaxBuildable'
]
}
| Python | 0 | |
143eb4665e76065ec67b5dd42cfe84e238d50094 | use per post winner count if available to overide settings | candidates/constants.py | candidates/constants.py | ELECTION_ID_REGEX = r'(?P<election>[^/]+)'
POST_ID_REGEX = r'(?P<post_id>[^/]+)'
| Python | 0 | |
6aef9ab419b09822b2255141349144ac8978e862 | Add migration for h5p kind. | kolibri/core/content/migrations/0025_add_h5p_kind.py | kolibri/core/content/migrations/0025_add_h5p_kind.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-12-19 02:29
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Add 'h5p' to the set of valid ContentNode ``kind`` choices."""

    dependencies = [
        ("content", "0024_channelmetadata_public"),
    ]

    operations = [
        # Only the `choices` list changes; the column itself is unchanged
        # (CharField, max_length=200, blank allowed).
        migrations.AlterField(
            model_name="contentnode",
            name="kind",
            field=models.CharField(
                blank=True,
                choices=[
                    ("topic", "Topic"),
                    ("video", "Video"),
                    ("audio", "Audio"),
                    ("exercise", "Exercise"),
                    ("document", "Document"),
                    ("html5", "HTML5 App"),
                    ("slideshow", "Slideshow"),
                    ("h5p", "H5P"),
                ],
                max_length=200,
            ),
        ),
    ]
| Python | 0 | |
39b00572d7888895bcf552999f80b712c1738142 | Create BillboardIter.py | BillboardIter.py | BillboardIter.py | from datetime import date, timedelta
class BillboardDates():
'''Iterator over valid Billboard Chart weeks, which is
supposed to be a per-class singleton for start quantization'''
def __init__(self, endDate=date.today()):
assert type(endDate) is str or type(endDate) is date
self.endDate = endDate
if type(endDate) is not date:
self.endDate = self.str_to_date(endDate)
self.currentDate = date(1958, 8, 9)
def __iter__(self):
return self
def __next__(self):
if self.compare_dates(self.endDate) >= 0:
raise StopIteration
current = self.currentDate
self.increment()
return current
def str_to_date(self, string):
year, month, day = string.split('-')
return date(int(year), int(month), int(day))
def increment(self, days=7):
'''Serves as an abstraction barrier'''
self.currentDate = self.currentDate + timedelta(days)
def __repr__(self):
return str(self.currentDate)
def compare_dates(self, dateObj):
'''Returns 1 if current date is larger, 0 if equal, -1 if smaller'''
# check year first
if self.currentDate > dateObj:
return 1
elif self.currentDate < dateObj:
return -1
return 0 # if they are equal
class BillboardIter(BillboardDates):
    """Iterator over valid Billboard chart weeks that quantizes the start
    date forward to the next valid chart date.

    NOTE(review): ``_BillboardDates`` is class-level shared state; creating
    a second instance with an earlier start date will mis-quantize because
    the shared iterator has already been advanced — confirm whether
    per-instance state was intended.
    """
    # Shared enumerator of valid chart dates, used only by quantizeStart().
    _BillboardDates = BillboardDates()

    def __init__(self, startDate, endDate=None):
        assert type(startDate) is str or type(startDate) is date
        # Resolve the default at call time; a ``date.today()`` signature
        # default would be evaluated once at class-definition time and go
        # stale.  Resolved here (not delegated) so this class also works
        # with the parent's original signature.
        if endDate is None:
            endDate = date.today()
        super().__init__(endDate)
        self.initDate = startDate
        if type(self.initDate) is not date:
            self.initDate = self.str_to_date(self.initDate)
        self.currentDate = self.initDate
        self.quantizeStart()

    def reset(self):
        """Rewind to the original start date and re-quantize it."""
        self.currentDate = self.initDate
        self.quantizeStart()

    def quantizeStart(self):
        '''Quantizes starting date to the closest following Billboard chart'''
        bbDate = self._BillboardDates.currentDate
        # Advance the shared chart-date enumerator until it passes our start,
        while self.compare_dates(bbDate) >= 0:  # get BB date up to start
            bbDate = next(self._BillboardDates)
        # then walk our own date forward a day at a time onto that chart date.
        while self.compare_dates(bbDate) < 0:  # get start up to valid BB date
            self.increment(1)
| Python | 0 | |
566850c873f6bdbed6632388330f8e4df6fbe613 | add migration for accordeon block on homepage | meinberlin/apps/cms/migrations/0021_add_accordeon_block.py | meinberlin/apps/cms/migrations/0021_add_accordeon_block.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-08 12:57
from __future__ import unicode_literals
from django.db import migrations
import meinberlin.apps.cms.blocks
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
    """Add an 'accordion' block type to the HomePage ``body`` StreamField."""

    dependencies = [
        ('meinberlin_cms', '0020_add_header_block'),
    ]

    operations = [
        # The StreamField definition is repeated wholesale (as Django
        # migrations require); the only addition is the trailing
        # ('accordion', StructBlock(title, optional body)) entry.
        migrations.AlterField(
            model_name='homepage',
            name='body',
            field=wagtail.wagtailcore.fields.StreamField((('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(template='meinberlin_cms/blocks/richtext_block.html')), ('call_to_action', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock()), ('link', wagtail.wagtailcore.blocks.CharBlock()), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=50))))), ('image_call_to_action', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('body', wagtail.wagtailcore.blocks.RichTextBlock()), ('link', wagtail.wagtailcore.blocks.CharBlock()), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=50))))), ('columns_text', wagtail.wagtailcore.blocks.StructBlock((('columns_count', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(2, 'Two columns'), (3, 'Three columns'), (4, 'Four columns')])), ('columns', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.RichTextBlock(label='Column body')))))), ('projects', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('projects', wagtail.wagtailcore.blocks.ListBlock(meinberlin.apps.cms.blocks.ProjectSelectionBlock(label='Project')))))), ('activities', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(label='Heading')), ('count', wagtail.wagtailcore.blocks.IntegerBlock(default=5, label='Count'))))), ('accordion', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock()), ('body', wagtail.wagtailcore.blocks.RichTextBlock(required=False))))))),
        ),
    ]
| Python | 0 | |
ecfadf8478b8775d8579812a7bd835f6ebb1ffd4 | Add file lister for rclone export | util/rclone-list-files.py | util/rclone-list-files.py | #!/usr/bin/env python3
import glob
# For use with --files-from argument for Rclone
# This suits Edgar's structure with is
# SPECIESNAME/{occurrences|projected-distributions}/[2nd-to-latest-file-is-the-latest].zip
for folder in glob.glob('*'):
occurrences = glob.glob(folder + '/occurrences/*')
projected_distributions = glob.glob(folder + '/projected-distributions/*')
if not 'latest' in occurrences[-1] and not 'latest' in projected_distributions[-1]:
print(f'No latest in {folder}!')
exit(1)
print(folder + '/metadata.json')
print(occurrences[-2])
print(projected_distributions[-2])
| Python | 0 | |
47e3a59dd05f30f1ce0c41e0aa531987fb33606c | Generate watersheds for new EM dataset | new-em-watersheds.py | new-em-watersheds.py | from gala import morpho
from gala import imio
import numpy as np
pr = imio.read_image_stack('membrane/*.tiff')
ws = morpho.watershed_sequence(pr / pr.max(), axis=0, connectivity=2, smooth_thresh=0.02, minimum_seed_size=2)
imio.write_h5_stack(ws, 'watershed.lzf.h5', compression='lzf')
slices = [(slice(None), slice(None, 625), slice(None, 625)),
(slice(None), slice(None, 625), slice(625, None)),
(slice(None), slice(625, None), slice(None, 625)),
(slice(None), slice(625, None), slice(625, None))]
wss = [ws[s] for s in slices]
from skimage.measure import label
for i, vol in enumerate(wss):
fn = 'watershed-%i.lzf.h5' % i
vol_relabel = label(vol)
print(np.max(vol_relabel))
imio.write_h5_stack(vol_relabel, fn, compression='lzf')
| Python | 0.999865 | |
629bd006bfd7e6210dcc95198be9b65614e4f051 | Convert optimization_test.py to PyTorch | optimization_test_pytorch.py | optimization_test_pytorch.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optimization_pytorch as optimization
import torch
import unittest
class OptimizationTest(unittest.TestCase):
    """Tests for the PyTorch port of the BERT Adam optimizer."""
    def assertListAlmostEqual(self, list1, list2, tol):
        """Assert two equal-length lists match elementwise within *tol*."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam(self):
        """BERTAdam should drive w towards the target x within 100 steps.

        NOTE(review): gradients are never zeroed between steps
        (no optimizer.zero_grad()) — confirm BERTAdam tolerates
        accumulated gradients, or that this is intentional.
        """
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        x = torch.tensor([0.4, 0.2, -0.5])
        criterion = torch.nn.MSELoss(reduction='elementwise_mean')
        optimizer = optimization.BERTAdam(params={w}, lr=0.2, schedule='warmup_linear', warmup=0.1, t_total=100)
        for _ in range(100):
            # TODO Solve: reduction='elementwise_mean'=True not taken into account so division by x.size(0) is necessary
            loss = criterion(x, w) / x.size(0)
            loss.backward()
            optimizer.step()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
if __name__ == "__main__":
unittest.main()
| Python | 0.999999 | |
101bb07375d2a36a65d07d3de32625cdef8f916d | Add parser for Vietnamese edition. | parser/parse_vi.py | parser/parse_vi.py | import re
from bs4 import BeautifulSoup, Tag
import requests
tested_url = [
"https://vi.wiktionary.org/wiki/kh%C3%B4ng#Ti.E1.BA.BFng_Vi.E1.BB.87t",
"https://vi.wiktionary.org/wiki/c%C3%A1m_%C6%A1n#Ti.E1.BA.BFng_Vi.E1.BB.87t",
]
# Matches heading tag names "h1".."h6" (case-insensitive); the digit is
# captured in the named group ``level``.
HEADING_TAG = re.compile(r'^h(?P<level>[1-6])$', re.I)
# Splits translation lists such as "a, b; c".
COMMA_OR_SEMICOLON = re.compile('[,;]')


def get_heading_level(tag):
    """If the tag is a heading tag, return its level (1 through 6).
    Otherwise, return `None`."""
    match = HEADING_TAG.match(tag)
    return int(match.group('level')) if match else None
def get_heading_text(tag):
    """Return the heading's visible text with any trailing "[edit]" link
    (everything from the first '[' onward) removed.

    May need to be modified to work for more complex headings.

    :param tag: a Tag object; expected to be one of the <h?> heading tags.
    :return: the cleaned heading text.
    """
    raw = tag.get_text()
    cleaned, _, _ = raw.partition('[')
    return cleaned
def get_html_tree(url):
    """Fetch *url* over HTTP and return a BeautifulSoup tree of the body.

    NOTE(review): no status-code or timeout handling — a non-200 response
    is still parsed; confirm that is acceptable for the crawl.
    """
    html = requests.get(url)
    # print(html.content)
    soup = BeautifulSoup(html.content, 'html.parser')
    return soup
def parse_translation_table(table):
    """
    Parse the table to get translations and the languages.
    Hopefully this function will work for all editions.
    :param table: a list like table.
    :return: generator of (translation, language_name, language_code)
    """
    for li in table.find_all('li'):
        if not isinstance(li, Tag):
            continue
        text = li.get_text().split(':')
        # language name is before ":"
        lang_name = text[0]
        # language code is in super script, formatted like "(code)"
        lang_code = li.find("sup")
        if lang_code:
            # Strip the surrounding parentheses.
            lang_code = lang_code.text.strip()[1:-1]
        else:
            lang_code = ""
        # each "trans" is: translation <sup>(lang_code)</sup> (transliteration)
        # lang_code and transliteration may not exist
        # NOTE(review): text[1] raises IndexError for <li> entries without
        # a ':' separator — confirm such entries cannot occur here.
        trans_list = re.split(COMMA_OR_SEMICOLON, text[1])
        for trans in trans_list:
            # Drop any parenthesised transliteration after the word.
            translation = trans.split('(')[0].strip()
            yield (translation, lang_name, lang_code)
def generate_translation_tuples(soup):
    """
    A generator of translation tuples for the Vietnamese Wiktionary edition.
    :param soup: BeautifulSoup object of a whole entry page
    :return: tuple of the form (headword, head_lang, translation, trans_lang, trans_lang_code, part_of_speech)
    """
    # START non-edition-specific
    # this is the table of content which is present in each edition
    toc = soup.find('div', id='toc')
    # print(toc.get_text())
    # Running state while walking the page top-to-bottom: the most recent
    # language header (h2), part of speech (h3) and bolded headword.
    page_state = {'headword': None,
                  'headword_lang': None,
                  'part_of_speech': None}
    for element in toc.next_siblings:
        if isinstance(element, Tag):  # it could be a Tag or a NavigableString
            level = get_heading_level(element.name)
            # END non-edition-specific
            if level == 2:  # language section header
                page_state['headword_lang'] = get_heading_text(element)
            elif level == 3:  # part-of-speech header
                page_state['part_of_speech'] = get_heading_text(element)
            elif element.name == "p":  # is a paragraph tag
                # The headword appears bolded in the first paragraph of
                # each section.
                bold_word = element.b
                if bold_word:
                    page_state['headword'] = bold_word.get_text()
                    # print("headword: ", bold_word.get_text().strip())
            elif element.name == "h4":
                first_headline = element.find(class_="mw-headline")
                # "Dịch" is the Vietnamese heading for "Translations".
                if first_headline.text.strip() == "Dịch":  # this translation header
                    # the translations follow in the next "columns" table
                    table = element.find_next_sibling(class_="columns")
                    for translation, lang, lang_code in parse_translation_table(table):
                        yield (page_state['headword'], page_state['headword_lang'], translation, lang, lang_code,
                               page_state['part_of_speech'])
def main():
    """Fetch each test URL and print its translation tuples as CSV rows."""
    for page_url in tested_url:
        tree = get_html_tree(page_url)
        for row in generate_translation_tuples(tree):
            print(",".join(row))
if __name__ == '__main__':
main()
| Python | 0 | |
7da53597f9cb4117cecbaed1dbb77f4693289815 | add a test for well locations endpoint | app/backend/wells/tests/test_wells.py | app/backend/wells/tests/test_wells.py | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
class TestWellLocationsSearch(APITestCase):
    """Smoke tests for the well-locations search endpoint."""
    def test_well_locations(self):
        # Basic test to ensure that the well location search returns a non-error response
        url = reverse('well-locations')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| Python | 0.000001 | |
32f7fe6562f4d1592dfab5a9b065154dca51f1d3 | Add rsync module | pyIsis/rsync.py | pyIsis/rsync.py | # -*- coding: utf-8 -*-
import os
import subprocess
import logging
#RSYNC_PATH = os.path.join(
# os.path.abspath (os.path.dirname(__file__)), 'bin', 'rsync')
RSYNC_PATH = '/opt/rsync/bin/rsync'
RSYNC_CMD = '{cmd} {options} "{source}" "{destination}"'
rsync_logger = logging.getLogger('avidisis')
class rsync(object):
    """
    Run rsync as a subprocess, forwarding its output to a logger.

    Wraps subprocess.Popen (it does not subclass it).  Python 2 code:
    Popen output is iterated as str with sentinel "".
    """
    def __init__(self, src, dst, *options):
        # src/dst are quoted into the command line; options are passed as-is.
        self.src = src
        self.dst = dst
        self.options = options
        rsync_logger.debug('rsync parameters: {} {}'.format(src, dst))
    def run(self):
        """Run rsync, log each output line, and return the full output.

        Raises Exception(cmd, exitCode, output) on a non-zero exit code.

        NOTE(review): the command is built by string formatting and run
        with shell=True — paths/options containing quotes or shell
        metacharacters can break or inject; confirm inputs are trusted.
        """
        cmd = RSYNC_CMD.format(
            cmd=RSYNC_PATH,
            options= ' '.join(self.options),
            source=self.src,
            destination=self.dst)
        process = subprocess.Popen(
            cmd, shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        output = ''
        # Poll process for new output until finished
        for line in iter(process.stdout.readline, ""):
            rsync_logger.debug('------ {}'.format(line.strip('\n\r')))
            #print '------ {}'.format(line.strip('\n\r'))
            output += line
        process.wait()
        exitCode = process.returncode
        if (exitCode == 0):
            rsync_logger.info('Workspace [{}] backup done.'.format(
                os.path.basename(self.src)))
            return output
        else:
            rsync_logger.error('rsync exitCode: {}, ouput {}'.format(
                exitCode, output))
            raise Exception(cmd, exitCode, output)
if __name__ == "__main__":
    # Demo run; note the Python-2 print statement below.
    r = rsync('/tmp/test/', '/tmp/test2', '-av', '--delete', '--exclude="*.log"')
    out = r.run()
    print out
| Python | 0.000001 | |
01eaa6ba14c51568ea7aa3cc436d14fbffb78720 | Create downloader-v0.22pa.py | downloader-v0.22pa.py | downloader-v0.22pa.py | import urllib2
import re
import os
def stahniHtml(url):
    """Download *url* and return the raw response body (Python 2 urllib2)."""
    f = urllib2.urlopen(url)
    obsah = f.read()
    f.close()
    return obsah
def nahled(url):
    """Download every chapter's thumbnail page, then fetch its images.

    Side effects: sets the module globals ``chapter`` (current chapter name)
    and ``currpatch1`` (current chapter base URL), and finally opens the
    download folder in the file manager (Windows-only os.startfile).
    """
    global chapter
    global currpatch1
    odkazy = vyberodkazux(url)
    for odkaz in odkazy:
        # Base URL of this chapter (the index page without "index.html").
        currpatch1 = odkaz.replace("index.html", "")
        # Chapter name is the last path component before /index.
        chapter = re.search(r'.*/(.*?)/index',odkaz).group(1)
        print "Kapitola "+chapter
        print " Stahovani nahledu kapitoly... ",
        nahledhtml = stahniHtml(odkaz)
        print "Hotovo."
        print " Vyhledavani odkazu stranky... ",
        # The per-page links sit between the thumbnail marker comment and
        # the "xsmalltxt" class.
        tabulka = re.search(r'<!-- Thumbnail images -->(.*?)class="xsmalltxt"',nahledhtml, re.DOTALL).group(1)
        nahledyurl = re.findall(r'<a href="(.*?)"',tabulka)
        print "Hotovo."
        kapitola(nahledyurl)
    print "Vsechna stahovani dokoncena."
    finalpatch = os.path.expanduser("~")+"\\Downloads\\anime-manga.cz-downloader\\"+nazevserie+"\\"
    print "Ulozeno do: "+finalpatch
    os.startfile(finalpatch)
def kapitola(nahledyurl):
    """Fetch each page of a chapter, resolving relative links against the
    chapter base URL in the global ``currpatch1``."""
    for kapitolasmallurl in nahledyurl:
        kapitolafullurl = currpatch1 + kapitolasmallurl
        getobrazek(kapitolafullurl)
def getobrazek(kapitolafullurl):
    """Find the page's slide image URL and download it.

    Side effect: stores the image file name in the global ``imgname``
    (used later by ulozitobr as the output file name).
    """
    global imgname
    print " Vyhledavani odkazu obrazku... ",
    # The image is the <img id="slide"> element; "../" prefixes are dropped
    # because the URL is re-rooted at the chapter base below.
    obrazekshorturl = re.search(r'<img id="slide" src="(.*?)".*?>',stahniHtml(kapitolafullurl)).group(1).replace("../", "")
    imgname = obrazekshorturl
    print "Hotovo."
    obrazekfullurl = currpatch1 + obrazekshorturl
    #print obrazekfullurl
    ulozitobr(obrazekfullurl)
def ulozitobr(obrazekfullurl):
    """Download the image and save it under
    ~/Downloads/anime-manga.cz-downloader/<series>/<chapter>/<imgname>.

    Reads the globals ``nazevserie``, ``chapter`` and ``imgname``; path
    separators are Windows-style backslashes.
    """
    print " Ukladani obrazku "+obrazekfullurl+"... ",
    currentpatch = os.path.expanduser("~")+"\\Downloads\\anime-manga.cz-downloader\\"+nazevserie+"\\"+chapter+"\\"
    createDir(currentpatch)
    imgData = urllib2.urlopen(obrazekfullurl).read()
    output = open(currentpatch+imgname,'wb')
    output.write(imgData)
    output.close()
    print "Hotovo."
def createDir(path):
    """Create directory *path* (including parents) if it does not exist.

    Uses EAFP instead of the previous exists()-then-makedirs() check,
    which was racy: another process could create the directory between
    the two calls and makedirs() would then raise.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Directory already exists (or was created concurrently): fine.
        # Re-raise genuine failures such as permission errors.
        if not os.path.isdir(path):
            raise
### 18+ (adult-content) extension ###
def vyberodkazux(url):
    """Download the main page and collect all chapter links.

    Combines regular manga links (odkazya) with age-gated ones (odkazyb).
    Side effect: sets the global ``nazevserie`` to the series title taken
    from the page <title>.
    """
    global nazevserie
    print "Stahovani hlavni stranky... ",
    stranka = stahniHtml(url)
    print "Hotovo."
    print "Vyhledavani kapitol... ",
    odkazy = odkazya(stranka) + odkazyb(stranka)
    # NOTE(review): the replace() chain below looks like it originally
    # stripped "&nbsp;" entities and collapsed double spaces; the literals
    # appear garbled (plain spaces) in this copy — verify against the
    # original file.
    nazevserie = re.search(r'<title>(.*?) *\| Anime - Manga.*?</title>',stranka).group(1).replace(" ", "").replace("  ", " ").replace("  ", " ")
    print "Hotovo."
    print "Manga "+nazevserie
    return odkazy
def odkazya(stranka):
    """Return direct manga-chapter links (with and without www prefix)."""
    odkazy1 = re.findall(r'<a href="(http://anime-manga.cz/manga.*?)"', stranka)
    odkazy2 = re.findall(r'<a href="(http://www.anime-manga.cz/manga.*?)"',stranka)
    odkazy = odkazy1 + odkazy2
    return odkazy
def odkazyb(stranka):
    """Resolve age-gated chapter links.

    Links whose last path component contains a digit lead to an
    intermediate (18+) page; each such page is fetched and its first
    manga link is collected.  The findall has four alternative groups,
    only one of which is non-empty per match.
    """
    odkazy18 = re.findall(r'<a href="(http://anime-manga.cz/\d[^/]*?)"|<a href="(http://www.anime-manga.cz/\d[^/]*?)"|<a href="(http://anime-manga.cz/[^/]*?\d)"|<a href="(http://www.anime-manga.cz/[^/]*?\d)"', stranka)
    odkazy = []
    for odkaz18 in odkazy18:
        for i in range(4):
            if odkaz18[i]!= '':
                stranka18 = stahniHtml(odkaz18[i])
                odkazy.append(re.search(r'<a href="(.*?anime-manga.cz/manga.*?)"',stranka18).group(1))
    return odkazy
### Proxy setup ###
def inicializaceproxy():
    """Prompt for HTTP proxy credentials and install them globally.

    Sets the HTTP_PROXY environment variable and installs a urllib2
    opener, so all later urlopen calls go through the proxy.
    """
    prx = raw_input('Zadej prihlasovani ve tvaru http://username:password@proxyserver.domain.com: ')
    os.environ['HTTP_PROXY'] = prx
    proxy = urllib2.ProxyHandler({'http': prx})
    opener = urllib2.build_opener(proxy)
    urllib2.install_opener(opener)
# Entry point: configure the proxy, ask for the manga URL suffix, download.
print "Anime-manga.cz Downloader PROXY alfa"
inicializaceproxy()
xurl = raw_input('stahnout mangu s url: http://www.anime-manga.cz/')
nahled("http://www.anime-manga.cz/"+xurl)
| Python | 0 | |
313d54c4dc4296dad82502ec18f28542ecc747b4 | Add growl hexchat plugin | HexChat/growl.py | HexChat/growl.py | # -*- coding: utf-8; tab-width: 4; -*-
__module_name__='Growl'
__module_description__='Growl notification support'
__module_author__='TingPing'
__module_version__='14'
import re
from time import time
import xchat
try:
import gntp.notifier
except ImportError:
xchat.prnt('Growl Error: Please install https://github.com/kfdm/gntp')
# Too bad we can't unload here to stop the upcoming errors =(
hexchatlogo = 'https://raw.github.com/hexchat/hexchat/master/src/pixmaps/hexchat.png'
lasttime = time()
lastnick = ''
lasterrtime = 0
# initial setup of growl and list of possible notifications
# hostname and password are for over the network notifications
growl = gntp.notifier.GrowlNotifier(
applicationName='HexChat',
notifications=['Highlight', 'Private Message', 'Invited', 'Topic Changed',
'User Online', 'Server Notice', 'Disconnected', 'Banned',
'Killed', 'Kicked', 'Custom'],
defaultNotifications=['Highlight', 'Private Message', 'Invited', 'Server Notice',
'Disconnected', 'Killed', 'Kicked', 'Banned', 'Custom'],
applicationIcon=hexchatlogo,
#hostname='localhost',
#password=''
)
try:
growl.register()
except:
xchat.prnt('Growl Error: Could not register with Growl')
def growlnotify(_type, title, desc='', pri=0):
    """Send a Growl notification of the given registered type.

    On failure (Growl not running), prints an error to HexChat at most
    once per minute.  NOTE(review): the bare except swallows all errors,
    including KeyboardInterrupt — confirm intended.
    """
    try:
        growl.notify(
            noteType=_type,
            title=title,
            description=desc,
            icon=hexchatlogo,
            sticky=False,
            priority=pri
        )
    except:
        global lasterrtime
        # Avoid more spam, 1 error a min.
        if lasterrtime + 60 < time():
            xchat.prnt('Growl Error: Growl is not running.')
            lasterrtime = time()
        return None
# now checks for and ignores mass hilights, performance impact not yet tested, maybe removed, optional, or only used on small channels
# disabled for now
# def masshilight(nick, message):
# userlist = ''
# for user in xchat.get_list('users'):
# if user.nick != word[0]:
# userlist += user.nick + ' '
# if re.search(userlist[:-1], xchat.strip(message)):
# return True
# else:
# return False
def spam(currenttime, currentnick):
    """Return True if *currentnick* triggered a notification within the
    last 3 seconds (rate limiting for highlights and PMs).

    Updates the globals ``lasttime``/``lastnick`` on every call.
    """
    # Highlight and PM now have spam protection which previously could hang XChat
    global lasttime
    global lastnick
    if xchat.nickcmp(lastnick, currentnick) != 0:
        # Different nick: always allow, remember it.
        lasttime = time()
        lastnick = currentnick
        return False
    elif lasttime + 3 < currenttime:
        # Same nick but outside the 3s window: allow.
        lasttime = time()
        return False
    else:
        # Same nick within 3s: suppress as spam.
        lasttime = time()
        return True
def active(chan):
    """Return True if *chan* is the currently focused, active window
    (used to skip notifications the user is already looking at).

    NOTE(review): the bare except turns any lookup failure into
    "not active" — broad but deliberate best-effort; consider
    narrowing to Exception.
    """
    # Checks to see if chat is active to reduce annoying notifications
    try:
        chat = xchat.find_context()
        currentchat = chat.get_info("channel")
        status = xchat.get_info("win_status")
        if currentchat == chan and status == "active":
            return True
        else:
            return False
    except:
        return False
# start list of notifications
def hilight_callback(word, word_eol, userdata):
    """Notify on a channel highlight; word[0] is the nick, word[1] the message."""
    if not spam(time(), word[0]): # and not masshilight(word[0], word[1]):
        growlnotify('Highlight',
                    'Highlight by ' + word[0],
                    word[1],
                    1)
def pm_callback(word, word_eol, userdata):
    """Notify on a private message unless its dialog is the active window."""
    if not spam(time(), word[0]) and not active(word[0]):
        growlnotify('Private Message',
                    'Messaged by ' + word[0],
                    word[1],
                    1)
def invited_callback(word, word_eol, userdata):
    """Notify on a channel invite: word = (channel, inviter, server)."""
    growlnotify('Invited',
                'Invited to ' + word[0],
                'Invited to %s by %s on %s' % (word[0], word[1], word[2]))
def topic_callback(word, word_eol, userdata):
    """Notify on a topic change: word = (changer, new topic, channel)."""
    growlnotify('Topic Changed',
                word[2] + '\'s topic changed',
                '%s \'s topic changed to %s by %s' % (word[2], word[1], word[0]),
                -2)
def onlinenotify_callback(word, word_eol, userdata):
    """Notify when a watched user comes online: word = (nick, ..., network)."""
    growlnotify('User Online',
                word[0] + ' is online on ' + word[2])
def servernotice_callback(word, word_eol, userdata):
    """Notify on a server notice: word = (message, sender)."""
    growlnotify('Server Notice',
                'Notice from ' + word[1],
                word[0])
def disconnect_callback(word, word_eol, userdata):
    """Notify on disconnection; word[0] carries the reason text."""
    growlnotify('Disconnected',
                'Disonnected from server',
                word[0],
                1)
def killed_callback(word, word_eol, userdata):
    """Notify on a KILL: word = (killer, reason)."""
    growlnotify('Killed',
                'Killed by ' + word[0],
                word[1],
                2)
def kicked_callback(word, word_eol, userdata):
    """Notify when kicked: word = (..., kicker, channel, reason)."""
    growlnotify('Kicked',
                'You have been kicked from ' + word[2],
                'Kicked by %s for %s' % (word[1], word[3]),
                1)
def banned_callback(word, word_eol, userdata):
    """Notify when a ban mask (word[1]) matches our own nick or host IP.

    NOTE(review): if our own nick is not found in the user list,
    ``userhost`` is never bound and the next line raises NameError.
    Also, ``nick`` is used as a regex pattern — nicks containing regex
    metacharacters (e.g. '[', ']') can misbehave; confirm acceptable.
    """
    # this now works on a basic level, will possibly be improved
    nick = xchat.get_info('nick')
    for user in xchat.get_list('users'):
        if xchat.nickcmp(nick, user.nick) == 0:
            userhost = user.host
    # Extract the IP/host part after the '@' in ident@host.
    hostip = re.split('@', userhost)[1]
    if re.search(nick, word[1]) or re.search(hostip, word[1]):
        growlnotify('Banned',
                    'You have been banned by ' + word[0])
def tray_callback(word, word_eol, userdata):
    """Handle `/tray -b <title> <text>` so other scripts can send
    custom notifications; other /tray forms are passed through."""
    if len(word) > 3 and word[1] == '-b':
        growlnotify('Custom', word[2], word_eol[3], 1)
        return xchat.EAT_ALL
    return xchat.EAT_NONE
def unload_callback(userdata):
    """Announce plugin unload in the HexChat window."""
    xchat.prnt(__module_name__ + ' version ' + __module_version__ + ' unloaded.')
# get events from hexchat to call notifications
xchat.hook_print("Channel Msg Hilight", hilight_callback)
xchat.hook_print("Channel Action Hilight", hilight_callback)
xchat.hook_print("Private Message to Dialog", pm_callback)
xchat.hook_print("Private Action to Dialog", pm_callback)
xchat.hook_print("Invited", invited_callback)
xchat.hook_print("Notice", servernotice_callback)
xchat.hook_print("Notify Online", onlinenotify_callback)
xchat.hook_print("Topic Change", topic_callback)
xchat.hook_print("You Kicked", kicked_callback)
xchat.hook_print("Killed", killed_callback)
xchat.hook_print("Channel Ban", banned_callback)
# hook the tray command for other scripts to use
xchat.hook_command("tray", tray_callback)
# just to print its unloaded
xchat.hook_unload(unload_callback)
# Nothing broke yet, its loaded! =)
xchat.prnt(__module_name__ + ' version ' + __module_version__ + ' loaded.')
| Python | 0 | |
d6b01b968b2ef97042bc7c3dfc1f1752fcbc98a4 | Create nnet3_compute_bnf.py | nnet3_compute_bnf.py | nnet3_compute_bnf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 5 16:49:55 2017
@author: Omid Sadjadi <s.omid.sadjadi@gmail.com>
"""
import numpy as np
import nnet3read
def splice_feats(x, w=9):
    """Splice the feature vectors in ``x`` by stacking each frame with its
    neighbours over a sliding window of ``w`` frames (``w`` must be odd).

    Edge frames are handled by replicating the first/last column.

    :param x: feature matrix of shape [ndim x nframes]
    :param w: odd window length >= 3
    :return: spliced features of shape [w*ndim x nframes]
    """
    if w < 3 or ((w & 1) != 1):
        raise ValueError('Window length should be an odd integer >= 3')
    half = w // 2
    ndim, nframes = x.shape
    # Replicate boundary frames so every frame has a full context window.
    padded = np.pad(x, ((0, 0), (half, half)), mode='edge')
    # Row-block k of the result holds the features offset by (k - half) frames.
    return np.vstack([padded[:, k:k + nframes] for k in range(w)])
def renorm_rms(data, target_rms=1.0, axis=0):
    """Scale ``data`` so its root-mean-square along ``axis`` equals
    ``target_rms``; all-zero vectors are left untouched."""
    # scale = rms(data) / target_rms; dividing by it sets the RMS to target.
    rms = np.sqrt(np.mean(data * data, axis=axis, keepdims=True))
    scale = rms / target_rms
    # Avoid dividing all-zero vectors by zero.
    scale[scale == 0] = 1.
    return data / scale
def squashit(aff, nonlin, renorm=False):
    """Apply the chosen nonlinearity ('sigmoid' or 'relu'; anything else is
    a no-op) and optionally the per-column RMS renorm.

    NOTE: the RELU branch mutates ``aff`` in place (np.maximum with an
    out argument), so the caller's array is modified.
    """
    if nonlin=='sigmoid':
        aff = sigmoid(aff)
    elif nonlin=='relu':
        # In-place clamp at zero.
        np.maximum(aff, 0, aff)
    if renorm:
        # Scale each column (axis=0) to unit RMS, as Kaldi's renorm does.
        aff = renorm_rms(aff, axis=0)
    return aff
def sigmoid(x):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def extract_bn_features(dnn, fea, nonlin='sigmoid', renorm=False):
    """ This routine computes the bottleneck features using the DNN parameters (b, W)
        and the spliced feature vectors fea. It is assumed that the last layer is
        the bottleneck layer. This can be achieved by running the following command:
        nnet3-copy --binary=false --nnet-config='echo output-node name=output input=dnn_bn.renorm |' \
          --edits='remove-orphans' exp/nnet3/swbd9/final.raw exp/nnet3/swbd/final.txt

        :param dnn: (b, W) — lists of per-layer bias vectors and weight matrices
        :param fea: spliced feature matrix, one column per frame
        :param nonlin: activation applied after every hidden layer
        :param renorm: whether to RMS-renormalize after each activation
        :return: bottleneck activations (last layer affine, no nonlinearity)
    """
    b, W = dnn
    aff = fea
    # Forward through all hidden layers: affine then squash.
    for bi,wi in zip(b[:-1],W[:-1]):
        aff = wi.dot(aff) + bi
        aff = squashit(aff, nonlin, renorm)
    # Final (bottleneck) layer is affine only.
    aff = W[-1].dot(aff) + b[-1]
    return aff
if __name__ == '__main__':
    # example that shows how to extract bottleneck features from (say) MFCCs
    # NOTE(review): `nnet3read` is imported as a module above, so calling it
    # directly would raise TypeError — presumably nnet3read.nnet3read(...)
    # was intended.  `mfc` is also undefined here; this block is
    # illustrative, not runnable as-is.
    dnn = nnet3read('final.txt', 'DNN_1024.h5', write_to_disk=True)
    # we assume mfc is a numpy array of [ndim x nframes] dimesnsion, e.g., [39 x 537]
    # that contains 39-dimensional (say) MFCCs. Features are spliced by stacking over
    # a 21-frame context
    fea = splice_feats(mfc, w=21)
    # now we extract bottleneck features using the DNN parameters and the spliced
    # features. Here we assume that a RELU ativation function is used, and followed
    # by a renorm nonlinearity to scale the RMS of the vector of activations to 1.0.
    # This kind of nonlinearity is implemented in Kaldi nnet3 as 'relu-renorm-layer'.
    bnf = extract_bn_features(dnn, fea, nonlin='relu', renorm=True)
| Python | 0.00002 | |
49712dd43a2376c913e66cac7b52fc7247912e44 | Make disable_builtins schema a property | flexget/plugins/operate/disable_builtins.py | flexget/plugins/operate/disable_builtins.py | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
    """Helper function to return an iterator over all builtin plugins."""
    # itervalues() implies Python 2; `plugins` is flexget's global registry.
    return (plugin for plugin in plugins.itervalues() if plugin.builtin)
class PluginDisableBuiltins(object):
    """Disables all (or specific) builtin plugins from a task."""

    def __init__(self):
        # cannot trust that on_task_start would have been executed
        self.disabled = []

    # TODO: This depends on plugin load order: if this plugin is loaded
    # before the builtin plugins, they are missing from the schema enum.
    # Plugin loading should defer schema access until all plugins are
    # loaded; the schema is a property as a partial workaround.
    @property
    def schema(self):
        # True disables every builtin; a list disables only the named ones.
        return {
            'oneOf': [
                {'type': 'boolean'},
                {'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}}
            ]
        }

    def debug(self):
        """Log the names of all currently-builtin plugins."""
        log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))

    @priority(255)
    def on_task_start(self, task, config):
        """Clear the builtin flag on the configured plugins for this task."""
        self.disabled = []
        if not config:
            return

        for plugin in all_builtins():
            if config is True or plugin.name in config:
                plugin.builtin = False
                self.disabled.append(plugin.name)
        log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))

    @priority(-255)
    def on_task_exit(self, task, config):
        """Restore the builtin flag on everything disabled at task start."""
        if not self.disabled:
            return
        for name in self.disabled:
            plugin.plugins[name].builtin = True
        log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
        self.disabled = []

    on_task_abort = on_task_exit
on_task_abort = on_task_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
    """Helper function to return an iterator over all builtin plugins."""
    # itervalues() implies Python 2; `plugins` is flexget's global registry.
    return (plugin for plugin in plugins.itervalues() if plugin.builtin)
class PluginDisableBuiltins(object):
    """Disables all (or specific) builtin plugins from a task."""

    def __init__(self):
        # cannot trust that on_task_start would have been executed
        self.disabled = []

    # TODO: This depends on plugin load order: if this plugin is loaded
    # before the builtin plugins, they are missing from the schema enum
    # because the enum is evaluated at class-definition time.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}}
        ]
    }

    def debug(self):
        """Log the names of all currently-builtin plugins."""
        log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))

    @priority(255)
    def on_task_start(self, task, config):
        """Clear the builtin flag on the configured plugins for this task."""
        self.disabled = []
        if not config:
            return

        for plugin in all_builtins():
            if config is True or plugin.name in config:
                plugin.builtin = False
                self.disabled.append(plugin.name)
        log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))

    @priority(-255)
    def on_task_exit(self, task, config):
        """Restore the builtin flag on everything disabled at task start."""
        if not self.disabled:
            return
        for name in self.disabled:
            plugin.plugins[name].builtin = True
        log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
        self.disabled = []

    on_task_abort = on_task_exit
| Python | 0.000003 |
e2b4d3849db7b51f81d1770ebcce2b9af7d098e6 | add tests for Mixpanel | test/test_mixpanel.py | test/test_mixpanel.py | import base64
import json
import unittest
from libsaas.executors import test_executor
from libsaas.services import mixpanel
class MixpanelTestCase(unittest.TestCase):
    """Tests for the libsaas Mixpanel service wrapper, using the test
    executor to capture outgoing HTTP requests instead of sending them."""
    def setUp(self):
        # Canned '1' response: Mixpanel's track endpoint returns 1 on success.
        self.executor = test_executor.use()
        self.executor.set_response(b'1', 200, {})
        self.service = mixpanel.Mixpanel('my-token', 'api-key', 'api-secret')
    def serialize(self, data):
        """Encode ``data`` the way the Mixpanel API expects: base64(json)."""
        return base64.b64encode(json.dumps(data))
    def expect(self, uri, params=None, subdomain=None):
        """Assert the last captured request hit ``uri`` (and ``params``).

        Non-'api' subdomains get the '/api/2.0' data-export prefix.
        """
        if not subdomain:
            domain = 'mixpanel.com'
        else:
            domain = '{0}.mixpanel.com'.format(subdomain)
        if subdomain != 'api':
            uri = '/api/2.0{0}'.format(uri)
        self.assertEqual(self.executor.request.uri,
                         'http://{0}{1}'.format(domain, uri))
        if params:
            # NOTE(review): assertEquals is a deprecated alias of assertEqual.
            self.assertEquals(self.executor.request.params, params)
    def test_track(self):
        """track() posts the base64/json event payload to api.mixpanel.com."""
        ret = self.service.track('login', {'user': 'foo'}, ip=True)
        data = self.serialize({'event': 'login',
                               'properties': {'token': 'my-token',
                                              'user': 'foo'}})
        self.expect('/data/track/', {'data': data, 'ip': '1'}, 'api')
        self.assertTrue(ret)
        ret = self.service.track('logout', test=True)
        data = self.serialize({'event': 'logout',
                               'properties': {'token': 'my-token'}})
        self.expect('/data/track/', {'data': data, 'test': '1'}, 'api')
        self.assertTrue(ret)
    def test_track_failure(self):
        """A '0' body from Mixpanel makes track() return False."""
        self.executor.set_response(b'0', 200, {})
        ret = self.service.track('ev')
        self.assertFalse(ret)
    def test_events(self):
        """Events resource: get/top/names map to the expected endpoints."""
        self.service.events().get(['login', 'logout'], 'general', 'day', 10)
        self.expect('/events/', {'event': json.dumps(['login', 'logout']),
                                 'type': 'general', 'unit': 'day', 'interval': 10})
        self.service.events().top('general', limit=10)
        self.expect('/events/top/', {'type': 'general', 'limit': 10})
        self.service.events().names('general')
        self.expect('/events/names/', {'type': 'general'})
    def test_event_properties(self):
        """Event-properties resource: get/top/values endpoints and params."""
        self.service.properties().get('login', 'plan', 'unique', 'day', 7,
                                      values=['standard', 'premium'])
        self.expect('/events/properties/',
                    {'event': 'login', 'name': 'plan', 'type': 'unique',
                     'unit': 'day', 'interval': '7',
                     'values': json.dumps(['standard', 'premium'])})
        self.service.properties().top('login')
        self.expect('/events/top/', {'event': 'login'})
        self.service.properties().values('login', 'plan', bucket='10')
        self.expect('/events/values/', {'event': 'login', 'name': 'plan',
                                        'bucket': 10})
    def test_funnels(self):
        """Funnels resource: get by id/date and list."""
        self.service.funnels().get(10, '2012-01-01', length=5)
        self.expect('/funnels/', {'funnel_id': 10, 'from_date': '2012-01-01',
                                  'length': 5})
        self.service.funnels().list()
        self.expect('/funnels/list/', {})
    def test_segmentation(self):
        """Segmentation resource: get/numeric/sum/average variants."""
        self.service.segmentation().get('login', '2011-01-01',
                                        '2012-01-01', type='unique')
        self.expect('/segmentation/',
                    {'event': 'login', 'from_date': '2011-01-01',
                     'to_date': '2012-01-01', 'type': 'unique'})
        self.service.segmentation().numeric('login', '2011-01-01',
                                            '2012-01-01', on='true', buckets=3)
        self.expect('/segmentation/numeric/',
                    {'event': 'login', 'from_date': '2011-01-01',
                     'to_date': '2012-01-01', 'on': 'true', 'buckets': 3})
        on = 'properties["succeeded"] - property["failed"]'
        self.service.segmentation().sum('buy', '2011-01-01', '2012-01-01', on)
        self.expect('/segmentation/sum/',
                    {'event': 'buy', 'from_date': '2011-01-01',
                     'to_date': '2012-01-01', 'on': on})
        # NOTE(review): the trailing ", 'day'" makes this a *tuple*, not the
        # expression string — probably meant on = 'property["amount"]'; verify.
        on = 'property["amount"]', 'day'
        self.service.segmentation().average('pay', '2011-01-01',
                                            '2012-01-01', on, 'day')
        self.expect('/segmentation/average/',
                    {'event': 'pay', 'from_date': '2011-01-01',
                     'to_date': '2012-01-01', 'on': on, 'unit': 'day'})
    def test_retention(self):
        """Retention resource: get with date range and extras."""
        self.service.retention().get('2011-01-01', '2012-01-01',
                                     born_event='login', limit=10)
        self.expect('/retention/',
                    {'from_date': '2011-01-01', 'to_date': '2012-01-01',
                     'born_event': 'login', 'limit': 10})
    def test_export(self):
        """export() targets the data.mixpanel.com subdomain."""
        self.service.export('2011-01-01', '2012-01-01', ['login', 'logout'])
        self.expect('/export/',
                    {'from_date': '2011-01-01', 'to_date': '2012-01-01',
                     'event': json.dumps(['login', 'logout'])}, 'data')
    def test_no_key(self):
        """Without api key/secret, tracking works but data export fails."""
        self.service = mixpanel.Mixpanel('my-token')
        # tracking is allowed without setting the api key and api secret
        self.service.track('login')
        data = self.serialize({'event': 'login',
                               'properties': {'token': 'my-token'}})
        self.expect('/data/track/', {'data': data}, 'api')
        # but data export methods fail
        self.assertRaises(mixpanel.Insufficient, self.service.funnels.list)
        self.assertRaises(mixpanel.ConfigurationError, self.service.export,
                          '2011-01-01', '2012-01-01', ['login', 'logout'])
| Python | 0 | |
790be842b1c2e752210d5328dad05acb05d337bb | add minimal test for serial.theaded | test/test_threaded.py | test/test_threaded.py | #!/usr/bin/env python
#
# This file is part of pySerial - Cross platform serial port support for Python
# (C) 2016 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
"""\
Test serial.threaded related functionality.
"""
import os
import unittest
import serial
import serial.threaded
import time
# on which port should the tests be performed:
PORT = 'loop://'
class Test_asyncio(unittest.TestCase):
    """Test serial.threaded related functionality.

    NOTE(review): the class name says 'asyncio' but the body exercises
    serial.threaded (the original docstring was a copy-paste); consider
    renaming in a follow-up — kept here to avoid changing test discovery.
    """
    def test_line_reader(self):
        """simple test of line reader class"""
        class TestLines(serial.threaded.LineReader):
            # Collect every completed line the reader thread hands us.
            def __init__(self):
                super(TestLines, self).__init__()
                self.received_lines = []
            def handle_line(self, data):
                self.received_lines.append(data)
        # loop:// echoes everything written back to the reader.
        ser = serial.serial_for_url(PORT, baudrate=115200, timeout=1)
        with serial.threaded.ReaderThread(ser, TestLines) as protocol:
            protocol.write_line('hello')
            # Give the background reader thread time to deliver the echo.
            time.sleep(1)
            self.assertEqual(protocol.received_lines, ['hello'])
if __name__ == '__main__':
import sys
sys.stdout.write(__doc__)
if len(sys.argv) > 1:
PORT = sys.argv[1]
sys.stdout.write("Testing port: {!r}\n".format(PORT))
sys.argv[1:] = ['-v']
# When this module is executed from the command-line, it runs all its tests
unittest.main()
| Python | 0.000015 | |
70683aabe3cebda02db62fc254b7ec7532a50618 | Add test_config.sample.py. | test_config.sample.py | test_config.sample.py | CREDENTIAL_FILE = ''
SHEET_NAME = ''
| Python | 0 | |
499adce8b5c23d60073d4c92259e611609ee0c61 | Add initial draft script to analyse Maven deps | states/common/maven/artifacts/check_dependencies.py | states/common/maven/artifacts/check_dependencies.py | #!/usr/bin/env python
import subprocess as sub
import yaml
import re
distrib_pom_path = '/home/uvsmtid/Works/maritime-singapore.git/clearsea-distribution/pom.xml'
# Resolve (download) all dependencies locally so that next command
# can work offline.
sub.check_call(
[
'mvn',
'-f',
distrib_pom_path,
'dependency:resolve',
],
)
# Get list of all dependencies.
p = sub.Popen(
[
'mvn',
'-f',
distrib_pom_path,
'dependency:list',
],
stdout = sub.PIPE,
)
# Select lines with dependency items.
artifact_regex = re.compile(')
for line in p.stdout:
| Python | 0 | |
1a065a251c3337ae7741af1916c51f2edcb9180f | add db.py | www/transwarp/db.py | www/transwarp/db.py | #/usr/bin/python
#_*_ coding:utf-8 _*_
import threading
class _Engine(object):
"""
数据库引擎对象
"""
def __init__(self, connect):
self._connect = connect
def connect(self):
return self._connect()
engine = None
class _DbCtx(threading.local):
"""
持有数据库连接的上下文对象
"""
def __init__(self):
self.connection = None
self.transactions = 0
def is_init(self):
return not self.connection is None
def init(self):
self.connection = _LasyConnection()
self.transactions = 0
def cleanup(self):
self.connection.cleanup()
self.connection = None
def cursor(self):
return self.connection.cursor()
_db_ctx = _DbCtx()
class _ConnectionCtx(object):
    """Context manager guaranteeing a thread-local DB connection.

    Nestable: only the outermost context — the one that actually opened
    the connection — releases it on exit.
    """
    def __enter__(self):
        global _db_ctx
        self.should_cleanup = False
        if not _db_ctx.is_init():
            # BUG FIX: the original called _db_ctx.is_init() again here, a
            # no-op query, so the connection was never actually created.
            # init() is what opens it.
            _db_ctx.init()
            self.should_cleanup = True
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        global _db_ctx
        # Only the context that opened the connection tears it down.
        if self.should_cleanup:
            _db_ctx.cleanup()
def connection():
    """Return a context manager providing a thread-local DB connection.

    Usage: ``with connection(): ...`` — the underlying connection is
    created lazily and released when the outermost context exits.
    """
    return _ConnectionCtx()
| Python | 0.000002 | |
fed635826be361c4748f13bca09ed411c59ca352 | Add test for OpenStackServer API to increase test coverage | instance/tests/api/test_openstack_server.py | instance/tests/api/test_openstack_server.py | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015-2016 OpenCraft <contact@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Views - Tests
"""
# Imports #####################################################################
import ddt
from django.conf import settings
from rest_framework import status
from instance.tests.api.base import APITestCase
from instance.tests.models.factories.server import OpenStackServerFactory
# Tests #######################################################################
@ddt.ddt
class OpenStackServerAPITestCase(APITestCase):
"""
Test cases for OpenStackServer API calls
"""
def test_get_unauthenticated(self):
"""
GET - Require to be authenticated
"""
response = self.api_client.get('/api/v1/openstackserver/')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data, {"detail": "Authentication credentials were not provided."})
def test_get_authenticated(self):
"""
GET - Authenticated access
"""
self.api_client.login(username='user1', password='pass')
response = self.api_client.get('/api/v1/openstackserver/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, [])
server = OpenStackServerFactory()
response = self.api_client.get('/api/v1/openstackserver/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.check_serialized_server(response.data[0], server)
def test_get_details(self):
"""
GET - Detailed attributes
"""
self.api_client.login(username='user3', password='pass')
test_openstack_id = 'test-openstack-id'
server = OpenStackServerFactory(openstack_id=test_openstack_id)
response = self.api_client.get('/api/v1/openstackserver/{pk}/'.format(pk=server.id))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.check_serialized_server(response.data, server)
self.assertEqual(response.data['openstack_id'], test_openstack_id)
def check_serialized_server(self, data, server):
"""
Assert that the server data is what we expect
"""
self.assertEqual(data['id'], server.id)
self.assertEqual(
data['api_url'],
'http://testserver/api/v1/openstackserver/{pk}/'.format(pk=server.id)
)
self.assertEqual(data['name'], server.name)
self.assertEqual(data['openstack_region'], settings.OPENSTACK_REGION)
self.assertIn('created', data)
self.assertIn('modified', data)
self.assertIn('openstack_id', data)
self.assertIn('public_ip', data)
self.assertIn('status', data)
| Python | 0 | |
6fd75772efac321517a1d8c01addfa5cbbf7caf0 | Add test file for user functions. | tests/db/user_test.py | tests/db/user_test.py | from okcupyd.db import user
def test_have_messaged_before(T):
    """have_messaged_by_username is True, in both directions, exactly for
    the pair of users sharing a message thread; any other handle pair
    (unknown users or users without a thread) returns False.
    """
    message_thread_model = T.factory.message_thread()
    assert user.have_messaged_by_username(
        message_thread_model.initiator.handle,
        message_thread_model.respondent.handle
    )
    # The check must be symmetric: respondent -> initiator counts too.
    assert user.have_messaged_by_username(
        message_thread_model.respondent.handle,
        message_thread_model.initiator.handle
    )
    # Two handles with no users/threads at all.
    assert not user.have_messaged_by_username('a', 'b')
    # A real participant paired with an unknown handle.
    assert not user.have_messaged_by_username(
        message_thread_model.respondent.handle, 'a'
    )
    # A user that exists but has no thread with the initiator.
    T.factory.user('b')
    assert not user.have_messaged_by_username(
        'b', message_thread_model.initiator.handle
    )
| Python | 0 | |
6d2480c5817a8ba7a4a810378ce8fabe0ede3cbf | check YAML examples | tests/testexamples.py | tests/testexamples.py | #!/usr/bin/python
import os
import yaml
def test_examples():
    """Every YAML file under examples/ must parse without raising."""
    for filename in os.listdir("examples/"):
        with open(os.path.join("examples", filename)) as stream:
            # BUG FIX: yaml.load() without an explicit Loader can execute
            # arbitrary constructors and is deprecated; safe_load is
            # sufficient for plain example documents.
            print(yaml.safe_load(stream))
| Python | 0.000001 | |
a9bb7c7c929b0e182160a700e0a3f23dc3e81765 | Update and rename exercises to exercises/12.py | exercises/12.py | exercises/12.py | '''
Define a procedure histogram() that takes a list of
integers and prints a histogram to the screen.
For example, histogram([4, 9, 7]) should print the following:
****
*********
*******
'''
def histogram(lst):
    """Print a horizontal star histogram: one row of '*' per value."""
    for count in lst:
        print(count * '*')
| Python | 0.000001 | |
a0d2e58a6eecf3427646f311e638c359706e806a | Add Energenie example code | energenie.py | energenie.py | import RPi.GPIO as GPIO
from time import sleep
bit1 = 11
bit2 = 15
bit3 = 16
bit4 = 13
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
GPIO.setup(bit1, GPIO.OUT)
GPIO.setup(bit2, GPIO.OUT)
GPIO.setup(bit3, GPIO.OUT)
GPIO.setup(bit4, GPIO.OUT)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(22, GPIO.OUT)
GPIO.output(22, False)
GPIO.output(18, False)
GPIO.output(bit1, False)
GPIO.output(bit2, False)
GPIO.output(bit3, False)
GPIO.output(bit4, False)
on = ['1011', '0111', '0110', '0101', '0100']
off = ['0011', '1111', '1110', '1101', '1100']
def change_plug_state(socket, on_or_off):
    """Transmit one 4-bit Energenie control code for the given socket.

    ``on_or_off`` is one of the module-level ``on``/``off`` code tables;
    ``on_or_off[socket]`` is a 4-character bit string such as '1011'.
    """
    # Drive the four data pins from the code string: rightmost character
    # ([-1]) onto bit1 through leftmost ([-4]) onto bit4.
    state = on_or_off[socket][-1] == '1'
    GPIO.output(bit1, state)
    state = on_or_off[socket][-2] == '1'
    GPIO.output(bit2, state)
    state = on_or_off[socket][-3] == '1'
    GPIO.output(bit3, state)
    state = on_or_off[socket][-4] == '1'
    GPIO.output(bit4, state)
    # Let the data lines settle, then pulse pin 22 for 250 ms to send the
    # code — presumably the RF modulator enable line; TODO confirm pin role.
    sleep(0.1)
    GPIO.output(22, True)
    sleep(0.25)
    GPIO.output(22, False)
while True:
raw_input('Hit any key to turn on: ')
print('turning on')
change_plug_state(2, on)
raw_input('Hit any key to turn off: ')
print('turning off')
change_plug_state(0, off)
| Python | 0 | |
8646929a913b77438bf58e48e672ea68492d3ac1 | Mark third_party/accessibility-developer-tools as a known license info issue. | android_webview/tools/known_issues.py | android_webview/tools/known_issues.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""List of known-incompatibly-licensed directories for Android WebView.
This is not used by the webview_licenses tool itself; it is effectively a
"cache" of the output of webview_licenses.GetIncompatibleDirectories() for the
subset of repositories that WebView needs.
We store a copy here because GetIncompatibleDirectories() doesn't work properly
after things have been removed from the tree - it can no longer see the
README.chromium files for previously-removed directories, but they may have
newly added files in them. As long as this list is up to date, we can remove the
things listed first, and then just run the tool afterwards to validate that it
was sufficient. If the tool returns any extra directories then the snapshotting
process will stop and this list must be updated.
"""
# If there is a temporary license-related issue with a particular third_party
# directory, please put it here, with a comment linking to the bug entry.
KNOWN_ISSUES = [
'third_party/accessibility-developer-tools', # crbug.com/165901
]
KNOWN_INCOMPATIBLE = {
# Incompatible code in the main chromium repository.
'.': [
'base/third_party/xdg_mime',
'breakpad',
'chrome/installer/mac/third_party/xz',
'chrome/test/data',
'third_party/active_doc',
'third_party/apple_apsl',
'third_party/apple_sample_code',
'third_party/bsdiff',
'third_party/bspatch',
'third_party/sudden_motion_sensor',
'third_party/swiftshader',
'third_party/talloc',
'third_party/webdriver',
'third_party/wtl',
'tools/telemetry/third_party/websocket-client',
],
# Incompatible code in ICU.
'third_party/icu': [
'source/data/brkitr',
],
}
KNOWN_INCOMPATIBLE['.'].extend(KNOWN_ISSUES)
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""List of known-incompatibly-licensed directories for Android WebView.
This is not used by the webview_licenses tool itself; it is effectively a
"cache" of the output of webview_licenses.GetIncompatibleDirectories() for the
subset of repositories that WebView needs.
We store a copy here because GetIncompatibleDirectories() doesn't work properly
after things have been removed from the tree - it can no longer see the
README.chromium files for previously-removed directories, but they may have
newly added files in them. As long as this list is up to date, we can remove the
things listed first, and then just run the tool afterwards to validate that it
was sufficient. If the tool returns any extra directories then the snapshotting
process will stop and this list must be updated.
"""
# If there is a temporary license-related issue with a particular third_party
# directory, please put it here, with a comment linking to the bug entry.
KNOWN_ISSUES = []
KNOWN_INCOMPATIBLE = {
# Incompatible code in the main chromium repository.
'.': [
'base/third_party/xdg_mime',
'breakpad',
'chrome/installer/mac/third_party/xz',
'chrome/test/data',
'third_party/active_doc',
'third_party/apple_apsl',
'third_party/apple_sample_code',
'third_party/bsdiff',
'third_party/bspatch',
'third_party/sudden_motion_sensor',
'third_party/swiftshader',
'third_party/talloc',
'third_party/webdriver',
'third_party/wtl',
'tools/telemetry/third_party/websocket-client',
],
# Incompatible code in ICU.
'third_party/icu': [
'source/data/brkitr',
],
}
KNOWN_INCOMPATIBLE['.'].extend(KNOWN_ISSUES)
| Python | 0.000178 |
a5440305173c218ec785b0d5a2dfa8b02bb0b731 | Add package: py-fava (#21275) | var/spack/repos/builtin/packages/py-fava/package.py | var/spack/repos/builtin/packages/py-fava/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFava(PythonPackage):
    """Fava is a web interface for the double-entry bookkeeping software
    Beancount with a focus on features and usability."""
    homepage = "https://beancount.github.io/fava/"
    pypi = "fava/fava-1.18.tar.gz"
    version('1.18', sha256='21336b695708497e6f00cab77135b174c51feb2713b657e0e208282960885bf5')
    # For some reason Fava adds a whole bunch of executables to
    # its bin directory, and this causes clashes when loading
    # the module.
    # NOTE(review): '^' in the middle of a regex never matches without
    # re.MULTILINE, so 'bin/^(?!fava).*' may match nothing — verify the
    # pattern against Spack's extends/ignore semantics (intent appears to
    # be "ignore everything in bin/ not starting with 'fava'").
    extends('python', ignore='bin/^(?!fava).*')
    # Some of the dependencies are not listed as required at
    # build or run time, but actually are.
    # - py-setuptools
    # - py-importlib
    # - py-pytest
    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-setuptools-scm', type=('build'))
    depends_on('py-babel@2.6.0:', type=('build', 'run'))
    depends_on('py-beancount@2.3.0:', type=('build', 'run'))
    depends_on('py-cheroot', type=('build', 'run'))
    depends_on('py-click', type=('build', 'run'))
    depends_on('py-flask@0.10.1:', type=('build', 'run'))
    depends_on('py-flask-babel@1.0.0:', type=('build', 'run'))
    depends_on('py-importlib', type=('build', 'run'))
    depends_on('py-jinja2@2.10:', type=('build', 'run'))
    depends_on('py-markdown2@2.3.0:', type=('build', 'run'))
    depends_on('py-ply', type=('build', 'run'))
    depends_on('py-pytest', type=('build', 'run'))
    depends_on('py-simplejson@3.2.0:', type=('build', 'run'))
    depends_on('py-werkzeug@0.15.0:', type=('build', 'run'))
| Python | 0 | |
31342e58f914c057404fd35edfff42b95e5fb051 | Test #2 (with the current GitLab API syntax) | gitlabform/gitlabform/test/test_project_settings.py | gitlabform/gitlabform/test/test_project_settings.py | import pytest
from gitlabform.gitlabform import GitLabForm
from gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME
PROJECT_NAME = 'project_settings_project'
GROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME
@pytest.fixture(scope="module")
def gitlab(request):
    """Module-scoped fixture: create the test group and project once,
    yield a GitLab API client, and delete the project again when every
    test in the module has finished.
    """
    create_group(GROUP_NAME)
    create_project_in_group(GROUP_NAME, PROJECT_NAME)
    gl = get_gitlab()
    def fin():
        # Teardown: remove the project created above.
        gl.delete_project(GROUP_AND_PROJECT_NAME)
    request.addfinalizer(fin)
    return gl  # provide fixture value
config_builds_for_private_projects = """
gitlab:
api_version: 4
project_settings:
project_settings:
builds_access_level: private
visibility: private
"""
class TestProjectSettings:
    """Verify that GitLabForm applies project_settings via the GitLab API."""
    def test__builds_for_private_projects(self, gitlab):
        """Applying the config must leave builds access and visibility private."""
        gf = GitLabForm(config_string=config_builds_for_private_projects,
                        project_or_group=GROUP_AND_PROJECT_NAME)
        gf.main()
        settings = gitlab.get_project_settings(GROUP_AND_PROJECT_NAME)
        # BUG FIX: the original used ``is 'private'``, which compares object
        # identity and only ever passed thanks to CPython string interning.
        assert settings['builds_access_level'] == 'private'
        assert settings['visibility'] == 'private'
| Python | 0 | |
caf135f6c94146038ac9d9e77a808e30ea52f900 | make pyroma a runnable module (#62) | pyroma/__main__.py | pyroma/__main__.py | from . import main
# Entry point for ``python -m pyroma``: delegate straight to the package main().
if __name__ == "__main__":
    main()
| Python | 0 | |
25a1d94b45980fbc78c162af2c81ad807ee954c9 | add test_vpr.py, add test functions and stubs | wradlib/tests/test_vpr.py | wradlib/tests/test_vpr.py | #!/usr/bin/env python
# -------------------------------------------------------------------------------
# Name: test_vpr.py
# Purpose: testing file for the wradlib.vpr module
#
# Authors: wradlib developers
#
# Created: 26.02.2016
# Copyright: (c) wradlib developers
# Licence: The MIT License
# -------------------------------------------------------------------------------
import unittest
import wradlib.vpr as vpr
import wradlib.georef as georef
import numpy as np
class VPRHelperFunctionsTest(unittest.TestCase):
    """Smoke tests and stubs for the wradlib.vpr helper functions.

    Most tests below only check that the call does not raise; methods
    containing a bare ``pass`` are placeholders still to be written.
    """
    def setUp(self):
        # One synthetic radar site (lon, lat, alt) and a 2.5° sweep on a
        # 100 km range / 1° azimuth polar grid in Gauss-Krueger zone 3.
        self.site = (7.0, 53.0, 100.)
        self.proj = georef.epsg_to_osr(31467)
        self.az = np.arange(0., 360., 1.)
        self.r = np.arange(0, 100000, 1000)
        self.el = 2.5
        self.coords = vpr.volcoords_from_polar(self.site, self.el, self.az, self.r, self.proj)
    def test_out_of_range(self):
        # TODO: not implemented yet.
        pass
    def test_blindspots(self):
        # TODO: not implemented yet.
        pass
    def test_volcoords_from_polar(self):
        # Smoke test: single-elevation call must not raise.
        coords = vpr.volcoords_from_polar(self.site, self.el, self.az, self.r, self.proj)
        pass
    def test_volcoords_from_polar_irregular(self):
        # Smoke test: two-elevation (irregular) call must not raise.
        coords = vpr.volcoords_from_polar_irregular(self.site, [self.el, 5.0], self.az, self.r, self.proj)
        pass
    def test_synthetic_polar_volume(self):
        # Smoke test: synthetic volume from the setUp coordinates.
        vol = vpr.synthetic_polar_volume(self.coords)
        pass
    def test_vpr_interpolator(self):
        # TODO: not implemented yet.
        pass
    def test_correct_vpr(self):
        # TODO: not implemented yet.
        pass
    def test_mean_norm_from_vpr(self):
        # TODO: not implemented yet.
        pass
    def test_norm_vpr_stats(self):
        # TODO: not implemented yet.
        pass
    def test_make_3D_grid(self):
        # Smoke test: build a 200 km / 5 km Cartesian target grid.
        maxrange = 200000.
        maxalt = 5000.
        horiz_res = 2000.
        vert_res = 250.
        vpr.make_3D_grid(self.site, self.proj, maxrange, maxalt, horiz_res, vert_res)
        pass
class CartesianVolumeTest(unittest.TestCase):
    """Placeholder suite for the Cartesian volume classes — all stubs."""
    def test_CartesianVolume(self):
        # TODO: not implemented yet.
        pass
    def test_CAPPI(self):
        # TODO: not implemented yet.
        pass
    def test_PseudoCAPPI(self):
        # TODO: not implemented yet.
        pass
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
9ad3b4e6ff5ec500fe1feeb841c4fe00e9267d19 | add sh_quote.py | python/sh_quote.py | python/sh_quote.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Tristan Cavelier <t.cavelier@free.fr>
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
def sh_quote(*params):
    """Join params into one POSIX-shell-safe, single-quoted word list.

    Every argument is wrapped in single quotes; an embedded single quote
    is emitted as the '\\'' escape sequence so the shell reassembles it.
    """
    quoted = ["'{0}'".format(p.replace("'", "'\\''")) for p in params]
    return " ".join(quoted)
### in bash you can do :
# eval -- "$(python sh_quote.py)"
### in python3 you can do :
# import os, sys
# out = os.popen(sh_quote(*['ls', '-1', "my'file;"]))
# out._proc.wait()
# sys.stdout.write(out.read())
######################################################################
# Tests
# prints if failure
def test(a, b):
    """Minimal check helper: print ``a != b`` when the values differ.

    Silent on success; never raises (NOTE(review): the name shadows a
    common test-runner prefix — consider renaming to ``check``).
    """
    if a != b:
        print(a + " != " + b)
test(sh_quote(*['ls', '-1', "my'file;"]), "'ls' '-1' 'my'\\''file;'")
| Python | 0.000004 | |
8a836c53d85f63831e51e7aac9a2f77fdf25ef9f | Add more indexing tests. | numpy/core/tests/test_indexing.py | numpy/core/tests/test_indexing.py | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import asbytes
from numpy.testing import *
import sys, warnings
# The C implementation of fancy indexing is relatively complicated,
# and has many seeming inconsistencies. It also appears to lack any
# kind of test suite, making any changes to the underlying code difficult
# because of its fragility.
# This file is to remedy the test suite part a little bit,
# but hopefully NumPy indexing can be changed to be more systematic
# at some point in the future.
class TestIndexing(TestCase):
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
assert_equal(a[None], a[np.newaxis])
assert_equal(a[None].ndim, a.ndim + 1)
def test_empty_tuple_index(self):
# Empty tuple index creates a view
a = np.array([1, 2, 3])
assert_equal(a[()], a)
assert_(a[()].base is a)
def _test_empty_list_index(self):
# Empty list index (is buggy!)
a = np.array([1, 2, 3])
assert_equal(a[[]], a)
def test_empty_array_index(self):
# Empty array index is illegal
a = np.array([1, 2, 3])
b = np.array([])
assert_raises(IndexError, a.__getitem__, b)
def test_ellipsis_index(self):
# Ellipsis index does not create a view
a = np.array([[1, 2, 3],
[4 ,5, 6],
[7, 8, 9]])
assert_equal(a[...], a)
assert_(a[...] is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
assert_equal(a[0, ...], a[0])
assert_equal(a[0, ...], a[0, :])
assert_equal(a[..., 0], a[:, 0])
# Slicing with ellipsis always results
# in an array, not a scalar
assert_equal(a[0, ..., 1], np.array(2))
def test_single_int_index(self):
# Single integer index selects one row
a = np.array([[1, 2, 3],
[4 ,5, 6],
[7, 8, 9]])
assert_equal(a[0], [1, 2, 3])
assert_equal(a[-1], [7, 8, 9])
# Index out of bounds produces IndexError
assert_raises(IndexError, a.__getitem__, 1<<30)
# Index overflow produces ValueError
assert_raises(ValueError, a.__getitem__, 1<<64)
def _test_single_bool_index(self):
# Single boolean index (is buggy?)
a = np.array([[1, 2, 3],
[4 ,5, 6],
[7, 8, 9]])
# Python boolean converts to integer (invalid?)
assert_equal(a[True], a[1])
# NumPy zero-dimensional boolean array (*crashes*)
assert_equal(a[np.array(True)], a) # what should be the behaviour?
assert_equal(a[np.array(False)], []) # what should be the behaviour?
def test_boolean_indexing(self):
# Indexing a 2-dimensional array with a length-1 array of 'True'
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
if __name__ == "__main__":
run_module_suite()
| from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import asbytes
from numpy.testing import *
import sys, warnings
# The C implementation of fancy indexing is relatively complicated,
# and has many seeming inconsistencies. It also appears to lack any
# kind of test suite, making any changes to the underlying code difficult
# because of its fragility.
# This file is to remedy the test suite part a little bit,
# but hopefully NumPy indexing can be changed to be more systematic
# at some point in the future.
def test_boolean_indexing():
# Indexing a 2-dimensional array with a length-1 array of 'True'
a = np.array([[ 0., 0., 0.]])
b = np.array([ True], dtype=bool)
assert_equal(a[b], a)
a[b] = 1.
assert_equal(a, [[1., 1., 1.]])
if __name__ == "__main__":
run_module_suite()
| Python | 0 |
2273dfcdb2f311f39e1bffe4f40cdc6e3b753155 | add buildOffsetMap.py | buildOffsetMap.py | buildOffsetMap.py | import sys, os, json
import frm
DATA_PATH = "data"
def main():
    """Read a list of FRM image names, compute per-frame sheet offsets,
    and dump the combined metadata as JSON to stdout (Python 2 script).
    """
    if len(sys.argv) != 2:
        print "USAGE: %s IMAGES_LIST" % sys.argv[0]
        sys.exit(1)
    images = list(open(sys.argv[1]))
    imageInfo = {}
    for image in images:
        image = image.rstrip()
        frmPath = os.path.join(DATA_PATH, image + ".FRM")
        frmInfo = frm.readFRMInfo(open(frmPath, "rb"))
        # sx accumulates across *all* directions and frames — presumably the
        # x position of each frame in one horizontal sprite strip; confirm.
        sx = 0 # running total width offset
        for direction in frmInfo['frameOffsets']:
            ox = 0 # running total offsets
            oy = 0
            for frame in direction:
                # ox/oy are cumulative per-direction drawing offsets.
                ox += frame['x']
                oy += frame['y']
                frame['sx'] = sx
                frame['ox'] = ox
                frame['oy'] = oy
                sx += frame['w']
        imageInfo[image] = frmInfo
    print json.dumps(imageInfo)
if __name__ == '__main__':
main() | Python | 0.000001 | |
a62b4f70816b831a16973e861449b0c76761cf52 | Create Odd_Even_Linked_List.py | data_structures/Linked_list/Python/Odd_Even_Linked_List.py | data_structures/Linked_list/Python/Odd_Even_Linked_List.py | '''
Given the head of a singly linked list, group all the nodes with odd indices together followed by the nodes with even indices, and return the reordered list.
'''
class Solution(object):
    def oddEvenList(self, head):
        """Regroup a singly linked list so that all odd-position nodes come
        first, followed by all even-position nodes (1-indexed), preserving
        relative order inside each group. Returns the (unchanged) head.
        """
        # Lists of length 0 or 1 are already grouped.
        if head is None or head.next is None:
            return head
        odd_tail = head
        even_head = head.next
        even_tail = even_head
        while even_tail.next is not None:
            # Move the node after the even tail onto the odd chain...
            odd_tail.next = even_tail.next
            odd_tail = odd_tail.next
            # ...and the one after that onto the even chain.
            even_tail.next = odd_tail.next
            even_tail = even_tail.next
            if even_tail is None:
                break
        # Stitch the even chain after the odd chain.
        odd_tail.next = even_head
        return head
'''
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
----------------------
Input: head = [1,2,3,4,5]
Output: [1,3,5,2,4]
'''
| Python | 0 | |
dd0e335574afd936b5849186202aedc8500f2c5b | add build-front | organization/core/management/commands/build-front.py | organization/core/management/commands/build-front.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, time
import subprocess
from django.apps import apps
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import connections
class Command(BaseCommand):
    """Build the active theme's front-end assets with bower and gulp."""
    help = "Build the front with bower and gulp"
    def handle(self, *args, **options):
        theme = ""
        # search for theme name in INSTALLED_APPS
        # to get the one that is used
        for ht in settings.HOST_THEMES:
            if ht[1] in settings.INSTALLED_APPS:
                theme = ht[1]
        if theme:
            theme_path = apps.get_app_config(theme.split('.')[1]).path
            os.chdir(theme_path)
            # BUG FIX: '&&' is shell syntax; inside an argv list it was passed
            # to bower as a literal argument and 'gulp build' never ran. Run
            # the two tools as separate processes and fail loudly on error.
            subprocess.run(["bower", "--allow-root", "install"], check=True)
            subprocess.run(["gulp", "build"], check=True)
| Python | 0.000017 | |
cc2343a564572e6c0bd94279bf3907e9e85da79b | Create version.py | plotly-1.2.9/plotly/version.py | plotly-1.2.9/plotly/version.py | __version__ = '1.2.9'
| Python | 0.000001 | |
65239ce01df89ceaaed989b28f4623ac521ce2c3 | Add download_stats script | download_stats.py | download_stats.py | import argparse
from xmlrpclib import ServerProxy
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("-n", type=int)
parser.add_argument("--package-list",
action="store")
args = parser.parse_args()
url = 'https://pypi.python.org/pypi'
client = ServerProxy(url)
if not args.package_list:
args.package_list = client.list_packages()
else:
args.package_list = [package.strip() for package in
open(args.package_list, 'r').readlines()]
if args.n:
args.package_list = args.package_list[:args.n]
downloads_dict = dict()
for package in args.package_list:
versions = client.package_releases(package)
try:
latest_version = versions[0]
downloads = max(client.release_data(package,
latest_version)['downloads'].values())
downloads_dict[package] = downloads
except:
downloads_dict[package] = 0
pickle.dump(downloads_dict, open('downloads_dict.pkl', 'w'))
| Python | 0 | |
2b79fd91b43248169e408093454a32367ecb6d61 | Add a solution that creates a separate LP for each frame transition. | project7/project7-lp-single.py | project7/project7-lp-single.py | from gurobipy import *
from sys import argv
import json
import math
import drawful
def read_lst(fn):
with open(fn, 'r') as f:
(n, tp) = json.load(f)
return (n, tp)
def write_lst(fn, lst):
with open(fn, 'w') as f:
json.dump(lst, f)
def distance(v1, v2):
return math.sqrt((v2[0] - v1[0]) ** 2 + (v2[1] - v1[1]) ** 2 + (v2[2] - v1[2]) ** 2)
def distance_squared(v1, v2):
return (v2[0] - v1[0]) ** 2 + (v2[1] - v1[1]) ** 2 + (v2[2] - v1[2]) ** 2
def get_permutation(edges, last_perm, last_frame, frame, n):
perm = [0] * n
for v1, v2 in edges:
v1i = last_frame.index(list(v1))
v2i = frame.index(list(v2))
j = last_perm.index(v1i)
perm[j] = v2i
return perm
def main():
def optimize_single(f):
m = Model('project7')
print("Adding variables...")
edge_vars = {}
point_edges = {}
t1, f1 = frames[f]
t2, f2 = frames[f + 1]
for i in range(n):
v1 = tuple(f1[i])
point_edges[v1] = []
for j in range(n):
v2 = tuple(f2[j])
cost = distance_squared(v1, v2)
#if (v1, v2) in edge_vars[f]:
# print("Duplicate vertex!")
# return
edge_vars[v1, v2] = m.addVar(obj=cost, vtype=GRB.BINARY)
point_edges[v1].append(edge_vars[v1, v2])
m.update()
print("Adding constraints...")
'''
# There must be n edges from one frame to the next
for frame in edge_vars:
m.addConstr(quicksum(frame.values()) == n)
'''
# There must be one incoming edge per point in the last n-1 frames
for v2 in frames[f+1][1]:
v2 = tuple(v2)
v2_edges = []
for v1 in frames[f][1]:
v1 = tuple(v1)
v2_edges.append(edge_vars[v1,v2])
m.addConstr(quicksum(v2_edges) == 1)
# There must be one outgoing edge per point in the first n-1 frames
for edges in point_edges:
m.addConstr(quicksum(point_edges[edges]) == 1)
m.optimize()
edges = m.getAttr('x', edge_vars).items()
selected = []
for edge, value in edges:
if value:
selected.append(edge)
# Calculate cost
cost = 0
for v1, v2 in selected:
cost += distance(v1, v2)
print("cost", f, ":", cost)
return get_permutation(selected, last_perm, frames[f][1], frames[f + 1][1], n)
# fn = 'data-n2-t3.json'
# fn = 'example-points.lst'
fn = 'points-00100-0.lst'
if len(argv) == 2:
fn = argv[1]
n, frames = read_lst(fn)
orig_frames = [[tuple(u) for u in ss[1]] for ss in frames]
nf = len(frames) - 1
print("n:", n)
print("frames: t0-t" + str(nf))
solution = [n]
last_perm = [i for i in range(n)]
for f in range(nf):
last_perm = optimize_single(f)
solution.append(last_perm)
# print(solution)
write_lst(fn + '.sol', solution)
drawful.drawWithIndices(orig_frames, solution[1], solution[2])
if __name__ == '__main__':
import time
start = time.clock()
main()
end = time.clock()
print("time: {0:.3f} s".format(end - start))
| Python | 0 | |
95a26454173b59c8609ddb81027ed71005e9e86c | add module to handle exceptions | cno/misc/tools.py | cno/misc/tools.py | __all__ = ["CNOError"]
class CNOError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| Python | 0.000001 | |
bb58564dc400e91c132e3a26532595ec9de73958 | Create managers.py | managers.py | managers.py | from django.db import models
class VisibilityManagerMixin(object):
"""
This manager should be used with a model that implements the Hideable
mixin.
"""
def __init__(self, *args, **kwargs):
self.visible = kwargs.pop('visible', True)
super().__init__(*args, **kwargs)
def get_queryset(self):
return super().get_queryset()
.filter(hidden__isnull=self.visible)
class VisibilityManager(VisibilityManagerMixin, models.Manager):
pass
| Python | 0.000001 | |
fbaaf3ba027ee9d18df7b1f48533c8847f084381 | Add harmonic mean estimator. | harmonicmean.py | harmonicmean.py | import numpy.random
import numpy as np
import lib
from math import log
def compute_harmonicmean(lnlike_post, posterior_sample=None, lnlikefunc=None,
lnlikeargs=(), **kwargs):
"""
Computes the harmonic mean estimate of the marginal likelihood.
The estimation is based on n posterior samples
(indexed by s, with s = 0, ..., n-1), but can be done directly if the
log(likelihood) in this sample is passed.
:param array lnlike_post:
log(likelihood) computed over a posterior sample. 1-D array of length n.
If an emply array is given, then compute from posterior sample.
:param array posterior_sample:
A sample from the parameter posterior distribution.
Dimensions are (n x k), where k is the number of parameters. If None
the computation is done using the log(likelihood) obtained from the
posterior sample.
:param callable lnlikefunc:
Function to compute ln(likelihood) on the marginal samples.
:param tuple lnlikeargs:
Extra arguments passed to the likelihood function.
Other parameters
----------------
:param int size:
Size of sample to use for computation. If none is given, use size of
given array or posterior sample.
References
----------
Kass & Raftery (1995), JASA vol. 90, N. 430, pp. 773-795
"""
if len(lnlike_post) == 0 and posterior_sample is not None:
samplesize = kwargs.pop('size', len(posterior_sample))
if samplesize < len(posterior_sample):
posterior_subsample = numpy.random.choice(posterior_sample,
size=samplesize,
replace=False)
else:
posterior_subsample = posterior_sample.copy()
# Compute log likelihood in posterior sample.
log_likelihood = lnlikefunc(posterior_subsample, *lnlikeargs)
elif len(lnlike_post) > 0:
samplesize = kwargs.pop('size', len(lnlike_post))
log_likelihood = numpy.random.choice(lnlike_post, size=samplesize,
replace=False)
# Use identity for summation
# http://en.wikipedia.org/wiki/List_of_logarithmic_identities#Summation.2Fsubtraction
# ln(sum(x)) = ln(x[0]) + ln(1 + sum( exp( ln(x[1:]) - ln(x[0]) ) ) )
hme = -lib.log_sum(-log_likelihood) + log(len(log_likelihood))
return hme
def run_hme_mc(log_likelihood, nmc, samplesize):
hme = np.zeros(nmc)
for i in range(nmc):
hme[i] = compute_harmonicmean(log_likelihood, size=samplesize)
return hme
__author__ = 'Rodrigo F. Diaz' | Python | 0.000003 | |
156a31c7aef3dfc07f5e3b0998b0957308abdd16 | Create getPrice.py | src/getPrice.py | src/getPrice.py | import requests
import requests.auth
import time
import json
def getPrices():
print "Grabbing price..."
dogeprice = parsePrices("doge")
btcprice = parsePrices("btc")
ltcprice = parsePrices("ltc")
rddprice = parsePrices("rdd")
obj3 = open('price.txt', 'w')
obj3.write(str(dogeprice) + "\n" + str(btcprice) + '\n' + str(ltcprice) + '\n' + str(rddprice))
obj3.close()
print 'Done'
def parsePrices(currency):
code = requests.get('http://coinmarketcap.northpole.ro/api/' + currency + '.json')
json_input = code.json()
decoded = json.dumps(json_input)
decoded = json.loads(decoded)
price = decoded['price']
price = float(price)
price = 1.3 * 4 / price
price = round(price,7)
return price
while True:
getPrices()
for x in range(2700,-1,-1):
print x
x+=1
time.sleep(1)
| Python | 0.000001 | |
7c0bf9e7930773a35da7303284842f74cc7b7744 | Add util script to dump flickr photo stats | myflickr.py | myflickr.py | #
# use flickrapi to dump stats about pictures in a flickr account
# use time sleep to prevent automatic flickr rejection
#
import sys
import flickrapi
import time
import sys
from basics import *
import xml.etree.ElementTree
#
# method to dump social stats about the flickr user account
# args:
# - api_secret : your flickr api secret
# - api_key : your flickr api key
# - user_id : your flickr user id
# - filepath : the path of the xml file where the data will be dumped into
#
def flickr_dump(api_secret,api_key,user_id,filepath):
#
# connect to flickr with flick api
#
flickr=flickrapi.FlickrAPI(api_key,api_secret)
flickr.web_login_url("read")
(token,frob)= flickr.get_token_part_one(perms='read')
if not token: time.sleep(20)
flickr.get_token_part_two((token, frob))
#
# get the photo data
#
myphotos = []
perpage = 10
pageindex = 1
rsp = flickr.people_getPhotos(api_key=api_key,user_id=user_id,per_page=perpage,page=pageindex)
photoss = list(rsp.iter("photos"))[0];
while int(photoss.attrib['page']) < int(photoss.attrib['pages']):
puts("page index",pageindex)
time.sleep(10)
photolist = list(photoss.iter("photo"));
photoindex = 0
for photo in photolist:
time.sleep(1)
# get the title
phototitle = photo.attrib['title']
# get timestamp of the photo
photoinfo = flickr.photos_getInfo(api_key=api_key,photo_id=photo.attrib['id'])
photoxml = list(photoinfo.iter("photo"))[0]
dates = list(photoxml.iter("dates"))[0]
phototimestamp = dates.attrib['posted']
# get the list of favorites for the photo
time.sleep(1)
favs = flickr.photos_getFavorites(api_key=api_key,photo_id=photo.attrib['id'])
favxml = list(favs.iter("photo"))[0]
favcount = favxml.attrib['total']
personlist = list(favxml.iter("person"))
favedates = [person.attrib['favedate'] for person in personlist]
# add data to cache structure
myphotos.append((phototitle, phototimestamp, favcount, favedates))
# stdout info
puts("photo",photo.attrib['id'],phototitle,"favedates",favedates)
# iter to the next photo
photoindex += 1
# iter to the next page
pageindex += 1
rsp = flickr.people_getPhotos(api_key=api_key,user_id="22283623@N00",per_page=perpage,page=pageindex)
photoss = list(rsp.iter("photos"))[0];
#
# format the data into xml
#
result = "<flickr timestamp=\"" + str(time.time()) + "\">\n"
for photo in myphotos:
(title,timestamp,total,favdates) = photo
result += " <photo title=\"" + title + "\" \t timestamp=\"" + timestamp + "\" \t count=\"" + total + "\" >\n"
for favdate in favedates:
result += " <favedate timestamp=\"" + favdate + "\"/>\n"
result += " </photo>\n"
result += "</flickr>\n"
# dump the xml result in a file
output=open(filepath, 'w+')
output.write(result.encode('utf8'))
output.close()
# flickr_dump("123456789abcdef0","123456789abcdef0123456789abcdef0","12345678@N01","C:/stats/flickr_stats.xml")
| Python | 0 | |
082f11f5a24efd21f05b1d7cc7f1b1f1ab91fb0c | Add exercise 13: couplage | prologin-2014/13_couplage.py | prologin-2014/13_couplage.py | # http://www.prologin.org/training/challenge/demi2014/couplage
from sys import stdin
nbBowlsFirst = int(stdin.readline())
nbBowlsSecond = int(stdin.readline())
bowlsFirst = [int(x) for x in stdin.readline().split()]
bowlsSecond = [int(x) for x in stdin.readline().split()]
def maxInTwoLists(first, second):
"""Find the max value present in two lists"""
maxFirst = max(first)
maxSecond = max(second)
if (maxFirst == maxSecond):
return maxFirst
elif (maxFirst < maxSecond):
second.remove(maxSecond)
return maxInTwoLists(first, second)
else:
first.remove(maxFirst)
return maxInTwoLists(first, second)
def optimize(acc, first, second):
# If a list is empty, stop here
if len(first) == 0 or len(second) == 0:
return acc
# Try to reach the max value in these lists
maxValue = maxInTwoLists(first, second)
# If we have matching bowls before the maxValue, count them
for i in range(min(first.index(maxValue), second.index(maxValue))):
if (first[i] == second[i]):
return optimize(acc + first[i], first[i+1:], second[i+1:])
# Determine the index of the maxValue in both lists
firstIndex = first.index(maxValue)
secondIndex = second.index(maxValue)
# Maybe it would be better to not reach this maxValue.
# Delete it from the first list and try that
firstWithoutMax = list(first)
firstWithoutMax.remove(maxValue)
return max(
# Go straight to the maxValue in both lists and continue with tails
optimize(acc + maxValue, first[firstIndex+1:], second[secondIndex+1:]),
# Maybe it would be better to not reach this maximum
optimize(acc, firstWithoutMax, second)
)
print optimize(0, bowlsFirst, bowlsSecond) | Python | 0.001185 | |
b405d9384cd9b1155177fe99cc40a8f1a53cb5f6 | Include graphing | mpi_optimizer.py | mpi_optimizer.py | """
@Author: Rui Shu
@Date: 4/11/15
Finds the global maxima of a costly function in a parallelized setting. Runs
optimizer.py in parallel with with several worker nodes that evaluates the costly
function in batch.
Run as: mpiexec -np 4 python parallel_optimizer.py
where 4 is the number of available processes
Framework:
Master -- handles the Optimizer object (which takes prior data,
interpolates based on a neural network-linear regression model, and selects the
next set of points to query). Tells worker nodes which points to query.
Worker -- compute the costly function. Returns function evaluation.
Trainer -- in charge of handling the neural network training.
"""
from mpi_definitions import *
print "THE RANK IS: %d, with total size: %d" % (rank, size)
def contains_row(x, X):
""" Checks if the row x is contained in matrix X
"""
for i in range(X.shape[0]):
if X[i,:] == x:
return True
return False
def master_process(lim_x, init_size):
import mpi_master
import random
import matplotlib.pyplot as plt
num_workers = size - 1
closed_workers = 0
trainer_is_ready = True
print "Master starting with %d workers" % num_workers
init_query = np.asarray([[i] for i in np.linspace(0, lim_x[1], init_size)], dtype=np.float32) # Uniform sampling
domain = np.asarray([[i] for i in np.linspace(lim_x[0], lim_x[1], 100)])
# Acquire an initial data set
tasks_assigned = 0
tasks_done = 0
tasks_total = init_size
dataset = None
# Initial query
while tasks_done < init_size:
data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) # receive worker info
source = status.Get_source()
tag = status.Get_tag()
if tag == WORKER_READY:
if tasks_assigned < tasks_total:
comm.send(init_query[tasks_assigned, :], dest=source, tag=SEND_WORKER)
tasks_assigned += 1
else:
print "MASTER: No more intial work available. Give random work."
comm.send(domain[random.choice(range(domain.shape[0])), :], dest=source, tag=SEND_WORKER)
if tag == WORKER_DONE:
if dataset == None:
dataset = data
else:
dataset = np.concatenate((dataset, data), axis=0)
if contains_row(data[0, :-1], init_query):
tasks_done += 1
print "Complete initial dataset acquired"
print dataset
# Principled query
import optimizer as op
optimizer = op.Optimizer(dataset, domain)
optimizer.train()
selected_points = optimizer.select_multiple()
selection_size = selected_points.shape[0]
selection_index = 0
tasks_done = 0
tasks_total = 4
while closed_workers < num_workers:
if selection_index == selection_size:
selected_points = optimizer.select_multiple()
selection_index = 0
if trainer_is_ready:
# If trainer is ready, keep shoving data at him, if there is data to be shoved
comm.send("Master has sent Trainer something", dest=TRAINER, tag=SEND_TRAINER)
trainer_is_ready = not trainer_is_ready
if not (tasks_done < tasks_total):
print "Master: Killing Trainer"
comm.send("Master has fired Trainer", dest=TRAINER, tag=EXIT_TRAINER)
data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) # receive worker info
source = status.Get_source()
tag = status.Get_tag() # check what worker's request is
if tag == WORKER_READY:
# If worker is ready, check if you have work left. If all work is completed,
# tell worker to exit. If work is available, give it to worker.
if tasks_done < tasks_total:
comm.send(selected_points[selection_index], dest=source, tag=SEND_WORKER)
selection_index += 1
else:
print "Master: Killing Worker"
comm.send(None, dest=source, tag=EXIT_WORKER)
elif tag == WORKER_DONE:
# If worker is done, tally the total amount of work done.
dataset = np.concatenate((dataset, data), axis=0)
tasks_done += 1
print "Number of total tasks: %d" % tasks_done
elif tag == TRAINER_DONE:
# If trainer is done, store what trainer did.
train_is_ready = not trainer_is_ready
elif tag == EXIT_WORKER or tag == EXIT_TRAINER:
# If worker has exited, tally up number of closed workers.
closed_workers += 1
print "Master is done."
optimizer.train()
selected_point = optimizer.select_multiple()[0, :]
domain, pred, hi_ci, lo_ci, nn_pred, ei, gamma = optimizer.get_prediction()
# Plot results
ax = plt.gca()
plt.plot(domain, pred, 'c--', label='NN-LR regression', linewidth=7)
plt.plot(domain, nn_pred, 'r--', label='NN regression', linewidth=7)
plt.plot(domain, hi_ci, 'g--', label='ci')
plt.plot(domain, lo_ci, 'g--')
plt.plot(domain, ei, 'b--', label='ei')
plt.plot(domain, gamma, 'r', label='gamma')
plt.plot([selected_point, selected_point], [ax.axis()[2], ax.axis()[3]], 'r--',
label='EI selection')
plt.plot(dataset[:,:-1], dataset[:, -1:], 'rv', label='training', markersize=7.)
plt.xlabel('Input space')
plt.ylabel('Output space')
plt.title("NN-LR regression")
plt.legend()
plt.show()
def trainer_process():
while True:
new_data = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
if tag == SEND_TRAINER:
print new_data
# Does its thing
comm.send("TRAINER is done", dest=0, tag=TRAINER_DONE)
elif tag == EXIT_TRAINER:
print "TRAINER: Commiting suicide"
break
comm.send(None, dest=0, tag=EXIT_TRAINER) # Suicide complete
def worker_process():
from hidden_function import evaluate
while True:
comm.send("WORKER is ready", dest=0, tag=WORKER_READY) # tell Master node that I need a new query
query = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
if tag == SEND_WORKER:
print "WORKER: The query is: " + str(query)
result = evaluate(query)
comm.send(result, dest=0, tag=WORKER_DONE)
elif tag == EXIT_WORKER:
# Worker dies!
print "WORKER: Commiting suicide"
break
comm.send(None, dest=0, tag=EXIT_WORKER) # Suicide complete
if rank == MASTER: # MASTER NODE
# Settings
lim_x = [-6, 4] # x range for univariate data
init_size = 5
master_process(lim_x, init_size)
elif rank == TRAINER:
trainer_process()
else:
worker_process()
| Python | 0 | |
f8a0aa92c8e19bc11f8a609733644afe0efed5c8 | Update test script to do match testing. | decompose_test.py | decompose_test.py | from util.decompose_graph import decompose_graph
from core.himesis_utils import expand_graph, set_do_pickle, set_compression
set_do_pickle(True)
set_compression(6)
file_name = "226482067288742734644994685633991185819"
graph = expand_graph(file_name)
print(graph.name)
from core.himesis_utils import load_directory
contracts = load_directory("mbeddr2C_MM/Contracts/")
atomic_contracts = [
'AssignmentInstance'
]
if_then_contracts = []
prop_if_then_contracts = []
from core.himesis_utils import graph_to_dot, load_directory
from util.test_script_utils import select_rules, get_sub_and_super_classes,\
load_transformation, changePropertyProverMetamodel, set_supertypes, load_contracts
from util.slicer import Slicer
from util.parser import load_parser
inputMM = "./mbeddr2C_MM/ecore_metamodels/Module.ecore"
outputMM = "./mbeddr2C_MM/ecore_metamodels/C.ecore"
subclasses_dict, superclasses_dict = get_sub_and_super_classes(inputMM, outputMM)
atomic_contracts, if_then_contracts = load_contracts(contracts, superclasses_dict,
atomic_contracts, if_then_contracts,
prop_if_then_contracts,
False)
contract =atomic_contracts[0][1]
print(contract)
print(contract.has_pivots())
#graph_to_dot(graph.name, graph, force_trace_links = True)
import time
print("Starting to check")
start_time = time.time()
result = contract.check(graph)
print(result)
print("Finished in " + str(time.time() - start_time) + " seconds")
#decompose_graph(graph) | Python | 0 | |
577891c76140ce50f6be450594a23d78366c5719 | Create __init__.py | pythainlp/number/__init__.py | pythainlp/number/__init__.py | # ระบบแปลงเลขใน 1- 10 ภาษาไทย
p = [['ภาษาไทย', 'ตัวเลข','เลขไทย'],
['หนึ่ง', '1', '๑'],
['สอง', '2', '๒'],
['สาม', '3', '๓'],
['สี่', '4', '๔'],
['ห้า', '5', '๕'],
['หก', '6', '๖'],
['หก', '7', '๗'],
['แปด', '8', '๘'],
['เก้า', '9', '๙']]
thaitonum = dict((x[2], x[1]) for x in p[1:])
p1 = dict((x[0], x[1]) for x in p[1:])
d1 = 0
def nttn(text):
#เลขไทยสู่เลข
thaitonum = dict((x[2], x[1]) for x in p[1:])
return thaitonum[text]
def nttt(text):
#เลขไทยสู่ข้อความ
thaitonum = dict((x[2], x[0]) for x in p[1:])
return thaitonum[text]
def ntnt(text):
#เลขสู่เลขไทย
thaitonum = dict((x[1], x[2]) for x in p[1:])
return thaitonum[text]
def ntt(text):
#เลขสู่ข้อความ
thaitonum = dict((x[1], x[0]) for x in p[1:])
return thaitonum[text]
def ttn(text):
#ข้อความสู่เลข
thaitonum = dict((x[0], x[1]) for x in p[1:])
return thaitonum[text]
def ttnt(text):
#ข้อความสู่เลขไทย
thaitonum = dict((x[0], x[2]) for x in p[1:])
return thaitonum[text]
if __name__ == "__main__":
print(ntt('4'))
| Python | 0.000429 | |
8c98e313caeb82ee710d56399d5de7cf1eb1f7df | Add DNA Coding | python/src/dna/dna_coding.py | python/src/dna/dna_coding.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import src.mylib.mfile as mfile
from matplotlib import style
stop =int('101010', 2) #101010 I Ching 63 After Completion
befo =int('010101', 2) #101010 I Ching 64 Before Completion
guai =int('111110', 2) #101010 I Ching 43
qian =int('111111', 2) #101010 I Ching 01
kun =int('000000', 2) #101010 I Ching 02
df = mfile.loadOneSymbol("JPY=X", "../db/forex.db")
df = df.reset_index(drop=True)
df = df['Close']
df = df[-1000:]
df = df.diff()
df = df.dropna()
fn = lambda x: (1.0 if x > 0 else 0.0)
xx = df.apply(fn)
xx = xx.values
ln = len(xx)
sz = (ln // 6) * 6
xx = xx[:sz]
print(xx)
#L0 = xx[:-2]
#L1 = xx[1:-1]
#L2 = xx[2:]
#yy = L0 * 4 + L1 * 2 + L2
def my_func(arr, num):
sum = 0
for i in range(num):
sum += arr[i] * (2**(num-i-1))
return sum
xx = np.reshape(xx, (-1, 6))
yy = np.apply_along_axis(my_func, 1, xx, 6)
i, = np.where(yy == stop)
zz = np.copy(yy)
zz[zz != stop] = np.nan
ss = yy
sp = range(0, len(ss))
style.use('ggplot')
plt.plot(ss)
plt.plot(zz, 'bo')
print(ss)
plt.show()
| Python | 0.000595 | |
1be041fd9bfc856fd59fba52501823d80d3ff037 | Create setup.py | neutron/setup.py | neutron/setup.py | Python | 0.000001 | ||
a4f030cf10683aa949550e9922c3ea72421cf392 | Update PCI configuration options | nova/conf/pci.py | nova/conf/pci.py | # needs:check_opt_group_and_type
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
pci_opts = [
cfg.MultiStrOpt('pci_alias',
default=[],
help="""
An alias for a PCI passthrough device requirement.
This allows users to specify the alias in the extra_spec for a flavor, without
needing to repeat all the PCI property requirements.
Possible Values:
* A list of JSON values which describe the aliases. For example:
pci_alias = {
"name": "QuickAssist",
"product_id": "0443",
"vendor_id": "8086",
"device_type": "type-PCI"
}
defines an alias for the Intel QuickAssist card. (multi valued). Valid key
values are :
* "name": Name of the PCI alias.
* "product_id": Product ID of the device in hexadecimal.
* "vendor_id": Vendor ID of the device in hexadecimal.
* "device_type": Type of PCI device. Valid values are: "type-PCI",
"type-PF" and "type-VF".
"""),
cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
help="""
White list of PCI devices available to VMs.
Possible values:
* A JSON dictionary which describe a whitelisted PCI device. It should take
the following format:
["vendor_id": "<id>",] ["product_id": "<id>",]
["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
"devname": "<name>",]
{"<tag>": "<tag_value>",}
Where '[' indicates zero or one occurrences, '{' indicates zero or multiple
occurrences, and '|' mutually exclusive options. Note that any missing
fields are automatically wildcarded.
Valid key values are :
* "vendor_id": Vendor ID of the device in hexadecimal.
* "product_id": Product ID of the device in hexadecimal.
* "address": PCI address of the device.
* "devname": Device name of the device (for e.g. interface name). Not all
PCI devices have a name.
* "<tag>": Additional <tag> and <tag_value> used for matching PCI devices.
Supported <tag>: "physical_network".
Valid examples are:
pci_passthrough_whitelist = {"devname":"eth0",
"physical_network":"physnet"}
pci_passthrough_whitelist = {"address":"*:0a:00.*"}
pci_passthrough_whitelist = {"address":":0a:00.",
"physical_network":"physnet1"}
pci_passthrough_whitelist = {"vendor_id":"1137",
"product_id":"0071"}
pci_passthrough_whitelist = {"vendor_id":"1137",
"product_id":"0071",
"address": "0000:0a:00.1",
"physical_network":"physnet1"}
The following are invalid, as they specify mutually exclusive options:
pci_passthrough_whitelist = {"devname":"eth0",
"physical_network":"physnet",
"address":"*:0a:00.*"}
* A JSON list of JSON dictionaries corresponding to the above format. For
example:
pci_passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
{"product_id":"0002", "vendor_id":"8086"}]
""")
]
def register_opts(conf):
conf.register_opts(pci_opts)
def list_opts():
# TODO(sfinucan): This should be moved into the PCI group and
# oslo_config.cfg.OptGroup used
return {'DEFAULT': pci_opts}
| # needs:check_opt_group_and_type
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
pci_opts = [
cfg.MultiStrOpt('pci_alias',
default=[],
help="""
An alias for a PCI passthrough device requirement.
This allows users to specify the alias in the extra_spec for a flavor, without
needing to repeat all the PCI property requirements.
Possible Values:
* A list of JSON values which describe the aliases. For example:
pci_alias = {
"name": "QuickAssist",
"product_id": "0443",
"vendor_id": "8086",
"device_type": "type-PCI"
}
defines an alias for the Intel QuickAssist card. (multi valued). Valid key
values are :
* "name"
* "product_id"
* "vendor_id"
* "device_type"
"""),
cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
help="""
White list of PCI devices available to VMs.
Possible values:
* A JSON dictionary which describe a whitelisted PCI device. It should take
the following format:
["device_id": "<id>",] ["product_id": "<id>",]
["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
"devname": "PCI Device Name",]
{"tag": "<tag_value>",}
where '[' indicates zero or one occurrences, '{' indicates zero or multiple
occurrences, and '|' mutually exclusive options. Note that any missing
fields are automatically wildcarded. Valid examples are:
pci_passthrough_whitelist = {"devname":"eth0",
"physical_network":"physnet"}
pci_passthrough_whitelist = {"address":"*:0a:00.*"}
pci_passthrough_whitelist = {"address":":0a:00.",
"physical_network":"physnet1"}
pci_passthrough_whitelist = {"vendor_id":"1137",
"product_id":"0071"}
pci_passthrough_whitelist = {"vendor_id":"1137",
"product_id":"0071",
"address": "0000:0a:00.1",
"physical_network":"physnet1"}
The following are invalid, as they specify mutually exclusive options:
pci_passthrough_whitelist = {"devname":"eth0",
"physical_network":"physnet",
"address":"*:0a:00.*"}
* A JSON list of JSON dictionaries corresponding to the above format. For
example:
pci_passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
{"product_id":"0002", "vendor_id":"8086"}]
""")
]
def register_opts(conf):
conf.register_opts(pci_opts)
def list_opts():
# TODO(sfinucan): This should be moved into the PCI group and
# oslo_config.cfg.OptGroup used
return {'DEFAULT': pci_opts}
| Python | 0.000002 |
08122e57235e836dbfd4230e9e3ad3f7c54072ff | add simple debug callback test case | pycurl/tests/test_debug.py | pycurl/tests/test_debug.py | # $Id$
import pycurl
def test(**args):
print args
c = pycurl.init()
c.setopt(pycurl.URL, 'http://curl.haxx.se/')
c.setopt(pycurl.VERBOSE, 1)
c.setopt(pycurl.DEBUGFUNCTION, test)
c.perform()
c.cleanup()
| Python | 0 | |
6e42855d527976dd8b1cdb272502ce3aa76f8c6e | Add dbee abstract base class. | dbeekeeper/dbee/base.py | dbeekeeper/dbee/base.py | # Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
class Base(Exception):
"""Abstract base class for dbeekeeper local storage, or 'dbee'.
A dbee instance must be accessed from a single thread.
Dbee transactions must be idempotent. Much like ZooKeeper snapshots, dbee
snapshots are 'fuzzy', meaning that transactions that were executed during
snapshotting may or may not be included in the snapshot. During recovery,
dbeekeeper executes all the transactions since the beginning of the
snapshot it's recoverying from in the same order they were applied
originally.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def execute(self, transaction):
"""Execute a transaction.
This method is *not* responsible for persisting transaction to disk.
The caller must maintain a transaction log until it takes a snapshot.
Args:
transaction: transaction to execute in string.
Returns:
None
Raises:
dbeekeeper.DbeeError:
DbeeError is considered fatal since it might affet consistency
of dbee. When dbee throws a DbeeError, dbeekeeper goes into
recovery mode.
dbeekeeper.ClientError:
ClientError is *not* considered fatal since it does not affect
consistency of dbee. Dbeekeeper simply pass ClientErrors back
to the client.
"""
    @abc.abstractmethod
    def snapshot(self, filename, callback):
        """Take a snapshot of this dbee asynchronously.

        This method must not block. It should initiate snapshotting in a
        separate thread/process and return without waiting for the snapshotting
        to finish. Dbee must reject any other incoming snapshot/restore
        requests during the snapshot by raising a ClientError.

        The resulting snapshot must contain all the transactions this dbee
        successfully executed before the snapshot() was called. For incoming
        execute requests during the snapshot, dbee must either:

        a. Block them until the snapshotting finishes.
        b. Accept the transactions. These transactions may or may not be in the
           resulting snapshot. It is the caller's responsibility to maintain
           a log for these transactions until the next snapshot() call finishes
           successfully.

        Args:
            filename: filename to use for the snapshot.
            callback: function to call when the snapshotting completes. This
                      function must take 2 arguments, error and filename. If
                      snapshotting succeeded, the first argument is set to None
                      and the second argument is a string that contains
                      filename of the resulting snapshot. If snapshotting
                      failed, the first argument is an Exception and the second
                      argument is set to None.

        Returns:
            None

        Raises:
            This method must not raise any dbeekeeper error. All the dbeekeeper
            errors must be passed in the callback.
        """
    @abc.abstractmethod
    def restore(self, filename):
        """Restore dbee from a snapshot.

        This method must block until the restore operation completes.

        Args:
            filename: Snapshot file to restore from.

        Returns:
            None

        Raises:
            dbeekeeper.DbeeError:
                Raised when the snapshot could not be applied; DbeeError is
                considered fatal since dbee state may be inconsistent.
        """
| Python | 0 | |
7e96013f21bbb5003b30da1e04833dcf58650602 | Implement a ThriftHandler for tornado | freenoted/tasks/tornado_thrift.py | freenoted/tasks/tornado_thrift.py | from __future__ import absolute_import
import tornado.web
from thrift.transport.TTransport import TMemoryBuffer
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
class TornadoThriftHandler(tornado.web.RequestHandler):
    """Serve a Thrift processor over HTTP POST using in-memory transports."""

    def initialize(self, processor):
        # Tornado passes this the keyword arguments supplied when the
        # handler was registered with the application.
        self.processor = processor

    def post(self):
        # Deserialize the request body, run the processor, and serialize
        # its reply into a fresh buffer.
        input_protocol = TBinaryProtocol(TMemoryBuffer(self.request.body))
        output_buffer = TMemoryBuffer()
        output_protocol = TBinaryProtocol(output_buffer)
        self.processor.process(input_protocol, output_protocol)
        self.set_header('Content-Type', 'application/x-thrift')
        self.write(output_buffer.getvalue())
| Python | 0.000517 | |
c66e64556747736c1ee7461aa6ee8780a330481b | add sparse_to_dense_test | caffe2/python/sparse_to_dense_test.py | caffe2/python/sparse_to_dense_test.py | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestSparseToDense(TestCase):
    """Unit tests for the caffe2 SparseToDense operator."""

    def test_sparse_to_dense(self):
        # Scatter `values` into a dense output at positions `indices`.
        # Index 2 appears twice, so its contributions must be summed.
        op = core.CreateOperator(
            'SparseToDense',
            ['indices', 'values'],
            ['output'])
        workspace.FeedBlob(
            'indices',
            np.array([2, 4, 999, 2], dtype=np.int32))
        workspace.FeedBlob(
            'values',
            np.array([1, 2, 6, 7], dtype=np.int32))
        workspace.RunOperatorOnce(op)
        output = workspace.FetchBlob('output')
        print(output)
        # Without a third input, the output length is inferred from the
        # largest index (999 -> length 1000).
        expected = np.zeros(1000, dtype=np.int32)
        expected[2] = 1 + 7
        expected[4] = 2
        expected[999] = 6

        self.assertEqual(output.shape, expected.shape)
        np.testing.assert_array_equal(output, expected)

    def test_sparse_to_dense_invalid_inputs(self):
        # indices and values have different lengths, which the operator
        # must reject at run time.
        op = core.CreateOperator(
            'SparseToDense',
            ['indices', 'values'],
            ['output'])
        workspace.FeedBlob(
            'indices',
            np.array([2, 4, 999, 2], dtype=np.int32))
        workspace.FeedBlob(
            'values',
            np.array([1, 2, 6], dtype=np.int32))
        with self.assertRaises(RuntimeError):
            workspace.RunOperatorOnce(op)

    def test_sparse_to_dense_with_data_to_infer_dim(self):
        # The optional third input fixes the output length (1500 here),
        # overriding inference from the largest index.
        op = core.CreateOperator(
            'SparseToDense',
            ['indices', 'values', 'data_to_infer_dim'],
            ['output'])
        workspace.FeedBlob(
            'indices',
            np.array([2, 4, 999, 2], dtype=np.int32))
        workspace.FeedBlob(
            'values',
            np.array([1, 2, 6, 7], dtype=np.int32))
        workspace.FeedBlob(
            'data_to_infer_dim',
            np.array(np.zeros(1500, ), dtype=np.int32))
        workspace.RunOperatorOnce(op)
        output = workspace.FetchBlob('output')
        print(output)
        expected = np.zeros(1500, dtype=np.int32)
        expected[2] = 1 + 7
        expected[4] = 2
        expected[999] = 6

        self.assertEqual(output.shape, expected.shape)
        np.testing.assert_array_equal(output, expected)
| Python | 0.000006 | |
29e8dce6fc2956dc9f942eca41fdb632c382fe8e | Create pylsy.py | pylsy/tests/pylsy.py | pylsy/tests/pylsy.py | # -*- coding: utf-8 -*-
from __future__ import print_function
class PylsyTable(object):
    """Render a simple ASCII table.

    Typical use::

        table = PylsyTable(["name", "age"])
        table.add_data("name", ["alice", "bob"])
        table.add_data("age", [30, 25])
        print(table)

    Bug fix relative to the original: columns of unequal length no longer
    raise IndexError; shorter columns are padded with blank cells.
    """

    def __init__(self, attributes):
        # Accumulated textual form of the table; filled by create_table().
        self.StrTable = ""
        self.Attributes = attributes
        # One single-key dict per column, preserving column order.
        self.Table = [{attribute: ""} for attribute in attributes]
        # Per-column width in "character pairs" (each unit renders 2 chars).
        self.AttributesLength = []
        self.Cols_num = len(self.Attributes)
        # Number of body rows (length of the longest column).
        self.Lines_num = 0

    def print_divide(self):
        """Append one horizontal separator row to StrTable."""
        for space in self.AttributesLength:
            self.StrTable += "+ " + "- " * space
        self.StrTable += "+" + "\n"

    def add_data(self, attribute, values):
        """Attach a list of values to the named column (stringified)."""
        for col in self.Table:
            if attribute in col:
                col[attribute] = [str(value) for value in values]

    def create_table(self):
        """Compute column widths, then render header and body into StrTable."""
        for col in self.Table:
            values = list(col.values())[0]
            if self.Lines_num < len(values):
                self.Lines_num = len(values)
            # Column width is the longest cell, header included.
            key_length = len(list(col.keys())[0])
            for value in values:
                if len(value) > key_length:
                    key_length = len(value)
            self.AttributesLength.append(key_length)
        self.print_head()
        self.print_value()

    def print_head(self):
        """Render the header row with each attribute centred in its column."""
        self.print_divide()
        self.StrTable += "| "
        for spaces, attr in zip(self.AttributesLength, self.Attributes):
            space_num = spaces * 2 - 1
            start = (space_num - len(attr)) // 2
            self.StrTable += " " * start
            self.StrTable += attr + ' '
            self.StrTable += " " * (space_num - start - len(attr))
            self.StrTable += "| "
        self.StrTable += "" + '\n'
        self.print_divide()

    def print_value(self):
        """Render one body row per line; short columns get blank cells."""
        for line in range(self.Lines_num):
            for col, length in zip(self.Table, self.AttributesLength):
                self.StrTable += "| "
                value_length = length * 2 - 1
                value = list(col.values())[0]
                # Guard against ragged columns: render an empty cell
                # instead of raising IndexError (bug fix).
                if line < len(value):
                    cell = value[line]
                    start = (value_length - len(cell)) // 2
                    self.StrTable += " " * start
                    self.StrTable += cell + ' '
                    self.StrTable += " " * (value_length - start - len(cell))
                else:
                    self.StrTable += " " * (value_length + 1)
            self.StrTable += "|" + '\n'
            self.print_divide()

    def __str__(self):
        # NOTE: rendering mutates internal state, so convert only once
        # per table instance.
        self.create_table()
        return self.StrTable
| Python | 0.000001 | |
d60c1f9a6e56472611a96779462b42e8505e7905 | Convert a PDF document to JPEG/PNG image via /pdftoimg endpoint | python/pdf_to_img.py | python/pdf_to_img.py | import requests
import json
# Convert a PDF document to JPEG/PNG image via /pdftoimg endpoint - https://pixlab.io/cmd?id=pdftoimg
req = requests.get('https://api.pixlab.io/pdftoimg',params={
'src':'https://www.getharvest.com/downloads/Invoice_Template.pdf',
'export': 'jpeg',
'key':'My_PixLab_Key'
})
reply = req.json()
if reply['status'] != 200:
print (reply['error'])
else:
print ("Link to the image output (Converted PDF page): "+ reply['link'])
| Python | 0.999999 | |
198b54c9ff796cc98cccfdc530f0111739901b0d | Create base-7.py | Python/base-7.py | Python/base-7.py | # Time: O(1)
# Space: O(1)
# Given an integer, return its base 7 string representation.
#
# Example 1:
# Input: 100
# Output: "202"
# Example 2:
# Input: -7
# Output: "-10"
# Note: The input will be in range of [-1e7, 1e7].
class Solution(object):
    """Iterative base-7 conversion."""

    def convertToBase7(self, num):
        """Return the base-7 string representation of *num*."""
        if num < 0:
            return '-' + self.convertToBase7(-num)
        digits = []
        while num:
            num, remainder = divmod(num, 7)
            digits.append(str(remainder))
        return ''.join(reversed(digits)) if digits else '0'
class Solution2(object):
    """Recursive base-7 conversion."""

    def convertToBase7(self, num):
        """
        :type num: int
        :rtype: str
        """
        if num < 0:
            return '-' + self.convertToBase7(-num)
        quotient, remainder = divmod(num, 7)
        if quotient == 0:
            return str(remainder)
        return self.convertToBase7(quotient) + str(remainder)
| Python | 0.000022 | |
50dc018891511ce34b4177a43cfcd678456444cf | test of quasiisothermaldf's meanvR | nose/test_qdf.py | nose/test_qdf.py | # Tests of the quasiisothermaldf module
import numpy
#fiducial setup uses these
from galpy.potential import MWPotential
from galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel
from galpy.df import quasiisothermaldf
aAA= actionAngleAdiabatic(pot=MWPotential,c=True)
aAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)
def test_meanvR_adiabatic_gl():
    """meanvR of the quasi-isothermal DF should vanish at R=0.9 for all z."""
    df = quasiisothermaldf(1./4., 0.2, 0.1, 1., 1.,
                           pot=MWPotential, aA=aAA, cutcounter=True)
    # Check the mid-plane first, then two heights above/below it.
    assert numpy.fabs(df.meanvR(0.9, 0., gl=True)) < 0.01, "qdf's meanvr is not equal to zero"
    assert numpy.fabs(df.meanvR(0.9, 0.2, gl=True)) < 0.01, "qdf's meanvr is not equal to zero"
    assert numpy.fabs(df.meanvR(0.9, -0.25, gl=True)) < 0.01, "qdf's meanvr is not equal to zero"
    return None
| Python | 0 | |
660e53fa4505782a2d1484cc0b6e598edc851df0 | Initialize P05_stylingExcel | books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py | books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py | # This program uses the OpenPyXL module to manipulate Excel documents
import openpyxl
from openpyxl.styles import Font, Style
wb = openpyxl.Workbook()
sheet = wb.get_sheet_by_name("Sheet")
italic24Font = Font(size=24, italic=True)
styleObj = Style(font=italic24Font)
sheet["A1"].style = styleObj
sheet["A1"] = "Hello world!"
wb.save("styled.xlsx")
| Python | 0.000002 | |
6e79ab6ca68252669055df12c333320bd0bda959 | Create obtainNytimes.py | obtainNytimes.py | obtainNytimes.py | import urllib.request, sys, re
from http.cookiejar import CookieJar
from docx import Document
def writeDoc(title, content):
    """Save *content* as a .docx document named after *title*."""
    document = Document()
    document.add_heading(title, 0)
    document.add_paragraph(content)
    document.save(title + '.docx')
def ObtainContent(pageContent):
    """Extract (title, body) from an NYT article page.

    Args:
        pageContent: an iterable of bytes lines (e.g. an HTTP response).

    Returns:
        A ``(title, paragraphs)`` tuple of strings.  ``title`` is '' when no
        headline element is found (the original raised NameError in that
        case), and ``paragraphs`` joins every story paragraph, each followed
        by a newline.

    The page is scanned in a single pass: the original iterated the
    response twice, which only worked by accident on one-shot file-like
    objects.  Tag stripping is done with a regex instead of the original
    character-by-character scan.
    """
    title = ''
    title_found = False
    paragraphs = []
    for ln in pageContent:
        if not title_found:
            mat = re.search(b'<h1 itemprop="headline" id="story-heading" class="story-heading">.*</h1>', ln)
            if mat:
                # Drop every tag, keeping only the visible headline text.
                title = re.sub(r'<[^>]*>', '', mat.group(0).decode('utf-8'))
                title_found = True
        # Collect every story paragraph on this line (non-greedy match so
        # adjacent paragraphs on one line stay separate).
        for m in re.findall(b'<p class="story-body-text story-content".*?</p>', ln):
            paragraphs.append(re.sub(r'<[^>]*>', '', m.decode('utf-8')) + '\n')
    return (title, ''.join(paragraphs))
def fetchWebPages(website):
    """Open *website* through an opener that keeps cookies across requests."""
    cookie_jar = CookieJar()
    cookie_handler = urllib.request.HTTPCookieProcessor(cookie_jar)
    opener = urllib.request.build_opener(cookie_handler)
    return opener.open(website)
def ObtainNyTimes():
    """Fetch the article URL given on the command line and save it as .docx."""
    response = fetchWebPages(sys.argv[1])
    title, paragraphs = ObtainContent(response)
    writeDoc(title, paragraphs)
    print("Fetch Your Newspaper Successfully..........")
if __name__ == "__main__":
ObtainNyTimes()
| Python | 0 | |
4e778d86670d4673cd591217d514a1f64dbc8424 | Add an OOP demo | oop/basic_oop.py | oop/basic_oop.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This script demonstrates how to use OOP in Python
Latest version can be found at https://github.com/letuananh/pydemo
References:
Classes in Python:
https://docs.python.org/2/tutorial/classes.html
@author: Le Tuan Anh <tuananh.ke@gmail.com>
'''
# Copyright (c) 2015, Le Tuan Anh <tuananh.ke@gmail.com>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
__author__ = "Le Tuan Anh <tuananh.ke@gmail.com>"
__copyright__ = "Copyright 2015, pydemo"
__credits__ = [ "Le Tuan Anh" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Le Tuan Anh"
__email__ = "<tuananh.ke@gmail.com>"
__status__ = "Prototype"
########################################################################
import logging
########################################################################
class Classroom:
    """A classroom: a course code plus the students enrolled in it."""

    def __init__(self, class_code):
        # Course code, e.g. "Philosophy 101".
        self.class_code = class_code
        # Students enrolled so far, in insertion order.
        self.students = []

    def add(self, student):
        """Enrol an existing student in this classroom."""
        self.students.append(student)

    def __repr__(self):
        """Detailed, debugging-oriented representation."""
        return "Classroom{{code='{}',Students={}}}".format(self.class_code, self.students)

    def __str__(self):
        """Short, human-friendly representation."""
        return "Classroom {}".format(self.class_code)
class Student:
    """A student with a display name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __repr__(self):
        """Detailed, debugging-oriented representation."""
        return "Student{name='" + str(self.name) + "',age=" + str(self.age) + "}"

    def __str__(self):
        """Short, human-friendly representation."""
        return "Student " + str(self.name)
#----------------------------------------------------------------------------
# Define the main method
#------------------------------------------------------------------------------
def main():
    '''The main entry of the application: build a classroom, enrol two
    students, and demonstrate str() vs repr() output.
    '''
    # Create a classroom
    c = Classroom("Philosophy 101")
    print("%s is created." % c)
    # ... now we create students
    descartes = Student("Rene Descartes", 419)
    nietzsche = Student("Friedrich Nietzsche", 171)
    print("%s is created." % descartes)
    print("%s is created." % nietzsche)
    # ... add the students to the classroom
    c.add(descartes)
    c.add(nietzsche)
    # Bonus: You can use repr to get deeper information, this can be useful for debugging
    print("-" * 20)
    print(repr(c))
    # logging.info only shows when the logging level is INFO or lower.
    logging.info(repr(c))
    pass
#------------------------------------------------------------------------------
# Check if this file is run as an application
#------------------------------------------------------------------------------
if __name__ == "__main__":
# If the condition is true, execute the main method
main()
| Python | 0.000001 | |
fb5ed0ea066c9bdb801a95e50d78529addffbed8 | add twitter url to video URL email | dj/scripts/email_url.py | dj/scripts/email_url.py | #!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
from email_ab import email_ab
class email_url(email_ab):
    """Email the posted-video URL to presenters (Django-templated body)."""

    # Episodes must have reached this state before the URL email is sent.
    ready_state = 7

    subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"

    # Rendered with the context returned by more_context(); the ep.state
    # branch asks for approval, the twitter_url branch announces the tweet.
    body_body = """
The video of your talk is posted:
{{url}}
{% if ep.state == 7 %}
Look at it, make sure the title is spelled right and the audio sounds reasonable.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
http://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
{% endif %}
{% if ep.twitter_url %}
It has been tweeted: {{ ep.twitter_url }}
Re-tweet it, blog it, whatever it. No point in making videos if no one watches them.
{% endif %}
    """

    py_name = "email_url.py"

    def more_context(self, ep):
        """Return extra template context for episode *ep*."""
        # If there is a Richard (pyvideo) url, use that;
        # else use the youtube url.
        url = ep.public_url or ep.host_url
        return {'url':url}
if __name__ == '__main__':
p=email_url()
p.main()
| #!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video of your talk is posted:
{{url}}
Look at it, make sure the title is spelled right, let me know if it is OK.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
http://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
"""
py_name = "email_url.py"
def more_context(self, ep):
# If there is a Richard (pyvideo) url, use that;
# else use the youtube url.
url = ep.public_url or ep.host_url
return {'url':url}
if __name__ == '__main__':
p=email_url()
p.main()
| Python | 0.000003 |
47a20435cf669d2fe0cdef1b82bbdac6a475ef5f | Add IMF to master | dlstats/fetchers/IMF.py | dlstats/fetchers/IMF.py | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 20 10:25:29 2015
@author: salimeh/ CEPREMAP
"""
from dlstats.fetchers._skeleton import Skeleton, Category, Series, Dataset, Provider
import urllib
import xlrd
import csv
import codecs
import datetime
import pandas
class IMF(Skeleton):
    """Fetcher for the IMF World Economic Outlook (WEO) dataset.

    The WEO "xls" download is really latin-1, tab-separated text.  It is
    downloaded once at construction time and its rows are cached so that
    the update methods can iterate them repeatedly -- the original shared
    one csv.DictReader, which was exhausted after the first pass, leaving
    later loops silently empty.
    """

    def __init__(self):
        super().__init__()
        self.response = urllib.request.urlopen(
            'http://www.imf.org/external/pubs/ft/weo/2014/01/weodata/WEOApr2014all.xls')
        reader = csv.DictReader(codecs.iterdecode(self.response, 'latin-1'),
                                delimiter='\t')
        rows = list(reader)
        # fieldnames is populated once the header row has been read, which
        # list(reader) guarantees.
        self.fieldnames = reader.fieldnames
        self.files_ = {'WEOApr2014all': rows}
        # Bug fix: the website URL contained a duplicated scheme
        # ('http://http://www.imf.org/').
        self.provider = Provider(name='IMF', website='http://www.imf.org/')
        # NOTE(review): getheaders()[3] assumes a fixed response-header
        # order and [5:] a fixed date prefix; kept as in the original --
        # verify against the live server.
        self.releaseDates_ = self.response.getheaders()[3][1]
        self.releaseDates = [datetime.datetime.strptime(
            self.releaseDates_[5:], "%d %b %Y %H:%M:%S GMT")]

    def _rows(self, datasetCode):
        """Return the cached rows for *datasetCode*; raise for unknown codes."""
        if datasetCode != 'WEO':
            raise Exception("The name of dataset was not entered!")
        return self.files_['WEOApr2014all']

    def update_selected_database(self, datasetCode):
        """Upsert one Dataset document per data row of the WEO sheet."""
        rows = self._rows(datasetCode)

        def uniques(field):
            # Ordered de-duplication of one column over all data rows.
            seen = []
            for row in rows:
                if row['Country'] and row[field] not in seen:
                    seen.append(row[field])
            return seen

        dimensionList = [
            {'name': 'WEO Country Code', 'values': uniques('WEO Country Code')},
            {'name': 'ISO', 'values': uniques('ISO')},
            {'name': 'country', 'values': uniques('Country')},
            {'name': 'Subject Notes', 'values': uniques('Subject Notes')},
            {'name': 'Units', 'values': uniques('Units')},
            {'name': 'Scale', 'values': uniques('Scale')},
            {'name': 'Country/Series-specific Notes',
             'values': uniques('Country/Series-specific Notes')}]
        for row in rows:
            # Rows with an empty Country column are trailing metadata.
            if row['Country']:
                document = Dataset(provider='IMF',
                                   name=row['Subject Descriptor'],
                                   datasetCode=datasetCode,
                                   lastUpdate=self.releaseDates,
                                   dimensionList=dimensionList)
                document.update_database()

    def upsert_categories(self):
        """Upsert the single WEO category document."""
        document = Category(provider='IMF',
                            name='WEO',
                            categoryCode='WEO')
        return document.update_database()

    def update_a_series(self, datasetCode):
        """Upsert one Series document per data row.

        Bug fixes relative to the original implementation:
        * ``value`` is rebuilt for every row; it used to accumulate across
          rows, so each series carried all previous series' observations;
        * the provider is 'IMF' (it was mistakenly 'WorldBank').
        """
        rows = self._rows(datasetCode)
        # Year columns sit between the fixed leading metadata columns and a
        # trailing estimates column.
        years = self.fieldnames[9:-1]
        period_index = pandas.period_range(years[0], years[-1], freq='annual')
        for row in rows:
            if row['Country']:
                key = 'WEO_' + row['WEO Subject Code']
                value = [row[year] for year in years]
                dimensions = [
                    {'name': 'WEO Country Code', 'values': row['WEO Country Code']},
                    {'name': 'ISO', 'values': row['ISO']},
                    {'name': 'country', 'values': row['Country']},
                    {'name': 'Subject Notes', 'values': row['Subject Notes']},
                    {'name': 'Units', 'values': row['Units']},
                    {'name': 'Scale', 'values': row['Scale']},
                    {'name': 'Country/Series-specific Notes',
                     'values': row['Country/Series-specific Notes']}]
                document = Series(provider='IMF',
                                  name=row['Subject Descriptor'],
                                  key=key,
                                  datasetCode='WEO',
                                  values=value,
                                  period_index=period_index,
                                  releaseDates=self.releaseDates,
                                  dimensions=dimensions)
                document.update_database(key=key)
| Python | 0.000039 | |
b47369d43a0a85ac2bc32bfa77c6a4d9074ce700 | Add basic test case for retrieve_dns module | test/test_retrieve_dns.py | test/test_retrieve_dns.py | import logging
import os
import tempfile
import unittest
import mock
import bin.retrieve_dns
logging.basicConfig(level=logging.INFO)
class RetrieveDnsTestCase(unittest.TestCase):
    """Basic tests for bin.retrieve_dns with logging/config/XML mocked out."""

    def setUp(self):
        # Mock out logging
        mock.patch('bin.retrieve_dns.set_up_logging', autospec=True).start()
        # Mock out config
        mock_config = mock.patch('bin.retrieve_dns.get_config', autospec=True).start()
        # Mock out retrieving xml
        self.mock_xml = mock.patch('bin.retrieve_dns.get_xml', autospec=True).start()
        # Set up temp files
        # NOTE(review): os.write with a str is Python 2 only; under
        # Python 3 this would need a bytes literal.
        self.files = {}
        for item in ('dn', 'extra', 'ban'):
            self.files[item] = dict(zip(('handle', 'path'), tempfile.mkstemp()))
            os.write(self.files[item]['handle'], '/wobble')
        for item in self.files.values():
            os.close(item['handle'])
        # Set up config using temp files
        c = bin.retrieve_dns.Configuration()
        c.dn_file = self.files['dn']['path']
        c.extra_dns = self.files['extra']['path']
        c.banned_dns = self.files['ban']['path']
        mock_config.return_value = c

    def test_basics(self):
        # A single HOSTDN in the XML should end up as one line in dn_file.
        self.mock_xml.return_value = "<HOSTDN>/wibble</HOSTDN>"
        bin.retrieve_dns.runprocess("fakefile", "fakefile")
        dns = open(self.files['dn']['path'])
        self.assertEqual(dns.read(), '/wibble\n')
        dns.close()

    def tearDown(self):
        # Delete temp files
        for item in self.files.values():
            os.remove(item['path'])
        mock.patch.stopall()
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
4f9aaa4809c0ff083393088e205a48c3197b46f2 | add forgoten '\n' in `Usage` output | platform/platform-resources/src/launcher.py | platform/platform-resources/src/launcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface
# These placeholders are substituted with real paths when the launcher
# script is generated at IDE-build time.
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'

# Normalize the command line: recognized flags/subcommands are passed
# through, file arguments are made absolute, and a trailing ":<line>"
# suffix becomes an explicit -l <line> pair.
args = []
skip_next = False
for i, arg in enumerate(sys.argv[1:]):
    if arg == '-h' or arg == '-?' or arg == '--help':
        print(('Usage:\n' +
               ' {0} -h |-? | --help\n' +
               ' {0} [-l|--line line] file[:line]\n' +
               ' {0} diff <left> <right>\n' +
               ' {0} merge <local> <remote> [base] <merged>').format(sys.argv[0]))
        exit(0)
    elif arg == 'diff' and i == 0:
        args.append(arg)
    elif arg == 'merge' and i == 0:
        args.append(arg)
    elif arg == '-l' or arg == '--line':
        args.append(arg)
        # The next argument is the line number operand of -l/--line.
        skip_next = True
    elif skip_next:
        args.append(arg)
        skip_next = False
    else:
        if ':' in arg:
            file_path, line_number = arg.rsplit(':', 1)
            if line_number.isdigit():
                args.append('-l')
                args.append(line_number)
                args.append(os.path.abspath(file_path))
            else:
                # The colon was part of the file name, not a line suffix.
                args.append(os.path.abspath(arg))
        else:
            args.append(os.path.abspath(arg))
def launch_with_port(port):
    """Try to hand the command line over to an IDE instance on *port*.

    Connects to the local SocketLock server, reads the config paths it
    announces, and -- if one of them matches CONFIG_PATH -- sends the
    prepared ``args`` as an "activate" command.

    Returns:
        True when a matching instance was found (and activated), False
        otherwise.
    """
    found = False
    s = socket.socket()
    s.settimeout(0.3)
    try:
        s.connect(('127.0.0.1', port))
    # Bug fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; narrow it so Ctrl-C still works.
    except Exception:
        return False
    while True:
        try:
            path_len = struct.unpack(">h", s.recv(2))[0]
            path = s.recv(path_len)
            if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
                found = True
                break
        except Exception:
            # Malformed frame or closed stream: stop scanning paths.
            break
    if found:
        if args:
            cmd = "activate " + os.getcwd() + "\0" + "\0".join(args)
            encoded = struct.pack(">h", len(cmd)) + cmd
            s.send(encoded)
            time.sleep(0.5)  # don't close socket immediately
        return True
    return False
port = -1

# The running IDE writes its SocketLock port into CONFIG_PATH/port.
try:
    f = open(os.path.join(CONFIG_PATH, 'port'))
    port = int(f.read())
except Exception:
    # NOTE(review): these assignments shadow the builtins type/value and
    # the file `f` is never closed; kept as-is to preserve behavior.
    type, value, traceback = sys.exc_info()
    print('No IDE instance has been found. New one will be started.')
    port = -1

if port == -1:
    # SocketLock actually allows up to 50 ports, but the checking takes too long
    for port in range(6942, 6942 + 10):
        if launch_with_port(port):
            exit()
else:
    if launch_with_port(port):
        exit()

# No running instance accepted the command: start a fresh IDE, replacing
# this process.
if sys.platform == "darwin":
    # OS X: RUN_PATH is *.app path
    if len(args):
        args.insert(0, "--args")
    os.execvp("open", ["-a", RUN_PATH] + args)
else:
    # unix common
    bin_dir, bin_file = os.path.split(RUN_PATH)
    os.execv(RUN_PATH, [bin_file] + args)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'
args = []
skip_next = False
for i, arg in enumerate(sys.argv[1:]):
if arg == '-h' or arg == '-?' or arg == '--help':
print(('Usage:\n' +
' {0} -h |-? | --help\n' +
' {0} [-l|--line line] file[:line]\n' +
' {0} diff <left> <right>' +
' {0} merge <local> <remote> [base] <merged>').format(sys.argv[0]))
exit(0)
elif arg == 'diff' and i == 0:
args.append(arg)
elif arg == 'merge' and i == 0:
args.append(arg)
elif arg == '-l' or arg == '--line':
args.append(arg)
skip_next = True
elif skip_next:
args.append(arg)
skip_next = False
else:
if ':' in arg:
file_path, line_number = arg.rsplit(':', 1)
if line_number.isdigit():
args.append('-l')
args.append(line_number)
args.append(os.path.abspath(file_path))
else:
args.append(os.path.abspath(arg))
else:
args.append(os.path.abspath(arg))
def launch_with_port(port):
found = False
s = socket.socket()
s.settimeout(0.3)
try:
s.connect(('127.0.0.1', port))
except:
return False
while True:
try:
path_len = struct.unpack(">h", s.recv(2))[0]
path = s.recv(path_len)
if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
found = True
break
except:
break
if found:
if args:
cmd = "activate " + os.getcwd() + "\0" + "\0".join(args)
encoded = struct.pack(">h", len(cmd)) + cmd
s.send(encoded)
time.sleep(0.5) # don't close socket immediately
return True
return False
port = -1
try:
f = open(os.path.join(CONFIG_PATH, 'port'))
port = int(f.read())
except Exception:
type, value, traceback = sys.exc_info()
print('No IDE instance has been found. New one will be started.')
port = -1
if port == -1:
# SocketLock actually allows up to 50 ports, but the checking takes too long
for port in range(6942, 6942 + 10):
if launch_with_port(port):
exit()
else:
if launch_with_port(port):
exit()
if sys.platform == "darwin":
# OS X: RUN_PATH is *.app path
if len(args):
args.insert(0, "--args")
os.execvp("open", ["-a", RUN_PATH] + args)
else:
# unix common
bin_dir, bin_file = os.path.split(RUN_PATH)
os.execv(RUN_PATH, [bin_file] + args)
| Python | 0.003142 |
b97a9571478dc8c919e072734816b412dadc0da9 | Add maths plugin | plugins/maths.py | plugins/maths.py | import io
import unittest
from sympy.parsing import sympy_parser
class Plugin:
    """Bot plugin that evaluates a maths expression with sympy."""

    def on_command(self, bot, msg, stdin, stdout, reply):
        # Everything after the command word is the expression text.
        raw = " ".join(msg["args"][1:])
        parsed = sympy_parser.parse_expr(raw)
        print(parsed.evalf(), file=stdout)

    def on_help(self):
        return "Perform maths expressions."
class Test(unittest.TestCase):
    """Unit tests for the maths Plugin."""

    def setUp(self):
        self.plugin = Plugin()

    def test_command(self):
        # Evaluate a grid of products and check the printed result matches.
        for a in range(1, 1000, 50):
            for b in range(1, 1000, 50):
                stdout = io.StringIO()
                self.plugin.on_command(None, {"args": [None, str(a) + "*" + str(b)]}, None, stdout, None)
                self.assertEqual(int(float(stdout.getvalue().strip())), a * b)

    def test_help(self):
        # The help text must be non-empty.
        self.assertTrue(self.plugin.on_help())
| Python | 0.000001 | |
932fc681e1e1b79a28d03a480c19869fc0a6956c | Add state module to manage InfluxDB users | salt/states/influxdb_user.py | salt/states/influxdb_user.py | # -*- coding: utf-8 -*-
'''
Management of InfluxDB users
============================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
    '''
    Only load if the influxdb module is available
    '''
    return 'influxdb_user' if 'influxdb.db_exists' in __salt__ else False
def _changes(name, admin):
    '''
    Get necessary changes to given user account
    '''
    existing_user = __salt__['influxdb.user_info'](name)
    if existing_user['admin'] != admin:
        return {'admin': admin}
    return {}
def present(name,
            password,
            admin=False,
            **client_args):
    '''
    Ensure that given user is present.

    name
        Name of the user to manage

    password
        Password of the user

    admin : False
        Whether the user should have cluster administration
        privileges or not.
    '''
    # Standard salt state return structure.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'User {0} is present and up to date'.format(name)}
    if not __salt__['influxdb.user_exists'](name, **client_args):
        # User does not exist yet: create it (or just report in test mode).
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'User {0} will be created'.format(name)
            return ret
        else:
            if __salt__['influxdb.create_user'](
                    name, password, admin=admin, **client_args):
                ret['comment'] = 'Created user {0}'.format(name)
                ret['changes'][name] = 'created'
                return ret
            else:
                ret['comment'] = 'Failed to create user {0}'.format(name)
                ret['result'] = False
                return ret
    else:
        # User exists: reconcile the admin flag.
        changes = _changes(name, admin)
        if changes:
            if __opts__['test']:
                ret['result'] = None
                ret['comment'] = ('The following user attributes are set to '
                                  'be changed:\n')
                for k, v in changes.items():
                    ret['comment'] += u'{0}: {1}\n'.format(k, v)
                return ret
            else:
                # Snapshot before/after so only actual differences are
                # reported in ret['changes'].
                pre = __salt__['influxdb.user_info'](name)
                for k, v in changes.items():
                    if k == 'admin':
                        # NOTE(review): client_args are not forwarded to the
                        # grant/revoke calls -- confirm the execution module
                        # picks up connection settings from config.
                        if v:
                            __salt__['influxdb.grant_admin_privileges'](name)
                            continue
                        else:
                            __salt__['influxdb.revoke_admin_privileges'](name)
                            continue
                post = __salt__['influxdb.user_info'](name)
                for k in post:
                    if post[k] != pre[k]:
                        ret['changes'][k] = post[k]
                if ret['changes']:
                    ret['comment'] = 'Updated user {0}'.format(name)
    return ret
return ret
def absent(name, **client_args):
    '''
    Ensure that given user is absent.

    name
        The name of the user to manage
    '''
    # Standard salt state return structure.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'User {0} is not present'.format(name)}
    if __salt__['influxdb.user_exists'](name, **client_args):
        # User exists: remove it (or just report in test mode).
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'User {0} will be removed'.format(name)
            return ret
        else:
            if __salt__['influxdb.remove_user'](name, **client_args):
                ret['comment'] = 'Removed user {0}'.format(name)
                ret['changes'][name] = 'removed'
                return ret
            else:
                ret['comment'] = 'Failed to remove user {0}'.format(name)
                ret['result'] = False
                return ret
    return ret
| Python | 0 | |
8ab44294c0dd7b95102bfa1d9e8437067813cd0f | Add basic document parser | vc2xlsx/doc_parser.py | vc2xlsx/doc_parser.py | import parsley
class Goto (object):
    """Cursor-movement command: jump to the cell at column *x*, row *y*."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return "Goto({!r}, {!r})".format(self.x, self.y)
class Entry (object):
    """A literal cell entry (number, label, or formula text)."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "Entry({!r})".format(self.value)
class Menu (object):
    """A menu command (the text following a leading '/')."""

    def __init__(self, command):
        self.command = command

    def __repr__(self):
        return "Menu({!r})".format(self.command)
# Parsley grammar for the VisiCalc-style command stream: a document is a
# sequence of goto (">A1:"), menu ("/...") and cell-entry commands,
# separated by newlines; empty lines are dropped by the `if x` filter.
_grammar = parsley.makeGrammar(r"""
document = command*:c -> tuple(x for x in c if x)
command = goto_command | menu_command | entry_command | nl
goto_command = '>' <letter+>:x <digit+>:y (':' | nl) -> Goto(x, y)
entry_command = <(letter | digit | '"' | '\'' | '+' | '-' | '(' | '#' | '@') not_nl*>:value -> Entry(value)
menu_command = '/' <(letter | '-') (letter | digit | '$' | '*')*>:command -> Menu(command)
nl = ('\r'? '\n' | '\r') -> None
not_nl = anything:x ?(x not in '\r\n') -> x
""", globals())
def parse(value):
    """Parse *value* into a tuple of command nodes, ignoring trailing padding."""
    stripped = value.rstrip('\0\r\n\t ')
    return _grammar(stripped).document()
if __name__ == "__main__":
    # CLI entry point: parse the file named on the command line and dump
    # the resulting command tuple for manual inspection.
    import sys
    with open(sys.argv[1]) as f:
        result = parse(f.read())
    print(repr(result))
| Python | 0.000004 | |
01bcda4326dc0204798f268bb1c60f06526aaba3 | add freebsd shadow module | salt/modules/freebsd_shadow.py | salt/modules/freebsd_shadow.py | '''
Manage the password database on FreeBSD systems
'''
# Import python libs
import os
try:
import pwd
except ImportError:
pass
# Import salt libs
import salt.utils
def __virtual__():
    '''
    Expose this module as `shadow`, but only on FreeBSD hosts.
    '''
    if __grains__.get('os', '') == 'FreeBSD':
        return 'shadow'
    return False
def info(name):
    '''
    Return information for the specified user

    CLI Example::

        salt '*' shadow.info root
    '''
    try:
        data = pwd.getpwnam(name)
    except KeyError:
        # Unknown user: every field comes back empty.
        return {
            'name': '',
            'passwd': '',
            'change': '',
            'expire': ''}
    passwd = data.pw_passwd
    if passwd == '*':
        # '*' means the account has no usable password hash.
        passwd = ''
    ret = {
        'name': data.pw_name,
        'passwd': passwd,
        'change': '',
        'expire': ''}
    # Password aging info lives in fields 6 and 7 of `pw user show` output.
    cmd = 'pw user show {0} | cut -f6,7 -d:'.format(name)
    try:
        change, expire = __salt__['cmd.run_all'](cmd)['stdout'].split(':')
    except ValueError:
        # Unexpected output shape: leave the aging fields empty.
        pass
    else:
        ret['change'] = change
        ret['expire'] = expire
    return ret
def set_password(name, password):
    '''
    Set the password for a named user. The password must be a properly defined
    hash. The password hash can be generated with this command:

    ``python -c "import crypt; print crypt.crypt('password',
    '$6$SALTsalt')"``

    ``SALTsalt`` is the 8-character cryptographic salt. Valid characters in the
    salt are ``.``, ``/``, and any alphanumeric character.

    Keep in mind that the $6 represents a sha512 hash, if your OS is using a
    different hashing algorithm this needs to be changed accordingly

    CLI Example::

        salt '*' shadow.set_password root '$1$UYCIxa628.9qXjpQCjM4a..'
    '''
    # '-H 0' tells pw(8) to read the pre-computed hash from stdin (fd 0),
    # so the hash never appears on the command line.
    __salt__['cmd.run']('pw user mod {0} -H 0'.format(name), stdin=password)
    # Confirm the change by re-reading the password database.
    uinfo = info(name)
    return uinfo['passwd'] == password
| Python | 0 | |
9dab373023fa6b7767cd7555a533161752205eda | Test a weighted affine solver. | scripts/0-weighted-affine.py | scripts/0-weighted-affine.py | #!/usr/bin/python
import sys
sys.path.append('../lib')
import transformations
# Matched point sets given as [xs, ys] column lists:
# v0 = source quad corners, v1 = corresponding destination corners.
v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
# Per-point weights for the weighted solver; the commented-out line is the
# uniform-weight baseline kept for reference.
#weights = [1.0, 1.0, 1.0, 1.0]
weights = [0.1, 0.01, 0.1, 0.2]
# Compare the plain and weighted affine fits (shear disabled in both).
print "original"
print transformations.affine_matrix_from_points(v0, v1, shear=False)
print "weighted"
print transformations.affine_matrix_from_points_weighted(v0, v1, weights, shear=False)
| Python | 0.001095 | |
3cb76ce0473c8598e1672828df152f164a6951c3 | Add os_recordset module (#2240) | lib/ansible/modules/extras/cloud/openstack/os_recordset.py | lib/ansible/modules/extras/cloud/openstack/os_recordset.py | #!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
DOCUMENTATION = '''
---
module: os_recordset
short_description: Manage OpenStack DNS recordsets
extends_documentation_fragment: openstack
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
- Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
updated. Only the I(records), I(description), and I(ttl) values
can be updated.
options:
zone:
description:
- Zone managing the recordset
required: true
name:
description:
- Name of the recordset
required: true
recordset_type:
description:
- Recordset type
required: true
records:
description:
- List of recordset definitions
required: true
description:
description:
- Description of the recordset
required: false
default: None
ttl:
description:
- TTL (Time To Live) value in seconds
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a recordset named "www.example.net."
- os_recordset:
cloud: mycloud
state: present
zone: example.net.
name: www
recordset_type: primary
records: ['10.1.1.1']
description: test recordset
ttl: 3600
# Update the TTL on existing "www.example.net." recordset
- os_recordset:
cloud: mycloud
state: present
zone: example.net.
name: www
ttl: 7200
# Delete recorset named "www.example.net."
- os_recordset:
cloud: mycloud
state: absent
zone: example.net.
name: www
'''
RETURN = '''
recordset:
description: Dictionary describing the recordset.
returned: On success when I(state) is 'present'.
type: dictionary
contains:
id:
description: Unique recordset ID
type: string
sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
name:
description: Recordset name
type: string
sample: "www.example.net."
zone_id:
description: Zone id
type: string
sample: 9508e177-41d8-434e-962c-6fe6ca880af7
type:
description: Recordset type
type: string
sample: "A"
description:
description: Recordset description
type: string
sample: "Test description"
ttl:
description: Zone TTL value
type: int
sample: 3600
records:
description: Recordset records
type: list
sample: ['10.0.0.1']
'''
def _system_state_change(state, records, description, ttl, zone, recordset):
if state == 'present':
if recordset is None:
return True
if records is not None and recordset.records != records:
return True
if description is not None and recordset.description != description:
return True
if ttl is not None and recordset.ttl != ttl:
return True
if state == 'absent' and recordset:
return True
return False
def main():
    """Ansible entry point: converge an OpenStack DNS recordset to the
    requested state, exiting via module.exit_json / module.fail_json."""
    argument_spec = openstack_full_argument_spec(
        zone=dict(required=True),
        name=dict(required=True),
        recordset_type=dict(required=False),
        records=dict(required=False, type='list'),
        description=dict(required=False, default=None),
        ttl=dict(required=False, default=None, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs()
    # recordset_type and records are only mandatory when creating (state=present).
    module = AnsibleModule(argument_spec,
                           required_if=[
                               ('state', 'present',
                                ['recordset_type', 'records'])],
                           supports_check_mode=True,
                           **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    # Recordset support requires a shade newer than 1.8.0.
    if StrictVersion(shade.__version__) <= StrictVersion('1.8.0'):
        module.fail_json(msg="To utilize this module, the installed version of "
                             "the shade library MUST be >1.8.0")
    zone = module.params.get('zone')
    name = module.params.get('name')
    state = module.params.get('state')
    try:
        cloud = shade.openstack_cloud(**module.params)
        # Recordsets are addressed by their FQDN within the zone.
        recordset = cloud.get_recordset(zone, name + '.' + zone)
        if state == 'present':
            recordset_type = module.params.get('recordset_type')
            records = module.params.get('records')
            description = module.params.get('description')
            ttl = module.params.get('ttl')
            if module.check_mode:
                # Check mode: report whether a change would happen, do nothing.
                module.exit_json(changed=_system_state_change(state,
                                                              records, description,
                                                              ttl, zone,
                                                              recordset))
            if recordset is None:
                recordset = cloud.create_recordset(
                    zone=zone, name=name, recordset_type=recordset_type,
                    records=records, description=description, ttl=ttl)
                changed = True
            else:
                if records is None:
                    records = []
                pre_update_recordset = recordset
                changed = _system_state_change(state, records,
                                               description, ttl,
                                               zone, pre_update_recordset)
                if changed:
                    # NOTE(review): the update result is assigned to `zone`,
                    # clobbering the zone name, and the recordset returned
                    # below is the pre-update one -- presumably unintended;
                    # worth confirming.
                    zone = cloud.update_recordset(
                        zone, name + '.' + zone,
                        records=records,
                        description=description,
                        ttl=ttl)
            module.exit_json(changed=changed, recordset=recordset)
        elif state == 'absent':
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              None, None,
                                                              None,
                                                              None, recordset))
            if recordset is None:
                changed=False
            else:
                cloud.delete_recordset(zone, name + '.' + zone)
                changed=True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| Python | 0 | |
962ecc4bda24a7dcb0ec75f49649853149f7e88d | Update to the tide utilities to work with the new .csv files | Module/tide_utils.py | Module/tide_utils.py | import os
import glob
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
def load_Neah_Bay(datadir):
    """
    Load the Neah Bay tidal station data for 2014-2016 into one dataframe
    with the 'Date Time' column parsed and renamed to 'datetime'.

    datadir is the directory path to where the data is located.
    Returns None if no *NeahBay.csv files are found there.
    """
    if not glob.glob(os.path.join(datadir, '*NeahBay.csv')):
        return None
    # Read each yearly file and concatenate them.  DataFrame.append is
    # deprecated (removed in pandas 2.0), so use pd.concat instead.
    # os.path.join also works whether or not datadir ends with a '/'.
    frames = [
        pd.read_csv(os.path.join(datadir, '{0}_NeahBay.csv'.format(year)),
                    parse_dates=['Date Time'])
        for year in (2014, 2015, 2016)
    ]
    NeahBay = pd.concat(frames)
    NeahBay.rename(columns={'Date Time': 'datetime'}, inplace=True)
    return NeahBay
def load_Port_Townsend(datadir):
    """
    Load the Port Townsend tidal station data for 2014-2016 into one
    dataframe with the 'Date Time' column parsed and renamed 'datetime'.

    datadir is the directory where the csv files are saved.
    Returns None if no *PortTownsend.csv files are located there.
    """
    if not glob.glob(os.path.join(datadir, '*PortTownsend.csv')):
        return None
    # Concatenate the yearly files; pd.concat replaces the deprecated
    # DataFrame.append chain, and os.path.join tolerates a datadir with
    # or without a trailing slash.
    frames = [
        pd.read_csv(os.path.join(datadir, '{0}_PortTownsend.csv'.format(year)),
                    parse_dates=['Date Time'])
        for year in (2014, 2015, 2016)
    ]
    PortTownsend = pd.concat(frames)
    PortTownsend.rename(columns={'Date Time': 'datetime'}, inplace=True)
    return PortTownsend
def load_Port_Angeles(datadir):
    """
    Load the Port Angeles tidal station data for 2014-2016 into one
    dataframe with the 'Date Time' column parsed and renamed 'datetime'.

    datadir is the directory where the csv files are saved.
    Returns None if no *PortAngeles.csv files are located there.
    """
    if not glob.glob(os.path.join(datadir, '*PortAngeles.csv')):
        return None
    # Concatenate the yearly files; pd.concat replaces the deprecated
    # DataFrame.append chain, and os.path.join tolerates a datadir with
    # or without a trailing slash.
    frames = [
        pd.read_csv(os.path.join(datadir, '{0}_PortAngeles.csv'.format(year)),
                    parse_dates=['Date Time'])
        for year in (2014, 2015, 2016)
    ]
    PortAngeles = pd.concat(frames)
    PortAngeles.rename(columns={'Date Time': 'datetime'}, inplace=True)
    return PortAngeles
def load_tide_data(datadir):
    """
    Top-level loader for all three tide stations.

    datadir is the directory where the station .csv files are saved.
    Returns the (NeahBay, PortAngeles, PortTownsend) dataframes, or None
    when any station's files are missing.
    """
    stations = (load_Neah_Bay(datadir),
                load_Port_Angeles(datadir),
                load_Port_Townsend(datadir))
    if any(station is None for station in stations):
        return None
    return stations
def create_tide_dataset(NeahBay, PortAngeles, PortTownsend):
    """
    Bundle the three station dataframes into a single xarray Dataset keyed
    by station name, with water level along a shared 'datetime' dimension.
    """
    frames = {'NeahBay': NeahBay,
              'PortAngeles': PortAngeles,
              'PortTownsend': PortTownsend}
    arrays = {station: xr.DataArray(df['Water Level'], dims='datetime')
              for station, df in frames.items()}
    return xr.Dataset(arrays)
def plot_tide_data(dt):
    """
    Plot the three tidal stations for the loaded time period, with a red
    marker on each axis showing the sample selected by the widget slider
    index *dt*.

    NOTE(review): this relies on module-level DataArrays NB, PA and PT
    that are not defined anywhere in this file -- presumably created in a
    notebook before calling this.  Confirm, otherwise it raises NameError.
    """
    fig, axes = plt.subplots(nrows=3)
    # Neah Bay on the top axis; highlight the slider-selected sample.
    NB.plot(ax=axes[0])
    axes[0].scatter(x=NB.datetime.values[dt], y=NB.values[dt],
                    color="red", s=100)
    axes[0].grid()
    # Port Angeles in the middle (x values taken from the NB time axis).
    PA.plot(ax=axes[1])
    axes[1].scatter(x=NB.datetime.values[dt], y=PA.values[dt],
                    color="red", s=100)
    axes[1].grid()
    # Port Townsend on the bottom.
    PT.plot(ax=axes[2])
    axes[2].scatter(x=NB.datetime.values[dt], y=PT.values[dt],
                    color="red", s=100)
    axes[2].grid()
def plot_tidal_elevation(slide):
    """
    Plot the three stations' tidal elevations at the instant selected by
    the interactive *slide* widget (uses slide.value as the sample index).

    NOTE(review): depends on module-level DataArrays NB, PA and PT that
    are not defined in this file, and the bare except below silently
    swallows every error (including the resulting NameError) -- confirm
    this best-effort behaviour is intentional.
    """
    try:
        fig, axes = plt.subplots(nrows=1, ncols=1)
        # Get each station's tidal elevation based on the widget slider
        NBelev = NB.values[slide.value]
        PAelev = PA.values[slide.value]
        PTelev = PT.values[slide.value]
        # Create dummy x-values
        x = (1, 2, 3)
        y = (NBelev, PAelev, PTelev)
        # Create the figure with station labels
        plt.scatter(x, y, s=100, color="red", zorder=2)
        plt.plot(x, y, 'b', zorder=1)
        plt.xticks(x, ['Neah Bay', 'Port Angeles', 'Port Townsend'],
                   rotation='vertical')
        plt.grid()
        plt.ylabel('Tidal Elevation (m)')
    except:
        return None
| Python | 0 | |
260e0ef2bc37750dccea47d30110221c272e757a | Add script for automating analysis for all corpora | run_all_corpora.py | run_all_corpora.py | import os
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument("corpusdir", help = "Path to the directory containing corpus directories")
parser.add_argument("script", help = "name of the script to be run")
args = parser.parse_args()
## lists of corpora to skip
## and failed to run
skipped = []
failed = []
## first check that the script exists
assert(os.path.isfile(args.script), "{} should be a script that exists".format(args.script))
## loop through files in the directory
for corpus in os.listdir(args.corpusdir):
## check if the file is actually a directory since that is the expected format for the
## analysis scripts
if os.path.isdir(corpus):
if corpus in skipped:
continue
try:
print("Processing {}".format(corpus))
## first reset the corpus
subprocess.call(['python', 'reset_database.py', corpus])
## run the script on the corpus
subprocess.call(['python', args.script, corpus, "-s"])
except:
failed.append(corpus)
continue
print("Complete!")
print("Following corpora were not run: {}" failed)
| Python | 0 | |
593941ec42918a389a348a5d35e8c5033bb34e73 | Add 8ball plugin | plugins/ball8.py | plugins/ball8.py | import random
from plugin import CommandPlugin, PluginException
class Ball8(CommandPlugin):
    """
    Magic 8-ball command: answers a question with a random canned reply
    (by javipepe :))
    """
    def __init__(self, bot):
        CommandPlugin.__init__(self, bot)
        self.triggers = ['8ball']
        self.short_help = 'Ask me a question'
        self.help = 'Ask me a question, I\'ll decide what the answer should be. Based on https://en.wikipedia.org/wiki/Magic_8-Ball'
        self.help_example = ['!8ball Is linux better than windows?']
        # ^ obviously yes.
    def on_command(self, event, response):
        """Post a random 8-ball answer; the message text must end in '?'."""
        args = event['text']
        # endswith('?') replaces the original args[-1:].__contains__('?'),
        # which was an awkward spelling of the same check.
        if not args or not args.endswith('?'):
            raise PluginException('Invalid argument! Ask me a question!')
        # The 20 standard Magic 8-Ball answers (typo "Do't" fixed to "Don't").
        possible_answers = ['It is certain', 'It is decidedly so', 'Without a doubt', 'Yes, definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes', 'Reply hazy try again', 'Ask again later', 'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again', 'Don\'t count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful']
        response['text'] = ':8ball: says *_%s_*!' % random.choice(possible_answers)
        self.bot.sc.api_call('chat.postMessage', **response)
| Python | 0 | |
d22ca6dbf7e8aa98b0f580b7972e157894925365 | Fix test output for combining filename and extension | tests/test_auto_moving.py | tests/test_auto_moving.py | import os
import shutil
from nose.tools import assert_equal
from .base import BaseTest
class TestAutoMoving(BaseTest):
    """Tests for automatic organising: built paths must land inside the
    organise folder as <organised>/<show>/Season <n>/<episode file>."""
    # Every test builds paths with organise mode switched on.
    organise = True
    def teardown(self):
        # Wipe and recreate the organise folder so each test starts clean.
        super(TestAutoMoving, self).teardown()
        shutil.rmtree(self.organised)
        os.mkdir(self.organised)
    def test_using_organise_uses_the_specified_organise_folder(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Everything above the last three path components (show/season/file)
        # should be the configured organise directory.
        organise_dir = os.path.join('/', *path.split('/')[:-3])
        assert_equal(self.organised, organise_dir)
    def test_using_organise_uses_the_correct_show_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Third component from the end is the show folder (variable is
        # misnamed 'season_dir' -- it actually holds the show directory).
        season_dir = path.split('/')[-3:][0]
        assert_equal(season_dir, self._file.show_name)
    def test_using_organise_uses_the_correct_season_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Second component from the end is the season folder.
        season_dir = path.split('/')[-2:][0]
        assert_equal(season_dir, 'Season {0}'.format(self._file.season))
    def test_using_organise_uses_the_correct_filename(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # The part after ' - ' is title plus extension; joined with ''
        # because the extension presumably includes its leading dot --
        # TODO confirm against EpisodeFile.
        filename = path.split('/')[-1:][0].split(' - ')[-1:][0]
        assert_equal(filename, ''.join([self._file.episodes[0].title, self._file.extension]))
    def test_moving_the_leading_the_to_the_end_of_a_show_name_causes_the_show_folder_name_to_follow_suit_when_using_organise(self):
        show_name = 'Big Bang Theory, The'
        self._file.show_name = show_name
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        show_dir = path.split('/')[-3:][0]
        assert_equal(show_dir, show_name)
| import os
import shutil
from nose.tools import assert_equal
from .base import BaseTest
class TestAutoMoving(BaseTest):
    """Tests for automatic organising: built paths must land inside the
    organise folder as <organised>/<show>/Season <n>/<episode file>."""
    # Every test builds paths with organise mode switched on.
    organise = True
    def teardown(self):
        # Wipe and recreate the organise folder so each test starts clean.
        super(TestAutoMoving, self).teardown()
        shutil.rmtree(self.organised)
        os.mkdir(self.organised)
    def test_using_organise_uses_the_specified_organise_folder(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Everything above the last three path components (show/season/file)
        # should be the configured organise directory.
        organise_dir = os.path.join('/', *path.split('/')[:-3])
        assert_equal(self.organised, organise_dir)
    def test_using_organise_uses_the_correct_show_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Third component from the end is the show folder (variable is
        # misnamed 'season_dir' -- it actually holds the show directory).
        season_dir = path.split('/')[-3:][0]
        assert_equal(season_dir, self._file.show_name)
    def test_using_organise_uses_the_correct_season_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # Second component from the end is the season folder.
        season_dir = path.split('/')[-2:][0]
        assert_equal(season_dir, 'Season {0}'.format(self._file.season))
    def test_using_organise_uses_the_correct_filename(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        # The part after ' - ' is "title.extension"; joining with '.'
        # assumes the extension has no leading dot -- TODO confirm against
        # EpisodeFile.
        filename = path.split('/')[-1:][0].split(' - ')[-1:][0]
        assert_equal(filename, '.'.join([self._file.episodes[0].title, self._file.extension]))
    def test_moving_the_leading_the_to_the_end_of_a_show_name_causes_the_show_folder_name_to_follow_suit_when_using_organise(self):
        show_name = 'Big Bang Theory, The'
        self._file.show_name = show_name
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        show_dir = path.split('/')[-3:][0]
        assert_equal(show_dir, show_name)
98c658822cf6782ca0907ab7a68691922e701aa6 | Add unittest for pytesseract | tests/test_pytesseract.py | tests/test_pytesseract.py | import unittest
import io
import pytesseract
import numpy as np
from wand.image import Image as wandimage
class TestPytesseract(unittest.TestCase):
    """Smoke test: render a PDF page to PNG and OCR it with pytesseract."""
    def test_tesseract(self):
        # Imported here because Pillow is only needed by this test; the
        # original referenced ``PILImage`` without ever importing it
        # (NameError at runtime).
        from PIL import Image as PILImage
        # Open pdf with Wand and render it to an in-memory PNG blob.
        with wandimage(filename='/input/tests/data/test.pdf') as wand_image:
            png_bytes = wand_image.make_blob(format='png')
        # make_blob already returns bytes, so feed them to PIL directly
        # instead of round-tripping through a numpy array.
        bytesio = io.BytesIO(png_bytes)
        test_string = pytesseract.image_to_string(PILImage.open(bytesio))
        self.assertIsInstance(test_string, str)
| Python | 0.000001 | |
f8d8580dfffee35236478ec75116b291499c085c | Create maximum-average-subarray-i.py | Python/maximum-average-subarray-i.py | Python/maximum-average-subarray-i.py | # Time: O(n)
# Space: O(1)
# Given an array consisting of n integers,
# find the contiguous subarray of given length k that has the maximum average value.
# And you need to output the maximum average value.
#
# Example 1:
# Input: [1,12,-5,-6,50,3], k = 4
# Output: 12.75
# Explanation: Maximum average is (12-5-6+50)/4 = 51/4 = 12.75
# Note:
# 1 <= k <= n <= 30,000.
# Elements of the given array will be in the range [-10,000, 10,000].
class Solution(object):
    def findMaxAverage(self, nums, k):
        """
        Return the maximum average of any contiguous subarray of length k.

        :type nums: List[int]
        :type k: int
        :rtype: float
        """
        # Seed the window with the first k elements (the original used a
        # manual xrange loop, which breaks under Python 3), then slide it:
        # add the entering element and drop the leaving one.
        total = sum(nums[:k])
        result = total
        for i in range(k, len(nums)):
            total += nums[i] - nums[i-k]
            result = max(result, total)
        # Divide once at the end; float() keeps Python 2 semantics correct.
        return float(result) / k
| Python | 0.999246 | |
164ccb9206885b216e724b3618ebae5601ab0ac0 | Add parallel execution utility module. | parallel.py | parallel.py | import multiprocessing as mp
def run_func(func, args, parallel=False):
    """
    Apply *func* to every element of *args*, discarding return values.

    When *parallel* is true the calls are fanned out across a process
    pool (one fewer worker than CPUs, minimum 1); otherwise they run
    sequentially in this process.
    """
    if parallel:
        # max(1, ...) guards the single-CPU case, where cpu_count()-1 == 0
        # would make Pool raise.  The context manager terminates the
        # workers even if func raises (the original leaked the pool).
        workers = max(1, mp.cpu_count() - 1)
        with mp.Pool(workers) as pool:
            pool.map(func, args)
    else:
        for arg in args:
            func(arg)
| Python | 0 | |
7c3ed589ace907a71931b79902382b168a2ae80d | add direct_link_downloader | downloaders/direct_link_downloader.py | downloaders/direct_link_downloader.py | import os
from urllib.request import urlopen
def direct_link_download(url, file_path):
    """
    Download the image at *url* and save it to *file_path*.

    :param url: direct link to an image
    :param file_path: file path (including filename) to save image to
    :raises ValueError: if file_path points to a directory
    :raises FileExistsError: if the target file already exists
    """
    # make sure the file_path param doesn't point to a directory
    if os.path.isdir(file_path):
        raise ValueError(':param file_path: shouldn\'t point to a directory')
    # make sure the file doesn't already exist
    if os.path.isfile(file_path):
        raise FileExistsError('%s already exists' % file_path)
    # create path(s) for file_path if necessary; exist_ok avoids both the
    # check/create race and the original's crash on a bare filename
    # (dirname == '' made makedirs fail on the existing cwd)
    base_dir = os.path.dirname(file_path)
    if base_dir:
        os.makedirs(os.path.abspath(base_dir), exist_ok=True)
    # urlopen().read() returns bytes, so the file must be opened in binary
    # mode -- the original's text-mode 'w' raised TypeError on f.write()
    with urlopen(url) as resp:
        data = resp.read()
    with open(file_path, 'wb') as f:
        f.write(data)
if __name__ == "__main__":
    # Ad-hoc manual tests; these hit the network, so they are not part of
    # any automated suite.
    dir1 = os.path.join(os.getcwd(), 'img1.jpg')
    print(dir1)
    url = 'http://i.imgur.com/2MlAOkC.jpg'
    url2 = 'http://img05.deviantart.net/41ee/i/2013/299/9/f/_stock__mystic_woods_by_dominikaaniola-d2ehxq4.jpg'
    direct_link_download(url, 'img1.jpg')
    # direct_link_download(url2, './tmp/tmp2/img2.jpg')
| Python | 0.000001 | |
e3e62c964b864c057e98763169ddc0dd922e6fa9 | Add a separate module for common parsing functions. | dunovo_parsers.py | dunovo_parsers.py | import collections
# A pair of `StrandFamily`s with the same barcode.
BarFamily = collections.namedtuple('BarFamily', ('bar', 'ab', 'ba'))
# A pair of `ReadFamily`s with the same order and barcode.
StrandFamily = collections.namedtuple('StrandFamily', ('order', 'mate1', 'mate2'))
# A set of `Read`s with the same mate, order, and barcode.
ReadFamily = collections.namedtuple('ReadFamily', ('mate', 'reads'))
# A single read.
Read = collections.namedtuple('Read', ('name', 'seq', 'quals'))
class DunovoFormatError(ValueError):
  """Raised when an input line does not match the expected Du Novo format."""
  pass
def parse_make_families(lines, prepended=False):
  """Parse the 8-column, tab-delimited output of Du Novo's family-making
  step, yielding one BarFamily per barcode.

  Lines must be grouped by barcode, then by order (ab/ba), since families
  are flushed whenever either value changes.  With prepended=True the
  input has an extra leading column (correct.py --prepend output).
  Raises DunovoFormatError on a line with the wrong number of columns.
  """
  strand_families = []
  strand_family_lines = []
  last_barcode = last_order = None
  for line_num, line in enumerate(lines, 1):
    fields = line.rstrip('\r\n').split('\t')
    if len(fields) != 8:
      raise DunovoFormatError(f'Line {line_num} has an invalid number of columns: {len(fields)}')
    # If it's the output of correct.py with --prepend, there's an extra column.
    # We want the corrected barcode (column 1), not the original one (column 2).
    if prepended:
      del fields[1]
    barcode, order = fields[:2]
    # A change of barcode or order ends the current strand family; a change
    # of barcode additionally ends (and yields) the current bar family.
    if barcode != last_barcode or order != last_order:
      if last_order is not None:
        strand_families.append(create_strand_family(strand_family_lines))
        strand_family_lines = []
      if barcode != last_barcode:
        if last_barcode is not None:
          yield create_bar_family(strand_families, last_barcode)
          strand_families = []
    strand_family_lines.append(fields)
    last_barcode = barcode
    last_order = order
  # Flush whatever is buffered when the input ends.
  if last_order is not None:
    strand_families.append(create_strand_family(strand_family_lines))
  if last_barcode is not None:
    yield create_bar_family(strand_families, last_barcode)
def create_strand_family(strand_family_lines):
  """Build one StrandFamily from the raw field lists sharing a barcode+order."""
  mate1_reads = []
  mate2_reads = []
  last_order = None
  for fields in strand_family_lines:
    barcode, order, name1, seq1, quals1, name2, seq2, quals2 = fields
    if order not in ('ab', 'ba'):
      raise DunovoFormatError(f'Invalid order: {order!r}')
    # Every line in a single strand family must carry the same order.
    assert last_order is None or order == last_order, (order, last_order)
    mate1_reads.append(Read(name1, seq1, quals1))
    mate2_reads.append(Read(name2, seq2, quals2))
    last_order = order
  return StrandFamily(
    order,
    ReadFamily(1, tuple(mate1_reads)),
    ReadFamily(2, tuple(mate2_reads)),
  )
def create_bar_family(strand_families_raw, barcode):
  """Pair the ab/ba StrandFamilys for one barcode into a BarFamily."""
  assert 1 <= len(strand_families_raw) <= 2, len(strand_families_raw)
  # Index the incoming strand families by their order.
  by_order = {family.order: family for family in strand_families_raw}
  ordered = []
  for order in ('ab', 'ba'):
    family = by_order.get(order)
    if family is None:
      # A missing strand gets an empty placeholder so the BarFamily
      # always carries both orders.
      family = StrandFamily(order, ReadFamily(1, ()), ReadFamily(2, ()))
    ordered.append(family)
  return BarFamily(barcode, *ordered)
| Python | 0 | |
1455f6c563edd07a61dd826bde03137fff2d3f57 | add data for recipe 1.8 | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/08-calculating_with_dictionaries/main.py | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/08-calculating_with_dictionaries/main.py | def example_1():
prices = {
'ACME': 45.23,
'AAPL': 612.78,
'IBM': 205.55,
'HPQ': 37.20,
'FB': 10.75
}
min_price = min(zip(prices.values(), prices.keys()))
max_price = max(zip(prices.values(), prices.keys()))
print(min_price, max_price)
sorted_prices = sorted(zip(prices.values(), prices.keys()))
print(sorted_prices)
prices_and_names = zip(prices.values(), prices.keys())
print(min(prices_and_names))
try:
print(max(prices_and_names))
except ValueError:
print('here is ValueError')
def example_2():
    """Show that min()/max() on a dict compare keys by default, and that
    key= lets you rank keys by their associated value instead."""
    prices = {
        'ACME': 45.23,
        'AAPL': 612.78,
        'IBM': 205.55,
        'HPQ': 37.20,
        'FB': 10.75
    }
    # Plain min/max compare the keys alphabetically.
    print(min(prices))
    print(max(prices))
    # Compare the values directly.
    print(min(prices.values()))
    print(max(prices.values()))
    # Rank the keys by their associated value.
    cheapest = min(prices, key=lambda k: prices[k])
    priciest = max(prices, key=lambda k: prices[k])
    print(cheapest)
    print(priciest)
    print(prices[cheapest])
    print(prices[priciest])
def example_3():
    """When values tie, (value, key) tuples fall back to comparing keys."""
    prices = { 'AAA': 45.23, 'ZZZ': 45.23 }
    pairs = list(zip(prices.values(), prices.keys()))
    print(min(pairs))
    print(max(pairs))
if __name__ == '__main__':
    # Run all the recipe demonstrations in order.
    example_1()
    example_2()
    example_3()
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.