commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
84c5bfa0252814c5797cf7f20b04808dafa9e1fa | Create MergeIntervals_001.py | leetcode/056-Merge-Intervals/MergeIntervals_001.py | leetcode/056-Merge-Intervals/MergeIntervals_001.py | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
# @param {Interval[]} intervals
# @return {Interval[]}
def sortmeg(self, intervals):
ls = []
for i in intervals:
ls.append(i.start)
idx = sorted(range(len(ls)),key=lambda x:ls[x])
sortedintv = []
for i in idx:
sortedintv.append(intervals[i])
return sortedintv
def merge(self, intervals):
if len(intervals) < 2:
return intervals
intervals = self.sortmeg(intervals)
p = 0
while p + 1 <= len(intervals) - 1:
if intervals[p+1].start <= intervals[p].end:
if intervals[p+1].end > intervals[p].end:
intervals[p].end = intervals[p+1].end
del intervals[p+1]
else:
p += 1
return intervals
| Python | 0 | |
8471516294d5b28a81cae73db591ae712f44bc01 | Add failing cairo test | tests/pygobject/test_structs.py | tests/pygobject/test_structs.py | # Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
import unittest
from gi.repository import Gtk
from tests import is_gi
class StructTest(unittest.TestCase):
@unittest.skipUnless(is_gi, "FIXME")
def test_foreign_cairo(self):
window = Gtk.OffscreenWindow()
area = Gtk.DrawingArea()
window.add(area)
def foo(area, context):
self.assertTrue(hasattr(context, "set_source_rgb"))
area.connect("draw", foo)
window.show_all()
while Gtk.events_pending():
Gtk.main_iteration()
window.destroy()
| Python | 0.000002 | |
c46e6d170f4d641c3bb5045a701c7810d77f28a6 | add update-version script | update-version.py | update-version.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import xml.etree.ElementTree as et
NS = "http://maven.apache.org/POM/4.0.0"
POM_NS = "{http://maven.apache.org/POM/4.0.0}"
def getModuleNames(mainPom):
pom = et.parse(mainPom)
modules = pom.findall("./{ns}modules/{ns}module".format(ns=POM_NS))
return map(lambda element: element.text, modules)
def updateVersionInModule(module, newVersion):
pomPath = os.path.join(module, "pom.xml")
modulePom = et.parse(pomPath)
parentVersion = modulePom.find("./{ns}parent/{ns}version".format(ns=POM_NS))
parentVersion.text = newVersion
modulePom.write(pomPath, xml_declaration=False, encoding="utf-8", method="xml")
if __name__ == '__main__':
et.register_namespace('', NS)
parser = argparse.ArgumentParser(description='Update parent version in all submodules.')
parser.add_argument('version', help='the new parent version')
args = parser.parse_args()
allModules = getModuleNames("pom.xml")
for module in allModules:
updateVersionInModule(module, args.version) | Python | 0 | |
0bc4d105bd649ed9e174b26b5017572f08fd5c2f | Write unit tests for physics library | src/server/test_physics.py | src/server/test_physics.py | #!/usr/bin/env python
import unittest
from physics import *
class TestNVectors(unittest.TestCase):
def setUp(self):
self.v11 = NVector(1, 1)
self.v34 = NVector(3, 4)
self.v10 = NVector(10, 0)
self.vneg = NVector(-2, -2)
def test_dimensionality(self):
"""Test counting of number of dimensionality"""
self.assertEqual(self.v11.dimensionality(), 2)
def test_magnitude(self):
"""Test magnitude calculation"""
self.assertEqual(self.v34.magnitude(), 5)
def test_norm(self):
"""Test unit vector calculation"""
self.assertEqual(self.v10.norm(), NVector(1, 0))
self.assertEqual(self.v11.norm(),
NVector(0.7071067811865475, 0.7071067811865475))
def test_init(self):
"""Check initialization"""
self.assertEqual(self.v11.dimensions, (1, 1))
self.assertEqual(self.v34.dimensions, (3, 4))
self.assertEqual(self.vneg.dimensions, (-2, -2))
def test_equality(self):
"""Check equality between vectors"""
self.assertEqual(NVector(5, 5), NVector(5, 5))
self.assertNotEqual(NVector(3, 4), NVector(4, 3))
def test_neg(self):
"""Check negation"""
self.assertEqual(NVector(1, -1), -NVector(-1, 1))
self.assertNotEqual(NVector(10, 5), -NVector(10, 5))
def test_truth(self):
"""Check truth values"""
self.assertFalse(NVector(0, 0, 0, 0))
self.assertTrue(NVector(1, 0))
self.assertTrue(NVector(-10, -20, -30))
def test_addition(self):
"""Check vector addition"""
self.assertEqual(NVector(3, 2, 1, 0) + NVector(0, 1, 2, 3),
NVector(3, 3, 3, 3))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(2, 2) + NVector(3, 3, 3)
with self.assertRaises(TypeError):
NVector(1, 1) + 10
def test_subtraction(self):
"""Check vector subtraction"""
self.assertEqual(NVector(3, 2, 1, 0) - NVector(0, 1, 2, 3),
NVector(3, 1, -1, -3))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(2, 2) - NVector(3, 3, 3)
with self.assertRaises(TypeError):
NVector(1, 1) - 10
def test_multiplication(self):
"""Check vector and scalar multiplication"""
self.assertEqual(NVector(4, 2) * 10, NVector(40, 20))
self.assertEqual(2 * NVector(1, 1), NVector(2, 2))
self.assertEqual(NVector(3, 3) * NVector(2, 2), NVector(6, 6))
# Make sure some exceptions are raised.
with self.assertRaises(DimensionalityError):
NVector(1) * NVector(2, 2)
def test_division(self):
"""Check vector and scalar true and floor division"""
self.assertEqual(NVector(5, 5) / NVector(2, 2), NVector(2.5, 2.5))
self.assertEqual(NVector(5, 5) // NVector(2, 2), NVector(2, 2))
self.assertEqual(NVector(5, 5) / 2, NVector(2.5, 2.5))
self.assertEqual(NVector(5, 5) // 2, NVector(2, 2))
with self.assertRaises(DimensionalityError):
NVector(3, 3, 3) / NVector(2, 2)
with self.assertRaises(DimensionalityError):
NVector(3, 3, 3) // NVector(2, 2)
with self.assertRaises(TypeError):
5 / NVector(1, 1)
with self.assertRaises(TypeError):
5 // NVector(1, 1)
def test_stringy(self):
"""Test string formatting"""
self.assertEqual(str(NVector(1, 1)), "<1.000000, 1.000000>")
if __name__ == "__main__":
unittest.main()
| Python | 0.000001 | |
b9cf46407eea6df9bb3fef5eb3103c7353b249a9 | solve problem 11 | problem11.py | problem11.py | def largest_grid_product(grid):
max = float("-inf")
for i in xrange(0, len(grid)):
for j in xrange(0, len(grid[i]) - 3):
productx = grid[i][j] * grid[i][j+1] * grid[i][j+2] * grid[i][j+3]
producty = grid[j][i] * grid[j+1][i] * grid[j+2][i] * grid[j+3][i]
if productx > max:
max = productx
elif producty > max:
max = producty
for i in xrange(0, len(grid) - 3):
productd = grid[i][i] * grid[i+1][i+1] * grid[i+2][i+2] * grid[i+3][i+3]
if productd > max: max = productd
print max
if __name__ == '__main__':
L = []
L.append("08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08")
L.append("49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00")
L.append("81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65")
L.append("52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91")
L.append("22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80")
L.append("24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50")
L.append("32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70")
L.append("67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21")
L.append("24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72")
L.append("21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95")
L.append("78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92")
L.append("16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57")
L.append("86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58")
L.append("19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40")
L.append("04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66")
L.append("88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69")
L.append("04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36")
L.append("20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16")
L.append("20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54")
L.append("01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48")
M = [i.split() for i in L]
M = [[int(j) for j in i] for i in M]
largest_grid_product(M)
| Python | 0.000436 | |
1d3719bcd03b92d04efae10933928f953d95c7a4 | Add a simple basicmap python example | src/python/BasicMap.py | src/python/BasicMap.py | """
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> b = sc.parallelize([1, 2, 3, 4])
>>> sorted(basicSquare(b).collect())
[1, 4, 9, 12]
"""
import sys
from pyspark import SparkContext
def basicSquare(nums):
"""Square the numbers"""
return nums.map(lambda x: x * x)
if __name__ == "__main__":
master = "local"
if len(sys.argv) = 2:
master = sys.argv[1]
sc = SparkContext(master, "BasicMap")
nums = sc.parallelize([1, 2, 3, 4])
output = countWords(nums)
for num in output:
print "%i " % (num)
| Python | 0.000014 | |
41220718d0e9a32fc9e95d55acdb989b2f87563f | Add @job tasks | smsish/tasks.py | smsish/tasks.py | import django_rq
from rq.decorators import job
DEFAULT_QUEUE_NAME = "default"
DEFAULT_REDIS_CONNECTION = django_rq.get_connection()
@job(DEFAULT_QUEUE_NAME, connection=DEFAULT_REDIS_CONNECTION)
def send_sms(*args, **kwargs):
from smsish.sms import send_sms as _send_sms
return _send_sms(*args, **kwargs)
@job(DEFAULT_QUEUE_NAME, connection=DEFAULT_REDIS_CONNECTION)
def send_mass_sms(*args, **kwargs):
from smsish.sms import send_mass_sms as _send_mass_sms
return _send_mass_sms(*args, **kwargs)
| Python | 0.000791 | |
6ee145c7af7084f228ee48754ef2a0bfc37c5946 | Add missing hooks.py module | pyqt_distutils/hooks.py | pyqt_distutils/hooks.py | """
A pyqt-distutils hook is a python function that is called after the
compilation of a ui script to let you customise its content. E.g. you
might want to write a hook to change the translate function used or replace
the PyQt imports by your owns if you're using a shim,...
The hook function is a simple python function which must take a single
argument: the path to the generated python script.
Hooks are exposed as setuptools entrypoint using :attr:`ENTRYPOINT` as the
entrypoint key. E.g., in your setup.py::
setup(
...,
entry_points={
'pyqt_distutils_hooks': [
'hook_name = package_name.module_name:function_name']
},
...)
There is a "hooks" config key where you can list the hooks
that you want to run on all your ui/qrc scripts. E.g.::
{
"files": [
["forms/*.ui", "foo_gui/forms/"],
["resources/*.qrc", "foo_gui/forms/"]
],
"pyrcc": "pyrcc5",
"pyrcc_options": "",
"pyuic": "pyuic5",
"pyuic_options": "--from-imports",
"hooks": ["gettext", "spam", "eggs"]
}
At the moment, we provide one builtin hook: **gettext**. This hook let you
use ``gettext.gettext`` instead of ``QCoreApplication.translate``.
"""
import pkg_resources
import traceback
#: Name of the entrypoint to use in setup.py
ENTRYPOINT = 'pyqt_distutils_hooks'
def load_hooks():
"""
Load the exposed hooks.
Returns a dict of hooks where the keys are the name of the hook and the
values are the actual hook functions.
"""
hooks = {}
for entrypoint in pkg_resources.iter_entry_points(ENTRYPOINT):
name = str(entrypoint).split('=')[0].strip()
try:
hook = entrypoint.load()
except Exception:
traceback.print_exc()
else:
hooks[name] = hook
return hooks
def hook(ui_file_path):
"""
This is the prototype of a hook function.
"""
pass
GETTEXT_REPLACEMENT = ''' import gettext
def _translate(_, string):
return gettext.gettext(string)
'''
def gettext(ui_file_path):
"""
Let you use gettext instead of the Qt tools for l18n
"""
with open(ui_file_path, 'r') as fin:
content = fin.read()
with open(ui_file_path, 'w') as fout:
fout.write(content.replace(
' _translate = QtCore.QCoreApplication.translate',
GETTEXT_REPLACEMENT))
| Python | 0.000003 | |
92af6cafc6ef297464b1ae3f3556e2b815504bbb | add nfs datastore to all hosts in all clusters | add_nfs_ds.py | add_nfs_ds.py | #!/usr/bin/env python
try:
import json
except ImportError:
import simplejson as json
import re
import os
import time
import atexit
import urllib2
import datetime
import ast
import ssl
if hasattr(ssl, '_create_default_https_context') and hasattr(ssl, '_create_unverified_context'):
ssl._create_default_https_context = ssl._create_unverified_context
try:
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim, vmodl
except ImportError:
print("failed=True msg='pyVmomi is required to run this module'")
DOCUMENTATION = '''
---
module: add_nfs_ds
Short_description: Create NFS datastore and attache to all hosts in all cluster in datacenter
description:
- Provides an interface to add nfs datastore to all hosts in clusters.
version_added: "0.1"
options:
host:
description:
- Address to connect to the vCenter instance.
required: True
default: null
login:
description:
- Username to login to vCenter instance.
required: True
default: null
password:
description:
- Password to authenticate to vCenter instance.
required: True
default: null
port:
description:
- Port to access vCenter instance.
required: False
default: 443
nfshost:
description:
- hostname/ip of the nfs service.
required: True
default: null
nfspath:
description:
- path to nfs share ex: /nfs
required: True
default: null
nfsname:
description:
- name of nfs datastore in vcenter
required: True
default: null
nfsaccess:
description:
- type of access if not readWrite specified on the nfs service, module will fail
required: False
default: readWrite
nfstype:
description:
- type of datastore specified, NFS
required: False
default: NFS
'''
class Createdsnfs(object):
def __init__(self, module):
self.module = module
def si_connection(self, vhost, user, password, port):
try:
self.SI = SmartConnect(host=vhost, user=user, pwd=password, port=port)
except:
creds = vhost + " " + user + " " + password
self.module.fail_json(msg='Cannot connect %s' % creds)
return self.SI
def get_content(self, connection):
try:
content = connection.RetrieveContent()
return content
except vmodl.MethodFault as e:
return module.fail_json(msg=e.msg)
def get_vcobjt_byname(self, connection, vimtype, target_name):
content = self.get_content(connection)
vc_objt = {}
try:
container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
for managed_object_ref in container.view:
vc_objt.update({managed_object_ref.name: managed_object_ref})
for k, v in vc_objt.items():
if k == target_name:
return False, v
except vmodl.MethodFault as meth_fault:
return True, dict(msg=meth_fault.msg)
except vmodl.RuntimeFault as run_fault:
return True, dict(msg=run_fault.msg)
def get_datacenter(self, connection, vimtype, datacenter_name):
status, datacenter_object_ref = self.get_vcobjt_byname(connection, vimtype, datacenter_name)
if not status:
return False, datacenter_object_ref
else:
return True, datacenter_object_ref
def nas_spec(self, nfshost, nfspath, nfsname, nfsaccess, nfstype):
nas_spec = vim.host.NasVolume.Specification(remoteHost=nfshost,
remotePath=nfspath,
localPath=nfsname,
accessMode=nfsaccess,
type=nfstype)
return nas_spec
def create_nfs(self, clusters, nasconfigspec):
for cluster in clusters:
hosts_in_cluster = cluster.host
try:
for host in hosts_in_cluster:
host.configManager.datastoreSystem.CreateNasDatastore(spec=nasconfigspec)
except vim.HostConfigFault as host_fault:
return True, dict(msg=host_fault.msg)
except vmodl.MethodFault as method_fault:
return True, dict(msg=method_fault.msg)
return False, dict(msg="Attached all hosts to nfs datastore")
def core(module):
vcsvr = module.params.get('host')
vuser = module.params.get('login')
vpass = module.params.get('password')
vport = module.params.get('port')
vio_dc = module.params.get('datacenter', dict())
vnfshost = module.params.get('nfshost')
vnfspath = module.params.get('nfspath')
vnfsname = module.params.get('nfsname')
vnfsaccess = module.params.get('nfsaccess')
vnfstype = module.params.get('nfstype')
target_dc_name = vio_dc['name']
v = Createdsnfs(module)
c = v.si_connection(vcsvr, vuser, vpass, vport)
try:
status, target_dc_object = v.get_datacenter(c, [vim.Datacenter], target_dc_name)
if not status:
host_folder = target_dc_object.hostFolder
clusters_list = host_folder.childEntity
vnas_spec = v.nas_spec(vnfshost, vnfspath, vnfsname, vnfsaccess, vnfstype)
fail, result = v.create_nfs(clusters_list, vnas_spec)
return fail, result
except Exception as e:
return True, str(e)
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True),
login=dict(required=True),
password=dict(required=True),
port=dict(type='int'),
datacenter=dict(type='dict', required=True),
nfshost=dict(type='str', required=True),
nfspath=dict(type='str', required=True),
nfsname=dict(type='str', required=True),
nfsaccess=dict(type='str', required=False, default='readWrite'),
nfstype=dict(type='str', required=False, default='NFS')
)
)
try:
fail, result = core(module)
if fail:
module.fail_json(msg=result)
else:
module.exit_json(msg=result)
except Exception as e:
import traceback
module.fail_json(msg='%s: %s\n%s' % (e.__class__.__name__, str(e), traceback.format_exc()))
from ansible.module_utils.basic import *
| Python | 0.000002 | |
ff79343cb1feda5259244199b4f0d503da401f24 | Create quick_sort_iterativo.py | quick_sort_iterativo.py | quick_sort_iterativo.py | import unittest
def _quick_recursivo(seq, inicio, final):
if inicio >= final:
return seq
indice_pivot = final
pivot = seq[indice_pivot]
i_esquerda = inicio
i_direita = final - 1
while i_esquerda<=i_direita:
while i_esquerda<=i_direita and seq[i_esquerda]<=pivot:
i_esquerda=i_esquerda+1
while i_esquerda<=i_direita and seq[i_direita]>=pivot:
i_direita=i_direita-1
if i_esquerda<i_direita:
aux=seq[i_esquerda]
seq[i_esquerda]=seq[i_direita]
seq[i_direita]=aux
aux=seq[i_esquerda]
seq[i_esquerda]=seq[final]
seq[final]=aux
_quick_recursivo(seq, inicio, i_esquerda - 1)
_quick_recursivo(seq, i_esquerda + 1, final)
return seq
def quick_sort(seq):
return _quick_recursivo(seq, 0, len(seq) - 1)
class OrdenacaoTestes(unittest.TestCase):
def teste_lista_vazia(self):
self.assertListEqual([], quick_sort([]))
def teste_lista_unitaria(self):
self.assertListEqual([1], quick_sort([1]))
def teste_lista_binaria(self):
self.assertListEqual([1, 2], quick_sort([2, 1]))
def teste_lista_desordenada(self):
self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], quick_sort([9, 7, 1, 8, 5, 3, 6, 4, 2, 0]))
def teste_lista_com_elementos_repetidos(self):
self.assertListEqual([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9], quick_sort([9, 7, 1, 8, 5, 3, 6, 4, 2, 0, 9, 9]))
def teste_lista_so_com_elementos_repetidos(self):
self.assertListEqual([9, 9, 9], quick_sort([9, 9, 9]))
if __name__ == '__main__':
unittest.main()
| Python | 0.000004 | |
ae2f6b1d6a3d19ab183442db0760dd5453ebefab | Add new dci_remoteci module | modules/dci_remoteci.py | modules/dci_remoteci.py | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.module_utils.basic import *
import os
try:
from dciclient.v1.api import context as dci_context
from dciclient.v1.api import remoteci as dci_remoteci
except ImportError:
dciclient_found = False
else:
dciclient_found = True
DOCUMENTATION = '''
---
module: dci_remoteci
short_description: An ansible module to interact with the /remotecis endpoint of DCI
version_added: 2.2
options:
state:
required: false
description: Desired state of the resource
login:
required: false
description: User's DCI login
password:
required: false
description: User's DCI password
url:
required: false
description: DCI Control Server URL
id:
required: false
description: ID of the remoteci to interact with
name:
required: false
description: RemoteCI name
data:
required: false
description: Data associated with the RemoteCI
active:
required: false
description: Wether the remoteci is active;w
team_id:
required: false
description: ID of the team the remoteci belongs to
'''
EXAMPLES = '''
- name: Create a new remoteci
dci_remoteci:
name: 'MyRemoteCI'
team_id: XXXX
- name: Create a new team
dci_remoteci:
name: 'MyRemoteCI'
team_id: XXXX
active: False
data: >
{"certification_id": "xfewafeqafewqfeqw"}
- name: Get remoteci information
dci_topic:
id: XXXXX
- name: Update remoteci informations
dci_topic:
id: XXXX
active: True
- name: Delete a topic
dci_topic:
state: absent
id: XXXXX
'''
# TODO
RETURN = '''
'''
def get_details(module):
"""Method that retrieves the appropriate credentials. """
login_list = [module.params['login'], os.getenv('DCI_LOGIN')]
login = next((item for item in login_list if item is not None), None)
password_list = [module.params['password'], os.getenv('DCI_PASSWORD')]
password = next((item for item in password_list if item is not None), None)
url_list = [module.params['url'], os.getenv('DCI_CS_URL')]
url = next((item for item in url_list if item is not None), 'https://api.distributed-ci.io')
return login, password, url
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
# Authentication related parameters
#
login=dict(required=False, type='str'),
password=dict(required=False, type='str'),
url=dict(required=False, type='str'),
# Resource related parameters
#
id=dict(type='str'),
name=dict(type='str'),
data=dict(type='dict'),
active=dict(type='bool'),
team_id=dict(type='str'),
),
)
if not dciclient_found:
module.fail_json(msg='The python dciclient module is required')
login, password, url = get_details(module)
if not login or not password:
module.fail_json(msg='login and/or password have not been specified')
ctx = dci_context.build_dci_context(url, login, password, 'Ansible')
# Action required: Delete the remoteci matching remoteci id
# Endpoint called: /remotecis/<remoteci_id> DELETE via dci_remoteci.delete()
#
# If the remoteci exists and it has been succesfully deleted the changed is
# set to true, else if the remoteci does not exist changed is set to False
if module.params['state'] == 'absent':
if not module.params['id']:
module.fail_json(msg='id parameter is required')
res = dci_remoteci.get(ctx, module.params['id'])
if res.status_code not in [400, 401, 404, 422]:
kwargs = {
'id': module.params['id'],
'etag': res.json()['remoteci']['etag']
}
res = dci_remoteci.delete(ctx, **kwargs)
# Action required: Retrieve remoteci informations
# Endpoint called: /remotecis/<remoteci_id> GET via dci_remoteci.get()
#
# Get remoteci informations
elif module.params['id'] and not module.params['name'] and not module.params['data'] and not module.params['team_id'] and module.params['active'] is None:
res = dci_remoteci.get(ctx, module.params['id'])
# Action required: Update an existing remoteci
# Endpoint called: /remotecis/<remoteci_id> PUT via dci_remoteci.update()
#
# Update the remoteci with the specified characteristics.
elif module.params['id']:
res = dci_remoteci.get(ctx, module.params['id'])
if res.status_code not in [400, 401, 404, 422]:
kwargs = {
'id': module.params['id'],
'etag': res.json()['remoteci']['etag']
}
if module.params['name']:
kwargs['name'] = module.params['name']
if module.params['data']:
kwargs['data'] = module.params['data']
if module.params['team_id']:
kwargs['team_id'] = module.params['team_id']
if module.params['active'] is not None:
kwargs['active'] = module.params['active']
open('/tmp/tuiti', 'w').write(str(kwargs.keys()))
res = dci_remoteci.update(ctx, **kwargs)
# Action required: Create a remoteci with the specified content
# Endpoint called: /remotecis POST via dci_remoteci.create()
#
# Create the new remoteci.
else:
if not module.params['name']:
module.fail_json(msg='name parameter must be specified')
if not module.params['team_id']:
module.fail_json(msg='team_id parameter must be specified')
kwargs = {
'name': module.params['name'],
'team_id': module.params['team_id']
}
if module.params['data']:
kwargs['data'] = module.params['data']
if module.params['active'] is not None:
kwargs['active'] = module.params['active']
res = dci_remoteci.create(ctx, **kwargs)
try:
result = res.json()
if res.status_code == 404:
module.fail_json(msg='The resource does not exist')
if res.status_code == 422:
result['changed'] = False
else:
result['changed'] = True
except:
result = {}
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| Python | 0.000003 | |
8543b2bf12c25163be62a8d44b48d32396f3ac9b | Add source. | solver.py | solver.py | #!usr/bin/env python3
import sys, time
from tkinter import messagebox, Tk
game_w, game_h = 50, 30 # total width and height of the game board in game coordinates
formula_mode = "axis"
from pymouse import PyMouse, PyMouseEvent
from pykeyboard import PyKeyboard, PyKeyboardEvent
m = PyMouse()
k = PyKeyboard()
class PointMouseSelector(PyMouseEvent):
def __init__(self):
PyMouseEvent.__init__(self)
self.x, self.y = None, None
def click(self, x, y, button, press):
if press: return # only handle button up events
if button == 1: # left click
print("selecting", x, y)
self.x, self.y = x, y
self.stop()
elif button == 2: # right click
self.stop()
def select_point():
S = PointMouseSelector()
try: S.run()
except: pass
return (S.x, S.y)
def calculate_formula_axis(point_list):
sorted_points = sorted(points, key=lambda x: x[0])
start = point_list[0]
x1, y1 = 0, 0
result = ""
normalize = lambda x: str(x) if "-" in str(x) else "+" + str(x)
for point in points[1:]:
x2, y2 = point[0] - start[0], point[1] - start[1]
if x2 == x1: # jump discontinuity, skip to get a jump
pass
else:
slope = (y2 - y1) / (x2 - x1)
result += "+(sign(x{0})-sign(x{1}))*({2}*x{3})/2".format(normalize(-x1), normalize(-x2), str(round(-slope, 3)), normalize(round(-(y1 - slope * x1), 3))) # add a line segment with correct slope
x1, y1 = x2, y2
result = result[1:] + "+0.5*sin(800*x)" # remove the leading plus sign
return result
def calculate_formula_graphwar(point_list):
sorted_points = sorted(points, key=lambda x: x[0])
start = point_list[0]
x1, y1 = start[0], 0
result = ""
normalize = lambda x: str(x) if "-" in str(x) else "+" + str(x)
for point in points[1:]:
x2, y2 = point[0], point[1] - start[1]
if x2 == x1: # jump discontinuity, skip to get a jump
raise Exception("bad thing happen")
else:
slope = (y2 - y1) / (x2 - x1)
result += "+(1/(1+exp(-1000*(x{0})))-1/(1+exp(-1000*(x{1}))))*({2}*x{3})".format(normalize(round(-x1)), normalize(round(-x2)), str(round(-slope, 3)), normalize(round(-(y1 - slope * x1), 3))) # add a line segment with correct slope
x1, y1 = x2, y2
result = result[1:] + "+0.1*sin(60*x)" # remove the leading plus sign
return result
messagebox.showinfo("Select Point", "Press OK and left click on the top left corner and then the bottom right corner of the game axes.")
top_left = select_point()
if top_left[0] == None: sys.exit()
bottom_right = select_point()
if bottom_right[0] == None: sys.exit()
scale_w, scale_h = (bottom_right[0] - top_left[0]) / game_w, (bottom_right[1] - top_left[1]) / game_h
print("window size", bottom_right[0] - top_left[0], bottom_right[1] - top_left[1])
while True:
messagebox.showinfo("Game Start", "Press OK and right click path points when your turn starts, starting with the player. Right click on the formula entry box to complete.")
# get start point
start = select_point()
start = (start[0] - top_left[0], start[1] - top_left[1])
if start[0] == None: sys.exit()
# get path points
points = [(start[0] / scale_w - game_w / 2, start[1] / scale_h - game_h / 2)]
current_x = start[0]
while True:
point = select_point()
if point[0] == None: break # completed
point = (point[0] - top_left[0], point[1] - top_left[1])
if point[0] <= current_x: # left or same as current one, which means jump down
points.append((current_x / scale_w - game_w / 2, point[1] / scale_h - game_h / 2))
else: # normal line segment
points.append((point[0] / scale_w - game_w / 2, point[1] / scale_h - game_h / 2))
current_x = point[0]
print("selected points ", points)
if formula_mode == "axis": # axisthgame style formulas
formula = calculate_formula_axis(points)
elif formula_mode == "graphwar": # graphwar style formulas
formula = calculate_formula_graphwar(points)
else: raise Exception("bad thing happen")
print(formula)
try:
import win32clipboard
win32clipboard.OpenClipboard()
win32clipboard.EmptyClipboard()
win32clipboard.SetClipboardText(formula, win32clipboard.CF_TEXT)
win32clipboard.CloseClipboard()
except ImportError: pass
| Python | 0 | |
4324eaf427731db3943cf130e42e29509bdbd4df | Fix for Python 3 | asv/config.py | asv/config.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
from . import util
class Config(object):
"""
Manages the configuration for a benchmark project.
"""
api_version = 1
def __init__(self):
self.project = "project"
self.project_url = "#"
self.repo = None
self.pythons = ["{0[0]}.{0[1]}".format(sys.version_info)]
self.matrix = {}
self.env_dir = "env"
self.benchmark_dir = "benchmarks"
self.results_dir = "results"
self.html_dir = "html"
self.show_commit_url = "#"
self.hash_length = 8
@classmethod
def load(cls, path=None):
"""
Load a configuration from a file. If no file is provided,
defaults to `asv.conf.json`.
"""
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
conf = Config()
d = util.load_json(path, cls.api_version)
conf.__dict__.update(d)
if not getattr(conf, "repo", None):
raise ValueError(
"No repo specified in {0} config file.".format(path))
return conf
@classmethod
def update(cls, path=None):
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
util.update_json(cls, path, cls.api_version)
| # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
from . import util
class Config(object):
"""
Manages the configuration for a benchmark project.
"""
api_version = 1
def __init__(self):
self.project = "project"
self.project_url = "#"
self.repo = None
self.pythons = ["{0.major}.{0.minor}".format(sys.version_info)]
self.matrix = {}
self.env_dir = "env"
self.benchmark_dir = "benchmarks"
self.results_dir = "results"
self.html_dir = "html"
self.show_commit_url = "#"
self.hash_length = 8
@classmethod
def load(cls, path=None):
"""
Load a configuration from a file. If no file is provided,
defaults to `asv.conf.json`.
"""
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
conf = Config()
d = util.load_json(path, cls.api_version)
conf.__dict__.update(d)
if not getattr(conf, "repo", None):
raise ValueError(
"No repo specified in {0} config file.".format(path))
return conf
@classmethod
def update(cls, path=None):
if not path:
path = "asv.conf.json"
if not os.path.exists(path):
raise RuntimeError("Config file {0} not found.".format(path))
util.update_json(cls, path, cls.api_version)
| Python | 0.000054 |
0eb579b00c7e42813d45aa841df3f42607db0a7e | add thermoengineTest | rmgpy/thermo/thermoengineTest.py | rmgpy/thermo/thermoengineTest.py | #!/usr/bin/env python
# encoding: utf-8 -*-
"""
This module contains unit tests of the rmgpy.parallel module.
"""
import os
import sys
import unittest
import random
from external.wip import work_in_progress
from rmgpy import settings
from rmgpy.data.rmg import RMGDatabase
from rmgpy.rmg.main import RMG
from rmgpy.scoop_framework.framework import TestScoopCommon
from rmgpy.species import Species
from rmgpy.thermo.thermoengine import submit
try:
from scoop import futures, _control, shared
except ImportError, e:
import logging as logging
logging.debug("Could not properly import SCOOP.")
def load():
tearDown()
rmg = RMG()#for solvent
database = RMGDatabase()
database.loadThermo(os.path.join(settings['database.directory'], 'thermo'))
database.loadTransport(os.path.join(settings['database.directory'], 'transport'))
database.loadSolvation(os.path.join(settings['database.directory'], 'solvation'))
def tearDown():
"""
Reset the loaded database
"""
import rmgpy.data.rmg
rmgpy.data.rmg.database = None
def funcSubmit():
"""
Test that we can submit a number of species.
"""
load()
spcs = [
Species().fromSMILES('C'),\
Species().fromSMILES('CC'), \
Species().fromSMILES('CCC')
]
for spc in spcs:
submit(spc)
return True
def funcGet():
"""
Test if we can retrieve thermo of species even before we have submitted them explicitly.
"""
load()
spcs = [
Species().fromSMILES('C'),
Species().fromSMILES('CC'), \
Species().fromSMILES('CCC')
]
output = []
for spc in spcs:
data = spc.getThermoData()
output.append((spc, data))
for spc, data in output:
if not data:
return False
return True
def funcSubmitGet():
"""
Test if we can retrieve thermo of species after submitting some of them.
"""
load()
spcs = [
Species().fromSMILES('C'),\
Species().fromSMILES('CC'), \
Species().fromSMILES('CCC')
]
for spc in spcs:
submit(spc)
absent = Species().fromSMILES('[CH3]')
data = absent.getThermoData()
if not data: return False
present = Species().fromSMILES('CC')
data = present.getThermoData()
if not data: return False
random.shuffle(spcs)
for spc in spcs:
data = spc.getThermoData()
if not data: return False
return True
@work_in_progress
class AsyncThermoTest(TestScoopCommon):
def __init__(self, *args, **kwargs):
# Parent initialization
super(self.__class__, self).__init__(*args, **kwargs)
# Only setup the scoop framework once, and not in every test method:
super(self.__class__, self).setUp()
@unittest.skipUnless(sys.platform.startswith("linux"),
"test currently only runs on linux")
def testSubmit(self):
"""
Test that we can submit a request to generate
thermo/transport for a number of species.
"""
result = futures._startup(funcSubmit)
self.assertEquals(result, True)
@unittest.skipUnless(sys.platform.startswith("linux"),
"test currently only runs on linux")
def testGet(self):
"""
Test that we can get the data of a number of species.
"""
result = futures._startup(funcGet)
self.assertEquals(result, True)
if __name__ == '__main__' and os.environ.get('IS_ORIGIN', "1") == "1":
unittest.main()
| Python | 0.000001 | |
6a47c684012b98679c9274ca4087958c725a1fa7 | support extensions in tests | test/unit/dockerstache_tests.py | test/unit/dockerstache_tests.py | #!/usr/bin/env python
"""
dockerstache module test coverage for API calls
"""
import os
import tempfile
import json
import unittest
import mock
from dockerstache.dockerstache import run
class RunAPITests(unittest.TestCase):
"""tests for run API call"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.defaults = os.path.join(self.tempdir, 'defaults.json')
self.context = os.path.join(self.tempdir, 'context.json')
self.dotfile = os.path.join(self.tempdir, '.dockerstache')
with open(self.defaults, 'w') as handle:
json.dump(
{"defaults": {"value1": 1, "value2": 2}, "default_value": 99},
handle
)
with open(self.context, 'w') as handle:
json.dump(
{
"defaults": {"value2": 100},
"context": {"value3": 3, "value4": 4}
},
handle
)
with open(self.dotfile, 'w') as handle:
json.dump(
{
"context": self.context,
"defaults": self.defaults
},
handle
)
self.opts = {}
self.opts['input'] = self.tempdir
self.opts['output'] = None
self.opts['context'] = None
self.opts['defaults'] = None
def tearDown(self):
"""cleanup test data """
if os.path.exists(self.tempdir):
os.system("rm -rf {}".format(self.tempdir))
@mock.patch('dockerstache.dockerstache.process_templates')
def test_run(self, mock_process):
"""test run method"""
run(**self.opts)
self.failUnless(mock_process.called)
@mock.patch('dockerstache.dockerstache.process_templates')
def test_run_extend_context(self, mock_process):
"""test run method with extras for context"""
extend = {'extensions': {'extras': 'values'}}
self.opts['extend_context'] = extend
run(**self.opts)
self.failUnless(mock_process.called)
context = mock_process.call_args[0][2]
self.failUnless('extensions' in context)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
3618ce5749517c7757a04f0c08a74275e8e82b69 | Create fasttext.py | fasttext.py | fasttext.py | from __future__ import print_function
import numpy as np
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import GlobalAveragePooling1D
from keras.datasets import imdb
class FastText:
'''
Takes in pandas dataframe with at least two columns where one
is the dependent variable, and one is text.
EXAMPLE USE:
FastText(data,var)
If there is more than one possible depedent variable in df then
there you can run the moddle for any of it.
'''
def __init__(self,data,var):
self.data = data
self.var = var
self.null = self._configuration()
self.null = self._get_cube()
self.null = self._padding()
self.model = self._build_model()
def _configuration(self):
self.max_features = 125000
self.maxlen = 800
self.batch_size = 16
self.embedding_dims = 20
self.epochs = 2
return "NULL"
def _get_cube(self):
o = Cube(self.data,self.var)
self.x_train = o.x_train
self.y_train = o.y_train
self.x_test = o.x_test
self.y_test = o.y_test
return 'NULL'
def create_ngram_set(self,input_list, ngram_value=2):
return set(zip(*[input_list[i:] for i in range(ngram_value)]))
def add_ngram(self,sequences, token_indice, ngram_range=2):
new_sequences = []
for input_list in sequences:
new_list = input_list[:]
for i in range(len(new_list) - ngram_range + 1):
for ngram_value in range(2, ngram_range + 1):
ngram = tuple(new_list[i:i + ngram_value])
if ngram in token_indice:
new_list.append(token_indice[ngram])
new_sequences.append(new_list)
return new_sequences
def _padding(self):
self.x_train = sequence.pad_sequences(self.x_train, maxlen=self.maxlen)
self.x_test = sequence.pad_sequences(self.x_test, maxlen=self.maxlen)
return 'NULL'
def _build_model(self):
model = Sequential()
model.add(Embedding(self.max_features, # efficient embedding layer which maps
self.embedding_dims, # vocab indices into embedding_dims dimensions
input_length=self.maxlen))
model.add(GlobalAveragePooling1D()) # avg the embeddings of all words in the document
model.add(Dense(1, activation='hard_sigmoid')) # project onto a single unit
# output layer, and squash it
model.compile(loss='binary_crossentropy',
optimizer='adagrad',
metrics=['accuracy'])
model.fit(self.x_train, self.y_train,
batch_size=self.batch_size,
epochs=self.epochs,
validation_data=(self.x_test, self.y_test))
return model
| Python | 0.000031 | |
eb96fc1d59108a429ea2a03ee07d94a1a143139f | Manage the creation and launching of a cluster | multifil/aws/cluster.py | multifil/aws/cluster.py | #!/usr/bin/env python
# encoding: utf-8
"""
cluster.py - manage the behaviour and start up of a cluster
Created by Dave Williams on 2016-07-19
"""
import os
import sys
import time
import string
import copy
import subprocess as subp
import configparser
import boto
## Defaults
BASE_PATH = os.path.expanduser('~/code/multifil/')
CODE_DIR = 'multifil'
CODE_LOCATION = BASE_PATH + CODE_DIR
USER_DATA = CODE_LOCATION + '/aws/userdata.py'
CODE_BUCKET = 'model_code'
JOB_QUEUE = 'job_queue'
KEY_FILE = os.path.expanduser('~/.aws/keys/id_gsg-keypair')
KEY_NAME = 'gsg-keypair'
SECURITY_GROUP_ID = 'sg-1ddae166'
SUBNET_IDS = ('subnet-945f5ca9', 'subnet-c9ce2b80', # each corresponds to
'subnet-64e6d34e', 'subnet-bd94a0e5') # an availability zone
#AMI = ('ami-2d39803a', 'c4.xlarge') # Ubuntu
AMI = ('ami-2d39803a', 't2.medium') # Testing
SPOT_BID = 0.209 # bidding the on-demand price
## Helper functions, quite substantial
def print_direct(string):
"""Print the given string straight to the stdout"""
sys.stdout.truncate(0)
sys.stdout.write(string)
sys.stdout.flush()
return
def get_access_keys(filename=os.path.expanduser('~/.aws/credentials'),
section='cluster'):
"""Parse out the access and secret keys"""
config = configparser.ConfigParser()
config.read(filename)
id = config.get(section,'aws_access_key_id')
secret = config.get(section,'aws_secret_access_key')
return id, secret
def load_userdata(filename='userdata.py', queue_name=JOB_QUEUE):
id, secret = get_access_keys()
user_data_dict = {
'aws_access_key': id,
'aws_secret_key': secret,
'job_queue_name': queue_name,
'code_zip_key': "s3://%s/%s.zip"%(CODE_BUCKET, CODE_DIR)}
with open(filename, 'r') as udfile:
ud_template = string.Template(udfile.read())
return ud_template.substitute(user_data_dict)
def update_code_on_s3():
"""Update the code on s3 from our local copy"""
zipname = CODE_DIR+'.zip'
cmds = (
"cd ../..; zip -roTFS %s %s"%(zipname, CODE_DIR),
"cd ../..; aws s3 cp %s s3://%s/"%(zipname, CODE_BUCKET))
print(os.getcwd())
print(cmds)
[print(subp.call(c, shell=True)) for c in cmds]
def launch_on_demand_instances(ec2, num_of, userdata,
ami=AMI[0], inst_type=AMI[1]):
if len(userdata) > 16*1024:
print("error: User data file is too big")
return
reservation = ec2.run_instances(
image_id = ami,
key_name = KEY_NAME,
security_group_ids = [SECURITY_GROUP_ID],
user_data = userdata,
instance_type = inst_type,
min_count = num_of,
max_count = num_of,
subnet_id = SUBNET_IDS[1])
time.sleep(.5) # Give the machines time to register
nodes = copy.copy(reservation.instances)
return nodes
def launch_spot_instances(ec2, num_of, userdata, bid=SPOT_BID,
ami=AMI[0], inst_type=AMI[1]):
if len(userdata) > 16*1024:
print("error: User data file is too big")
return
reservation = ec2.request_spot_instances(
price = bid,
image_id = ami,
key_name = KEY_NAME,
security_group_ids = [SECURITY_GROUP_ID],
user_data = userdata,
instance_type = inst_type,
min_count = num_of,
max_count = num_of,
subnet_id = SUBNET_IDS[1])
time.sleep(.5) # Give the machines time to register
return reservation
class cluster(object):
def __init__(self,
number_of_instances,
queue_name=JOB_QUEUE,
userdata=USER_DATA,
use_spot=True):
"""A cluster management object"""
self.number_of_instances = number_of_instances
self.queue_name = queue_name
self.userdata = load_userdata(userdata, queue_name)
self.use_spot = use_spot
self.s3 = boto.connect_s3()
self.ec2 = boto.connect_ec2()
def launch(self):
"""Get the cluster rolling, manual to make you think a bit"""
print("Uploading code to S3")
update_code_on_s3()
print("Creating reservation")
# TODO: This next bit could be refactored to be prettier
if self.use_spot is True:
nodes = launch_spot_instances(self.ec2,
self.number_of_instances,
self.userdata)
ids = [node.id for node in nodes]
node_states = lambda nodes: [node.state == 'active'
for node in nodes]
node_update = lambda : self.ec2.get_all_spot_instance_requests(ids)
else:
nodes = launch_on_demand_instances(self.ec2,
self.number_of_instances,
self.userdata)
ids = [node.id for node in nodes]
node_states = lambda nodes: [node.state_code == 16
for node in nodes]
node_update = lambda : [inst for res in
self.ec2.get_all_instances(ids)
for inst in res.instances]
print("Nodes are starting...")
while not all(node_states(nodes)):
nodes = node_update()
ready = sum(node_states(nodes))
print_direct("\r%i of %i nodes are ready"%(ready, len(nodes)))
time.sleep(1)
print_direct("\nAll nodes ready \n")
self.nodes = nodes
return nodes
def kill_cluster(self):
"""Terminate the cluster nodes"""
try:
[node.terminate() for node in self.nodes]
except:
[node.cancel() for node in self.nodes]
ids = [node.instance_id for node in self.nodes]
[instance.terminate() for reservation in EC2.get_all_instances(ids)
for instance in reservation.instances]
def node_ip_addresses(self):
"""Print the ip addresses for each node"""
[print(instance.ip_address) for instance in self.nodes]
| Python | 0.000001 | |
d23b83f8052f1ca5a988b05c3893b884eb3be6cc | Add link.py | misc/link.py | misc/link.py | #!/usr/bin/env python
from signal import signal, SIGPIPE, SIG_DFL
signal(SIGPIPE, SIG_DFL)
import argparse
import csv
import sys
import itertools
from collections import defaultdict, Counter
from math import log
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics.pairwise import cosine_similarity as sim
from operator import itemgetter
from multiprocessing import Pool, cpu_count
parser = argparse.ArgumentParser()
parser.add_argument('--synsets', required=True)
parser.add_argument('--isas', required=True)
parser.add_argument('-k', nargs='?', type=int, default=6)
args = vars(parser.parse_args())
synsets, index, lexicon = {}, defaultdict(lambda: set()), set()
with open(args['synsets']) as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
synsets[int(row[0])] = [word.lower() for word in row[2].split(', ') if word]
for word in synsets[int(row[0])]:
index[word].add(int(row[0]))
lexicon.update(synsets[int(row[0])])
isas = defaultdict(lambda: set())
with open(args['isas']) as f:
reader = csv.reader(f, delimiter='\t', quoting=csv.QUOTE_NONE)
for hyponym, hypernym in reader:
if hyponym in lexicon and hypernym in lexicon:
isas[hyponym].add(hypernym)
idf, D = defaultdict(lambda: 0), .0
for words in synsets.values():
hypernyms = [isas[word] for word in words if word in isas]
if not hypernyms:
continue
for hypernym in set.union(*hypernyms):
idf[hypernym] += 1
D += 1
idf = {hypernym: log(D / df) for hypernym, df in idf.items()}
def tf(w, words):
return float(Counter(words)[w])
def tfidf(w, words):
return tf(w, words) * idf.get(w, 1.)
hctx = {}
for id, words in synsets.items():
hypernyms = list(itertools.chain(*(isas[word] for word in words if word in isas)))
if not hypernyms:
continue
hctx[id] = {word: tfidf(word, hypernyms) for word in hypernyms}
v = DictVectorizer().fit(hctx.values())
def emit(id):
hypernyms, vector, hsenses = hctx[id], v.transform(hctx[id]), {}
for hypernym in hypernyms:
candidates = {hid: synsets[hid] for hid in index[hypernym]}
if not candidates:
continue
candidates = {hid: {word: tfidf(word, words) for word in words} for hid, words in candidates.items()}
candidates = {hid: sim(vector, v.transform(words)) for hid, words in candidates.items()}
hid, cosine = max(candidates.items(), key=itemgetter(1))
if cosine > 0:
hsenses[(hypernym, hid)] = cosine
hsenses = dict(dict(sorted(hsenses.items(), key=itemgetter(1), reverse=True)[:args['k']]).keys())
return (id, hsenses)
i = 0
with Pool(cpu_count()) as pool:
for id, hsenses in pool.imap_unordered(emit, hctx):
i += 1
print('%d\t%s' % (id, ', '.join(('%s#%d' % e for e in hsenses.items()))))
if i % 1000 == 0:
print('%d entries out of %d done.' % (i, len(hctx)), file=sys.stderr, flush=True)
if len(hctx) % 1000 != 0:
print('%d entries out of %d done.' % (len(hctx), len(hctx)), file=sys.stderr, flush=True)
| Python | 0.000001 | |
7655e376696a04aa1c3596274861515953f592e8 | Add profiling script for savings code | openprescribing/frontend/price_per_unit/profile.py | openprescribing/frontend/price_per_unit/profile.py | """
Basic profiling code for working out where we're spending our time
Invoke with:
./manage.py shell -c 'from frontend.price_per_unit.profile import profile; profile()'
"""
from cProfile import Profile
import datetime
import time
from .savings import get_all_savings_for_orgs
def test():
get_all_savings_for_orgs("2019-11-01", "ccg", ["99C"])
# get_all_savings_for_orgs("2019-11-01", "all_standard_practices", [None])
def profile():
num_attempts = 5
attempts = []
for _ in range(num_attempts):
profiler = Profile()
start = time.time()
profiler.runcall(test)
duration = time.time() - start
attempts.append((duration, profiler))
attempts.sort()
profile_file = "profile.{}.prof".format(
datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
attempts[0][1].dump_stats(profile_file)
print(
"{}s (best of {}), profile saved as: {}".format(
attempts[0][0], num_attempts, profile_file
)
)
| Python | 0 | |
40caa4c9b720388207e338ffde3cd7f2d85cdf0d | add a single script to perform formatting of base log files | base-format.py | base-format.py | #!/usr/bin/python
from __future__ import print_function
import sys
import re
import datetime
import ircformatlib as il
timeformat_format = '%H:%M:%S'
timeformat_formatlen = 8
timeformat_filler = ' ' * timeformat_formatlen
def timeformat(time):
try:
x = int(time)
dt = datetime.datetime.fromtimestamp(round(x / 1000.0))
return dt.strftime(timeformat_format)
except:
return timeformat_filler
def colorized_newstate():
return { 'maxlen': 0, 'hits': {}, 'counts': {}, 'allocated': {},
'textmatcher': {} }
def colorized_text(state, text, leadstr=''):
state['maxlen'] = il.getmaxlen(leadstr + text, state['maxlen'])
color = il.getcolor(text, state['allocated'], state['counts'],
state['hits'])
il.uplogs(color, state['hits'])
return (il.getmaxpad(leadstr + text, state['maxlen']) + leadstr +
color + text + il.clearseq)
chanformat_state = colorized_newstate()
def chanformat(channel):
if not channel:
return ''
return colorized_text(chanformat_state, channel)
nameformat_state = colorized_newstate()
def nameformat(name):
leadstr = ''
for lead in ('--- ', '* '):
if name.startswith(lead):
leadstr = lead
name = name[len(lead):]
break
for perm in ('@', '+', '%', '*'):
if name.startswith(perm):
leadstr += perm
name = name[len(perm):]
break
return colorized_text(nameformat_state, name, leadstr)
def textformat(text):
return il.text_colorize(il.text_colorize(text,
chanformat_state['textmatcher'],
chanformat_state['allocated']),
nameformat_state['textmatcher'],
nameformat_state['allocated'])
def combine_parts(channel, time, name, text):
tcsep = ''
if time and channel:
tcsep = ' '
return time + tcsep + channel + ' ' + name + ' ' + text
def main():
try:
m = re.compile(r'(([^\t]+)\t)?([^\t]+)\t([^\t]+)\t([^\t]+)')
line = sys.stdin.readline()
while line:
r = m.match(line)
if r:
line = combine_parts(chanformat(r.group(2)),
timeformat(r.group(3)),
nameformat(r.group(4)),
textformat(r.group(5)))
else:
line = textformat(line)
print(line, end='')
sys.stdout.flush()
line = sys.stdin.readline()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
| Python | 0 | |
2766e8797515497e5569b31696416db68641c9b4 | Extend MediaRemovalMixin to move media files on updates | base/models.py | base/models.py | import os
from django.conf import settings
class MediaRemovalMixin(object):
"""
Removes all files associated with the model, as returned by the
get_media_files() method.
"""
# Models that use this mixin need to override this method
def get_media_files(self):
return
def delete(self, *args, **kwargs):
for media_file in self.get_media_files():
path = settings.MEDIA_ROOT + media_file
if os.path.exists(path):
os.remove(path)
return super(MediaRemovalMixin, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
if self.pk:
# Primary key exists, object is being edited
old_object = self.__class__.objects.get(pk=self.pk)
path_pairs = zip(old_object.get_media_files(),
self.get_media_files())
# Move each associated file to its new location
for (old_path, new_path) in path_pairs:
full_old_path = settings.MEDIA_ROOT + old_path
full_new_path = settings.MEDIA_ROOT + new_path
if old_path != new_path and os.path.exists(full_old_path):
os.rename(full_old_path, full_new_path)
return super(MediaRemovalMixin, self).save(*args, **kwargs)
| import os
from django.conf import settings
class MediaRemovalMixin(object):
"""
Removes all files associated with the model, as returned by the
get_media_files() method.
"""
# Models that use this mixin need to override this method
def get_media_files(self):
return
def delete(self):
for media_file in self.get_media_files():
path = settings.MEDIA_ROOT + media_file
if os.path.exists(path):
os.remove(path)
return super(MediaRemovalMixin, self).delete()
| Python | 0 |
24c642063ffcb3313545b2e1ba3abbb62aa98437 | Add cuit validator to utils module | nbs/utils/validators.py | nbs/utils/validators.py | # -*- coding: utf-8-*-
def validate_cuit(cuit):
"from: http://python.org.ar/pyar/Recetario/ValidarCuit by Mariano Reingart"
# validaciones minimas
if len(cuit) != 13 or cuit[2] != "-" or cuit [11] != "-":
return False
base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
cuit = cuit.replace("-", "")
# calculo digito verificador
aux = 0
for i in range(10):
aux += int(cuit[i]*base[i])
aux = 11 - (aux - (int(aux/11) * 11))
if aux == 11:
aux = 0
if aux == 10:
aux = 9
return aux == int(cuit[10])
| Python | 0 | |
7274f9286bd267970c286954e9d21e601af30cb7 | Create messenger.py | messenger.py | messenger.py | # -*- coding: utf-8 -*-
import requests
apiurl = '你的地址'
apiheaders = {'U-ApiKey': '你的key'}
code="动态码"
response = requests.get(apiurl, params={"media_id":'gh_3fc78df4c9d2',"auth_code":code, "scene":1,"device_no":1,"location":'jia'})
json = response.json()
print(json)
| Python | 0.000002 | |
620ad7f4dc5ed9403f468f592b99a22a92d22072 | make python -m i3configger work | i3configger/__main__.py | i3configger/__main__.py | import i3configger.main
if __name__ == "__main__":
i3configger.main.main()
| Python | 0.000012 | |
ad2178a8973ce2de55611321c0b7b57b1488fc6b | move utilities in a private module | appengine_toolkit/management/commands/_utils.py | appengine_toolkit/management/commands/_utils.py | import pkg_resources
import os
class RequirementNotFoundError(Exception):
pass
def collect_dependency_paths(package_name):
"""
TODO docstrings
"""
deps = []
try:
dist = pkg_resources.get_distribution(package_name)
except ValueError:
message = "Distribution '{}' not found.".format(package_name)
raise RequirementNotFoundError(message)
if dist.has_metadata('top_level.txt'):
for line in dist.get_metadata('top_level.txt').split():
deps.append(os.path.join(dist.location, line))
for req in dist.requires():
deps.extend(collect_dependency_paths(req.project_name))
return deps
| Python | 0 | |
79b99968d7c9e728efe05f8c962bdda5c9d56559 | Add LDAP authentication plugin | web/utils/auth.py | web/utils/auth.py | # http://www.djangosnippets.org/snippets/501/
from django.contrib.auth.models import User
from django.conf import settings
import ldap
class ActiveDirectoryBackend:
supports_object_permissions = False
supports_anonymous_user = False
supports_inactive_user = False
def authenticate(self, username=None, password=None):
if username:
username = username.lower()
if not self.is_valid(username, password):
return None
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
l = ldap.initialize(settings.AD_LDAP_URL)
binddn = '%s@%s' % (username, settings.AD_NT4_DOMAIN)
l.simple_bind_s(binddn, password)
result = l.search_ext_s(settings.AD_SEARCH_DN, ldap.SCOPE_SUBTREE,
'sAMAccountName=%s' % username, settings.AD_SEARCH_FIELDS)[0][1]
l.unbind_s()
# givenName == First Name
if 'givenName' in result:
first_name = result['givenName'][0]
else:
first_name = None
# sn == Last Name (Surname)
if 'sn' in result:
last_name = result['sn'][0]
else:
last_name = None
# mail == Email Address
if 'mail' in result:
email = result['mail'][0]
else:
email = None
user = User(username=username, first_name=first_name, last_name=last_name, email=email)
user.is_staff = False
user.is_superuser = False
user.set_password(password)
user.save()
return user
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def is_valid(self, username=None, password=None):
# Disallowing null or blank string as password
# as per comment: http://www.djangosnippets.org/snippets/501/#c868
if password is None or password == '':
return False
if username:
username = username.lower()
binddn = '%s@%s' % (username, settings.AD_NT4_DOMAIN)
try:
l = ldap.initialize(settings.AD_LDAP_URL)
l.simple_bind_s(binddn, password)
l.unbind_s()
return True
except ldap.LDAPError:
return False
| Python | 0 | |
6f8460b10827a9877fd0c3f0d45a01e7b2d42014 | Create ios.py | bitcoin/ios.py | bitcoin/ios.py | import ecdsa
import binascii
import hashlib
import struct
from bitcoin.main import *
from bitcoin.pyspecials import *
# https://gist.github.com/b22e178cff75c4b432a8
# Returns byte string value, not hex string
def varint(n):
if n < 0xfd:
return struct.pack('<B', n)
elif n < 0xffff:
return struct.pack('<cH', '\xfd', n)
elif n < 0xffffffff:
return struct.pack('<cL', '\xfe', n)
else:
return struct.pack('<cQ', '\xff', n)
# Takes and returns byte string value, not hex string
def varstr(s):
return varint(len(s)) + s
def privtopub(s):
# accepts hex encoded (sec) key, returns hex pubkey
sk = ecdsa.SigningKey.from_string(s.decode('hex'), curve=ecdsa.SECP256k1)
#vk = sk.verifying_key
return '04' binascii.hexlify(sk.verifying_key.to_string()) # TODO: add compressed func
# Input is a hex-encoded, DER-encoded signature
# Output is a 64-byte hex-encoded signature
def derSigToHexSig(s):
s, junk = ecdsa.der.remove_sequence(s.decode('hex'))
if junk != '':
print 'JUNK', junk.encode('hex')
assert(junk == '')
x, s = ecdsa.der.remove_integer(s)
y, s = ecdsa.der.remove_integer(s)
return '%064x%064x' % (x, y)
def readyRawTx(rawtx, scriptpubkey, hashcode=1):
# takes rawtx and inserts scriptpubkey into scriptsig and appends '01000000'
seqidx = rawtx.find('00ffffffff')
rawtx.replace('00fffffffff', scriptpubkey+'ffffffff')
return rawtx + binascii.hexlify(struct.pack('<L',1))
def signTx(rawtx, privkey, spk, hashcode=1):
# rawtx = unsigned Tx w/ scriptPubKey in ScriptSig and '01000000' appended
rawtx = readyRawTx(rawtx, spk, hashcode=hashcode)
s256 = hashlib.sha256(hashlib.sha256(rawtx.decode('hex')).digest()).digest()
sk = ecdsa.SigningKey.from_string(privkey.decode('hex'), curve=ecdsa.SECP256k1)
sig = sk.sign_digest(s256, sigencode=ecdsa.util.sigencode_der) + '\01' # 01 is hashtype
pubKey = privtopub(privkey)
scriptSig = varstr(sig).encode('hex') + varstr(pubKey.decode('hex')).encode('hex')
return scriptSig
def privkey_to_pubkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey, f)
if privkey >= N:
raise Exception("Invalid privkey")
if f in ['bin', 'bin_compressed', 'hex', 'hex_compressed', 'decimal']:
try:
return encode_pubkey(fast_multiply(G, privkey), f)
except RuntimeError:
assert f is 'hex'
import bitcoin.ios as ios
return ios.privtopub(privkey)
else:
try: return encode_pubkey(fast_multiply(G, privkey), f.replace('wif', 'hex'))
except RuntimeError:
assert f in ('hex', 'wif')
import bitcoin.ios as ios
return ios.privtopub(privkey)
# SIG = '47304402202c2e1a746c556546f2c959e92f2d0bd2678274823cc55e11628284e4a13016f80220797e716835f9dbcddb752cd0115a970a022ea6f2d8edafff6e087f928e41baac014104392b964e911955ed50e4e368a9476bc3f9dcc134280e15636430eb91145dab739f0d68b82cf33003379d885a0b212ac95e9cddfd2d391807934d25995468bc55'
#if __name__ == '__main__':
# unittest.main()
| Python | 0.000005 | |
8e1a3cc1a3d4e4d9bc63fb73a8787e5c627afb7d | add tests for service inspector | tests/test_service_inspector.py | tests/test_service_inspector.py | from __future__ import absolute_import
import unittest
import servy.server
class Dummy(object):
def fn(self):
pass
class Service(servy.server.Service):
def __call__(self):
pass
class ServiceDetection(unittest.TestCase):
def test_lambda(self):
self.assertTrue(servy.server.ServiceInspector.is_service(lambda x: x))
def test_method(self):
self.assertTrue(servy.server.ServiceInspector.is_service(Dummy().fn))
def test_callable_class_service(self):
self.assertTrue(servy.server.ServiceInspector.is_service(Service()))
def test_type(self):
self.assertFalse(servy.server.ServiceInspector.is_service(dict))
def test_int(self):
self.assertFalse(servy.server.ServiceInspector.is_service(1))
def test_string(self):
self.assertFalse(servy.server.ServiceInspector.is_service("1"))
def test_dummy_class(self):
self.assertFalse(servy.server.ServiceInspector.is_service(Dummy))
class ContainerDetection(unittest.TestCase):
def test_dict(self):
self.assertTrue(servy.server.ServiceInspector.is_container({}))
def test_service_class(self):
self.assertTrue(servy.server.ServiceInspector.is_container(Service))
def test_service_class_instance(self):
self.assertTrue(servy.server.ServiceInspector.is_container(Service()))
def test_dummy_class(self):
self.assertFalse(servy.server.ServiceInspector.is_container(Dummy))
class PublicMethodsDetection(unittest.TestCase):
def test_double_underscores(self):
items = {
'__private': None,
}
self.assertEqual(
servy.server.ServiceInspector.get_public(items.items()),
{},
)
def test_single_underscores(self):
items = {
'_private': None,
}
self.assertEqual(
servy.server.ServiceInspector.get_public(items.items()),
{},
)
| Python | 0.000001 | |
43de875bcb2dcf4213b881ff1de8f9e715fb2d30 | Add brute_force.py | brute_force.py | brute_force.py | from battingorder import *
from itertools import permutations
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Brute force.')
parser.add_argument("filename", nargs='?', default='braves.data', help="file with necessary statistics")
args = parser.parse_args()
player_matrices = readdata(args.filename)
run_matrix = createrunmatrix()
start_order = range(9)
samples = []
for order in permutations(start_order):
score = calculate(order, player_matrices, run_matrix)
samples.append((score, order))
samples.sort(reverse=True)
best = samples[0]
print("Final ordering: {}".format(best[1]))
print("This lineup will score an average of {} runs per game.".format(best[0]))
| Python | 0.99866 | |
b013f059a5d39acf05ba8e5ef9d6cb1d9e3f724c | add a script to exercise the example jsonrpc methods | tester.py | tester.py | import zmq
class JRPC:
def __init__(self):
self.id = 0
def make_req(self, method, params):
req = {"jsonrpc":"2.0", "method":method, "params":params,
"id":self.id}
self.id += 1
return req
zctx = zmq.Context.instance()
zsock = zctx.socket(zmq.REQ)
zsock.connect("tcp://127.0.0.1:10000")
jrpc = JRPC()
req = jrpc.make_req("echo", [10, 5])
zsock.send_json(req)
print zsock.recv()
req = jrpc.make_req("subtract", {"minuend":10, "subtrahend":5})
zsock.send_json(req)
print zsock.recv()
req = jrpc.make_req("subtract", [10, 5])
zsock.send_json(req)
print zsock.recv()
req_array = []
for k in range(10):
req = jrpc.make_req("sum", range(1+k))
req_array.append(req)
zsock.send_json(req_array)
print zsock.recv()
| Python | 0 | |
0dcf9178564b879a51b06ae06df58917f78adb6d | Fix linting | tensorflow_datasets/image/nyu_depth_v2.py | tensorflow_datasets/image/nyu_depth_v2.py | """NYU Depth V2 Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import h5py
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
_CITATION = """\
@inproceedings{Silberman:ECCV12,
author = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
title = {Indoor Segmentation and Support Inference from RGBD Images},
booktitle = {ECCV},
year = {2012}
}
@inproceedings{icra_2019_fastdepth,
author = {Wofk, Diana and Ma, Fangchang and Yang, Tien-Ju and Karaman, Sertac and Sze, Vivienne},
title = {FastDepth: Fast Monocular Depth Estimation on Embedded Systems},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2019}
}
"""
_DESCRIPTION = """\
The NYU-Depth V2 data set is comprised of video sequences from a variety of
indoor scenes as recorded by both the RGB and Depth cameras from the
Microsoft Kinect.
"""
_URL = 'http://datasets.lids.mit.edu/fastdepth/data/nyudepthv2.tar.gz'
class NyuDepthV2(tfds.core.GeneratorBasedBuilder):
"""NYU Depth V2 Dataset."""
VERSION = tfds.core.Version('0.0.1')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(shape=(480, 640, 3)),
'depth': tfds.features.Tensor(shape=(480, 640), dtype=tf.float16),
}),
supervised_keys=('image', 'depth'),
homepage='https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
base_path = dl_manager.download_and_extract(_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'root_dir': os.path.join(base_path, 'nyudepthv2', 'train')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'root_dir': os.path.join(base_path, 'nyudepthv2', 'val')
},
),
]
def _generate_examples(self, root_dir):
"""Yields examples."""
for directory in tf.io.gfile.listdir(root_dir):
for file_name in tf.io.gfile.listdir(os.path.join(root_dir, directory)):
with h5py.File(os.path.join(root_dir, directory, file_name), 'r') as f:
yield directory + '_' + file_name, {
'image': np.transpose(f["rgb"], (1, 2, 0)),
'depth': f['depth'][:].astype('float16')
}
| """NYU Depth V2 Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import h5py
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
_CITATION = """\
@inproceedings{Silberman:ECCV12,
author = {Nathan Silberman, Derek Hoiem, Pushmeet Kohli and Rob Fergus},
title = {Indoor Segmentation and Support Inference from RGBD Images},
booktitle = {ECCV},
year = {2012}
}
@inproceedings{icra_2019_fastdepth,
author = {Wofk, Diana and Ma, Fangchang and Yang, Tien-Ju and Karaman, Sertac and Sze, Vivienne},
title = {FastDepth: Fast Monocular Depth Estimation on Embedded Systems},
booktitle = {IEEE International Conference on Robotics and Automation (ICRA)},
year = {2019}
}
"""
_DESCRIPTION = """\
The NYU-Depth V2 data set is comprised of video sequences from a variety of
indoor scenes as recorded by both the RGB and Depth cameras from the
Microsoft Kinect.
"""
_URL = 'http://datasets.lids.mit.edu/fastdepth/data/nyudepthv2.tar.gz'
class NyuDepthV2(tfds.core.GeneratorBasedBuilder):
"""NYU Depth V2 Dataset."""
VERSION = tfds.core.Version('0.0.1')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(shape=(480, 640, 3)),
'depth': tfds.features.Tensor(shape=(480, 640), dtype=tf.float16),
}),
supervised_keys=('image', 'depth'),
homepage='https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
base_path = dl_manager.download_and_extract(_URL)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'root_dir': os.path.join(base_path, 'nyudepthv2', 'train')
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'root_dir': os.path.join(base_path, 'nyudepthv2', 'val')
},
),
]
def _generate_examples(self, root_dir):
"""Yields examples."""
for dir in tf.io.gfile.listdir(root_dir):
for file_name in tf.io.gfile.listdir(os.path.join(root_dir, dir)):
with h5py.File(os.path.join(root_dir, dir, file_name), 'r') as file:
yield dir + '_' + file_name, {
'image': np.transpose(file["rgb"], (1, 2, 0)),
'depth': file['depth'][:].astype('float16')
}
| Python | 0.000004 |
061dcecdd7b691cefd34c8a254037a399b251378 | add a new script to build a pypi 'simple' index from a dir containing wheels | build_index.py | build_index.py | import sys
import py
PACKAGES = [
'netifaces',
]
class IndexBuilder(object):
def __init__(self, wheeldir, outdir):
self.wheeldir = py.path.local(wheeldir)
self.outdir = py.path.local(outdir)
self.packages = []
def copy_wheels(self):
for whl in self.wheeldir.visit('*.whl'):
name, version = self.parse(whl)
self.packages.append(name)
d = self.outdir.join(name).ensure(dir=True)
dst = d.join(whl.basename)
if dst.check(file=False):
whl.copy(d)
def build_index(self):
self._write_index(self.outdir, 'PyPy Wheel Index', self.packages)
for pkg in self.packages:
d = self.outdir.join(pkg)
wheels = [whl.basename for whl in d.listdir('*.whl')]
self._write_index(d, 'Links for %s' % pkg, wheels)
def parse(self, f):
name, version, _ = f.basename.split('-', 2)
return name, version
def _write_index(self, d, title, links):
lines = [
'<html><body><h1>{title}</h1>'.format(title=title)
]
for name in links:
line = '<a href="{name}">{name}</a>'.format(name=name)
lines.append(line)
lines.append('</body></html>')
html = '\n'.join(lines)
d.join('index.html').write(html)
def main():
wheeldir = sys.argv[1]
outdir = sys.argv[2]
index = IndexBuilder(wheeldir, outdir)
index.copy_wheels()
index.build_index()
if __name__ == '__main__':
main()
| Python | 0 | |
b22bf4e2431ac3598d9c8afee3f924d940e2297e | Create building_df.py | building_df.py | building_df.py | """Utility functions"""
import os
import pandas as pd
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols: # add SPY for reference, if absent
symbols.insert(0, 'SPY')
for symbol in symbols:
#Read and join data for each symbol
df.join(symbol,'inner')
return df
def test_run():
# Define a date range
dates = pd.date_range('2010-01-22', '2010-01-26')
# Choose stock symbols to read
symbols = ['GOOG', 'IBM', 'GLD']
# Get stock data
df = get_data(symbols, dates)
print df
if __name__ == "__main__":
test_run()
| Python | 0.000002 | |
7d84cf8c41105d9990b8cfdf176415f1bcb20e0f | Add tests for batch norm | thinc/tests/integration/test_batch_norm.py | thinc/tests/integration/test_batch_norm.py | import pytest
from mock import MagicMock
import numpy
import numpy.random
from numpy.testing import assert_allclose
from hypothesis import given, settings, strategies
from ...neural._classes.batchnorm import BatchNorm
from ...api import layerize, noop
from ...neural._classes.affine import Affine
from ..strategies import arrays_OI_O_BI
from ..util import get_model, get_shape
@pytest.fixture
def shape():
return (10, 20)
@pytest.fixture
def layer(shape):
dummy = layerize(noop())
dummy.nO = shape[-1]
return dummy
def test_batch_norm_init(layer):
layer = BatchNorm(layer)
def test_batch_norm_weights_init_to_one(layer):
layer = BatchNorm(layer)
assert layer.G is not None
assert all(weight == 1. for weight in layer.G.flatten())
def test_batch_norm_runs_child_hooks(layer):
mock_hook = MagicMock()
layer.on_data_hooks.append(mock_hook)
layer = BatchNorm(layer)
for hook in layer.on_data_hooks:
hook(layer, None)
mock_hook.assert_called()
def test_batch_norm_predict_maintains_shape(layer, shape):
input_ = numpy.ones(shape)
input1 = layer.predict(input_)
assert_allclose(input1, input_)
layer = BatchNorm(layer)
output = layer.predict(input_)
assert output.shape == input_.shape
@given(arrays_OI_O_BI(max_batch=8, max_out=8, max_in=8))
def test_begin_update_matches_predict(W_b_input):
model = get_model(W_b_input)
nr_batch, nr_out, nr_in = get_shape(W_b_input)
W, b, input_ = W_b_input
model = BatchNorm(model)
fwd_via_begin_update, finish_update = model.begin_update(input_)
fwd_via_predict_batch = model.predict(input_)
assert_allclose(fwd_via_begin_update, fwd_via_predict_batch)
@given(arrays_OI_O_BI(max_batch=8, max_out=8, max_in=8))
def test_finish_update_calls_optimizer_with_weights(W_b_input):
model = get_model(W_b_input)
nr_batch, nr_out, nr_in = get_shape(W_b_input)
W, b, input_ = W_b_input
model = BatchNorm(model)
output, finish_update = model.begin_update(input_)
seen_keys = set()
def sgd(data, gradient, key=None, **kwargs):
seen_keys.add(key)
assert data.shape == gradient.shape
assert data.ndim == 1
assert gradient.ndim == 1
grad_BO = numpy.ones((nr_batch, nr_out))
grad_BI = finish_update(grad_BO, sgd)
assert seen_keys == {id(model._mem), id(model.child._mem)}
| Python | 0 | |
cca6eee8dbf4dda84c74dfedef1cf4bcb5264ca5 | Add the first database revision | admin/migrations/versions/ff0417f4318f_.py | admin/migrations/versions/ff0417f4318f_.py | """ Initial schema
Revision ID: ff0417f4318f
Revises: None
Create Date: 2016-06-25 13:07:11.132070
"""
# revision identifiers, used by Alembic.
revision = 'ff0417f4318f'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('domain',
sa.Column('created_at', sa.Date(), nullable=False),
sa.Column('updated_at', sa.Date(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('max_users', sa.Integer(), nullable=False),
sa.Column('max_aliases', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('name')
)
op.create_table('alias',
sa.Column('created_at', sa.Date(), nullable=False),
sa.Column('updated_at', sa.Date(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('localpart', sa.String(length=80), nullable=False),
sa.Column('destination', sa.String(), nullable=False),
sa.Column('domain_name', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ),
sa.PrimaryKeyConstraint('email')
)
op.create_table('user',
sa.Column('created_at', sa.Date(), nullable=False),
sa.Column('updated_at', sa.Date(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('localpart', sa.String(length=80), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('quota_bytes', sa.Integer(), nullable=False),
sa.Column('global_admin', sa.Boolean(), nullable=False),
sa.Column('enable_imap', sa.Boolean(), nullable=False),
sa.Column('enable_pop', sa.Boolean(), nullable=False),
sa.Column('forward_enabled', sa.Boolean(), nullable=False),
sa.Column('forward_destination', sa.String(length=255), nullable=True),
sa.Column('reply_enabled', sa.Boolean(), nullable=False),
sa.Column('reply_subject', sa.String(length=255), nullable=True),
sa.Column('reply_body', sa.Text(), nullable=True),
sa.Column('displayed_name', sa.String(length=160), nullable=False),
sa.Column('spam_enabled', sa.Boolean(), nullable=False),
sa.Column('spam_threshold', sa.Numeric(), nullable=False),
sa.Column('domain_name', sa.String(length=80), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ),
sa.PrimaryKeyConstraint('email')
)
op.create_table('fetch',
sa.Column('created_at', sa.Date(), nullable=False),
sa.Column('updated_at', sa.Date(), nullable=True),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_email', sa.String(length=255), nullable=False),
sa.Column('protocol', sa.Enum('imap', 'pop3'), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('port', sa.Integer(), nullable=False),
sa.Column('tls', sa.Boolean(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['user_email'], ['user.email'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('manager',
sa.Column('domain_name', sa.String(length=80), nullable=True),
sa.Column('user_email', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ),
sa.ForeignKeyConstraint(['user_email'], ['user.email'], )
)
def downgrade():
op.drop_table('manager')
op.drop_table('fetch')
op.drop_table('user')
op.drop_table('alias')
op.drop_table('domain')
| Python | 0.000001 | |
7473384155edbf85304cc541325d0a94a75d2cf4 | Add converting script | labs/12_i2c_oled_display/convert.py | labs/12_i2c_oled_display/convert.py | import imageio
import sys
import os
import numpy as np
if (len(sys.argv) != 2):
print("Format: python convert.py grayscale_image_name")
sys.exit(1)
try:
data = imageio.imread(sys.argv[1])
except:
print("Wrong image name!")
sys.exit(1)
if (len(data.shape) != 2):
print("Image must be grayscale!")
sys.exit(1)
output = open(os.path.splitext(sys.argv[1])[0] + ".c", "w")
output.write("const unsigned char my_pic[] = {\n")
image = data.flatten(order='C')
fimage = np.array_split(image, image.shape[0]//16)
for chunk in fimage:
fstr = ', '.join(['0x%02x'%x for x in chunk])
output.write(" " + fstr)
output.write(",\n")
output.write("}")
output.close()
print("Done! The array is stored in " +\
os.path.splitext(sys.argv[1])[0] + ".c")
| Python | 0 | |
2448f1d6835129bc08855a9ecc59fea347a14243 | add re.escape for match_with_format | onlinejudge/implementation/format_utils.py | onlinejudge/implementation/format_utils.py | # Python Version: 3.x
import onlinejudge
import onlinejudge.implementation.utils as utils
import onlinejudge.implementation.logging as log
import collections
import glob
import pathlib
import re
import sys
from typing import Dict, List, Match, Optional
def glob_with_format(directory: pathlib.Path, format: str) -> List[pathlib.Path]:
table = {}
table['s'] = '*'
table['e'] = '*'
pattern = str(directory / utils.percentformat(format, table))
paths = list(map(pathlib.Path, glob.glob(pattern)))
for path in paths:
log.debug('testcase globbed: %s', path)
return paths
def match_with_format(directory: pathlib.Path, format: str, path: pathlib.Path) -> Optional[Match[str]]:
table = {}
table['s'] = '(?P<name>.+)'
table['e'] = '(?P<ext>in|out)'
pattern = re.compile('^' + re.escape(str(directory.resolve())) +
'/' + utils.percentformat(re.escape(format), table) + '$')
return pattern.match(str(path.resolve()))
def path_from_format(directory: pathlib.Path, format: str, name: str, ext: str) -> pathlib.Path:
table = {}
table['s'] = name
table['e'] = ext
return directory / utils.percentformat(format, table)
def is_backup_or_hidden_file(path: pathlib.Path) -> bool:
basename = path.stem
return basename.endswith('~') or (basename.startswith('#') and basename.endswith('#')) or basename.startswith('.')
def drop_backup_or_hidden_files(paths: List[pathlib.Path]) -> List[pathlib.Path]:
result = [] # type: List[pathlib.Path]
for path in paths:
if is_backup_or_hidden_file(path):
log.warning('ignore a backup file: %s', path)
else:
result += [path]
return result
def construct_relationship_of_files(paths: List[pathlib.Path], directory: pathlib.Path, format: str) -> Dict[str, Dict[str, pathlib.Path]]:
# type: Dict[str, Dict[str, pathlib.Path]]
tests = collections.defaultdict(dict)
for path in paths:
m = match_with_format(directory, format, path.resolve())
if not m:
log.error('unrecognizable file found: %s', path)
sys.exit(1)
name = m.groupdict()['name']
ext = m.groupdict()['ext']
assert ext not in tests[name]
tests[name][ext] = path
for name in tests:
if 'in' not in tests[name]:
assert 'out' in tests[name]
log.error('dangling output case: %s', tests[name]['out'])
sys.exit(1)
if not tests:
log.error('no cases found')
sys.exit(1)
log.info('%d cases found', len(tests))
return tests
| # Python Version: 3.x
import onlinejudge
import onlinejudge.implementation.utils as utils
import onlinejudge.implementation.logging as log
import collections
import glob
import pathlib
import re
import sys
from typing import Dict, List, Match, Optional
def glob_with_format(directory: pathlib.Path, format: str) -> List[pathlib.Path]:
table = {}
table['s'] = '*'
table['e'] = '*'
pattern = str(directory / utils.percentformat(format, table))
paths = list(map(pathlib.Path, glob.glob(pattern)))
for path in paths:
log.debug('testcase globbed: %s', path)
return paths
def match_with_format(directory: pathlib.Path, format: str, path: pathlib.Path) -> Optional[Match[str]]:
table = {}
table['s'] = '(?P<name>.+)'
table['e'] = '(?P<ext>in|out)'
pattern = re.compile('^' + str(directory.resolve()) + '/' + utils.percentformat(format, table) + '$')
return pattern.match(str(path.resolve()))
def path_from_format(directory: pathlib.Path, format: str, name: str, ext: str) -> pathlib.Path:
table = {}
table['s'] = name
table['e'] = ext
return directory / utils.percentformat(format, table)
def is_backup_or_hidden_file(path: pathlib.Path) -> bool:
basename = path.stem
return basename.endswith('~') or (basename.startswith('#') and basename.endswith('#')) or basename.startswith('.')
def drop_backup_or_hidden_files(paths: List[pathlib.Path]) -> List[pathlib.Path]:
result = [] # type: List[pathlib.Path]
for path in paths:
if is_backup_or_hidden_file(path):
log.warning('ignore a backup file: %s', path)
else:
result += [ path ]
return result
def construct_relationship_of_files(paths: List[pathlib.Path], directory: pathlib.Path, format: str) -> Dict[str, Dict[str, pathlib.Path]]:
tests = collections.defaultdict(dict) # type: Dict[str, Dict[str, pathlib.Path]]
for path in paths:
m = match_with_format(directory, format, path.resolve())
if not m:
log.error('unrecognizable file found: %s', path)
sys.exit(1)
name = m.groupdict()['name']
ext = m.groupdict()['ext']
assert ext not in tests[name]
tests[name][ext] = path
for name in tests:
if 'in' not in tests[name]:
assert 'out' in tests[name]
log.error('dangling output case: %s', tests[name]['out'])
sys.exit(1)
if not tests:
log.error('no cases found')
sys.exit(1)
log.info('%d cases found', len(tests))
return tests
| Python | 0 |
3db7c5502bcba0adbfbcf6649c0b4179b37cd74a | Create redis_board.py | simpleRaft/boards/redis_board.py | simpleRaft/boards/redis_board.py | import redis
from board import Board
class RedisBoard( Board ):
"""This will create a message board that is backed by Redis."""
def __init__( self, *args, **kwargs ):
"""Creates the Redis connection."""
self.redis = redis.Redis( *args, **kwargs )
def set_owner( self, owner ):
self.owner = owner
def post_message( self, message ):
"""This will append the message to the list."""
pass
def get_message( self ):
"""This will pop a message off the list."""
pass
def _key( self ):
if not self.key:
self.key = "%s-queue" % self.owner
return self.key
| Python | 0.000001 | |
69fbab70f09f83e763f9af7ff02d028af62d8d89 | Create weighted_4_node_probability_convergence.py | weighted_4_node_probability_convergence.py | weighted_4_node_probability_convergence.py | # statistics on convergence_weighted_4_node.txt
# output into a csv file
import re,sys, numpy as np, pandas as pd
from pandas import Series, DataFrame
def main(argv):
author = ''
play = ''
sub = []
play_subgraph=Series()
l=''
subgraph = ''
subgraphs = []
pro = 0.0
pros = []
f = open('./convergence_weighted_4_node.txt','r')
fi = open('./convergence_weighted_4_node.csv','w')
# first to get the full index of subgraphs
for line in f:
if '*:' in line or '-:' in line:
continue
l = re.split(':',line.strip())
subgraph = l[0]
if subgraph not in sub:
sub.append(subgraph)
df = DataFrame(index=sub)
f.seek(0)
for line in f:
if '*:' in line:
author = line[10:12]
elif '-:' in line:
if play!='':
play_subgraph = Series(pros,index=subgraphs)
#play_subgraph=Series(sub_pro,index=sub,dtype=float)
play_subgraph.name=author+':'+play
play_subgraph.index.name='probability'
df[play_subgraph.name]=play_subgraph
#if author=='Sh':
# print 'play_subgraph.name = '+play_subgraph.name
# print play_subgraph
# print 'df'
# print df[play_subgraph.name]
play = re.split('-',line)[6]
subgraphs = []
pros = []
else:
l = re.split(':',line.strip())
subgraph = l[0]
pro = float(l[-1])
subgraphs.append(subgraph)
pros.append(pro)
#sub_pro[subgraph] = pro
print 'sub has '+str(len(sub))+' lines.'
#df.fillna(0)
#print df
df.to_csv(fi)
#print sub
if __name__ == '__main__':
main(sys.argv)
| Python | 0.000003 | |
f414c122eea771da74efc5837b7bd650ec022445 | normalise - adds new ffv1/mkv script | normalise.py | normalise.py | #!/usr/bin/env python
'''
Performs normalisation to FFV1/Matroska.
This performs a basic normalisation and does not enforce any folder structure.
This supercedes makeffv1 within our workflows. This is mostly because makeffv1 imposes a specific, outdated
folder structure, and it's best to let SIPCREATOR handle the folder structure and let normalise.py handle
the actual normalisation.
'''
import sys
import os
import subprocess
import ififuncs
def extract_provenance(filename):
'''
This will extract mediainfo and mediatrace XML
'''
parent_folder = os.path.dirname(filename)
inputxml = "%s/%s_mediainfo.xml" % (parent_folder, os.path.basename(filename))
inputtracexml = "%s/%s_mediatrace.xml" % (parent_folder, os.path.basename(filename))
print(' - Generating mediainfo xml of input file and saving it in %s' % inputxml)
ififuncs.make_mediainfo(inputxml, 'mediaxmlinput', filename)
print ' - Generating mediatrace xml of input file and saving it in %s' % inputtracexml
ififuncs.make_mediatrace(inputtracexml, 'mediatracexmlinput', filename)
return parent_folder
def normalise_process(filename):
'''
Begins the actual normalisation process using FFmpeg
'''
output_uuid = ififuncs.create_uuid()
print(' - The following UUID has been generated: %s' % output_uuid)
parent_folder = os.path.dirname(filename)
output = "%s/%s.mkv" % (
parent_folder, output_uuid
)
print(' - The normalise file will have this filename: %s' % output)
fmd5 = "%s/%s_source.framemd5" % (
parent_folder, os.path.basename(filename)
)
print(' - Framemd5s for each frame of your input file will be stored in: %s' % fmd5)
ffv1_logfile = os.path.join(parent_folder, '%s_normalise.log' % output_uuid)
print(' - The FFmpeg logfile for the transcode will be stored in: %s' % ffv1_logfile)
print(' - FFmpeg will begin normalisation now.')
ffv1_env_dict = ififuncs.set_environment(ffv1_logfile)
ffv1_command = [
'ffmpeg',
'-i', filename,
'-c:v', 'ffv1', # Use FFv1 codec
'-g', '1', # Use intra-frame only aka ALL-I aka GOP=1
'-level', '3', # Use Version 3 of FFv1
'-c:a', 'copy', # Copy and paste audio bitsream with no transcoding
'-map', '0',
'-dn',
'-report',
'-slicecrc', '1',
'-slices', '16',
]
if ififuncs.check_for_fcp(filename) is True:
print(' - A 720/576 file with no Pixel Aspect Ratio and scan type metadata has been detected.')
ffv1_command += [
'-vf',
'setfield=tff, setdar=4/3'
]
print(' - -vf setfield=tff, setdar=4/3 will be added to the FFmpeg command.')
ffv1_command += [
output,
'-f', 'framemd5', '-an', # Create decoded md5 checksums for every frame of the input. -an ignores audio
fmd5
]
print(ffv1_command)
subprocess.call(ffv1_command, env=ffv1_env_dict)
return output, output_uuid, fmd5
def verify_losslessness(parent_folder, output, output_uuid, fmd5):
'''
Verify the losslessness of the process using framemd5.
An additional metadata check should also occur.
'''
fmd5_logfile = os.path.join(parent_folder, '%s_framemd5.log' % output_uuid)
fmd5ffv1 = "%s/%s.framemd5" % (parent_folder, output_uuid)
print(' - Framemd5s for each frame of your output file will be stored in: %s' % fmd5ffv1)
fmd5_env_dict = ififuncs.set_environment(fmd5_logfile)
print(' - FFmpeg will attempt to verify the losslessness of the normalisation by using Framemd5s.')
fmd5_command = [
'ffmpeg', # Create decoded md5 checksums for every frame
'-i', output,
'-report',
'-f', 'framemd5', '-an',
fmd5ffv1
]
print fmd5_command
subprocess.call(fmd5_command, env=fmd5_env_dict)
checksum_mismatches = ififuncs.diff_framemd5s(fmd5, fmd5ffv1)
if len(checksum_mismatches) > 0:
print 'not lossless'
else:
print 'lossless'
def main():
print('\n - Normalise.py started')
source = sys.argv[1]
file_list = ififuncs.get_video_files(source)
for filename in file_list:
print('\n - Processing: %s' % filename)
parent_folder = extract_provenance(filename)
output, output_uuid, fmd5 = normalise_process(filename)
verify_losslessness(parent_folder, output, output_uuid, fmd5)
if __name__ == '__main__':
main() | Python | 0 | |
944ec176f4d6db70f9486dddab9a6cf901d6d575 | Create MyUsefulExample.py | src/zhang/MyUsefulExample.py | src/zhang/MyUsefulExample.py | #JUST EXAMPLES
import pyspark.ml.recommendation
df = spark.createDataFrame(
... [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
... ["user", "item", "rating"])
als = ALS(rank=10, maxIter=5, seed=0)
model = als.fit(df)
model.rank
#10
model.userFactors.orderBy("id").collect()
#[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
test = spark.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
predictions[0]
#Row(user=0, item=2, prediction=-0.13807615637779236)
predictions[1]
#Row(user=1, item=0, prediction=2.6258413791656494)
predictions[2]
#Row(user=2, item=0, prediction=-1.5018409490585327)
als_path = temp_path + "/als"
als.save(als_path)
als2 = ALS.load(als_path)
als.getMaxIter()
#5
model_path = temp_path + "/als_model"
model.save(model_path)
model2 = ALSModel.load(model_path)
model.rank == model2.rank
#True
sorted(model.userFactors.collect()) == sorted(model2.userFactors.collect())
#True
sorted(model.itemFactors.collect()) == sorted(model2.itemFactors.collect())
#True
# ---------------------------------------
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.recommendation import ALS
from pyspark.sql import Row
lines = spark.read.text("../zhang/proj/sample_movielens_ratings.txt").rdd
parts = lines.map(lambda row: row.value.split("::"))
ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
rating=float(p[2]), timestamp=long(p[3])))
ratings = spark.createDataFrame(ratingsRDD)
(training, test) = ratings.randomSplit([0.8, 0.2])
# Build the recommendation model using ALS on the training data
als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating")
model = als.fit(training)
# Evaluate the model by computing the RMSE on the test data
# prediction is a dataframe DataFrame[movieId: bigint, rating: double, timestamp: bigint, userId: bigint, prediction: float]
predictions = model.transform(test)
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(predictions)
print("Root-mean-square error = " + str(rmse))
| Python | 0 | |
5ba36ca805b002af63c619e17dd00400650da14b | Add script to rewrite the agents used by scc. | agent_paths.py | agent_paths.py | #!/usr/bin/env python3
from argparse import ArgumentParser
import json
import os.path
import re
import sys
from generate_simplestreams import json_dump
def main():
parser = ArgumentParser()
parser.add_argument('input')
parser.add_argument('output')
args = parser.parse_args()
paths_hashes = {}
with open(args.input) as input_file:
stanzas = json.load(input_file)
hashes = {}
for stanza in stanzas:
path = os.path.join('agent', os.path.basename(stanza['path']))
path = re.sub('-win(2012(hv)?(r2)?|7|8|81)-', '-windows-', path)
path_hash = stanza['sha256']
paths_hashes.setdefault(path, stanza['sha256'])
if paths_hashes[path] != path_hash:
raise ValueError('Conflicting hash')
stanza['path'] = path
hashes[path] = path_hash
ph_list = {}
for path, path_hash in hashes.items():
ph_list.setdefault(path_hash, set()).add(path)
for path_hash, paths in ph_list.items():
if len(paths) > 1:
print(paths)
json_dump(stanzas, args.output)
if __name__ == '__main__':
sys.exit(main())
| Python | 0 | |
5cc627d0c0cb18e236a055ce7fceb05b63b45385 | Add flask backend file | woogle.py | woogle.py | """:mod:`woogle` --- Flask Backend for Woogle Calendar
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from flask import Flask
app = Flask(__name__)
@app.route("/")
def calendar():
return "Hello World!"
if __name__ == "__main__":
app.run()
| Python | 0.000001 | |
f82ef484f6440c2b5b10eb144af09b770fa413c9 | Add python script for extracting server i18n msgs | .infrastructure/i18n/extract-server-msgs.py | .infrastructure/i18n/extract-server-msgs.py | import os
# Keys indicating the fn symbols that pybabel should search for
# when finding translations.
keys = '-k format -k format_time -k format_date -k format_datetime'
# Extraction
os.system("pybabel extract -F babel.cfg {} -o messages.pot .".format(keys))
os.system("pybabel init -i messages.pot -d . -o './beavy-server.po' -l en")
os.system("./node_modules/.bin/po2json beavy-server.po var/server-messages/beavy-server.json -F -f mf --fallback-to-msgid")
# Clean up
os.system("rm messages.pot")
os.system("rm beavy-server.po")
| Python | 0 | |
5046ff8ba17899893a9aa30687a1ec58a6e95af2 | Add solution for Square Detector. | 2014/qualification-round/square-detector.py | 2014/qualification-round/square-detector.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
class QuizzesParser:
def __init__(self, src):
self.src = src
with open(src) as f:
self.raw = f.read().splitlines()
self.amount = int(self.raw[0])
def quizpool(self):
cur_line = 1
for i in range(self.amount):
offset = int(self.raw[cur_line])
prev_line = cur_line
cur_line = prev_line + offset + 1
yield self.raw[prev_line:cur_line]
class QuizSolver:
def __init__(self, quiz):
self.quiz = quiz
def solve(self):
N = int(self.quiz[0])
started = False
start_line = -1
mask = list()
length = 0
for i in range(N):
line = self.quiz[i]
if not started and '#' not in line:
continue
if not started:
if line.count('#') > N - i:
return 'NO'
for j in range(len(line)):
if len(line) > 2 and j > 0 and j < len(line) - 1 \
and line[j] != '#' and '#' in line[:j] \
and '#' in line[j:]:
return 'NO'
mask.append(1 if line[j] == '#' else 0)
start_line = i
length = line.count('#')
started = True
continue
if i - start_line >= length:
if '#' in line:
return 'NO'
else:
continue
mask_pair = list()
for j in range(len(line)):
mask_pair.append(1 if line[j] == '#' else 0)
if any(map(lambda x, y: x ^ y, mask, mask_pair)):
return 'NO'
return 'YES'
def main():
qsparser = QuizzesParser(sys.argv[1])
with open(sys.argv[2], 'w') as f:
for i, quiz in enumerate(qsparser.quizpool()):
qsolver = QuizSolver(quiz)
f.write('Case #{num}: {ans}\n'.format(num=i+1, ans=qsolver.solve()))
if __name__ == '__main__':
main()
| Python | 0 | |
3df4cc086bf6c85eebc12094cc3ca459bd2bcd3d | Add unit test for programmatic application and approval | project/members/tests/test_application.py | project/members/tests/test_application.py | # -*- coding: utf-8 -*-
import pytest
from members.tests.fixtures.memberlikes import MembershipApplicationFactory
from members.tests.fixtures.types import MemberTypeFactory
from members.models import Member
@pytest.mark.django_db
def test_application_approve():
mtypes = [MemberTypeFactory(label='Normal member')]
application = MembershipApplicationFactory()
email = application.email
application.approve(set_mtypes=mtypes)
Member.objects.get(email=email)
| Python | 0 | |
b1b799c224418b1639850305a7136a3042c5e9b5 | Add station_data.py | station_data.py | station_data.py | from ftplib import FTP
import csv
import os
def unicode_csv_reader(latin1_data, **kwargs):
csv_reader = csv.reader(latin1_data, **kwargs)
for row in csv_reader:
yield [unicode(cell, "latin-1") for cell in row]
def get_station_data():
filename = "/tmp/station_list.txt"
# remove old filename
try:
os.remove(filename)
except OSError:
pass
# write stations_list_soil.txt into filename
with open(filename,'wb') as file:
ftp = FTP("ftp-cdc.dwd.de")
ftp.login()
ftp.retrbinary('RETR /pub/CDC/help/stations_list_soil.txt', file.write)
ftp.quit()
id_idx = 0
name2id = {}
id2meta = {}
first = True
# parse csv file
with open(filename, 'r') as csvfile:
spamreader = unicode_csv_reader(csvfile, delimiter=';')
for row in spamreader:
# first row contains header info
if first:
first = False
else:
name2id[row[4].strip()] = int(row[0].strip())
id2meta[int(row[id_idx].strip())] = {}
id2meta[int(row[id_idx].strip())]['id'] = int(row[id_idx].strip())
id2meta[int(row[id_idx].strip())]['height'] = row[1].strip()
id2meta[int(row[id_idx].strip())]['latitude'] = row[2].strip()
id2meta[int(row[id_idx].strip())]['longitude'] = row[3].strip()
id2meta[int(row[id_idx].strip())]['name'] = row[4].strip()
id2meta[int(row[id_idx].strip())]['state'] = row[5].strip()
return name2id, id2meta
def get_daily_recent():
ftp = FTP("ftp-cdc.dwd.de")
ftp.login()
ftp.cwd("/pub/CDC/observations_germany/climate/daily/kl/recent/")
ls = []
ftp.retrlines('NLST', lambda l: ls.append(l))
id2file = {}
for l in ls:
try:
id2file[int(l.split("_")[2])] = "ftp-cdc.dwd.de/pub/CDC/observations_germany/climate/daily/kl/recent/" + l
except:
continue
ftp.quit()
return id2file
def get_daily_hist():
ftp = FTP("ftp-cdc.dwd.de")
ftp.login()
ftp.cwd("/pub/CDC/observations_germany/climate/daily/kl/historical/")
ls = []
ftp.retrlines('NLST', lambda l: ls.append(l))
id2file = {}
for l in ls:
try:
id2file[int(l.split("_")[1])] = "ftp://ftp-cdc.dwd.de/pub/CDC/observations_germany/climate/daily/kl/historical/" + l
except:
continue
ftp.quit()
return id2file
def suggest_names(name, name2id):
station_names=sorted(list(name2id.keys()))
return [st for st in station_names if unicode(name,"utf8").lower() in st.lower()]
def get_name(name2id):
while True:
name = raw_input("Enter station name: ")
ns = suggest_names(name, name2id)
if len(ns) == 1:
return ns[0]
elif len(ns) == 0:
print "Nothing found. Repeat!"
else:
print "Reduce selection: ",
for n in ns:
print "'"+n+"'",
print
def cli():
name2id, id2meta = get_station_data()
id2recent = get_daily_recent()
id2hist = get_daily_hist()
station_name = get_name(name2id)
station_id = name2id[station_name]
print "Station name:", station_name
print " - id:", station_id
print " - height:", id2meta[station_id]['height']
print " - latitude:", id2meta[station_id]['latitude']
print " - longitude:", id2meta[station_id]['longitude']
print " - federal state:", id2meta[station_id]['state']
print " - Recent file:", id2recent[station_id]
print " - History file:", id2hist[station_id]
if __name__ == '__main__':
cli()
| Python | 0 | |
1a682405904dcc711d889881d6a216b3eff9e1dd | remove off method from status light | status_light.py | status_light.py | import time
import config
import RPi.GPIO as GPIO
class StatusLight(object):
"""available patterns for the status light"""
patterns = {
'on' : (.1, [True]),
'off' : (.1, [False]),
'blink_fast' : (.1, [False, True]),
'blink' : (.1, [False, False, False, True, True, True, True, True, True, True, True, True, True]),
'blink_pauze' : (.1, [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]),
}
"""placeholder for pattern to tenmporarily interrupt
status light with different pattern"""
interrupt_pattern = [0, []]
"""continue flashing, controlled by the stop"""
cont = True
pin_id = None
def __init__(self, pin_id):
self.pin_id = pin_id
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin_id, GPIO.OUT)
self.action = 'on'
def interrupt(self, action, repeat = 1):
"""Interupt the current status of the light with a names action
parameters: action the name of the action
repeat: the number of times to repeatthe interruption"""
self.interrupt_pattern[0] = self.patterns[action][0]
for i in range(0, repeat):
self.interrupt_pattern[1].extend(list(self.patterns[action][1][:]))
def start(self):
"""Perform a status light action"""
while True:
for state in self.patterns[self.action][1]:
# if the interrupt_pattern is not empty, prioritize it
while len(self.interrupt_pattern[1]):
time.sleep(self.interrupt_pattern[0])
self.set_state(state = self.interrupt_pattern[1].pop(0))
# peform the regular action when not interrupted
time.sleep(self.patterns[self.action][0])
self.set_state(state)
def set_state(self, state):
"""Turn the light on or off"""
GPIO.output(self.pin_id, state)
def __del__(self):
GPIO.cleanup()
if __name__ == '__main__':
light = StatusLight(config.status_light_pin)
light.interrupt('blink_fast', 3)
light.start()
| import time
import config
import RPi.GPIO as GPIO
class StatusLight(object):
"""available patterns for the status light"""
patterns = {
'on' : (.1, [True]),
'off' : (.1, [False]),
'blink_fast' : (.1, [False, True]),
'blink' : (.1, [False, False, False, True, True, True, True, True, True, True, True, True, True]),
'blink_pauze' : (.1, [False, False, False, False, False, False, False, False, False, False, False, False, False, False, True]),
}
"""placeholder for pattern to tenmporarily interrupt
status light with different pattern"""
interrupt_pattern = [0, []]
"""continue flashing, controlled by the stop"""
cont = True
pin_id = None
def __init__(self, pin_id):
self.pin_id = pin_id
GPIO.setmode(GPIO.BCM)
GPIO.setup(pin_id, GPIO.OUT)
self.action = 'on'
def interrupt(self, action, repeat = 1):
"""Interupt the current status of the light with a names action
parameters: action the name of the action
repeat: the number of times to repeatthe interruption"""
self.interrupt_pattern[0] = self.patterns[action][0]
for i in range(0, repeat):
self.interrupt_pattern[1].extend(list(self.patterns[action][1][:]))
def start(self):
"""Perform a status light action"""
while True:
for state in self.patterns[self.action][1]:
# if the interrupt_pattern is not empty, prioritize it
while len(self.interrupt_pattern[1]):
time.sleep(self.interrupt_pattern[0])
self.set_state(state = self.interrupt_pattern[1].pop(0))
# peform the regular action when not interrupted
time.sleep(self.patterns[self.action][0])
self.set_state(state)
def off(self, state):
"""Turn off status light"""
self.cont = False
self.set_state(state)
def set_state(self, state):
"""Turn the light on or off"""
GPIO.output(self.pin_id, state)
def __del__(self):
GPIO.cleanup()
if __name__ == '__main__':
light = StatusLight(config.status_light_pin)
light.interrupt('blink_fast', 3)
light.start()
| Python | 0.000001 |
f4d94c48c5cf5d999c39d8d6c6dbab72a827fec2 | Add an example | example/cifer10.py | example/cifer10.py | #!/usr/bin/env python3
import argparse
import logging
import json
import matplotlib
matplotlib.use('Agg') # noqa / this should be before numpy/chainer
from chainer.ya.utils import rangelog, SourceBackup, ArgumentBackup, FinalRequest, SlackPost, SamplePairingDataset # noqa
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import optimizers
from chainer import training
from chainer.datasets import get_cifar10
from chainer.training import StandardUpdater, extensions
from chainer.training.extensions import snapshot_object
from chainer.training.triggers import MinValueTrigger
from chainer.iterators.serial_iterator import SerialIterator
class Conv(chainer.Chain):
def __init__(self, n_out, n_units=128, n_layers=10):
super(Conv, self).__init__()
with self.init_scope():
for i in range(n_layers):
setattr(self, "c"+str(i), L.Convolution2D(None,n_units,
ksize=3, pad=1))
setattr(self, "bn"+str(i), L.BatchNormalization(n_units))
self.l = L.Linear(None,n_out)
self.n_units = n_units
self.n_out = n_out
self.n_layers = n_layers
def predict(self, x):
h = x
for i in range(self.n_layers):
h = F.relu(getattr(self,"bn"+str(i))(getattr(self,"c"+str(i))(h)))
return self.l(h)
def __call__(self, x, t):
loss = F.softmax_cross_entropy(self.predict(x), t)
chainer.report({'loss': loss/t.shape[0]}, self)
return loss
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-S", "--sample_pairing", action="store_true",
default=False)
parser.add_argument("--model_path", default='model.npz')
parser.add_argument("-e", "--epoch", type=int, default=10)
parser.add_argument("-b", "--batch", type=int, default=500)
parser.add_argument("--store", default="result")
parser.add_argument("--device", type=int, default=0)
parser.add_argument("--report_keys", action="append", default=['loss'])
args = parser.parse_args()
args.report_keys = ['main/'+k for k in args.report_keys]
args.report_keys += ['validation/'+k for k in args.report_keys]
return args
def train(args):
logger = logging.getLogger()
logger.setLevel(getattr(logging, 'INFO'))
logger.addHandler(logging.StreamHandler())
rangelog.set_logger(logger)
rangelog.set_start_msg("start... {name}")
rangelog.set_end_msg(" end...")
with rangelog("creating dataset") as logger:
train_set, eval_set = get_cifar10()
if args.sample_pairing:
train_set = SamplePairingDataset(train_set)
with rangelog("creating iterator") as logger:
logger.info("train_set: {}, eval_set: {}"
.format(len(train_set), len(eval_set)))
iterator = SerialIterator(train_set, args.batch, repeat=True)
eval_iterator = SerialIterator(eval_set, args.batch, repeat=False)
with rangelog("creating model") as logger:
logger.info('GPU: {}'.format(args.device))
model = Conv(10)
chainer.cuda.get_device_from_id(args.device).use()
model.to_gpu(args.device)
with rangelog("creating optimizer"):
optimizer = optimizers.Adam()
optimizer.setup(model)
with rangelog("creating trainer"):
updater = StandardUpdater(iterator=iterator,
optimizer=optimizer,
device=args.device)
trainer = training.Trainer(updater, (args.epoch, 'epoch'),
out=args.store)
with rangelog("trainer extension") as logger:
trainer.extend(extensions.Evaluator(iterator=eval_iterator,
target=model,
device=args.device))
trainer.extend(extensions.LogReport())
trainer.extend(SourceBackup())
trainer.extend(ArgumentBackup(args))
try:
slack = json.load(open("slack.json"))
except Exception as e:
logger.warn("Error {}".format(e))
else:
trainer.extend(SlackPost(slack["token"], slack["channel"]))
trainer.extend(extensions.PrintReport(['epoch']+args.report_keys))
trainer.extend(extensions.ProgressBar(update_interval=1))
trainer.extend(extensions.PlotReport(args.report_keys, 'epoch',
file_name='plot.png'))
trigger = MinValueTrigger(key='validation/main/loss')
snapshoter = snapshot_object(model, filename=args.model_path)
trainer.extend(snapshoter, trigger=trigger)
with rangelog("training"):
trainer.run()
return model
if __name__ == '__main__':
train(parse_args())
| Python | 0 | |
aa292c2f180ffcfdfc55114750f22b6c8790a69b | Add Jaro-Winkler distance based on code on RosettaCode | pygraphc/similarity/RosettaJaroWinkler.py | pygraphc/similarity/RosettaJaroWinkler.py | from __future__ import division
from itertools import combinations
from time import time
def jaro(s, t):
s_len = len(s)
t_len = len(t)
if s_len == 0 and t_len == 0:
return 1
match_distance = (max(s_len, t_len) // 2) - 1
s_matches = [False] * s_len
t_matches = [False] * t_len
matches = 0
transpositions = 0
for i in range(s_len):
start = max(0, i - match_distance)
end = min(i + match_distance + 1, t_len)
for j in range(start, end):
if t_matches[j]:
continue
if s[i] != t[j]:
continue
s_matches[i] = True
t_matches[j] = True
matches += 1
break
if matches == 0:
return 0
k = 0
for i in range(s_len):
if not s_matches[i]:
continue
while not t_matches[k]:
k += 1
if s[i] != t[k]:
transpositions += 1
k += 1
return ((matches / s_len) +
(matches / t_len) +
((matches - transpositions / 2) / matches)) / 3
start = time()
log_file = '/home/hs32832011/Git/labeled-authlog/dataset/Hofstede2014/dataset1_perday/Dec 1.log'
with open(log_file, 'r') as f:
lines = f.readlines()
log_length = len(lines)
for line1, line2 in combinations(xrange(log_length), 2):
s = lines[line1]
t = lines[line2]
print("%.10f" % (jaro(s, t)))
# print runtime
duration = time() - start
minute, second = divmod(duration, 60)
hour, minute = divmod(minute, 60)
print "Runtime: %d:%02d:%02d" % (hour, minute, second)
| Python | 0.000004 | |
2674aa95c69c6e0fe0d8fd71d9116150cfab6507 | add xdawn decoding example | examples/decoding/plot_decoding_xdawn_meg.py | examples/decoding/plot_decoding_xdawn_meg.py | """
=============================
XDAWN Decoding From MEG data
=============================
ERF decoding with Xdawn. For each event type, a set of spatial Xdawn filters
are trained and apply on the signal. Channels are concatenated and rescaled to
create features vectors that will be fed into a Logistic Regression.
"""
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import sample
from mne.preprocessing.xdawn import Xdawn
from mne.decoding import ConcatenateChannels
from sklearn.cross_validation import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20, method='iir')
events = mne.read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
add_eeg_ref=False, verbose=False)
# Create classification pipeline
clf = make_pipeline(Xdawn(3),
ConcatenateChannels(),
MinMaxScaler(),
LogisticRegression(penalty='l1'))
# Get the labels
labels = epochs.events[:, -1]
# Cross validator
cv = StratifiedKFold(labels, 10, shuffle=True, random_state=42)
# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv:
clf.fit(epochs[train], labels[train])
preds[test] = clf.predict(epochs[test])
# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)
# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Plot confusion matrix
plt.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Normalized Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| Python | 0.000001 | |
34986c7bfd1d4634861a5c4b54cf90ef18090ff4 | test versions of required libs across different places | spacy/tests/test_requirements.py | spacy/tests/test_requirements.py | import re
from pathlib import Path
def test_build_dependencies(en_vocab):
libs_ignore_requirements = ["pytest", "pytest-timeout", "mock", "flake8", "jsonschema"]
libs_ignore_setup = ["fugashi", "natto-py", "pythainlp"]
# check requirements.txt
root_dir = Path(__file__).parent.parent.parent
req_file = root_dir / "requirements.txt"
req_dict = {}
with req_file.open() as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if not line.startswith("#"):
lib, v = _parse_req(line)
if lib and lib not in libs_ignore_requirements:
req_dict[lib] = v
# check setup.cfg and compare to requirements.txt
# also fails when there are missing or additional libs
setup_file = root_dir / "setup.cfg"
with setup_file.open() as f:
lines = f.readlines()
setup_keys = set()
for line in lines:
line = line.strip()
if not line.startswith("#"):
lib, v = _parse_req(line)
if lib and not lib.startswith("cupy") and lib not in libs_ignore_setup:
req_v = req_dict.get(lib, None)
assert req_v is not None # if fail: setup.cfg contains a lib not in requirements.txt
assert (lib+v) == (lib+req_v) # if fail: setup.cfg & requirements.txt have conflicting versions
setup_keys.add(lib)
assert sorted(setup_keys) == sorted(req_dict.keys()) # if fail: requirements.txt contains a lib not in setup.cfg
# check pyproject.toml and compare the versions of the libs to requirements.txt
# does not fail when there are missing or additional libs
toml_file = root_dir / "pyproject.toml"
with toml_file.open() as f:
lines = f.readlines()
toml_keys = set()
for line in lines:
line = line.strip()
line = line.strip(",")
line = line.strip("\"")
if not line.startswith("#"):
lib, v = _parse_req(line)
if lib:
req_v = req_dict.get(lib, None)
assert (lib+v) == (lib+req_v) # if fail: pyproject.toml & requirements.txt have conflicting versions
toml_keys.add(lib)
def _parse_req(line):
lib = re.match(r"^[a-z0-9\-]*", line).group(0)
v = line.replace(lib, "").strip()
if not re.match(r"^[<>=][<>=].*", v):
return None, None
return lib, v | Python | 0 | |
08b4e97d3e3bcf07bdc8b0e0c02ce5d29fe5ee9e | Create battleship.py | battleship.py | battleship.py | from random import randint
from random import randrange
ships = 0
board = []
BuiltShips = {}
board_size = int(input("How big would you like the board to be?"))
for x in range(board_size):
board.append(["O"] * board_size)
def print_board(board):
for row in board:
print(" ".join(row))
class BattleShip(object):
def __init__(self, id):
self.id = id
self.location = {
"x": [],
"y": []
}
self.hits = 0
self.orientation = ""
x = [] # Keep Track of all X Coordinates
y = [] # Keep Track of all Y Coordinates
sank = 0
# All battleships start with these attributes, hit count, whether or not it is sank, location and orientation
def ExcludeRand(self,exclude): #this will generate a random number, while excluding coordinates already assigned
points = None
while points in exclude or points is None:
points = randrange(0, len(board)-1)
return points
# Battleship, Build Thyself!
def build(self):
if randint(0, 1) == 1: # Flip a coin to determine orientation
self.orientation = "vertical"
else:
self.orientation = "horizontal"
# If there aren't any ships built yet, we can put it anywhere
if self.orientation == "horizontal": #If the coin flipped to horizontal, build it that way
self.location["x"].append(int(self.ExcludeRand(self.x))) #Assign Random X Coordinate
self.x.append(self.location["x"][0])
print ("X's:", self.x)
print (self.location)
self.location["y"].append(int(self.ExcludeRand(self.y))) #Assign Random Y Coordinate
self.y.append(self.location["y"][0])
print (self.location)
if self.location["x"] == len(board) - 1:
self.location["x"][0].append(len(board) - 2)
print (self.location)
else:
self.location["x"].append(self.location["x"][0] + 1)
print (self.location)
print (self.location)
else:
self.location["x"].append(int(self.ExcludeRand(self.x))) #Random X
self.x.append(self.location["x"][0])
print (self.location)
self.location["y"].append(int(self.ExcludeRand(self.y))) #Random Y
self.y.append(self.location["y"][0])
print ("Y's:",self.y)
print (self.location)
if self.location["y"][0] == len(board) - 1: #Y plus or minus 1
self.location["y"].append(len(board) - 2)
print (self.location)
else:
self.location["y"].append(self.location["y"][0] + 1)
print (self.location)
def is_int(n):
try:
return int(n)
except ValueError:
is_int(input("Sorry, not a number. Try again:"))
ships = is_int(input("How many ships?"))
for each in range(ships):
BuiltShips["ship" + str(each)] = BattleShip(each)
BuiltShips["ship" + str(each)].build()
def Assault(x,y):
for each in BuiltShips:
if x in BuiltShips[each].location["x"] and y in BuiltShips[each].location["y"]:
BuiltShips[each].hits += 1
if BuiltShips[each].hits == 2:
print ("You sank a ship!")
BattleShip.sank += 1
return True
else:
print (BuiltShips[each].location)
return False
turns = 3 * ships
while BattleShip.sank < ships:
if turns > 0:
print ("%s turns left" % turns)
print_board(board)
guess_x = int(input("Guess Row:"))
guess_y = int(input("Guess Column:"))
if board[guess_x][guess_y] == "X" or board[guess_x][guess_y] == "!":
print ("You already guessed there!")
turns -= 1
elif Assault(guess_x,guess_y) == True:
print ("You got a hit!")
board[guess_x][guess_y] = "!"
print_board(board)
print ("Ships sank:",BattleShip.sank)
else:
print ("Miss!")
board[guess_x][guess_y] = "X"
turns -= 1
else:
print ("Sorry, out of turns.")
break
else:
print ("You won.")
| Python | 0.000038 | |
13959dbce03b44f15c4c05ff0715b7d26ff6c0fa | Add a widget. | python/tkinter/python3/animation_print.py | python/tkinter/python3/animation_print.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See also: http://effbot.org/tkinterbook/widget.htm
import tkinter as tk
def main():
"""Main function"""
root = tk.Tk()
def increment_counter():
# Do something...
print("Hello")
# Reschedule event in 3 seconds
root.after(3000, increment_counter)
# Schedule event in 3 seconds
root.after(3000, increment_counter)
root.mainloop()
if __name__ == '__main__':
main()
| Python | 0 | |
1372a374b02d5e1d01b1569c71f84bdb71fb1296 | Update handler.py | tendrl/node_agent/message/handler.py | tendrl/node_agent/message/handler.py | import os
from io import BlockingIOError
import sys
import struct
import traceback
import gevent.event
import gevent.greenlet
from gevent.server import StreamServer
from gevent import socket
from gevent.socket import error as socket_error
from gevent.socket import timeout as socket_timeout
from tendrl.commons.message import Message
from tendrl.commons.logger import Logger
MESSAGE_SOCK_PATH = "/var/run/tendrl/message.sock"
class MessageHandler(gevent.greenlet.Greenlet):
def __init__(self):
super(MessageHandler, self).__init__()
self.server = StreamServer(
self.bind_unix_listener(),
self.read_socket
)
def read_socket(self, sock, *args):
try:
size = self._msgLength(sock)
data = self._read(sock, size)
frmt = "=%ds" % size
msg = struct.unpack(frmt, data)
message = Message.from_json(msg[0])
Logger(message)
except (socket_error, socket_timeout):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
except (TypeError, ValueError, KeyError, AttributeError):
sys.stderr.write(
"Unable to log the message.%s\n" % self.data)
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
def _read(self, sock, size):
data = ''
while len(data) < size:
dataTmp = sock.recv(size-len(data))
data += dataTmp
if dataTmp == '':
raise RuntimeError("Message socket connection broken")
return data
def _msgLength(self, sock):
d = self._read(sock, 4)
s = struct.unpack('=I', d)
return s[0]
def _run(self):
try:
self.server.serve_forever()
except (TypeError, BlockingIOError, socket_error, ValueError):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
def stop(self):
pass
def bind_unix_listener(self):
# http://0pointer.de/blog/projects/systemd.html (search "file
# descriptor 3")
try:
socket_fd = 3
self.sock = socket.fromfd(socket_fd, socket.AF_UNIX,
socket.SOCK_STREAM)
self.sock.setblocking(0)
self.sock.listen(50)
return self.sock
except (TypeError, BlockingIOError, socket_error, ValueError):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb,
file=sys.stderr)
pass
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(MESSAGE_SOCK_PATH):
os.remove(MESSAGE_SOCK_PATH)
self.sock.setblocking(0)
self.sock.bind(MESSAGE_SOCK_PATH)
self.sock.listen(50)
return self.sock
except:
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb,
file=sys.stderr)
| import os
from io import BlockingIOError
import sys
import traceback
import gevent.event
import gevent.greenlet
from gevent.server import StreamServer
from gevent import socket
from gevent.socket import error as socket_error
from gevent.socket import timeout as socket_timeout
from tendrl.commons.message import Message
from tendrl.commons.logger import Logger
RECEIVE_DATA_SIZE = 4096
MESSAGE_SOCK_PATH = "/var/run/tendrl/message.sock"
class MessageHandler(gevent.greenlet.Greenlet):
def __init__(self):
super(MessageHandler, self).__init__()
self.server = StreamServer(
self.bind_unix_listener(),
self.read_socket
)
def read_socket(self, sock, *args):
try:
self.data = sock.recv(RECEIVE_DATA_SIZE)
message = Message.from_json(self.data)
Logger(message)
except (socket_error, socket_timeout):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
except (TypeError, ValueError, KeyError, AttributeError):
sys.stderr.write(
"Unable to log the message.%s\n" % self.data)
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
def _run(self):
try:
self.server.serve_forever()
except (TypeError, BlockingIOError, socket_error, ValueError):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(
exc_type, exc_value, exc_tb, file=sys.stderr)
def stop(self):
pass
def bind_unix_listener(self):
# http://0pointer.de/blog/projects/systemd.html (search "file
# descriptor 3")
try:
socket_fd = 3
self.sock = socket.fromfd(socket_fd, socket.AF_UNIX,
socket.SOCK_STREAM)
self.sock.setblocking(0)
self.sock.listen(50)
return self.sock
except (TypeError, BlockingIOError, socket_error, ValueError):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb,
file=sys.stderr)
pass
try:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if os.path.exists(MESSAGE_SOCK_PATH):
os.remove(MESSAGE_SOCK_PATH)
self.sock.setblocking(0)
self.sock.bind(MESSAGE_SOCK_PATH)
self.sock.listen(50)
return self.sock
except:
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb,
file=sys.stderr)
| Python | 0.000001 |
49253451d65511713cd97a86c7fe54e64b3e80a9 | Add a separate test of the runtest.py --qmtest option. | test/runtest/qmtest.py | test/runtest/qmtest.py | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that the --qmtest option invokes tests directly via QMTest,
not directly via Python.
"""
import os.path
import string
import TestRuntest
test = TestRuntest.TestRuntest()
test.subdir('test')
test_fail_py = os.path.join('test', 'fail.py')
test_no_result_py = os.path.join('test', 'no_result.py')
test_pass_py = os.path.join('test', 'pass.py')
workpath_fail_py = test.workpath(test_fail_py)
workpath_no_result_py = test.workpath(test_no_result_py)
workpath_pass_py = test.workpath(test_pass_py)
test.write_failing_test(test_fail_py)
test.write_no_result_test(test_no_result_py)
test.write_passing_test(test_pass_py)
# NOTE: the FAIL and PASS lines below have trailing spaces.
expect_stdout = """\
qmtest run --output results.qmr --format none --result-stream="scons_tdb.AegisChangeStream" test/fail.py test/no_result.py test/pass.py
--- TEST RESULTS -------------------------------------------------------------
test/fail.py : FAIL
FAILING TEST STDOUT
FAILING TEST STDERR
test/no_result.py : NO_RESULT
NO RESULT TEST STDOUT
NO RESULT TEST STDERR
test/pass.py : PASS
--- TESTS THAT DID NOT PASS --------------------------------------------------
test/fail.py : FAIL
test/no_result.py : NO_RESULT
--- STATISTICS ---------------------------------------------------------------
3 tests total
1 ( 33%) tests PASS
1 ( 33%) tests FAIL
1 ( 33%) tests NO_RESULT
"""
testlist = [
test_fail_py,
test_no_result_py,
test_pass_py,
]
test.run(arguments='--qmtest %s' % string.join(testlist),
status=1,
stdout=expect_stdout)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python | 0 | |
0c2fb46c977d8d8ee03d295fee8ddf37cee8cc06 | Add script to calculate recalls of track zip files. | tools/stats/zip_track_recall.py | tools/stats/zip_track_recall.py | #!/usr/bin/env python
from vdetlib.utils.protocol import proto_load, proto_dump, track_box_at_frame
from vdetlib.utils.common import iou
import argparse
import numpy as np
import glob
import cPickle
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('annot_file')
parser.add_argument('track_dir')
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
annot_proto = proto_load(args.annot_file)
track_files = glob.glob(args.track_dir + "/*.pkl")
tracks = []
frames = []
for track_file in track_files:
track = cPickle.loads(open(track_file, 'rb').read())
tracks.append(track['bbox'])
frames.append(track['frame'])
gt_count = 0
recall_count = 0
for frame in vid_proto['frames']:
frame_id = frame['frame']
# annot boxes
annot_boxes = [track_box_at_frame(annot_track['track'], frame_id) \
for annot_track in annot_proto['annotations']]
annot_boxes = [box for box in annot_boxes if box is not None]
if len(annot_boxes) == 0: continue
gt_count += len(annot_boxes)
# track boxes
track_boxes = [track[frame==frame_id,:].flatten() for track, frame \
in zip(tracks, frames) if np.any(frame==frame_id)]
if len(track_boxes) == 0: continue
overlaps = iou(np.asarray(annot_boxes), np.asarray(track_boxes))
max_overlaps = overlaps.max(axis=1)
recall_count += np.count_nonzero(max_overlaps >= 0.5)
print "{} {} {} {}".format(vid_proto['video'],
gt_count, recall_count, float(recall_count) / gt_count)
| Python | 0 | |
3ee41b704e98e143d23eb0d714c6d79e8d6e6130 | Write test for RequestTypeError | tests/web/test_request_type_error.py | tests/web/test_request_type_error.py | import unittest
from performance.web import RequestTypeError
class RequestTypeErrorTestCase(unittest.TestCase):
def test_init(self):
type = 'get'
error = RequestTypeError(type)
self.assertEqual(type, error.type)
def test_to_string(self):
type = 'get'
error = RequestTypeError(type)
self.assertEqual('Invalid request type "%s"' % type, error.__str__()) | Python | 0.000004 | |
125a6714d1c4bda74a32c0b2fc67629ef2b45d7a | 6-2 lucky_number | 06/lucky_number.py | 06/lucky_number.py | friend = {'dwq': '5', 'bql': '3','xx': '28', 'txo':'44', 'fw':'2'}
print(friend['dwq'])
print(friend['bql'])
print(friend['xx'])
print(friend['txo'])
print(friend['fw'])
| Python | 0.998787 | |
00e75bc59dfec20bd6b96ffac7d17da5760f584c | Add Slack integration | hc/api/migrations/0012_auto_20150930_1922.py | hc/api/migrations/0012_auto_20150930_1922.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0011_notification'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='kind',
field=models.CharField(choices=[('email', 'Email'), ('webhook', 'Webhook'), ('slack', 'Slack'), ('pd', 'PagerDuty')], max_length=20),
),
]
| Python | 0.000001 | |
f92d06346b3d28513c5f5b9833dbf5a4d48c3e46 | Create rot_alpha.py | rot_alpha.py | rot_alpha.py | #!/usr/bin/env python
from string import uppercase, lowercase, maketrans
import sys
class ROTAlpha():
def rot_alpha(self, data, rot):
upper = ''.join([uppercase[(i+rot)%26] for i in xrange(26)])
lower = ''.join([lowercase[(i+rot)%26] for i in xrange(26)])
table = maketrans(uppercase + lowercase, upper + lower)
print(data.translate(table))
if __name__ == '__main__':
try:
data = sys.argv[1]
rot = sys.argv[2]
rot = int(rot, 0)
table = ROTAlpha()
table.rot_alpha(data, rot)
except IndexError:
print('Usage: rot_alpha.py <alpha numeric data> <int to rotate>')
sys.exit(1)
| Python | 0.000078 | |
d96acd58ecf5937da344942f387d845dc5b26871 | Add db tests | test/test_db.py | test/test_db.py | from piper.db import DbCLI
import mock
import pytest
class DbCLIBase(object):
def setup_method(self, method):
self.cli = DbCLI()
self.ns = mock.Mock()
self.config = mock.Mock()
class TestDbCLIRun(DbCLIBase):
def test_plain_run(self):
self.cli.init = mock.Mock()
ret = self.cli.run(self.ns, self.config)
assert ret == 0
self.cli.init.assert_called_once_with(self.ns, self.config)
class TestDbCLIInit(DbCLIBase):
def test_no_db(self):
self.config.db.host = None
with pytest.raises(AssertionError):
self.cli.init(self.ns, self.config)
def test_calls(self):
self.cli.handle_sqlite = mock.Mock()
self.cli.create_tables = mock.Mock()
self.cli.init(self.ns, self.config)
self.cli.handle_sqlite.assert_called_once_with(self.config.db.host)
self.cli.create_tables.assert_called_once_with(
self.config.db.host,
echo=self.ns.verbose,
)
class TestDbCLIHandleSqlite(DbCLIBase):
@mock.patch('piper.utils.mkdir')
@mock.patch('os.path.dirname')
@mock.patch('os.path.exists')
def test_sqlite_handling_creates_dir(self, exists, dirname, mkdir):
self.config.db.host = 'sqlite:///amaranthine.db'
exists.return_value = False
self.cli.handle_sqlite(self.ns.host)
mkdir.assert_called_once_with(dirname.return_value)
class TestDbCLICreateTables(DbCLIBase):
def setup_method(self, method):
super(TestDbCLICreateTables, self).setup_method(method)
self.cli.tables = (mock.Mock(), mock.Mock())
for x, table in enumerate(self.cli.tables):
table.__tablename__ = x
@mock.patch('piper.db.Session')
@mock.patch('piper.db.create_engine')
def test_creation(self, ce, se):
eng = ce.return_value
host = self.config.host
self.cli.create_tables(host)
ce.assert_called_once_with(host, echo=False)
se.configure.assert_called_once_with(bind=eng)
for table in self.cli.tables:
assert table.metadata.bind is eng
table.metadata.create_all.assert_called_once_with()
| Python | 0 | |
83afa054e3bee18aba212394973978fd49429afa | Create test_ratings.py | test_ratings.py | test_ratings.py | #!/usr/bin/env python3.5
import sys
import re
import os
import csv
from extract_toc import parseargs
from get_ratings import Ratings, Ratings2
def nvl(v1,v2):
if v1:
return v1
else:
return v2
def process_ratings_for_file(ratings, filename):
ratings.process_file(filename)
ratings.map_ratings()
improvement = 0
for k in ratings.all_available_ratings:
v = ratings.ratings_mapped.get(k)
if not v:
v = [None] * 3
v_current = ratings.current_ratings_alt.get(k)
if v_current:
if (not v[0] or v[0] != v_current):
improvement += 1
elif (not v_current):
if (v[0]):
improvement -= 1
print("%-30s %-2s/%-2s %-2s %-2s" % (k, nvl(v[0], "_"), nvl(v_current, "_"), nvl(v[1], "_"), nvl(v[2], "_")))
# print(ratings.current_ratings_alt)
print("")
print("Number of improvements using new methodology = %d" % (improvement))
print("")
def main(args):
argsmap = parseargs(args)
files = argsmap.get('files')
if (not files):
sys.exit(0)
ratings_mapper_file = argsmap.get("rmap")
if ratings_mapper_file:
ratings_mapper_file = ratings_mapper_file[0]
if not ratings_mapper_file:
print("Ratings Mapper File file name must be entered using the --rmap option...")
sys.exit(1)
ratings = Ratings(ratings_mapper_file)
for filename in files:
print("Processing file: " + filename)
print("============================")
process_ratings_for_file(ratings, filename)
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| Python | 0.000015 | |
f804300765f036f375768e57e081b070a549a800 | Add test script with only a few packages | test-extract-dependencies.py | test-extract-dependencies.py | from dependencies import extract_package
import xmlrpc.client as xmlrpclib
import random
client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
packages = ['gala', 'scikit-learn', 'scipy', 'scikit-image', 'Flask']
random.shuffle(packages)
for i, package in enumerate(packages):
extract_package(package, to='test-pypi-deps.txt',
client=client, n=i)
| Python | 0 | |
7de55b168a276b3d5cdea4d718680ede46edf4d8 | Create file to test thinc.extra.search | thinc/tests/unit/test_beam_search.py | thinc/tests/unit/test_beam_search.py | from ...extra.search import MaxViolation
def test_init_violn():
v = MaxViolation()
| Python | 0 | |
38b839405f9976df2d63c08d3c16441af6cdebd1 | Add test | test/selenium/src/tests/test_risk_threats_page.py | test/selenium/src/tests/test_risk_threats_page.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com
"""All smoke tests relevant to risks/threats page"""
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import url
class TestRiskThreatPage(base.Test):
"""Tests the threat/risk page, a part of smoke tests, section 8."""
@pytest.mark.smoke_tests
def test_app_redirects_to_new_risk_page(self, new_risk):
"""Tests if after saving and closing the lhn_modal the app redirects to
the object page.
Generally we start at a random url. Here we verify that after saving
and closing the lhn_modal we're redirected to an url that contains an
object id.
"""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
assert url.RISKS + "/" + new_risk.object_id in \
new_risk.url
| Python | 0.000005 | |
3d6f78447175d7f34e2eaedc2b0df82acb1e0e0e | Add a simple script I used to grep all SVN sources for control statements. | tools/dev/find-control-statements.py | tools/dev/find-control-statements.py | #!/usr/bin/python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# Find places in our code that are part of control statements
# i.e. "for", "if" and "while". That output is then easily
# searched for various interesting / complex pattern.
#
#
# USAGE: find-control-statements.py FILE1 FILE2 ...
#
import sys
header_shown = False
last_line_num = None
def print_line(fname, line_num, line):
""" Print LINE of number LINE_NUM in file FNAME.
Show FNAME only once per file and LINE_NUM only for
non-consecutive lines.
"""
global header_shown
global last_line_num
if not header_shown:
print('')
print(fname)
header_shown = True
if last_line_num and (last_line_num + 1 == line_num):
print(" %s" % line),
else:
print('%5d:%s' % (line_num, line)),
last_line_num = line_num
def is_control(line, index, word):
""" Return whether LINE[INDEX] is actual the start position of
control statement WORD. It must be followed by an opening
parantheses and only whitespace in between WORD and the '('.
"""
if index > 0:
if not (line[index-1] in [' ', '\t', ';']):
return False
index = index + len(word)
parantheses_index = line.find('(', index)
if parantheses_index == -1:
return False
while index < parantheses_index:
if not (line[index] in [' ', '\t',]):
return False
index += 1
return True
def find_specific_control(line, control):
""" Return the first offset of the control statement CONTROL
in LINE, or -1 if it can't be found.
"""
current = 0
while current != -1:
index = line.find(control, current)
if index == -1:
break
if is_control(line, index, control):
return index
current = index + len(control);
return -1
def find_control(line):
""" Return the offset of the first control in LINE or -1
if there is none.
"""
current = 0
for_index = find_specific_control(line, "for")
if_index = find_specific_control(line, "if")
while_index = find_specific_control(line, "while")
first = len(line)
if for_index >= 0 and first > for_index:
first = for_index
if if_index >= 0 and first > if_index:
first = if_index
if while_index >= 0 and first > while_index:
first = while_index
if first == len(line):
return -1
return first
def parantheses_delta(line):
""" Return the number of opening minus the number of closing
parantheses in LINE. Don't count those inside strings or chars.
"""
escaped = False
in_squote = False
in_dquote = False
delta = 0
for c in line:
if escaped:
escaped = False
elif in_dquote:
if c == '\\':
escaped = True
elif c == '"':
in_dquote = False
elif in_squote:
if c == '\\':
escaped = True
elif c == "'":
in_squote = False
elif c == '(':
delta += 1
elif c == ')':
delta -= 1
elif c == '"':
in_dquote = True
elif c == "'":
in_squote -= True
return delta
def scan_file(fname):
lines = open(fname).readlines()
line_num = 1
parantheses_level = 0
for line in lines:
if parantheses_level > 0:
index = 0
else:
index = find_control(line)
if index >= 0:
print_line(fname, line_num, line)
parantheses_level += parantheses_delta(line[index:])
line_num += 1
if __name__ == '__main__':
for fname in sys.argv[1:]:
header_shown = False
last_line_num = None
scan_file(fname)
| Python | 0.006384 | |
f00c22f79d8f1cd210830957e6c79d75638c7c5b | add test for role | tests/k8s/test_role.py | tests/k8s/test_role.py | #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
from k8s.client import NotFound
from k8s.models.common import ObjectMeta
from k8s.models.role import Role, PolicyRule
NAME = "my-role"
NAMESPACE = "my-namespace"
@pytest.mark.usefixtures("k8s_config")
class TestRole(object):
def test_created_if_not_exists(self, post, api_get):
api_get.side_effect = NotFound()
role = _create_default_role()
call_params = role.as_dict()
post.return_value.json.return_value = call_params
assert role._new
role.save()
assert not role._new
pytest.helpers.assert_any_call(post, _uri(NAMESPACE), call_params)
def test_updated_if_exists(self, get, put):
mock_response = _create_mock_response()
get.return_value = mock_response
role = _create_default_role()
from_api = Role.get_or_create(
metadata=role.metadata,
rules=role.rules,
)
assert not from_api._new
assert from_api.rules == role.rules
def test_deleted(self, delete):
Role.delete(NAME, namespace=NAMESPACE)
pytest.helpers.assert_any_call(delete, _uri(NAMESPACE, NAME))
def _create_mock_response():
mock_response = mock.Mock()
mock_response.json.return_value = {
"apiVersion": "rbac.authorization.k8s.io/v1",
"kind": "Role",
"metadata": {
"creationTimestamp": "2017-09-08T13:37:00Z",
"generation": 1,
"labels": {
"test": "true"
},
"name": NAME,
"namespace": NAMESPACE,
"resourceVersion": "42",
"selfLink": _uri(NAMESPACE, NAME),
"uid": "d8f1ba26-b182-11e6-a364-fa163ea2a9c4"
},
"rules": [
{
"apiGroups": ["fiaas.schibsted.io"],
"resources": ["applications", "application-statuses"],
"verbs": ["get", "list", "watch"],
},
],
}
return mock_response
def _create_default_role():
object_meta = ObjectMeta(name=NAME, namespace=NAMESPACE, labels={"test": "true"})
policy_rules = [
PolicyRule(
apiGroups=[],
resources=[],
verbs=[],
resourceNames=[],
nonResourceURLs=[],
)
]
return Role(metadata=object_meta, rules=policy_rules)
def _uri(namespace, name=""):
uri = "/apis/rbac.authorization.k8s.io/v1/namespaces/{namespace}/roles/{name}"
return uri.format(name=name, namespace=namespace)
| Python | 0 | |
51030039f68d0dc4243b6ba125fb9b7aca44638d | Add Pipeline tests | test/data/test_pipeline.py | test/data/test_pipeline.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import torchtext.data as data
from ..common.torchtext_test_case import TorchtextTestCase
class TestPipeline(TorchtextTestCase):
@staticmethod
def repeat_n(x, n=3):
"""
Given a sequence, repeat it n times.
"""
return x * n
def test_pipeline(self):
pipeline = data.Pipeline(str.lower)
assert pipeline("Test STring") == "test string"
assert pipeline("ᑌᑎIᑕOᗪᕮ_Tᕮ᙭T") == "ᑌᑎiᑕoᗪᕮ_tᕮ᙭t"
assert pipeline(["1241", "Some String"]) == ["1241", "some string"]
args_pipeline = data.Pipeline(TestPipeline.repeat_n)
assert args_pipeline("test", 5) == "testtesttesttesttest"
assert args_pipeline(["ele1", "ele2"], 2) == ["ele1ele1", "ele2ele2"]
def test_composition(self):
pipeline = data.Pipeline(TestPipeline.repeat_n)
pipeline.add_before(str.lower)
pipeline.add_after(str.capitalize)
other_pipeline = data.Pipeline(str.swapcase)
other_pipeline.add_before(pipeline)
# Assert pipeline gives proper results after composition
# (test that we aren't modfifying pipes member)
assert pipeline("teST") == "Testtesttest"
assert pipeline(["ElE1", "eLe2"]) == ["Ele1ele1ele1", "Ele2ele2ele2"]
# Assert pipeline that we added to gives proper results
assert other_pipeline("teST") == "tESTTESTTEST"
assert other_pipeline(["ElE1", "eLe2"]) == ["eLE1ELE1ELE1", "eLE2ELE2ELE2"]
def test_exceptions(self):
with self.assertRaises(ValueError):
data.Pipeline("Not Callable")
| Python | 0.000001 | |
ca4f6e72c152f975c8bf01b920bcbdb3b611876b | add script to save_segment to disk | scripts/save_segment.py | scripts/save_segment.py | '''
IDAPython script that saves the content of a segment to a file.
Prompts the user for:
- segment name
- file path
Useful for extracting data from memory dumps.
Author: Willi Ballenthin <william.ballenthin@fireeye.com>
Licence: Apache 2.0
'''
import logging
from collections import namedtuple
import idaapi
import ida_bytes
import ida_segment
logger = logging.getLogger(__name__)
class BadInputError(Exception):
pass
Segment = namedtuple('SegmentBuffer', ['path', 'name'])
def prompt_for_segment():
''' :returns: a Segment instance, or raises BadInputError '''
class MyForm(idaapi.Form):
def __init__(self):
idaapi.Form.__init__(self, """STARTITEM 0
add segment by buffer
<##segment name:{name}>
<##output path:{path}>
""",
{
'path': idaapi.Form.FileInput(save=True),
'name': idaapi.Form.StringInput(),
})
def OnFormChange(self, fid):
return 1
f = MyForm()
f.Compile()
f.path.value = ""
f.name.value = ""
ok = f.Execute()
if ok != 1:
raise BadInputError('user cancelled')
path = f.path.value
if path == "" or path is None:
raise BadInputError('bad path provided')
name = f.name.value
if name == "" or name is None:
raise BadInputError('bad name provided')
f.Free()
return Segment(path, name)
def main(argv=None):
if argv is None:
argv = sys.argv[:]
try:
seg_spec = prompt_for_segment()
except BadInputError:
logger.error('bad input, exiting...')
return -1
seg = ida_segment.get_segm_by_name(seg_spec.name)
if not seg:
logger.error("bad segment, exiting...")
buf = ida_bytes.get_bytes(seg.start_ea, seg.end_ea - seg.start_ea)
with open(seg_spec.path, "wb") as f:
f.write(buf)
logger.info("wrote %x bytes", len(buf))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
main()
| Python | 0 | |
369eed75c8a2fdc916885344fabb14e116bb60f9 | add datatype test | tests/test_datatype.py | tests/test_datatype.py | # encoding: utf-8
from unittest import TestCase
from statscraper import Datatype, NoSuchDatatype
class TestDatatype(TestCase):
def test_datatype(self):
dt = Datatype("str")
self.assertTrue(str(dt) == "str")
def test_datatype_with_values(self):
dt = Datatype("region")
self.assertTrue(len(dt.allowed_values))
def test_none_existing_datatype(self):
with self.assertRaises(NoSuchDatatype):
Datatype("donkey_power")
def test_allowed_values(self):
dt = Datatype("region")
self.assertTrue(u"Växjö kommun" in dt.allowed_values)
self.assertEqual(str(dt.allowed_values["eu"]), "eu")
| Python | 0.000002 | |
9f7a8e01f7897e8979997b8845a9ace3f64d5412 | Add more tests | tests/test_generate.py | tests/test_generate.py | import pytest
from nlppln.generate import to_bool
def test_to_bool_correct():
assert to_bool('y') == True
assert to_bool('n') == False
def test_to_bool_error():
with pytest.raises(ValueError):
to_bool('foo')
| Python | 0 | |
2cd1ab91ca48b8a8d34eabcc2a01b4014a97bcf6 | add unit tests | test/test_ncompress.py | test/test_ncompress.py | import shutil
import subprocess
from io import BytesIO
import pytest
from ncompress import compress, decompress
@pytest.fixture
def sample_data():
chars = []
for i in range(15):
chars += [i * 16] * (i + 1)
chars += [0, 0, 0]
return bytes(chars)
@pytest.fixture
def sample_compressed(sample_data):
compress_cmd = shutil.which("compress")
if compress_cmd:
return subprocess.check_output(compress_cmd, input=sample_data)
return compress(sample_data)
def test_string_string(sample_data, sample_compressed):
assert compress(sample_data) == sample_compressed
assert decompress(sample_compressed) == sample_data
def test_string_stream(sample_data, sample_compressed):
out = BytesIO()
compress(sample_data, out)
out.seek(0)
assert out.read() == sample_compressed
out = BytesIO()
decompress(sample_compressed, out)
out.seek(0)
assert out.read() == sample_data
def test_stream_stream(sample_data, sample_compressed):
out = BytesIO()
compress(BytesIO(sample_data), out)
out.seek(0)
assert out.read() == sample_compressed
out = BytesIO()
decompress(BytesIO(sample_compressed), out)
out.seek(0)
assert out.read() == sample_data
def test_stream_string(sample_data, sample_compressed):
assert compress(BytesIO(sample_data)) == sample_compressed
assert decompress(BytesIO(sample_compressed)) == sample_data
def test_empty_input(sample_data):
assert decompress(compress(b"")) == b""
with pytest.raises(ValueError):
decompress(b"")
with pytest.raises(TypeError):
compress()
with pytest.raises(TypeError):
decompress()
def test_corrupted_input(sample_compressed):
sample = sample_compressed
for x in [
b"123",
sample[1:],
sample[:1],
b"\0" * 3 + sample[:3],
sample * 2,
b"\0" + sample
]:
with pytest.raises(ValueError) as ex:
decompress(x)
assert ("not in LZW-compressed format" in str(ex.value) or
"corrupt input - " in str(ex.value))
def test_str(sample_data, sample_compressed):
with pytest.raises(TypeError):
compress(sample_data.decode("latin1", errors="replace"))
with pytest.raises(TypeError):
decompress(sample_compressed.decode("latin1", errors="replace"))
def test_closed_input(sample_data, sample_compressed):
expected = "I/O operation on closed file."
with pytest.raises(ValueError) as ex:
stream = BytesIO(sample_data)
stream.close()
compress(stream)
assert expected in str(ex.value)
with pytest.raises(ValueError) as ex:
stream = BytesIO(sample_compressed)
stream.close()
decompress(stream)
assert expected in str(ex.value)
def test_file_input():
with open(__file__, "rb") as f:
expected = f.read()
f.seek(0)
assert decompress(compress(f)) == expected
| Python | 0.000001 | |
c297de3964c53beffdf33922c0bffd022b376ae6 | Create __init__.py | crawl/__init__.py | crawl/__init__.py | Python | 0.000429 | ||
405385e1c840cd8a98d6021358c603964fa8d0d3 | Create simulator.py | simulator.py | simulator.py | import numpy as np
import random
from naive_selector import NaiveSelector
from bayesian_selector import BayesianSelector
from multiarm_selector import MultiarmSelector
NUM_SIM = 30
NUM_USERS = 1000
def coin_flip(prob_true):
if random.random() < prob_true:
return True
else:
return False
def simulate(prob_click_a, prob_click_b, num_users):
naive_selector = NaiveSelector()
bayesian_selector = BayesianSelector()
multiarm_selector = MultiarmSelector()
user_clicks = [
{
"A": coin_flip(prob_click_a),
"B": coin_flip(prob_click_b)
}
for i in range(num_users)
]
for user_click in user_clicks:
naive_selector.handle_response_from_new_user(user_click)
bayesian_selector.handle_response_from_new_user(user_click)
multiarm_selector.handle_response_from_new_user(user_click)
return naive_selector, bayesian_selector, multiarm_selector
def main():
f = open("./data/simulation_results.csv", "w", 0)
f.write(
"prob_click_a, prob_click_b,"
" num_click_naive, num_click_bayesian, num_click_multiarm,"
" prob_correct_naive, prob_correct_bayesian, prob_correct_multiarm\n"
)
prob_click_as = [0.1, 0.3, 0.6]
for prob_click_a in prob_click_as:
prob_click_bs = np.arange(prob_click_a + 0.01, prob_click_a + 0.2, 0.01)
for prob_click_b in prob_click_bs:
print "working on", prob_click_a, prob_click_b
num_click_naive = np.zeros(NUM_SIM)
num_click_bayesian = np.zeros(NUM_SIM)
num_click_multiarm = np.zeros(NUM_SIM)
is_correct_naive = np.zeros(NUM_SIM)
is_correct_bayesian = np.zeros(NUM_SIM)
is_correct_multiarm = np.zeros(NUM_SIM)
# do 1000 simulations for each run
for i in range(NUM_SIM):
naive_selector, bayesian_selector, multiarm_selector = simulate(
prob_click_a=prob_click_a,
prob_click_b=prob_click_b,
num_users=NUM_USERS
)
num_click_naive[i], _ = naive_selector.prepare_report()
num_click_bayesian[i], _ = bayesian_selector.prepare_report()
num_click_multiarm[i], _ = multiarm_selector.prepare_report()
is_correct_naive[i] = naive_selector.did_give_correct_answer()
is_correct_bayesian[i] = bayesian_selector.did_give_correct_answer()
is_correct_multiarm[i] = multiarm_selector.did_give_correct_answer()
f.write(
"{}, {}, {}, {}, {}, {}, {}, {}\n".format(
prob_click_a,
prob_click_b,
np.mean(num_click_naive),
np.mean(num_click_bayesian),
np.mean(num_click_multiarm),
np.mean(is_correct_naive),
np.mean(is_correct_bayesian),
np.mean(is_correct_multiarm)
)
)
f.close()
if __name__ == "__main__":
main()
| Python | 0.000001 | |
ccf21faf0110c9c5a4c28a843c36c53183d71550 | add missing file | pyexcel_xls/__init__.py | pyexcel_xls/__init__.py | """
pyexcel_xls
~~~~~~~~~~~~~~~~~~~
The lower level xls/xlsm file format handler using xlrd/xlwt
:copyright: (c) 2015-2016 by Onni Software Ltd
:license: New BSD License
"""
from pyexcel_io.io import get_data as read_data, isstream, store_data as write_data
def get_data(afile, file_type=None, **keywords):
if isstream(afile) and file_type is None:
file_type = 'xls'
return read_data(afile, file_type=file_type, **keywords)
def save_data(afile, data, file_type=None, **keywords):
if isstream(afile) and file_type is None:
file_type = 'xls'
write_data(afile, data, file_type=file_type, **keywords)
| Python | 0.000003 | |
5c4ed354d1bfd5c4443cc031a29e6535b2063178 | add test-env | sikuli-script/src/test/python/test-env.py | sikuli-script/src/test/python/test-env.py | from __future__ import with_statement
from sikuli.Sikuli import *
print Env.getOS(), Env.getOSVersion()
print "MAC?", Env.getOS() == OS.MAC
print Env.getMouseLocation()
| Python | 0 | |
1828f7bb8cb735e755dbcb3a894724dec28748cc | add sort file | sort/sort.py | sort/sort.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
| Python | 0.000001 | |
fbd6db138ce65825e56a8d39bf30ed8525b88503 | Add exception handler for db not found errors. | resources/middlewares/db_not_found_handler.py | resources/middlewares/db_not_found_handler.py | import falcon
def handler(ex, req, resp, params):
raise falcon.HTTPNotFound()
| Python | 0 | |
a1eaf66efa2041849e906010b7a4fb9412a9b781 | Add instance method unit tests | tests/test_instancemethod.py | tests/test_instancemethod.py | # Imports
import random
import unittest
from funky import memoize, timed_memoize
class Dummy(object):
@memoize
def a(self):
return random.random()
class TestInstanceMethod(unittest.TestCase):
def test_dummy(self):
dummy = Dummy()
v1 = dummy.a()
v2 = dummy.a()
dummy.a.clear()
v3 = dummy.a()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | |
c3221d70f829dc2968ebfb1a47efd9538a1ef59f | test gaussian + derivatives | tests/vigra_compare.py | tests/vigra_compare.py | import fastfilters as ff
import numpy as np
import sys
try:
import vigra
except ImportError:
print("WARNING: vigra not available - skipping tests.")
with open(sys.argv[1], 'w') as f:
f.write('')
exit()
a = np.random.randn(1000000).reshape(1000,1000)
for order in [0,1,2]:
for sigma in [1.0, 5.0, 10.0]:
res_ff = ff.gaussian2d(a, order, sigma)
res_vigra = vigra.filters.gaussianDerivative(a, sigma, [order,order])
if not np.allclose(res_ff, res_vigra, atol=1e-6):
print(order, sigma, np.max(np.abs(res_ff - res_vigra)))
raise Exception()
np.unique(ff.hog2d(a, 1.0)) | Python | 0.000002 | |
8ec1d35fe79554729e52aec4e0aabd1d9f64a9c7 | Put main.py display functions in its own module so they can be used in other parts of the package | fire_rs/display.py | fire_rs/display.py | from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource
from matplotlib.ticker import FuncFormatter
from matplotlib import cm
def get_default_figure_and_axis():
fire_fig = plt.figure()
fire_ax = fire_fig.gca(aspect='equal', xlabel="X position [m]", ylabel="Y position [m]")
ax_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
fire_ax.yaxis.set_major_formatter(ax_formatter)
fire_ax.xaxis.set_major_formatter(ax_formatter)
return fire_fig, fire_ax
def plot_firefront_contour(ax, x, y, firefront, nfronts=20):
fronts = ax.contour(x, y, firefront, nfronts, cmap=cm.Set1)
labels = ax.clabel(fronts, inline=True, fontsize='smaller', inline_spacing=1, linewidth=2, fmt='%.0f')
return fronts, labels
def plot_elevation_contour(ax, x, y, z):
contour = ax.contour(x, y, z, 15, cmap=cm.gist_earth)
labels = plt.clabel(contour, inline=1, fontsize=10)
return contour, labels
def plot_elevation_shade(ax, x, y, z, dx=25, dy=25):
cbar_lim = (z.min(), z.max())
image_scale = (x[0][0], x[0][x.shape[0] - 1], y[0][0], y[y.shape[0] - 1][0])
ls = LightSource(azdeg=315, altdeg=45)
ax.imshow(ls.hillshade(z, vert_exag=5, dx=dx, dy=dy), extent=image_scale, cmap='gray')
return ax.imshow(ls.shade(z, cmap=cm.terrain, blend_mode='overlay', vert_exag=1, dx=dx, dy=dy,
vmin=cbar_lim[0], vmax=cbar_lim[1]),
extent=image_scale, vmin=cbar_lim[0], vmax=cbar_lim[1], cmap=cm.terrain)
def plot_wind_flow(ax, x, y, wx, wy, wvel):
return ax.streamplot(x, y, wx, wy, density=1, linewidth=1, color='dimgrey')
def plot_wind_arrows(ax, x, y, wx, wy):
return ax.quiver(x, y, wx, wy, pivot='middle', color='dimgrey')
def plot3d_elevation_shade(ax, x, y, z, dx=25, dy=25):
ls = LightSource(azdeg=120, altdeg=45)
rgb = ls.shade(z, cmap=cm.terrain, vert_exag=0.1, blend_mode='overlay')
return ax.plot_surface(x, y, z, facecolors=rgb, rstride=5, cstride=5, linewidth=0, antialiased=True, shade=True)
def plot3d_wind_arrows(ax, x, y, z, wx, wy, wz):
return ax.quiver(x, y, z, wx, wy, wz, pivot='middle', cmap=cm.viridis)
| Python | 0 | |
b50a85bb93e1e33babb6bc1c7888a22f2bde68de | Add a more reusable document writer class | docs/docs_writer.py | docs/docs_writer.py | import os
class DocsWriter:
"""Utility class used to write the HTML files used on the documentation"""
def __init__(self, filename, type_to_path_function):
"""Initializes the writer to the specified output file,
creating the parent directories when used if required.
'type_to_path_function' should be a function which, given a type name
and a named argument relative_to, returns the file path for the specified
type, relative to the given filename"""
self.filename = filename
self.handle = None
# Should be set before calling adding items to the menu
self.menu_separator_tag = None
# Utility functions TODO There must be a better way
self.type_to_path = lambda t: type_to_path_function(t, relative_to=self.filename)
# Control signals
self.menu_began = False
self.table_columns = 0
self.table_columns_left = None
# High level writing
def write_head(self, title, relative_css_path):
"""Writes the head part for the generated document, with the given title and CSS"""
self.write('''<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>''')
self.write(title)
self.write('''</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link href="''')
self.write(relative_css_path)
self.write('''" rel="stylesheet">
<link href="https://fonts.googleapis.com/css?family=Nunito|Space+Mono" rel="stylesheet">
</head>
<body>
<div id="main_div">''')
def set_menu_separator(self, relative_image_path):
"""Sets the menu separator. Must be called before adding entries to the menu"""
if relative_image_path:
self.menu_separator_tag = '<img src="%s" alt="/" />' % relative_image_path
else:
self.menu_separator_tag = None
def add_menu(self, name, link=None):
"""Adds a menu entry, will create it if it doesn't exist yet"""
if self.menu_began:
if self.menu_separator_tag:
self.write(self.menu_separator_tag)
else:
# First time, create the menu tag
self.write('<ul class="horizontal">')
self.menu_began = True
self.write('<li>')
if link:
self.write('<a href="')
self.write(link)
self.write('">')
# Write the real menu entry text
self.write(name)
if link:
self.write('</a>')
self.write('</li>')
def end_menu(self):
"""Ends an opened menu"""
if not self.menu_began:
raise ValueError('No menu had been started in the first place.')
self.write('</ul>')
def write_title(self, title, level=1):
"""Writes a title header in the document body, with an optional depth level"""
self.write('<h%d>' % level)
self.write(title)
self.write('</h%d>' % level)
def write_code(self, tlobject):
"""Writes the code for the given 'tlobject' properly formatted ith with hyperlinks"""
self.write('<pre>---')
self.write('functions' if tlobject.is_function else 'types')
self.write('---\n')
# Write the function or type and its ID
if tlobject.namespace:
self.write(tlobject.namespace)
self.write('.')
self.write(tlobject.name)
self.write('#')
self.write(hex(tlobject.id)[2:].rjust(8, '0'))
# Write all the arguments (or do nothing if there's none)
for arg in tlobject.args:
self.write(' ')
# "Opening" modifiers
if arg.generic_definition:
self.write('{')
# Argument name
self.write(arg.name)
self.write(':')
# "Opening" modifiers
if arg.is_flag:
self.write('flags.%d?' % arg.flag_index)
if arg.is_generic:
self.write('!')
if arg.is_vector:
self.write('<a href="%s">Vector</a><' % self.type_to_path('vector'))
# Argument type
if arg.type:
self.write('<a href="')
self.write(self.type_to_path(arg.type))
self.write('">%s</a>' % arg.type)
else:
self.write('#')
# "Closing" modifiers
if arg.is_vector:
self.write('>')
if arg.generic_definition:
self.write('}')
# Now write the resulting type (result from a function, or type for a constructor)
self.write(' = <a href="')
self.write(self.type_to_path(tlobject.result))
self.write('">%s</a>' % tlobject.result)
self.write('</pre>')
def begin_table(self, column_count):
"""Begins a table with the given 'column_count', required to automatically
create the right amount of columns when adding items to the rows"""
self.table_columns = column_count
self.table_columns_left = 0
self.write('<table>')
def add_row(self, text, link=None, bold=False, align=None):
"""This will create a new row, or add text to the next column
of the previously created, incomplete row, closing it if complete"""
if not self.table_columns_left:
# Starting a new row
self.write('<tr>')
self.table_columns_left = self.table_columns
self.write('<td')
if align:
self.write(' style="text-align:')
self.write(align)
self.write('"')
self.write('>')
if bold:
self.write('<b>')
if link:
self.write('<a href="')
self.write(link)
self.write('">')
# Finally write the real table data, the given text
self.write(text)
if link:
self.write('</a>')
if bold:
self.write('</b>')
self.write('</td>')
self.table_columns_left -= 1
if not self.table_columns_left:
self.write('</tr>')
def end_table(self):
# If there was any column left, finish it before closing the table
if self.table_columns_left:
self.write('</tr>')
self.write('</table>')
def write_text(self, text):
"""Writes a paragraph of text"""
self.write('<p>')
self.write(text)
self.write('</p>')
def end_body(self):
"""Ends the whole document. This should be called the last"""
self.write('</div></body></html>')
# "Low" level writing
def write(self, s):
"""Wrapper around handle.write"""
self.handle.write(s)
# With block
def __enter__(self):
# Sanity check
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
self.handle = open(self.filename, 'w', encoding='utf-8')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.handle.close()
| Python | 0.000002 | |
b17252a0b1becfda77e4244cf48c2fb9f868c03b | add method to pregenerate html reports | src/bat/generateuniquehtml.py | src/bat/generateuniquehtml.py | #!/usr/bin/python
## Binary Analysis Tool
## Copyright 2012 Armijn Hemel for Tjaldur Software Governance Solutions
## Licensed under Apache 2.0, see LICENSE file for details
'''
This is a plugin for the Binary Analysis Tool. It takes the output of hexdump -Cv
and writes it to a file with gzip compression. The output is later used in the
(upcoming) graphical user interface.
This should be run as a postrun scan
'''
import os, os.path, sys, gzip, cgi
def generateHTML(filename, unpackreport, leafscans, scantempdir, toplevelscandir, envvars={}):
if not unpackreport.has_key('sha256'):
return
scanenv = os.environ.copy()
if envvars != None:
for en in envvars.split(':'):
try:
(envname, envvalue) = en.split('=')
scanenv[envname] = envvalue
except Exception, e:
pass
## TODO: check if BAT_REPORTDIR exists
reportdir = scanenv.get('BAT_REPORTDIR', '.')
for i in leafscans:
if i.keys()[0] == 'ranking':
if len(i['ranking']['reports']) != 0:
htmllinks = []
for j in i['ranking']['reports']:
if len(j[2]) != 0:
## here we should either do a database lookup to get the checksum,
## or check if they are already in the report
htmllinks.append((j[1], j[2]))
if htmllinks != []:
uniquehtml = "<html><body><h1>Unique matches per package</h1><p><ul>"
## first generate a header
for h in htmllinks:
uniquehtml = uniquehtml + "<li><a href=\"#%s\">%s</a>" % (h[0], h[0])
uniquehtml = uniquehtml + "</ul></p>"
for h in htmllinks:
uniquehtml = uniquehtml + "<hr><h2><a name=\"%s\" href=\"#%s\">Matches for: %s (%d)</a></h2>" % (h[0], h[0], h[0], len(h[1]))
for k in h[1]:
## we have a list of tuples, per unique string we have a list of sha256sums and meta info
if len(k) > 1:
uniquehtml = uniquehtml + "<h5>%s</h5><p><table><td><b>Filename</b></td><td><b>Version</b></td><td><b>Line number</b></td><td><b>SHA256</b></td></tr>" % cgi.escape(k[0])
uniqtablerows = map(lambda x: "<tr><td>%s</td><td><a href=\"unique:/%s#%d\">%s</a></td><td>%d</td><td>%s</td></tr>" % (x[3], x[0], x[2], x[1], x[2], x[0]), k[1])
uniquehtml = uniquehtml + reduce(lambda x, y: x + y, uniqtablerows) + "</table></p>\n"
else:
uniquehtml = uniquehtml + "<h5>%s</h5>" % cgi.escape(k[0])
uniquehtml = uniquehtml + "</body></html>"
uniquehtmlfile = open("%s/%s-unique.html" % (reportdir, unpackreport['sha256']), 'w')
uniquehtmlfile.write(uniquehtml)
uniquehtmlfile.close()
| Python | 0 | |
78f730b405c6e67988cdc9efab1aa5316c16849f | Add initial test for web response | tests/test_web_response.py | tests/test_web_response.py | import unittest
from unittest import mock
from aiohttp.web import Request, StreamResponse
from aiohttp.protocol import Request as RequestImpl
class TestStreamResponse(unittest.TestCase):
def make_request(self, method, path, headers=()):
self.app = mock.Mock()
self.transport = mock.Mock()
message = RequestImpl(self.transport, method, path)
message.headers.extend(headers)
self.payload = mock.Mock()
self.protocol = mock.Mock()
req = Request(self.app, message, self.payload, self.protocol)
return req
def test_ctor(self):
req = self.make_request('GET', '/')
resp = StreamResponse(req)
self.assertEqual(req, resp._request)
self.assertIsNone(req._response)
self.assertEqual(200, resp.status_code)
self.assertTrue(resp.keep_alive)
| Python | 0 | |
644a678d3829513361fdc099d759ca964100f2e6 | Add script to replace text | text-files/replace-text.py | text-files/replace-text.py | #!/usr/bin/env python3
# This Python 3 script replaces text in a file, in-place.
# For Windows, use:
#!python
import fileinput
import os
import sys
def isValidFile(filename):
return (filename.lower().endswith('.m3u') or
filename.lower().endswith('.m3u8'))
def processFile(filename):
'''Makes custom text modifications to a single file.
Returns true if modified, false if not modified.
'''
modified = False
with fileinput.input(filename, inplace=True) as f:
for line in f:
# Check any condition
if '\\' in line:
modified = True
# Make the modifications
newline = line.replace('\\', '/')
sys.stdout.write(newline)
return modified
if __name__ == '__main__':
for filename in os.listdir(os.getcwd()):
if not isValidFile(filename):
continue
modified = processFile(filename)
if modified:
print(filename)
# Wait for user input to finish
input() | Python | 0.000003 | |
8d8f6b99357912fa9a29098b0744712eeb1d4c70 | Add coder/decoder skeleton | src/coder.py | src/coder.py | from bitarray import bitarray
from datetime import datetime, timedelta
def decode():
with open() as f:
timestamps = []
start = [0, 0, 0]
end = [1, 1, 1]
delta = timedelta(seconds=1)
for line in f:
ts = line.split(" ", 1)[0]
ts = datetime.strptime(ts, '%H:%M:%S.%f')
timestamps.append(ts)
bits = [int(t2 - t1 > delta) for t2, t1 in zip(timestamps[1:], timestamps[:-1])]
bits = extract_message(bits, start, end)
print get_message(bits)
def find_index(list, sublist):
print('Find {} in {}'.format(sublist, list))
for i in range(len(list) - len(sublist) + 1):
if list[i:i+len(sublist)] == sublist:
return i
return None
def extract_message(bits, start, end):
start_index = find_index(bits, start) + len(start)
end_index = find_index(bits[start_index:], end)
return bits[start_index:start_index + end_index]
def get_message(bits):
return bitarray(bits).tostring()
def get_bits(msg):
ba = bitarray.bitarray()
ba.fromstring(msg)
return ba.tolist()
| Python | 0.000004 | |
3c18ace928b0339b0edf4763f4132d327936cbe8 | add utils | src/utils.py | src/utils.py | def set_trace():
from IPython.core.debugger import Pdb
import sys
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def plot_ROC(actual, predictions):
# plot the FPR vs TPR and AUC for a two class problem (0,1)
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
false_positive_rate, true_positive_rate, thresholds = roc_curve(actual, predictions)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, true_positive_rate, 'b',
label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show() | Python | 0.000004 | |
0bca09339bb49e4540c5be8162e11ea3e8106200 | Create a PySide GUI window. | budget.py | budget.py | #!/usr/bin/env python
import sys
from PySide import QtGui
app = QtGui.QApplication(sys.argv)
wid = QtGui.QWidget()
wid.resize(250, 150)
wid.setWindowTitle('Simple')
wid.show()
sys.exit(app.exec_())
| Python | 0 | |
c02b8011b20e952460a84b7edf1b44fcb0d07319 | add re07.py | trypython/stdlib/re_/re07.py | trypython/stdlib/re_/re07.py | """
正規表現のサンプルです
部分正規表現(「(」と「)」)のグルーピングについて
REFERENCES:: http://bit.ly/2TVtVNY
http://bit.ly/2TVRy8Z
http://bit.ly/2TWQQs4
"""
import re
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
from trypython.stdlib.re_ import util
class Sample(SampleBase):
def exec(self):
# ---------------------------------------------------
# 部分正規表現のグルーピング
# -----------------------------
# 「(」と「)」に囲まれた正規表現をひとまとまりにして
# 部分正規表現を定義することができる.
#
# 「(」と「)」を入れ子にすることも可能.
#
# 例えば (abc)+ という正規表現は
# 「abcという文字列が一回以上連続した文字列」を表す.
#
# また、通常の四則計算と同様に優先度を変更するためにも利用する.
#
# 例えば、「田中|佐藤 太郎」という正規表現は
# 「田中、または佐藤 太郎を意味する。これを
# 「(田中|佐藤) 太郎」とすると、田中 太郎、または佐藤 太郎
# という意味となる。
#
# グルーピングには、もう一つ大きな役目があり
# 「マッチした範囲をキャプチャ(記憶)する」という事もできる.
# キャプチャした内容は後から特別な記法により取り出す事が可能.
# 置換などを行う際に重宝する.
# ---------------------------------------------------
s = '田中 太郎'
p = r'田中|佐藤 太郎'
r = re.compile(p)
m = r.match(s)
if m:
# 田中 太郎でマッチせずに 「田中」のみがマッチする
util.print_match_object(m)
m = r.fullmatch(s)
if not m:
# fullmatch 指定した場合はマッチしないと判定される
# fullmatch メソッドは python 3.4 で追加された
pr(f'({p}).fullmatch({s})', 'マッチせず')
p = r'(田中|佐藤) 太郎'
r = re.compile(p)
m = r.match(s)
if m:
# グルーピング指定しているので「田中 太郎」でマッチする
# かつ、グルーピングにより「田中」の部分がキャプチャされる
util.print_match_object(m)
m = r.fullmatch(s)
if m:
# グルーピング指定しているので「田中 太郎」でフルマッチする
# かつ、グルーピングにより「田中」の部分がキャプチャされる
util.print_match_object(m)
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
| Python | 0 | |
19a23591b9b21cbe7dd34c8be7d2cb435c0f965a | generate XML works | umpa/extensions/XML.py | umpa/extensions/XML.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Adriano Monteiro Marques.
#
# Author: Bartosz SKOWRON <getxsick at gmail dot com>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import xml.dom.minidom
from umpa.protocols._fields import Flags
def write(filename, *packets):
# no packets?
if not packets:
return
doc = xml.dom.minidom.Document()
root = doc.createElementNS(None, 'UMPA')
doc.appendChild(root)
for i, packet in enumerate(packets):
pa = doc.createElementNS(None, 'packet')
pa.setAttributeNS(None, "id", str(i))
root.appendChild(pa)
for proto in packet.protos:
pr = doc.createElementNS(None, 'protocol')
pr.setAttributeNS(None, "type", proto.name)
pa.appendChild(pr)
for field in proto.get_fields_keys():
f = doc.createElementNS(None, field)
pr.appendChild(f)
# if Flags...we need care about BitFlags objects
if isinstance(proto._get_field(field), Flags):
for flag in proto._get_field(field).get():
b = doc.createElementNS(None, flag)
f.appendChild(b)
b.appendChild(doc.createTextNode(
str(proto._get_field(field)._value[flag].get())))
else:
f.appendChild(doc.createTextNode(
str(proto._get_field(field).get())))
print doc.toprettyxml()
open(filename, "w").write(doc.toprettyxml())
| Python | 0.999372 | |
4ff319033277bbaa04b1e226f9a90232ecadd49d | Trying out the potential new name, Spectra | cronenberg/config.py | cronenberg/config.py | DEBUG = True
DEFAULT_FROM_TIME = '-3h'
DEFAULT_THEME = 'light'
DASHBOARD_APPNAME = 'Spectra'
SQLALCHEMY_DATABASE_URI = 'sqlite:///cronenberg.db'
GRAPHITE_URL = 'http://graphite.prod.urbanairship.com'
SERVER_ADDRESS = '0.0.0.0'
SERVER_PORT = 5000
INTERACTIVE_CHARTS_DEFAULT = True
INTERACTIVE_CHARTS_RENDERER = 'nvd3'
DASHBOARD_RANGE_PICKER = [
('Past Hour', '-1h'),
('Past 3 Hrs', '-3h'),
('Past 12 Hrs', '-12h'),
('Past Day', '-1d'),
('Past Wk', '-1w'),
('Past 2 Wks', '-2w'),
]
| DEBUG = True
DEFAULT_FROM_TIME = '-3h'
DEFAULT_THEME = 'light'
DASHBOARD_APPNAME = 'Cronenberg'
SQLALCHEMY_DATABASE_URI = 'sqlite:///cronenberg.db'
GRAPHITE_URL = 'http://graphite.prod.urbanairship.com'
SERVER_ADDRESS = '0.0.0.0'
SERVER_PORT = 5000
INTERACTIVE_CHARTS_DEFAULT = True
INTERACTIVE_CHARTS_RENDERER = 'nvd3'
DASHBOARD_RANGE_PICKER = [
('Past Hour', '-1h'),
('Past 3 Hrs', '-3h'),
('Past 12 Hrs', '-12h'),
('Past Day', '-1d'),
('Past Wk', '-1w'),
('Past 2 Wks', '-2w'),
]
| Python | 0.999999 |
bc28f6ab7ba5bb5e82bf38c544a4d091d89973ea | Use servoblaster to control servo | candycrush.py | candycrush.py | #!/usr/bin/env python
import os.path
import subprocess
import time
def scaler(OldMin, OldMax, NewMin, NewMax):
def fn(OldValue):
return (((OldValue - OldMin) * (NewMax - NewMin)) / (OldMax - OldMin)) + NewMin
return fn
def setup_servod():
if not os.path.exists("/dev/servoblaster"):
subprocess.call(["servod"])
def set_servo(physical_pin, degrees):
servodegrees = scaler(0, 180, 53, 240)
with open("/dev/servoblaster", "w") as f:
servovalue = int(servodegrees(degrees))
f.write("P1-{}={}".format(physical_pin, servovalue))
def main():
set_servo(11, 0)
time.sleep(2)
set_servo(11, 180)
time.sleep(2)
set_servo(11, 90)
time.sleep(2)
set_servo(11, 45)
time.sleep(2)
set_servo(11, 30)
if __name__ =='__main__':
main()
| Python | 0.000001 | |
26595ad3dd7dcd9dfd16ae551345db9b7e58412a | Add updater | updater/openexchangerates.py | updater/openexchangerates.py | #!env/bin/python
import urllib2
import simplejson
import datetime
APP_ID = "40639356d56148f1ae26348d670e889f"
TARGET_URL = "http://taggy-api.bx23.net/api/v1/currency/"
def main():
print 'Getting rates...'
request = urllib2.Request("http://openexchangerates.org/api/latest.json?app_id=%s" % (APP_ID))
opener = urllib2.build_opener()
f = opener.open(request)
result = simplejson.load(f)
rates = result['rates']
date = datetime.datetime.fromtimestamp(int(result['timestamp']))
print 'Rates [%s] size: %s' % (date, len(rates))
print 'Sending to API...'
update_j = {"currency" : [], "timestamp" : result['timestamp']}
for name, value in rates.iteritems():
update_j["currency"].append({"name" : name, "value" : value})
request = urllib2.Request(TARGET_URL, simplejson.dumps(update_j), {'Content-Type': 'application/json'})
f = urllib2.urlopen(request)
response = f.read()
f.close()
print ' API: %s' % (response)
if __name__ == '__main__':
main()
| Python | 0 | |
3ef4fdcc98a12111aee6f0d214af98ef68315773 | add reboot module | gozerlib/reboot.py | gozerlib/reboot.py | # gozerbot/utils/reboot.py
#
#
"""
reboot code.
"""
## gozerlib imports
from gozerlib.fleet import fleet
from gozerlib.config import cfg as config
## basic imports
from simplejson import dump
import os
import sys
import pickle
import tempfile
def reboot():
"""
reboot the bot.
.. literalinclude:: ../../gozerbot/reboot.py
:pyobject: reboot
"""
os.execl(sys.argv[0], *sys.argv)
def reboot_stateful(bot, ievent, fleet, partyline):
"""
reboot the bot, but keep the connections.
:param bot: bot on which the reboot command is given
:type bot: gozerbot.botbase.BotBase
:param ievent: event that triggered the reboot
:type ievent: gozerbot.eventbase. EventBase
:param fleet: the fleet of the bot
:type fleet: gozerbot.fleet.Fleet
:param partyline: partyline of the bot
:type partyline: gozerbot.partyline.PartyLine
.. literalinclude:: ../../gozerbot/reboot.py
:pyobject: reboot_stateful
"""
config.reload()
session = {'bots': {}, 'name': bot.name, 'channel': ievent.channel, 'partyline': []}
for i in fleet.bots:
session['bots'].update(i._resumedata())
session['partyline'] = partyline._resumedata()
sessionfile = tempfile.mkstemp('-session', 'gozerbot-')[1]
dump(session, open(sessionfile, 'w'))
fleet.save()
fleet.exit(jabber=True)
os.execl(sys.argv[0], sys.argv[0], '-r', sessionfile)
| Python | 0.000001 | |
edec2fc1f57c31e15793fd56b0f24bb58ba345d9 | Create evalFunctionsLib.py | evalFunctionsLib.py | evalFunctionsLib.py | def plot_stream(stream, data, r):
"""
Plots the values of a specific stream over time.
Inputs: stream - an int indicating the index of the desired stream
data - An array of all sensor data
r = a RAVQ object
Returns: The number of -1s (indicating invalid data) in this stream
"""
values = []
negs = 0
for i in range(len(states)):
if states[i] == -1:
negs += 1
else:
values.append(r.models[states[i]].vector[stream])
plot(data[1][negs:len(states)], values, 'r-')
return negs
def plotColorStatesNoNumber(states):
"""
Makes a color-bar plot of states over time.
Inputs: states - a list indicating the state at
each time step.
"""
fig = figure(figsize=(9.0,6))
for i in range(len(states)):
if states[i] == -1:
plot(i, 0, "ko", hold=True)
elif states[i] == 0:
plot(i, 1, "|", color="LawnGreen")
elif states[i] == 1:
plot(i, 1, "|", color="LimeGreen")
elif states[i] == 2:
plot(i, 1, "|", color="teal")
elif states[i] == 7:
plot(i, 1, "|", color="DarkGreen")
elif states[i] == 4:
plot(i, 1, "b|")
elif states[i] == 5:
plot(i, 1, "|", color="DarkBlue")
elif states[i] == 6:
plot(i, 1, "|", color="purple")
elif states[i] == 3:
plot(i, 1, "|", color="green")
elif states[i] == 8:
plot(i, 1, "|", color="yellow")
elif states[i] == 9:
plot(i, 1, "|", color="navy")
elif states[i] == 11:
plot(i, 1, "|", color="GreenYellow")
elif states[i] == 10:
plot(i, 1, "|", color="orange")
elif states[i] == 12:
plot(i, 1, "|", color="red")
else:
plot(i, 1, "-")
print i
def plotColorStates(states):
"""
Makes a plot in which state is on the y axis and time is on the x axis
and points are colored by state.
Input: states - a list of ints representing the state at each time step.
"""
fig = figure(figsize=(9.0,6))
for i in range(len(states)):
if states[i] == -1:
plot(i, 0, "ko", hold=True)
elif states[i] == 0:
plot(i, 9, "|", color="LawnGreen")
elif states[i] == 1:
plot(i, 8, "|", color="LimeGreen")
elif states[i] == 2:
plot(i, 5, "|", color="teal")
elif states[i] == 7:
plot(i, 6, "|", color="DarkGreen")
elif states[i] == 4:
plot(i, 4, "b|")
elif states[i] == 5:
plot(i, 2, "|", color="DarkBlue")
elif states[i] == 6:
plot(i, 1, "|", color="purple")
elif states[i] == 3:
plot(i, 7, "|", color="green")
elif states[i] == 8:
plot(i, 11, "|", color="yellow")
elif states[i] == 9:
plot(i, 3, "|", color="navy")
elif states[i] == 11:
plot(i, 10, "|", color="GreenYellow")
elif states[i] == 10:
plot(i, 12, "|", color="orange")
elif states[i] == 12:
plot(i, 13, "|", color="red")
else:
plot(i, 1, "-")
print i
def printTransitions(states):
for i in range(1,len(states)):
if states[i-1] != states[i]:
print data[0][i], ":", states[i-1], "->", states[i]
| Python | 0.000002 | |
3c37f63f65a9d85c605dde55ae19c8d5d62ad777 | add missing file | rmake/plugins/plugin.py | rmake/plugins/plugin.py | #
# Copyright (c) 2006 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Definition of plugins available for rmake plugins.
Plugin writers should derive from one of these classes.
The plugin will be called with the hooks described here, if the
correct program is being run. For example, when running rmake-server,
the server hooks will be run.
"""
from rmake.lib.pluginlib import Plugin
TYPE_CLIENT = 0
TYPE_SERVER = 1
TYPE_SUBSCRIBER = 2
class ClientPlugin(Plugin):
types = [TYPE_CLIENT]
def client_preInit(self, main):
"""
Called right after plugins have been loaded.
"""
pass
def client_preCommand(self, main, client):
"""
Called after the command-line client has instantiated,
but before the command has been executed.
"""
pass
class ServerPlugin(Plugin):
types = [TYPE_SERVER]
def server_preConfig(self, main):
"""
Called before the configuration file has been read in.
"""
pass
def server_preInit(self, main, argv):
"""
Called before the server has been instantiated.
"""
pass
def server_postInit(self, server):
"""
Called after the server has been instantiated but before
serving is done.
"""
pass
def server_pidDied(self, pid, status):
"""
Called when the server collects a child process that has died.
"""
pass
def server_loop(self, server):
"""
Called once per server loop, between requests.
"""
pass
def server_builderInit(self, server, builder):
"""
Called when the server instantiates a builder for a job.
"""
pass
def server_shutDown(self, server):
"""
Called when the server is halting.
"""
pass
class SubscriberPlugin(Plugin):
types = [TYPE_SUBSCRIBER]
protocol = None
def subscriber_get(self, uri, name):
"""
Should return a child of the StatusSubscirber class.
"""
pass
| Python | 0.000003 | |
2100eb3e0a72395f23571c6be2bada9939739869 | add ex | checkDigit.py | checkDigit.py | #-*-coding:UTF-8 -*-
#
# 判斷輸入是否為整數(int)
input_string = input('Please input n:')
#while input_string.isdigit() == False:
while not input_string.isdigit():
print("Error, %s is not digit!" % input_string)
input_string = input('Please input n:')
print("%s is digit!" % input_string)
| Python | 0.00024 | |
09592b081a68f912bf9bb73c5269af8398c36f64 | Add unit test for treating Ordering as a collection | tests/test_collection.py | tests/test_collection.py | from unittest import TestCase
from ordering import Ordering
class TestOrderingAsCollection(TestCase):
def setUp(self) -> None:
self.ordering = Ordering[int]()
self.ordering.insert_start(0)
for n in range(10):
self.ordering.insert_after(n, n + 1)
def test_length(self) -> None:
self.assertEqual(len(self.ordering), 11)
def test_iterates_over_correct_elements(self) -> None:
self.assertListEqual(
list(self.ordering),
list(range(11))
)
def test_contains_correct_elements(self) -> None:
for n in range(11):
self.assertIn(n, self.ordering)
for n in range(11, 20):
self.assertNotIn(n, self.ordering)
for n in range(-10, 0):
self.assertNotIn(n, self.ordering)
| Python | 0 | |
6f1ed2fcdd43a5237d0211b426a216fd25930734 | add test preprocess | tests/test_preprocess.py | tests/test_preprocess.py | # coding: utf-8
code = '''
n = 10
for i in range(0,n):
x = 2 * i
y = x / 3
# a comment
if y > 1:
print(y)
for j in range(0, 3):
x = x * y
y = x + 1
if x > 1:
print(x)
'''
code = '''
#$ header legendre(int)
def legendre(p):
k = p + 1
x = zeros(k, double)
w = zeros(k, double)
if p == 1:
x[0] = -0.577350269189625765
x[1] = 0.577350269189625765
w[0] = 1.0
w[1] = 1.0
elif p == 2:
x[0] = -0.774596669241483377
x[1] = 0.0
x[2] = 0.774596669241483377
w[0] = 0.55555555555555556
w[1] = 0.888888888888888889
w[2] = 0.55555555555555556
elif p == 3:
x[0] = -0.861136311594052575
x[1] = -0.339981043584856265
x[2] = 0.339981043584856265
x[3] = 0.861136311594052575
w[0] = 0.347854845137453853
w[1] = 0.65214515486254615
w[2] = 0.65214515486254614
w[3] = 0.34785484513745386
return x,w
#$ comment
if x > 1:
print(x)
'''
from pyccel.codegen import preprocess_as_str
txt = preprocess_as_str(code)
print txt
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.