commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
83a7b19d33c9dac43e103933c9b4a734304ed2a1 | Add some unit tests. | fyabc/MiniGames | HearthStone2/test/utils/test_misc.py | HearthStone2/test/utils/test_misc.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
import MyHearthStone.utils.misc as misc
__author__ = 'fyabc'
class TestMisc(unittest.TestCase):
    """Unit tests for the trivial-implementation helpers in ``misc``."""

    @classmethod
    def setUpClass(cls):
        pass

    def testTrivialImpl(self):
        class Base:
            @misc.trivial_impl
            def f(self):
                pass

        class Child(Base):
            # Overriding without the decorator makes the method non-trivial.
            def f(self):
                pass

        for trivial in (Base.f, Base().f):
            self.assertTrue(misc.is_trivial_impl(trivial))
        for concrete in (Child.f, Child().f):
            self.assertFalse(misc.is_trivial_impl(concrete))
| mit | Python | |
3f1e21e1d2a3d1418c19e454f77071686d21f7b9 | add external project | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/apps/extprojects/admin.py | meinberlin/apps/extprojects/admin.py | from django.contrib import admin
from . import models
@admin.register(models.ExternalProject)
class ExternalProjectAdmin(admin.ModelAdmin):
    """Django admin configuration for external projects."""

    # Fields shown on the add/change form.
    fields = (
        'name', 'url', 'description', 'tile_image', 'tile_image_copyright',
        'is_archived'
    )
    list_display = ('__str__', 'organisation', 'is_draft', 'is_archived')
    list_filter = ('is_draft', 'is_archived', 'organisation')
    search_fields = ('name',)
    date_hierarchy = 'created'

    def get_queryset(self, request):
        # Only list stand-alone external projects; rows linked to a bplan
        # are excluded (presumably managed through a separate admin — confirm).
        return models.ExternalProject.objects.filter(bplan=None)
| agpl-3.0 | Python | |
be30299c1e9013a99bf7e828700741c1ce3fe386 | Create a contrib rule for creating docker_push defaults. (#92) | bazelbuild/rules_docker,bazelbuild/rules_docker,bazelbuild/rules_docker,bazelbuild/rules_docker | docker/contrib/with-defaults.bzl | docker/contrib/with-defaults.bzl | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This defines a repository rule for configuring the rules' defaults.
For now, this is limited to docker_push, where the default can be
specified as follows:
```python
=== WORKSPACE ===
load(
"@io_bazel_rules_docker//docker/contrib:with-defaults.bzl",
"docker_defaults",
)
docker_defaults(
name = "defaults",
registry = "us.gcr.io",
tag = "{BUILD_USER}"
)
=== BUILD ===
load("@defaults//:defaults.bzl", "docker_push")
```
Any of "registry", "repository" or "tag" may be given a new default.
"""
def _impl(repository_ctx):
    """Core implementation of docker_default.

    Materializes an external repository containing a generated `defaults.bzl`
    whose `docker_push` wrapper fills in any of registry/repository/tag that
    the caller did not pass explicitly.
    """
    # An (empty) BUILD file is required for the repository to be loadable.
    repository_ctx.file("BUILD", "")
    repository_ctx.file("defaults.bzl", """
load(
    "@io_bazel_rules_docker//docker:push.bzl",
    _docker_push="docker_push"
)

def docker_push(**kwargs):
    if "registry" not in kwargs:
        kwargs["registry"] = "{registry}" or None
    if "repository" not in kwargs:
        kwargs["repository"] = "{repository}" or None
    if "tag" not in kwargs:
        kwargs["tag"] = "{tag}" or None
    _docker_push(**kwargs)
""".format(
        # An unset attribute becomes "" here, and `"" or None` in the
        # generated file turns it back into None for docker_push.
        registry=repository_ctx.attr.registry or "",
        repository=repository_ctx.attr.repository or "",
        tag=repository_ctx.attr.tag or "",
    ))
# Repository rule that writes the wrapper defaults.bzl (see _impl above).
_docker_defaults = repository_rule(
    attrs = {
        "registry": attr.string(),
        "repository": attr.string(),
        "tag": attr.string(),
    },
    implementation = _impl,
)

def docker_defaults(**kwargs):
    """Creates a version of docker_push with the specified defaults."""
    _docker_defaults(**kwargs)
| apache-2.0 | Python | |
07b1602fbab9708929ac331617f4d6635b6e503d | Add test for `/v3/job/list` | treasure-data/td-client-python | tdclient/test/job_api_test.py | tdclient/test/job_api_test.py | #!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import functools
import os
from tdclient import api
from tdclient import version
def setup_function(function):
    """Per-test setup: drop environment variables that would change how the
    API client resolves its endpoint or proxies."""
    for var in ("TD_API_SERVER", "HTTP_PROXY"):
        # pop() with a default is a no-op when the variable is absent.
        os.environ.pop(var, None)
class Response(object):
    """Canned HTTP response that also records the request issued against it."""

    def __init__(self, status, body, headers):
        # Canned reply data; the body is stored UTF-8 encoded, as bytes.
        self.status = status
        self.body = body.encode("utf-8")
        self.headers = headers
        # Filled in by the fake transport when a request is made.
        self.request_method = None
        self.request_path = None
        self.request_headers = None
def get(response, url, params=None):
    """Fake transport ``get``: record the request on *response* and return
    the canned ``(status, body, response)`` triple.

    Installed over the client via ``functools.partial(get, response)`` so
    the test can inspect what request the client issued.
    """
    # BUG FIX: the original used the mutable default ``params={}``, the
    # classic shared-default footgun; use None and create a fresh dict.
    if params is None:
        params = {}
    response.request_method = "GET"
    response.request_path = url
    response.request_headers = params
    return (response.status, response.body, response)
def test_list_jobs():
    """The client must issue GET /v3/job/list and decode the job records."""
    client = api.API("apikey")
    # Canned server payload: three job records plus count/from/to metadata.
    body = """
{
"count":11,"from":0,"to":10,"jobs":[
{"status":"success","cpu_time":496570,"result_size":24,"duration":262,"job_id":"18882028","created_at":"2014-12-23 12:00:18 UTC","updated_at":"2014-12-23 12:04:42 UTC","start_at":"2014-12-23 12:00:19 UTC","end_at":"2014-12-23 12:04:41 UTC","query":"select count(1) from import_test","type":"hive","priority":0,"retry_limit":0,"result":"","url":"http://console.treasuredata.com/jobs/18882028","user_name":"owner","hive_result_schema":"[[\\"_c0\\", \\"bigint\\"]]","organization":null,"database":"jma_weather"},
{"status":"success","cpu_time":489540,"result_size":24,"duration":272,"job_id":"18880612","created_at":"2014-12-23 11:00:16 UTC","updated_at":"2014-12-23 11:04:48 UTC","start_at":"2014-12-23 11:00:16 UTC","end_at":"2014-12-23 11:04:48 UTC","query":"select count(1) from import_test","type":"hive","priority":0,"retry_limit":0,"result":"","url":"http://console.treasuredata.com/jobs/18880612","user_name":"owner","hive_result_schema":"[[\\"_c0\\", \\"bigint\\"]]","organization":null,"database":"jma_weather"},
{"status":"success","cpu_time":486630,"result_size":24,"duration":263,"job_id":"18879199","created_at":"2014-12-23 10:00:21 UTC","updated_at":"2014-12-23 10:04:44 UTC","start_at":"2014-12-23 10:00:21 UTC","end_at":"2014-12-23 10:04:44 UTC","query":"select count(1) from import_test","type":"hive","priority":0,"retry_limit":0,"result":"","url":"http://console.treasuredata.com/jobs/18879199","user_name":"owner","hive_result_schema":"[[\\"_c0\\", \\"bigint\\"]]","organization":null,"database":"jma_weather"}
]
}
"""
    response = Response(200, body, {})
    # Replace the client's transport with the canned-response fake above.
    client.get = functools.partial(get, response)
    jobs = client.list_jobs(0, 2)
    assert response.request_method == "GET"
    assert response.request_path == "/v3/job/list"
    assert len(jobs) == 3
    # Each returned job tuple starts with the job_id string.
    assert sorted([ job[0] for job in jobs ]) == ["18879199", "18880612", "18882028"]
| apache-2.0 | Python | |
e7ec8c2023be2a480d7f459854133b1f5e4a3642 | Add module metrics, with plugins to calculate Cyclomatic Complexity and Halstead metrics of Scratch projects | jemole/hairball,jemole/hairball | hairball/plugins/metrics.py | hairball/plugins/metrics.py | """This module provides plugins with clasic Sw Engineering metrics"""
import math
from collections import Counter
from hairball.plugins import HairballPlugin
class CyclomaticComplexity(HairballPlugin):

    """Plugin that calculates the Cyclomatic Complexity of a project."""

    def __init__(self):
        super(CyclomaticComplexity, self).__init__()
        # Per-script complexities, in visit order.
        self.cc = []
        # Sum of all per-script complexities.
        self.total = 0

    def finalize(self):
        """
        Output the Cyclomatic Complexity results.

        CC = number of conditions + 1
        """
        print("Total Cyclomatic Complexity: %i" % self.total)
        # NOTE(review): raises ZeroDivisionError if no scripts were analyzed.
        average = float (self.total) / len(self.cc)
        print ("Average Cyclomatic Complexity: %.2f" % average)
        print ("Cyclomatic Complexity by script:")
        # Python 2 print statement (this module predates Python 3 support).
        print self.cc

    def analyze(self, scratch):
        """Run and return the results from the CyclomaticComplexity plugin."""
        # Scratch block names that introduce a decision point.
        conditionals = (['if %s then%s', 'if %s then%selse%s',
                         'repeat until %s%s', 'wait until %s',
                         'when backdrop switches to %s', 'when %s > %s'])
        for script in self.iter_scripts(scratch):
            conditions = 0
            for name, _, _ in self.iter_blocks(script.blocks):
                if name in conditionals:
                    conditions += 1
            # McCabe: complexity of one script = decision points + 1.
            self.cc.append(conditions + 1)
            self.total += conditions +1
class Halstead(HairballPlugin):

    """Plugin that calculates the Halstead complexity measures of a project."""

    def __init__(self):
        super(Halstead, self).__init__()
        # NOTE(review): these two counters are never updated; analyze() uses
        # local counters instead.
        self.operators = Counter()
        self.operands = Counter()
        # Halstead measures, filled in by analyze():
        self.n = 0  # program vocabulary (distinct operators + operands)
        self.N = 0  # program length (total occurrences)
        self.V = 0  # volume
        self.D = 0  # difficulty
        self.E = 0  # effort
        self.T = 0  # estimated time to program, in seconds

    def finalize(self):
        """Output the Halstead complexity measures results."""
        print ("Program vocabulary: %i" % self.n)
        print ("Program length: %i" % self.N)
        print ("Volume: %.2f" % self.V)
        print ("Difficulty: %.2f" % self.D)
        print ("Effort: %.2f" % self.E)
        print ("Time required to program: %.2f seconds = %.2f minutes" %
               (self.T, self.T / float (60)))

    def analyze(self, scratch):
        """Run and return the results from the Halstead plugin."""
        file_operators = Counter()
        file_operands = Counter()
        for script in self.iter_scripts(scratch):
            for name, _, arguments in self.iter_blocks(script.blocks):
                # Each block name counts as one operator occurrence.
                file_operators[name] += 1
                for arg in arguments.args:
                    # Plain literals count as operands; nested blocks (lists)
                    # and kurt objects are skipped.
                    if (not (type (arg) is list) and
                            'kurt' not in str(type(arg))):
                        file_operands[arg] += 1
        # N1/N2: total operator/operand occurrences; n1/n2: distinct counts.
        N1 = sum(file_operators.values())
        N2 = sum(file_operands.values())
        n1 = len(file_operators.values())
        n2 = len(file_operands.values())
        # Standard Halstead formulas.
        self.n = n1+ n2
        self.N = N1 + N2
        self.V = float(self.N) * math.log(self.n, 2)
        self.D = n1/ float (2) * (N2/float (n2))
        self.E = float (self.V * self.D)
        self.T = float (self.E / 18)
| bsd-2-clause | Python | |
42b4ca440ea785ae764f2c50fa0ca96539c2db8d | Create ShuffleLabel.py | DigitalSlideArchive/HistomicsTK,DigitalSlideArchive/HistomicsTK | histomicstk/ShuffleLabel.py | histomicstk/ShuffleLabel.py | import numpy as np
from skimage import measure as ms
def ShuffleLabel(Label):
    """
    Shuffles labels in a label image to improve visualization and enhance
    object boundaries.

    Parameters
    ----------
    Label : array_like
        A label image generated by segmentation methods.

    Returns
    -------
    Shuffled : array_like
        A label image where all values > 0 are randomly shuffled.

    See Also
    --------
    CondenseLabel
    """
    # get list of unique object labels
    Unique = np.unique(Label.flatten())

    # remove background objects (Label == 0)
    Unique = np.delete(Unique, (Unique == 0).nonzero())

    # generate shuffled list of object values
    np.random.shuffle(Unique)

    # initialize output
    Shuffled = np.zeros(Label.shape)

    # get pixel list for each object; use builtin `int` instead of the
    # deprecated `np.int` alias (removed in NumPy 1.24) -- same behavior.
    Props = ms.regionprops(Label.astype(int))

    # fill in new values; regionprops is ordered by ascending label, so each
    # object is paired with one randomly drawn label value.
    # (The stray Python 2 debug statement `print Unique[i]` was removed.)
    for i in range(len(Unique)):
        Coords = Props[i].coords
        Shuffled[Coords[:, 0], Coords[:, 1]] = Unique[i]

    return Shuffled
| apache-2.0 | Python | |
dcd83ea781ad8de1111984c8972b314f6f88e4d0 | add orm-1 | chenqing24/learnPy27 | www/transwarp/orm.py | www/transwarp/orm.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Jeff Chen'
'''
封装orm操作
'''
import db
class Field(object):
    """Describes one database column: its name and its SQL column type."""

    def __init__(self, name, column_type):
        self.name = name
        self.column_type = column_type

    def __str__(self):
        # Render as <SubclassName:column_name>, useful in debug output.
        return '<{0}:{1}>'.format(self.__class__.__name__, self.name)
class StringField(Field):
    '''
    String column type, stored as varchar(100).
    '''
    def __init__(self, name):
        super(StringField, self).__init__(name, 'varchar(100)')
class IntegerField(Field):
    '''
    Integer column type, stored as int.
    '''
    def __init__(self, name):
        super(IntegerField, self).__init__(name, 'int')
class ModelMetaclass(type):
    '''
    Metaclass that collects Field attributes declared on a model class.

    The Field descriptors are removed from the class attributes and gathered
    into a ``__mappings__`` dict (attribute name -> Field) so they do not
    shadow per-row values stored on instances.
    '''
    def __new__(cls, name, bases, attrs):
        if name == 'Model':  # the base Model class itself has no fields
            return type.__new__(cls, name, bases, attrs)
        # Collect all Field instances declared on the class.
        # (.items()/.keys() instead of the Python-2-only iteritems/iterkeys
        # keeps this working on both Python 2 and 3.)
        mappings = dict()
        for k, v in attrs.items():
            if isinstance(v, Field):
                mappings[k] = v
        for k in list(mappings.keys()):
            attrs.pop(k)
        attrs['__mappings__'] = mappings
        # BUG FIX: the original fell off the end here and implicitly returned
        # None, so every class built with this metaclass became None.
        return type.__new__(cls, name, bases, attrs)
class Model(dict):
    '''
    ORM base class: a dict of column name -> value, built by ModelMetaclass.

    NOTE: ``__metaclass__`` is the Python 2 way of selecting a metaclass;
    Python 3 ignores this attribute.
    '''
    __metaclass__ = ModelMetaclass
| mit | Python | |
5e1ba2f9a14634fb1e8a7eaadbad370b97beb383 | Add Middle English | kylepjohnson/cltk,LBenzahia/cltk,D-K-E/cltk,TylerKirby/cltk,TylerKirby/cltk,cltk/cltk,diyclassics/cltk,LBenzahia/cltk | cltk/corpus/middle_english/alphabet.py | cltk/corpus/middle_english/alphabet.py | """
Sources:
From Old English to Standard English: A Course Book in Language Variation Across Time, Dennis Freeborn
https://web.cn.edu/kwheeler/documents/ME_Pronunciation.pdf
https://en.wikipedia.org/wiki/Middle_English_phonology
"""
# Base Middle English letter inventory, including ash, eth, thorn, and wynn.
ALPHABET = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'x', 'y', 'æ', 'ð', 'þ', 'ƿ']

"""
The produced consonant sound in Middle English are categorized as following:
Stops: ⟨/b/, /p/, /d/, /t/, /g/, /k/⟩
Affricatives: ⟨/ǰ/, /č/, /v/, /f/, /ð/, /θ/, /z/, /s/, /ž/, /š/, /c̹/, /x/, /h/⟩
Nasals: ⟨/m/, /n/, /ɳ/⟩
Later Resonants: ⟨/l/⟩
Medial Resonants: ⟨/r/, /y/, /w/⟩
Thorn (þ) was gradually replaced by the dipthong "th", while Eth (ð) which had already fallen out of use by the 14th century was
later replaced by "d"
Wynn (ƿ) is the predecessor of "w". Modern transliteration scripts, usually replace it with "w" as to avoid confusion with
the strikingly similar p
"""

CONSONANTS = ['b', 'c', 'd', 'f', 'g', 'h', 'l', 'm', 'n', 'p', 'r', 's', 't', 'x', 'ð', 'þ', 'ƿ']

"""
The vowel sounds in Middle English are divided into:
Long Vowels: ⟨/a:/, /e/, /e̜/, /i/ , /ɔ:/, /o/ , /u/⟩
Short Vowels: ⟨/a/, /ɛ/, /I/, /ɔ/, /U/, /ə/⟩
"""

VOWELS = ['a', 'e', 'i', 'o', 'u', 'y', 'æ']

"""
As established rules for ME orthography were effectively nonexistent, compiling a definite list of dipthongs is non-trivial. The
following aims to compile a list of the most commonly-used dipthongs.
"""

# Common digraph/diphthong spellings (non-exhaustive; see note above).
DIPTHONGS = ['ai', 'au', 'aw', 'ay', 'ei', 'eu', 'ew', 'ey', 'iu', 'iw', 'o', 'oi', 'ou', 'ow', 'oy', 'uw']
77b06bf2737095b889b8c31e5d296d03b3030bb4 | add promise model pesudo codes | faner-father/pypromise | promise.py | promise.py | # coding: utf-8
__author__ = 'cloud'
'''
promise model:
'''
# example 1:
'''
create vol and attach
'''
def vol_create(vol_arg):
    # Stub: create a volume from ``vol_arg``.
    pass

def vol_wait_available(scope):
    # Stub: block until the volume becomes available.
    pass

def vol_attach_get_lock(scope):
    # Stub: acquire the lock guarding attachment.
    pass

def vol_attach(scope):
    # Stub: attach the volume.
    pass

def vol_attach_release_lock(scope):
    # Stub: release the attachment lock.
    pass

def vol_create_fail(scope):
    # Stub: failure callback for vol_create.
    pass

def vol_wait_available_fail(scope):
    # Stub: failure callback for vol_wait_available.
    pass
class Promise(object):
    '''
    Execution orders supported:
      - single chain:    func -> then -> then
      - multiple chains: func -> then -> branch -> then
                                      -> branch2 (run in parallel)
    If a node on any chain raises an exception, that chain stops and its
    fail callback runs.

    ``feeds`` produces multiple promises:
        promise(func).then(func2).feeds([d1, d2])
    is equivalent to
        promise(func).then(func2).feed(d1)
        promise(func).then(func2).feed(d2)
    with one promise acting as proxy for the whole group.

    The ``go`` method starts execution and automatically detects whether it
    is driving a single promise or multiple promises.

    (Docstring translated from the original Chinese; all methods below are
    pseudo-code stubs.)
    '''
    def __init__(self, func, fail):
        # Stub: remember the work function and its failure callback.
        pass

    def then(self, func_or_promise, fail_callback=None):
        # do something
        return self

    def thenes(self, func_or_promises, *fail_callbacks, **kw_fail_callbacks):
        # Stub: chain several steps at once.
        return self

    def branch(self, branch_func_or_promise, fail_callback=None):
        # Stub: fork one parallel chain off this node.
        pass

    def branches(self, branch_func_or_promises, *fail_callbacks, **kw_fail_callbacks):
        # Stub: fork several parallel chains at once.
        pass

    def feed(self, initial_data):
        # Stub: bind one initial payload to this chain.
        pass

    def feeds(self, initial_datas):
        # Stub: fan out one promise per payload (see class docstring).
        pass

    def go(self, initial_data=None):
        # Stub: start execution.
        pass

    @property
    def completed(self):
        # Stub: True once the whole chain has finished.
        pass

    @property
    def result(self):
        # Stub: final value produced by the chain.
        pass
def promise(func, fail):
    # Convenience factory mirroring the JS-style ``promise(...)`` spelling.
    return Promise(func, fail)
# ---------------------------------------------------------------------------
# Pseudo-code usage examples below.  NOT runnable as-is: ``vol_arg``,
# ``vol_arg2``, ``inst_data``, ``port_data`` and ``time`` are undefined, and
# the ``print x`` statements are Python 2 syntax.
# ---------------------------------------------------------------------------

# promise and go, create single vol
pr1 = promise(vol_create, vol_create_fail) \
    .then(vol_wait_available, vol_wait_available_fail) \
    .then(vol_attach_get_lock) \
    .then(vol_attach).then(vol_attach_release_lock).go(initial_data=vol_arg)
'''
pr1 = promise(vol_create, vol_create_fail) \
    .then(vol_wait_available, vol_wait_available_fail) \
    .then(vol_attach_get_lock) \
    .then(vol_attach).then(vol_attach_release_lock).feed(initial_data=vol_arg)
pr1.go()
'''
# Poll until the chain reports completion, then print its result.
while not pr1.completed:
    time.sleep()
else:
    print pr1.result

# create multiple vols
pr_proxy = promise(vol_create, vol_create_fail) \
    .then(vol_wait_available, vol_wait_available_fail) \
    .then(vol_attach_get_lock) \
    .then(vol_attach).then(vol_attach_release_lock).goes(initial_datas=[vol_arg, vol_arg2])
'''
pr_proxy = promise(vol_create, vol_create_fail) \
    .then(vol_wait_available, vol_wait_available_fail) \
    .then(vol_attach_get_lock) \
    .then(vol_attach).then(vol_attach_release_lock).feeds(initial_datas=[vol_arg, vol_arg2])
pr_proxy.goes()
'''
# The proxy is iterable: one member promise per fed payload.
while not pr_proxy.completed:
    time.sleep(1)
else:
    for p in pr_proxy:
        print p.result

'''
example 2:
create instance with create and attach volumes, create and attach port.
'''

def upload_image():
    # Stub: upload the base image.
    pass

def instance_create(inst_data):
    # Stub: create an instance from ``inst_data``.
    pass

def port_create(port_data):
    # Stub: create a network port.
    pass

def port_attach(scope):
    # Stub: attach the port to the instance.
    pass

def get_inst_lock(inst_id):
    # Stub: acquire the per-instance lock.
    pass

def release_inst_lock(inst_id):
    # Stub: release the per-instance lock.
    pass

# NOTE(review): promise() requires a ``fail`` argument; the last three
# examples omit it -- pseudo-code only.
vol_pr_proxy = promise(vol_create, vol_create_fail) \
    .then(vol_wait_available, vol_wait_available_fail) \
    .then(vol_attach_get_lock) \
    .then(vol_attach).then(vol_attach_release_lock).feeds(initial_datas=[vol_arg, vol_arg2])
inst_pr = promise(instance_create).feed(initial_data=inst_data)
port_pr = promise(port_create).thenes([get_inst_lock, port_attach, release_inst_lock]).feed(initial_data=port_data)
promise(upload_image).then(inst_pr).branches([vol_pr_proxy, port_pr])
| mit | Python | |
51dc6dc1ebe6babb468f0ef607ff750327a366ba | Enable the change tracking tables in admin. | KayEss/django-pubsubpull,KayEss/django-pubsubpull,KayEss/django-pubsubpull | pubsubpull/admin.py | pubsubpull/admin.py | """
Enable admin
"""
from django.contrib import admin
from pubsubpull.models import Request, UpdateLog
# Expose the request log and change-tracking tables in the Django admin.
admin.site.register(Request)
admin.site.register(UpdateLog)
| mit | Python | |
bce4656156b4f04655f38099a4b577651dc794d5 | make python -m crossbar work, using the same console script | NinjaMSP/crossbar,NinjaMSP/crossbar,NinjaMSP/crossbar | crossbar/__main__.py | crossbar/__main__.py | #####################################################################################
#
# Copyright (C) Tavendo GmbH
#
# Unless a separate license agreement exists between you and Tavendo GmbH (e.g. you
# have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
if __name__ == '__main__':
    # Delegate to the same console-script entry point that setuptools
    # installs, so ``python -m crossbar`` behaves exactly like ``crossbar``.
    from pkg_resources import load_entry_point
    import sys
    sys.exit(
        load_entry_point('crossbar', 'console_scripts', 'crossbar')()
    )
| agpl-3.0 | Python | |
6798e3460e573b06bbf941f96102ef4ce196ca49 | add channel for rpc | ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study | python/proto/pyRpc2/channler.py | python/proto/pyRpc2/channler.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import socket
import google.protobuf.service as service
from pyRpc2 import logger
from pyRpc2.controller import RpcController
import pyRpc2.error as error
import pyRpc2.proto.rpc_pb2 as rpc_pb
_logger = logger.get_logger(__name__)
class SocketCreator(object):
    """Factory for the channel's transport sockets (replaceable in tests)."""

    def create_socket(self):
        # Plain IPv4 TCP socket.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        return sock
class RpcChannel(service.RpcChannel):
    """Socket-based RpcChannel for talking to a protobuf RPC server.

    Work in progress: read_rpc_message, parse_reply and CallMethod are
    still unimplemented stubs.
    """

    def __init__(self, host='localhost', port=5555,
                 socket_creator=SocketCreator()):
        # NOTE(review): the default socket_creator instance is shared by all
        # channels constructed without an explicit one (mutable default).
        self.host = host
        self.port = port
        self.socket_creator = socket_creator

    def new_controller(self):
        # One fresh controller per call.
        return RpcController()

    def validate_request(self, request):
        # Reject partially filled request protos before sending anything.
        if not request.IsInitialized():
            raise error.BadRequestProtoError('Request proto error')

    def open_socket(self, host, port):
        """Connect to host:port, translating socket errors into RPC errors."""
        fd = self.socket_creator.create_socket()
        try:
            fd.connect((host, port))
        except socket.gaierror:
            self.close_socket(fd)
            msg = 'Could not find host %s' % host
            raise error.UnknownHostError(msg)
        except socket.error:
            self.close_socket(fd)
            msg = 'Could not open IO for %s:%d' % (host, port)
            raise error.RpcIOError(msg)
        return fd

    def close_socket(self, fd):
        # Best-effort close: errors during cleanup are deliberately ignored.
        try:
            fd and fd.close()
        except:
            pass

    def create_rpc_request(self, method, request):
        """Wrap a service-level request into the transport Request proto."""
        rpc_request = rpc_pb.Request()
        rpc_request.request_proto = request.SerializeToString()
        rpc_request.service_name = method.containing_service.full_name
        rpc_request.method_name = method.name
        return rpc_request

    def write_rpc_message(self, fd, rpc_request):
        """Send the serialized request, then half-close the write side."""
        try:
            wfile = fd.makefile('w')
            wfile.write(rpc_request.SerializeToString())
            wfile.flush()
            # Half-close signals end-of-request to the server.
            fd.shutdown(socket.SHUT_WR)
        except socket.error:
            self.close_socket(fd)
            raise error.RpcIOError('Error writing data to server')

    def read_rpc_message(self, fd):
        # TODO: read the serialized reply from the socket.
        pass

    def parse_reply(self, byte_stream, reply_class):
        # TODO: deserialize the reply into reply_class.
        pass

    def CallMethod(self, method, controller, request, reply_class, done):
        # TODO: implement the protobuf RpcChannel entry point.
        pass
| bsd-2-clause | Python | |
23da66a6a36ab82b7cb356d110c26ef9a0412932 | Create EVE-FAG-DETECTOR.py | RavenNull/EVE-FAG-DETECTOR | EVE-FAG-DETECTOR.py | EVE-FAG-DETECTOR.py | print("EVE FAG DETECTOR v2.2.3")
# Joke "scanner": read a character name and print a canned verdict.
character = input("What is the character's name?")
if character == "Raven Null":
    print("Raven Null is not a fag.")
elif character == "raven null":
    print("Learn how to use the shift key dumbass. Also, no faggotry was detected for Raven Null.")
else:
    print("Player is confirmed to be a fag.")
| mit | Python | |
6214758e0a4b7140454a4ef244521e87541307e5 | Add python3 template | jpschewe/code-templates,jpschewe/code-templates,jpschewe/code-templates | python3.py | python3.py | #!/usr/bin/env python3
import warnings
with warnings.catch_warnings():
import re
import sys
import argparse
import os
import os.path
import logging
script_dir=os.path.abspath(os.path.dirname(__file__))
def create_preferences_directory():
    """Create the per-user preferences directory if it does not exist.

    Uses %APPDATA% on Windows (via the third-party pywin32 package, imported
    lazily) and ``~`` elsewhere.  The directory name is ``.test`` -- a
    placeholder project name for this template.
    """
    if os.name != "posix":
        from win32com.shell import shellcon, shell
        homedir = "{}\\".format(shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0))
    else:
        homedir = "{}/".format(os.path.expanduser("~"))

    projectname = "test"
    # Racy check-then-create, but makedirs only runs when the dir is missing.
    if not os.path.isdir("{0}.{1}".format(homedir,projectname)):
        os.makedirs("{0}.{1}".format(homedir,projectname))
def main(argv=None):
    """Parse command-line arguments and run the program.

    Returns None on success, so ``sys.exit(main())`` exits with status 0.
    """
    logging.basicConfig(level=logging.INFO)
    if argv is None:
        argv = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--logfile", dest="logfile", help="logfile", required=True)
    args = parser.parse_args(argv)
    # BUG FIX: the original then iterated ``args[1:]``, but parse_args returns
    # an argparse.Namespace, which is not sliceable -- every invocation raised
    # TypeError.  The dead loop was removed; use ``args.logfile`` here.


if __name__ == "__main__":
    sys.exit(main())
| unlicense | Python | |
aea51be0e9428ddb4f72b3382fea1ae1cd99f1a9 | add crude performance test script | nschloe/meshio | test/performance.py | test/performance.py | # -*- coding: utf-8 -*-
#
import time
import numpy
import pytest
import meshio
def generate_mesh():
    '''Generates a fairly large mesh.

    Builds a finely discretized unit circle with pygmsh/gmsh and returns
    the points and cells for the read/write benchmark below.
    '''
    # Third-party mesh generator; imported lazily so the benchmark module
    # can be imported without it.
    import pygmsh

    geom = pygmsh.built_in.Geometry()
    geom.add_circle(
        [0.0, 0.0, 0.0],
        1.0,
        # 5.0e-3,
        1.0e-2,
        num_sections=4,
        # If compound==False, the section borders have to be points of the
        # discretization. If using a compound circle, they don't; gmsh can
        # choose by itself where to point the circle points.
        compound=True
        )
    X, cells, _, _, _ = pygmsh.generate_mesh(geom)
    return X, cells
def read_write():
    """Time meshio write+read round trips for every supported format."""
    X, cells = generate_mesh()

    # All file formats exercised by the benchmark.
    formats = [
        'ansys-ascii',
        'ansys-binary',
        'exodus',
        'gmsh-ascii',
        'gmsh-binary',
        'dolfin-xml',
        'med',
        'medit',
        'permas',
        'moab',
        'off',
        'stl-ascii',
        'stl-binary',
        'vtk-ascii',
        'vtk-binary',
        'vtu-ascii',
        'vtu-binary',
        'xdmf',
        ]

    # NOTE(review): writes to a bare 'foo' file in the CWD and never cleans
    # it up; each format reuses (overwrites) the same file.
    filename = 'foo'
    print()
    print('format        write (s)     read(s)')
    print()
    for fmt in formats:
        t = time.time()
        meshio.write(filename, X, cells, file_format=fmt)
        elapsed_write = time.time() - t

        t = time.time()
        meshio.read(filename, file_format=fmt)
        elapsed_read = time.time() - t
        print('{0: <12}  {1:e}  {2:e}'.format(fmt, elapsed_write, elapsed_read))

    return


if __name__ == '__main__':
    read_write()
| mit | Python | |
478a78494199b8282b635323128c07f2661df58b | add pong | Fun2LearnCode/Python | pong/pong.py | pong/pong.py | #TKinterPongGame.py
from tkinter import *
import random
import time
class Ball:
    """Bouncing ball; animates itself every 10 ms via Tk's after() callback."""

    def __init__(self, canvas, paddle, color):
        self.canvas = canvas
        self.paddle = paddle
        self.id = canvas.create_oval(10, 10, 25, 25, fill=color)
        self.canvas.move(self.id, 245, 150)
        # Random initial horizontal speed/direction; vertical speed fixed.
        starts = [-3, -2, -1, 1, 2, 3]
        random.shuffle(starts)
        self.x = starts[0]
        self.y = 3
        self.canvas_height = self.canvas.winfo_height()
        self.canvas_width = self.canvas.winfo_width()
        # Set once the ball falls past the bottom edge (game over flag).
        self.hit_bottom = False

    def hit_paddle(self, pos):
        # True when bbox ``pos`` overlaps the paddle horizontally and its
        # bottom edge lies within the paddle's vertical extent.
        paddle_pos = self.canvas.coords(self.paddle.id)
        if pos[2] >= paddle_pos[0] and pos[0] <= paddle_pos[2]:
            if pos[3] >= paddle_pos[1] and pos[3] <= paddle_pos[3]:
                return True
        return False

    def draw(self):
        self.canvas.move(self.id, self.x, self.y)
        pos = self.canvas.coords(self.id)
        # Re-schedule ourselves: this call IS the animation loop.
        self.canvas.after(10, self.draw)
        if pos[1] <= 0:
            self.y = 3
        if pos[3] >= self.canvas_height:
            self.hit_bottom = True
        if self.hit_paddle(pos) == True:
            self.y = -3
        if pos[0] <= 0:
            self.x = 3
        if pos[2] >= self.canvas_width:
            self.x = -3
class Paddle:
    """Player paddle driven by the left/right arrow keys."""

    def __init__(self, canvas, color):
        self.canvas = canvas
        self.id = canvas.create_rectangle(0, 0, 100, 10, fill=color)
        self.canvas.move(self.id, 200, 450)
        # Current horizontal velocity (0 until a key is pressed).
        self.x = 0
        self.canvas_width = self.canvas.winfo_width()
        self.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        self.canvas.bind_all('<KeyPress-Right>', self.turn_right)

    def draw(self):
        self.canvas.move(self.id, self.x, 0)
        pos = self.canvas.coords(self.id)
        # Re-schedule ourselves every 10 ms, like Ball.draw().
        self.canvas.after(10, self.draw)
        # Stop at either canvas edge.
        if pos[0] <= 0:
            self.x = 0
        elif pos[2] >= self.canvas_width:
            self.x = 0

    def turn_left(self, evt):
        self.x = -2

    def turn_right(self, evt):
        self.x = 2
# --- Window setup and game start. ---
root = Tk()
root.title("Pong Game")
root.resizable(0, 0)
# Keep the game window above all other windows.
root.wm_attributes("-topmost", 1)
canvas = Canvas(root, width=500, height=500)
canvas.pack()
# Force geometry so winfo_width/height (read in the constructors) are valid.
root.update()

paddle = Paddle(canvas, 'purple')
ball = Ball(canvas, paddle, 'green')
# Kick off both after()-driven animation loops, then enter Tk's main loop.
ball.draw()
paddle.draw()

root.mainloop()
| mit | Python | |
97b9ee00277fa35c92886b1ed39864eba3707dce | Add organizer permissions to staff group | onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle | bluebottle/activities/migrations/0020_auto_20200224_1005.py | bluebottle/activities/migrations/0020_auto_20200224_1005.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-11-11 12:19
from __future__ import unicode_literals
from django.db import migrations
from bluebottle.utils.utils import update_group_permissions
def add_group_permissions(apps, schema_editor):
    # Grant the Staff group add/change/delete rights on Organizer objects.
    group_perms = {
        'Staff': {
            'perms': (
                'change_organizer', 'add_organizer',
                'delete_organizer',
            )
        },
    }

    update_group_permissions('activities', group_perms, apps)


class Migration(migrations.Migration):

    dependencies = [
        ('activities', '0019_merge_20200213_1038'),
    ]

    operations = [
        # Data-only migration; no reverse operation is provided.
        migrations.RunPython(add_group_permissions)
    ]
| bsd-3-clause | Python | |
f2ec232ce654a645e5d243cc2a794b7a69fd438d | use your RPI for powerpoint presentations! | timwaizenegger/raspberrypi-examples,rafaelkperes/raspberrypi-examples,rafaelkperes/raspberrypi-examples,timwaizenegger/raspberrypi-examples | presenter.py | presenter.py | """
this script uses XAutomation, i.e. the xte command. install it with:
>sudo apt-get install xautomation
"""
import RPi.GPIO as GPIO
import subprocess
import time
# BCM pin numbers of the two push buttons (next / previous slide).
btnPin1 = 27
btnPin2 = 22

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# Buttons are pulled down, so a press drives the pin high (rising edge).
GPIO.setup(btnPin1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(btnPin2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

def sendKey(key):
    # Inject a key press into the X session via xautomation's ``xte`` tool.
    subprocess.Popen(['xte', 'key %s' % (key)])

def eventRight(e):
    print("Right")
    print(e)
    sendKey("Right")

def eventLeft(e):
    print("Left")
    print(e)
    sendKey("Left")

# Debounced edge callbacks: button 1 advances the slide, button 2 goes back.
GPIO.add_event_detect(btnPin1, GPIO.RISING, bouncetime=200, callback=eventRight)
GPIO.add_event_detect(btnPin2, GPIO.RISING, bouncetime=200, callback=eventLeft)

# Keep the process alive; all work happens in the GPIO callback thread.
while(True):
    time.sleep(0.1)
| mit | Python | |
9ce799511701f1d8f06ce2555253325ad8c76cc2 | add abstract action class | euclio/rocket-propelled-rpg | rprpg/battle/action.py | rprpg/battle/action.py | import abc
class Action(object):
    """Abstract base class for battle actions.

    Subclasses implement :meth:`execute`; ``requires_target`` records whether
    a target must be chosen before the action can run.
    """

    # Python 2 style ABC declaration (ignored by Python 3).
    __metaclass__ = abc.ABCMeta

    def __init__(self, requires_target):
        # No target selected yet; assigned later by the battle loop.
        self.target = None
        self.requires_target = requires_target

    @abc.abstractmethod
    def execute(self):
        pass
| mit | Python | |
b77187592e3a6ba4fd06c13fb2a576ab9066d893 | add test for #313 | gmarkall/numba,numba/numba,stonebig/numba,cpcloud/numba,ssarangi/numba,ssarangi/numba,stefanseefeld/numba,ssarangi/numba,numba/numba,sklam/numba,cpcloud/numba,IntelLabs/numba,pitrou/numba,jriehl/numba,gdementen/numba,sklam/numba,IntelLabs/numba,gmarkall/numba,gmarkall/numba,IntelLabs/numba,seibert/numba,stonebig/numba,numba/numba,stuartarchibald/numba,stonebig/numba,gdementen/numba,seibert/numba,pombredanne/numba,jriehl/numba,gmarkall/numba,sklam/numba,stuartarchibald/numba,stuartarchibald/numba,stefanseefeld/numba,sklam/numba,seibert/numba,stefanseefeld/numba,stefanseefeld/numba,jriehl/numba,jriehl/numba,stuartarchibald/numba,seibert/numba,GaZ3ll3/numba,pitrou/numba,numba/numba,pitrou/numba,cpcloud/numba,stonebig/numba,gmarkall/numba,sklam/numba,gdementen/numba,gdementen/numba,pombredanne/numba,stefanseefeld/numba,IntelLabs/numba,GaZ3ll3/numba,pombredanne/numba,GaZ3ll3/numba,numba/numba,GaZ3ll3/numba,ssarangi/numba,cpcloud/numba,IntelLabs/numba,gdementen/numba,seibert/numba,pombredanne/numba,stonebig/numba,jriehl/numba,pitrou/numba,stuartarchibald/numba,cpcloud/numba,pombredanne/numba,ssarangi/numba,pitrou/numba,GaZ3ll3/numba | numba/tests/issues/test_issue_313.py | numba/tests/issues/test_issue_313.py | # -*- coding: utf-8 -*-
from numba import void, double, jit
import numpy as np
# thanks to @ufechner7
def multiassign(res0, res1, val0, val1):
    """Copy val0 and val1 into res0 and res1 in place.

    Both sources are snapshotted before either destination is written, so the
    result is correct even when a destination aliases a source.
    """
    src0, src1 = val0[:], val1[:]
    res0[:] = src0
    res1[:] = src1
if __name__ == "__main__":
    # Compile multiassign for four float64[:] arguments returning void.
    multiassign1 = jit(void(double[:], double[:], double[:], double[:]))(multiassign)

    res0 = np.zeros(2)
    res1 = np.zeros(2)
    val0 = np.array([0.0,0.0])
    val1 = np.array([1.0,1.0])
    multiassign1(res0, res1, val0, val1)
    # Regression check for numba issue #313: both targets of the tuple
    # slice-assignment must receive their values.
    assert (res0 == val0).all()
    assert (res1 == val1).all()
| bsd-2-clause | Python | |
efbf98235b82c954364f35cb09f63006e23346e2 | Create tests for JavaScript parser. | eddieantonio/ad-hoc-miner,naturalness/sensibility,naturalness/sensibility,eddieantonio/ad-hoc-miner,naturalness/sensibility,eddieantonio/ad-hoc-miner,eddieantonio/ad-hoc-miner,naturalness/sensibility | tests/test_lang_javascript.py | tests/test_lang_javascript.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import pytest # type: ignore
from sensibility.language import Language
from sensibility.language.javascript import javascript
from sensibility.token_utils import Position
from location_factory import LocationFactory
test_file = r"""#!/usr/bin/env node
/*!
* This is an example file.
*/
import {ಠ_ಠ} from "-_-";
/* TODO: crazy ES2017 features. */
"""
def test_sanity_check() -> None:
    """The javascript singleton must be a Language implementation."""
    assert isinstance(javascript, Language)
def test_tokenize() -> None:
    """Tokenizing the fixture source yields the expected token count."""
    tokens = javascript.tokenize(test_file)
    # TODO: more robust tests for this.
    assert len(tokens) == 7
def test_summarize() -> None:
    """summarize() rejects invalid source and counts SLOC/tokens on valid source."""
    # Malformed JavaScript must raise, not be silently summarized.
    with pytest.raises(SyntaxError):
        javascript.summarize('import #')
    summary = javascript.summarize(test_file)
    # Only the import line is code; comments and the shebang don't count.
    assert summary.sloc == 1
    assert summary.n_tokens == 7
def test_pipeline() -> None:
    """execute_with_locations() reports correct (location, token) pairs."""
    # The import statement sits on line 6 of the fixture (after the
    # shebang and the block comment).
    loc = LocationFactory(Position(line=6, column=0))
    result = list(javascript.pipeline.execute_with_locations(test_file))
    # Check the first four tokens of `import {ಠ_ಠ} from "-_-";`.
    assert result[:4] == [
        (loc.across(len("import")), 'IMPORT'),
        (loc.space().across(1), '{'),
        (loc.space().across(len("ಠ_ಠ")), 'IDENTIFIER'),
        (loc.space().across(1), '}'),
    ]
    # TODO: Test more locations?
| apache-2.0 | Python | |
b29a37c92efca42cbd85b24306455b063dce33e2 | debug module can be imported to cause break-on-exception | hobson/pug,hobson/pug,hobson/pug,hobson/pug | pug/debug.py | pug/debug.py | """Import this module to invoke the interractive python debugger, ipydb, on any exception
Resources:
Based on http://stackoverflow.com/a/242531/623735
Examples:
>>> import debug
>>> x=[][0]
"""
# from http://stackoverflow.com/a/242514/623735
# if __name__ == '__main__':
# try:
# main()
# except:
# type, value, tb = sys.exc_info()
# traceback.print_exc()
# last_frame = lambda tb=tb: last_frame(tb.tb_next) if tb.tb_next else tb
# frame = last_frame().tb_frame
# ns = dict(frame.f_globals)
# ns.update(frame.f_locals)
# code.interact(local=ns)
import sys
def bug_info(type, value, tb):
    """Prints the traceback and invokes the ipython debugger on any exception

    Intended to be installed as sys.excepthook (see the assignment below),
    so it receives the standard (type, value, traceback) triple.
    """
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # We are in interactive mode or don't have a tty-like device
        # (e.g. output is piped), so we call the default hook instead of
        # trying to start an interactive debugger.
        sys.__excepthook__(type, value, tb)
    else:
        # TODO: Why not import pdb earlier, outside this function ?
        import traceback, ipdb
        # We are NOT in interactive mode, print the exception
        traceback.print_exception(type, value, tb)
        print
        # Start the debugger in post-mortem mode.
        # `ipdb.pm()` is deprecated so use `ipdb.post_mortem()` instead
        ipdb.post_mortem(tb)
# assign the bug_info function to the system exception hook/callback
sys.excepthook = bug_info | mit | Python | |
fd3a4e39b17995f75d9b6027b4abcb29013f479d | add network/sshd.py. | ptrsxu/snippetpy | network/sshd.py | network/sshd.py | from __future__ import print_function
import os
import sys
import re
import time
import datetime
def get_ptyreq_reply(line):
    """Return the digit following 'pty-req reply ' in an sshd log line.

    For Microsoft Windows clients the logged reply is 0; for Linux
    clients it is 1.  Returns the digit as a string, or None when the
    line contains no such marker.
    """
    match = re.search(r'(?<=pty-req reply )\d', line)
    if match is None:
        return None
    return match.group(0)
def get_pid(line):
    """Return the process id embedded as 'sshd[<pid>]' in a log line.

    Returns the pid as a string, or None when the line has no match.
    """
    match = re.search(r'(?<=sshd\[)\d+', line)
    return match.group(0) if match else None
def kill_conns_from_microsoft():
    """Daemon loop: kill sshd sessions whose pty-req reply marks a Windows client.

    Raises sshd's LogLevel to DEBUG1 (needed so pty-req replies appear in
    the auth log), then polls the log forever, SIGKILLing every sshd
    process that logged a reply of 0.  Requires root privileges.
    """
    AUTH_LOG_FILE = '/var/log/auth.log'
    # AUTH_LOG_FILE = '/var/log/secure'
    SSHD_CONFIG_FULE = '/etc/ssh/sshd_config'
    CMD = "/bin/sed -i.bak 's/LogLevel.*/LogLevel DEBUG1/g' %s" % SSHD_CONFIG_FULE
    sys.stderr.write('configuring the LogLevel to DEBUG1...\n')
    try:
        os.system(CMD)
    except:
        sys.stderr.write('You may need to configure LogLevel to DEBUG1\
                manually in file /etc/ssh/sshd_config. and restart this\
                program later.\n')
    sys.stderr.write("sshd breaking windows connections\
            daemon started with pid %d\n" % os.getpid())
    # NOTE(review): the whole log file is re-read on every iteration, so a
    # pid may be re-killed harmlessly (OSError from the stale pid is
    # swallowed below); confirm this is the intended behavior.
    while True:
        for line in open(AUTH_LOG_FILE):
            if get_ptyreq_reply(line) == '0':
                try:
                    os.kill(int(get_pid(line)), 9)
                    print('[%s, KILLED:] %s' %
                          (datetime.datetime.now(), line),
                          file=sys.stderr)
                except OSError:
                    continue
        time.sleep(1)
| mit | Python | |
45c746c6c6aee03092b2b08bf1aff73ead85683e | add form | wasit7/clubped,wasit7/clubped | 150409/server/form.py | 150409/server/form.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 18:48:43 2015
@author: Wasit
"""
from flask import Flask
from flask import request
app = Flask(__name__)
form_str="""<form action="login" method="POST">
First name:<br>
<input type="text" name="firstname" value="Mickey">
<br>
Last name:<br>
<input type="text" name="lastname" value="Mouse">
<br><br>
<input type="submit" value="Submit">
</form>"""
@app.route('/')
def hello_world():
    """Respond to the site root with a static greeting."""
    return 'Hello World!'
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Serve the login form on GET; echo the submitted names on POST."""
    if request.method == 'POST':
        firstname = request.form["firstname"]
        lastname = request.form["lastname"]
        return "Your name is %s %s"%(firstname,lastname)
    # GET: return the raw HTML form defined at module level.
    else:
        return form_str
if __name__ == '__main__':
app.run(debug=True) | bsd-2-clause | Python | |
a9077802269270a1d8cfb685400b9f23f45d6940 | add propagator test | vangj/py-bbn,vangj/py-bbn | tests/pptc/test_propagator.py | tests/pptc/test_propagator.py | from pybbn.graph.dag import BbnUtil
from pybbn.pptc.potentialinitializer import PotentialInitializer
from pybbn.pptc.moralizer import Moralizer
from pybbn.pptc.triangulator import Triangulator
from pybbn.pptc.transformer import Transformer
from pybbn.pptc.initializer import Initializer
from pybbn.pptc.propagator import Propagator
from nose import with_setup
def setup():
    # nose fixture hook: nothing to prepare for this test.
    pass
def teardown():
    # nose fixture hook: nothing to clean up.
    pass
@with_setup(setup, teardown)
def test_propagator():
    """Runs the full PPTC pipeline on the Huang graph through propagation.

    Currently a smoke test: it only checks that every stage completes
    without error; value assertions are still to be written (see below).
    """
    bbn = BbnUtil.get_huang_graph()
    PotentialInitializer.init(bbn)
    ug = Moralizer.moralize(bbn)
    cliques = Triangulator.triangulate(ug)
    join_tree = Transformer.transform(cliques)
    Initializer.initialize(join_tree)
    Propagator.propagate(join_tree)
    # assert later
    # for clique in join_tree.get_cliques():
    #     potential = join_tree.potentials[clique.id]
    #     print(clique)
    #     print(potential)
    #     total = sum([entry.value for entry in potential.entries])
    #     print('total {}'.format(total))
    #     assert 1 == 2
| apache-2.0 | Python | |
9100015cf25d0ab09aa3b8d6410343f933b599fc | Add quicklook.py, a quick way of checking properties across all the data | osmlab/nycbuildings,osmlab/nycbuildings,osmlab/nycbuildings | quicklook.py | quicklook.py | from fiona import collection
from sys import argv
from glob import glob
# With an argument: list matching chunk files. Without: scan every
# address shapefile for street names beginning with "ft " (possible
# abbreviations of "Fort").
if (len(argv) == 2):
    # NOTE(review): the %s placeholder is never substituted — this globs
    # for the literal pattern; probably meant
    # glob("chunks/addresses-%s.shp" % argv[1]). Confirm before relying on it.
    addrs = glob("chunks/addresses-%s.shp")
    for addr in addrs:
        print addr
else:
    addrs = glob("chunks/addresses*.shp")
    for addr in addrs:
        try:
            with collection(addr, "r") as input:
                for address in input:
                    if 'properties' in address and 'STREET_NAM' in address['properties']:
                        if address['properties']['STREET_NAM'].lower().startswith('ft '):
                            print 'possible FORT: ' + address['properties']['STREET_NAM']
        except Exception, e:
            # Keep scanning the remaining shapefiles on a bad file.
            print 'error with ' + addr
34939554fc3697867979a6ca711583b24d38def0 | Create quicksort.py | mschruf/python | quicksort.py | quicksort.py | """Implements Quicksort algorithm.
"""
import random # for random selection of pivot element
def swap_elements(mut_seq, index1, index2):
    """Swaps two elements of mutable sequence.

    Args:
        mut_seq: mutable sequence
        index1: index of element to be swapped with element with index 'index2'
        index2: index of element to be swapped with element with index 'index1'

    Returns:
        Nothing
    """
    # Idiomatic tuple assignment swaps in place; a self-swap
    # (index1 == index2) is a harmless no-op, so the original's
    # early-return special case is unnecessary.
    mut_seq[index1], mut_seq[index2] = mut_seq[index2], mut_seq[index1]
def quicksort(mut_seq, index_start, index_end):
    """Sorts mutable sequence in place using Quicksort algorithm.

    Args:
        mut_seq: mutable sequence to be sorted
        index_start: index of first element of mut_seq to be sorted
        index_end: index of last element of mut_seq to be sorted

    Returns:
        Nothing

    Raises:
        IndexError: if argument 'index_end' exceeds greatest valid index for
            argument 'mut_seq'
    """
    if index_end >= len(mut_seq):
        raise IndexError
    if index_end - index_start < 1:
        return

    # Pick a random pivot and park it at the end of the range.
    pivot_index = random.randrange(index_start, index_end + 1)
    mut_seq[pivot_index], mut_seq[index_end] = mut_seq[index_end], mut_seq[pivot_index]

    # Partition: everything smaller than the pivot moves to the front.
    boundary = index_start
    for scan in range(index_start, index_end):
        if mut_seq[scan] < mut_seq[index_end]:
            mut_seq[scan], mut_seq[boundary] = mut_seq[boundary], mut_seq[scan]
            boundary += 1

    # Drop the pivot into its final sorted slot.
    mut_seq[boundary], mut_seq[index_end] = mut_seq[index_end], mut_seq[boundary]

    # Recurse on the sub-ranges to either side of the pivot.
    quicksort(mut_seq, index_start, boundary - 1)
    quicksort(mut_seq, boundary + 1, index_end)
| cc0-1.0 | Python | |
09ee7c5972f3a508355f6dfd49ff05d8de482cd9 | Add example of slide-hold-slide test | jrleeman/rsfmodel | shs_example.py | shs_example.py | import numpy as np
import matplotlib.pyplot as plt
import rsf
# Build and run a rate-and-state friction slide-hold-slide simulation.
model = rsf.RateState()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.b = 0.01 # Empirical coefficient for the evolution effect
model.dc = 10. # Critical slip distance
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 10. # Initial slider velocity, generally is vlp(t=0)
model.vref = 10. # Reference velocity, generally vlp(t=0)
model.stateLaw = model.dieterichState # Which state relation we want to use
# Solve for 150 seconds at 100 Hz (0.01 s steps)
model.model_time = np.arange(0,150.01,0.01)
# We want to slide at 10 um/s for 20 s, hold for 100 s, then slide again
lp_velocity = 10* np.ones_like(model.model_time)
lp_velocity[20*100:120*100] = 0. # Hold (v = 0) from t = 20 s to t = 120 s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
results = model.solve()
# Make the phase plot
model.phasePlot()
# Make a plot in displacement
model.dispPlot()
# Make a plot in time
model.timePlot()
| mit | Python | |
4f7b103d6c5fa3b07abb23e346caa995a7f803ef | Make new test fail correctlyish | mkusz/invoke,kejbaly2/invoke,mkusz/invoke,pyinvoke/invoke,mattrobenolt/invoke,kejbaly2/invoke,tyewang/invoke,frol/invoke,singingwolfboy/invoke,pyinvoke/invoke,frol/invoke,pfmoore/invoke,pfmoore/invoke,mattrobenolt/invoke | tests/completion.py | tests/completion.py | import sys
from nose.tools import ok_
from _utils import _output_eq, IntegrationSpec, _dispatch, trap, expect_exit
class ShellCompletion(IntegrationSpec):
    """
    Shell tab-completion behavior
    """
    def no_input_means_just_task_names(self):
        # With no partial word, --complete lists every task name.
        _output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
    def no_input_with_no_tasks_yields_empty_response(self):
        # An empty collection completes to nothing at all.
        _output_eq('-c empty --complete', "")
    @trap
    def top_level_with_dash_means_core_options(self):
        # A leading dash before any task name should complete core flags.
        with expect_exit(0):
            _dispatch('inv --complete -- -')
        output = sys.stdout.getvalue()
        # No point mirroring all core options, just spot check a few
        for flag in ('--no-dedupe', '-d', '--debug', '-V', '--version'):
            ok_(flag in output)
| from _utils import _output_eq, IntegrationSpec
class ShellCompletion(IntegrationSpec):
"""
Shell tab-completion behavior
"""
def no_input_means_just_task_names(self):
_output_eq('-c simple_ns_list --complete', "z_toplevel\na.b.subtask\n")
def no_input_with_no_tasks_yields_empty_response(self):
_output_eq('-c empty --complete', "")
def top_level_with_dash_means_core_options(self):
_output_eq('--complete -- -', "--lol\n--wut")
| bsd-2-clause | Python |
b5dcc8d77ebbe3f1e62599164139cf60927c94c8 | Create Precio.py | mdmirabal/Parcial2-Prog3 | Precio.py | Precio.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from att import *
#from att import Sector_Destino
from zona import zona
from Conexion import *
# lugar = "Costa del Este"
# # lugar = "Altos del Hipódromo"
# # destino="El Tecal"
# # # lugar = "El Tecal"
# destino="Costa del Este"
lugar = "Albrook"
destino ="24 de Diciembre"
camino=[]
def Precio(lugar, destino):
    """Look up the fare for travelling from `lugar` to `destino`.

    Each endpoint is resolved first as a sector and, failing that, as a
    zone; the fare is then read from the matching database table.
    Returns the price as a string, or "null" when no rule applies.
    """
    precio= ""
    _sector_Origen = Sector_Origen(lugar)
    _sector_Destino = Sector_Destino(destino)
    # If the origin is not a sector, fall back to a zone lookup.
    if not _sector_Origen:
        # print ("El Origen no es un sector")
        _sector_Origen = zona(lugar)
    # If the destination is not a sector, fall back to a zone lookup.
    if not _sector_Destino:
        # print ("El Destino no es un Sector")
        _sector_Destino = zona(destino)
    # Both lookups appear to return (table_kind, location_key) pairs —
    # TODO confirm against Sector_Origen/zona.
    camino = [_sector_Origen[1],_sector_Destino[1]]
    print ("Ir de "+lugar+" A "+destino)
    # Same kind on both ends: price comes from that kind's own table.
    if _sector_Origen[0] == _sector_Destino[0]:
        tabla = _sector_Origen[0]
        precio = str(DbPrecio_Sector(tabla,camino))
        print ("CUESTA S==> "+precio)
    # Mixed zone/sector trip: price comes from the ZONA-SEC table.
    elif _sector_Origen[0] == "zona" and _sector_Destino[0] !="zona" or _sector_Origen[0] !="zona" and _sector_Destino[0] == "zona":
        tabla = "ZONA-SEC"
        precio = str(DbPrecio_Sector(tabla,camino))
        print ("CUESTA Z==> "+precio)
    else:
        precio ="null"
    print (precio)
    return precio
| mit | Python | |
661943403b9a4b7c28bf9e0a59ba937dc2298fef | Add SSH auto detect feature | isidroamv/netmiko,ktbyers/netmiko,fooelisa/netmiko,ktbyers/netmiko,isidroamv/netmiko,fooelisa/netmiko | netmiko/ssh_autodetect.py | netmiko/ssh_autodetect.py | """
This module is used to auto-detect the type of a device in order to automatically create a
Netmiko connection.
This avoids hard-coding the 'device_type' when using the ConnectHandler factory function
from Netmiko.
"""
from netmiko.ssh_dispatcher import CLASS_MAPPER_BASE, ConnectHandler
SSH_MAPPER_BASE = {}
for k, v in CLASS_MAPPER_BASE.iteritems():
if getattr(v, "autodetect", None):
SSH_MAPPER_BASE[k] = v
class SSHDetect(object):
    """
    The SSHDetect class tries to automatically guess the device type running on the SSH remote end.

    Parameters
    ----------
    *args : list
        The same *args that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.
    *kwargs : dict
        The same *kwargs that you might provide to the netmiko.ssh_dispatcher.ConnectHandler.

    Attributes
    ----------
    connection : netmiko.terminal_server.TerminalServer
        A basic connection to the remote SSH end.
    potential_matches : dict
        Dict of (device type, accuracy) that is populated through an interaction with the
        remote end.

    Methods
    -------
    autodetect()
        Try to determine the device type.
    """

    def __init__(self, *args, **kwargs):
        """
        Constructor of the SSHDetect class
        """
        self.connection = ConnectHandler(*args, **kwargs)
        # Autodetection requires the generic terminal_server driver; any
        # other driver would already presume a specific device type.
        if self.connection.device_type != "terminal_server":
            self.connection.disconnect()
            raise ValueError("The connection device_type must be of 'terminal_server'")
        self.potential_matches = {}

    def autodetect(self):
        """
        Try to guess the best 'device_type' based on each device_type autodetect method.

        Returns
        -------
        best_match : str or None
            The device type that is currently the best to use to interact with the device,
            or None when no driver recognized the remote end.
        """
        for device_type, driver in SSH_MAPPER_BASE.items():
            try:
                accuracy = driver.autodetect(self.connection)
                self.potential_matches[device_type] = accuracy
            except Exception:
                # A driver that fails while probing simply does not match;
                # keep trying the remaining drivers.
                pass

        if not self.potential_matches:
            self.connection.disconnect()
            return None

        # Sort by reported accuracy, highest first. (The previous code
        # sorted by device-type name, which returned an alphabetically
        # arbitrary driver rather than the best match.)
        best_match = sorted(self.potential_matches.items(),
                            key=lambda t: t[1], reverse=True)
        self.connection.disconnect()
        return best_match[0][0]
| mit | Python | |
98dcea81abcfb795c61b24b260c5a5592d24b2d4 | use this file to built unit tests | rfleschenberg/django-shop,jrief/django-shop,nimbis/django-shop,nimbis/django-shop,jrief/django-shop,jrief/django-shop,rfleschenberg/django-shop,awesto/django-shop,divio/django-shop,jrief/django-shop,divio/django-shop,awesto/django-shop,divio/django-shop,nimbis/django-shop,khchine5/django-shop,khchine5/django-shop,rfleschenberg/django-shop,awesto/django-shop,nimbis/django-shop,rfleschenberg/django-shop,khchine5/django-shop,khchine5/django-shop | tests/test_money.py | tests/test_money.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
import cPickle as pickle
from django.utils.formats import number_format
from . import Money
from .money_maker import MoneyMaker, AbstractMoney
# Tests:
# Smoke tests for Money arithmetic; mostly printed for eyeballing, with a
# single assert covering pickle round-tripping.
m1 = Money('1.46')
m2 = Money('3.54')
m3 = Money(Decimal('8.99'))
m4 = Money(m3)
num = Decimal(3)
print m1 > 0
# NaN behavior: an empty Money and its interaction with real amounts.
nan = Money()
print nan
print repr(nan)
print nan + m1
print m1 + nan
print m2 - nan
print nan - m3
m4 = 100 * m1
print int(m1)
# Pickling must preserve value equality.
pickled = pickle.dumps(m4)
m5 = pickle.loads(pickled)
assert m4 == m5
print MoneyMaker('JPY')(99)
#print AbstractMoney('3.55')
print repr(m2)
print m1 + m2
print float(m2)
print '{:f}'.format(m2)
print number_format(m1)
# Mixed arithmetic with plain Decimal operands, both orders.
print num * m1
print m2 * num
print str(m1)
print m1 - m2
print m1 * num
print num * m2
print m1 / num
m1 += m2
print m1
m3 = -m2
print m3
print Decimal(m3)
# A second currency must keep working independently.
p1 = MoneyMaker('GBP')('7.77')
p2 = MoneyMaker('GBP')('1.44')
print repr(p2)
print p1 + p2
# Adding integer zero in either order should be tolerated.
z = 0
print m1 + z
print z + m1
| bsd-3-clause | Python | |
0d32800fec1419eac39711fd8c94ce07896cddaf | Test for the gaussian output layer, going through all same fit() and predict() tests as the linear output. | gticket/scikit-neuralnetwork,IndraVikas/scikit-neuralnetwork,capitancambio/scikit-neuralnetwork,agomariz/scikit-neuralnetwork,freakynit/scikit-neuralnetwork,KhanSuleyman/scikit-neuralnetwork,aigamedev/scikit-neuralnetwork | sknn/tests/test_gaussian.py | sknn/tests/test_gaussian.py | import unittest
from nose.tools import (assert_is_not_none, assert_raises, assert_equal)
from sknn.mlp import MultiLayerPerceptronRegressor as MLPR
from . import test_linear
class TestGaussianOutput(test_linear.TestLinearNetwork):
    # Re-runs every fit()/predict() test from TestLinearNetwork, but with a
    # Gaussian output layer instead of the plain linear one.
    def setUp(self):
        self.nn = MLPR(layers=[("LinearGaussian",)])
| bsd-3-clause | Python | |
dfa8f4b867a17a6f63a0e2e2e5f7f615ad514fc0 | improve proj | parrt/msan501,parrt/msan501,parrt/msan501 | notes/code/linked_list.py | notes/code/linked_list.py | # Hmm.... python classes are not so clear.
class LinkedList:
    # Bare container; the `head` attribute is attached by callers below.
    pass
class Node:
    # Singly linked node; `value` and `next` are attached by add() below.
    def __str__(self):
        return str(self.value)
names = LinkedList()
names.head = None
def add(llist, x):
    """Prepend a new node carrying x to the front of the linked list."""
    fresh = Node()
    fresh.value = x
    fresh.next = llist.head
    llist.head = fresh
def printlist(llist):
    # Walk the chain from head to tail, printing one value per line.
    p = llist.head
    while p is not None:
        print p
        p = p.next
add(names, "ter")
add(names, "mary")
printlist(names)
| mit | Python | |
5fb337e143ae55d98b3772dc6f5059d0c01115c1 | Create package __init__.py file. | tuttleofx/sconsProject | __init__.py | __init__.py | """
SConsProject
The sconsproject package proposes a way to easily create the compilation system
of your project with the minimum of information. It's an helper around SCons.
########################################
# Example 1
from sconsProject import SConsProject
project = SConsProject()
Export('project')
Export({'libs':project.libs})
project.begin()
project.SConscript()
project.end()
########################################
# Example 2
# If you have common creation things in your project, create a class for your project which inherite this class.
# So this function is accessible in all SConscript files.
# You can also overload some SConsProject function to cusomize it.
class MyProject( SConsProject ):
def createCustomPlugins( self, sources=[], libs=[] ):
\"""
Create a particular type of plugins from a sources list and a libraries list.
\"""
pluginName = self.getName()
env_local = self.createEnv( libs )
env_local.AppendUnique( CCFLAGS = self.CC['visibilityhidden'] )
plugin = env_local.SharedLibrary( target=pluginName, source=sources )
env_local.InstallAs( self.inOutputBin(), plugin )
project = MyProject()
Export('project')
Export({'libs':project.libs})
project.begin()
project.SConscript()
project.end()
########################################
"""
from project import SConsProject
| mit | Python | |
436029e6f4626d0e13393206cc38e5f2cae9c770 | Create __init__.py | ShuaiGitHub/moca | __init__.py | __init__.py | apache-2.0 | Python | ||
3402602d0aaf39e491a6422ba35f175bc249539b | Add __init__.py. | eagleflo/mpyq,fengthedroid/mpyq | __init__.py | __init__.py | from mpyq import MPQArchive
__all__ = ['MPQArchive']
| bsd-2-clause | Python | |
6d33848780d71c9bf725288f1f92d022f143a7e6 | add sift_matcher #132 | pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc | jsk_2015_05_baxter_apc/node_scripts/sift_matcher.py | jsk_2015_05_baxter_apc/node_scripts/sift_matcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import gzip
import cPickle as pickle
import numpy as np
import yaml
import rospy
from posedetection_msgs.msg import ImageFeature0D
from jsk_2014_picking_challenge.srv import ObjectMatch, ObjectMatchResponse
from sift_matcher_oneimg import SiftMatcherOneImg
class SiftMatcher(object):
    """ROS service node that matches SIFT features of the most recent
    query image against pre-computed SIFT data for known objects."""

    def __init__(self):
        # load object list
        dirname = os.path.dirname(os.path.abspath(__file__))
        ymlfile = os.path.join(dirname, '../data/object_list.yml')
        # NOTE(review): yaml.load on a repo-local file; prefer
        # yaml.safe_load if this file could ever come from outside.
        self.object_list = yaml.load(open(ymlfile))
        rospy.loginfo('Loading sift data')
        self.all_siftdata = self._load_all_siftdata()
        rospy.Service('/semi/sift_matcher', ObjectMatch, self._cb_matcher)
        sub_imgfeature = rospy.Subscriber('/ImageFeature0D', ImageFeature0D,
                                          self._cb_imgfeature)
        rospy.loginfo('Ready to receive match request')

    def _cb_matcher(self, req):
        """Service callback: return match probabilities for req.objects."""
        probs = self._get_object_probability(req.objects)
        return ObjectMatchResponse(probabilities=probs)

    def _cb_imgfeature(self, msg):
        """Topic callback: remember the latest image features as the query."""
        self.query_features = msg.features

    def _get_object_probability(self, obj_names):
        """Get object match probabilities.

        For each requested object the best match count over all of its
        training descriptors is taken; counts are normalized by the
        overall maximum.
        """
        # NOTE(review): raises AttributeError if no ImageFeature0D message
        # has arrived yet — confirm a message is guaranteed beforehand.
        query_features = self.query_features
        n_matches = []
        for obj_name in obj_names:
            if obj_name not in self.object_list:
                n_matches.append(0)
                continue
            # find best match in train features
            siftdata = self.all_siftdata.get(obj_name, None)
            if siftdata is None:  # does not exists data file
                n_matches.append(0)
                continue
            train_matches = []
            for train_des in siftdata['descriptors']:
                matches = SiftMatcherOneImg.find_match(
                    query_features.descriptors, train_des)
                train_matches.append(len(matches))
            n_matches.append(max(train_matches))  # best match
        # Float dtype: integer division would floor every probability
        # to 0 or 1 under Python 2.
        n_matches = np.array(n_matches, dtype=np.float64)
        if n_matches.max() == 0:
            return n_matches  # nothing matched; avoid 0/0
        return n_matches / n_matches.max()

    def _load_all_siftdata(self):
        """Load sift data of all objects, keyed by object name."""
        return {obj_name: self.load_siftdata(obj_name)
                for obj_name in self.object_list}

    @staticmethod
    def load_siftdata(obj_name):
        """Load sift data from pkl file, or None when no data file exists."""
        # Resolve relative to this source file. (The original passed the
        # string literal '__file__' to abspath, which resolved against the
        # current working directory instead of the module's directory.)
        dirname = os.path.dirname(os.path.abspath(__file__))
        datafile = os.path.join(dirname, '../data/siftdata',
                                obj_name + '.pkl.gz')
        if not os.path.exists(datafile):
            return  # does not exists
        with gzip.open(datafile, 'rb') as f:
            return pickle.load(f)
def main():
    """Start the sift_matcher node and serve requests until shutdown."""
    rospy.init_node('sift_matcher')
    sm = SiftMatcher()
    rospy.spin()
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
c0aa12111befbc6a5779be6a0111f7b80027082d | add new package (#6659) | mfherbst/spack,LLNL/spack,EmreAtes/spack,EmreAtes/spack,krafczyk/spack,LLNL/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack,krafczyk/spack,krafczyk/spack,LLNL/spack,EmreAtes/spack,LLNL/spack,mfherbst/spack,krafczyk/spack,tmerrick1/spack,EmreAtes/spack,EmreAtes/spack,tmerrick1/spack,matthiasdiener/spack,iulian787/spack,matthiasdiener/spack,tmerrick1/spack,mfherbst/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,iulian787/spack,iulian787/spack,iulian787/spack,matthiasdiener/spack,krafczyk/spack,tmerrick1/spack,tmerrick1/spack | var/spack/repos/builtin/packages/tasmanian/package.py | var/spack/repos/builtin/packages/tasmanian/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Tasmanian(CMakePackage):
    """The Toolkit for Adaptive Stochastic Modeling and Non-Intrusive
    ApproximatioN is a robust library for high dimensional integration and
    interpolation as well as parameter calibration."""
    # Project page and source archive used by Spack's fetcher.
    homepage = "http://tasmanian.ornl.gov"
    url = "http://tasmanian.ornl.gov/documents/Tasmanian_v5.0.zip"
    # Known release with its md5 checksum.
    version('5.0', '4bf131841d786033863d271739be0f7a')
| lgpl-2.1 | Python | |
d8fd5835b6ba0a6de757fe4aa63fa97bdbd15f83 | add new package (#2486) | iulian787/spack,matthiasdiener/spack,LLNL/spack,matthiasdiener/spack,iulian787/spack,TheTimmy/spack,skosukhin/spack,LLNL/spack,lgarren/spack,krafczyk/spack,tmerrick1/spack,lgarren/spack,EmreAtes/spack,tmerrick1/spack,krafczyk/spack,mfherbst/spack,TheTimmy/spack,krafczyk/spack,LLNL/spack,mfherbst/spack,lgarren/spack,tmerrick1/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,EmreAtes/spack,iulian787/spack,skosukhin/spack,tmerrick1/spack,skosukhin/spack,TheTimmy/spack,krafczyk/spack,mfherbst/spack,EmreAtes/spack,iulian787/spack,TheTimmy/spack,matthiasdiener/spack,mfherbst/spack,EmreAtes/spack,LLNL/spack,lgarren/spack,matthiasdiener/spack,lgarren/spack,TheTimmy/spack,EmreAtes/spack,matthiasdiener/spack,skosukhin/spack,iulian787/spack,mfherbst/spack,skosukhin/spack | var/spack/repos/builtin/packages/rename/package.py | var/spack/repos/builtin/packages/rename/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Rename(Package):
    """Perl-powered file rename script with many helpful built-ins."""
    homepage = "http://plasmasturm.org/code/rename"
    url = "https://github.com/ap/rename/archive/v1.600.tar.gz"
    version('1.600', '91beb555c93d407420b5dad191069bb3')
    # 'rename' is a Perl script; Perl is needed at build/run but not linked.
    depends_on('perl', type='nolink')
    def install(self, spec, prefix):
        # Generate the man page from the script's embedded POD docs.
        Executable('pod2man')('rename', 'rename.1')
        bdir = join_path(prefix, 'bin')
        mkdirp(bdir)
        install('rename', bdir)
        mdir = join_path(prefix, 'share', 'man', 'man1')
        mkdirp(mdir)
        install('rename.1', mdir)
| lgpl-2.1 | Python | |
a3261609967444c01039908d4eb7ccd7feaf3616 | add blinker example. | devlights/try-python | trypython/extlib/blinker01.py | trypython/extlib/blinker01.py | """
イベントディスパッチライブラリ blinker のサンプルです。
基本的な signal の使い方について。
REFERENCES::
https://github.com/jek/blinker
https://pythonhosted.org/blinker/
"""
import blinker as bl
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import hr
class Sample(SampleBase):
    """Demonstrates the basics of using Blinker signals."""
    def exec(self):
        """Runs the sample."""
        # --------------------------------------------------------------
        # blinker is a library that dispatches event notifications.
        # It is simple to use and fast — a good fit whenever the
        # Observer pattern is needed.
        #
        # With blinker you first create a signal via signal(); the
        # signal may be given any name you like.
        # Receivers attach themselves with connect(); after that, every
        # send() delivers its value to the attached handlers.
        # --------------------------------------------------------------
        my_signal = bl.signal('my-signal')
        # Without a sender argument the connection is a broadcast:
        # the handler receives every value sent through this signal.
        my_signal.connect(self.print_message)
        hr('broadcast')
        for i in range(5):
            my_signal.send(i)
        hr('broadcast + filtering')
        # With a sender argument, only events whose value matches it
        # are delivered to the handler.
        my_signal.connect(self.print_message2, sender=3)
        for i in range(5):
            my_signal.send(i)
    @staticmethod
    def print_message(message):
        print(f'notify signal [{message}]')
    @staticmethod
    def print_message2(message):
        print(f'notify signal 2 [{message}]')
print(f'notify signal 2 [{message}]')
def go():
    """Instantiate the sample and run it."""
    Sample().exec()
if __name__ == '__main__':
go()
| mit | Python | |
558e5ff1a4d33f339d34a4fe2fac649cd22f45c3 | add reprlib.repr() sample. | devlights/try-python | trypython/stdlib/reprlib01.py | trypython/stdlib/reprlib01.py | """
reprlib についてのサンプルです。
link::
https://docs.python.jp/3/library/reprlib.html
"""
import functools
import random
import reprlib
import string
from typing import Sequence, Callable
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class ManyFields:
    """Object whose attributes are created dynamically from a list of
    names, each initialised by calling the supplied factory."""

    def __init__(self, field_names: Sequence[str], value_factory: Callable[[], int]):
        for field in field_names:
            setattr(self, field, value_factory())

    def __repr__(self) -> str:
        parts = [f'{name}={value}' for name, value in self.__dict__.items()]
        return ','.join(parts)
class Sample(SampleBase):
    def exec(self):
        """Shows how reprlib.repr() abbreviates very long repr() output."""
        # ------------------------------------------------
        # reprlib.repr()
        #
        # A plain repr() can produce very long output, since it is
        # aimed at developers and shows everything.  reprlib.repr()
        # returns a size-limited version instead, which is handy in
        # exactly those cases.
        # ------------------------------------------------
        names = string.ascii_letters
        value_factory = functools.partial(random.Random().randint, 1, 100)
        many_fields = ManyFields(names, value_factory)
        pr('repr', repr(many_fields))
        pr('reprlib.repr', reprlib.repr(many_fields))
def go():
    """Instantiate the sample and run it."""
    Sample().exec()
if __name__ == '__main__':
go()
| mit | Python | |
a4945972bb4f04c2d18fec87ec4c6d6e955cba1c | add release script | centsent/hydeo | release.py | release.py | #! /usr/bin/env python
import subprocess
import sys
GIT_SHOW_REF_HEARD = 'git show-ref --verify --quiet refs/heads/%s'
GIT_SHOW_REF_TAGS = 'git show-ref --verify --quiet refs/tags/%s'
def checkExistingTag(version):
if (subprocess.call((GIT_SHOW_REF_HEARD % version).split()) == 0 or
subprocess.call((GIT_SHOW_REF_TAGS % version).split()) == 0):
print "Error: The tag '%s' already exists" % version
raise Exception()
# def checkout(node):
# if subprocess.call(('git checkout %s' % node).split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE) != 0:
# print "Error: The git node '%s' doesn't exist" % node
# exit(-1)
def commit(version):
    """Stage untracked files, commit, tag with `version`, and push the tag."""
    # Collect untracked (non-ignored) files and `git add` them all at once.
    untrackedFiles = subprocess.Popen('git ls-files -o --exclude-standard'.split(), stdout=subprocess.PIPE)
    subprocess.call(('git add %s' % untrackedFiles.stdout.read().replace('\n', ' ')).split())
    subprocess.call(['git', 'commit', '-am', '"chore release: new release %s"' % version], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    subprocess.call(('git tag %s' % version).split())
    # print "Publishing new commit to master"
    # subprocess.call('git push origin master'.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    print "Publishing new tag"
    subprocess.call(('git push origin %s' % version).split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    print "Release %s created!" % version
if __name__ == "__main__":
    # Usage: release.py <version>. Any failure (missing argument,
    # pre-existing tag, git error) exits with status -1.
    try:
        if len(sys.argv) == 1:
            print "Error: The version name is required"
            raise Exception()
        version = sys.argv[1]
        checkExistingTag(version)
        commit(version)
    except Exception as e:
        exit(-1)
| mit | Python | |
e8e00263acb7f04e19c0839bd881151b6ed4abd1 | Create webhook.py | fantazic/fb_pixel,fantazic/fb_pixel | webhook.py | webhook.py | print 'Hello'
| mit | Python | |
d2c674bf5583ea20e63a64486efb5d0e9e908e5a | Add module for mapping data onto KEGG pathways. | theandygross/Figures | src/Figures/KEGG.py | src/Figures/KEGG.py | __author__ = 'agross'
import re
import itertools
import urllib
import pandas as pd
from matplotlib.colors import rgb2hex
from matplotlib.cm import RdBu
KEGG_PATH = 'http://www.kegg.jp/kegg-bin/'
from Figures.KEGG import *
def pull_pathway_info_from_kegg(kegg_id):
    """Fetch a KEGG flat-file record and return its gene table.

    Returns a DataFrame with one row per (EC, KO) combination of every gene
    in the record's GENE section; columns: id, gene, ec, ko.
    NOTE(review): Python 2 / legacy pandas code (urllib.urlopen,
    Series.order(), print statements).
    """
    o = urllib.urlopen('http://rest.kegg.jp/get/' + kegg_id).read()
    o = o.splitlines()
    '''need to parse out when new sections start'''
    # Section headers start in column 0 of the fixed-width KEGG format.
    sections = {}
    for i, n in enumerate(o):
        s = n.split()[0]
        if n[0] != ' ' and s not in sections:
            sections[s] = i
    sections = pd.Series(sections).order()
    o = [l[12:] for l in o] # get rid of fixed-width section headings
    '''Pull out gene information, ec = enzyme, ko = complex'''
    # GENE section spans from its own header to the next section header.
    start = sections['GENE']
    stop = [sections.iloc[i+1] for i, s in enumerate(sections.index)
            if s == 'GENE'][0]
    gene = o[start:stop]
    #return gene
    mapping = []
    for g in gene:
        try:
            # Each entry looks like "<id> <name>; <desc> [KO:...] [EC:...]".
            g = g.split(';')
            gene_id = g[0].split()[0]
            gene_name = g[0].split()[1]
            desc = re.findall('\[(.*?)\]', g[1]) # stuff in [brackets]
            ko = [e for e in desc if 'KO:' in e]
            if len(ko) > 0:
                ko = ko[0][3:].split()
            else:
                ko = ['None']
            ec = [e for e in desc if 'EC:' in e]
            if len(ec) > 0:
                ec = ec[0][3:].split()
            else:
                ec = ['None']
            # One row per cross-product of the gene's EC and KO identifiers.
            for i, j in itertools.product(ec, ko):
                mapping.append(pd.Series({'id': gene_id, 'gene': gene_name,
                                          'ec': i, 'ko': j}))
        except:
            # NOTE(review): bare except hides real errors; it only prints the
            # unparseable line and drops it from the result.
            print g
    mapping = pd.DataFrame(mapping)
    return mapping
def plot_data_on_pathway(kegg_id, mapping, dist):
    """Build and print a KEGG pathway-colouring URL for the values in `dist`.

    mapping: DataFrame from pull_pathway_info_from_kegg (id/gene/ec/ko).
    dist: values indexed by gene symbol; the per-KO median is mapped through
        the RdBu colormap (so values presumably lie in [0, 1] — confirm).
    NOTE(review): the URL is only printed, never returned.
    """
    # Restrict to genes with data, ordered by how many (ec, ko) rows each
    # gene contributes.
    mapping = mapping[mapping.gene.isin(dist.index)]
    order = mapping.gene.map(mapping.groupby('gene').size()).order()
    mapping = mapping.ix[order.index]
    symbol_to_kegg = mapping.set_index('gene').id
    symbol_to_kegg = symbol_to_kegg.groupby(level=0).first()
    dist = pd.Series(dist, name='dist')
    # Median value per KO group, then one representative gene per KO.
    ec = mapping.set_index('gene').join(dist).groupby('ko').median()
    ec = ec.dist.dropna().order()
    gm = pd.concat([mapping.groupby('ko').first().gene, ec], 1)
    gm = gm.set_index('gene').dist.groupby(level=0).first()
    # Hex colour (without '#') from the reversed RdBu colormap.
    cmap = gm.map(lambda v: rgb2hex(RdBu(1-v)).upper()[1:])
    # KEGG multi_query format: "hsa:<id>+%23<hex>" entries joined by '%0D%'.
    s = '%0D%'.join(['hsa:{}+%23{}'.format(symbol_to_kegg.ix[i], cmap.ix[i])
                     for i in gm.index if i in symbol_to_kegg])
    s = '{}show_pathway?map={}&multi_query={}'.format(KEGG_PATH, kegg_id, s)
    print s
def parse_entry(e):
    """Convert a KGML <entry> element into a pandas Series.

    Parameters
    ----------
    e : XML element
        An ``entry`` node; its attributes become Series fields and the
        ``id`` attributes of its <component> children are collected under
        the 'components' key.

    Returns
    -------
    pd.Series
    """
    # .items() (rather than the Python-2-only .iteritems()) works under both
    # Python 2 and 3; dict(...) copies the attribute mapping.
    d = dict(e.attrib.items())
    d['components'] = [c.attrib['id'] for c in e.findall('component')]
    return pd.Series(d)
| mit | Python | |
6bba6221343ec2b2c2626504ea46aba621a39df0 | convert gatk result to gistic segment file | shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl | lib/Format/gatk2gistic_segment_file.py | lib/Format/gatk2gistic_segment_file.py | import argparse
import gzip
import math
import logging
# CLI: -i is a list file with one "<segment vcf.gz path><TAB><sample name>"
# per line, -n strips the "chr" prefix from chromosome names, -o names the
# combined gistic2 output file.
parser = argparse.ArgumentParser(description="Convert GATK segment CNV to gistic2 segmentation file",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input list file', required=True)
parser.add_argument('-n', '--no_chr', action='store_true', default=False, help='Remove chr from chromosome')
parser.add_argument('-o', '--output', action='store', nargs='?', default="-", help="Output file", required=True)
args = parser.parse_args()
def convert(name1, file1, no_chr, fout):
    """Translate one gzipped GATK segment VCF into gistic2 rows on `fout`.

    name1: sample name written as the first column of every row.
    file1: path to the gzipped GATK CNV segment VCF.
    no_chr: when True, strip a leading "chr" from chromosome names.
    fout: writable text stream receiving tab-separated gistic2 records.
    """
    with gzip.open(file1, "rt") as segments:
        for record in segments:
            # Skip VCF header lines.
            if record.startswith('#'):
                continue
            fields = record.rstrip().split('\t')
            # Copy number is the second value of the sample column (GT:CN).
            copy_number = int(fields[9].split(':')[1])
            seg_start = int(fields[1])
            seg_end = int(fields[7].replace('END=', ''))
            # Approximate one marker per kilobase of segment length.
            marker_count = math.ceil((seg_end + 1 - seg_start) / 1000)
            # log2 ratio relative to diploid; homozygous deletions get -5.
            log_ratio = -5 if copy_number == 0 else math.log2(copy_number) - 1
            chrom = fields[0].replace('chr', '') if no_chr else fields[0]
            fout.write("%s\t%s\t%d\t%d\t%d\t%.2f\n" % (
                name1, chrom, seg_start, seg_end, marker_count, log_ratio))
logger = logging.getLogger('convert')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
# Each input-list line is "<segment VCF path>\t<sample name>"; every sample's
# segments are appended to the single output file.
with open(args.output, "wt") as fout:
  with open(args.input, "rt") as fin:
    for line in fin:
      parts = line.rstrip().split('\t')
      logger.info(f"converting {parts[1]} : {parts[0]}")
      convert(parts[1], parts[0], args.no_chr, fout)
logger.info('done')
| apache-2.0 | Python | |
27177a3e27edfa8524c9bffee5aa4e4430d1e311 | Create base mesh | josuemontano/blender_wrapper | api/mesh.py | api/mesh.py | import bpy
from .base import BlenderObject
class Mesh(BlenderObject):
    """Base mesh wrapper around Blender's active-object operators."""
    def add_modifier(self, type):
        """Add a modifier to the active object

        :param type: Type of the modifier, must be one of the following:
            - DATA_TRANSFER Data Transfer.
            - MESH_CACHE Mesh Cache.
            - MESH_SEQUENCE_CACHE Mesh Sequence Cache.
            - NORMAL_EDIT Normal Edit.
            - UV_PROJECT UV Project.
            - UV_WARP UV Warp.
            - VERTEX_WEIGHT_EDIT Vertex Weight Edit.
            - VERTEX_WEIGHT_MIX Vertex Weight Mix.
            - VERTEX_WEIGHT_PROXIMITY Vertex Weight Proximity.
            - ARRAY Array.
            - BEVEL Bevel.
            - BOOLEAN Boolean.
            - BUILD Build.
            - DECIMATE Decimate.
            - EDGE_SPLIT Edge Split.
            - MASK Mask.
            - MIRROR Mirror.
            - MULTIRES Multiresolution.
            - REMESH Remesh.
            - SCREW Screw.
            - SKIN Skin.
            - SOLIDIFY Solidify.
            - SUBSURF Subdivision Surface.
            - TRIANGULATE Triangulate.
            - WIREFRAME Wireframe, Generate a wireframe on the edges of a mesh.
            - ARMATURE Armature.
            - CAST Cast.
            - CORRECTIVE_SMOOTH Corrective Smooth.
            - CURVE Curve.
            - DISPLACE Displace.
            - HOOK Hook.
            - LAPLACIANSMOOTH Laplacian Smooth.
            - LAPLACIANDEFORM Laplacian Deform.
            - LATTICE Lattice.
            - MESH_DEFORM Mesh Deform.
            - SHRINKWRAP Shrinkwrap.
            - SIMPLE_DEFORM Simple Deform.
            - SMOOTH Smooth.
            - WARP Warp.
            - WAVE Wave.
            - CLOTH Cloth.
            - COLLISION Collision.
            - DYNAMIC_PAINT Dynamic Paint.
            - EXPLODE Explode.
            - FLUID_SIMULATION Fluid Simulation.
            - OCEAN Ocean.
            - PARTICLE_INSTANCE Particle Instance.
            - PARTICLE_SYSTEM Particle System.
            - SMOKE Smoke.
            - SOFT_BODY Soft Body.
            - SURFACE Surface.
        """
        # bpy.ops operator properties must be passed as keyword arguments;
        # calling modifier_add(type) positionally raises a TypeError because
        # positional arguments are reserved for the execution context.
        bpy.ops.object.modifier_add(type=type)
| mit | Python | |
0b8ca8c7015ddd567abdd08e508a27c6ee3ed778 | fix history pickup payload | yunity/foodsaving-backend,yunity/yunity-core,yunity/yunity-core,yunity/foodsaving-backend,yunity/foodsaving-backend | karrot/history/migrations/0009_extend_historic_data_more.py | karrot/history/migrations/0009_extend_historic_data_more.py | from django.db import migrations
BATCH_SIZE = 1000
def migrate(apps, schema_editor):
    """Wrap legacy scalar pickup-date payload values into one-element lists."""
    History = apps.get_model('history', 'History')
    # rewrite pickup date payload date field from string to list:
    # payload__date__0__isnull=True selects payloads whose 'date' has no
    # element 0 (i.e. is not already a JSON array) while
    # payload__date__isnull=False skips rows with no date at all.
    save_payload = []
    for h in History.objects.filter(payload__date__0__isnull=True, payload__date__isnull=False):
        h.payload['date'] = [h.payload['date']]
        save_payload.append(h)
    History.objects.bulk_update(save_payload, fields=['payload'], batch_size=BATCH_SIZE)
class Migration(migrations.Migration):
    # Data-only migration: reverse is a no-op and elidable=True lets
    # squashed migrations drop it entirely.
    dependencies = [
        ('history', '0008_extend_historic_data'),
    ]
    operations = [migrations.RunPython(migrate, migrations.RunPython.noop, elidable=True)]
| agpl-3.0 | Python | |
adbdacfceca33e0dc22f1d28b8e1963932c7e31a | save work for augmentation | kennethjiang/donkey,kennethjiang/donkey,kennethjiang/donkey | donkeycar/parts/stores/augmentation.py | donkeycar/parts/stores/augmentation.py | import numpy as np
def white_unbalance(x, y):
    # Adjust white balance.
    # NOTE(review): this function appears unfinished/broken as committed:
    #   - `random`, `source`, and `Image` are not defined or imported in
    #     this module (only numpy is imported) — presumably `import random`,
    #     a PIL import, and `source = x` were intended; confirm before use.
    #   - the parameters x and y are never used.
    min_channel_high_end = 0.25
    max_channel_low_end = 0.25
    # Random per-channel output bands: minima in [0, 0.25), maxima in [0.75, 1).
    rmin = random.random()*min_channel_high_end
    gmin = random.random()*min_channel_high_end
    bmin = random.random()*min_channel_high_end
    rmax = random.random()*max_channel_low_end + 1 - max_channel_low_end
    gmax = random.random()*max_channel_low_end + 1 - max_channel_low_end
    bmax = random.random()*max_channel_low_end + 1 - max_channel_low_end
    new_image = np.empty((source.height, source.width, 3), dtype=np.float32)
    image = np.multiply(np.array(source), 1/255.)
    # Make exposure ocasionally brighter
    image = np.clip(np.multiply(image, random.random()*0.3+1.0), 0.0, 1.0)
    # Rescale each channel into its randomized [min, max] band.
    new_image[:, :, 0] = np.add(np.multiply(image[:, :, 0], (rmax-rmin)), rmin)
    new_image[:, :, 1] = np.add(np.multiply(image[:, :, 1], (gmax-gmin)), gmin)
    new_image[:, :, 2] = np.add(np.multiply(image[:, :, 2], (bmax-bmin)), bmin)
    new_image = np.multiply(new_image, 255)
    image = Image.fromarray(np.uint8(new_image))
    return image
| mit | Python | |
23526005714a7cdeecc9288ddcdda562004e0c91 | Add rply based parser | funkybob/rattle,funkybob/rattle | rattle/parser.py | rattle/parser.py |
import ast
import rply
lg = rply.LexerGenerator()
lg.add('NUMBER', '\d+')
lg.add('STRING', "'.*?'|\".*?\"")
lg.add('NAME', '\w+')
lg.add('LSQB', '\[')
lg.add('RSQB', '\]')
lg.add('LPAREN', '\(')
lg.add('RPAREN', '\)')
#lg.add('EQUALS', '=')
#lg.add('COMMA', ',')
lg.add('DOT', '\.')
pg = rply.ParserGenerator(
[rule.name for rule in lg.rules],
precedence = [
],
)
'''
kwarg : NAME EQUALS expr
arg : expr
arg_list : arg
| arg COMMA arg_list
expr : NAME
: NUMBER
: STRING
: expr DOT NAME
: expr LSQB expr RSQB
: expr LPAREN RPAREN
: expr LPAREN arg_list RPAREN
: expr LPAREN kwarg_list RPAREN
: expr LPAREN arg_list COMMA kwarg_list RPAREN
'''
@pg.production('expr : NAME')
def expr_NAME(p):
    '''Look up a NAME in Context'''
    # A bare name compiles to context['name'], so template variables resolve
    # against the rendering context mapping at execution time.
    return ast.Subscript(
        value=ast.Name(id='context', ctx=ast.Load()),
        slice=ast.Index(value=ast.Str(s=p[0].getstr()), ctx=ast.Load()),
        ctx=ast.Load(),
    )
@pg.production('expr : STRING')
def expr_STRING(p):
    # Strip the surrounding quote characters captured by the STRING rule.
    return ast.Str(s=p[0].getstr()[1:-1])
@pg.production('expr : NUMBER')
def expr_NUMBER(p):
    '''Build a numeric-literal node.'''
    # The ast module has no `Number` node type; numeric literals are
    # represented by ast.Num, so ast.Number raised an AttributeError here.
    return ast.Num(n=int(p[0].getstr()))
@pg.production('expr : expr DOT NAME')
def expr_DOT_NAME(p):
    # `a.b` becomes attribute access on the already-compiled left expression.
    lterm, _, rterm = p
    return ast.Attribute(
        value=lterm,
        attr=rterm.getstr(),
        ctx=ast.Load(),
    )
@pg.production('expr : expr LSQB expr RSQB')
def expr_SUBSCRIPT(p):
    # `a[b]` — both the container and the key are compiled sub-expressions.
    src, _, subscript, _ = p
    return ast.Subscript(
        value=src,
        slice=ast.Index(value=subscript, ctx=ast.Load()),
        ctx=ast.Load(),
    )
@pg.production('expr : expr LPAREN RPAREN')
def expr_empty_call(p):
    # Zero-argument call `f()`; the arg_list/kwarg_list grammar sketched in
    # the module docstring above has no productions yet.
    func, _, _ = p
    return ast.Call(
        func=func,
        args=[],
        keywords=[],
    )
@pg.error
def error(token):
    # rply invokes this for any token that cannot be shifted or reduced.
    raise ValueError('Unexpected token: %r' % token)
if __name__ == '__main__':
    lexer = lg.build()
    parser = pg.build()
    class Mock(object):
        # Minimal attribute bag used to exercise the DOT production.
        def __init__(self, **kwargs):
            for k, v in kwargs.items():
                setattr(self, k, v)
    # (template source, context, expected result) smoke tests.
    TESTS = (
        ('a', {'a': 'yes'}, 'yes'),
        ('a.b', {'a': Mock(b='yes')}, 'yes'),
        ('a["b"]', {'a': {'b': 'yes'}}, 'yes'),
    )
    for src, ctx, out in TESTS:
        tokens = lexer.lex(src)
        expr = parser.parse(iter(tokens))
        # Wrap the expression as `result = <expr>` and exec it against the
        # context to verify the compiled lookup behaviour.
        src = ast.Module(body=[
            ast.Assign(
                targets=[ast.Name(id='result', ctx=ast.Store())],
                value=expr,
            )
        ])
        ast.fix_missing_locations(src)
        code = compile(src, filename='<ast>', mode='exec')
        glob = {'context': ctx}
        exec(code, glob)
        assert(glob['result'] == out)
| mit | Python | |
b5e9821642c07f30909f7cbaff193af6666777a7 | Revert "disable 'svn init' for blink temporarily" | withtone/depot_tools,michalliu/chromium-depot_tools,Phonebooth/depot_tools,duongbaoduy/gtools,Neozaru/depot_tools,gcodetogit/depot_tools,gcodetogit/depot_tools,liaorubei/depot_tools,HackFisher/depot_tools,kromain/chromium-tools,liaorubei/depot_tools,ajohnson23/depot_tools,Chilledheart/depot_tools,smikes/depot_tools,airtimemedia/depot_tools,Phonebooth/depot_tools,SuYiling/chrome_depot_tools,coreos/depot_tools,withtone/depot_tools,chinmaygarde/depot_tools,G-P-S/depot_tools,SuYiling/chrome_depot_tools,HackFisher/depot_tools,aleonliao/depot_tools,Neozaru/depot_tools,azunite/chrome_build,fanjunwei/depot_tools,smikes/depot_tools,jankeromnes/depot_tools,sarvex/depot-tools,airtimemedia/depot_tools,withtone/depot_tools,azunite/chrome_build,duanwujie/depot_tools,coreos/depot_tools,disigma/depot_tools,coreos/depot_tools,Midrya/chromium,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,duongbaoduy/gtools,duanwujie/depot_tools,primiano/depot_tools,ajohnson23/depot_tools,azureplus/chromium_depot_tools,hsharsha/depot_tools,michalliu/chromium-depot_tools,aleonliao/depot_tools,CoherentLabs/depot_tools,liaorubei/depot_tools,Neozaru/depot_tools,jankeromnes/depot_tools,sarvex/depot-tools,Midrya/chromium,fanjunwei/depot_tools,Neozaru/depot_tools,mlufei/depot_tools,HackFisher/depot_tools,hsharsha/depot_tools,G-P-S/depot_tools,disigma/depot_tools,mlufei/depot_tools,azureplus/chromium_depot_tools,primiano/depot_tools,hsharsha/depot_tools,michalliu/chromium-depot_tools,smikes/depot_tools,ajohnson23/depot_tools,sarvex/depot-tools,kaiix/depot_tools,npe9/depot_tools,npe9/depot_tools,fracting/depot_tools,npe9/depot_tools,primiano/depot_tools,xuyuhan/depot_tools,kromain/chromium-tools,chinmaygarde/depot_tools,azureplus/chromium_depot_tools,kromain/chromium-tools,Phonebooth/depot_tools,Chilledheart/depot_tools,yetu/repotools,eatbyte/depot_tools,HackFisher/depot_tools,eatbyte
/depot_tools,xuyuhan/depot_tools,smikes/depot_tools,jankeromnes/depot_tools,disigma/depot_tools,G-P-S/depot_tools,cybertk/depot_tools,cybertk/depot_tools,jankeromnes/depot_tools,coreos/depot_tools,jankeromnes/depot_tools,Phonebooth/depot_tools,liaorubei/depot_tools,cybertk/depot_tools,airtimemedia/depot_tools,yetu/repotools,yetu/repotools,cybertk/depot_tools,sarvex/depot-tools,Chilledheart/depot_tools,cybertk/depot_tools,jankeromnes/depot_tools,azunite/chrome_build,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,mlufei/depot_tools,aleonliao/depot_tools,coreos/depot_tools,npe9/depot_tools,Midrya/chromium,chinmaygarde/depot_tools,duanwujie/depot_tools,fanjunwei/depot_tools,airtimemedia/depot_tools,eatbyte/depot_tools,Chilledheart/depot_tools,xuyuhan/depot_tools,michalliu/chromium-depot_tools,eatbyte/depot_tools,jankeromnes/depot_tools,smikes/depot_tools,duongbaoduy/gtools,Neozaru/depot_tools,G-P-S/depot_tools,Chilledheart/depot_tools,CoherentLabs/depot_tools,kaiix/depot_tools,cpanelli/-git-clone-https-chromium.googlesource.com-chromium-tools-depot_tools,xuyuhan/depot_tools,kromain/chromium-tools,fanjunwei/depot_tools,fracting/depot_tools,fracting/depot_tools,kaiix/depot_tools,SuYiling/chrome_depot_tools,gcodetogit/depot_tools,coreos/depot_tools | recipes/blink.py | recipes/blink.py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
# pylint: disable=F0401
import recipe_util
import sys
# pylint: disable=W0232
class Blink(recipe_util.Recipe):
  """Recipe alias that maps a Blink checkout onto the Chromium recipe."""

  @staticmethod
  def fetch_spec(props):
    # Mount the Blink SVN trunk inside the Chromium tree as a submodule.
    webkit_spec = {
        'svn_url': 'svn://svn.chromium.org/blink/trunk',
        'svn_branch': 'trunk',
        'svn_ref': 'master',
    }
    submodule_spec = {'third_party/WebKit': webkit_spec}
    spec_flag = '--submodule_git_svn_spec=' + json.dumps(submodule_spec)
    return {
        'alias': {
            'recipe': 'chromium',
            'props': ['--webkit_rev=ToT', spec_flag],
        },
    }
def main(argv=None):
  # Delegate argument handling to the shared Recipe base implementation.
  # Its return value is discarded, so sys.exit(None) below exits with 0.
  Blink().handle_args(argv)
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TOOD(dpranke): reenable import json
# pylint: disable=F0401
import recipe_util
import sys
# pylint: disable=W0232
class Blink(recipe_util.Recipe):
"""Basic Recipe alias for Blink -> Chromium."""
@staticmethod
def fetch_spec(props):
# TODO(dpranke): reenable
#submodule_spec = {
# 'third_party/WebKit': {
# 'svn_url': 'svn://svn.chromium.org/blink/trunk',
# 'svn_branch': 'trunk',
# 'svn_ref': 'master',
# }
#}
return {'alias': {
'recipe': 'chromium',
'props': ['--webkit_rev=ToT',
# '--submodule_git_svn_spec=' + json.dumps(submodule_spec)
]
}
}
def main(argv=None):
Blink().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | Python |
345b26fff755dbdea4021ba9bc613647a374644c | Add match group migration | leventebakos/football-ech,leventebakos/football-ech | matches/migrations/0007_match_group.py | matches/migrations/0007_match_group.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-24 13:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Schema migration: add a nullable 20-character "group" label to Match.
    dependencies = [
        ('matches', '0006_match_is_finished'),
    ]
    operations = [
        migrations.AddField(
            model_name='match',
            name='group',
            field=models.CharField(max_length=20, null=True),
        ),
    ]
| mit | Python | |
907ff7fca6228248010dbb86fc81c2266cdeb264 | Add release script | LukeStebbing/Ergometer,LukeStebbing/Ergometer,LukeStebbing/Ergometer | release.py | release.py | #!/usr/bin/env python
from collections import OrderedDict
from itertools import zip_longest
import json
import os
import re
import shlex
from subprocess import check_output, CalledProcessError
import sys
from zipfile import ZipFile
def sh(command, v=False):
    """Run `command` and return its stripped stdout.

    command: the command as a single string; it is tokenized with
        shlex.split so it runs without a shell on POSIX as well.
    v: when True, echo the command before running it.
    Raises CalledProcessError on a non-zero exit status.
    """
    if v:
        print(command)
    # check_output(str) without shell=True would try to exec the entire
    # string as one program name on POSIX; split it into an argv list.
    return check_output(shlex.split(command), text=True).strip()
def parse_version(v):
    """Split a dotted version string such as "1.2.3" into [1, 2, 3]."""
    return list(map(int, v.split(".")))
def dereference(link):
    """Resolve a ref name to its commit hash, or "" when it does not exist."""
    try:
        commit = sh(f"git rev-parse --verify -q {link}^0")
    except CalledProcessError:
        commit = ""
    return commit
# --- release procedure (this file is a script; the steps below run on import) ---
version_string = sys.argv[1]
prefixed_version = f"v{version_string}"
version = parse_version(version_string)
# Work from the repository root (the directory containing this script).
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Refuse to release from a dirty tree or from any branch other than master.
assert not sh("git status --porcelain")
assert sh("git branch") == "* master"
with open("manifest.json") as f:
    manifest = json.load(f, object_pairs_hook=OrderedDict)
manifest_version = parse_version(manifest["version"])
if version != manifest_version:
    # The requested version must bump exactly one component by one; earlier
    # components must match and later ones may only reset downward
    # (e.g. 1.2.3 -> 1.3.0). delta.index(1) raises if no component is +1.
    delta = list(
        vs[0] - vs[1]
        for vs in zip_longest(version, manifest_version, fillvalue=0)
    )
    increment = delta.index(1)
    assert all(i == 0 for i in delta[0:increment])
    assert all(i <= 0 for i in delta[increment + 1 :])
    manifest["version"] = version_string
    with open("manifest.json", "w", newline="\n") as f:
        json.dump(manifest, f, indent=2)
        print("", file=f)  # trailing newline
    sh(f"git commit -a -m {prefixed_version}", v=True)
tag_commit = dereference(prefixed_version)
if tag_commit:
    # The tag already exists: it must point at HEAD or the release is stale.
    assert tag_commit == dereference("HEAD")
else:
    sh(f"git tag {prefixed_version} -m {prefixed_version}", v=True)
sh("git merge-base --is-ancestor origin/master master")
if dereference("master") != dereference("origin/master"):
    sh("git push --follow-tags", v=True)
# Package the extension: manifest, config, and every tracked html/js file
# that is not a test. NOTE(review): `zip` and `file` shadow builtins.
files = ["manifest.json", "config.js"]
for file in sh("git ls-files").splitlines():
    m = lambda p: re.search(p, file)
    if m(r"\.(html|js)$") and not m(r"\btest\b"):
        files.append(file)
with ZipFile("ergometer.zip", "w") as zip:
    for file in files:
        print(f"zipping {file}")
        zip.write(file)
| apache-2.0 | Python | |
1efab4cc16ded69d8da798aea61806890710bd2a | Add script drawing samples from P(z) binary file | marcoviero/Utils | draw_pofz_samples.py | draw_pofz_samples.py | from sys import exit
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import brentq, fsolve
from struct import *
# Read the EAZY .tempfilt binary: four int32 dimensions followed by float64
# arrays (template fluxes, filter wavelengths, redshift grid).
#fileName = '/Users/guochaosun/Desktop/Caltech_OBSCOS/pofz/UVISTA_DR2_master_v2.1.tempfilt'
fileName = '/data/gjsun/catalogs/pofz_calc/UVISTA_DR2_master_v2.1.tempfilt'
with open(fileName, mode='rb') as file: # b is important -> binary
    fileContent = file.read()
NFILT, NTEMP, NZ, NOBJ = unpack("iiii", fileContent[:16])
print NFILT, NTEMP, NZ, NOBJ
tempfilt_flat = unpack("d"*NFILT*NTEMP*NZ, fileContent[16:16+8*NFILT*NTEMP*NZ])
lc_flat = unpack("d"*NFILT, fileContent[16+8*NFILT*NTEMP*NZ: 16+8*NFILT*NTEMP*NZ + 8*NFILT])
zgrid_flat = unpack("d"*NZ, fileContent[16+8*NFILT*NTEMP*NZ + 8*NFILT: 16+8*NFILT*NTEMP*NZ + 8*NFILT + 8*NZ])
zgrid = np.array(list(zgrid_flat))
# Read the .pz binary: chi^2(z) per object, reshaped to (NZ, NOBJ).
#fileName = '/Users/guochaosun/Desktop/Caltech_OBSCOS/pofz/UVISTA_DR2_master_v2.1.pz'
fileName = '/data/gjsun/catalogs/pofz_calc/UVISTA_DR2_master_v2.1.pz'
with open(fileName, mode='rb') as file: # b is important -> binary
    fileContent = file.read()
NZ, NOBJ = unpack("ii", fileContent[:8])
chi2fit_flat = unpack("d"*NZ*NOBJ, fileContent[8:8*NZ*NOBJ+8])
chi2fit_arr = np.array(list(chi2fit_flat)).reshape(NOBJ,NZ)
chi2fit_arr = chi2fit_arr.T
pz = chi2fit_arr
# Convert chi^2 to a likelihood P(z) ~ exp(-chi^2 / 2).
pz = np.exp(-0.5*pz)
# NOTE(review): every column is normalized by the integral of column 0
# (pz[:,0]); a per-galaxy normalization was presumably intended — confirm.
pz_final = pz/np.trapz(pz[:,0], zgrid)
# Seed random numbers
rand = np.random.RandomState(42)
dim = 100
# Define a z_pert array
# z_pert_arr = np.zeros((NOBJ, dim))
# NOTE(review): file_to_write is never closed; rely on interpreter exit.
file_to_write = open('/data/gjsun/catalogs/pofz_calc/z_pert_array.dat', 'w+')
for igal in range(NOBJ):
#for igal in range(10):
    xpts = zgrid
    ypts = pz_final[:,igal]
    ncounts = xpts.shape[0]
    pdf_norm = ypts/np.sum(ypts)
    # Build the cumulative distribution and smooth it with a spline so it
    # can be inverted by root-finding below.
    cpdf = np.zeros(ncounts+1)
    for i in range(1,ncounts+1):
        cpdf[i] = cpdf[i-1] + pdf_norm[i-1]
    long_xpts = np.append(xpts, [xpts[-1]*2-xpts[-2]])
    sm_cpdf = InterpolatedUnivariateSpline(long_xpts, cpdf)
    def sm_cpdf_filt(x):
        # Clamp the spline into (0, 1) so brentq's bracket is valid.
        return np.minimum(np.maximum(sm_cpdf(x),1.0E-9),1.-1.0E-9)
    def get_new_x(prob_in, x_data):
        # Invert the CDF: find z such that CDF(z) == prob_in.
        f_to_solve = lambda x: sm_cpdf_filt(x) - prob_in
        #f_to_solve = lambda x: sm_cpdf(x) - prob_in
        #print 'x_data =', x_data
        try:
            soln = brentq(f_to_solve, x_data[0]-0.001, x_data[-1]+0.1)
            return soln
        except:
            # Fall back to the P(z) mode when root-finding fails.
            #print prob_in
            #plt.plot(xpts, pdf_norm, 'r-')
            #plt.show()
            #print sm_cpdf_filt(x_data[0]-0.001), sm_cpdf_filt(x_data[-1]+0.1)
            #print f_to_solve(x_data[0]-0.001), f_to_solve(x_data[-1]+0.1)
            print '------------ FAULTY ------------'
            print 'Value Returned: ', xpts[np.argmax(pdf_norm)]
            return xpts[np.argmax(pdf_norm)]
    # Draw `dim` redshift samples per galaxy via inverse-transform sampling.
    prob_in = rand.uniform(1.0E-2,9.9E-1,dim)
    for j in range(dim):
        #z_pert_arr[igal, j] = get_new_x(prob_in[j], xpts)
        file_to_write.write( '%.5f\t' % get_new_x(prob_in[j], xpts) )
    file_to_write.write('\n')
    #samples = get_new_x(prob_in, xpts)
    #z_pert_arr[igal, :] = samples
    if igal%1000 == 0:
        print 'DONE %d galaxies!' % igal
    #weights = np.ones_like(samples)/float(len(samples))
    #plt.hist(samples, bins=20, weights=weights)
    #plt.plot(xpts, pdf_norm, 'r-')
    #plt.xlim([0.0,1.0])
    #plt.show()
exit(0)
3ac53be481ec0b6465420d4a223378263f87475c | add test client | surendrakopera/ssp_server | test/client.py | test/client.py | import json
import socket
import ssl
import struct
import logging
import time
import sys
from kutils.ksonsocket import KSONClientConnection
from threading import Thread
def _client_default_handler(json_connection, msg):
    # Default message callback: echo the received message (Python 2 print).
    print msg
def main(argv):
    """Connect to a local KSON server and send a login message every second."""
    client_connection = KSONClientConnection(port=8080, secure=False, handler=_client_default_handler)
    client_connection.connect()
    client_connection.start()
    import time  # NOTE(review): redundant — `time` is imported at module level
    # Loop forever, pushing the same hard-coded credentials once per second.
    while True:
        client_connection.send_msg(
            {
                "username" : "suren",
                "password" : "password",
            }
        )
        time.sleep(1)
if __name__ == "__main__":
main(sys.argv[1:])
| bsd-3-clause | Python | |
53f54b114d23e5aa5e19aebc145944156d7ae87c | add example file starting to summarize an LRAUV log like the old review scripts | bluesquall/okeanidanalysis | examples/review-scripts/log-summary.py | examples/review-scripts/log-summary.py | #!/usr/bin/env python
import matplotlib.pyplot as plt
import okeanidanalysis as oa
def main(logfile, verbose=0):
    """Summarize an LRAUV log with a Monterey Bay track map and a vplane plot.

    logfile: either a path string or an open file object (anything with a
        .name attribute, e.g. the argparse.FileType value used in __main__).
    verbose: accepted for CLI compatibility; currently unused.
    """
    # isinstance (rather than a `type(...) is` check) also accepts str
    # subclasses; open-file arguments are reduced to their path.
    if isinstance(logfile, str):
        s = oa.logs.OkeanidLog(logfile)
    else:
        s = oa.logs.OkeanidLog(logfile.name)
    map_fig = plt.figure()
    mbm = oa.maps.MontereyBay(resolution='h')
    mbm.drawdefault()
    s.map_track(mbm, 'universal', 'k-', start_stop_marker=True)
    vplane_fig = plt.figure()
    s.vplane(fig=vplane_fig)
    plt.show()
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='plot summary of LRAUV log')
    # NOTE(review): the bound name is the argparse Action object, not the
    # file; it is never used (the parsed value arrives via args.logfile).
    logfile = parser.add_argument('logfile', type=argparse.FileType('rb'),
        help='the log to summarize in plots')
    parser.add_argument('-V', '--version', action='version',
        version='%(prog)s 0.0.1',
        help='display version information and exit')
    parser.add_argument('-v', '--verbose', action='count', default=0,
        help='display verbose output')
    args = parser.parse_args()
    # Forward the parsed options (logfile, verbose) as keyword arguments.
    main(**args.__dict__)
| mit | Python | |
237df47a31240ebf35a5de304bcc6c73e8f921b9 | Create merge_sort_fastest.py | TheAlgorithms/Python | sorts/merge_sort_fastest.py | sorts/merge_sort_fastest.py | '''
Python implementation of merge sort algorithm.
Repeatedly extracts the minimum and maximum of the remaining items,
building the sorted result from both ends of the list.
Best Case Scenario : O(n^2)
Worst Case Scenario : O(n^2)
'''
def merge_sort(LIST):
    """Return LIST's elements in ascending order.

    Repeatedly peels the current minimum and maximum off the input, growing
    a sorted prefix and a sorted suffix that are joined at the end. LIST is
    consumed (emptied down to at most its middle element) in the process.

    The original loop condition (`LIST.index(a) == LIST.index(b) and
    len(LIST) <= 2`) made the loop run never — returning [] for most inputs —
    or remove a lone element twice, raising ValueError.
    """
    start = []
    end = []
    # Peel min/max pairs until at most one element remains.
    while len(LIST) > 1:
        a = min(LIST)
        b = max(LIST)
        start.append(a)
        end.append(b)
        LIST.remove(a)
        LIST.remove(b)
    # An odd-length input leaves its median element behind.
    start.extend(LIST)
    end.reverse()
    return start + end
| mit | Python | |
ec5337dc7d8fe4f642447e9b438818b61cdc74c2 | call docker logs on context.failed=true | bren3582/fabric,adecaro/fabric,binhn/obc-peer,gongsu832/fabric,bren3582/fabric,jjjjibm/fabric,isghe/obc-peer,jjjjibm/fabric,hyperledger-archives/fabric,andresgaragiola/fabric,ariccy/fabric,gennadylaventman/fabric,popldo/fabric,gennadylaventman/fabric,itp4IBM/fabric,tuand27613/fabric,ariccy/fabric,andresgaragiola/fabric,srderson/fabric,andresgaragiola/fabric,bburgdave/fabric,angrbrd/fabric,rameshthoomu/obc-peer,bren3582/fabric,isghe/obc-peer,gongsu832/obc-peer,JonathanLevi/fabric,bburgdave/fabric,tuand27613/fabric,popldo/fabric,angrbrd/fabric,itp4IBM/fabric,isghe/obc-peer,gongsu832/fabric,angrbrd/fabric,srderson/fabric,popldo/fabric,srderson/fabric,gromeroar/fabric,ariccy/fabric,bburgdave/fabric,murali-katipalli-dtcc/fabric,gromeroar/fabric,itp4IBM/fabric,gennadylaventman/fabric,hyperledger-archives/fabric,ariccy/fabric,gongsu832/fabric,gennadylaventman/fabric,ahuachen/fabric,JonathanLevi/fabric,tuand27613/fabric,bren3582/fabric,popldo/fabric,ahuachen/fabric,gromeroar/fabric,popldo/fabric,noamher/obc-peer,gromeroar/fabric,bren3582/fabric,JonathanLevi/fabric,sumanair/obc-peer,ariccy/fabric,jjjjibm/fabric,adecaro/fabric,andresgaragiola/fabric,gongsu832/fabric,murali-katipalli-dtcc/fabric,ahuachen/fabric,tuand27613/fabric,angrbrd/fabric,hyperledger-archives/fabric,itp4IBM/fabric,hyperledger-archives/fabric,srderson/fabric,JonathanLevi/fabric,sumanair/obc-peer,ahuachen/fabric,gongsu832/obc-peer,andresgaragiola/fabric,murali-katipalli-dtcc/fabric,openblockchain/obc-peer,binhn/obc-peer,tuand27613/obc-peer,murali-katipalli-dtcc/fabric,gongsu832/obc-peer,ariccy/fabric,ahuachen/fabric,adecaro/fabric,adecaro/fabric,andresgaragiola/fabric,rameshthoomu/obc-peer,noamher/obc-peer,murali-katipalli-dtcc/fabric,gennadylaventman/fabric,JonathanLevi/fabric,ariccy/fabric,bren3582/fabric,popldo/fabric,murali-katipalli-dtcc/fabric,openblockchain/obc-peer,tuand27613/obc-peer,hyperledger-archives/fabric,ang
rbrd/fabric,itp4IBM/fabric,tuand27613/obc-peer,openblockchain/obc-peer,jjjjibm/fabric,JonathanLevi/fabric,gennadylaventman/fabric,srderson/fabric,noamher/obc-peer,itp4IBM/fabric,ahuachen/fabric,tuand27613/fabric,adecaro/fabric,jjjjibm/fabric,ahuachen/fabric,srderson/fabric,itp4IBM/fabric,rameshthoomu/obc-peer,jjjjibm/fabric,binhn/obc-peer,srderson/fabric,bburgdave/fabric,bren3582/fabric,adecaro/fabric,angrbrd/fabric,angrbrd/fabric,tuand27613/fabric,adecaro/fabric,isghe/obc-peer,sumanair/obc-peer,murali-katipalli-dtcc/fabric,isghe/obc-peer,tuand27613/fabric,gromeroar/fabric,gennadylaventman/fabric,andresgaragiola/fabric,JonathanLevi/fabric,gromeroar/fabric,gromeroar/fabric,bburgdave/fabric,hyperledger-archives/fabric,popldo/fabric,hyperledger-archives/fabric,jjjjibm/fabric | openchain/peer/bddtests/environment.py | openchain/peer/bddtests/environment.py |
import subprocess
from steps.bdd_test_util import cli_call
def after_scenario(context, scenario):
    """behave hook: dump docker logs for failed scenarios, then tear down."""
    if context.failed:
        # One "<containerName>_<scenario_name>.log" file per composed container.
        file_suffix = "_" + scenario.name.replace(" ", "_") + ".log"
        for containerData in context.compose_containers:
            with open(containerData.containerName + file_suffix, "w+") as logfile:
                sys_rc = subprocess.call(["docker", "logs", containerData.containerName], stdout=logfile, stderr=logfile)
                if sys_rc !=0 :
                    print("****** cannot get logs for {0}. Docker rc = {1}".format(containerData.containerName,sys_rc))
    # The @doNotDecompose tag keeps the environment running for debugging.
    if 'doNotDecompose' in scenario.tags:
        print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
    else:
        if 'compose_yaml' in context:
            print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose", "-f", context.compose_yaml, "kill"], expect_success=True)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker-compose", "-f", context.compose_yaml, "rm","-f"], expect_success=True)
            # now remove any other containers (chaincodes)
            context.compose_output, context.compose_error, context.compose_returncode = \
                cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
            if context.compose_returncode == 0:
                # Remove each container, forcibly: these include chaincode
                # containers that docker-compose does not manage.
                for containerId in context.compose_output.splitlines():
                    #print("docker rm {0}".format(containerId))
                    context.compose_output, context.compose_error, context.compose_returncode = \
                        cli_call(context, ["docker", "rm", "-f", containerId], expect_success=True)
|
from steps.bdd_test_util import cli_call
def after_scenario(context, scenario):
if context.failed:
filename = scenario.name.replace(" ", "_")
for containerData in context.compose_containers:
logfile = containerData.containerName + filename
print("********** Test failed. Docker container logs written to {0}".format(logfile))
if 'doNotDecompose' in scenario.tags:
print("Not going to decompose after scenario {0}, with yaml '{1}'".format(scenario.name, context.compose_yaml))
else:
if 'compose_yaml' in context:
print("Decomposing with yaml '{0}' after scenario {1}, ".format(context.compose_yaml, scenario.name))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "kill"], expect_success=True)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker-compose", "-f", context.compose_yaml, "rm","-f"], expect_success=True)
# now remove any other containers (chaincodes)
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "ps", "-qa"], expect_success=True)
if context.compose_returncode == 0:
# Remove each container
for containerId in context.compose_output.splitlines():
#print("docker rm {0}".format(containerId))
context.compose_output, context.compose_error, context.compose_returncode = \
cli_call(context, ["docker", "rm", containerId], expect_success=True)
| apache-2.0 | Python |
03c0b3e679e08f857a78479d73b5335ed2af46f0 | Create InputDataIO.py | cmsc471-swagbag/ScoreAttackBot | InputDataIO.py | InputDataIO.py | import win32api, win32con
import actionFunctions
#Write Function for AI
#Text format:
# [goodness number] [distance from ground] [distance from pipe]
#readFromFile returns the values of the file's first line as a list of strings
def writeToFile(goodness, distFromGround, distFromPipe):
    """Overwrite BotInputData.txt with one "<goodness> <ground> <pipe>\\n" record.

    Fixes from the committed version: the missing colon on the def line
    (SyntaxError) and the `outputFIle` typo (NameError); the truthiness
    check is dropped because open() raises rather than returning a falsy
    value, and `with` guarantees the handle is closed.
    """
    with open("BotInputData.txt", 'w') as outputFile:
        outputFile.write(str(goodness))
        outputFile.write(' ')
        outputFile.write(str(distFromGround))
        outputFile.write(' ')
        outputFile.write(str(distFromPipe))
        outputFile.write('\n')
def readFromFile():
    """Return the first line of BotInputData.txt split on whitespace.

    Returns None when the file is empty. Fixes from the committed version:
    the missing colon on the def line (SyntaxError) and the file handle
    never being closed (now handled by `with`).
    """
    with open("BotInputData.txt") as inputFile:
        for line in inputFile:
            return line.split()
    return None
| mit | Python | |
e63b4b62cd5cd957da11e6c23bac1844d2a414a3 | Create main.py | clccmh/pomodoro | pomodor/main.py | pomodor/main.py | #!/usr/bin/env python
__version__ = '0.0.1'
# The script previously assigned __name__ = 'pomodoro', clobbering the
# interpreter-provided value; that made the `if __name__ == '__main__'`
# guard at the bottom of the file always false, so the CLI never ran.
# Store the program name under a distinct attribute instead.
__title__ = 'pomodoro'
import click
import progressbar
import time
@click.command()
@click.option('--minutes', default=25, help='Number of minutes, default 25.')
def pomodoro(minutes):
    """Show a progress bar that fills over `minutes` minutes (one tick/second)."""
    bar = progressbar.ProgressBar(widgets=[
        progressbar.Bar(),
    ])
    # minutes*60 one-second sleeps; the bar redraws on every iteration.
    for i in bar(range(minutes*60)):
        time.sleep(1)
if __name__ == '__main__':
pomodoro()
| mit | Python | |
5d7c18219dfe3595a07901e7e56f69068ef7d586 | add vectorization example | kohr-h/odl,odlgroup/odl,kohr-h/odl,aringh/odl,odlgroup/odl,aringh/odl | examples/vectorization.py | examples/vectorization.py | # Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Example showing how to use vectorization of FunctionSpaceVector's."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
import odl
import timeit
def performace_example():
    # NOTE(review): name is misspelled ("performace") but kept, because the
    # __main__ block below calls it under exactly this name.
    """Time a vectorized function-space element against a non-vectorized one."""
    # Create a function space
    X = odl.FunctionSpace(odl.Interval(0, 1))
    # Functions default to vectorized
    f_vec = X.element(lambda x: x**2)
    # If 'vectorized=False' is used, odl automatically vectorizes
    f_novec = X.element(lambda x: x**2, vectorized=False)
    # Example of runtime, expect vectorized to be much faster
    points = np.linspace(0, 1, 10000)
    print('Vectorized runtime: {:5f}'
          ''.format(timeit.timeit(lambda: f_vec(points), number=100)))
    print('Non-vectorized runtime: {:5f}'
          ''.format(timeit.timeit(lambda: f_novec(points), number=100)))
def numba_example():
    """Time a numba-vectorized element against a plain (non-vectorized) one.

    BUG FIX: the two runtime labels were swapped -- ``f_default`` (built with
    ``vectorized=False``) was printed as "Vectorized" and ``f_numba`` as
    "Non-vectorized".  The labels now match the callables being timed.
    """
    # Some functions are not easily vectorized,
    # here we can use numba to improve performance.
    try:
        import numba
    except ImportError:
        print('Numba not installed, skipping.')
        return

    def myfunc(x):
        "Return a-b if a>b, otherwise return a+b"
        if x[0] > x[1]:
            return x[0] - x[1]
        else:
            return x[0] + x[1]

    my_vectorized_func = numba.vectorize(myfunc)

    # Create functions
    X = odl.FunctionSpace(odl.Rectangle([0, 0], [1, 1]))
    f_default = X.element(myfunc, vectorized=False)
    f_numba = X.element(my_vectorized_func)

    # Create points
    points = odl.uniform_sampling(X.domain, [100, 100]).points().T

    print('Non-vectorized runtime: {:5f}'
          ''.format(timeit.timeit(lambda: f_default(points), number=100)))
    print('Vectorized runtime: {:5f}'
          ''.format(timeit.timeit(lambda: f_numba(points), number=100)))
if __name__ == '__main__':
    # Run both demos when executed as a script.
    print('Running performance example')
    performace_example()
    print('Running numba example')
    numba_example()
| mpl-2.0 | Python | |
6aaf8b3843b0da2083b41519b273dfe2a5f43172 | Add experimental structarray code | oneklc/dimod,oneklc/dimod | scratch.py | scratch.py | import unittest
import numpy as np
def data_struct_array(sample, **vectors):  # data_struct_array(sample, *, energy, **vectors):
    """Combine samples and per-sample data into a numpy structured array.

    Args:
        sample (array_like):
            Samples, in any form that can be converted into a numpy array.

        energy (array_like, required):
            Required keyword argument. Energies, in any form that can be
            converted into a numpy 1-dimensional array.

        **kwargs (array_like):
            Other per-sample data, in any form that can be converted into a
            numpy array.

    Returns:
        :obj:`~numpy.ndarray`: A numpy structured array. Has fields
        ['sample', 'energy', **kwargs]

    """
    # BUG FIX: the original tested ``if not sample:``, which raises "truth
    # value of an array is ambiguous" whenever the caller passes a numpy array
    # with more than one element -- even though the docstring promises any
    # array_like is accepted.  Convert first and inspect the result instead.
    sample = np.asanyarray(sample, dtype=np.int8)
    if sample.size == 0:
        # normalize the empty case to 0 samples of 0 variables
        sample = np.zeros((0, 0), dtype=np.int8)
    elif sample.ndim < 2:
        # a single flat sample becomes a 1 x num_variables matrix
        sample = np.expand_dims(sample, 0)

    num_samples, num_variables = sample.shape

    datavectors = {}
    datatypes = [('sample', np.dtype(np.int8), (num_variables,))]

    for kwarg, vector in vectors.items():
        datavectors[kwarg] = vector = np.asanyarray(vector)

        if vector.shape[0] != num_samples:
            msg = ('{} and sample have a mismatched shape {}, {}. They must have the same size '
                   'in the first axis.').format(kwarg, vector.shape, sample.shape)
            raise ValueError(msg)

        datatypes.append((kwarg, vector.dtype, vector.shape[1:]))

    if 'energy' not in datavectors:
        # consistent error with the one thrown in python3
        raise TypeError('data_struct_array() needs keyword-only argument energy')
    elif datavectors['energy'].shape != (num_samples,):
        raise ValueError('energy should be a vector of length {}'.format(num_samples))

    data = np.rec.array(np.zeros(num_samples, dtype=datatypes))
    data['sample'] = sample

    for kwarg, vector in datavectors.items():
        data[kwarg] = vector

    return data
class TestSamplesStructuredArray(unittest.TestCase):
    """Unit tests for data_struct_array: shapes, fields, and error cases."""

    def test_empty(self):
        # no samples at all -> zero-length record array with both fields
        data = data_struct_array([], energy=[])
        self.assertEqual(data.shape, (0,))
        self.assertEqual(len(data.dtype.fields), 2)
        self.assertIn('sample', data.dtype.fields)
        self.assertIn('energy', data.dtype.fields)

    def test_single_sample(self):
        # a flat sample list is promoted to a single row
        data = data_struct_array([-1, 1, -1], energy=[1.5])
        self.assertEqual(data.shape, (1,))
        self.assertEqual(len(data.dtype.fields), 2)
        self.assertIn('sample', data.dtype.fields)
        self.assertIn('energy', data.dtype.fields)

    def test_single_sample_nested(self):
        data = data_struct_array([[-1, 1, -1]], energy=[1.5])
        self.assertEqual(data.shape, (1,))
        self.assertEqual(len(data.dtype.fields), 2)
        self.assertIn('sample', data.dtype.fields)
        self.assertIn('energy', data.dtype.fields)

    def test_multiple_samples(self):
        data = data_struct_array([[-1, +1, -1], [+1, -1, +1]], energy=[1.5, 4.5])
        self.assertEqual(data.shape, (2,))
        self.assertEqual(len(data.dtype.fields), 2)
        self.assertIn('sample', data.dtype.fields)
        self.assertIn('energy', data.dtype.fields)

    def test_extra_data_vector(self):
        # extra keyword vectors become additional fields
        data = data_struct_array([[-1, +1, -1], [+1, -1, +1]], energy=[1.5, 4.5], occurrences=np.asarray([1, 2]))
        self.assertEqual(data.shape, (2,))
        self.assertEqual(len(data.dtype.fields), 3)
        self.assertIn('sample', data.dtype.fields)
        self.assertIn('energy', data.dtype.fields)
        self.assertIn('occurrences', data.dtype.fields)

    def test_data_vector_higher_dimension(self):
        # per-sample vectors may themselves be multi-dimensional
        data = data_struct_array([[-1, +1, -1], [+1, -1, +1]], energy=[1.5, 4.5], occurrences=[[0, 1], [1, 2]])
        self.assertEqual(data.shape, (2,))
        self.assertEqual(len(data.dtype.fields), 3)
        self.assertIn('sample', data.dtype.fields)
        self.assertIn('energy', data.dtype.fields)
        self.assertIn('occurrences', data.dtype.fields)

    def test_mismatched_vector_samples_rows(self):
        # energy length must equal the number of samples
        with self.assertRaises(ValueError):
            data_struct_array([[-1, +1, -1], [+1, -1, +1]], energy=[1.5, 4.5, 5.6])

    def test_protected_sample_kwarg(self):
        # passing sample both positionally and by keyword is a TypeError
        with self.assertRaises(TypeError):
            data_struct_array([[-1, +1, -1], [+1, -1, +1]], energy=[1.5, 4.5], sample=[5, 6])

    def test_missing_kwarg_energy(self):
        with self.assertRaises(TypeError):
            data_struct_array([[-1, +1, -1], [+1, -1, +1]], occ=[5, 6])


if __name__ == '__main__':
    unittest.main()
| apache-2.0 | Python | |
e40ddb72243faaf5a754537ea11d0393b02b27f2 | Create rest-mqtt.py | deneba/rest-mqtt | rest-mqtt.py | rest-mqtt.py | # Title: Script to catch rest calls for home automation and forward to MQTT (rest-mqtt)
# Author: Dr. Asif Rana (aiqbalrana@gmail.com)
# Date: 20160525
from flask import Flask
from flask import request
from flask_restful import Resource, Api
import paho.mqtt.publish as publish
import paho.mqtt.client as mqtt
import time
import sys
import ConfigParser
import json
# Mapping of REST "skey" values to device command strings, loaded from disk.
config = ConfigParser.ConfigParser()
config.read("code-list.txt")
# Topic on which the broker or other mqtt clients are listening
topic = "/home/command/switchthings"
# Flask application plus its flask-restful wrapper.
app = Flask(__name__)
api = Api(app)
class EasyHomeSwitch(Resource):
    """REST endpoint: translate a POSTed switch request into an MQTT publish."""

    def post(self):
        try:
            rdata = request.data
            kdata = rdata.replace("\n","")
            # NOTE(review): the dumps/loads round trip below is a no-op on a
            # string; the actual parsing is done by eval().  SECURITY: eval()
            # on a network-supplied payload can execute arbitrary code --
            # replace with json.loads(rdata) once the payload format (JSON vs.
            # Python literal) is confirmed.
            kdata = json.dumps(kdata)
            kdata = json.loads(kdata)
            kdata = eval(str(kdata))
            # REST key value pair. The used key here is "skey"
            topicdata = kdata["skey"].strip()
            command = config.get('EasyHomeList', str(topicdata))
            client = mqtt.Client()
            # Host address of mqtt (e.g., mosquitto broker)
            client.connect("192.11.3.212")
            client.publish(topic, command)
            client.disconnect()
            print("Time:" + time.strftime("%d/%b/%y %H%M%S",time.localtime()) + ", Sent => Topic:" + topic + ", Data: " + command);
            return {'status': 'success'}
        except Exception,e:
            # Python 2 except syntax; errors are only logged, never re-raised.
            print("Error: " + str(e))
# Register the endpoint and run the development server on all interfaces.
api.add_resource(EasyHomeSwitch, '/EasyHomeSwitch')
if __name__ == '__main__':
    # Bind to any ip on this host
    app.run(debug=False, host='0.0.0.0')
| apache-2.0 | Python | |
da5949e020709e38f88d923dce5ab656696a0bd7 | Copy code from cluster mixin | dotsdl/msmbuilder,brookehus/msmbuilder,msultan/msmbuilder,stephenliu1989/msmbuilder,rmcgibbo/msmbuilder,peastman/msmbuilder,Eigenstate/msmbuilder,brookehus/msmbuilder,cxhernandez/msmbuilder,mpharrigan/mixtape,rafwiewiora/msmbuilder,msmbuilder/msmbuilder,msultan/msmbuilder,msmbuilder/msmbuilder,dotsdl/msmbuilder,stephenliu1989/msmbuilder,msmbuilder/msmbuilder,rafwiewiora/msmbuilder,stephenliu1989/msmbuilder,rafwiewiora/msmbuilder,rmcgibbo/msmbuilder,brookehus/msmbuilder,peastman/msmbuilder,rmcgibbo/msmbuilder,msultan/msmbuilder,dotsdl/msmbuilder,msmbuilder/msmbuilder,rafwiewiora/msmbuilder,mpharrigan/mixtape,cxhernandez/msmbuilder,Eigenstate/msmbuilder,Eigenstate/msmbuilder,dr-nate/msmbuilder,peastman/msmbuilder,dr-nate/msmbuilder,peastman/msmbuilder,mpharrigan/mixtape,peastman/msmbuilder,cxhernandez/msmbuilder,Eigenstate/msmbuilder,stephenliu1989/msmbuilder,rmcgibbo/msmbuilder,dr-nate/msmbuilder,msultan/msmbuilder,Eigenstate/msmbuilder,cxhernandez/msmbuilder,mpharrigan/mixtape,rafwiewiora/msmbuilder,dotsdl/msmbuilder,brookehus/msmbuilder,dr-nate/msmbuilder,brookehus/msmbuilder,msultan/msmbuilder,msmbuilder/msmbuilder,mpharrigan/mixtape,cxhernandez/msmbuilder,dr-nate/msmbuilder | Mixtape/pca.py | Mixtape/pca.py | # Author: Matthew Harrigan <matthew.p.harrigan@gmail.com>
# Contributors:
# Copyright (c) 2014, Stanford University and the Authors
# All rights reserved.
#
# Mixtape is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Mixtape. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division, absolute_import
from sklearn import decomposition
import numpy as np
from six import PY2
__all__ = ['PCA']
class MultiSequenceDecompositionMixin(object):
    # The API for the scikit-learn decomposition object is, in fit(), that
    # they take a single 2D array of shape (n_data_points, n_features).
    #
    # For clustering a collection of timeseries, we need to preserve
    # the structure of which data_point came from which sequence. If
    # we concatenate the sequences together, we lose that information.
    #
    # This mixin is basically a little "adaptor" that changes fit()
    # so that it accepts a list of sequences. Its implementation
    # concatenates the sequences, calls the superclass fit(), and
    # then splits the labels_ back into the sequenced form.
    #
    # NOTE(review): the docstrings and the use of super().predict() below
    # were copied from the clustering mixin; sklearn decomposition
    # estimators generally expose transform(), not predict() -- confirm the
    # intended superclass methods before relying on predict()/transform().

    def fit(self, sequences):
        """Fit the model on a list of sequences.

        Parameters
        ----------
        sequences : list of array-like, each of shape [sequence_length, n_features]
            A list of multivariate timeseries. Each sequence may have
            a different length, but they all must have the same number
            of features.

        Returns
        -------
        self
        """
        # Python 2 needs explicit super() arguments.
        s = super(MultiSequenceDecompositionMixin, self) if PY2 else super()
        s.fit(self._concat(sequences))
        return self

    def _concat(self, sequences):
        # Remember each sequence's length so _split() can undo the merge.
        self.__lengths = [len(s) for s in sequences]
        if len(sequences) > 0 and isinstance(sequences[0], np.ndarray):
            concat = np.concatenate(sequences)
        else:
            # if the input sequences are not numpy arrays, we need to guess
            # how to concatenate them. this operation below works for mdtraj
            # trajectories (which is the use case that I want to be sure to
            # support), but in general the python container protocol doesn't
            # give us a generic way to make sure we merged sequences
            concat = sequences[0].join(sequences[1:])
        assert sum(self.__lengths) == len(concat)
        return concat

    def _split(self, concat):
        # Slice the concatenated result back into per-sequence chunks.
        return [concat[cl - l: cl] for (cl, l) in
                zip(np.cumsum(self.__lengths), self.__lengths)]

    def predict(self, sequences):
        """Apply the superclass ``predict`` to each sequence separately.

        (See the class-level NOTE: for decomposition estimators the
        superclass method may be ``transform`` rather than ``predict``.)

        Parameters
        ----------
        sequences : list of array-like, each of shape [sequence_length, n_features]
            A list of multivariate timeseries. Each sequence may have
            a different length, but they all must have the same number
            of features.

        Returns
        -------
        Y : list of arrays, each of shape [sequence_length,]
            Per-sequence output of the superclass ``predict``.
        """
        s = super(MultiSequenceDecompositionMixin, self) if PY2 else super()
        predictions = []
        for sequence in sequences:
            predictions.append(s.predict(sequence))
        return predictions

    def fit_predict(self, sequences):
        """Fit on the sequences, then return per-sequence predictions.

        Parameters
        ----------
        sequences : list of array-like, each of shape [sequence_length, n_features]
            A list of multivariate timeseries. Each sequence may have
            a different length, but they all must have the same number
            of features.

        Returns
        -------
        Y : list of ndarray, each of shape [sequence_length, ]
            Per-sequence predictions.
        """
        s = super(MultiSequenceDecompositionMixin, self) if PY2 else super()
        if hasattr(s, 'fit_predict'):
            labels = s.fit_predict(sequences)
        else:
            self.fit(sequences)
            labels = self.predict(sequences)
        if not isinstance(labels, list):
            # superclass returned one concatenated array; re-sequence it
            labels = self._split(labels)
        return labels

    def transform(self, sequences):
        """Alias for predict"""
        return self.predict(sequences)

    def fit_transform(self, sequences):
        """Alias for fit_predict"""
        return self.fit_predict(sequences)
class PCA(MultiSequenceDecompositionMixin, decomposition.PCA):
    """scikit-learn PCA that accepts a list of timeseries via the mixin."""
    pass
| lgpl-2.1 | Python | |
785e37a65ee8bc311326e3c2db90a273858a345c | Prepare for rally-openstack package | openstack/rally,yeming233/rally,openstack/rally,openstack/rally,openstack/rally,yeming233/rally | rally/plugins/__init__.py | rally/plugins/__init__.py | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import decorator
from rally.common.plugin import discover
PLUGINS_LOADED = False
def load():
    """Import every rally plugin module; idempotent via PLUGINS_LOADED."""
    global PLUGINS_LOADED
    if not PLUGINS_LOADED:
        discover.import_modules_from_package("rally.deployment.engines")
        discover.import_modules_from_package("rally.deployment.serverprovider")
        discover.import_modules_from_package("rally.plugins.common")
        try:
            import rally_openstack  # noqa
        except ImportError:
            # print warnings when rally_openstack will be released
            # fall back to the in-tree openstack plugins until the separate
            # rally_openstack package exists
            discover.import_modules_from_package("rally.plugins.openstack")
        discover.import_modules_from_package("rally.plugins.workload")
        discover.import_modules_by_entry_point()
        # plugins dropped into well-known directories are loaded last
        discover.load_plugins("/opt/rally/plugins/")
        discover.load_plugins(os.path.expanduser("~/.rally/plugins/"))
        PLUGINS_LOADED = True
@decorator.decorator
def ensure_plugins_are_loaded(f, *args, **kwargs):
    """Decorator: guarantee plugins are loaded before *f* runs."""
    load()
    return f(*args, **kwargs)
| # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import decorator
from rally.common.plugin import discover
PLUGINS_LOADED = False
def load():
global PLUGINS_LOADED
if not PLUGINS_LOADED:
discover.import_modules_from_package("rally.deployment.engines")
discover.import_modules_from_package("rally.deployment.serverprovider")
discover.import_modules_from_package("rally.plugins")
discover.import_modules_by_entry_point()
discover.load_plugins("/opt/rally/plugins/")
discover.load_plugins(os.path.expanduser("~/.rally/plugins/"))
PLUGINS_LOADED = True
@decorator.decorator
def ensure_plugins_are_loaded(f, *args, **kwargs):
load()
return f(*args, **kwargs)
| apache-2.0 | Python |
d31528540950cd8b5e8a398b50bc59ea98271d26 | Create overlay.py | sonus89/FIPER,sonus89/FIPER,sonus89/FIPER | client/opencv/pic_on_pic/overlay.py | client/opencv/pic_on_pic/overlay.py | import cv2
import numpy as np
# Load two images
img1 = cv2.imread('1.jpg')
img2 = cv2.imread('2.png')
# I want to put logo on top-left corner, So I create a ROI
rows,cols,channels = img2.shape
roi = img1[0:rows, 0:cols ]
# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)
# pixels brighter than 10 become part of the logo mask
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
# Put logo in ROI and modify the main image
dst = cv2.add(img1_bg,img2_fg)
img1[0:rows, 0:cols ] = dst
# Show the composited result until a key is pressed.
cv2.imshow('res',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| mit | Python | |
74ffdab0c54f332b8787aea04582ee7312a34b4c | Remove control_id column from assessments table | selahssea/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core | src/ggrc/migrations/versions/20161123124848_1f5c3e0025da_remove_control_id_column_from_.py | src/ggrc/migrations/versions/20161123124848_1f5c3e0025da_remove_control_id_column_from_.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Remove control_id column from assessments table
Create Date: 2016-11-23 12:48:48.942528
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
# revision identifiers, used by Alembic.
revision = '1f5c3e0025da'
down_revision = '4afe69ce3c38'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Lightweight table stub so alembic can bulk-insert relationship rows.
    relationships = table(
        "relationships",
        column('source_id', sa.Integer),
        column('source_type', sa.String),
        column('destination_id', sa.Integer),
        column('destination_type', sa.String),
        column('created_at', sa.DateTime),
        column('updated_at', sa.DateTime)
    )
    connection = op.get_bind()
    # Assessments that reference a control but have no Relationship row (in
    # either direction) linking them to that control.
    rows_to_add = connection.execute(
        """
SELECT id, control_id from assessments
WHERE control_id IS NOT NULL
AND id NOT IN (
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.control_id
AND r.destination_id=a.id
WHERE r.source_type='Control'
AND r.destination_type='Assessment'
UNION
SELECT a.id from relationships AS r
INNER JOIN assessments AS a
ON r.source_id=a.id
AND r.destination_id=a.control_id
WHERE r.destination_type='Control'
AND r.source_type='Assessment'
);
"""
    )
    now = datetime.now()
    # One Assessment -> Control relationship per orphaned pair found above.
    op.bulk_insert(relationships, [
        {'source_id': assessment_id,
         'source_type': 'Assessment',
         'destination_id': control_id,
         'destination_type': 'Control',
         'created_at': now,
         'updated_at': now} for (assessment_id, control_id) in rows_to_add]
    )
    # The FK must be dropped before the column itself can go.
    op.drop_constraint(
        "fk_control_control_assessment",
        "assessments",
        "foreignkey",
    )
    op.drop_column("assessments", "control_id")
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Restore the column and its foreign key; the relationships created in
    # upgrade() are left in place.
    op.add_column(
        "assessments",
        sa.Column('control_id', sa.Integer, nullable=True)
    )
    op.create_foreign_key(
        "fk_control_control_assessment", "assessments",
        "controls", ["control_id"], ["id"]
    )
| apache-2.0 | Python | |
7a6fd0ca5951bc1134fd0ac5b0cdbbb9b94fb3d4 | add palindrome exercise | daveagp/websheets,dz0/websheets,daveagp/websheets,dz0/websheets,daveagp/websheets,daveagp/websheets,dz0/websheets,daveagp/websheets,dz0/websheets,dz0/websheets,daveagp/websheets,dz0/websheets | exercises/IsPalindrome.py | exercises/IsPalindrome.py | description = r"""
(Exercise 3.1.13)
A <i>palindrome</i> is a string that reads the same forwards or backwards,
like <tt>"RADAR"</tt> or <tt>"STOOTS"</tt>.
Define a method <tt>isPalindrome</tt>
that takes as input a string and returns true if the
string is a palindrome, and false otherwise. You will need to use
the instance methods <tt>charAt()</tt> and <tt>length()</tt>
from the <a href="http://introcs.cs.princeton.edu/java/11cheatsheet/#String">String API</a>."""
# Java shown to the student; the \[ ... ]\ markers delimit the hidden
# reference-solution region that the student must fill in.
source_code = r"""
public static boolean isPalindrome(String s) {
\[
// it's only necessary to do half the length many checks
for (int i=0; i<s.length()/2; i++) {
// look at ith character from start and end
if (s.charAt(i) != s.charAt(s.length()-i-1))
return false;
}
return true; // everything matched
]\
}
"""

# Grader calls: each line invokes the student's isPalindrome on one input.
tests = r"""
test("isPalindrome", "racecar");
test("isPalindrome", "ferrari");
test("isPalindrome", "foolproof");
test("isPalindrome", "cool");
test("isPalindrome", "rester");
test("isPalindrome", "redder");
test("isPalindrome", "pinker");
test("isPalindrome", "o");
test("isPalindrome", "ok");
test("isPalindrome", "kk");
test("isPalindrome", "joUO9G");
test("isPalindrome", "rt2$77$2tr");
test("isPalindrome", "Qay&2&yaQ");
test("isPalindrome", "");
"""
| agpl-3.0 | Python | |
79d6958670090f052ee0a1b1150364618838ff5e | add dev example settings file | KuwaitNET/cookiecutter-django-cms,KuwaitNET/cookiecutter-django-cms | {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/settings/dev_example.py | {{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/settings/dev_example.py | import os
from .base import *
# Development-only overrides: never use these values in production.
DEBUG = True
ALLOWED_HOSTS = ['*']
DEV = DEBUG
INSTALLED_APPS += ('debug_toolbar',)
# Local sqlite database instead of the production backend.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'arwoad.db',
    }
}
SECRET_KEY = 'devel'
# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 2
AUTH_PASSWORD_VALIDATORS = [] | bsd-2-clause | Python | |
ea0e181659f328198955b07252d09ad2e3da0e42 | change name of setupTS | slac207/cs207project,slac207/cs207project,slac207/cs207project,slac207/cs207project | setupTS.py | setupTS.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup file for timeseries.
This file was generated with PyScaffold 2.5.6, a tool that easily
puts up a scaffold for your new Python project. Learn more under:
http://pyscaffold.readthedocs.org/
"""
import sys
from setuptools import setup
def setup_package():
    """Delegate packaging to PyScaffold, adding sphinx only for doc builds."""
    # Only require sphinx when a documentation command was requested.
    needs_sphinx = {'build_sphinx', 'upload_docs'}.intersection(sys.argv)
    sphinx = ['sphinx'] if needs_sphinx else []
    setup(setup_requires=['six', 'pyscaffold>=2.5a0,<2.6a0'] + sphinx,
          use_pyscaffold=True)


if __name__ == "__main__":
    setup_package()
| mit | Python | |
92d520127e01369be10f777d85e6021a023dec7d | Create scraper.py | azide0x37/modocDB,azide0x37/modocDB,azide0x37/modocDB,azide0x37/modocDB | scraper.py | scraper.py | #modocDB/scraper.py
#Scrapes the offender web search library for offender data
class Scraper:
    """Placeholder for the offender web-search scraper; no behaviour yet."""

    def __init__(self):
        # no state to initialise yet
        pass
dataSet = Scraper()
| mit | Python | |
02831edd80949a474f3923f67addb642f8da0492 | Create randomQuizGenerator.py | PetrosKatri/mypythonscripts | Generating-Random-Quiz-Files/randomQuizGenerator.py | Generating-Random-Quiz-Files/randomQuizGenerator.py | #! python 3
# randomQuizGenerator.py - Creates quizzes with questions and answers in
# random order, along with the answer key.
import random
# The quiz data. Keys are states and values are their capitals.
capitals = {
    'Alabama': 'Montgomery',
    'Alaska': 'Juneau',
    'Arizona': 'Phoenix',
    'Arkansas': 'Little Rock',
    'California': 'Sacramento',
    'Colorado': 'Denver',
    'Connecticut': 'Hartford',
    'Delaware': 'Dover',
    'Florida': 'Tallahassee',
    'Georgia': 'Atlanta',
    'Hawaii': 'Honolulu',
    'Idaho': 'Boise',
    'Illinois': 'Springfield',
    'Indiana': 'Indianapolis',
    'Iowa': 'Des Moines',
    'Kansas': 'Topeka',
    'Kentucky': 'Frankfort',
    'Louisiana': 'Baton Rouge',
    'Maine': 'Augusta',
    'Maryland': 'Annapolis',
    'Massachusetts': 'Boston',
    'Michigan': 'Lansing',
    'Minnesota': 'St. Paul',
    'Mississippi': 'Jackson',
    'Missouri': 'Jefferson City',
    'Montana': 'Helena',
    'Nebraska': 'Lincoln',
    'Nevada': 'Carson City',
    'New Hampshire': 'Concord',
    'New Jersey': 'Trenton',
    'New Mexico': 'Santa Fe',
    'New York': 'Albany',
    'North Carolina': 'Raleigh',
    'North Dakota': 'Bismarck',
    'Ohio': 'Columbus',
    'Oklahoma': 'Oklahoma City',
    'Oregon': 'Salem',
    'Pennsylvania': 'Harrisburg',
    'Rhode Island': 'Providence',
    'South Carolina': 'Columbia',
    'South Dakota': 'Pierre',
    'Tennessee': 'Nashville',
    'Texas': 'Austin',
    'Utah': 'Salt Lake City',
    'Vermont': 'Montpelier',
    'Virginia': 'Richmond',
    'Washington': 'Olympia',
    'West Virginia': 'Charleston',
    'Wisconsin': 'Madison',
    'Wyoming': 'Cheyenne'}
# Generate 35 quiz files.
for quizNum in range(35):
    # Create the quiz and answer key files.
    quizFile = open('capitalsquiz%s.txt' % (quizNum + 1), 'w')
    answerKeyFile = open('capitalsquiz_answers%s.txt' % (quizNum + 1), 'w')
    # Write out the header for the quiz.
    quizFile.write('Name:\n\nDate:\n\nPeriod:\n\n')
    quizFile.write((' ' * 20) + 'State Capitals Quiz (Form %s)' % (quizNum + 1))
    quizFile.write('\n\n')
    # Shuffle the order of the states.
    states = list(capitals.keys())
    random.shuffle(states)
    # Loop through all 50 states, making a question for each.
    for questionNum in range(50):
        # Get right and wrong answers.
        correctAnswer = capitals[states[questionNum]]
        wrongAnswers = list(capitals.values())
        del wrongAnswers[wrongAnswers.index(correctAnswer)]
        wrongAnswers = random.sample(wrongAnswers, 3)
        answerOptions = wrongAnswers + [correctAnswer]
        random.shuffle(answerOptions)
        # Write the question and the answer options to the quiz file.
        quizFile.write('%s. What is the capital of %s?\n' % (questionNum + 1, states[questionNum]))
        for i in range(4):
            quizFile.write(' %s. %s\n' % ('ABCD'[i], answerOptions[i]))
        quizFile.write('\n')
        # Write the answer key to a file.
        answerKeyFile.write('%s. %s\n' % (questionNum + 1, 'ABCD'[answerOptions.index(correctAnswer)]))
    quizFile.close()
    answerKeyFile.close()
| mit | Python | |
809acb9fd29b0834745571c21f128d02932bc118 | Add bgr2hsv. | wwwins/OpenCV-Samples | bgr2hsv.py | bgr2hsv.py | import cv2
import numpy
import sys
# Holds the parsed B,G,R components; stays None if arguments are malformed.
bgrColor = None
# Get user supplied values
if len(sys.argv) != 3:
    print("""
Usage:
python bgr2hsv.py -rgb 0,255,0
Output:
OpenCV HSV: [60, 255, 255]
""")
    sys.exit()
if sys.argv[1]=="-rgb":
    bgrColor = sys.argv[2].split(',')
    # OpenCV expects channel order B,G,R, so flip the user's R,G,B input.
    bgrColor.reverse()
if bgrColor is None:
    sys.exit()
bgrColor = numpy.uint8([[bgrColor]])
hsvColor = cv2.cvtColor(bgrColor,cv2.COLOR_BGR2HSV)
# NOTE(review): Python 2 print statement below -- this script targets py2.
print "OpenCV HSV:", hsvColor.tolist()[0][0]
| mit | Python | |
04c665b7bb71320b920812001f364a5b49544d41 | add tests for FasterRCNN | pfnet/chainercv,chainer/chainercv,chainer/chainercv,yuyu2172/chainercv,yuyu2172/chainercv | tests/links_tests/model_tests/fpn_tests/test_faster_rcnn.py | tests/links_tests/model_tests/fpn_tests/test_faster_rcnn.py | import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import FasterRCNN
from chainercv.links.model.fpn import Head
from chainercv.links.model.fpn import RPN
from chainercv.utils import assert_is_detection_link
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class DummyExtractor(chainer.Link):
    """Stand-in backbone: emits one random 16-channel map per scale."""

    scales = (1 / 2, 1 / 4, 1 / 8)
    mean = _random_array(np, (3, 1, 1))

    def __call__(self, x):
        n, _, h, w = x.shape
        # one random (n, 16, h*scale, w*scale) feature map per pyramid level
        return [chainer.Variable(_random_array(
            self.xp, (n, 16, int(h * scale), int(w * scale))))
            for scale in self.scales]
class DummyFasterRCNN(FasterRCNN):
    """FasterRCNN wired to the dummy extractor with a real RPN and head."""

    def __init__(self, n_fg_class):
        extractor = DummyExtractor()
        # n_fg_class + 1 accounts for the background class.
        super(DummyFasterRCNN, self).__init__(
            extractor=extractor,
            rpn=RPN(extractor.scales),
            head=Head(n_fg_class + 1, extractor.scales),
        )
@testing.parameterize(
    {'n_fg_class': 1},
    {'n_fg_class': 5},
    {'n_fg_class': 20},
)
class TestFasterRCNN(unittest.TestCase):
    """Exercise DummyFasterRCNN's preset/call/predict/prepare contracts."""

    def setUp(self):
        self.link = DummyFasterRCNN(n_fg_class=self.n_fg_class)

    def test_use_preset(self):
        # 'visualize' and 'evaluate' set known thresholds; anything else raises
        self.link.nms_thresh = 0
        self.link.score_thresh = 0
        self.link.use_preset('visualize')
        self.assertEqual(self.link.nms_thresh, 0.5)
        self.assertEqual(self.link.score_thresh, 0.7)

        self.link.nms_thresh = 0
        self.link.score_thresh = 0
        self.link.use_preset('evaluate')
        self.assertEqual(self.link.nms_thresh, 0.5)
        self.assertEqual(self.link.score_thresh, 0.05)

        with self.assertRaises(ValueError):
            self.link.use_preset('unknown')

    def _check_call(self):
        # forward a small batch and validate per-level roi/index shapes
        x = _random_array(self.link.xp, (2, 3, 32, 32))
        with chainer.using_config('train', False):
            rois, roi_indices, head_locs, head_confs = self.link(x)

        self.assertEqual(len(rois), len(self.link.extractor.scales))
        self.assertEqual(len(roi_indices), len(self.link.extractor.scales))
        for l in range(len(self.link.extractor.scales)):
            self.assertIsInstance(rois[l], self.link.xp.ndarray)
            self.assertEqual(rois[l].shape[1:], (4,))
            self.assertIsInstance(roi_indices[l], self.link.xp.ndarray)
            self.assertEqual(roi_indices[l].shape[1:], ())
            self.assertEqual(rois[l].shape[0], roi_indices[l].shape[0])

        n_roi = sum(
            len(rois[l]) for l in range(len(self.link.extractor.scales)))
        self.assertIsInstance(head_locs, chainer.Variable)
        self.assertIsInstance(head_locs.array, self.link.xp.ndarray)
        self.assertEqual(head_locs.shape, (n_roi, self.n_fg_class + 1, 4))
        self.assertIsInstance(head_confs, chainer.Variable)
        self.assertIsInstance(head_confs.array, self.link.xp.ndarray)
        self.assertEqual(head_confs.shape, (n_roi, self.n_fg_class + 1))

    def test_call_cpu(self):
        self._check_call()

    @attr.gpu
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()

    def test_call_train_mode(self):
        # calling the link in train mode must be rejected
        x = _random_array(self.link.xp, (2, 3, 32, 32))
        with self.assertRaises(AssertionError):
            with chainer.using_config('train', True):
                self.link(x)

    def test_predict_cpu(self):
        assert_is_detection_link(self.link, self.n_fg_class)

    @attr.gpu
    def test_predict_gpu(self):
        self.link.to_gpu()
        assert_is_detection_link(self.link, self.n_fg_class)

    def test_prepare(self):
        # differently sized images are scaled and padded into one batch
        imgs = [
            np.random.randint(0, 255, size=(3, 480, 640)).astype(np.float32),
            np.random.randint(0, 255, size=(3, 320, 320)).astype(np.float32),
        ]
        x, scales = self.link.prepare(imgs)
        self.assertEqual(x.shape, (2, 3, 800, 1088))


testing.run_module(__name__, __file__)
| mit | Python | |
21f248d3fc90a604cd85283c48e339784e6ea523 | add command line interface | ContinuumIO/topik,kcompher/topik,kcompher/topik,kcompher/topik,lewismc/topik,ContinuumIO/topik,lewismc/topik,lewismc/topik | topik/cli.py | topik/cli.py | from __future__ import absolute_import
import os
from topik.readers import iter_document_json_stream, iter_documents_folder
from topik.tokenizers import (SimpleTokenizer, CollocationsTokenizer,
EntitiesTokenizer, MixedTokenizer)
from topik.vectorizers import CorpusBOW
from topik.models import LDA
from topik.viz import Termite
import argparse
def parse_args():
    """Build and evaluate the topik command line; returns the parsed args.

    BUG FIX: the "-p/--preprocessing" option originally passed ``defaul='1'``
    (typo for ``default``), which made argparse raise TypeError before any
    argument could be parsed.
    """
    parser = argparse.ArgumentParser(description="TOPIC SPACE")
    parser.add_argument("-d", "--data", required=True, action="store",
                        help="Path to input data for topic modeling ")
    parser.add_argument("-f", "--format", action="store",
                        help="Data format provided: "
                        "Currently available:"
                        "streaming_json, folder_files",
                        default='streaming_json')
    parser.add_argument("-n", "--name", action="store",
                        help="Topic modeling name", default="topic_model")
    parser.add_argument("-p", "--preprocessing", action="store",
                        help="Tokenize method to use id: "
                        "1-Simple, 2-Collocations, 3-Entities, 4-Mix",
                        default='1')
    parser.add_argument("-t", "--topics", action="store",
                        help="Number of topics to find", default='10')
    return parser.parse_args()
def main():
    """Run the full topik pipeline: read -> tokenize -> BOW -> LDA -> termite plot."""
    args = parse_args()
    ntopics = int(args.topics)
    if args.data:
        # Select reader depending on `--format` argument given.
        if args.format == 'folder_files':
            documents = iter_documents_folder(args.data)
        else:
            documents = iter_document_json_stream(args.data)
        # Pick the tokenizer by its numeric id; anything unknown falls back
        # to the simple tokenizer with a warning.
        if args.preprocessing == "1":
            corpus = SimpleTokenizer(documents)
        elif args.preprocessing == "2" :
            corpus = CollocationsTokenizer(documents)
        elif args.preprocessing == "3":
            corpus = EntitiesTokenizer(documents)
        elif args.preprocessing == "4":
            corpus = MixedTokenizer(documents)
        else:
            print("Processing value invalid, using 1-Simple by default")
            corpus = SimpleTokenizer(documents)
        name = args.name
        # NOTE(review): os.makedirs raises if the output directory already
        # exists -- confirm whether re-runs should overwrite instead.
        os.makedirs(name)
        path = name
        # Create dictionary
        corpus_bow = CorpusBOW(corpus)
        corpus_dict = corpus_bow.save_dict(os.path.join(path,'%s.dict' % name))
        # Serialize and store the corpus
        corpus_bow.serialize(os.path.join(path,'%s.mm' % name))
        # Create LDA model from corpus and dictionary
        lda = LDA(os.path.join(path,'%s.mm' % name), os.path.join(path,'%s.dict' % name), ntopics)
        # Generate the input for the termite plot
        lda.termite_data(os.path.join(path,'%s_termite.csv' % name))
        # Get termite plot for this model
        termite = Termite(os.path.join(path,'%s_termite.csv' % name), "Termite Plot for %s" % name)
        termite.plot(os.path.join(path,'%s_termite.html' %name))
        #get_documents_output_file(args.data, corpus_bow, corpus_dict, lda.model, name)
if __name__ == "__main__":
main() | bsd-3-clause | Python | |
316ec13f041bc8b928ba7bb1253c0242ed2b5579 | Add example test script | Drewsif/PySecretSOCKS | example.py | example.py | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function, unicode_literals
import secretsocks
import socket
import Queue
import threading
class Client(secretsocks.Client):
    """Plain-TCP transport client for secretsocks.

    Connects to the paired Server and shuttles raw bytes between the
    socket and the secretsocks recv/write queues.
    """

    def __init__(self, ip, port):
        secretsocks.Client.__init__(self)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((ip, port))
        # Short timeout so recv() can poll self.alive instead of blocking.
        s.settimeout(2)
        self.conn = s
        self.alive = True
        self.start()

    def recv(self):
        """Pump bytes from the socket into recvbuf until the link dies."""
        while self.alive:
            try:
                # NOTE(review): 4092 looks like a typo for 4096 -- harmless,
                # it is only a max read size.
                data = self.conn.recv(4092)
                self.recvbuf.put(data)
            except socket.timeout:
                continue
            except:
                # Any other socket error tears the connection down.
                self.alive = False
                self.conn.close()

    def write(self):
        """Drain writebuf onto the socket until the link dies."""
        while self.alive:
            try:
                data = self.writebuf.get(timeout=10)
            except Queue.Empty:
                continue
            self.conn.sendall(data)
class Server(secretsocks.Server):
    """Plain-TCP transport server for secretsocks.

    Accepts exactly one Client connection, then mirrors Client's
    recv/write queue pumping on the server side.
    """

    def __init__(self, ip, port):
        secretsocks.Server.__init__(self)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((ip, port))
        s.listen(1)
        # Blocks here until the single expected client connects.
        self.conn, nill = s.accept()
        # Short timeout so recv() can poll self.alive instead of blocking.
        self.conn.settimeout(2)
        self.alive = True
        self.start()

    def recv(self):
        """Pump bytes from the accepted socket into recvbuf."""
        while self.alive:
            try:
                data = self.conn.recv(4092)
                self.recvbuf.put(data)
            except socket.timeout:
                continue
            except:
                # Any other socket error tears the connection down.
                self.alive = False
                self.conn.close()

    def write(self):
        """Drain writebuf onto the accepted socket."""
        while self.alive:
            try:
                data = self.writebuf.get(timeout=10)
            except Queue.Empty:
                continue
            self.conn.sendall(data)
def start_fake_remote():
    """Start a loopback Server; run in a thread since accept() blocks."""
    Server('127.0.0.1', 8080)
if __name__ == '__main__':
    # Demo: a fake remote server, a client connected to it over loopback,
    # and a local SOCKS server bridging through that client.
    secretsocks.DEBUG = True
    print('Starting the fake remote server...')
    t = threading.Thread(target=start_fake_remote)
    t.daemon = True
    t.start()
    print('Creating a the client...')
    client = Client('127.0.0.1', 8080)
    print('Starting socks server...')
    server = secretsocks.SocksServer(client, host='127.0.0.1', port=1080)
    # Block until the SOCKS server shuts down.
    server.wait()
d7f24713ad5b8372f5c5aacac252521d2dbbe192 | Add ui version | CivicVision/datahub,CivicVision/datahub,USStateDept/FPA_Core,johnjohndoe/spendb,spendb/spendb,openspending/spendb,nathanhilbert/FPA_Core,USStateDept/FPA_Core,johnjohndoe/spendb,johnjohndoe/spendb,pudo/spendb,CivicVision/datahub,pudo/spendb,USStateDept/FPA_Core,nathanhilbert/FPA_Core,spendb/spendb,spendb/spendb,pudo/spendb,openspending/spendb,nathanhilbert/FPA_Core,openspending/spendb | openspending/ui/__init__.py | openspending/ui/__init__.py | __version__ = '0.8' | agpl-3.0 | Python | |
c41212807781af2c4f6078bbb1bb047591c0923a | Create tweet_basic.py | umangahuja1/Python | Automation/tweet_basic.py | Automation/tweet_basic.py | from selenium import webdriver
from getpass import getpass
from time import sleep
usr = input('Enter your username or email : ')
pwd = getpass('Enter your password : ')
msg = input('Enter your tweet : ')
driver = webdriver.Chrome()
driver.get('https://twitter.com/login')
usr_box = driver.find_element_by_class_name('js-username-field')
usr_box.send_keys(usr)
sleep(3)
pwd_box = driver.find_element_by_class_name('js-password-field')
pwd_box.send_keys(pwd)
sleep(3)
login_button = driver.find_element_by_css_selector('button.submit.EdgeButton.EdgeButton--primary.EdgeButtom--medium')
login_button.submit()
sleep(3)
text_box = driver.find_element_by_id('tweet-box-home-timeline')
text_box.send_keys(msg)
sleep(3)
tweet_button = driver.find_element_by_css_selector('button.tweet-action.EdgeButton.EdgeButton--primary.js-tweet-btn')
tweet_button.click()
| apache-2.0 | Python | |
83515e8e4c354a68e317f6ba9f97c9ca73e0850c | Add Henon plot | martinmcbride/python-projects-for-gcse,martinmcbride/python-projects-for-gcse | fractals/iterate-henon.py | fractals/iterate-henon.py | import matplotlib.pyplot as plt
# Iterate the Henon map (a=1.4, b=0.3) from a fixed seed point and plot
# the resulting orbit as a connected line with markers.
a, b = 1.4, 0.3
x, y = 1.12, 0.09
xs, ys = [], []
for _ in range(10):
    xs.append(x)
    ys.append(y)
    print(x, y)
    x, y = y + 1 - a * x * x, b * x
plt.plot(xs, ys)
plt.plot(xs, ys, 'bo')
plt.show()
fe4e5b0eba9d1064ce17cc5b8e7c64271969f2a9 | add initial migration for core app | edx/credentials,edx/credentials,edx/credentials,edx/credentials | credentials/apps/core/migrations/0001_initial.py | credentials/apps/core/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.core.validators
import django.contrib.auth.models
class Migration(migrations.Migration):
dependencies = [
('auth', '0006_require_contenttypes_0002'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(null=True, verbose_name='last login', blank=True)),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username')),
('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
('email', models.EmailField(max_length=254, verbose_name='email address', blank=True)),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('full_name', models.CharField(max_length=255, null=True, verbose_name='Full Name', blank=True)),
('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups')),
('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
],
options={
'get_latest_by': 'date_joined',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| agpl-3.0 | Python | |
5444920a3447be65592e14d26aba0ef6d257f6cb | Create rename_images.py | ElizabethSutton/Rename_images | rename_images.py | rename_images.py | #!/usr/bin/env python
import os
rootdir = os.getcwd()
for root, dirs, files in os.walk(rootdir):
for directory in dirs:
workingdir = os.path.join(root, directory)
contents = os.listdir(workingdir)
if '*.jpg' or '*.JPG' in contents:
count = 1
os.chdir(workingdir)
for file in contents:
if os.path.isfile(file):
if file == '.DS_Store':
os.remove(file)
elif directory.lower() not in file or directory not in file:
newfile = directory + " " + str(count) + '.jpg'
os.rename(file, newfile)
count = count + 1
os.chdir(rootdir)
| mit | Python | |
206eeb3c9ef3df1e2f7c4f7b2e3c2a002e642375 | Fix tab/spaces issue | kieselai/bashForEach.py | simpleLogger.py | simpleLogger.py | from colorama import init, Fore, Back
# Local imports
import formatter
init(autoreset=True)
class SimpleLogger:
    """ Class Used to log messages to the terminal """
    @staticmethod
    def init(verboseLoggingEnabled):
        """ Initialize the logger with this function
            Parameters:
                verboseLoggingEnabled (boolean): switches verbose messages on/off
        """
        SimpleLogger.verboseLoggingEnabled = verboseLoggingEnabled

    @staticmethod
    def output(*messages, **namedArgs):
        """ Print a message with optional colored output
            Parameters:
                *messages  (string or list(string, ...)): messages to print
                foreground (string): optional named argument containing a code to set the foreground
                background (string): optional named argument containing a code to set the background
        """
        # Default to the terminal's normal colors when none are given.
        (foreground, background) = (namedArgs.get("foreground", Fore.RESET), namedArgs.get("background", Back.RESET))
        for msg in messages:
            # Make msg into a list that is 1 level deep
            msg = formatter.Formatter.FlattenList([msg])
            # Print color codes as well as the messages provided
            print(foreground + background + "".join(msg))

    @staticmethod
    def outputVerbose(*messages):
        """ If verbose messages are enabled, print each message provided
            Parameters:
                *messages (string, or list(string)): messages to print
        """
        # NOTE(review): this path uses CoerceToList while output() uses
        # FlattenList -- confirm the two helpers agree on nested lists.
        if (SimpleLogger.verboseLoggingEnabled):
            for m in messages: SimpleLogger.output(formatter.Formatter.CoerceToList(m), foreground=Fore.RED, background=Back.BLACK)

    @staticmethod
    def outputCommand(command):
        """ Print the command that is to be executed
            Parameters:
                command (string): the command to be printed
        """
        SimpleLogger.output(["\n> ", command], foreground=Fore.CYAN, background=Back.BLACK)
# Initialize SimpleLogger to disable verbose logging by default
SimpleLogger.verboseLoggingEnabled = False | apache-2.0 | Python | |
2178c706dc583c9a2325de7a9ca87a7ea9cb088b | Add start td_agent step | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/workflow/steps/util/region_migration/td_agent.py | dbaas/workflow/steps/util/region_migration/td_agent.py | # -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from util import build_context_script
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.steps.util import test_bash_script_error
from workflow.steps.util import build_start_td_agent_script
from workflow.exceptions.error_codes import DBAAS_0020
LOG = logging.getLogger(__name__)
class StartTDAgent(BaseStep):
    """Region-migration workflow step that starts td-agent on each future host."""

    def __unicode__(self):
        return "Starting td agent..."

    def do(self, workflow_dict):
        """Run the start-td-agent script on every future host over SSH.

        Returns True on best-effort completion (per-host script failures are
        only logged), False when an unexpected exception occurs.
        """
        try:
            for source_host in workflow_dict['source_hosts']:
                host = source_host.future_host
                LOG.info("Starting td_agent on host {}".format(host))
                cs_host_attr = CS_HostAttr.objects.get(host=host)
                context_dict = {}
                # Compose the remote script: bash error trap + start command.
                script = test_bash_script_error()
                script += build_start_td_agent_script()
                script = build_context_script(context_dict, script)
                LOG.info(script)
                output = {}
                return_code = exec_remote_command(server=host.address,
                                                  username=cs_host_attr.vm_user,
                                                  password=cs_host_attr.vm_password,
                                                  command=script,
                                                  output=output)
                LOG.info(output)
                # A non-zero exit is logged but does not fail the step.
                if return_code != 0:
                    LOG.error("Error starting td_agent")
                    LOG.error(str(output))
            # NOTE(review): placement reconstructed -- if this `return True`
            # actually sat inside the for loop, only the first host would be
            # processed; confirm against the original file.
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False

    def undo(self, workflow_dict):
        """Rollback hook; starting td-agent needs no compensation."""
        LOG.info("Running undo...")
        try:
            LOG.info('Rollback starting td_agents - nothing to do')
            return True
        except Exception:
            traceback = full_stack()
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0020)
            workflow_dict['exceptions']['traceback'].append(traceback)
            return False
| bsd-3-clause | Python | |
25802ed351018958fecf0808f70265d678709ce6 | Create 3longestWord.py | GabrielGhe/CoderbyteChallenges,GabrielGhe/CoderbyteChallenges | 3longestWord.py | 3longestWord.py | #Finds the longest word in a string
#1. Split the string by space using .split()
#2. using max function to find the biggest using the length as the criteria
def LongestWord(sen):
    """Return the first longest whitespace-delimited word in *sen*."""
    best = None
    for token in sen.split():
        # Strict '>' keeps the earliest word on length ties, matching max().
        if best is None or len(token) > len(best):
            best = token
    if best is None:
        # Mirror max()'s behaviour on an empty sequence.
        raise ValueError("max() arg is an empty sequence")
    return best
print LongestWord(raw_input())
| mit | Python | |
164f01c8a44c48486b3a2d161ac219e11c1a69e0 | Add check-version command | mnieber/dodo_commands | dodo_commands/extra/standard_commands/check-version.py | dodo_commands/extra/standard_commands/check-version.py | """Compare configuration version to version in original project config file."""
from . import DodoCommand
import os
import sys
import yaml
class Command(DodoCommand):  # noqa
    """Warn when the installed dodo_commands is older than the project requires."""

    def _get_version(self, config_filename):
        """Read required_dodo_commands_version from config.yaml.

        Returns the version as a list of ints so comparisons are numeric;
        the original compared string parts, where "10" < "9".
        """
        with open(config_filename) as f:
            config = yaml.load(f.read())
        version = config.get("ROOT", {}).get("required_dodo_commands_version", "").split(".")
        # Assumes purely numeric components (e.g. "0.14.2") -- the string
        # form never supported suffixes like "rc1" meaningfully either.
        return [int(x) for x in version if x != ""]

    def handle_imp(self, **kwargs):  # noqa
        """Compare required vs installed version and print an upgrade tip."""
        config_filename = os.path.join(
            self.get_config("/ROOT/project_dir", ""),
            "dodo_commands",
            "res",
            "config.yaml"
        )
        required_version = self._get_version(config_filename)
        if required_version:
            actual_version = [
                int(x) for x in DodoCommand.get_version().split(".")
            ]
            if required_version > actual_version:
                sys.stdout.write(
                    'The dodo_commands package needs to be upgraded (%s < %s). Tip: use "dodo-upgrade"\n'
                    % (
                        ".".join(str(x) for x in actual_version),
                        ".".join(str(x) for x in required_version),
                    ),
                )
| mit | Python | |
35d3f38a9450c06212a4eceb93bfb890806d7f3e | Create Longest_Valid_Parentheses.py | jakeshi/algo | Longest_Valid_Parentheses.py | Longest_Valid_Parentheses.py | class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
stack = []
ret = 0
last = -1
for i in range(len(s)):
if s[i] == '(':
stack.append(i)
else:
if not stack:
last = i
else:
stack.pop()
if stack==[]:
ret = max(ret,i-last)
else: # there are multiple remaining left brackets
ret = max(ret,i-stack[-1])
#print("i is {} and ret temp is {},ret is {}".format(0,ret_temp,ret))
return ret
def stringToString(input):
    """Strip the surrounding quotes and unescape a judge-format string.

    Python 2 only: bytes.decode('string_escape') does not exist on Python 3.
    """
    return input[1:-1].decode('string_escape')
def intToString(input):
    """Render *input* as a decimal string, treating None as 0."""
    return str(input if input is not None else 0)
def main():
    """LeetCode-style stdin driver: one escaped string per line -> one answer per line.

    Python 2 only: uses generator .next() and the print statement.
    """
    import sys

    def readlines():
        # Yield stdin lines without their trailing newline.
        for line in sys.stdin:
            yield line.strip('\n')

    lines = readlines()
    while True:
        try:
            line = lines.next()
            s = stringToString(line)
            ret = Solution().longestValidParentheses(s)
            out = intToString(ret)
            print out
        except StopIteration:
            break
if __name__ == '__main__':
main()
| mit | Python | |
372bd768acae6fbf425271b193d1734e5001c71a | Add initial working solution 4 | xliiauo/leetcode,xliiauo/leetcode,xiao0720/leetcode,xliiauo/leetcode,xiao0720/leetcode | 4/Solution.py | 4/Solution.py | class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
nums1.extend(nums2)
merged = sorted(nums1)
length = len(merged)
if length % 2 != 0:
return merged[int(length/2)]
else:
return (merged[int(length/2) - 1] + merged[int(length/2)])/2
if __name__ == '__main__':
nums1 = [1, 2, 3]
nums2 = [3, 4, 5]
print(Solution().findMedianSortedArrays(nums1, nums2)) | mit | Python | |
84769e0cf76f750e7b8c5315a937d881b097d969 | Add binding.gyp file for building the module with node-gyp. Should be used for nodejs v0.7 and v0.8 or so. | w1nk/node-odbc,w1nk/node-odbc,Akpotohwo/node-ibm_db,abiliooliveira/node-ibm_db,dfbaskin/node-odbc,ibmdb/node-ibm_db,qpresley/node-ibm_db,deckar01/node-odbc,wankdanker/node-odbc,bustta/node-odbc,Akpotohwo/node-ibm_db,qpresley/node-ibm_db,elkorep/node-ibm_db,w1nk/node-odbc,qpresley/node-ibm_db,bustta/node-odbc,abiliooliveira/node-ibm_db,strongloop-forks/node-ibm_db,bustta/node-odbc,Akpotohwo/node-ibm_db,bzuillsmith/node-odbc,jbaxter0810/node-odbc,silveirado/node-ibm_db,silveirado/node-ibm_db,ibmdb/node-ibm_db,strongloop-forks/node-ibm_db,Akpotohwo/node-ibm_db,silveirado/node-ibm_db,dfbaskin/node-odbc,strongloop-forks/node-ibm_db,silveirado/node-ibm_db,Papercloud/node-odbc,Akpotohwo/node-ibm_db,qpresley/node-ibm_db,elkorep/node-ibm_db,wankdanker/node-odbc,Papercloud/node-odbc,jbaxter0810/node-odbc,bzuillsmith/node-odbc,deckar01/node-odbc,wankdanker/node-odbc,gmahomarf/node-odbc,jbaxter0810/node-odbc,gmahomarf/node-odbc,qpresley/node-ibm_db,Papercloud/node-odbc,qpresley/node-ibm_db,deckar01/node-odbc,ibmdb/node-ibm_db,bustta/node-odbc,elkorep/node-ibm_db,abiliooliveira/node-ibm_db,jbaxter0810/node-odbc,bzuillsmith/node-odbc,deckar01/node-odbc,Papercloud/node-odbc,strongloop-forks/node-ibm_db,dfbaskin/node-odbc,w1nk/node-odbc,bzuillsmith/node-odbc,elkorep/node-ibm_db,wankdanker/node-odbc,ibmdb/node-ibm_db,ibmdb/node-ibm_db,abiliooliveira/node-ibm_db,Akpotohwo/node-ibm_db,gmahomarf/node-odbc,dfbaskin/node-odbc,ibmdb/node-ibm_db,gmahomarf/node-odbc | binding.gyp | binding.gyp | {
'targets' : [
{
'target_name' : 'odbc_bindings',
'sources' : [
'src/Database.cpp'
],
'libraries' : [
'-lodbc'
],
'include_dirs' : [
'/usr/local/lib',
'/opt/local/lib',
'/usr/include'
],
'conditions' : [
[ 'OS == "linux"', {
}],
[ 'OS=="win"', {
}]
]
}
]
}
| mit | Python | |
2f0d186ef1a0b63e4c66ecd9e63cd1d02c73ecf8 | Create LeetCode-SummaryRanges.py | lingcheng99/Algorithm | LeetCode-SummaryRanges.py | LeetCode-SummaryRanges.py | """
Given a sorted integer array without duplicates, return the summary of its ranges.
For example, given [0,1,2,4,5,7], return ["0->2","4->5","7"].
"""
class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
alist=[]
i=0
while i<len(nums)-1:
while nums[i]!=nums[i+1]-1:
i+=1
if i<len(nums):
head=nums[i]
while i<len(nums)-1 and nums[i]==nums[i+1]-1:
i+=1
if head<nums[i]:
tail=nums[i]
alist.append(str(head)+'->'+str(tail))
i+=1
return alist
| mit | Python | |
e96e7cbcc1f7bc28832958e31baff11e8bfbf94c | Create gapstat.py | jaganadhg/data_science_notebooks | gapstat.py | gapstat.py | #!/usr/bin/env python
"""
Author : Jaganadh Gopinadhan
Licence : Apahce 2
e-mail jaganadhg at gmail dot com
"""
import scipy
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
import pandas as pd
class TWHGapStat(object):
    """
    Implementation of Gap Statistic from Tibshirani, Walther, Hastie to
    determine the inherent number of clusters in a dataset with k-means
    clustering.

    Ref Paper : https://web.stanford.edu/~hastie/Papers/gap.pdf
    """

    def generate_random_data(self, X):
        """
        Populate reference data.

        Parameters
        ----------
        X : Numpy Array
            The base data from which random sample has to be generated

        Returns
        -------
        reference : Numpy Array
            Uniform random reference data with the same shape as X.
        """
        reference = scipy.random.random_sample(size=(X.shape[0], X.shape[1]))
        return reference

    def _fit_cluster(self, X, n_cluster, n_iter=5):
        """
        Fit k-means on reference data and return the mean inertia.

        Parameters
        ----------
        X : numpy array
            The (reference) data.
        n_cluster : int
            The number of clusters to form.
        n_iter : int, default = 5
            Number of repeated clustering runs to average. If the data is
            large keep it less than 5, so that the run time will be less.

        Returns
        -------
        mean_inertia : float
            The mean inertia over the n_iter runs.
        """
        iterations = range(1, n_iter + 1)
        ref_inertias = pd.Series(index=iterations)

        for iteration in iterations:
            clusterer = KMeans(n_clusters=n_cluster, n_init=3, n_jobs=-1)
            # If you are using Windows server n_jobs = -1 will be dangerous. So the
            # value should be set to max cores - 3 . If we use all the cores available
            # in Windows server sklearn tends to throw memory error
            clusterer.fit(X)
            ref_inertias[iteration] = clusterer.inertia_

        mean_inertia = ref_inertias.mean()
        return mean_inertia

    def fit(self, X, max_k):
        """
        Compute Gap Statistics

        Parameters
        ----------
        X : numpy array
            The base data
        max_k : int
            Maximum value to which we are going to test the 'k' in k-means.

        Returns
        -------
        gap_stat : Pandas Series
            Gap statistic for each k in 1..max_k, indexed by k.
        """
        k_range = range(1, max_k + 1)
        gap_stat = pd.Series(index=k_range)
        ref_data = self.generate_random_data(X)

        for k in k_range:
            base_clusterer = KMeans(n_clusters=k, n_init=3, n_jobs=-1)
            base_clusterer.fit(X)
            ref_inertia = self._fit_cluster(ref_data, k)
            # Gap(k) = E[log(W_ref)] - log(W_k) per Tibshirani et al.
            # The original computed log(ref_inertia - base_inertia), which
            # is not the paper's statistic and can take log of a negative.
            cur_gap = scipy.log(ref_inertia) - scipy.log(base_clusterer.inertia_)
            gap_stat[k] = cur_gap

        return gap_stat
| bsd-3-clause | Python | |
5013a7dfc3297a5f8d6f6d7107f0839f5b12d150 | Update network-delay-time.py | kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode | Python/network-delay-time.py | Python/network-delay-time.py | # Time: O((|E| + |V|) * log|V|)
# Space: O(|E| + |V|)
# Dijkstra's algorithm
class Solution(object):
def networkDelayTime(self, times, N, K):
"""
:type times: List[List[int]]
:type N: int
:type K: int
:rtype: int
"""
adj = [[] for _ in xrange(N)]
for u, v, w in times:
adj[u-1].append((v-1, w))
result = 0
lookup = set()
min_heap = [(0, K-1)]
while min_heap and len(lookup) != N:
result, u = heapq.heappop(min_heap)
lookup.add(u)
for v, w in adj[u]:
if v in lookup: continue
heapq.heappush(min_heap, (result+w, v))
return result if len(lookup) == N else -1
| # Time: O((|E| + |V|) * log|V|)
# Space: O(|E| + |V|)
# Dijkstra's algorithm
class Solution(object):
    def networkDelayTime(self, times, N, K):
        """
        :type times: List[List[int]]
        :type N: int
        :type K: int
        :rtype: int

        Dijkstra from node K (Python 2: uses xrange).
        """
        min_heap = []
        # Adjacency lists; input nodes are 1-indexed, stored 0-indexed.
        adj = [[] for _ in xrange(N)]
        for u, v, w in times:
            adj[u-1].append((v-1, w))

        lookup, result = set(), 0
        heapq.heappush(min_heap, (0, K-1))
        while min_heap and len(lookup) != N:
            result, u = heapq.heappop(min_heap)
            lookup.add(u)
            for v, w in adj[u]:
                if v in lookup: continue
                heapq.heappush(min_heap, (result+w, v))
        # -1 when the heap drains before every node is reached.
        return result if len(lookup) == N else -1
| mit | Python |
9532664cabae175aed73355d79f62f2326e2a9ef | add flask as server in react | yuncliu/Learn,yuncliu/Learn,yuncliu/Learn,yuncliu/Learn,yuncliu/Learn,yuncliu/Learn,yuncliu/Learn | react/s.py | react/s.py | from flask import Flask
import os
app = Flask(__name__, static_url_path='', static_folder='')
def serve_dir(path):
    """Render a minimal HTML listing of *path* with one link per entry."""
    items = []
    for entry in os.scandir(path):
        items.append("<li><a href='{0}/{1}'>{1}</a></li>\n".format(path, entry.name))
    return "<html>\n<ul>\n" + "".join(items) + "</ul>\n</html>"
@app.route('/')
def root():
    """Serve a directory listing of the current working directory."""
    print('request path is /')
    return serve_dir('.')
@app.route('/api/<path>')
def path(path):
    """Placeholder API endpoint; echoes the path to stdout, returns 'api'."""
    # Note: the view function name shadows its own `path` parameter.
    print(path)
    return "api"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=3000)
| bsd-3-clause | Python | |
b66ad576230fb7c96a8f5c6c7b6af8a8e4c8d0b5 | Add module for Source Engine game logic entities | BHSPitMonkey/vmflib | vmf/games/source.py | vmf/games/source.py | """
Helper classes for creating maps in any Source Engine game.
"""
from vmf.vmf import Entity
from vmf.types import Origin
class LogicAuto(Entity):
"""Sets up certain game logic. Fires some useful map events.
https://developer.valvesoftware.com/wiki/Logic_auto
"""
def __init__(self):
Entity.__init__(self, "logic_auto")
self.origin = Origin()
| bsd-2-clause | Python | |
688a9ab3bcc8312d88014ed93177c0b9d75f88cc | Create minimum_partition.py | TheAlgorithms/Python | dynamic_programming/minimum_partition.py | dynamic_programming/minimum_partition.py | """
Partition a set into two subsets such that the difference of subset sums is minimum
"""
def findMin(arr):
n = len(arr)
s = sum(arr)
dp = [[False for x in range(s+1)]for y in range(n+1)]
for i in range(1, n+1):
dp[i][0] = True
for i in range(1, s+1):
dp[0][i] = False
for i in range(1, n+1):
for j in range(1, s+1):
dp[i][j]= dp[i][j-1]
if (arr[i-1] <= j):
dp[i][j] = dp[i][j] or dp[i-1][j-arr[i-1]]
for j in range(s/2, -1, -1):
if dp[n][j] == True:
diff = s-2*j
break;
return diff
| mit | Python | |
a198ab95fe34f368f8a579ec5e4c8fb7ef27c9c5 | Solve non abundant sums | daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various | project_euler/023.non_abundant_sums.py | project_euler/023.non_abundant_sums.py | '''
Problem 023
A perfect number is a number for which the sum of its proper divisors is exactly
equal to the number. For example, the sum of the proper divisors of 28 would
be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.
A number n is called deficient if the sum of its proper divisors is less than n
and it is called abundant if this sum exceeds n.
As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest
number that can be written as the sum of two abundant numbers is 24. By
mathematical analysis, it can be shown that all integers greater than 28123
can be written as the sum of two abundant numbers. However, this upper limit
cannot be reduced any further by analysis even though it is known that the
greatest number that cannot be expressed as the sum of two abundant numbers
is less than this limit.
Find the sum of all the positive integers which cannot be written as the sum of
two abundant numbers.
Solution: Copyright 2017 Dave Cuthbert, MIT License
'''
import math
from collections import defaultdict
class AbundantNumbers():
    """Memo of which integers are known to be abundant.

    NOTE(review): `abundant` is a class attribute, so every instance shares
    the same defaultdict -- fine for this single-use script, but surprising
    if two instances were ever created.
    """
    abundant = defaultdict(lambda: False)

    def __init__(self):
        self.abundant

    def is_abundant(self, n):
        # defaultdict returns False for numbers not yet marked.
        return self.abundant[n]

    def set_abundant(self, n):
        self.abundant[n] = True
def find_factors(number):
    """Return the set of proper divisors of *number* (1 included)."""
    factors = {1}
    # Divisor pairs (d, number/d) need only be scanned up to sqrt(number).
    for d in range(2, int(math.sqrt(number)) + 1):
        if number % d == 0:
            factors.add(d)
            factors.add(int(number / d))
    return factors
def solve_problem(n):
    """Sum all positive integers <= n that are not a sum of two abundant numbers."""
    UPPER_LIMIT = n
    abundants = AbundantNumbers()
    non_abundant_sum = 0

    # Find all the abundants
    for i in range(1, UPPER_LIMIT):
        if abundants.is_abundant(i):
            pass
        else:
            factor_sum = 0
            factors_i = find_factors(i)
            for f in factors_i :
                factor_sum += f
            # An abundant number's proper divisors sum to more than itself.
            if factor_sum > i:
                count = 1
                # Pre-mark some multiples (i, 2i, 6i, 24i, ...) as abundant;
                # multiples of an abundant number are abundant, so no false
                # positives -- the rest are checked individually above.
                # NOTE(review): rebinding the loop variable `i` here is
                # intentional-looking but easy to misread.
                while i < UPPER_LIMIT:
                    i *= count
                    abundants.set_abundant(i)
                    count += 1

    # Check if sum of 2 abundants
    for current_number in range(1, UPPER_LIMIT + 1):
        flag = True
        for number_to_subtract in range(1, current_number):
            if abundants.is_abundant(number_to_subtract):
                remainder = current_number - number_to_subtract
                if abundants.is_abundant(remainder):
                    flag = False
                    break
        if flag:
            non_abundant_sum += current_number

    return(non_abundant_sum)
if __name__ == "__main__":
n = 28123
print(solve_problem(n))
| mit | Python | |
b18cf437f499ee8ebe536779883d05518a69c00f | Fix conflicting migrations | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/migrations/0100_update_program_model.py | accelerator/migrations/0100_update_program_model.py | # Generated by Django 2.2.28 on 2022-04-20 13:05
import sorl.thumbnail.fields
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
<<<<<<< HEAD:accelerator/migrations/0101_update_program_model.py
('accelerator', '0100_add_innovation_stage_model'),
=======
('accelerator', '0099_program_interest_related_changes'),
>>>>>>> 5dfd5bb ([AC-9452] Fix conflicting migrations):accelerator/migrations/0100_update_program_model.py
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=sorl.thumbnail.fields.ImageField(
blank=True,
null=True
upload_to='program_images'),
),
]
| mit | Python | |
64d4e6d302702e912d3db1125a00d22ad8eee2ab | Create get_collocations3.py | ahegel/scripts | get_collocations3.py | get_collocations3.py | import string
from nltk.tokenize import word_tokenize
from nltk.collocations import BigramCollocationFinder
from nltk.collocations import BigramAssocMeasures
from nltk.corpus import stopwords
# find collocations for each word
def get_collocations(corpus, windowsize=10, numresults=10):
    '''Print the top collocations found in a corpus using NLTK.

    Parameters:
        corpus (str): the raw text to find collocations in.
        windowsize (int): max token distance between bigram members.
        numresults (int): how many top-scoring collocations to print.
    '''
    # convert the corpus (a string) into a list of words
    tokens = word_tokenize(corpus)
    # initialize the bigram association measures object to score each collocation
    bigram_measures = BigramAssocMeasures()
    # initialize the bigram collocation finder object to find and rank collocations
    finder = BigramCollocationFinder.from_words(tokens, window_size=windowsize)
    # apply a series of filters to narrow down the collocation results
    ignored_words = stopwords.words('english')
    finder.apply_word_filter(lambda w: len(w) < 2 or w.lower() in ignored_words)
    finder.apply_freq_filter(1)
    # calculate the top results by T-score
    # list of all possible measures: .raw_freq, .pmi, .likelihood_ratio, .chi_sq, .phi_sq, .fisher, .student_t, .mi_like, .poisson_stirling, .jaccard, .dice
    results = finder.nbest(bigram_measures.student_t, numresults)
    # print the results
    print("Top ", str(numresults), " collocations:")
    for k, v in results:
        print(str(k), ", ", str(v))
def get_keyword_collocations(corpus, keyword, windowsize=10, numresults=10):
    '''Print the top collocates of *keyword* in a corpus using NLTK.

    Parameters:
        corpus (str): the raw text to find collocations in.
        keyword (str): only bigrams containing this word are kept.
        windowsize (int): max token distance between bigram members.
        numresults (int): how many top-scoring collocations to use.
    '''
    # convert the corpus (a string) into a list of words
    tokens = word_tokenize(corpus)
    # initialize the bigram association measures object to score each collocation
    bigram_measures = BigramAssocMeasures()
    # initialize the bigram collocation finder object to find and rank collocations
    finder = BigramCollocationFinder.from_words(tokens, window_size=windowsize)
    # initialize a function that will narrow down collocates that don't contain the keyword
    keyword_filter = lambda *w: keyword not in w
    # apply a series of filters to narrow down the collocation results
    ignored_words = stopwords.words('english')
    finder.apply_word_filter(lambda w: len(w) < 2 or w.lower() in ignored_words)
    finder.apply_freq_filter(1)
    finder.apply_ngram_filter(keyword_filter)
    # calculate the top results by T-score
    # list of all possible measures: .raw_freq, .pmi, .likelihood_ratio, .chi_sq, .phi_sq, .fisher, .student_t, .mi_like, .poisson_stirling, .jaccard, .dice
    results = finder.nbest(bigram_measures.student_t, numresults)
    # print the results
    print("Top collocations for ", str(keyword), ":")
    collocates = ''
    # Keep the non-keyword member of each bigram.
    # NOTE(review): when k != keyword this appends k even if v is the
    # keyword's partner -- confirm which member was intended.
    for k, v in results:
        if k != keyword:
            collocates += k + ' '
        else:
            collocates += v + ' '
    print(collocates, '\n')
# Replace this with your filename
infile = "sample_corpus.txt"

# Read in the corpus you want to find collocations from
with open(infile) as tmpfile:
    data = tmpfile.read()

# Clean the data
# NOTE(review): Python 2 only -- str.translate(None, chars) raises
# TypeError on Python 3 (use str.maketrans there).
data = data.translate(None, string.punctuation) # remove punctuation
data = "".join(i for i in data if ord(i) < 128) # remove non-ascii characters

# Get the top collocations for the entire corpus
get_collocations(data)
print(' ')

# Replace this with a list of keywords you want to find collocations for
words_of_interest = ["love", "death"]

# Get the top collocations for each keyword in the list above
for word in words_of_interest:
    get_keyword_collocations(data, word)
| mit | Python | |
7d60876f2b1152fbe92ba90ec0fa9e2acfb05af2 | fix InMoov launching button | MyRobotLab/myrobotlab,MyRobotLab/myrobotlab,MyRobotLab/myrobotlab,MyRobotLab/myrobotlab,MyRobotLab/myrobotlab,MyRobotLab/myrobotlab,MyRobotLab/myrobotlab | src/main/resources/resource/Intro/InMoov01_start.py | src/main/resources/resource/Intro/InMoov01_start.py | #########################################
# InMoov01_start.py
# categories: inmoov
# more info @: http://myrobotlab.org/service/InMoov
#########################################
# uncomment for virtual hardware
# Platform.setVirtual(True)
# Launch the InMoov2 service under the name 'i01'.
i01 = Runtime.start('i01', 'InMoov2')
8f2ec9462b038704d00f85b706cfb34a19eac974 | add weekly backup job | relembra/relembra | etc/weekly_backup.py | etc/weekly_backup.py | #!/usr/bin/env python
from datetime import datetime
import os.path
import sys
def day_abbrev():
    """Return today's date as a compact 'YYYYMMDD' string (e.g. '20240131').

    Used to name the weekly backup archive.  The original chained
    ``.lower()``, but the output contains only digits, so it was a no-op
    and has been dropped.
    """
    return datetime.now().strftime('%Y%m%d')
def warn(txt):
    """Print a warning to stderr and e-mail it to the maintainer.

    ``txt`` is interpolated into a shell command below, so it must not
    contain shell metacharacters (all call sites pass fixed literals).
    """
    # sys.stderr.write works on both Python 2 and 3; the original used the
    # py2-only statement `print >> sys.stderr, ...`, which is a SyntaxError
    # on Python 3.  Output bytes are identical.
    sys.stderr.write("WARNING: %s!\n" % txt)
    os.system("""echo "At %s: %s" | mail -s "WARNING: %s" euccastro@gmail.com""" % (datetime.now(), txt, txt))
def backup():
    """Compress the latest 'mon' Datomic backup and upload it to cloud storage.

    Creates <YYYYMMDD>.tar.xz inside the backup root, copies it to the
    ``acd:relembra-weekly-backups`` rclone remote, then removes the local
    archive.  On tar failure nothing is uploaded; on upload failure the
    local archive is now kept so the upload can be retried by hand
    (the original deleted it, losing the only copy of that backup).
    """
    backup_root = "/opt/datomic-free-0.9.5372/bak"
    backup_path = os.path.join(backup_root, 'mon')
    tar_name = "%s.tar.xz" % day_abbrev()
    os.chdir(backup_root)
    # NOTE(review): backup_path is absolute, so tar stores absolute paths in
    # the archive; kept as-is to avoid changing the archive layout.
    retcode = os.system("tar -cJf %s %s" % (tar_name, backup_path))
    if retcode != 0:
        warn("Couldn't tar")
        return
    retcode = os.system("/usr/bin/rclone --checksum copy %s acd:relembra-weekly-backups" % tar_name)
    if retcode != 0:
        warn("Couldn't upload backup")
        # Keep the local archive when the upload fails.
        return
    os.unlink(tar_name)
# Entry point: run one weekly backup cycle when invoked as a script
# (intended to be called from cron).
if __name__ == '__main__':
    backup()
| apache-2.0 | Python | |
2084569577c24fa0ba028bb6d0e8c8b62a604c61 | Create arduinoMicroShiftRegisterTests_PythonCompanion_debugSerialCmds_staticListOfPatterns.py | stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests,stephaneAG/Python_tests | pyxhook/arduinoMicroShiftRegisterTests_PythonCompanion_debugSerialCmds_staticListOfPatterns.py | pyxhook/arduinoMicroShiftRegisterTests_PythonCompanion_debugSerialCmds_staticListOfPatterns.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
## -*- coding: iso-8859-1 -*-
# the above is to prevent that damn " SyntaxError: Non-ASCII character '\xc2' " error
# "Arduino companion"
# test implm of a LEDs animations using an Arduino Micro & 74HC595 shift registers
import serial # needed for serial communication with the uC/Arduino
import time
import sys # needed to print stuff on the same line in stdout
# initial setup
# init the Arduino serial connection
#ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=None)
try:
    # timeout=None makes reads blocking: ser.readline() in the main loop
    # waits indefinitely for the uC's reply.  Device path assumes the Micro
    # enumerated as ACM1 -- TODO confirm, it can shift between reboots.
    ser = serial.Serial('/dev/ttyACM1', 9600, timeout=None)
except (OSError, IOError) as e:
    # No board attached (or wrong port): tell the user and bail out.
    print "' should connect uC/Arduino first ;p !"
    quit()
def formatPattern(pattern):
    """Turn a bit-pattern string such as '10' into the wire form '1/0'.

    Each character of *pattern* becomes one slash-separated field, which is
    the framing the Arduino sketch parses off the serial line.
    """
    separator = '/'
    return separator.join(pattern)
# Playlist of LED frames the main loop cycles through.
# Each entry is 16 bits -- one per output of the two daisy-chained 74HC595s.
# curr_pattern_idx is the cursor that nextPattern()/prevPattern() move.
# (Stray trailing ';' removed; the dead module-level '''...''' string that
# was evaluated at import time is now a real comment.)
curr_pattern_idx = 0

# Earlier throwaway test data, kept for reference:
#   patterns = ['A', 'B', 'C']        # simplest quick test & debug
#   patterns = [                      # one shift register at a time
#       '0000000011111111',
#       '1111111100000000',
#   ]

# K2000 / Knight Rider scanner: a 2-LED-wide dot sweeps right, bounces,
# sweeps left, bounces back.  The mirrored half of the list was generated
# by reversing each string ('...'[::-1]) and pasting the result:
#   for item in patterns:
#       print "'" + item[::-1] + "',"
patterns = [
    '0000000110000000',
    '0000000011000000',
    '0000000001100000',
    '0000000000110000',
    '0000000000011000',
    '0000000000001100',
    '0000000000000110',
    '0000000000000011',  # right most
    '0000000000000001',  # right most & cutoff
    '0000000000000011',  # right most
    '0000000000000110',
    '0000000000001100',
    '0000000000011000',
    '0000000000110000',
    '0000000001100000',
    '0000000011000000',
    '0000000110000000',  # same as starting position, middle
    '0000001100000000',
    '0000011000000000',
    '0000110000000000',
    '0001100000000000',
    '0011000000000000',
    '0110000000000000',
    '1100000000000000',  # left most
    '1000000000000000',  # left most & cutoff
    '1100000000000000',  # left most
    '0110000000000000',
    '0011000000000000',
    '0001100000000000',
    '0000110000000000',
    '0000011000000000',
    '0000001100000000',
    # '0000000110000000',  # start position
]
def nextPattern():
    """Advance the module-level cursor to the next pattern and return it.

    Wraps back to the first entry after the last one, so the animation
    loops forever.
    """
    global curr_pattern_idx
    # Modulo arithmetic folds the explicit "increment or reset" branches
    # of the original into a single expression.
    curr_pattern_idx = (curr_pattern_idx + 1) % len(patterns)
    return patterns[curr_pattern_idx]
def prevPattern():
    """Step the module-level cursor back to the previous pattern and return it.

    Wraps to the last entry when stepping back from the first, mirroring
    nextPattern().
    """
    global curr_pattern_idx
    # Python's modulo of a negative number is non-negative, so (0 - 1) %
    # len(patterns) lands on the last index -- same wrap as the original.
    curr_pattern_idx = (curr_pattern_idx - 1) % len(patterns)
    return patterns[curr_pattern_idx]
while True:
    # Repaint the terminal from scratch each frame (ANSI: clear screen,
    # cursor home).
    sys.stdout.write("\x1b[2J\x1b[H")
    pattern = nextPattern()
    status = ('\n[ PATTERNS TEST ]'
              '\n\nCurrent pattern: \n' + pattern +
              '\n\nFormatted: \n' + formatPattern(pattern))
    sys.stdout.write(status)
    # Ship the slash-delimited frame to the Arduino, then block until it
    # echoes a line back (the serial port was opened with timeout=None).
    ser.write(formatPattern(pattern) + '\n')
    reply = ser.readline()
    sys.stdout.write('\n\nuC/Arduino callback: \n' + reply)
    sys.stdout.flush()
    time.sleep(0.100)

# Close the serial connection with the uC/Arduino.
# NOTE(review): unreachable while the loop above runs forever (Ctrl-C
# raises past it); kept so the intent of releasing the port is explicit.
ser.close()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.