commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
0caeed31553dbc2a201cf5e2e50013ea946507c1 | Add packagist. | sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria | plumeria/plugins/packagist.py | plumeria/plugins/packagist.py | from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
@commands.register("packagist", "composer", category="Development")
@rate_limit()
async def packagist(message):
    """
    Search the Packagist repository for a package.

    Example::

        /packagist discord

    Responds with a bulleted list of matching package names, descriptions
    and URLs.
    """
    query = message.content.strip()
    if not query:
        raise CommandError("Search term required!")
    response = await http.get("https://packagist.org/search.json", params=[
        ('q', query),
    ])
    results = response.json()['results']
    if not results:
        raise CommandError("no results found")
    entries = [
        "\u2022 **{name}** - {desc} <{url}>".format(
            name=entry['name'],
            desc=entry['description'],
            url=entry['url'])
        for entry in results
    ]
    return "\n".join(entries)
| mit | Python | |
7a3d41aea381ba914fb7a615ab6de1ff10d1cf89 | Add initial tool dependencies generator. | bioarchive/aRchive_source_code,bioarchive/aRchive_source_code,bioarchive/aRchive_source_code,bioarchive/aRchive_source_code | get_galaxy_tool_dependencies.py | get_galaxy_tool_dependencies.py | #!/usr/bin/env python
import sys
import requests
from string import Template
try:
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import StrVector
import rpy2.robjects.packages as rpackages
except ImportError:
raise ImportError(
"RPy2 must be installed to use this script.")
# Base URL of the bioarchive mirror for Bioconductor tarballs.
ARCHIVE_URL_TEMPLATE = 'http://bioarchive.galaxyproject.org'
R_VERSION = '3.2'
R_PACKAGE_NAME = 'package_r_%s' % (R_VERSION.replace('.', '_'))
# Target Bioconductor package whose dependency tree is generated.
PACKAGE_NAME = 'monocle'
PACKAGE_VERSION = ''
README = '%s in version %s' % (PACKAGE_NAME, PACKAGE_VERSION)
# Wrapper for each dependency URL in the generated tool_dependencies.xml.
PACKAGE_XML_TEMPLATE = "<package>%s</package>"
def package_exists(path):
    """Return True when a HEAD request for *path* answers 302 Found."""
    return requests.head(path).status_code == requests.codes.found
def install_dependencies():
    """Install the pkgDepTools Bioconductor package via biocLite."""
    r_base = importr('base')
    r_base.source("http://www.bioconductor.org/biocLite.R")
    installer = importr("BiocInstaller")
    installer.biocLite("pkgDepTools")
def download_archive(url):
    """Stream *url* into a file named after its last path segment.

    Returns the local file name.
    """
    local_filename = url.split('/')[-1]
    response = requests.get(url, stream=True)
    with open(local_filename, 'wb') as out:
        for chunk in response.iter_content(chunk_size=1024):
            if not chunk:  # skip keep-alive chunks
                continue
            out.write(chunk)
            out.flush()
    return local_filename
def get_dependencies_url( package_name ):
    """Return source-tarball URLs for *package_name* and its dependencies.

    Defines an R helper (via rpy2) that walks the Bioconductor dependency
    graph with pkgDepTools and maps each needed package to its
    ``<repo>/<name>_<version>.tar.gz`` URL.  Entries R could not resolve
    come back starting with 'NA' and are filtered out here.
    """
    robjects.r("""
    library("pkgDepTools")
    library("BiocInstaller")

    getPackageDependencies <- function( package )
    {
        dependencies <- makeDepGraph(biocinstallRepos(), type="source", keep.builtin=TRUE, dosize=FALSE)
        packages <- getInstallOrder( package, dependencies, needed.only=FALSE )$packages
        contrib_url <- contrib.url(biocinstallRepos(), type = "source")
        available_packages <- available.packages( contrib_url )
        package_names <- as.vector( available_packages[,"Package"] )
        package_versions <- as.vector( available_packages[,"Version"] )
        package_urls <- as.vector(available_packages[,"Repository"])
        intersect <- match(packages, available_packages )
        intersect <- intersect[ !is.na(intersect) ]
        paste( package_urls[intersect], paste(paste( package_names[intersect], package_versions[intersect], sep="_"), "tar.gz", sep="."), sep="/" )
    }
    """
    )
    r_get_package_deps = robjects.r['getPackageDependencies']
    return [url for url in r_get_package_deps( package_name ) if not url.startswith('NA')]
if __name__ == '__main__':
    # Resolve dependency tarball URLs for the target package.
    urls = get_dependencies_url( PACKAGE_NAME )
    packages = []
    for url in urls:
        # Bioconductor tarballs are mirrored on bioarchive; point the
        # generated XML at the mirror instead of the original host.
        if url.find('bioconductor') != -1:
            aRchive_url = "%s/%s" % (ARCHIVE_URL_TEMPLATE, url.split('/')[-1])
            packages.append( PACKAGE_XML_TEMPLATE % aRchive_url )
        else:
            packages.append( PACKAGE_XML_TEMPLATE % url )
        # Also fetch every tarball into the working directory.
        download_archive(url)
    substitutes = {
        'R_VERSION': R_VERSION,
        'R_PACKAGE_NAME': R_PACKAGE_NAME,
        'README': README,
        'PACKAGE_NAME': PACKAGE_NAME,
        'PACKAGE_VERSION': PACKAGE_VERSION
    }
    substitutes['DEPENDENCIES'] = '\n '.join( packages )
    # tool_dependencies.xml is read as a template and the filled-in
    # result is written to stdout.
    with open( 'tool_dependencies.xml' ) as handle:
        r_package_template = Template( handle.read() )
    sys.stdout.write( r_package_template.safe_substitute( substitutes ) )
| mit | Python | |
19f7538ec804916e2ba702669f1aa3e69de44592 | add parallel_map | jobovy/galpy,followthesheep/galpy,followthesheep/galpy,followthesheep/galpy,jobovy/galpy,jobovy/galpy,jobovy/galpy,followthesheep/galpy | galpy/util/multi.py | galpy/util/multi.py | #Brian Refsdal's parallel_map, from astropython.org
#Not sure what license this is released under, but until I know better:
#
#Copyright (c) 2010, Brian Refsdal
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
#1. Redistributions of source code must retain the above copyright
#notice, this list of conditions and the following disclaimer.
#
#2. Redistributions in binary form must reproduce the above copyright
#notice, this list of conditions and the following disclaimer in the
#documentation and/or other materials provided with the distribution.
#
#3. The name of the author may not be used to endorse or promote
#products derived from this software without specific prior written
#permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
# Module-level capability probe: fall back to serial map() when the
# multiprocessing module is missing or cpu_count() is unsupported.
_multi=False
_ncpus=1
try:
    # May raise ImportError
    import multiprocessing
    _multi=True
    # May raise NotImplementedError
    _ncpus = multiprocessing.cpu_count()
except:  # NOTE(review): bare except also hides unrelated errors -- confirm intended
    pass

__all__ = ('parallel_map',)
def worker(f, ii, chunk, out_q, err_q, lock):
    """
    A worker function that maps an input function over a
    slice of the input iterable.

    :param f : callable function that accepts argument from iterable
    :param ii : process ID
    :param chunk: slice of input iterable
    :param out_q: thread-safe output queue
    :param err_q: thread-safe queue to populate on exception
    :param lock : thread-safe lock to protect a resource
           ( useful in extending parallel_map() )
    """
    vals = []
    # iterate over slice; the first failure aborts this whole slice and
    # reports the exception to the parent via err_q (Python 2 syntax).
    for val in chunk:
        try:
            result = f(val)
        except Exception, e:
            err_q.put(e)
            return
        vals.append(result)
    # output the result and task ID to output queue; ii doubles as the
    # index of this slice in the final result array.
    out_q.put( (ii, vals) )
def run_tasks(procs, err_q, out_q, num):
    """
    A function that executes populated processes and processes
    the resultant array. Checks error queue for any exceptions.

    :param procs: list of Process objects
    :param out_q: thread-safe output queue
    :param err_q: thread-safe queue to populate on exception
    :param num : length of resultant array
    """
    # function to terminate processes that are still running.
    die = (lambda vals : [val.terminate() for val in vals
                          if val.exitcode is None])
    try:
        for proc in procs:
            proc.start()
        for proc in procs:
            proc.join()
    except Exception, e:
        # kill all slave processes on ctrl-C (Python 2 syntax)
        die(procs)
        raise e
    if not err_q.empty():
        # kill all on any exception from any one slave, then re-raise it here
        die(procs)
        raise err_q.get()
    # Processes finish in arbitrary order. Process IDs double
    # as index in the resultant array.
    results=[None]*num;
    while not out_q.empty():
        idx, result = out_q.get()
        results[idx] = result
    # Remove extra dimension added by array_split
    return list(numpy.concatenate(results))
def parallel_map(function, sequence, numcores=None):
    """
    A parallelized version of the native Python map function that
    utilizes the Python multiprocessing module to divide and
    conquer sequence.

    parallel_map does not yet support multiple argument sequences.

    :param function: callable function that accepts argument from iterable
    :param sequence: iterable sequence
    :param numcores: number of cores to use (defaults to all detected CPUs)
    """
    if not callable(function):
        raise TypeError("input function '%s' is not callable" %
                        repr(function))
    if not numpy.iterable(sequence):
        raise TypeError("input '%s' is not iterable" %
                        repr(sequence))
    size = len(sequence)
    # Fall back to the builtin map when multiprocessing is unavailable
    # or pointless (single element).
    if not _multi or size == 1:
        return map(function, sequence)
    if numcores is None:
        numcores = _ncpus
    # Returns a started SyncManager object which can be used for sharing
    # objects between processes. The returned manager object corresponds
    # to a spawned child process and has methods which will create shared
    # objects and return corresponding proxies.
    manager = multiprocessing.Manager()
    # Create FIFO queue and lock shared objects and return proxies to them.
    # The managers handles a server process that manages shared objects that
    # each slave process has access to. Bottom line -- thread-safe.
    out_q = manager.Queue()
    err_q = manager.Queue()
    lock = manager.Lock()
    # if sequence is less than numcores, only use len sequence number of
    # processes
    if size < numcores:
        numcores = size
    # group sequence into numcores-worth of chunks
    sequence = numpy.array_split(sequence, numcores)
    procs = [multiprocessing.Process(target=worker,
                                     args=(function, ii, chunk, out_q, err_q, lock))
             for ii, chunk in enumerate(sequence)]
    return run_tasks(procs, err_q, out_q, numcores)
if __name__ == "__main__":
    """
    Unit test of parallel_map()

    Create an arbitrary length list of references to a single
    matrix containing random floats and compute the eigenvals
    in serial and parallel. Compare the results and timings.
    """
    import time
    # Benchmark: eigvals of the same random matrix, numtasks times.
    # (Python 2 print statements below.)
    numtasks = 5
    #size = (1024,1024)
    size = (512,512)
    vals = numpy.random.rand(*size)
    f = numpy.linalg.eigvals
    iterable = [vals]*numtasks
    print ('Running numpy.linalg.eigvals %iX on matrix size [%i,%i]' %
           (numtasks,size[0],size[1]))
    tt = time.time()
    presult = parallel_map(f, iterable)
    print 'parallel map in %g secs' % (time.time()-tt)
    tt = time.time()
    result = map(f, iterable)
    print 'serial map in %g secs' % (time.time()-tt)
    # Serial and parallel runs must agree bit-for-bit.
    assert (numpy.asarray(result) == numpy.asarray(presult)).all()
| bsd-3-clause | Python | |
3b4c1ec38e4725536bb11ec04ec0624282e166c0 | Create proxy.py | ruoshuifuping/AdslProxyPool | proxy.py | proxy.py | import subprocess
import time
class ProxyClient():
    # Keeps the local tinyproxy service alive by restarting it periodically.

    def restart_client(self):
        """Restart tinyproxy via systemctl; repeat hourly on success.

        NOTE(review): on failure the loop retries immediately with no
        back-off, hammering systemctl -- confirm that is intended.
        """
        while True:
            (status, output) = subprocess.getstatusoutput('systemctl restart tinyproxy.service')
            if status == 0:
                print("tinyproxy 重启成功")
                # Sleep an hour, then restart again.
                time.sleep(3600)
            else:
                print("tinyproxy 重启失败,再次重启")


def proxy():
    """Entry point: run the tinyproxy restart loop forever."""
    client = ProxyClient()
    client.restart_client()


if __name__ == '__main__':
    proxy()
| mit | Python | |
9a1cf12d2eab79abe313cc211b697e05d4a1d3c1 | Solve 010 | daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various | programming_challenges/010.py | programming_challenges/010.py | '''
Problem 010
Solutionum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
Find the sum of all the primes below two million.
Copyright 2017 Dave Cuthbert, MIT License
'''
import math
def get_primes(number):
    """Yield primes forever, starting with the first prime >= *number*."""
    candidate = number
    while True:
        if is_prime(candidate):
            yield candidate
        candidate += 1


def is_prime(number):
    """Return True when *number* is prime (trial division up to sqrt)."""
    if number <= 1:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    limit = int(math.sqrt(number) + 1)
    return all(number % divisor for divisor in range(3, limit, 2))


def solve_problem(maximum_value):
    """Sum all primes strictly below *maximum_value*."""
    total = 0
    prime_source = get_primes(2)
    while True:
        prime = next(prime_source)
        if prime >= maximum_value:
            return total
        total += prime


if __name__ == "__main__":
    maximum_value = 2000000
    print(solve_problem(maximum_value))
| mit | Python | |
fdcf6fe1792c462221e0c6c35c13cc23ad39a2e3 | Create pythonhelloworld.py | KateKramer/classal3 | pythonhelloworld.py | pythonhelloworld.py | print "hello world"
| unlicense | Python | |
fafbb9e84a63f0de1f84ce94ba8766a8fdc23f8e | package for the item containers | rrpg/engine,rrpg/engine | models/item_container.py | models/item_container.py | # -*- coding: utf-8 -*-
from models.Model import Model
class item_container:
    """
    Class to interact with the item containers, such as chests.
    """

    @staticmethod
    def getAllFromIdArea(idArea):
        """Return the area's containers, each annotated with its type label."""
        type_labels = model.getTypes()
        containers = model.loadBy({'id_area': idArea})
        for container in containers:
            container['type_label'] = type_labels[container['id_item_container_type']]
        return containers
class model(Model):
    """
    Class to interact with the values in the database.
    """

    # Columns of the item_container table.
    fields = (
        'id_item_container',
        'id_item_container_type',
        'id_area',
        'items'
    )

    @staticmethod
    def getTypes():
        """
        Returns the available types as an dict with ids as keys and labels as
        values

        @return dict the types
        """
        query = "\
            SELECT\
                id_item_container_type,\
                label\
            FROM\
                item_container_type\
        "
        return {t['id_item_container_type']: t['label'] for t in Model.fetchAllRows(query)}
class exception(BaseException):
    """
    Class for the exceptions concerning item containers.
    """
    # NOTE(review): deriving from BaseException (not Exception) means a
    # generic ``except Exception`` will NOT catch this -- confirm intended.
    pass
| mit | Python | |
3490b1172f8df77af3963c86ce3967a6d9b4af5e | Add gender choices factory | masschallenge/django-accelerator,masschallenge/django-accelerator | accelerator/tests/factories/gender_choices_factory.py | accelerator/tests/factories/gender_choices_factory.py | # MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals

import swapper
from factory import Sequence
from factory.django import DjangoModelFactory
# Resolve the concrete (swappable) GenderChoices model.
GenderChoices = swapper.load_model('accelerator', 'GenderChoices')


class GenderChoicesFactory(DjangoModelFactory):
    """Factory producing GenderChoices rows with unique sequential names."""

    class Meta:
        model = GenderChoices

    # test_choice0, test_choice1, ...
    name = Sequence(lambda n: "test_choice{0}".format(n))
| mit | Python | |
44e6c6007a37dc4c9375303a6555c646618d4e38 | add tf dadaset with generator with args | jeffzhengye/pylearn,jeffzhengye/pylearn,jeffzhengye/pylearn,jeffzhengye/pylearn | tensorflow_learning/tf2/tf_dataset_from_generator_args.py | tensorflow_learning/tf2/tf_dataset_from_generator_args.py | # -*- coding: utf-8 -*-
'''
@author: jeffzhengye
@contact: yezheng@scuec.edu.cn
@file: tf_dataset_from_generator_args.py
@time: 2021/1/5 16:27
@desc:
'''
import tensorflow as tf
import numpy as np
import collections
def movingWindow(data, window_size):
    """Yield (current, window_member) pairs over *data* with a sliding window.

    For every element from index ``window_size - 1`` onward, the window of
    the last ``window_size`` elements (current element included) is emitted
    as one ``(current, member)`` pair per window member.

    ``window_size`` is coerced to int because ``tf.data`` passes generator
    args through as numpy values.
    """
    window_size = int(window_size)
    # Pre-fill with the first window_size - 1 elements; maxlen makes the
    # deque drop the oldest element automatically.
    buffer = collections.deque(data[:window_size - 1], maxlen=window_size)
    for datum in data[window_size - 1:]:
        buffer.append(datum)
        for member in buffer:
            yield datum, member
# Demo: build a tf.data.Dataset from a generator that takes arguments.
window_size = 2
data = np.arange(10)

dataset = tf.data.Dataset.from_generator(
    movingWindow,
    # args are serialized and handed to the generator as numpy values
    args=(data, window_size),
    output_types=(np.int32, np.int32)
)

# Compare the raw generator's first element with the dataset's.
print(next(movingWindow(data, window_size)))
print(next(iter(dataset)))
| unlicense | Python | |
79637efbdda03cea88fa6a59b24a27f1d393c79f | Add tests for previous commit | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/util/tests/test_es_interface.py | corehq/util/tests/test_es_interface.py | from django.test import SimpleTestCase
from mock import ANY, patch
from corehq.apps.es.tests.utils import es_test
from corehq.elastic import SerializationError, get_es_new
from corehq.util.es.interface import ElasticsearchInterface
@es_test
class TestESInterface(SimpleTestCase):
    # Verifies the search parameters ElasticsearchInterface.scan() forwards
    # to the underlying elasticsearch client (search() is mocked out).

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.es = get_es_new()

    def _validate_es_scan_search_params(self, scan_query, search_query):
        """Call ElasticsearchInterface.scan() and test that the resulting API
        search parameters match what we expect.

        Notably:
        - Search call does not include the `search_type='scan'`.
        - Calling `scan(..., query=scan_query, ...)` results in an API call
          where `body == search_query`.
        """
        interface = ElasticsearchInterface(self.es)
        skw = {
            "index": "et",
            "doc_type": "al",
            "request_timeout": ANY,
            "scroll": ANY,
            "size": ANY,
        }
        with patch.object(self.es, "search") as search:
            try:
                # list() drives the scan generator so search() actually runs
                list(interface.scan(skw["index"], scan_query, skw["doc_type"]))
            except SerializationError:
                # fails to serialize the Mock object.
                pass
            search.assert_called_once_with(body=search_query, **skw)

    def test_scan_no_searchtype_scan(self):
        """Tests that search_type='scan' is not added to the search parameters"""
        self._validate_es_scan_search_params({}, {"sort": "_doc"})

    def test_scan_query_extended(self):
        """Tests that sort=_doc is added to an non-empty query"""
        self._validate_es_scan_search_params({"_id": "abc"},
                                             {"_id": "abc", "sort": "_doc"})

    def test_scan_query_sort_safe(self):
        """Tests that a provided a `sort` query will not be overwritten"""
        self._validate_es_scan_search_params({"sort": "_id"}, {"sort": "_id"})
| bsd-3-clause | Python | |
a9195264349b695daf02abb5cf17ced8a6a6110c | Add setup.py | AntagonistHQ/openprovider.py | setup.py | setup.py | # coding=utf-8
from distutils.core import setup
# Distribution metadata for the openprovider.py package.
setup(
    name='openprovider.py',
    version='0.0.1',
    author='Antagonist B.V.',
    author_email='info@antagonist.nl',
    packages=['openprovider'],
    url='http://pypi.python.org/pypi/openprovider.py/',
    license='LICENSE.rst',
    description='An unofficial library for the OpenProvider API',
    long_description=open('README.rst').read(),
    install_requires=[
        "requests >= 2.3.0",
        "lxml >= 3.3.5"
    ]
)
e8568c3fd621a37020de015fac59dfd15141b51f | Update praw to 3.5.0 | michael-lazar/rtv,michael-lazar/rtv,michael-lazar/rtv,shaggytwodope/rtv,shaggytwodope/rtv,5225225/rtv,5225225/rtv | setup.py | setup.py | import sys
import setuptools
from version import __version__ as version
requirements = ['tornado', 'praw==3.5.0', 'six', 'requests', 'kitchen']

# Python 2: add required concurrent.futures backport from Python 3.2
# NOTE(review): 'futures' is both appended here and declared below via
# extras_require -- confirm the duplication is intended.
if sys.version_info.major <= 2:
    requirements.append('futures')

setuptools.setup(
    name='rtv',
    version=version,
    description='A simple terminal viewer for Reddit (Reddit Terminal Viewer)',
    long_description=open('README.rst').read(),
    url='https://github.com/michael-lazar/rtv',
    author='Michael Lazar',
    author_email='lazar.michael22@gmail.com',
    license='MIT',
    keywords='reddit terminal praw curses',
    packages=['rtv'],
    package_data={'rtv': ['templates/*', 'rtv.cfg']},
    data_files=[("share/man/man1", ["rtv.1"])],
    extras_require={
        ':python_version=="2.6" or python_version=="2.7"': ['futures']},
    install_requires=requirements,
    entry_points={'console_scripts': ['rtv=rtv.__main__:main']},
    classifiers=[
        'Intended Audience :: End Users/Desktop',
        'Environment :: Console :: Curses',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Topic :: Terminals',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',
    ],
)
| import sys
import setuptools
from version import __version__ as version
requirements = ['tornado', 'praw==3.4.0', 'six', 'requests', 'kitchen']
# Python 2: add required concurrent.futures backport from Python 3.2
if sys.version_info.major <= 2:
requirements.append('futures')
setuptools.setup(
name='rtv',
version=version,
description='A simple terminal viewer for Reddit (Reddit Terminal Viewer)',
long_description=open('README.rst').read(),
url='https://github.com/michael-lazar/rtv',
author='Michael Lazar',
author_email='lazar.michael22@gmail.com',
license='MIT',
keywords='reddit terminal praw curses',
packages=['rtv'],
package_data={'rtv': ['templates/*', 'rtv.cfg']},
data_files=[("share/man/man1", ["rtv.1"])],
extras_require={
':python_version=="2.6" or python_version=="2.7"': ['futures']},
install_requires=requirements,
entry_points={'console_scripts': ['rtv=rtv.__main__:main']},
classifiers=[
'Intended Audience :: End Users/Desktop',
'Environment :: Console :: Curses',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: Terminals',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: News/Diary',
],
)
| mit | Python |
d2fd2a473747fa90183c78c0c12cd933bdc1a4b6 | add solution for 2019 day 1 part 1 | kmcginn/advent-of-code | 2019/day01/rocket.py | 2019/day01/rocket.py | #! python3
"""
from: https://adventofcode.com/2019/day/1
--- Day 1: The Tyranny of the Rocket Equation ---
Santa has become stranded at the edge of the Solar System while delivering presents to other planets! To accurately calculate his position in space, safely align his warp drive, and return to Earth in time to save Christmas, he needs you to bring him measurements from fifty stars.
Collect stars by solving puzzles. Two puzzles will be made available on each day in the Advent calendar; the second puzzle is unlocked when you complete the first. Each puzzle grants one star. Good luck!
The Elves quickly load you into a spacecraft and prepare to launch.
At the first Go / No Go poll, every Elf is Go until the Fuel Counter-Upper. They haven't determined the amount of fuel required yet.
Fuel required to launch a given module is based on its mass. Specifically, to find the fuel required for a module, take its mass, divide by three, round down, and subtract 2.
For example:
For a mass of 12, divide by 3 and round down to get 4, then subtract 2 to get 2.
For a mass of 14, dividing by 3 and rounding down still yields 4, so the fuel required is also 2.
For a mass of 1969, the fuel required is 654.
For a mass of 100756, the fuel required is 33583.
The Fuel Counter-Upper needs to know the total fuel requirement. To find it, individually calculate the fuel needed for the mass of each module (your puzzle input), then add together all the fuel values.
What is the sum of the fuel requirements for all of the modules on your spacecraft?
"""
import os
def main():
    """Solve the problem!"""
    script_dir = os.path.dirname(__file__)
    file_path = os.path.join(script_dir, './input.txt')
    with open(file_path) as input_file:
        # Fuel per module: mass // 3 - 2; total is the sum over all lines.
        total_fuel = sum(int(line) // 3 - 2 for line in input_file)
    print(total_fuel)


if __name__ == "__main__":
    main()
| mit | Python | |
4f42bf42c6dcb44f7a0972bb9c00818d087c808f | Add file | mpsonntag/bulk-rename,mpsonntag/bulk-rename | setup.py | setup.py | try:
from setuptools import setup
except ImportError as ex:
from distutils.core import setup
packages = [
'bren'
]
with open('README.rst') as f:
description_text = f.read()
install_req = ["pyyaml"]
setup(
name='bulkrename',
version='1.0.0',
description='bulk file rename',
author='Michael Sonntag',
packages=packages,
test_suite='test',
install_requires=install_req,
include_package_data=True,
long_description=description_text,
license="BSD"
)
| bsd-3-clause | Python | |
0954ec9e191f1a5280ea190ca025d005683db595 | add an analyze script for kld | adrn/StreamMorphology,adrn/StreamMorphology,adrn/StreamMorphology | scripts/ensemble/analyze.py | scripts/ensemble/analyze.py | # coding: utf-8
""" Analyze the output from KLD mapping """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
# Third-party
from astropy import log as logger
import matplotlib.pyplot as plt
import numpy as np
# Project
from streammorphology.ensemble import read
def main(path, vbounds=None):
    """Plot the KLD threshold-time map over the initial-condition grid.

    :param path: directory containing ``w0.npy`` and ``allkld.dat``
    :param vbounds: optional (vmin, vmax) pair for the color scale
    """
    # read in initial conditions
    w0 = np.load(os.path.join(path, 'w0.npy'))
    norbits = len(w0)

    # read freqmap output
    cache_filename = os.path.join(path, 'allkld.dat')
    d = read(cache_filename, norbits=len(w0))

    logger.info("{} total orbits".format(norbits))
    logger.info("\t{} successful".format(d['success'].sum()))
    logger.info("\t{} not successful".format((~d['success']).sum()))

    good_ix = d['success']
    dens_map_t = d['thresh_t'] / 1000.  # presumably Myr -> Gyr; confirm units
    dens_map_t = dens_map_t[good_ix]

    # color scaling
    if vbounds is None:
        vmin = dens_map_t.min()
        vmax = dens_map_t.max()
    else:
        vmin,vmax = vbounds

    # plot initial condition grid, colored by fractional diffusion rate
    fig,ax = plt.subplots(1,1,figsize=(9.75,8))
    ax.set_xlim(0, max([w0[:,0].max(),w0[:,2].max()]))
    ax.set_ylim(*ax.get_xlim())

    # automatically determine symbol size
    xy_pixels = ax.transData.transform(np.vstack([w0[good_ix,0],w0[good_ix,2]]).T)
    xpix, ypix = xy_pixels.T

    # In matplotlib, 0,0 is the lower left corner, whereas it's usually the upper
    # right for most image software, so we'll flip the y-coords
    width, height = fig.canvas.get_width_height()
    ypix = height - ypix

    # this assumes that your data-points are equally spaced
    sz = max((xpix[1]-xpix[0])**2, (ypix[1]-ypix[0])**2)

    # plot bad points
    ax.scatter(w0[~good_ix,0], w0[~good_ix,2], c='r', s=sz, marker='s')

    # plot good points, colored
    c = ax.scatter(w0[good_ix,0], w0[good_ix,2], c=dens_map_t,
                   vmin=vmin, vmax=vmax, cmap='Greys', s=sz, marker='s')

    ax.set_xlabel(r'$x_0$ $[{\rm kpc}]$')
    ax.set_ylabel(r'$z_0$ $[{\rm kpc}]$')

    fig.colorbar(c)
    fig.tight_layout()
    fig.savefig(os.path.join(path,"kld_map.pdf"))
if __name__ == '__main__':
    from argparse import ArgumentParser

    # Define parser object
    parser = ArgumentParser(description="")
    parser.add_argument("-p", "--path", dest="path", required=True,
                        help="Path to a Numpy memmap file containing the results "
                             "of frequency mapping.")
    parser.add_argument("--vbounds", dest="vbounds", default=None, type=str,
                        help="bounds of color scale")
    args = parser.parse_args()

    if args.vbounds is not None:
        # "vmin,vmax" -> pair of floats (Python 2 map returns a list)
        vbounds = map(float, args.vbounds.split(","))
    else:
        vbounds = None

    main(args.path, vbounds=vbounds)
| mit | Python | |
7e3a894796bb11eb77c0352d5104754086e70f8e | Add setup.py | airekans/recall | setup.py | setup.py | from setuptools import setup, find_packages
import sys, os
version = '0.1'
setup(name='recall',
version=version,
description="Python High performance RPC framework based on protobuf",
long_description="""\
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='rpc gevent',
author='Yaolong Huang',
author_email='airekans@gmail.com',
url='https://github.com/airekans/recall',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'gevent',
'protobuf>=2.3'
],
entry_points="""
# -*- Entry points: -*-
""",
)
| mit | Python | |
24bbb4fafa0732252c4d8561783826ed5eba6cff | add setup.py | youtalk/python-voicetext | setup.py | setup.py | # -*- coding: utf-8 -*-
from setuptools import setup
# Distribution metadata for the VoiceText Web API client library.
setup(name='python-voicetext',
      version='0.1',
      license='Apache License 2.0',
      description='Python library of VoiceText Web API',
      author='Yutaka Kondo',
      author_email='yutaka.kondo@youtalk.jp',
      url='https://github.com/youtalk/python-voicetext',
      packages=['voicetext'],
      download_url='https://github.com/youtalk/python-voicetext/releases/tag/0.1',
      requires=['requests', 'PyAudio'],
      platforms = ['POSIX', 'Mac OS X', 'Windows'],
      )
| apache-2.0 | Python | |
fd3eaa3810ce82db864b3fcafe61d16ab53d85e5 | Add simple Python web server for performance testing | akisaarinen/ccf,akisaarinen/ccf | perftest/scripts/webserver.py | perftest/scripts/webserver.py | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class Handler(BaseHTTPRequestHandler):
    """Answers every GET/POST with a fixed ccf-style JSON body."""

    def do(self):
        self.send_response(200)
        # Bug fix: end_headers() was missing, so the JSON body was emitted
        # into the header section, producing a malformed HTTP response.
        self.end_headers()
        self.wfile.write('{"headers":{"type":"type"},"content":{"b":2}}')

    def do_GET(self):
        self.do()

    def do_POST(self):
        self.do()
def main():
    """Serve on port 8080 until Ctrl-C (Python 2 print statements)."""
    try:
        server = HTTPServer(('', 8080), Handler)
        print 'started httpserver...'
        server.serve_forever()
    except KeyboardInterrupt:
        print '^C received, shutting down server'
        server.socket.close()


if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
891737a86e8f7007ac6d040f3f01afc420cd8c99 | Create 2-keys-keyboard.py | kamyu104/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/2-keys-keyboard.py | Python/2-keys-keyboard.py | # Time: O(sqrt(n))
# Space: O(1)
# Initially on a notepad only one character 'A' is present.
# You can perform two operations on this notepad for each step:
#
# Copy All: You can copy all the characters present on the notepad (partial copy is not allowed).
# Paste: You can paste the characters which are copied last time.
# Given a number n.
# You have to get exactly n 'A' on the notepad by performing the minimum number of steps permitted.
# Output the minimum number of steps to get n 'A'.
#
# Example 1:
# Input: 3
# Output: 3
# Explanation:
# Intitally, we have one character 'A'.
# In step 1, we use Copy All operation.
# In step 2, we use Paste operation to get 'AA'.
# In step 3, we use Paste operation to get 'AAA'.
# Note:
# The n will be in the range [1, 1000].
class Solution(object):
    """Minimum copy/paste steps to reach exactly n 'A' characters."""

    def minSteps(self, n):
        """
        :type n: int
        :rtype: int

        The answer is the sum of n's prime factors (with multiplicity):
        each factor f costs one Copy All plus f - 1 Pastes.
        """
        steps = 0
        remaining = n
        factor = 2
        while factor * factor <= remaining:
            if remaining % factor:
                factor += 1
            else:
                remaining //= factor
                steps += factor
        # Whatever is left is itself prime (or 1).
        if remaining > 1:
            steps += remaining
        return steps
| mit | Python | |
293983d24467cbb224f29b4a6149b518fe966603 | Add forest Predictor. | vincentadam87/gatsby-hackathon-seizure,vincentadam87/gatsby-hackathon-seizure | code/python/seizures/prediction/ForestPredictor.py | code/python/seizures/prediction/ForestPredictor.py | from abc import abstractmethod
from sklearn.ensemble import RandomForestClassifier
import numpy as np
class ForestPredictor(object):
    """
    A simple application of RandomForestClassifier.

    @author: Shaun
    """
    # Fixed: the @abstractmethod decorators were removed -- they had no
    # effect here (the class does not use ABCMeta) and were misleading,
    # since this is a concrete class instantiated directly below.

    def __init__(self):
        # Default-configured forest; fit() must be called before predict().
        self.clf = RandomForestClassifier()

    def fit(self, X, y):
        """
        Method to fit the model.

        Parameters:
        X - 2d numpy array of training data
        y - 1d numpy array of training labels
        """
        self.clf = self.clf.fit(X, y)

    def predict(self, X):
        """
        Method to apply the model to data; returns the positive-class
        probability for each row of X.

        Parameters:
        X - 2d numpy array of test data
        """
        return self.clf.predict_proba(X)[:, 1]
if __name__ == '__main__':
    # Smoke test on random data: fit N random points with binary labels,
    # then predict the positive-class probability of one fresh point.
    # (Python 2 print statement below.)
    N = 1000
    D = 2
    X = np.random.rand(N, D)
    y = np.random.randint(0, 2, N)
    predictor = ForestPredictor()
    predictor.fit(X, y)
    x = np.random.rand(1, D)
    pred = predictor.predict(x)
    print pred
| bsd-2-clause | Python | |
c2d2d086d336a48593cae6950584566fc40a68b0 | 添加498789867插件的源代码 | ecator/anki-theme-baiduplay,ecator/anki-theme-baiduplay,ecator/anki-theme-baiduplay | Replay_buttons_on_card.py | Replay_buttons_on_card.py | # -*- mode: Python ; coding: utf-8 -*-
#
# Copyright © 2013–16 Roland Sieker <ospalh@gmail.com>
#
# License: GNU AGPL, version 3 or later;
# http://www.gnu.org/copyleft/agpl.html
"""Add-on for Anki 2 to add AnkiDroid-style replay buttons."""
import os
import re
import shutil
from BeautifulSoup import BeautifulSoup
from PyQt4.QtCore import QUrl
from PyQt4.QtGui import QDesktopServices
from anki.cards import Card
from anki.hooks import addHook, wrap
from anki.sound import play
from aqt import mw
from aqt.browser import Browser
from aqt.browser import DataModel
from aqt.clayout import CardLayout
from aqt.reviewer import Reviewer
__version__ = "2.0.0"
sound_re = ur"\[sound:(.*?)\]"
hide_class_name = u'browserhide'
def svg_css(Card):
    """Add the svg button style to the card style.

    Prepended to the result of old_css -- the saved original Card.css,
    wrapped elsewhere in this add-on.
    """
    return u"""<style scoped>
.replaybutton span {
display: inline-block;
vertical-align: middle;
padding: 5px;
}
.replaybutton span svg {
stroke: none;
fill: black;
display: inline;
height: 1em;
width: 1em;
min-width: 12px;
min-height: 12px;
}
</style>
""" + old_css(Card)
def play_button_filter(
qa_html, qa_type, dummy_fields, dummy_model, dummy_data, dummy_col):
u"""
Filter the questions and answers to add play buttons.
"""
def add_button(sound):
u"""
Add a button after the match.
Add a button after the match to replay the audio. The title is
set to "Replay" on the question side to hide information or to
the file name on the answer.
"""
if 'q' == qa_type:
title = u"Replay"
else:
title = sound.group(1)
return u"""{orig}<a href='javascript:py.link("ankiplay{fn}");' \
title="{ttl}" class="replaybutton browserhide"><span><svg viewBox="0 0 32 32">\
<polygon points="11,25 25,16 11,7"/>Replay</svg></span></a>\
<span style="display: none;">[sound:{fn}]</span>""".format(
orig=sound.group(0), fn=sound.group(1), ttl=title)
# The [ ] are the square brackets that we want to
# appear as brackets and not trigger the playing of the
# sound. The span inside the a around the svg is there to
# bring this closer in line with AnkiDroid.
return re.sub(sound_re, add_button, qa_html)
def review_link_handler_wrapper(reviewer, url):
u"""Play the sound or call the original link handler."""
if url.startswith("ankiplay"):
play(url[8:])
else:
original_review_link_handler(reviewer, url)
def simple_link_handler(url):
u"""Play the file."""
if url.startswith("ankiplay"):
play(url[8:])
else:
QDesktopServices.openUrl(QUrl(url))
def add_clayout_link_handler(clayout, dummy_t):
u"""Make sure we play the files from the card layout window."""
clayout.forms[-1]['pform'].frontWeb.setLinkHandler(simple_link_handler)
clayout.forms[-1]['pform'].backWeb.setLinkHandler(simple_link_handler)
def add_preview_link_handler(browser):
u"""Make sure we play the files from the preview window."""
browser._previewWeb.setLinkHandler(simple_link_handler)
def reduce_format_qa(self, text):
u"""Remove elements with a given class before displaying."""
soup = BeautifulSoup(text)
for hide in soup.findAll(True, {'class': re.compile(
'\\b' + hide_class_name + '\\b')}):
hide.extract()
return original_format_qa(self, unicode(soup))
original_review_link_handler = Reviewer._linkHandler
Reviewer._linkHandler = review_link_handler_wrapper
original_format_qa = DataModel.formatQA
DataModel.formatQA = reduce_format_qa
old_css = Card.css
Card.css = svg_css
addHook("mungeQA", play_button_filter)
Browser._openPreview = wrap(Browser._openPreview, add_preview_link_handler)
CardLayout.addTab = wrap(CardLayout.addTab, add_clayout_link_handler)
| mit | Python | |
c19bc112e7e13f9d63746dfd2b073edf369f8e82 | add `setup.py` | lookup/lu-dj-utils,lookup/lu-dj-utils | setup.py | setup.py | #!/usr/bin/env python
from __future__ import absolute_import, print_function, unicode_literals
from setuptools import find_packages, setup
import lu_dj_utils
with open('README.rst') as f:
readme = f.read()
packages = find_packages()
classifiers = (
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
# the FSF refers to it as "Modified BSD License". Other names include
# "New BSD", "revised BSD", "BSD-3", or "3-clause BSD"
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
)
setup(
name="lu-dj-utils",
version=lu_dj_utils.__version__,
description='',
long_description=readme,
author='German Larrain',
author_email='glarrain@users.noreply.github.com',
url='https://github.com/lookup/lu-dj-utils',
packages=packages,
license='3-clause BSD', # TODO: verify name is correct
zip_safe=False,
classifiers=classifiers,
)
| bsd-3-clause | Python | |
7255a3213418fe4bb3365bd60537f7e88af0c4cd | Add bare-bones setup.py and build file structure | CDIPS-AI-2017/pensieve | setup.py | setup.py | from setuptools import setup, find_packages
setup(name='pensieve',
version='0.0.1',
description=u"A Python package to extract character mems from a corpus of text",
author=u"CDIPS-AI 2017",
author_email='sam.dixon@berkeley.edu',
url='https://github.com/CDIPS-AI-2017/pensieve',
license='Apache 2.0',
) | apache-2.0 | Python | |
73edec331031de644320927800375b9f84f6e143 | Read requirements.txt for setup install_requires, keywords and classifiers added for PyPi | 9p0le/simiki,tankywoo/simiki,tankywoo/simiki,tankywoo/simiki,9p0le/simiki,zhaochunqi/simiki,9p0le/simiki,zhaochunqi/simiki,zhaochunqi/simiki | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
import simiki
entry_points = {
"console_scripts": [
"simiki = simiki.cli:main",
]
}
requires = open("requirements.txt").readlines()
setup(
name = "simiki",
version = simiki.__version__,
url = "https://github.com/tankywoo/simiki",
author = "Tanky Woo",
author_email = "me@tankywoo.com",
description = "Simiki is a simple wiki framework, written in Python.",
keywords = "simiki, wiki, generator",
license = "MIT License",
packages = find_packages(),
include_package_data=True,
install_requires = requires,
entry_points = entry_points,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
import simiki
entry_points = {
"console_scripts": [
"simiki = simiki.cli:main",
]
}
requires = [
"Markdown",
"Pygments",
"Jinja2",
"PyYAML",
"docopt",
]
setup(
name = "simiki",
version = simiki.__version__,
url = "https://github.com/tankywoo/simiki",
author = "Tanky Woo",
author_email = "me@tankywoo.com",
description = "Simiki is a simple wiki framework, written in Python.",
license = "MIT License",
packages = find_packages(),
include_package_data=True,
install_requires = requires,
entry_points = entry_points,
)
| mit | Python |
88a617758eb869786d0703b2b53b5a030d7e7ac2 | Add Python 3.4 to working environments | jazzband/django-mongonaut,lchsk/django-mongonaut,pydanny/django-mongonaut,pydanny/django-mongonaut,pydanny/django-mongonaut,lchsk/django-mongonaut,jazzband/django-mongonaut,jazzband/django-mongonaut,lchsk/django-mongonaut | setup.py | setup.py | import os, sys
from setuptools import setup, find_packages
import mongonaut
LONG_DESCRIPTION = open('README.rst').read() + "\n\n"
CHANGELOG = open('CHANGELOG.rst').read()
LONG_DESCRIPTION += CHANGELOG
version = mongonaut.__version__
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='django-mongonaut',
version=version,
description="An introspective interface for Django and MongoDB",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='mongodb,django',
author=mongonaut.__author__,
author_email='pydanny@gmail.com',
url='http://github.com/pydanny/django-mongonaut',
license='MIT',
packages=find_packages(exclude=['examples']),
include_package_data=True,
install_requires=['mongoengine>=0.5.2'],
zip_safe=False,
use_2to3 = True,
)
| import os, sys
from setuptools import setup, find_packages
import mongonaut
LONG_DESCRIPTION = open('README.rst').read() + "\n\n"
CHANGELOG = open('CHANGELOG.rst').read()
LONG_DESCRIPTION += CHANGELOG
version = mongonaut.__version__
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
setup(
name='django-mongonaut',
version=version,
description="An introspective interface for Django and MongoDB",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='mongodb,django',
author=mongonaut.__author__,
author_email='pydanny@gmail.com',
url='http://github.com/pydanny/django-mongonaut',
license='MIT',
packages=find_packages(exclude=['examples']),
include_package_data=True,
install_requires=['mongoengine>=0.5.2'],
zip_safe=False,
use_2to3 = True,
)
| mit | Python |
2344a5e72d7a3a31d014ca31f42023740c56d060 | add ndncache/fieldpercent.py | melon-li/tools,melon-li/tools,melon-li/tools | ndncache/fieldpercent.py | ndncache/fieldpercent.py | #!/usr/bin/python
#coding:utf-8
'''Function:analyze bro conn logs and print specific field percent,
@param: log directory
@param: field list analyze
author:melon li
date: 2016.03.28
'''
import sys
import os
FIELDS=['ts', 'uid', 'id.orig_h', 'id.orig_p', 'id.resp_h', 'id.resp_p',
'proto', 'service duration', 'orig_bytes', 'resp_bytes', 'conn_state',
'local_orig','local_resp', 'missed_bytes', 'history orig_pkts', 'orig_ip_bytes',
'resp_pkts', 'resp_ip_bytes', 'tunnel_parents']
#FIELDS_PERCENT=['id.orig_h', 'id.orig_p', 'id.resp_h', 'id.resp_p',
# 'proto', 'service duration', 'orig_bytes', 'resp_bytes', 'conn_state',
# 'local_orig']
TYPES=['time', 'string', 'addr', 'port', 'addr', 'port', 'enum', 'string', 'interval',
'count', 'count', 'string', 'bool', 'bool', 'count', 'string', 'count', 'count',
'count', 'count', 'string']
def usage():
print "Usage:%s <log_dir> <fields>" % sys.argv[0]
print "\tFor example: %s /home/ \"id.resp_p id.orig_p\"" % sys.argv[0]
sys.exit(1)
try:
log_dir = sys.argv[1]
fields = sys.argv[2]
except:
usage()
if not os.path.isdir(log_dir):
print "ERROR: %s does not exist!" % log_dir
usage()
sys.exit(2)
#print log_dir
#print fields
files = os.listdir(log_dir)
files = [elem for elem in files if elem.find('conn.log') != -1 ]
files.sort()
for f in files:
f_path = os.path.join(log_dir, f)
with open(f_path, 'r') as fp:
while 1:
line = fp.readline()
if not line: break
print line
| apache-2.0 | Python | |
4a6d45d102c76647bc7c4ff30f4b888108dd2d7c | Bump version to 2.6.0.2dev | prakashpp/nereid,riteshshrv/nereid,prakashpp/nereid,fulfilio/nereid,riteshshrv/nereid,fulfilio/nereid,usudaysingh/nereid,usudaysingh/nereid | setup.py | setup.py | '''
Nereid
Nereid - Tryton as a web framework
:copyright: (c) 2010-2013 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
from setuptools import setup
setup(
name='Nereid',
version='2.6.0.2dev',
url='http://nereid.openlabs.co.in/docs/',
license='GPLv3',
author='Openlabs Technologies & Consulting (P) Limited',
author_email='info@openlabs.co.in',
description='Tryton - Web Framework',
long_description=__doc__,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'distribute',
'trytond_nereid',
'flask',
'wtforms',
'wtforms-recaptcha',
'babel',
'speaklater',
'Flask-Babel',
],
packages=[
'nereid',
'nereid.contrib',
'nereid.tests',
],
package_dir={
'nereid': 'nereid',
'nereid.contrib': 'nereid/contrib',
'nereid.tests': 'tests',
},
zip_safe=False,
platforms='any',
)
| '''
Nereid
Nereid - Tryton as a web framework
:copyright: (c) 2010-2013 by Openlabs Technologies & Consulting (P) Ltd.
:license: GPLv3, see LICENSE for more details
'''
from setuptools import setup
setup(
name='Nereid',
version='2.6.0.1',
url='http://nereid.openlabs.co.in/docs/',
license='GPLv3',
author='Openlabs Technologies & Consulting (P) Limited',
author_email='info@openlabs.co.in',
description='Tryton - Web Framework',
long_description=__doc__,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=[
'distribute',
'trytond_nereid',
'flask',
'wtforms',
'wtforms-recaptcha',
'babel',
'speaklater',
'Flask-Babel',
],
packages=[
'nereid',
'nereid.contrib',
'nereid.tests',
],
package_dir={
'nereid': 'nereid',
'nereid.contrib': 'nereid/contrib',
'nereid.tests': 'tests',
},
zip_safe=False,
platforms='any',
)
| bsd-3-clause | Python |
0907e4f32e4e0bb48f4f101b520ce8f28c731d6c | Add setup.py | allanburleson/python-adventure-game,disorientedperson/python-adventure-game | setup.py | setup.py | from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# TODO: change to rst
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='pag',
version='0.1.0',
description='A simple text-based adventure game engine.',
long_description=long_description,
url='https://github.com/allanburleson/python-adventure-game',
author='Allan Burleson',
author_email='burleson.a@icloud.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Education :: Testing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
keywords='game text text-based adventure interactive-fiction',
packages=find_packages(exclude='tests'),
package_data={
'pag': ['dictionary/*.txt']
}
)
| mit | Python | |
7b0779a402070ec88d9afeab42388b6432391336 | ADD AND CHANGE | wmtiger/dealsmp,wmtiger/dealsmp | zt.py | zt.py | #!/usr/bin/env python
# -*- conding: utf-8 -*-
########################
#File Name:zt.py
#Author:WmTiger
#Mail:bfstiger@gmail.com
#Created Time:2016-09-07 12:59:28
########################
import picamera
import time
import io
import zbar
from PIL import Image
def getQR():
stream = io.BytesIO()
sc = zbar.ImageScanner()
sc.parse_config("enable")
with picamera.PiCamera() as c:
c.start_preview()
time.sleep(0.5)
c.capture(stream, format='jpeg')
stream.seek(0)
pim = Image.open(stream).convert('L')
w, h = pim.size
raw = pim.tostring()
zim = zbar.Image(w,h,'Y800', raw)
sc.scan(zim)
data = ''
for sb in zim:
data += sb.data
del(zim)
return data
if __name__ == '__main__':
print getQR()
| apache-2.0 | Python | |
8812d487c33a8f0f1c96336cd27ad2fa942175f6 | add setup.py | astex/sequential | setup.py | setup.py | from distutils.core import setup
setup(
name="sequential",
packages=["sequential"],
version="1.0.0",
description="Sequential wrappers for python functions.",
author="Phil Condreay",
author_email="0astex@gmail.com",
url="https://github.com/astex/sequential",
keywords=["functions", "decorators", "multithreading", "convenience"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"
],
long_description="""\
Sequential
-----
Sequential wrappers for python functions allowing you to easily define order of
execution independent of content of the function.
"""
)
| mit | Python | |
365540e17885cf41043358f14a04d0fa15ecb4ec | update 002 with python | liuyang1/euler,liuyang1/euler,liuyang1/euler | 002.py | 002.py | def func(thresh):
a = 1
b = 1
ret = 0
while 1:
a, b = a + b, a
if a > thresh:
return ret
if a % 2 == 0:
ret += a
print func(4*1000*1000)
| mit | Python | |
a675b289f7848a773ded80f943f60156a224fd17 | Add a setup.py to allow use in pip requirements files. | adamfast/python-tweetar | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='tweetar',
version='1.0.0',
description='Script to post on Twitter METARs retrieved from NOAA.',
author='Adam Fast',
author_email='adamfast@gmail.com',
url='https://github.com/adamfast/python-tweetar',
packages=find_packages(),
package_data={
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities'
],
)
| bsd-3-clause | Python | |
62a405bf2574320a7fe1e941129056e8157121b5 | Add setup.py | gisce/sii | setup.py | setup.py | # coding=utf-8
from setuptools import setup, find_packages
PACKAGES_DATA = {'sii': ['data/*.xsd']}
setup(
name='sii',
description='Librería de Suministro Inmediato de Información',
author='GISCE-TI, S.L.',
author_email='devel@gisce.net',
url='http://www.gisce.net',
version='0.1.0alpha',
license='General Public Licence 2',
long_description='''Long description''',
provides=['sii'],
install_requires=['libcomxml', 'marshmallow'],
tests_require=['expects'],
packages=find_packages(exclude=['tests']),
package_data=PACKAGES_DATA,
test_suite='tests',
)
| mit | Python | |
6fc87957cff4e0ef1c95d604a8fb0630f2bcda38 | Add initial setup.py to please buildout. | cidadania/e-cidadania,cidadania/e-cidadania | setup.py | setup.py | import os
import re
from setuptools import setup, find_packages
setup(
name = 'e-cidadania',
description=("e-cidadania is a project to develop an open source "
"application for citizen participation, usable by "
"associations, companies and administrations."),
version = '0.1.5',
packages = find_packages(exclude=['parts']),
author='Oscar',
url='http://ecidadania.org',
license='GPLv2',
install_requires = [
],
tests_require=[
],
include_package_data = True,
zip_safe = False,
)
| apache-2.0 | Python | |
7f0eaa3974845b0e62c033fc4c3b0079c8e37465 | Add setup.py | cgohlke/imagecodecs,cgohlke/imagecodecs,cgohlke/imagecodecs | setup.py | setup.py | # -*- coding: utf-8 -*-
# imagecodecs/setup.py
from setuptools import setup, Extension
from Cython.Distutils import build_ext
import sys
import os
import re
import warnings
import numpy
buildnumber = '' # '.post0'
with open('imagecodecs/_imagecodecs.pyx') as fh:
code = fh.read()
version = re.search("__version__ = '(.*?)'", code).groups()[0]
description = re.search('"""(.*)\.\n', code).groups()[0]
readme = re.search('[\r\n?|\n]{2}"""(.*)"""[\r\n?|\n]{2}__version__', code,
re.MULTILINE| re.DOTALL).groups()[0]
license = re.search('(# Copyright.*?[\r\n?|\n])[\r\n?|\n]+""', code,
re.MULTILINE| re.DOTALL).groups()[0]
readme = '\n'.join([description, '=' * len(description)]
+ readme.splitlines()[1:])
license = license.replace('# ', '').replace('#', '')
if 'sdist' in sys.argv:
with open('LICENSE', 'w') as fh:
fh.write(license)
with open('README.rst', 'w') as fh:
fh.write(readme)
ext_modules = [
Extension(
'imagecodecs._imagecodecs',
['imagecodecs/_imagecodecs.pyx', 'imagecodecs/imagecodecs.c'],
include_dirs=[numpy.get_include(), 'imagecodecs'],
libraries=['zlib', 'lz4', 'lzf', 'webp', 'png', 'jxrlib', 'jpeg',
'zstd_static', 'lzma-static', 'libbz2', 'openjp2'],
extra_compile_args=['/DWIN32', '/DLZMA_API_STATIC', '/DOPJ_STATIC'],
),
Extension(
'imagecodecs._jpeg12',
['imagecodecs/_jpeg12.pyx'],
include_dirs=[numpy.get_include(), 'imagecodecs'],
libraries=['jpeg12'],
extra_compile_args=['/DBITS_IN_JSAMPLE=12'],
),
]
setup_args = dict(
name='imagecodecs',
version=version + buildnumber,
description=description,
long_description=readme,
author='Christoph Gohlke',
author_email='cgohlke@uci.edu',
url='https://www.lfd.uci.edu/~gohlke/',
python_requires='>=2.7',
install_requires=['numpy>=1.14'],
tests_require=['pytest', 'zstd', 'lz4', 'python-lzf'],
packages=['imagecodecs'],
license='BSD',
zip_safe=False,
platforms=['any'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
try:
if '--universal' in sys.argv:
raise ValueError(
'Not building the _imagecodecs Cython extension in universal mode')
setup(ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
**setup_args)
except Exception as e:
warnings.warn(str(e))
warnings.warn(
'The _imagecodecs Cython extension module was not built.\n'
'Using a fallback module with limited functionality and performance.')
setup(**setup_args)
| bsd-3-clause | Python | |
c862150e8b9d015263f450483f7163e067df5b92 | add setup.py | hph/drake | setup.py | setup.py | #/usr/bin/python
#coding=utf8
import os
import sys
def authenticate():
'''Prompt the user for the superuser password if required.'''
# The euid (effective user id) of the superuser is 0.
euid = os.geteuid()
if euid != 0:
args = ['sudo', '-E', sys.executable] + sys.argv[:] + [os.environ]
# Replaces the current running process with the sudo authentication.
os.execlpe('sudo', *args)
return True
def setup():
'''Install drake.'''
authenticate()
os.system('cp -R drake/ ~/.drake')
os.system('chmod +x ~/.drake/drake.py')
os.system('ln -s ~/.drake/drake.py /usr/bin/drake')
def remove():
'''Uninstall drake.'''
authenticate()
os.system('rm -rf ~/.drake')
os.system('rm /usr/bin/drake')
def main():
if len(sys.argv) > 1:
if sys.argv[1] == 'install':
setup()
elif sys.argv[1] == 'uninstall':
remove()
if __name__ == '__main__':
main()
| mit | Python | |
5f410124e439ba5795335b3e0159eb1421e3ba52 | Package setup definition | gzuidhof/plotta-python | setup.py | setup.py | from setuptools import setup
setup(
name='plotta',
version='1.0.0a1',
install_requires=['unirest'],
description='Python wrapper for Plotta API',
url='https://github.com/gzuidhof/plotta-python',
license='MIT',
keywords='plot plotting plotta',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License'
]
)
| mit | Python | |
a03b9b0d219b54fce5bd3fcbef88b117d49115b1 | Add files via upload | yumikohey/WaiterCar,yumikohey/WaiterCar,yumikohey/WaiterCar | self-driving-car-ai/mlp_training.py | self-driving-car-ai/mlp_training.py | __author__ = 'zhengwang'
import cv2
import numpy as np
import glob
print 'Loading training data...'
e0 = cv2.getTickCount()
# load training data
image_array = np.zeros((1, 38400))
label_array = np.zeros((1, 4), 'float')
training_data = glob.glob('training_data/*.npz')
# image_array, label_array = np.load('training_data/test01.npz')
# print dir(training_data)
def train_ann(image_array, label_array):
train = image_array[1:, :]
train_labels = label_array[1:, :]
print train.shape
print train_labels.shape
e00 = cv2.getTickCount()
time0 = (e00 - e0)/ cv2.getTickFrequency()
print 'Loading image duration:', time0
# set start time
e1 = cv2.getTickCount()
# create MLP
layer_sizes = np.int32([38400, 32, 4])
model = cv2.ANN_MLP()
model.create(layer_sizes)
criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 500, 0.0001)
criteria2 = (cv2.TERM_CRITERIA_COUNT, 100, 0.001)
params = dict(term_crit = criteria,
train_method = cv2.ANN_MLP_TRAIN_PARAMS_BACKPROP,
bp_dw_scale = 0.001,
bp_moment_scale = 0.0 )
print 'Training MLP ...'
num_iter = model.train(train, train_labels, None, params = params)
# set end time
e2 = cv2.getTickCount()
time = (e2 - e1)/cv2.getTickFrequency()
print 'Training duration:', time
# save param
model.save('training_model/mlp.xml')
print 'Ran for %d iterations' % num_iter
ret, resp = model.predict(train)
prediction = resp.argmax(-1)
print 'Prediction:', prediction
true_labels = train_labels.argmax(-1)
print 'True labels:', true_labels
print 'Testing...'
train_rate = np.mean(prediction == true_labels)
print 'Train rate: %f:' % (train_rate*100)
for single_npz in training_data:
with np.load(single_npz) as data:
print "--------------------------------------"
print data.files
train_temp = data['train']
train_labels_temp = data['train_labels']
print single_npz
print train_temp.shape
print train_labels_temp.shape
train_ann(train_temp, train_labels_temp)
image_array = np.vstack((image_array, train_temp))
label_array = np.vstack((label_array, train_labels_temp))
train_ann(image_array,label_array) | mit | Python | |
27b7fe3c6ef33e2a810f0394e83b5f776e17a60b | add setup.py | LenxWei/MiSSH | setup.py | setup.py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from setuptools import setup, find_packages
setup(
name = "missh",
version = "0.1.1rc7",
# packages = find_packages(), #["mipass","missh-nox"],
py_modules = ["mipass"],
scripts = ['missh'],
install_requires = ["npyscreen >=2.0pre47", "pycrypto >=2.4.0", "python-daemon >=1.5.5", "pexpect >=2.3"],
description = "Minimalist Session Manager for SSH",
# long_description = "A minimalist session manager for Linux/OSX ssh users",
author = "Lenx Wei",
author_email = "lenx.wei@gmail.com",
license = "BSD",
keywords = "ssh session manager",
platforms = "Independant",
url = "http://lenx.100871.net/missh",
classifiers=[
# Reference: http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Environment :: Console :: Curses",
"Operating System :: POSIX",
"Programming Language :: Python",
"Intended Audience :: End Users/Desktop",
"Topic :: Internet",
],
)
| bsd-2-clause | Python | |
34bb61bb7b634255a5828a0dbc695668bfe357cf | add setup.py for setuptools | bartekrutkowski/egniter | setup.py | setup.py | #!/usr/bin/env python
from os.path import join
from setuptools import setup, find_packages
setup(name='egniter',
version='1.0',
description='Egniter is a command line tool for easy launching VMWare' +
'ESX virtual machines using ESX API',
author='',
author_email='',
license='BSD3',
url='https://github.com/bartekrutkowski/egniter',
packages=find_packages(),
entry_points = {
'console_scripts': ['egniter = egniter:main',]
},
install_requires=['configparser', 'pysphere'],
)
| bsd-3-clause | Python | |
a00f47d66f87632fc28d49b97500132535c25d68 | Create setup.py | NickSanzotta/WiFiSuite | setup.py | setup.py | from setuptools import setup, find_packages
setup(name='WiFiSuite',
version='v 1.05282017',
description='Enterprise WPA Wireless Tool suite ',
classifiers=[
'Development Status :: 1 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Operating System :: Kali Linux'
],
keywords='pentesting security wireless eap 802.1X radius',
url='https://github.com/NickSanzotta/WiFiSuite',
author='Nick Sanzotta',
license='MIT',
packages=find_packages(include=[
"wifisuite", "wifisuite.*", "wifisuite.wifisuite.*"
]),
install_requires=[
'wpa_supplicant',
'psutil',
'netifaces'
],
# entry_points = {
# 'console_scripts': ['wifisuite=wifisuite.wifisuite:main'],
# },
include_package_data=True,
zip_safe=False)
| mit | Python | |
fb53445b97f6667f4773b7f4ec32e03e2e2019d6 | Make setuptools optional for install. | Thhhza/XlsxWriter,jmcnamara/XlsxWriter,gechong/XlsxWriter,applicationdevm/XlsxWriter,mdeemer/XlsxWriter,seize-the-dave/XlsxWriter,seize-the-dave/XlsxWriter,jmcnamara/XlsxWriter,applicationdevm/XlsxWriter,liukaijv/XlsxWriter,gechong/XlsxWriter,liukaijv/XlsxWriter,Thhhza/XlsxWriter,gechong/XlsxWriter,yaojingwu1992/XlsxWriter,mdeemer/XlsxWriter,gechong/XlsxWriter,jkyeung/XlsxWriter,yaojingwu1992/XlsxWriter,applicationdevm/XlsxWriter,jmcnamara/XlsxWriter,liukaijv/XlsxWriter,jvrsantacruz/XlsxWriter,liukaijv/XlsxWriter,jvrsantacruz/XlsxWriter,mdeemer/XlsxWriter,applicationdevm/XlsxWriter,seize-the-dave/XlsxWriter,yaojingwu1992/XlsxWriter,jkyeung/XlsxWriter,jmcnamara/XlsxWriter,jvrsantacruz/XlsxWriter,Thhhza/XlsxWriter,yaojingwu1992/XlsxWriter,mdeemer/XlsxWriter,seize-the-dave/XlsxWriter,jkyeung/XlsxWriter,Thhhza/XlsxWriter,jvrsantacruz/XlsxWriter,jkyeung/XlsxWriter | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='XlsxWriter',
version='0.5.7',
author='John McNamara',
author_email='jmcnamara@cpan.org',
url='https://github.com/jmcnamara/XlsxWriter',
packages=['xlsxwriter'],
license='BSD',
description='A Python module for creating Excel XLSX files.',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| from setuptools import setup
setup(
name='XlsxWriter',
version='0.5.7',
author='John McNamara',
author_email='jmcnamara@cpan.org',
url='https://github.com/jmcnamara/XlsxWriter',
packages=['xlsxwriter'],
license='BSD',
description='A Python module for creating Excel XLSX files.',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| bsd-2-clause | Python |
118e6fdd95d16c69a9d887d327046eadc61853f1 | Add setup.py | m110/grafcli,m110/grafcli | setup.py | setup.py | #!/usr/bin/python3
from setuptools import setup, find_packages
setup(name='grafcli',
version='0.1.0',
description='Grafana CLI management tool',
author='Milosz Smolka',
author_email='m110@m110.pl',
url='https://github.com/m110/grafcli',
packages=find_packages(exclude=['tests']),
scripts=['scripts/grafcli'],
data_files=[('/etc/grafcli', ['grafcli.example.conf'])],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.4',
'Topic :: System :: Systems Administration',
])
| mit | Python | |
98ce14b1ed7fb8729f26e5705910b15ab4928275 | Create rf_Table_Helpers.py | GLMeece/RF_Table_Helpers | rf_Table_Helpers.py | rf_Table_Helpers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: rf_Table_Helpers
:platform: Unix, Windows
:synopsis: Useful table introspection functions for Robot Framework's Selenium 2 Library.
.. moduleauthor:: Greg Meece <glmeece@gmail.com>
"""
from lxml import html
# ---------------------------------------------------------------
| mit | Python | |
bd724a63413ea3234d6f404ccac662febb2e1ccd | Complete exercise 9 | sdarji/lpthw,sdarji/lpthw,sdarji/lpthw,sdarji/lpthw | ex/ex09.py | ex/ex09.py | # LPTHW Exercise 9 -- Printing, Printing, Printing
# Here's some new strange stuff, remember type it exactly.
days = "Mon Tue Wed Thu Fri Sat Sun"
months = "Jan\nFeb\nMar\nApr\nMay\nJun\nJul\nAug"
print "Here are the days: ", days
print "Here are the months: ", months
print """
There's something going on here.
With the three double- quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
""" | unlicense | Python | |
64f79354695b24d99479f63c770887e6326b7102 | Create app.py | milebag/pythons | app.py | app.py | print("hello")
| apache-2.0 | Python | |
dc391d441310cc27f92d8feff8e46dc05a5af7b3 | Add unit tests for disabling orphaned workflow executions gc | nzlosh/st2,Plexxi/st2,StackStorm/st2,Plexxi/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,StackStorm/st2,nzlosh/st2,nzlosh/st2 | st2reactor/tests/unit/test_garbage_collector.py | st2reactor/tests/unit/test_garbage_collector.py | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import unittest
from oslo_config import cfg
import st2tests.config as tests_config
tests_config.parse_args()
from st2reactor.garbage_collector import base as garbage_collector
class GarbageCollectorServiceTest(unittest.TestCase):
    """Verify that the orphaned-workflow-execution purge is gated on the
    ``workflow_engine.gc_max_idle_sec`` config option: a value >= 1 enables
    the routine, 0 disables it.  All other GC routines are mocked out so
    only this decision is exercised.
    """
    def tearDown(self):
        # Reset gc_max_idle_sec with a value of 1 to reenable for other tests.
        cfg.CONF.set_override('gc_max_idle_sec', 1, group='workflow_engine')
        super(GarbageCollectorServiceTest, self).tearDown()
    # Stub every GC routine (including the one under test) so that
    # _perform_garbage_collection only records which routines were invoked.
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_action_executions',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_action_executions_output',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_trigger_instances',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_timeout_inquiries',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_orphaned_workflow_executions',
        mock.MagicMock(return_value=None))
    def test_orphaned_workflow_executions_gc_enabled(self):
        """gc_max_idle_sec >= 1 must trigger the orphaned-workflow purge."""
        # Mock the default value of gc_max_idle_sec with a value >= 1 to enable. The config
        # gc_max_idle_sec is assigned to _workflow_execution_max_idle which gc checks to see
        # whether to run the routine.
        cfg.CONF.set_override('gc_max_idle_sec', 1, group='workflow_engine')
        # Run the garbage collection.
        gc = garbage_collector.GarbageCollectorService(sleep_delay=0)
        gc._perform_garbage_collection()
        # Make sure _purge_orphaned_workflow_executions is called.
        self.assertTrue(
            garbage_collector.GarbageCollectorService._purge_orphaned_workflow_executions.called
        )
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_action_executions',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_action_executions_output',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_trigger_instances',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_timeout_inquiries',
        mock.MagicMock(return_value=None))
    @mock.patch.object(
        garbage_collector.GarbageCollectorService,
        '_purge_orphaned_workflow_executions',
        mock.MagicMock(return_value=None))
    def test_orphaned_workflow_executions_gc_disabled(self):
        """gc_max_idle_sec == 0 must skip the orphaned-workflow purge."""
        # Mock the default value of gc_max_idle_sec with a value of 0 to disable. The config
        # gc_max_idle_sec is assigned to _workflow_execution_max_idle which gc checks to see
        # whether to run the routine.
        cfg.CONF.set_override('gc_max_idle_sec', 0, group='workflow_engine')
        # Run the garbage collection.
        gc = garbage_collector.GarbageCollectorService(sleep_delay=0)
        gc._perform_garbage_collection()
        # Make sure _purge_orphaned_workflow_executions is not called.
        self.assertFalse(
            garbage_collector.GarbageCollectorService._purge_orphaned_workflow_executions.called
        )
| apache-2.0 | Python | |
989d8516e40890f3a75d040f6923cbe8bd9749ff | Create subprocess.py | diamontip/pract,diamontip/pract | subprocess.py | subprocess.py | #system information gather script.
import subprocess
def uname_func():
    # Print kernel/system identification by running `uname -a`.
    uname = 'uname'
    uname_arg='-a'
    print "collecting system information with %s command:\n" %uname
    subprocess.call([uname,uname_arg])
def disk_func():
    # Report mounted filesystem usage by running `df -h` (human-readable).
    diskspace = "df"
    diskspace_arg = "-h"
    print "collecting diskspace information %s command: \n" % diskspace
    subprocess.call([diskspace,diskspace_arg])
def main():
    # Entry point: run both gathering steps in order.
    uname_func()
    disk_func()
# NOTE: runs on import as well as when executed directly (no __main__ guard).
main()
| mit | Python | |
a7685738c9bd54a53858199b2225dbb4d1adce8e | Fix warning in plot_rank_mean | vighneshbirodkar/scikit-image,paalge/scikit-image,paalge/scikit-image,vighneshbirodkar/scikit-image,ofgulban/scikit-image,rjeli/scikit-image,rjeli/scikit-image,vighneshbirodkar/scikit-image,rjeli/scikit-image,paalge/scikit-image,ajaybhat/scikit-image,Hiyorimi/scikit-image,Hiyorimi/scikit-image,pratapvardhan/scikit-image,ofgulban/scikit-image,pratapvardhan/scikit-image,ofgulban/scikit-image,ajaybhat/scikit-image | doc/examples/filters/plot_rank_mean.py | doc/examples/filters/plot_rank_mean.py | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element to compute
average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give here similar results, these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = data.coins()
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 10),
sharex=True, sharey=True)
ax = axes.ravel()
titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
ax[n].imshow(imgs[n])
ax[n].set_title(titles[n])
ax[n].set_adjustable('box-forced')
ax[n].axis('off')
plt.show()
| """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element to compute
average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give here similar results, these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = (data.coins()).astype(np.uint16) * 16
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 10),
sharex=True, sharey=True)
ax = axes.ravel()
titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
ax[n].imshow(imgs[n])
ax[n].set_title(titles[n])
ax[n].set_adjustable('box-forced')
ax[n].axis('off')
plt.show()
| bsd-3-clause | Python |
64cc611c8a13379f62151629585e3fec9e442f82 | add timeout decorator | membase/membase-cli,membase/membase-cli,couchbaselabs/couchbase-cli,couchbase/couchbase-cli,membase/membase-cli,couchbaselabs/couchbase-cli,couchbase/couchbase-cli,couchbaselabs/couchbase-cli | timeout.py | timeout.py | #
# Copyright 2012, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import signal
import os
from threading import Timer
class TimeoutException(Exception):
    """Raised when a function or method runs longer than its allowed
    timeout.  ``command`` identifies the operation that timed out and
    selects the human-readable explanation returned by ``__str__``.
    """

    # Explanation for each known command; anything else falls back to the
    # generic message in __str__.
    _EXPLANATIONS = {
        'bucket-list': 'bucket is not reachable or does not exist',
        'bucket-create': 'most likely bucket is not created',
        'bucket-edit': 'most likely bucket parameters are not changed',
        'bucket-delete': 'most likely bucket is not deleted',
        'bucket-flush': 'most likely bucket is not flushed',
    }

    def __init__(self, command):
        self.command = command

    def __str__(self):
        return self._EXPLANATIONS.get(self.command, 'unknown error')
def timed_out(timeout=60):
    """Decorator factory that aborts the wrapped call with
    ``TimeoutException`` if it runs longer than ``timeout`` seconds.

    A background ``Timer`` thread sends SIGABRT to the current process
    once the timeout elapses; a handler installed in the main thread
    translates the signal into a ``TimeoutException`` carrying the
    command name (assumed to be the second positional argument --
    TODO confirm against the call sites).
    """
    def decorator(function):
        def wrapper(*args, **kargs):
            """Start timer thread which sends SIGABRT signal to current
            process after defined timeout. Create signal listener which raises
            exception once signal is received. Run actual task in parallel.
            """
            def send_signal():
                pid = os.getpid()
                os.kill(pid, signal.SIGABRT)

            def handle_signal(signum, frame):
                raise TimeoutException(args[1])

            timer = Timer(timeout, send_signal)
            timer.start()
            signal.signal(signal.SIGABRT, handle_signal)
            try:
                return function(*args, **kargs)
            finally:
                # BUG FIX: cancel the pending timer once the call finishes.
                # Without this, the timer fired even after a *successful*
                # call and aborted the process anyway.
                timer.cancel()
        return wrapper
    return decorator
| apache-2.0 | Python | |
72be8a8fd8345542096ba31e3f1428ea25ea9498 | Print with vs without a comma | nguyennam9696/Learn_Python_The_Hard_Way | ex6.py | ex6.py | end1 = "C"
end2 = "H"
end3 = "E"
end4 = "E"
end5 = "S"
end6 = "E"
end7 = "B"
end8 = "U"
end9 = "R"
end10 = "G"
end11 = "E"
end12 = "R"
# Printing without a comma
print end1 + end2 + end3 + end4 + end5 + end6
print end7 + end8 + end9 + end10 + end11 + end12
# Printing with a comma
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + end10 + end11 + end12 | mit | Python | |
e58fe43d032a98849acc9c0ca041432bea0dbdba | Create brick-wall.py | jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,kamyu104/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode | Python/brick-wall.py | Python/brick-wall.py | # Time: O(n), n is the total number of the bricks
# Space: O(m), m is the total number different widths
# There is a brick wall in front of you. The wall is rectangular and has several rows of bricks.
# The bricks have the same height but different width. You want to draw a vertical line from
# the top to the bottom and cross the least bricks.
#
# The brick wall is represented by a list of rows. Each row is a list of integers representing the
# width of each brick in this row from left to right.
#
# If your line go through the edge of a brick, then the brick is not considered as crossed.
# You need to find out how to draw the line to cross the least bricks and return the number of crossed bricks.
#
# You cannot draw a line just along one of the two vertical edges of the wall,
# in which case the line will obviously cross no bricks.
#
# Example:
# Input:
# [[1,2,2,1],
# [3,1,2],
# [1,3,2],
# [2,4],
# [3,1,2],
# [1,3,1,1]]
# Output: 2
#
# Note:
# The width sum of bricks in different rows are the same and won't exceed INT_MAX.
# The number of bricks in each row is in range [1,10,000].
# The height of wall is in range [1,10,000].
# Total number of bricks of the wall won't exceed 20,000.
class Solution(object):
    def leastBricks(self, wall):
        """Return the minimum number of bricks a vertical line must cross.

        A line drawn through a horizontal position where many rows have a
        brick edge crosses ``len(wall) - edge_count`` bricks, so we count
        edge positions (cumulative brick widths, excluding the wall's
        right border) and keep the best.

        :type wall: List[List[int]]
        :rtype: int
        """
        # BUG FIX: the original used ``collections`` without importing it
        # and the Python-2-only ``xrange``; this version uses builtins only
        # and runs on both Python 2 and 3.
        edge_counts = {}
        result = len(wall)
        for row in wall:
            edge = 0
            # Skip the last brick of each row: its right edge is the wall
            # border, where drawing the line is not allowed.
            for width in row[:-1]:
                edge += width
                count = edge_counts.get(edge, 0) + 1
                edge_counts[edge] = count
                result = min(result, len(wall) - count)
        return result
96035f6bb2a298cea859b1e5e9812e2dd83982d2 | Add script to upload files to shell applet | ENCODE-DCC/chip-seq-pipeline,ENCODE-DCC/chip-seq-pipeline,ENCODE-DCC/chip-seq-pipeline,ENCODE-DCC/chip-seq-pipeline | dnanexus/shell/resources/home/dnanexus/upload_file.py | dnanexus/shell/resources/home/dnanexus/upload_file.py | #!/usr/bin/env python
# -*- coding: latin-1 -*-
import os, sys, time, subprocess, json, requests
HEADERS = {
'Content-type': 'application/json',
'Accept': 'application/json',
}
path = 'test.fastq'
FILE_URL = 'http://test.encodedcc.org/TSTFF867178/upload/'
ENCODED_KEY = '...'
ENCODED_SECRET_KEY = '...'
response = requests.get(FILE_URL, headers=HEADERS, auth=(ENCODED_KEY, ENCODED_SECRET_KEY))
try:
response.raise_for_status()
except:
print('File object GET failed')
raise
item = response.json()['@graph'][0]
print(json.dumps(item, indent=4, sort_keys=True))
creds = item['upload_credentials']
env = os.environ.copy()
env.update({
'AWS_ACCESS_KEY_ID': creds['access_key'],
'AWS_SECRET_ACCESS_KEY': creds['secret_key'],
'AWS_SECURITY_TOKEN': creds['session_token'],
})
# ~10s/GB from Stanford - AWS Oregon
# ~12-15s/GB from AWS Ireland - AWS Oregon
print("Uploading file.")
start = time.time()
try:
subprocess.check_call(['aws', 's3', 'cp', path, creds['upload_url']], env=env)
except subprocess.CalledProcessError as e:
# The aws command returns a non-zero exit code on error.
print("Upload failed with exit code %d" % e.returncode)
sys.exit(e.returncode)
else:
end = time.time()
duration = end - start
print("Uploaded in %.2f seconds" % duration)
| mit | Python | |
2737bd64cd33a592a4d50ec1596177e01d859e72 | add point artist with draw and draw collection | compas-dev/compas | src/compas_rhino/artists/pointartist.py | src/compas_rhino/artists/pointartist.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas_rhino
from compas_rhino.artists import PrimitiveArtist
__all__ = ['PointArtist']
class PointArtist(PrimitiveArtist):
    """Artist for drawing ``Point`` objects.

    Parameters
    ----------
    point : compas.geometry.Point
        A COMPAS point.
    layer : str (optional)
        The name of the layer that will contain the point.
        Default value is ``None``, in which case the current layer will be used.

    Examples
    --------
    >>>

    """

    __module__ = "compas_rhino.artists"

    # Default drawing color (RGB); shared with the staticmethod
    # ``draw_collection``, which has no instance settings to read.
    DEFAULT_COLOR = (0, 0, 0)

    def __init__(self, point, layer=None):
        super(PointArtist, self).__init__(point, layer=layer)
        # BUG FIX: the original dict literal was missing the comma between
        # the two entries (a syntax error) and the class docstring above was
        # terminated with four quotes instead of three.
        self.settings.update({
            'layer': layer,
            'color.point': self.DEFAULT_COLOR})

    def draw(self):
        """Draw the point.

        Returns
        -------
        guids : list of str
            The GUIDs of the created Rhino objects (a single-element list).
        """
        points = [{'pos': list(self.primitive), 'color': self.settings['color.point']}]
        guids = compas_rhino.draw_points(points, layer=self.settings['layer'], clear=False)
        return guids

    @staticmethod
    def draw_collection(collection, color=None, layer=None, clear=False, group_collection=False, group_name=None):
        """Draw a collection of points.

        Parameters
        ----------
        collection : list of compas.geometry.Point
            A collection of ``Point`` objects.
        color : tuple or list of tuple (optional)
            Color specification of the points.
            If one RGB color is provided, it will be applied to all points.
            If a list of RGB colors is provided, these colors are applied to
            the corresponding points (one color per item).
            Default is ``None``, in which case ``DEFAULT_COLOR`` is used.
        layer : str (optional)
            The layer in which the objects of the collection should be created.
            Default is ``None``, in which case the current layer is used.
        clear : bool (optional)
            Clear the layer before drawing. Default is ``False``.
        group_collection : bool (optional)
            Flag for grouping the objects of the collection.
            Default is ``False``.
        group_name : str (optional)
            The name of the group. Default is ``None``.

        Returns
        -------
        guids : list
            A list of GUIDs if the collection is not grouped.
        groupname : str
            The name of the group if the collection objects are grouped.
        """
        # BUG FIX: this is a staticmethod, so the original references to
        # ``self.settings`` and the undefined helper ``list_like`` raised
        # NameError.  Normalize the color spec inline instead.
        if color is None:
            colors = [PointArtist.DEFAULT_COLOR] * len(collection)
        elif isinstance(color[0], (int, float)):
            # A single RGB tuple: apply it to every point.
            colors = [color] * len(collection)
        else:
            colors = list(color)
        points = []
        for point, point_color in zip(collection, colors):
            points.append({
                'pos': list(point),
                'color': point_color})
        if clear:
            if layer:
                compas_rhino.clear_layer(layer)
            else:
                compas_rhino.clear_current_layer()
        guids = compas_rhino.draw_points(points, layer=layer)
        if not group_collection:
            return guids
        group = compas_rhino.rs.AddGroup(group_name)
        if group:
            compas_rhino.rs.AddObjectsToGroup(guids, group)
        return group
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| mit | Python | |
85f6b2437b57c6e33ff56422b15aaab690704218 | Add test to validate against schema | NaturalHistoryMuseum/ckanext-doi,NaturalHistoryMuseum/ckanext-doi,NaturalHistoryMuseum/ckanext-doi | ckanext/doi/tests/test_schema.py | ckanext/doi/tests/test_schema.py | #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-doi
# Created by the Natural History Museum in London, UK
import ckanext.doi.api as doi_api
import ckanext.doi.lib as doi_lib
import mock
import requests
from ckantest.factories import DataConstants
from ckantest.models import TestBase
from lxml import etree
class Resolver(etree.Resolver):
def resolve(self, url, pubid, context):
r = requests.get(url)
return self.resolve_string(r.content, context)
class TestSchema(TestBase):
plugins = [u'doi']
base_url = u'https://schema.datacite.org/meta/kernel-3/'
@classmethod
def setup_class(cls):
super(TestSchema, cls).setup_class()
r = requests.get(cls.base_url + 'metadata.xsd')
parser = etree.XMLParser(no_network=False)
parser.resolvers.add(Resolver())
xml_etree = etree.fromstring(r.content,
base_url=cls.base_url,
parser=parser)
cls.schema = etree.XMLSchema(xml_etree)
with mock.patch('ckan.lib.helpers.session', cls._session):
cls.package_dict = cls.data_factory().package(author=DataConstants.authors_short,
activate=False)
def test_validate_schema(self):
doi = doi_lib.get_or_create_doi(self.package_dict[u'id'])
metadata_dict = doi_lib.build_metadata(self.package_dict, doi)
api = doi_api.MetadataDataCiteAPI()
xml_string = api.metadata_to_xml(**metadata_dict)
xml_tree = etree.fromstring(xml_string)
self.schema.assertValid(xml_tree)
| mit | Python | |
56e3ec2e0e788797b252cf28438d7ca6bede29ef | Correct comparison | alexm92/sentry,daevaorn/sentry,JackDanger/sentry,BuildingLink/sentry,zenefits/sentry,JackDanger/sentry,fotinakis/sentry,ifduyue/sentry,daevaorn/sentry,ifduyue/sentry,BuildingLink/sentry,looker/sentry,gencer/sentry,JackDanger/sentry,BuildingLink/sentry,daevaorn/sentry,looker/sentry,gencer/sentry,ifduyue/sentry,looker/sentry,BuildingLink/sentry,mvaled/sentry,mvaled/sentry,alexm92/sentry,mvaled/sentry,nicholasserra/sentry,gencer/sentry,nicholasserra/sentry,beeftornado/sentry,daevaorn/sentry,ifduyue/sentry,jean/sentry,looker/sentry,jean/sentry,mvaled/sentry,JamesMura/sentry,mvaled/sentry,BuildingLink/sentry,zenefits/sentry,mitsuhiko/sentry,fotinakis/sentry,JamesMura/sentry,looker/sentry,alexm92/sentry,beeftornado/sentry,mitsuhiko/sentry,fotinakis/sentry,gencer/sentry,JamesMura/sentry,jean/sentry,jean/sentry,gencer/sentry,ifduyue/sentry,jean/sentry,zenefits/sentry,fotinakis/sentry,mvaled/sentry,zenefits/sentry,JamesMura/sentry,JamesMura/sentry,nicholasserra/sentry,beeftornado/sentry,zenefits/sentry | tests/sentry/api/endpoints/test_broadcast_index.py | tests/sentry/api/endpoints/test_broadcast_index.py | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Broadcast, BroadcastSeen
from sentry.testutils import APITestCase
class BroadcastListTest(APITestCase):
def test_simple(self):
broadcast1 = Broadcast.objects.create(message='bar', is_active=True)
Broadcast.objects.create(message='foo', is_active=False)
self.login_as(user=self.user)
url = reverse('sentry-api-0-broadcast-index')
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(broadcast1.id)
class BroadcastUpdateTest(APITestCase):
def test_simple(self):
broadcast1 = Broadcast.objects.create(message='bar', is_active=True)
broadcast2 = Broadcast.objects.create(message='foo', is_active=False)
self.login_as(user=self.user)
url = reverse('sentry-api-0-broadcast-index')
response = self.client.put(url, {
'hasSeen': '1'
})
assert response.status_code == 200
assert response.data['hasSeen']
assert BroadcastSeen.objects.filter(
user=self.user,
broadcast=broadcast1,
).exists()
assert not BroadcastSeen.objects.filter(
user=self.user,
broadcast=broadcast2,
).exists()
| from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import Broadcast, BroadcastSeen
from sentry.testutils import APITestCase
class BroadcastListTest(APITestCase):
def test_simple(self):
broadcast1 = Broadcast.objects.create(message='bar', is_active=True)
Broadcast.objects.create(message='foo', is_active=False)
self.login_as(user=self.user)
url = reverse('sentry-api-0-broadcast-index')
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]['id'] == str(broadcast1.id)
class BroadcastUpdateTest(APITestCase):
def test_simple(self):
broadcast1 = Broadcast.objects.create(message='bar', is_active=True)
broadcast2 = Broadcast.objects.create(message='foo', is_active=False)
self.login_as(user=self.user)
url = reverse('sentry-api-0-broadcast-index')
response = self.client.put(url, {
'hasSeen': '1'
})
assert response.status_code == 200
assert response.data['hasSeen'] == True
assert BroadcastSeen.objects.filter(
user=self.user,
broadcast=broadcast1,
).exists()
assert not BroadcastSeen.objects.filter(
user=self.user,
broadcast=broadcast2,
).exists()
| bsd-3-clause | Python |
de5e7e3555788bb5e62d1ad2d20208d4289e5fe5 | Add script for printing max accuracy across many networks | jonhoo/periscope,jonhoo/periscope | max.py | max.py | #!/usr/bin/env python3
import argparse
import numpy
import sys
import re
import os
import os.path
import tempfile
parser = argparse.ArgumentParser()
parser.add_argument('model',
type=argparse.FileType('rb'),
nargs='+',
help='path to .mdl to extract plot data from'
)
parser.add_argument('-n', '--names', help='set experiment names', nargs="+")
parser.add_argument('-s', '--set', help='plot only the given dataset', choices=['training', 'validation'], default='validation')
parser.add_argument('-k', '--atk', help='plot only accuracy at top-k', type=int, choices=[5, 1], default=1)
args = parser.parse_args()
i = -1
for model in args.model:
i += 1
try:
import pickle
model.seek(0)
formatver = pickle.load(model)
if type(formatver) != int:
formatver = 0
model.seek(0)
# add imports for unpickle to work
import lasagne
import theano
pickle.load(model) # state
# the things we actually care about
epoch = pickle.load(model)
if args.set == "training":
vals = pickle.load(model)
pickle.load(model) # validation
else:
pickle.load(model) # training
vals = pickle.load(model)
if args.atk == 1:
val = numpy.max([v[1] for v in vals])
else:
val = numpy.max([v[2] for v in vals])
num = re.sub(r'^.*?(\d+(\.\d+)?).*$', r'\1', args.names[i])
print("{}\t{}\t{}\t{}".format(args.names[i], epoch, val, num))
except EOFError:
print("Model {} is invalid".format(model.name))
sys.exit(1)
| mit | Python | |
9f67de8a0823edf66212ed84116a1138a5fd0adb | add tests | shmilee/gdpy3,shmilee/gdpy3,shmilee/gdpy3,shmilee/gdpy3 | tests/gdr-test1.py | tests/gdr-test1.py | # -*- coding: utf-8 -*-
# Copyright (c) 2017 shmilee
import os
import logging
import gdpy3.read as gdr
log = logging.getLogger('gdr')
if __name__ == '__main__':
# log.setLevel(20)
log.setLevel(10)
datadir = '/home/IFTS_shmilee/phiobo-4-test'
numpat = r'[-+]?\d+[\.]?\d*[eE]?[-+]?\d*'
mypats = [r'\*{6}\s+k_r\*rhoi=\s*?(?P<krrhoi>' + numpat + r'?)\s*?,'
+ r'\s+k_r\*rho0=\s*?(?P<krrho0>' + numpat + r'?)\s+?\*{6}',
r'\*{5}\s+k_r\*dlt_r=\s*?(?P<krdltr>' + numpat + r'?)\s+?\*{5}']
obj = gdr.read(datadir,
description='set kr*rho=0.1, q=1.4, e=0.2',
version='110922',
extension='hdf5',
# extension='npz',
additionalpats=mypats)
print('1. obj.datadir:\n%s\n' % obj.datadir)
print('2. obj.file:\n%s\n' % obj.file)
print('3. obj.datakeys:\n%s\n' % str(obj.datakeys))
print('4. obj.desc:\n%s\n' % obj.desc)
print('5. obj[obj.datakeys[10]]:\n%s\n' % obj[obj.datakeys[10]])
print(" obj['gtcout/krdltr']: %s\n" % obj['gtcout/krdltr'])
print(" obj['gtcout/krrhoi']: %s\n" % obj['gtcout/krrhoi'])
print("6. obj.keys():\n%s\n" % str(obj.keys()))
print("7. obj.find('phi', 'field'):\n%s\n" % str(obj.find('phi', 'field')))
| mit | Python | |
4a9e2ac4a92fb67fd1f77605b5db6e6c3e5becc4 | add CubicToQuadraticFilter | googlefonts/ufo2ft,googlei18n/ufo2ft,jamesgk/ufo2fdk,moyogo/ufo2ft,jamesgk/ufo2ft | Lib/ufo2ft/filters/cubicToQuadratic.py | Lib/ufo2ft/filters/cubicToQuadratic.py | from __future__ import (
print_function, division, absolute_import, unicode_literals)
from ufo2ft.filters import BaseFilter
from cu2qu.ufo import DEFAULT_MAX_ERR
from cu2qu.pens import Cu2QuPointPen
class CubicToQuadraticFilter(BaseFilter):
_kwargs = {
'conversionError': None,
'unitsPerEm': 1000,
'reverseDirection': True,
}
def start(self):
relativeError = self.conversionError or DEFAULT_MAX_ERR
self.absoluteError = relativeError * self.unitsPerEm
def filter(self, glyph, glyphSet=None):
if not len(glyph):
return False
pen = Cu2QuPointPen(
glyph.getPointPen(),
self.absoluteError,
reverse_direction=self.reverseDirection)
contours = list(glyph)
glyph.clearContours()
for contour in contours:
contour.drawPoints(pen)
return True
| mit | Python | |
1811ddb9e49307f3473add8363c464946dffb0d8 | Create jsonToExcel.py | miguelFLG13/JSONtoExcel | jsonToExcel.py | jsonToExcel.py | #! /usr/bin/python
"""
JSON to Excel v1.0
Python script to convert a JSON to a XLS file
Created by: Miguel Jiménez
Date: 22/10/2014
"""
import json
import xlwt
import sys
import os
if sys.argv[1] == "help":
print("Usage:\n\tjsonToExcel.py json_file.json")
sys.exit(1)
if not os.path.exists(sys.argv[1]):
print("Cannot open "+sys.argv[1])
sys.exit(1)
file_name = sys.argv[1]
extension = file_name.split('.')[-1]
if not extension in ('json'):
print("The extension of json_file is incorrect")
sys.exit(1)
file = open(file_name)
text_json = file.read()
try:
json_imported = json.loads(text_json)
except:
print("The content of json_file is incorrect")
sys.exit(1)
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('json exported')
columns = list(json_imported[0].keys())
i = 0
for column in columns:
worksheet.write(0, i, column)
i += 1
j = 1
for row in json_imported:
i = 0
for column in columns:
worksheet.write(j, i, row[column])
i += 1
j += 1
try:
workbook.save(file_name.split('.')[0] + '.xls')
except:
print("Can't write the xls file")
sys.exit(1)
| apache-2.0 | Python | |
41eeab76452cbbb92711c0a60bda4cfc28d8502f | Add network_training_tensorflow.py | tum-pbs/PhiFlow,tum-pbs/PhiFlow | demos/network_training_tensorflow.py | demos/network_training_tensorflow.py | """ TensorFlow Network Training Demo
Trains a simple CNN to make velocity fields incompressible.
This script runs for a certain number of steps before saving the trained network and halting.
"""
from phi.tf.flow import *
math.seed(0) # Make the results reproducible
net = u_net(2, 2) # for a fully connected network, use net = dense_net(2, 2, [64, 64, 64])
optimizer = keras.optimizers.Adam(1e-3)
@vis.action # make this function callable from the user interface
def save_model(step):
path = viewer.scene.subpath(f"net_{step}.h5")
net.save_weights(path)
viewer.info(f"Model saved to {path}.")
@vis.action
def reset():
math.seed(0)
net.load_weights(viewer.scene.subpath(f"net_0.h5"))
global optimizer
optimizer = keras.optimizers.Adam(1e-3)
prediction = CenteredGrid((0, 0), extrapolation.BOUNDARY, x=64, y=64)
prediction_div = CenteredGrid(0, 0, x=64, y=64)
viewer = view(scene=True, namespace=globals(), select='batch')
save_model(0)
reset() # Ensure that the first run will be identical to every time reset() is called
for step in viewer.range():
# Load or generate training data
data = CenteredGrid(Noise(batch(batch=8), channel(vector=2)), extrapolation.BOUNDARY, x=64, y=64)
with tf.GradientTape() as tape:
# Prediction
prediction = field.native_call(net, data) # calls net with shape (BATCH_SIZE, channels, spatial...)
# Simulation
prediction_div = field.divergence(prediction)
# Define loss and compute gradients
loss = field.l2_loss(prediction_div) + field.l2_loss(prediction - data)
gradients = tape.gradient(loss.mean, net.trainable_variables)
# Show curves in user interface
viewer.log_scalars(loss=loss, div=field.mean(abs(prediction_div)), distance=math.vec_abs(field.mean(abs(prediction - data))))
# Compute gradients and update weights
optimizer.apply_gradients(zip(gradients, net.trainable_variables))
if (step + 1) % 100 == 0:
save_model(step + 1)
| mit | Python | |
ead3203a804e6397a00531d3b9e284edd87d0718 | Add merge candidates cmd #72 | california-civic-data-coalition/django-calaccess-processed-data,california-civic-data-coalition/django-calaccess-processed-data | calaccess_processed/management/commands/mergecandidates.py | calaccess_processed/management/commands/mergecandidates.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Merge Persons that share the same CAL-ACCESS filer_id.
"""
from django.db.models import Count
from calaccess_processed.management.commands import LoadOCDModelsCommand
from opencivicdata.models import Person, PersonIdentifier
from opencivicdata.models.merge import merge
class Command(LoadOCDModelsCommand):
    """
    Merge Persons that share the same CAL-ACCESS filer_id.
    """
    help = 'Merge Persons that share the same CAL-ACCESS filer_id'
    def handle(self, *args, **options):
        """
        Make it happen.

        For every ``calaccess_filer_id`` identifier linked to more than one
        Person, merge all of those Persons into the first one and drop the
        duplicate identifier rows left behind on the survivor.
        """
        super(Command, self).handle(*args, **options)
        # Loop over all CAL-ACCESS filer_ids linked to multiple Persons
        # (values() + annotate(Count) groups identifiers by their string
        # value and counts the distinct persons attached to each).
        shared_filer_ids_q = PersonIdentifier.objects.values(
            'identifier',
        ).annotate(
            person_count=Count('person'),
        ).filter(
            scheme='calaccess_filer_id',
            person_count__gt=1,
        )
        self.header(
            "Merging %s Person sets with shared CAL-ACCESS filer_id" % shared_filer_ids_q.count()
        )
        for filer_id in shared_filer_ids_q.all():
            # get the persons with that filer_id
            persons = Person.objects.filter(
                identifiers__scheme='calaccess_filer_id',
                identifiers__identifier=filer_id['identifier'],
            ).all()
            # each person will be merged into this one
            survivor = persons[0]
            # loop over all the rest of them
            # NOTE(review): assumes the queryset ordering is stable across
            # the merge calls -- confirm Person.Meta defines an ordering.
            for i in range(1, filer_id['person_count']):
                if survivor.id != persons[i].id:
                    merge(survivor, persons[i])
            # also delete the now duplicated PersonIdentifier objects
            # (merging copied each duplicate's identifier onto the survivor)
            if survivor.identifiers.count() > 1:
                for i in survivor.identifiers.filter(scheme='calaccess_filer_id')[1:]:
                    i.delete()
        self.success("Done!")
| mit | Python | |
a7629ef3acedaa688a455c01afb65c40a53c14b0 | Add unit test for drawing routines | jmeyers314/jtrace,jmeyers314/batoid,jmeyers314/jtrace,jmeyers314/batoid,jmeyers314/jtrace | tests/test_draw.py | tests/test_draw.py | import batoid
import time
import os
import yaml
import numpy as np
import pytest
from test_helpers import timer
# Use matplotlib with a non-interactive backend.
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
@timer
def initialize(ngrid=25, theta_x=1.):
    """Build the DESI telescope model and trace a grid of rays through it.

    Returns a (telescope, traceFull result) tuple; ngrid is the ray grid
    size per side and theta_x the field angle in degrees.
    """
    DESI_fn = os.path.join(batoid.datadir, 'DESI', 'DESI.yaml')
    config = yaml.safe_load(open(DESI_fn))
    telescope = batoid.parse.parse_optic(config['opticalSystem'])
    # Convert the gnomic field angle (radians) into a direction cosine triple.
    dirCos = batoid.utils.gnomicToDirCos(np.deg2rad(theta_x), 0.)
    rays = batoid.rayGrid(
        telescope.dist, telescope.pupilSize, dirCos[0], dirCos[1], -dirCos[2],
        ngrid, 500e-9, 1.0, telescope.inMedium
    )
    return telescope, telescope.traceFull(rays)
@timer
def draw2dtelescope(ax, telescope):
    """Draw the telescope cross-section in black on a 2D matplotlib axis."""
    telescope.draw2d(ax, c='k')
@timer
def draw2drays(ax, rays, start=None, stop=None):
    """Draw the traced rays in blue on a 2D axis; start/stop limit the surfaces shown."""
    batoid.drawTrace2d(ax, rays, start, stop, c='b', lw=1)
def test_draw2d(ngrid=25):
    """Smoke test: render the telescope and ray trace on a 2D figure."""
    telescope, rays = initialize(ngrid)
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111)
    draw2dtelescope(ax, telescope)
    draw2drays(ax, rays)
@timer
def draw3dtelescope(ax, telescope):
    """Draw the telescope in black on a 3D matplotlib axis."""
    telescope.draw3d(ax, c='k')
@timer
def draw3drays(ax, rays, start=None, stop=None):
    """Draw the traced rays in blue on a 3D axis; start/stop limit the surfaces shown."""
    batoid.drawTrace3d(ax, rays, start, stop, c='b', lw=1)
def test_draw3d(ngrid=25):
    """Smoke test: render the telescope and ray trace on a 3D figure."""
    telescope, rays = initialize(ngrid)
    fig = plt.figure(figsize=(20, 20))
    ax = fig.add_subplot(111, projection='3d')
    draw3dtelescope(ax, telescope)
    draw3drays(ax, rays)
# When run as a script, save the 3D and 2D renderings to PNG files
# (the Agg backend set above makes this work headless).
if __name__ == '__main__':
    test_draw3d()
    plt.savefig('draw3d.png')
    test_draw2d()
    plt.savefig('draw2d.png')
| bsd-2-clause | Python | |
d84ece5908afdfed7eabdb443714b7c8a615e4a4 | Add Garden class - initial commit | bennuttall/rpi-greenhouse,RPi-Distro/python-rpi-greenhouse | garden/garden.py | garden/garden.py | from RPi import GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Greenhouse(object):
    """Controller for the greenhouse indicator LEDs driven over Raspberry Pi GPIO.

    Each colour maps to a list of BCM pin numbers; LEDs are addressed by
    (colour, index) where index is a position within that list.
    """
    # colour -> list of BCM GPIO pin numbers
    leds = {
        'red': [16, 11, 23],
        'white': [13, 9, 27],
        'green': [21, 12, 25],
        'blue': [20, 6, 22],
    }
    def __init__(self):
        self._setup_gpio()
    def _setup_gpio(self):
        # Configure every LED pin as an output and start with it switched off.
        for colour in self.leds:
            for led in self.leds[colour]:
                GPIO.setup(led, GPIO.OUT)
                GPIO.output(led, False)
    def _turn_led_on_or_off(self, colour, index, on_or_off):
        # Drive a single pin high (True) or low (False).
        led = self.leds[colour][index]
        GPIO.output(led, on_or_off)
    def _turn_colour_leds_on_or_off(self, colour, on_or_off):
        # Apply on/off to every LED index of one colour.
        leds = self.leds[colour]
        for led in range(len(leds)):
            if on_or_off:
                self.turn_led_on(colour, led)
            else:
                self.turn_led_off(colour, led)
    def _turn_all_leds_on_or_off(self, on_or_off):
        # Apply on/off to every colour group.
        for colour in self.leds:
            if on_or_off:
                self.turn_colour_leds_on(colour)
            else:
                self.turn_colour_leds_off(colour)
    def turn_led_on(self, colour, index):
        """
        Turn a single LED on, by colour and index
        e.g. turn_led_on('red', 0)
        """
        self._turn_led_on_or_off(colour, index, on_or_off=True)
    def turn_led_off(self, colour, index):
        """
        Turn a single LED off, by colour and index
        e.g. turn_led_off('red', 0)
        """
        self._turn_led_on_or_off(colour, index, on_or_off=False)
    def turn_colour_leds_on(self, colour):
        """
        Turn all LEDs of a particular colour on
        e.g. turn_colour_leds_on('red')
        """
        self._turn_colour_leds_on_or_off(colour, on_or_off=True)
    def turn_colour_leds_off(self, colour):
        """
        Turn all LEDs of a particular colour off
        e.g. turn_colour_leds_off('red')
        """
        self._turn_colour_leds_on_or_off(colour, on_or_off=False)
    def turn_all_leds_on(self):
        """
        Turn all LEDs on
        """
        self._turn_all_leds_on_or_off(on_or_off=True)
    def turn_all_leds_off(self):
        """
        Turn all LEDs off
        """
        self._turn_all_leds_on_or_off(on_or_off=False)
def main():
    """Demo sequence: flash all LEDs, then cycle through each colour group."""
    greenhouse = Greenhouse()
    greenhouse.turn_all_leds_on()
    sleep(2)
    greenhouse.turn_all_leds_off()
    sleep(2)
    for colour in greenhouse.leds:
        greenhouse.turn_colour_leds_on(colour)
        sleep(2)
        greenhouse.turn_colour_leds_off(colour)
if __name__ == '__main__':
    main()
| bsd-3-clause | Python | |
2ea675d3f719e6e309a47d6c0e9b98b407377267 | Create get_mac.py | ellipsys/sonar | get_mac.py | get_mac.py | import re, uuid
# Format uuid.getnode()'s 48-bit hardware address as colon-separated
# hex pairs (e.g. 'aa:bb:cc:dd:ee:ff'). Python 2 print statement.
print ':'.join(re.findall('..', '%012x' % uuid.getnode()))
| artistic-2.0 | Python | |
a1294fc23c4875ef58676d14662e1628bb698065 | Add the test structure | DataDog/uwsgi-dogstatsd,DataDog/uwsgi-dogstatsd,DataDog/uwsgi-dogstatsd | tests/test.py | tests/test.py | import select
import socket
import sys
import urllib2
from time import time, sleep
UDP_SOCKET_TIMEOUT = 5
class Test(object):
    """Placeholder test runner; holds the shared Data aggregator.

    run() is a stub — assertions against the aggregated metrics are
    expected to be added here later.
    """
    def __init__(self, data):
        self.data = data
    def run(self):
        return
class Data(object):
    """
    The data aggregated from the uwsgi app
    """
    def __init__(self):
        # metric name -> parsed metric dict (last packet wins per name)
        self.data = {}
    def parse_packet(self, packet):
        """Parse one statsd-format packet: 'name:value|type[|#tag1,tag2]'."""
        tags = None
        metadata = packet.split('|')
        if (len(metadata) < 2):
            raise Exception('Unparseable metric packet: %s' % packet)
        name_value = metadata[0].split(':')
        metric_type = metadata[1]
        if (len(metadata) == 3):
            # Third field carries tags; it must start with '#'.
            tags = metadata[2].split(',')
            if (len(tags) < 1 or not tags[0].startswith('#')):
                raise Exception('Unparseable metric packet: %s' % packet)
            tags[0] = tags[0][1:]
        if (len(name_value) != 2):
            raise Exception('Unparseable metric packet: %s' % packet)
        metric = {
            'name': name_value[0],
            'value': name_value[1],
            'metric_type': metric_type,
            'tags': tags
        }
        print metric
        self.data[name_value[0]] = metric
    def new_packets(self, packets):
        """Decode a datagram and parse each non-blank line as a packet."""
        packets = unicode(packets, 'utf-8', errors='replace')
        for packet in packets.splitlines():
            if not packet.strip():
                continue
            self.parse_packet(packet)
    def get_data(self):
        return self.data
class Server(object):
    """
    The process which will listen on the statsd port.

    Received datagrams are handed to ``data.new_packets`` for parsing.
    Call run() to bind and poll; call stop() to make the loop exit after
    the current select timeout.
    """
    config = {
        'host': 'localhost',
        'port': 8125
    }
    def __init__(self, data):
        self.data = data
        self.buffer_size = 1024 * 8
        self.address = (self.config['host'], self.config['port'])
        # Initialize the flag so stop() is safe even before run() starts.
        self.running = False
    def run(self):
        """Bind the UDP socket and poll it until stop() is called."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setblocking(0)
        try:
            self.socket.bind(self.address)
        except socket.gaierror:
            # 'localhost' may be missing from the hosts file; retry on 127.0.0.1.
            # BUG FIX: this used log.warning(), but no ``log`` exists in this
            # module, which raised NameError instead of warning.
            if self.address[0] == 'localhost':
                print("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
                self.address = ('127.0.0.1', self.address[1])
                self.socket.bind(self.address)
        print("Listening on host & port: %s" % str(self.address))
        sock = [self.socket]
        select_select = select.select  # local alias: cheaper lookup in the loop
        timeout = UDP_SOCKET_TIMEOUT
        self.running = True
        while self.running:
            try:
                ready = select_select(sock, [], [], timeout)
                if ready[0]:
                    message = self.socket.recv(self.buffer_size)
                    self.data.new_packets(message)
            except (KeyboardInterrupt, SystemExit):
                break
            except Exception:
                print('Error receiving datagram')
    def stop(self):
        """Request the run() loop to exit."""
        # BUG FIX: was ``sef.running = False`` — a NameError that made
        # stop() crash instead of stopping the server.
        self.running = False
def main():
    """Wire the aggregator into the UDP server and run both."""
    data = Data()
    server = Server(data)
    test = Test(data)
    try:
        server.run()
        test.run()
    except:
        # NOTE(review): bare except hides the real failure, and the message
        # mentions threads although none are started here — worth revisiting.
        print 'Error: unable to start thread'
if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause | Python | |
703a94b1352d710ee4577dee0c5ad5c349d0c49e | Add SMTP returner | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/returners/smtp_return.py | salt/returners/smtp_return.py | '''
Return salt data via email
The following fields can be set in the minion conf file:
smtp.from (required)
smtp.to (required)
smtp.host (required)
smtp.username (optional)
smtp.password (optional)
smtp.tls (optional, defaults to False)
smtp.subject (optional, but helpful)
smtp.fields (optional)
There are a few things to keep in mind:
* If a username is used, a password is also required.
* You should at least declare a subject, but you don't have to.
* smtp.fields lets you include the value(s) of various fields in the subject
line of the email. These are comma-delimited. For instance:
smtp.fields: id,fun
...will display the id of the minion and the name of the function in the
subject line. You may also use 'jid' (the job id), but it is generally
recommended not to use 'return', which contains the entire return data
structure (which can be very large).
'''
# Import python libs
import pprint
import logging
import smtplib
log = logging.getLogger(__name__)
def __virtual__():
    """Expose this returner to Salt under the name 'smtp_return'."""
    return 'smtp_return'
def returner(ret):
    '''
    Send an email with the data

    Reads smtp.* settings from the minion config (see module docstring),
    builds a plain-text message from the job return and sends it.
    '''
    from_addr = __salt__['config.option']('smtp.from')
    to_addrs = __salt__['config.option']('smtp.to')
    host = __salt__['config.option']('smtp.host')
    user = __salt__['config.option']('smtp.username')
    passwd = __salt__['config.option']('smtp.password')
    subject = __salt__['config.option']('smtp.subject')
    fields = __salt__['config.option']('smtp.fields').split(',')
    # Append the configured return fields (e.g. id, fun, jid) to the subject
    for field in fields:
        if field in ret:
            subject += ' {0}'.format(ret[field])
    # BUG FIX: was log.debug('subject'), which logged the literal word
    # 'subject' instead of the assembled subject line.
    log.debug(subject)
    content = pprint.pformat(ret['return'])
    message = ('From: {0}\r\n'
               'To: {1}\r\n'
               'Subject: {2}\r\n'
               '\r\n'
               'id: {3}\r\n'
               'function: {4}\r\n'
               'jid: {5}\r\n'
               '{6}').format(from_addr,
                             to_addrs,
                             subject,
                             ret['id'],
                             ret['fun'],
                             ret['jid'],
                             content)
    server = smtplib.SMTP(host)
    if __salt__['config.option']('smtp.tls') is True:
        server.starttls()
    # A username without a password (or vice versa) skips authentication.
    if user and passwd:
        server.login(user, passwd)
    server.sendmail(from_addr, to_addrs, message)
    server.quit()
| apache-2.0 | Python | |
ed9018fb5e234a60ea3affd0035cf562fea5fb05 | add transient spiral | followthesheep/galpy,jobovy/galpy,jobovy/galpy,followthesheep/galpy,followthesheep/galpy,followthesheep/galpy,jobovy/galpy,jobovy/galpy | galpy/potential_src/TransientLogSpiralPotential.py | galpy/potential_src/TransientLogSpiralPotential.py | ###############################################################################
# TransientLogSpiralPotential: a transient spiral potential
###############################################################################
import math as m
from planarPotential import planarPotential
_degtorad= m.pi/180.
class TransientLogSpiralPotential(planarPotential):
    """Class that implements a transient logarithmic spiral potential

    V(r,phi,t) = A(t)/alpha cos(alpha ln(r) + m(phi - Omegas*t-gamma))

    where

    A(t) = A_max exp(- [t-to]^2/sigma^2/2.)
    """
    def __init__(self,amp=1.,omegas=0.65,A=0.035,
                 alpha=7.,m=2,gamma=m.pi/4.,p=None,
                 sigma=1.,to=0.):
        """
        NAME:
           __init__
        PURPOSE:
           initialize a transient logarithmic spiral potential localized
           around to
        INPUT:
           amp - amplitude to be applied to the potential (default:
                 1., A below)
           gamma - angle between sun-GC line and the line connecting the
                  peak of the spiral pattern at the Solar radius
                  (in rad; default=45 degree)
           A - force amplitude (alpha*potential-amplitude; default=0.035)
           omegas= - pattern speed (default=0.65)
           m= number of arms
           to= time at which the spiral peaks
           sigma= "spiral duration" (sigma in Gaussian amplitude)
           Either provide:
              a) alpha=
              b) p= pitch angle (rad)
        OUTPUT:
           (none)
        HISTORY:
           2011-03-27 - Started - Bovy (NYU)
        """
        planarPotential.__init__(self,amp=amp)
        self._omegas = omegas
        self._A = A
        self._m = m
        self._gamma = gamma
        self._to = to
        self._sigma2 = sigma**2.
        if p is not None:
            # Pitch angle -> alpha: alpha = m cot(p) = m/tan(p).
            # BUG FIX: this used m.cot(p), but the ``m`` parameter shadows
            # the ``math as m`` module inside __init__ (and math has no cot
            # anyway), so it always raised. Import tan locally instead.
            from math import tan
            self._alpha = self._m/tan(p)
        else:
            self._alpha = alpha
    def _evaluate(self,R,phi=0.,t=0.):
        """
        NAME:
           _evaluate
        PURPOSE:
           evaluate the potential at R,phi,t
        INPUT:
           R - Galactocentric cylindrical radius
           phi - azimuth
           t - time
        OUTPUT:
           Phi(R,phi,t)
        HISTORY:
           2011-03-27 - Started - Bovy (NYU)
        """
        return self._A*m.exp(-(t-self._to)**2./2./self._sigma2)\
            /self._alpha*m.cos(self._alpha*m.log(R)
                               +self._m*(phi-self._omegas*t-self._gamma))
    def _Rforce(self,R,phi=0.,t=0.):
        """
        NAME:
           _Rforce
        PURPOSE:
           evaluate the radial force for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           phi - azimuth
           t - time
        OUTPUT:
           the radial force
        HISTORY:
           2011-03-27 - Written - Bovy (NYU)
        """
        # F_R = -dPhi/dR = A(t)/R sin(alpha ln R + m(phi - Omegas t - gamma))
        # BUG FIX: previously returned cos(...) here, but the derivative of
        # the cosine potential gives sine.
        return self._A*m.exp(-(t-self._to)**2./2./self._sigma2)\
            /R*m.sin(self._alpha*m.log(R)
                     +self._m*(phi-self._omegas*t-self._gamma))
    def _phiforce(self,R,phi=0.,t=0.):
        """
        NAME:
           _phiforce
        PURPOSE:
           evaluate the azimuthal force for this potential
        INPUT:
           R - Galactocentric cylindrical radius
           phi - azimuth
           t - time
        OUTPUT:
           the azimuthal force
        HISTORY:
           2011-03-27 - Written - Bovy (NYU)
        """
        # F_phi = -dPhi/dphi = A(t) m/alpha sin(alpha ln R + m(phi - Omegas t - gamma))
        # BUG FIX: previously returned cos(...) here as well.
        return self._A*m.exp(-(t-self._to)**2./2./self._sigma2)\
            /self._alpha*self._m*m.sin(self._alpha*m.log(R)
                                       +self._m*(phi-self._omegas*t
                                                 -self._gamma))
| bsd-3-clause | Python | |
dcc6ba3558696ba73d8e00f820eb8941b2d48b51 | test Console-Logging | MiracleWong/PythonBasic,MiracleWong/PythonBasic | testConsoleLoggin.py | testConsoleLoggin.py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Exercise each severity level of the third-party console_logging package.
from console_logging import console
console.log("Hello World!")
console.log("This is a log.")
console.error("This is an error.")
console.info("This is some neutral info.")
console.success("This is a success message.") | mit | Python | |
537e243d15d1689e9e14535acb01bf65c9cbffcf | Add migration <ADDON>GuidFile -> StoredFileNode | samchrisinger/osf.io,haoyuchen1992/osf.io,zachjanicki/osf.io,aaxelb/osf.io,hmoco/osf.io,samchrisinger/osf.io,KAsante95/osf.io,doublebits/osf.io,icereval/osf.io,mluo613/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,GageGaskins/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,acshi/osf.io,mluo613/osf.io,felliott/osf.io,samanehsan/osf.io,billyhunt/osf.io,danielneis/osf.io,leb2dg/osf.io,TomHeatwole/osf.io,monikagrabowska/osf.io,pattisdr/osf.io,DanielSBrown/osf.io,crcresearch/osf.io,zamattiac/osf.io,kch8qx/osf.io,leb2dg/osf.io,binoculars/osf.io,brandonPurvis/osf.io,njantrania/osf.io,wearpants/osf.io,jnayak1/osf.io,njantrania/osf.io,caseyrygt/osf.io,mluo613/osf.io,alexschiller/osf.io,mattclark/osf.io,cwisecarver/osf.io,mfraezz/osf.io,jnayak1/osf.io,billyhunt/osf.io,arpitar/osf.io,wearpants/osf.io,caneruguz/osf.io,wearpants/osf.io,aaxelb/osf.io,billyhunt/osf.io,CenterForOpenScience/osf.io,hmoco/osf.io,Johnetordoff/osf.io,RomanZWang/osf.io,haoyuchen1992/osf.io,brianjgeiger/osf.io,SSJohns/osf.io,brandonPurvis/osf.io,zamattiac/osf.io,sloria/osf.io,ticklemepierce/osf.io,mattclark/osf.io,Nesiehr/osf.io,felliott/osf.io,zamattiac/osf.io,arpitar/osf.io,TomHeatwole/osf.io,alexschiller/osf.io,Ghalko/osf.io,ZobairAlijan/osf.io,ZobairAlijan/osf.io,HalcyonChimera/osf.io,alexschiller/osf.io,ticklemepierce/osf.io,laurenrevere/osf.io,icereval/osf.io,amyshi188/osf.io,chennan47/osf.io,TomHeatwole/osf.io,rdhyee/osf.io,cosenal/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,samanehsan/osf.io,chennan47/osf.io,petermalcolm/osf.io,Nesiehr/osf.io,laurenrevere/osf.io,asanfilippo7/osf.io,Ghalko/osf.io,GageGaskins/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,adlius/osf.io,caseyrollins/osf.io,mluke93/osf.io,SSJohns/osf.io,kwierman/osf.io,TomBaxter/osf.io,petermalcolm/osf.io,asanfilippo7/osf.io,GageGaskins/osf.io,amyshi188/osf.io,baylee-d/osf.io,caseyrollins/osf.io,felliott/osf.io,cslzchen/osf.io,cslzchen/
osf.io,sloria/osf.io,brianjgeiger/osf.io,acshi/osf.io,RomanZWang/osf.io,Johnetordoff/osf.io,DanielSBrown/osf.io,brandonPurvis/osf.io,cwisecarver/osf.io,mluke93/osf.io,hmoco/osf.io,danielneis/osf.io,GageGaskins/osf.io,saradbowman/osf.io,RomanZWang/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,adlius/osf.io,arpitar/osf.io,brianjgeiger/osf.io,doublebits/osf.io,pattisdr/osf.io,caseyrygt/osf.io,caneruguz/osf.io,haoyuchen1992/osf.io,mfraezz/osf.io,cwisecarver/osf.io,acshi/osf.io,erinspace/osf.io,adlius/osf.io,arpitar/osf.io,brandonPurvis/osf.io,asanfilippo7/osf.io,acshi/osf.io,billyhunt/osf.io,samanehsan/osf.io,monikagrabowska/osf.io,alexschiller/osf.io,jnayak1/osf.io,abought/osf.io,Ghalko/osf.io,kwierman/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,KAsante95/osf.io,samanehsan/osf.io,njantrania/osf.io,CenterForOpenScience/osf.io,rdhyee/osf.io,adlius/osf.io,billyhunt/osf.io,sloria/osf.io,hmoco/osf.io,binoculars/osf.io,caneruguz/osf.io,samchrisinger/osf.io,zamattiac/osf.io,kwierman/osf.io,mluo613/osf.io,caseyrollins/osf.io,kch8qx/osf.io,ticklemepierce/osf.io,Nesiehr/osf.io,zachjanicki/osf.io,danielneis/osf.io,erinspace/osf.io,caneruguz/osf.io,emetsger/osf.io,jnayak1/osf.io,caseyrygt/osf.io,samchrisinger/osf.io,RomanZWang/osf.io,mluo613/osf.io,mluke93/osf.io,mluke93/osf.io,abought/osf.io,kch8qx/osf.io,felliott/osf.io,KAsante95/osf.io,monikagrabowska/osf.io,ticklemepierce/osf.io,pattisdr/osf.io,laurenrevere/osf.io,GageGaskins/osf.io,acshi/osf.io,emetsger/osf.io,asanfilippo7/osf.io,zachjanicki/osf.io,aaxelb/osf.io,DanielSBrown/osf.io,mfraezz/osf.io,cslzchen/osf.io,zachjanicki/osf.io,chennan47/osf.io,KAsante95/osf.io,abought/osf.io,kch8qx/osf.io,alexschiller/osf.io,haoyuchen1992/osf.io,baylee-d/osf.io,cslzchen/osf.io,emetsger/osf.io,crcresearch/osf.io,danielneis/osf.io,monikagrabowska/osf.io,kch8qx/osf.io,cwisecarver/osf.io,leb2dg/osf.io,njantrania/osf.io,wearpants/osf.io,caseyrygt/osf.io,DanielSBrown/osf.io,abought/osf.io,chrisseto/osf.io,binoculars/osf.io,cosenal/osf.io,pet
ermalcolm/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,crcresearch/osf.io,emetsger/osf.io,amyshi188/osf.io,TomBaxter/osf.io,brandonPurvis/osf.io,leb2dg/osf.io,ZobairAlijan/osf.io,doublebits/osf.io,petermalcolm/osf.io,amyshi188/osf.io,icereval/osf.io,rdhyee/osf.io,Ghalko/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,baylee-d/osf.io,TomHeatwole/osf.io,ZobairAlijan/osf.io,KAsante95/osf.io,RomanZWang/osf.io,mfraezz/osf.io,Nesiehr/osf.io,kwierman/osf.io,SSJohns/osf.io,cosenal/osf.io,mattclark/osf.io,aaxelb/osf.io,SSJohns/osf.io,chrisseto/osf.io,cosenal/osf.io,chrisseto/osf.io | scripts/migrate_file_guids.py | scripts/migrate_file_guids.py | import sys
import logging
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
from framework.guid.model import Guid
from website.files import models
from website.addons.box.model import BoxFile
from website.addons.s3.model import S3GuidFile
from website.addons.dropbox.model import DropboxFile
from website.addons.github.model import GithubGuidFile
from website.addons.dataverse.model import DataverseFile
from website.addons.figshare.model import FigShareGuidFile
from website.addons.osfstorage.model import OsfStorageGuidFile
from website.addons.googledrive.model import GoogleDriveGuidFile
logger = logging.getLogger(__name__)
def do_migration():
    """Run the per-provider guid migrations, OsfStorage first (it links
    existing StoredFileNodes rather than creating new ones)."""
    logger.info('Migrating OsfStorage Guids')
    migrate_osfstorage_guids()
    logger.info('Migrating Box Guids')
    migrate_guids(BoxFile, 'box')
    logger.info('Migrating S3 Guids')
    migrate_guids(S3GuidFile, 's3')
    logger.info('Migrating Dropbox Guids')
    migrate_guids(DropboxFile, 'dropbox')
    logger.info('Migrating Github Guids')
    migrate_guids(GithubGuidFile, 'github')
    logger.info('Migrating Dataverse Guids')
    migrate_guids(DataverseFile, 'dataverse')
    logger.info('Migrating figshare Guids')
    migrate_guids(FigShareGuidFile, 'figshare')
    logger.info('Migrating GoogleDrive Guids')
    migrate_guids(GoogleDriveGuidFile, 'googledrive')
def migrate_osfstorage_guids():
    """Point each OsfStorage guid at its existing StoredFileNode.

    The waterbutler_path (minus slashes) is assumed to be the
    StoredFileNode id; guids that resolve to nothing are skipped.
    """
    for guid in OsfStorageGuidFile.find():
        referent = models.StoredFileNode.load(guid.waterbutler_path.strip('/'))
        if referent is None:
            logger.warning('OsfStorageGuidFile {} resolved to None; skipping'.format(guid._id))
            continue
        actual_guid = Guid.load(guid._id)
        assert actual_guid is not None
        actual_guid.referent = referent
        actual_guid.save()
        # Sanity check: the relink must round-trip back to the same guid.
        assert actual_guid._id == referent.get_guid()._id
def migrate_guids(guid_type, provider):
    """Create a StoredFileNode for every addon guid of ``guid_type``.

    Note: No metadata is populated here; it will be populated whenever
    this guid is next viewed. Guids without a node are skipped with a
    warning.
    """
    for guid in guid_type.find():
        node = guid.node
        if node is None:
            logger.warning('{}({})\'s node is None; skipping'.format(guid_type, guid._id))
            continue
        path = guid.waterbutler_path
        file_node = models.StoredFileNode(
            is_file=True,
            node=node,
            provider=provider,
            path=path,
            name=path,
            materialized_path=path,
        )
        file_node.save()
def main(dry=True):
    """Run the migration inside a transaction; a dry run raises at the end
    so the whole transaction is rolled back."""
    init_app(set_backends=True, routes=False)  # Sets the storage backends on all models
    with TokuTransaction():
        do_migration()
        if dry:
            raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
    # Pass 'dry' on the command line to roll back; real runs get file logging.
    dry = 'dry' in sys.argv
    if not dry:
        script_utils.add_file_logger(logger, __file__)
    main(dry=dry)
| apache-2.0 | Python | |
a06d614a119ae25c702b51a282adc8d3f2971f9e | Stop false matches with changelog->upgradeactions linking | asm0dey/Flexget,offbyone/Flexget,dsemi/Flexget,antivirtel/Flexget,lildadou/Flexget,oxc/Flexget,patsissons/Flexget,Pretagonist/Flexget,LynxyssCZ/Flexget,Danfocus/Flexget,offbyone/Flexget,ibrahimkarahan/Flexget,spencerjanssen/Flexget,tsnoam/Flexget,tsnoam/Flexget,poulpito/Flexget,ZefQ/Flexget,Pretagonist/Flexget,dsemi/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,gazpachoking/Flexget,Danfocus/Flexget,oxc/Flexget,cvium/Flexget,sean797/Flexget,jacobmetrick/Flexget,LynxyssCZ/Flexget,camon/Flexget,malkavi/Flexget,v17al/Flexget,vfrc2/Flexget,X-dark/Flexget,malkavi/Flexget,cvium/Flexget,ianstalk/Flexget,qvazzler/Flexget,Flexget/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,camon/Flexget,X-dark/Flexget,drwyrm/Flexget,patsissons/Flexget,antivirtel/Flexget,lildadou/Flexget,cvium/Flexget,voriux/Flexget,ZefQ/Flexget,crawln45/Flexget,voriux/Flexget,ianstalk/Flexget,qk4l/Flexget,LynxyssCZ/Flexget,thalamus/Flexget,asm0dey/Flexget,tobinjt/Flexget,gazpachoking/Flexget,thalamus/Flexget,crawln45/Flexget,poulpito/Flexget,patsissons/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,qvazzler/Flexget,grrr2/Flexget,ZefQ/Flexget,JorisDeRieck/Flexget,v17al/Flexget,tsnoam/Flexget,Flexget/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,tobinjt/Flexget,ibrahimkarahan/Flexget,thalamus/Flexget,crawln45/Flexget,Danfocus/Flexget,vfrc2/Flexget,qk4l/Flexget,jawilson/Flexget,ianstalk/Flexget,tvcsantos/Flexget,crawln45/Flexget,X-dark/Flexget,jawilson/Flexget,drwyrm/Flexget,malkavi/Flexget,antivirtel/Flexget,v17al/Flexget,lildadou/Flexget,JorisDeRieck/Flexget,OmgOhnoes/Flexget,ratoaq2/Flexget,jacobmetrick/Flexget,grrr2/Flexget,ratoaq2/Flexget,asm0dey/Flexget,offbyone/Flexget,xfouloux/Flexget,oxc/Flexget,grrr2/Flexget,tarzasai/Flexget,LynxyssCZ/Flexget,tarzasai/Flexget,tobinjt/Flexget,vfrc2/Flexget,malkavi/Flexget,tobinjt/Flexget,qk4l/Flexget,tvcsantos/Flexget,drwyrm/Flexget,jacobmetrick/Flexget,JorisDeRieck/Flexget,s
pencerjanssen/Flexget,Flexget/Flexget,qvazzler/Flexget,sean797/Flexget,poulpito/Flexget,xfouloux/Flexget,Pretagonist/Flexget,sean797/Flexget,Danfocus/Flexget,xfouloux/Flexget,ratoaq2/Flexget,tarzasai/Flexget,spencerjanssen/Flexget | gen-changelog.py | gen-changelog.py | # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
from bs4 import BeautifulSoup
import dateutil.parser
import requests
# Output defaults to ./ChangeLog; an optional argv[1] overrides it.
out_path = 'ChangeLog'
if len(sys.argv) > 1:
    dir_name = os.path.dirname(sys.argv[1])
    if dir_name and not os.path.isdir(dir_name):
        print 'Output dir doesn\'t exist: %s' % sys.argv[1]
        sys.exit(1)
    out_path = sys.argv[1]
# Fetch the UpgradeActions wiki page so release headings can link to it.
ua_response = requests.get('http://flexget.com/wiki/UpgradeActions')
ua_soup = BeautifulSoup(ua_response.text, 'html5lib')
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
                                          '--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
# Each commit is emitted between '---' markers: decoration, date, hash, subject, body.
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
    for line in git_log_iter:
        assert line == '---'
        tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
        date = dateutil.parser.parse(next(git_log_iter))
        commit_hash = next(git_log_iter)
        body = list(iter(git_log_iter.next, '---'))
        if tag:
            ver = tag.group(1)
            ua_link = ''
            # Surrounding spaces stop e.g. '1.2.1' from matching inside '1.2.10'.
            result = ua_soup.find('h3', text=re.compile(' %s ' % re.escape(ver)))
            if result:
                ua_link = '^[wiki:UpgradeActions#%s upgrade actions]^ ' % result['id']
            out_file.write('\n=== %s (%s) %s===\n\n' % (ver, date.strftime('%Y.%m.%d'), ua_link))
        out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| # Writes a changelog in trac WikiFormatting based on a git log
from __future__ import unicode_literals, division, absolute_import
import codecs
from itertools import ifilter
import os
import re
import subprocess
import sys
from bs4 import BeautifulSoup
import dateutil.parser
import requests
# Output defaults to ./ChangeLog; an optional argv[1] overrides it.
out_path = 'ChangeLog'
if len(sys.argv) > 1:
    dir_name = os.path.dirname(sys.argv[1])
    if dir_name and not os.path.isdir(dir_name):
        print 'Output dir doesn\'t exist: %s' % sys.argv[1]
        sys.exit(1)
    out_path = sys.argv[1]
# Fetch the UpgradeActions wiki page so release headings can link to it.
ua_response = requests.get('http://flexget.com/wiki/UpgradeActions')
ua_soup = BeautifulSoup(ua_response.text, 'html5lib')
# 1.0.3280 was last revision on svn
git_log_output = subprocess.check_output(['git', 'log', '--pretty=%n---%n.%d%n%ci%n%h%n%s%n%-b%n---%n',
                                          '--topo-order', '--decorate=full','refs/tags/1.0.3280..HEAD'])
# Each commit is emitted between '---' markers: decoration, date, hash, subject, body.
git_log_iter = ifilter(None, git_log_output.decode('utf-8').splitlines())
with codecs.open(out_path, 'w', encoding='utf-8') as out_file:
    for line in git_log_iter:
        assert line == '---'
        tag = re.search('refs/tags/([\d.]+)', next(git_log_iter))
        date = dateutil.parser.parse(next(git_log_iter))
        commit_hash = next(git_log_iter)
        body = list(iter(git_log_iter.next, '---'))
        if tag:
            ver = tag.group(1)
            ua_link = ''
            # NOTE(review): substring match — '1.2.1' also matches '1.2.10'.
            result = ua_soup.find('h3', text=re.compile(re.escape(ver)))
            if result:
                ua_link = '^[wiki:UpgradeActions#%s upgrade actions]^ ' % result['id']
            out_file.write('\n=== %s (%s) %s===\n\n' % (ver, date.strftime('%Y.%m.%d'), ua_link))
        out_file.write(' * (%s) %s\n' % (commit_hash, '[[BR]]\n '.join(body)))
| mit | Python |
4ac7d4ec350e0f56d39c72c8aa33894ff28d3fb0 | add replacement for pfSctl | opnsense/core,opnsense/core,opnsense/core,opnsense/core,opnsense/core,opnsense/core | src/opnsense/service/configd_ctl.py | src/opnsense/service/configd_ctl.py | #!/usr/local/bin/python2.7
"""
Copyright (c) 2015 Ad Schellevis
part of OPNsense (https://www.opnsense.org/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
package : configd
function: commandline tool to send commands to configd (response to stdout)
"""
import socket
import sys
__author__ = 'Ad Schellevis'
configd_socket_name = 'testing/configd.socket'
if len(sys.argv) <= 1:
    print 'usage : %s <command>'%sys.argv[0]
    sys.exit(0)
else:
    # Send each command-line argument as one configd command over a fresh
    # connection, echoing the response to stdout.
    for exec_command in sys.argv[1:]:
        # Create and open unix domain socket
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(configd_socket_name)
        try:
            sock.send(exec_command)
            data = []
            # Read until the daemon closes its side of the connection.
            while True:
                line = sock.recv(4096)
                if line:
                    data.append(line)
                else:
                    break
            print (''.join(data))
        finally:
            sock.close()
| bsd-2-clause | Python | |
7a687d60a607dda79e14e9e8ba50e92a19e3e916 | Add basic main() function | mrmarkgray/haldane | haldane.py | haldane.py | from sensor import *
from display import *
from timer import *
from model import *
import time
def main():
    """Dive-computer loop: sample the sensor each tick, update the
    decompression model and refresh the display."""
    sensor = Sensor.factory("MS5803-14B")
    model = Model.factory("Buhlmann")
    timer = Timer.factory("Sleep", 1)
    display = Display.factory("Stdio")
    print("- Starting Haldane -")
    temp = sensor.getTemperature()
    pres = sensor.getPressure()
    start_time = int(time.time())
    # Seed the model with the surface pressure at t=0.
    model.reset(pres, 0)
    while timer.wait():
        temp = sensor.getTemperature()
        pres = sensor.getPressure()
        current_time = int(time.time()) - start_time
        print(current_time)
        # assumes pressure is in bar; 1.013 is surface pressure and each
        # additional bar is ~10 m of water — TODO confirm sensor units
        depth = (pres - 1.013) * 10
        if depth < 0: depth = 0
        model.update(pres, current_time)
        ndl = model.ndl()
        # 99 is treated as the model's 'no limit' sentinel value.
        if ndl == 99: ndl = "N/A"
        display.display(ndl, current_time, depth, temp)
if __name__ == "__main__":
    main()
| apache-2.0 | Python | |
80af3f92c08404a57cd660e84c1bf9d8f983920c | Create hangman.py | KeyboardFire/ascii-gallows-club | hangman.py | hangman.py | print 'This game is so fun right'
# Joke 'game': immediately declares victory. Python 2 print statement.
print 'You won'
| mit | Python | |
2cbeff222a7b3d0746d62c46566d463c95102fe6 | Optimize the home page. #15 | Minkov/site,Minkov/site,Phoenix1369/site,monouno/site,monouno/site,monouno/site,Minkov/site,monouno/site,Phoenix1369/site,monouno/site,DMOJ/site,Phoenix1369/site,Minkov/site,Phoenix1369/site,DMOJ/site,DMOJ/site,DMOJ/site | judge/views/blog.py | judge/views/blog.py | from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils import timezone
from django.views.generic import ListView, DetailView
from judge.comments import CommentedDetailView
from judge.models import BlogPost, Comment, Problem
from judge.utils.diggpaginator import DiggPaginator
from judge.utils.views import TitleMixin
class PostList(ListView):
    """Paginated list of visible, already-published blog posts, plus the
    recent-comments and recent-problems sidebars."""
    model = BlogPost
    paginate_by = 10
    context_object_name = 'posts'
    template_name = 'blog/list.jade'
    # Optional page title override; defaults to 'Page N of Posts'.
    title = None
    def get_paginator(self, queryset, per_page, orphans=0,
                      allow_empty_first_page=True, **kwargs):
        # Digg-style pagination: 6 body pages with 2 pages of padding.
        return DiggPaginator(queryset, per_page, body=6, padding=2,
                             orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs)
    def get_queryset(self):
        # Sticky posts first, then newest publish date.
        return BlogPost.objects.filter(visible=True, publish_on__lte=timezone.now()).order_by('-sticky', '-publish_on')
    def get_context_data(self, **kwargs):
        context = super(PostList, self).get_context_data(**kwargs)
        context['title'] = self.title or 'Page %d of Posts' % context['page_obj'].number
        context['first_page_href'] = reverse('home')
        context['page_prefix'] = reverse('blog_post_list')
        # select_related/defer avoid per-comment author queries in the sidebar.
        context['comments'] = Comment.objects.select_related('author__user').defer('author__about').order_by('-id')[:10]
        context['problems'] = Problem.objects.filter(is_public=True).order_by('-date', '-id')[:7]
        return context
class PostView(TitleMixin, CommentedDetailView):
    """Detail page for a single blog post, with its comment thread."""
    model = BlogPost
    pk_url_kwarg = 'id'
    context_object_name = 'post'
    template_name = 'blog/content.jade'
    def get_title(self):
        return self.object.title
    def get_comment_page(self):
        # Comment pages for blog posts are keyed as 'b:<post id>'.
        return 'b:%s' % self.object.id
    def get_object(self, queryset=None):
        post = super(PostView, self).get_object(queryset)
        # Hidden or not-yet-published posts 404 unless the user may see them.
        if (not post.visible or post.publish_on > timezone.now())\
                and not self.request.user.has_perm('judge.see_hidden_post'):
            raise Http404()
        return post
| from django.core.urlresolvers import reverse
from django.http import Http404
from django.utils import timezone
from django.views.generic import ListView, DetailView
from judge.comments import CommentedDetailView
from judge.models import BlogPost, Comment, Problem
from judge.utils.diggpaginator import DiggPaginator
from judge.utils.views import TitleMixin
class PostList(ListView):
model = BlogPost
paginate_by = 10
context_object_name = 'posts'
template_name = 'blog/list.jade'
title = None
def get_paginator(self, queryset, per_page, orphans=0,
allow_empty_first_page=True, **kwargs):
return DiggPaginator(queryset, per_page, body=6, padding=2,
orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs)
def get_queryset(self):
return BlogPost.objects.filter(visible=True, publish_on__lte=timezone.now()).order_by('-sticky', '-publish_on')
def get_context_data(self, **kwargs):
context = super(PostList, self).get_context_data(**kwargs)
context['title'] = self.title or 'Page %d of Posts' % context['page_obj'].number
context['first_page_href'] = reverse('home')
context['page_prefix'] = reverse('blog_post_list')
context['comments'] = Comment.objects.order_by('-id')[:10]
context['problems'] = Problem.objects.filter(is_public=True).order_by('-date', '-id')[:7]
return context
class PostView(TitleMixin, CommentedDetailView):
model = BlogPost
pk_url_kwarg = 'id'
context_object_name = 'post'
template_name = 'blog/content.jade'
def get_title(self):
return self.object.title
def get_comment_page(self):
return 'b:%s' % self.object.id
def get_object(self, queryset=None):
post = super(PostView, self).get_object(queryset)
if (not post.visible or post.publish_on > timezone.now())\
and not self.request.user.has_perm('judge.see_hidden_post'):
raise Http404()
return post
| agpl-3.0 | Python |
41f0533edc9ebe788722711af95e040d4f06abb9 | Add simple BIRD client class | fritz0705/lglass | lglass/bird.py | lglass/bird.py | # coding: utf-8
import subprocess
import netaddr
import lglass.route
class BirdClient(object):
def __init__(self, executable="birdc"):
self.executable = executable
def send(self, command, raw=False):
argv = [self.executable]
if raw:
argv.append("-v")
if isinstance(command, str):
argv.extend(command.split())
else:
argv.extend(command)
p = subprocess.Popen(argv,
stdout=subprocess.PIPE, stdin=subprocess.DEVNULL, stderr=subprocess.PIPE)
data = b""
while True:
rdata = p.stdout.read()
if len(rdata) == 0:
break
data += rdata
p.wait()
return data.split(b"\n", 1)[1]
def routes(self, table=None, protocol=None, primary=False, all=True, filtered=False):
command = ["show", "route"]
if table is not None:
command.append("table")
command.append(str(table))
if all:
command.append("all")
if primary:
command.append("primary")
if filtered:
command.append("filtered")
if protocol is not None:
command.append(str(protocol))
res = self.send(command)
return list(parse_routes(res.decode().splitlines()))
def protocols(self):
command = ["show", "protocols"]
res = self.send(command)
for line in res.splitlines()[1:]:
t = line.decode().split()
while len(t) < 7:
t.append(None)
yield tuple(t)
def parse_routes(lines):
lines_iter = iter(lines)
cur_prefix = None
cur_route = None
for line in lines_iter:
if line[0] == "\t":
# route annotation
key, value = line.split(":", 1)
cur_route[key.strip()] = value.strip()
continue
if cur_route is not None:
yield cur_route
if line[0] != " ":
cur_prefix, *args = line.split()
else:
args = line.split()
cur_route = lglass.route.Route(cur_prefix)
if args[0] == "via":
cur_route.nexthop = (netaddr.IPAddress(args[1]), args[3])
if args[-2][0] == "(" and args[-2][-1] == ")":
metric = args[-2][1:-1]
if "/" in metric:
metric = metric.split("/", 1)[0]
cur_route.metric = int(metric)
if cur_route is not None:
yield cur_route
| mit | Python | |
064386acbe509f872e40f3f577e7b6189ed91434 | Test threading in app server rather than in serverTest. | babble/babble,babble/babble,babble/babble,babble/babble,babble/babble,babble/babble | src/test/ed/lang/python/thread2_test.py | src/test/ed/lang/python/thread2_test.py | import _10gen
import ed.appserver.AppContext
import ed.lang.python.Python
import java.io.File
# FIXME: this test produces a lot of output
_10gen.__instance__ = ed.lang.python.Python.toPython(ed.appserver.AppContext(java.io.File('.')))
import test.test_thread
import test.test_threading
| apache-2.0 | Python | |
3d90377b062862d9461d9d8cca59025130d344d8 | Add first prototype for Aizek body | asydorchuk/robotics,asydorchuk/robotics | python/robotics/aizek_body.py | python/robotics/aizek_body.py | import math
from RPi import GPIO as gpio
from actors.redbot_motor_actor import RedbotMotorActor
from controllers.pid_controller import PIDController
from interfaces.spi.mcp3008_spi_interface import MCP3008SpiInterface
from sensors.redbot_wheel_encoder_sensor import RedbotWheelEncoderSensor
from sensors.sharp_ir_distance_sensor import SharpIrDistanceSensor
def uni_to_diff(v, w):
R = 0.032
L = 0.1
vel_l = (2.0 * v - L * w) / (2.0 * R)
vel_r = (2.0 * v + L * w) / (2.0 * R)
return vel_l, vel_r
class AizekBody(object):
'''Aizek is the first generation differential wheel robot.
The class implements software interface over robot's hardware.
'''
def __init__(self):
gpio.setmode(gpio.BOARD)
spi = MCP3008SpiInterface(0)
self.lmotor = RedbotMotorActor(gpio, 8, 10, 12)
self.rmotor = RedbotMotorActor(gpio, 11, 13, 15)
self.wencoder = RedbotWheelEncoderSensor(spi)
self.lsensor = SharpIrDistanceSensor(spi, 5)
self.fsensor = SharpIrDistanceSensor(spi, 4)
self.rsensor = SharpIrDistanceSensor(spi, 3)
self.prev_lradians, self.prev_rradians = self.readVelocitySensors()
self.pos_x, self.pos_y, self.phi = 0.0, 0.0, 0.0
self.lmotor_power, self.rmotor_power = 0.0, 0.0
self.R = 0.032
self.L = 0.1
def setGoal(self, x, y):
self.pos_x = -x
self.pos_y = -y
def updatePosition(self):
lradians, rradians = self.readVelocitySensors()
delta_lradians = lradians - self.prev_lradians
self.prev_lradians = lradians
if self.lmotor_power < 0.0:
delta_lradians = -delta_lradians
delta_rradians = rradians - self.prev_rradians
self.prev_rradians = rradians
if self.rmotor_power < 0.0:
delta_rradians = -delta_rradians
if delta_lradians == 0.0 and delta_rradians == 0.0:
return
distance = self.R * 0.5 * (delta_rradians + delta_lradians)
delta_phi = self.R / self.L * (delta_rradians - delta_lradians)
radius = distance / delta_phi
if delta_phi > 0.0:
circle_x = self.pos_x - radius * math.sin(self.phi)
circle_y = self.pos_y + radius * math.cos(self.phi)
circle_phi = (11.5 * math.pi + self.phi + delta_phi) % (2.0 * math.pi)
self.pos_x = circle_x + radius * math.cos(circle_phi)
self.pos_y = circle_y + radius * math.sin(circle_phi)
self.phi = (12.0 * math.pi + self.phi + delta_phi) % (2.0 * math.pi)
else:
circle_x = self.pos_x + radius * math.sin(self.phi)
circle_y = self.pos_y - radius * math.cos(self.phi)
circle_phi = (12.5 * math.pi + self.phi - delta_phi) % (2.0 * math.pi)
self.pos_x = circle_x + radius * math.cos(circle_phi)
self.pos_y = circle_y + radius * math.sin(circle_phi)
self.phi = (2.0 * math.pi + self.phi - delta_phi) % (2.0 * math.pi)
def start(self):
self.lmotor_power = 0.0
self.lmotor.start()
self.rmotor_power = 0.0
self.rmotor.start()
def stop(self):
self.lmotor_power = 0.0
self.lmotor.stop()
self.rmotor_power = 0.0
self.rmotor.stop()
def setControl(self, lmotor_power, rmotor_power):
self.lmotor_power = lmotor_power
self.lmotor.setPower(lmotor_power)
self.rmotor_power = rmotor_power
self.rmotor.setPower(rmotor_power)
def readVelocitySensors(self):
data = (
self.wencoder.getLeftWheelRadiansTotal(),
self.wencoder.getRightWheelRadiansTotal(),
)
return data
def readDistanceSensors(self):
data = (
self.lsensor.readDistance(),
self.fsensor.readDistance(),
self.rsensor.readDistance(),
)
return data
def main():
#controller = PIDController(0.1, 0.002, 0.005)
robot = AizekBody()
robot.start()
robot.updatePosition()
robot.stop()
if __name__ == '__main__':
main()
| mit | Python | |
ab79e32552e1292feb75db52426d6fd4418c8262 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/wave/read_wav_file2.py | python/wave/read_wav_file2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Read the content of an audio wave file (.wav)
# See: https://docs.python.org/3/library/wave.html
import wave
with wave.open("./test.wav", mode="rb") as wr:
print(wr.readframes(20)) # Read the 20 first frames
print("Num channels:", wr.getnchannels())
print("Sample width:", wr.getsampwidth())
print("Frame rate:", wr.getframerate())
print("Num frames:", wr.getnframes())
| mit | Python | |
610f560c086320b42d83aab7644fe5d9a99d2cb7 | Add channel invite test | DavidHHShao/slack | tests/unit/channels/test_invite.py | tests/unit/channels/test_invite.py | # Copyright (c) 2014 Katsuya Noguchi
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from mock import patch
import unittest
import slack
import slack.channels
import slack.http_client
slack.api_token = 'my_token'
class TestChannelsIncite(unittest.TestCase):
@patch.object(slack.http_client, 'get')
def test_invite(self, http_get_mock):
slack.channels.invite('C123456', 'U123456')
http_get_mock.assert_called_with('channels.create', {
'token': 'my_token',
'channel': 'C123456',
'user': 'U123456',
})
| mit | Python | |
9b4c716117966194d046eef56ea5f891ce3d40f9 | Implement char CNN | Nizametdinov/cnn-pos-tagger | model.py | model.py | import tensorflow as tf
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(input_, output_dim, k_h, k_w):
w = weight_variable([k_h, k_w, input_.get_shape()[-1], output_dim])
b = bias_variable([output_dim])
return tf.nn.conv2d(input_, w, strides=[1, 1, 1, 1], padding='VALID') + b
def model():
batch_size = 50
max_words_in_sentence = 50
max_word_length = 30
char_vocab_size = 100
embedding_size = 16
kernel_widths = [1, 2, 3, 4, 5, 6, 7]
kernel_features = [25 * w for w in kernel_widths]
input_ = tf.placeholder(tf.int32, [batch_size, max_words_in_sentence, max_word_length])
embeddings = tf.truncated_normal([char_vocab_size, embedding_size])
cnn_input = tf.nn.embedding_lookup(embeddings, input_)
cnn_input = tf.reshape(cnn_input, [-1, 1, max_word_length, embedding_size])
cnn_output = []
for kernel_width, kernel_feature_size in zip(kernel_widths, kernel_features):
reduced_size = max_word_length - kernel_width + 1
conv = conv2d(cnn_input, kernel_feature_size, 1, kernel_width)
pool = tf.nn.max_pool(conv, [1, reduced_size], strides=[1, 1, 1, 1], padding='VALID')
cnn_output.append(tf.squeeze(pool, [1, 2]))
cnn_output = tf.concat(cnn_output, 1)
| mit | Python | |
cf31afad7752c3da5eba72613f89e7247a3eaed4 | put actual file up | erichmatt/trainwreck-game | trainwreck.py | trainwreck.py | import pygame
from time import sleep
from pygame.locals import *
import sys
pygame.init()
white = (255,255,255)
black = (0,0,0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0,0, 255)
gray = (100, 100, 100)
train = pygame.image.load('pixel-train2.png')
screen = pygame.display.set_mode((600, 600))
pygame.display.set_caption('TRAIN WRECK')
screen.fill(green)
def train(x, y):
screen.blit(train, (x, y))
x = (300)
y = (300)
train()
pygame.display.flip()
| mit | Python | |
3b5234a370db18fc51d8ad8573981c85544abb47 | Add code to fix the python 2.6.6 auth failure issue. | edina/lockss-daemon,lockss/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,lockss/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,edina/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon,lockss/lockss-daemon | test/frameworks/lib/fix_auth_failure.py | test/frameworks/lib/fix_auth_failure.py | #!/usr/bin/env python
# $Id: fix_auth_failure.py,v 1.1 2011-02-15 20:28:38 barry409 Exp $
# Copyright (c) 2011 Board of Trustees of Leland Stanford Jr. University,
# all rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Stanford University shall not
# be used in advertising or otherwise to promote the sale, use or other dealings
# in this Software without prior written authorization from Stanford University.
import sys
import urllib2
def fix_auth_failure():
"""Fix the python 2.6.6 auth failure bug."""
if sys.version_info[:2] == (2, 6) and sys.version_info[2] >= 6:
def fixed_http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
self.retried = 0
return response
urllib2.HTTPBasicAuthHandler.http_error_401 = fixed_http_error_401
| bsd-3-clause | Python | |
670bd6e65721df376d8b3a5305a1fdb358d214d3 | bump version in __init__.py | WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow | oneflow/__init__.py | oneflow/__init__.py |
VERSION = '0.16.1.7'
|
VERSION = '0.16.1.6'
| agpl-3.0 | Python |
ece1b5f163abb61d080b5e8d0fc8d57a311635d0 | Fix changes in handler arguments | HelioGuilherme66/RIDE,robotframework/RIDE,robotframework/RIDE,caio2k/RIDE,caio2k/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE | src/robotide/spec/libraryfetcher.py | src/robotide/spec/libraryfetcher.py | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.running import TestLibrary
from robotide.spec.iteminfo import LibraryKeywordInfo
def get_import_result(path, args):
try:
lib = TestLibrary(path, args)
kws = [
LibraryKeywordInfo(
kw.name,
kw.doc,
kw.library.name,
_parse_args(kw.arguments)
) for kw in lib.handlers.values()]
return kws
except SystemExit:
raise ImportError('Library "%s" import failed' % path)
def _parse_args(handler_args):
args = []
if handler_args.positional:
args.extend(list(handler_args.positional))
if handler_args.defaults:
for i, value in enumerate(handler_args.defaults):
index = len(handler_args.positional) - len(handler_args.defaults) + i
args[index] = args[index] + '=' + unicode(value)
if handler_args.varargs:
args.append('*%s' % handler_args.varargs)
if handler_args.kwargs:
args.append('**%s' % handler_args.kwargs)
return args
| # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.running import TestLibrary
from robotide.spec.iteminfo import LibraryKeywordInfo
def get_import_result(path, args):
try:
lib = TestLibrary(path, args)
return [
LibraryKeywordInfo(
kw.name,
kw.doc,
kw.library.name,
_parse_args(kw.arguments)
) for kw in lib.handlers.values()]
except SystemExit:
raise ImportError('Library "%s" import failed' % path)
def _parse_args(handler_args):
args = []
if handler_args.names:
args.extend(list(handler_args.names))
if handler_args.defaults:
for i, value in enumerate(handler_args.defaults):
index = len(handler_args.names) - len(handler_args.defaults) + i
args[index] = args[index] + '=' + unicode(value)
if handler_args.varargs:
args.append('*%s' % handler_args.varargs)
return args
| apache-2.0 | Python |
b7c54e6bf4c91587df0e74f04ba63b50d01977aa | Create test2.py | MsRisaMarie/CodeChallenges,MsRisaMarie/CodeChallenges | test2.py | test2.py | import sys
with open(sys.argv[1]) as f:
lines = f.readlines()
for ln in lines:
ln = ln.replace("\n", "")
ln = ln.replace(" ", "")
rev_ln = ''.join(sorted(ln, reverse=True))
result = True
ln_len = len(ln)
half_ln_len = ln_len/2
i = 0
while i < half_ln_len:
if ln[i] != ln[ln_len-i-1]:
result = False
break
i += 1
output_result = "YES | "
if result == False:
output_result = "NO | "
output_result += rev_ln
print output_result
| mit | Python | |
0596076729b651e1042aa3bf2d5018620aa281af | Add async object list iterator | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | gmn/src/d1_gmn/app/management/commands/objectlist_async.py | gmn/src/d1_gmn/app/management/commands/objectlist_async.py | # -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Async ObjectList Iterator.
Fast retrieval of ObjectInfo from a DataONE Node.
"""
import asyncio
import logging
import d1_common.const
import d1_common.types.exceptions
DEFAULT_OBJECT_LIST_PAGE_SIZE = 1000
DEFAULT_MAX_CONCURRENT_D1_REST_CALLS = 20
class ObjectListIteratorAsync:
def __init__(
self,
async_client,
page_size=DEFAULT_OBJECT_LIST_PAGE_SIZE,
list_objects_args_dict=None,
max_concurrent_d1_rest_calls=DEFAULT_MAX_CONCURRENT_D1_REST_CALLS,
):
self._logger = logging.getLogger(__name__)
self._client = async_client
self._page_size = page_size
self._list_objects_args_dict = list_objects_args_dict or {}
self._max_concurrent_d1_rest_calls = max_concurrent_d1_rest_calls
async def itr(self):
object_count = await self._get_total_object_count()
self._logger.debug("Object count: {}".format(object_count))
page_count = (object_count - 1) // self._page_size + 1
self._logger.debug("Page count: {} at {} events per page".format(page_count, self._page_size))
# Debug
# page_count = 10
task_set = set()
for page_idx in range(page_count):
if (
len(task_set) >= self._max_concurrent_d1_rest_calls
):
done_set, task_set = await asyncio.wait(
task_set, return_when=asyncio.FIRST_COMPLETED
)
async for item_pyxb in self._iter_done(done_set):
yield item_pyxb
task_set.add(self._get_page(page_idx))
done_set, task_set = await asyncio.wait(task_set)
async for item_pyxb in self._iter_done(done_set):
yield item_pyxb
async def _get_page(self, page_idx):
page_start_idx = page_idx * self._page_size
try:
return await self._client.list_objects(
start=page_start_idx,
count=self._page_size,
**self._list_objects_args_dict
)
except d1_common.types.exceptions.DataONEException as e:
self._logger.debug(
'Skipped slice. page_idx={} page_start_idx={} page_size={} error="{}"'.format(
page_idx, page_start_idx, self._page_size, e.friendly_format()
)
)
async def _iter_done(self, done_set):
for iter_task in done_set:
for item_pyxb in iter_task.result().objectInfo:
yield item_pyxb
async def _get_total_object_count(self):
args_dict = self._list_objects_args_dict.copy()
args_dict["count"] = 0
return (await self._client.list_objects(**args_dict)).total
| apache-2.0 | Python | |
a8bd9defcf3359296acf7633041b036213868075 | Make getting started easier with a handy script | projectweekend/Pi-Camera-Time-Lapse,projectweekend/Pi-Camera-Time-Lapse | install.py | install.py | #!/usr/bin/env python
import subprocess
def sudo(command_text):
parts = ['sudo']
parts.extend(command_text.split(command_text))
subprocess.call(parts)
def apt_get_install(package_name):
command_text = "apt-get -y install {0}".format(package_name)
sudo(command_text)
def main():
# Install system dependencies
sudo("apt-get update")
sudo("apt-get -y upgrade")
apt_get_install("upstart")
apt_get_install("python-dev")
apt_get_install("python-pip")
# Setup the virtualenv
subprocess.call(["pip", "install", "virtualenv"])
subprocess.call(["virtualenv", "env", "--no-site-packages"])
subprocess.call(["source", "./env/bin/activate"])
subprocess.call(["pip", "install", "-r", "requirements.txt"])
# Make default images folder
subprocess.call(["mkdir", "/home/pi/images"])
# Copy Upstart scripts
subprocess.call(["cp", "upstart/dropbox-worker.conf", "/etc/init"])
subprocess.call(["cp", "upstart/time-lapse.conf", "/etc/init"])
print("Installation complete!")
print("Please reboot your Raspberry Pi :)")
if __name__ == '__main__':
main()
| mit | Python | |
fd1fcead596844552a90980166b9f2a73aabc089 | test cases for 3X3_2K1R scenario and to the fucntion that validates a individual configuration | matheuscas/chess-challenge | tests.py | tests.py | import naive
def test_is_valid_configuration_3X3_2K1R():
all_configurations = [
(1,0,1,0,0,0,0,4,0),
(1,0,0,0,0,4,1,0,0),
(0,0,1,4,0,0,0,0,1),
(0,4,0,0,0,0,1,0,1)
]
M = N = 3
for conf in all_configurations:
assert naive.is_valid_configuration(list(conf), M, N) == True
def test_configuration_3X3_2K1R():
pieces = [naive.KING, naive.KING, naive.ROOK]
M = N = 3
valid_configurations = [
[1,0,1,0,0,0,0,4,0],
[1,0,0,0,0,4,1,0,0],
[0,0,1,4,0,0,0,0,1],
[0,4,0,0,0,0,1,0,1]
]
valid_confs = naive.chess_challenge(pieces, M, N)
assert len(valid_confs)
for vc in valid_confs:
assert (vc in valid_configurations) == True
| mit | Python | |
b372c34cf8bb111c0dcc25b391220f54bc22b36f | add functioning bundestag_reden_down.py | defgsus/sociopyth,defgsus/sociopyth,defgsus/sociopyth | bundestag_down/bundestag_reden_down.py | bundestag_down/bundestag_reden_down.py |
# script to download all "Plenardebatten" from the Deutscher Bundestag
# http://dipbt.bundestag.de/doc/btp/17/17140.pdf
base_url = "http://dipbt.bundestag.de/doc/btp/"
def get_name( period, docnum ):
return base_url + '{:=02}'.format(period) + "/" + '{:=02}'.format(period) + '{:=03}'.format(docnum) + ".pdf"
def download(url):
"""Copy the contents of a file from a given URL
to a local file.
"""
import urllib2
import os.path
print "fetching " + url
# get url
try:
webFile = urllib2.urlopen(url)
except urllib2.HTTPError:
print "failed";
return False
if webFile.getcode() != 200:
return False
if webFile.geturl().find("navigation") != -1:
return False;
# filename & dir
local_name = "./debatten/" + url.split(base_url)[-1]
local_dir = os.path.dirname(local_name)
if os.path.exists(local_dir) == False:
if os.makedirs(local_dir) == False:
print "failed creating dir " + local_dir
return False
# save
localFile = open(local_name, 'w')
localFile.write(webFile.read())
webFile.close()
localFile.close()
return True
for period in xrange(1, 20):
start = 1
#if period == 1: start = 4683
for docnum in xrange(start, 999):
if download( get_name(period, docnum) ) == False:
print "failed"
break
| lgpl-2.1 | Python | |
deeeaba3070f3848c2ed85b85caa2291b5b42ae7 | add script for cleaning up build env '.build' dirs after a successful build and backup. | ccpgames/mumble-releng,ccpgames/mumble-releng | tools/cleanup-buildenv-build-dir.py | tools/cleanup-buildenv-build-dir.py | #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2014 The 'mumble-releng' Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file in the source tree or at
# <http://mumble.info/mumble-releng/LICENSE>.
# cleanup-buildenv-build-dir.py cleans up the
# '<buildenv-name>.build' directories that are
# used for storing source zips, tarballs and source
# trees when a build env is being built.
#
# This script can be used to trim such a directory
# after the build environment has finished building.
# We can't just remove the whole directory in all cases.
# Often times, the '.build' directory will include
# debugging symbols and other things we want to keep
# around. This script takes care not to delete the
# things we want to keep.
from __future__ import (unicode_literals, print_function, division)
import os
import sys
import platform
import ctypes
# The file extensions of files to keep around.
KEEP_EXT = (
# We need to keep '.pdb' files around in the
# buildenv's .build directory, at least for
# the win32-static build environment. This
# is because the static libraries in that
# environment make use of "object file PDBs",
# which are files named "vc120.pdb" (for
# MSVS2013). The paths of these files are
# hard-coded in the object files of the
# static libraries, and need to be kept
# around in order to be able to generate
# "proper" PDB files (linker PDBs) during
# linking of DLLs and EXEs.
'pdb',
# Files with the '.dbg' extension are typically
# files that we extract ourselves using tools
# like 'objdump'. This is to avoid shipping
# unnecessary debug symbols with all Mumble
# build products.
'dbg'
)
def makeAbs(path, unc=True):
'''
Converts the specified path to an absolute path.
On Windows, if unc is True, the function will convert the
path to a UNC-style path, allowing some APIs to escape the 255 character
limit of Win32 paths.
'''
if platform.system() == 'Windows':
uncpart = ''
if unc:
uncpart = '\\\\?\\'
alpha = list(range(ord('a'), ord('z'))) + list(range(ord('A'), ord('Z')))
drive = None
sep = None
absolute = True
if len(path) > 2 and path[0] in alpha and path[1] == ':':
drive = path[0]
sep = path[2]
if len(path) > 0 and path[0] == '/' or path[0] == '\\':
sep = path[0]
if sep is None:
absolute = False
if not absolute:
return uncpart + os.path.join(os.getcwd(), path)
else:
if sep == '/':
path = path.replace(sep, '\\')
if not drive:
drive = os.getcwd()[0]
path = drive + ':' + path
return uncpart + path
else:
if os.path.isabs(path):
return path
return os.path.join(os.getcwd(), path)
def rm(fn):
'''
Removes the file at fn.
On Windows systems, this function has special
cases that allow some otherwise hard-to-delete
files to be deleted anyway.
On all other systems, calling this function
is equivalent to calling os.remove().
'''
if platform.system() == 'Windows':
try:
os.remove(fn)
except WindowsError, e:
# Some of our files can be set to read only,
# or have other arcane permissions and flags
# set. In many of these cases, Python's
# os.remove() will fail with a Win32 'access
# denied' error (0x5).
#
# The 'fix' is to change the file's attributes
# to FILE_ATTRIBUTE_NORMAL and then to retry
# deleting the file.
ERROR_ACCESS_DENIED = 0x05
FILE_ATTRIBUTE_NORMAL = 0x80
if e.winerror == ERROR_ACCESS_DENIED:
if ctypes.windll.kernel32.SetFileAttributesW(unicode(fn), FILE_ATTRIBUTE_NORMAL) == 0:
errno = ctypes.windll.kernel32.GetLastError()
raise Exception('SetFileAttributesW failed with error code {0}'.format(errno))
os.remove(fn)
else:
raise
else:
os.remove(fn)
def main():
if len(sys.argv) < 2:
print('Usage: python cleanup-buildenv-build-dir.py <build-dir>')
print('')
print(' For example:')
print(' python cleanup-buildenv-build-dir.py /MumbleBuild/centos-ermine-1.2.x-2014-06-01-cf59267.build')
sys.exit(1)
build_dir = sys.argv[1]
for dirpath, dirnames, filenames in os.walk(build_dir):
for fn in filenames:
absFn = makeAbs(os.path.join(dirpath, fn))
ext, _ = os.path.splitext(fn.lower())
if not ext in KEEP_EXT:
rm(absFn)
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
ac636016d75f0f874973edf2a8fe1ab991b868c7 | Fix #6: Added auto-setup script | stultus/PeARS,stultus/PeARS,stultus/PeARS | install.py | install.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import platform
import subprocess
# Identify the type of operating system for install packages
OS = platform.dist()[0].lower()
if OS == "fedora" or OS == "redhat":
install = "sudo yum install "
elif OS == "ubuntu" or OS == "debian":
install = "sudo apt-get install "
else:
print "Automatic installation is not available on your system.\n" \
"Please install the system using a description in the 'README.md'"
exit(1)
# It was installed packages: 'pip' and 'virtualenv'
with open("/dev/null", "a") as null:
# If the command 'which' the back 1, the package is not installed
if subprocess.call(["which", "pip"], stdout=null) is 1:
os.system(install + "python-pip" + " -y")
if subprocess.call(["which", "virtualenv"], stdout=null) is 1:
os.system(install + "virtualenv" + " -y")
# Configure virtual environment ('virtualenv')
os.system("sudo virtualenv pears_env")
os.system("bash --rcfile pears_env/bin/activate")
# Installing dependencies
os.system("cd pears_env/bin/ ; sudo -H pip2 install -r ../../requirements.txt")
os.system("cd ../../")
# Get the semantic space
os.system("wget aurelieherbelot.net/pears-demo/wikiwoods.dump.bz2")
os.system("./uncompress_db wikiwoods.dump.bz2")
| mit | Python | |
d71eaed10b2c62fc5b5c6436d146a561479198a3 | bump version in __init__.py | 1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow | oneflow/__init__.py | oneflow/__init__.py |
VERSION = '0.20.11.17'
|
VERSION = '0.20.11.16'
| agpl-3.0 | Python |
135a8117b14817ba06932f5af4c827e183265079 | bump version in __init__.py | WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow | oneflow/__init__.py | oneflow/__init__.py |
VERSION = '0.18.2.4'
|
VERSION = '0.18.2.2'
| agpl-3.0 | Python |
55b6134e9fbab02bbabb1a298f3bb2777608d9a1 | Fix issue 106 | PiRSquared17/django-page-cms,odyaka341/django-page-cms,Alwnikrotikz/django-page-cms,pombreda/django-page-cms,google-code-export/django-page-cms,odyaka341/django-page-cms,Alwnikrotikz/django-page-cms,PiRSquared17/django-page-cms,google-code-export/django-page-cms,pombreda/django-page-cms,PiRSquared17/django-page-cms,pombreda/django-page-cms,odyaka341/django-page-cms,google-code-export/django-page-cms,pombreda/django-page-cms,PiRSquared17/django-page-cms,google-code-export/django-page-cms,Alwnikrotikz/django-page-cms,odyaka341/django-page-cms,Alwnikrotikz/django-page-cms | pages/views.py | pages/views.py | # -*- coding: utf-8 -*-
from django.http import Http404, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.contrib.sites.models import SITE_CACHE
from pages import settings
from pages.models import Page, Content
from pages.utils import auto_render, get_language_from_request, get_page_from_slug
def details(request, slug=None, lang=None, ajax=False):
    """Render the page matching ``slug`` (or the first navigation page).

    Fetches the root pages for navigation and the current page to
    display, then renders everything with the current page's template.

    This view uses the ``auto_render`` decorator: pass the
    ``only_context`` extra parameter to get only the local variables of
    this view without rendering the template.

    >>> from pages.views import details
    >>> context = details(request, only_context=True)

    This can be useful if you want to write your own view: you can
    reuse the following code without having to copy and paste it.
    """
    pages = Page.objects.navigation().order_by("tree_id")
    current_page = False

    if slug:
        current_page = get_page_from_slug(slug, request, lang)
        # Guard: get_page_from_slug may return a falsy value; only a
        # real page whose canonical URL mismatches the request is a 404.
        if current_page and request.META['PATH_INFO'] != \
            current_page.get_absolute_url():
            raise Http404
    elif pages:
        current_page = pages[0]

    # No page resolved at all (no slug match, empty navigation).
    if not current_page:
        raise Http404

    # Hide draft/expired pages from anonymous and non-staff users.
    if not (request.user.is_authenticated() and request.user.is_staff) and \
            current_page.calculated_status in (Page.DRAFT, Page.EXPIRED):
        raise Http404

    if not lang:
        lang = get_language_from_request(request, current_page)

    if current_page.redirect_to:
        # return this object if you want to activate redirections
        # NOTE(review): http_redirect is built but never returned here;
        # it is only reachable through locals() on the ajax path below —
        # confirm redirects are meant to be inert in this view.
        http_redirect = HttpResponsePermanentRedirect(
            current_page.redirect_to.get_absolute_url(lang))

    template_name = current_page.get_template()

    # Ajax requests render a body-only template and expose every local
    # (including http_redirect above) to the template context.
    if ajax:
        new_template_name = "body_%s" % template_name
        return new_template_name, locals()

    return template_name, {
        'pages': pages,
        'current_page': current_page,
        'lang': lang,
        'request': request,
    }
details = auto_render(details)
from django.http import Http404, HttpResponsePermanentRedirect
from django.shortcuts import get_object_or_404
from django.contrib.sites.models import SITE_CACHE
from pages import settings
from pages.models import Page, Content
from pages.utils import auto_render, get_language_from_request, get_page_from_slug
def details(request, slug=None, lang=None, ajax=False):
    """Render the page matching ``slug`` (or the first navigation page).

    Fetches the root pages for navigation and the current page to
    display, then renders everything with the current page's template.

    This view uses the ``auto_render`` decorator: pass the
    ``only_context`` extra parameter to get only the local variables of
    this view without rendering the template.

    >>> from pages.views import details
    >>> context = details(request, only_context=True)

    This can be useful if you want to write your own view: you can
    reuse the following code without having to copy and paste it.
    """
    pages = Page.objects.navigation().order_by("tree_id")
    current_page = False

    if slug:
        current_page = get_page_from_slug(slug, request, lang)
        # Bug fix: get_page_from_slug can return a falsy value when no
        # page matches the slug; dereferencing it unguarded raised an
        # AttributeError (HTTP 500) instead of a clean 404. The falsy
        # case falls through to the "not current_page" check below.
        if current_page and request.META['PATH_INFO'] != \
                current_page.get_absolute_url():
            raise Http404
    elif pages:
        current_page = pages[0]

    # No page resolved at all (no slug match, empty navigation).
    if not current_page:
        raise Http404

    # Hide draft/expired pages from anonymous and non-staff users.
    if not (request.user.is_authenticated() and request.user.is_staff) and \
            current_page.calculated_status in (Page.DRAFT, Page.EXPIRED):
        raise Http404

    if not lang:
        lang = get_language_from_request(request, current_page)

    if current_page.redirect_to:
        # return this object if you want to activate redirections
        http_redirect = HttpResponsePermanentRedirect(
            current_page.redirect_to.get_absolute_url(lang))

    template_name = current_page.get_template()

    # Ajax requests render a body-only template and expose every local
    # to the template context via locals().
    if ajax:
        new_template_name = "body_%s" % template_name
        return new_template_name, locals()

    return template_name, {
        'pages': pages,
        'current_page': current_page,
        'lang': lang,
        'request': request,
    }
details = auto_render(details)
92d878f191b222316dcebb9e9b5492f871c00e06 | Add instance class | zo7/deconvfaces | instance.py | instance.py | """
Instance class to hold data for each example.
"""
import os
import numpy as np
import scipy.misc as misc
from tqdm import tqdm
# ---- Enum classes for vector descriptions
class Gender:
    """Two-class one-hot encoding of gender (index 0 = female, 1 = male)."""
    female, male = [1., 0.], [0., 1.]
class Emotion:
    """Eight-class one-hot encoding of facial emotion.

    The attribute order fixes each emotion's index in the vector:
    angry=0, contemptuous=1, disgusted=2, fearful=3, happy=4,
    neutral=5, sad=6, surprised=7.
    """
    (angry, contemptuous, disgusted, fearful,
     happy, neutral, sad, surprised) = (
        [1. if col == row else 0. for col in range(8)] for row in range(8))
# ---- Loading functions
def load_data(directory):
    """
    Load every usable example found under ``directory``.

    Args:
        directory (str): Directory where the data lives.
    """
    seen_identities = list()
    examples = list()

    # Load instances
    print("Loading instances...")
    image_dir = os.path.join(directory, 'image')
    for name in tqdm(os.listdir(image_dir)):
        # Skip kids and left/right gazes
        if 'Kid' in name or 'frontal' not in name:
            continue
        example = Instance(directory, name)
        if example.identity not in seen_identities:
            seen_identities.append(example.identity)
        examples.append(example)

    # Map each raw identity to a dense 0-based index (first-seen order)
    # and attach the resulting one-hot vectors to every example.
    identity_map = {identity: idx
                    for idx, identity in enumerate(seen_identities)}
    for example in examples:
        example.create_identity_vector(identity_map)

    print("Loaded {} instances with {} identities"
        .format(len(examples), len(identity_map)))

    return examples
# ---- Instance class definition
class Instance:
    """
    One training example: an image, its mask, and the label vectors
    parsed out of its filename.
    """

    def __init__(self, directory, filename):
        """
        Build an Instance from one image/mask pair.

        Args:
            directory (str): Base directory where the example lives.
            filename (str): The name of the file of the example.
        """
        self.image = misc.imread(os.path.join(directory, 'image', filename)) / 255.0
        self.mask = misc.imread(os.path.join(directory, 'mask', filename)) / 255.0

        # Filename fields are underscore-separated:
        # <angle>_<identity>_..._<gender>_<emotion>
        parts = filename.split('_')

        # Orientation becomes a (sin, cos) pair of the yaw angle,
        # re-centred so the frontal pose maps to 0 degrees.
        yaw = np.deg2rad(float(parts[0][-3:]) - 90)
        self.orientation = np.array([np.sin(yaw), np.cos(yaw)])

        self.identity = int(parts[1]) - 1  # identities are 1-indexed on disk
        self.gender = np.array(getattr(Gender, parts[3]))
        self.emotion = np.array(getattr(Emotion, parts[4]))

    def create_identity_vector(self, identity_map):
        """
        Create a one-in-k encoding of the instance's identity.

        Args:
            identity_map (dict): Mapping from identity to a unique index.
        """
        one_hot = np.zeros(len(identity_map), dtype=np.float32)
        one_hot[identity_map[self.identity]] = 1.
        self.identity_vec = one_hot

    def parameter_info(self):
        """
        Describe lengths/shapes of this instance's parameter vectors.

        Returns:
            dict, of parameter information.
        """
        return {
            'identity_len': len(self.identity_vec),
            'gender_len': len(self.gender),
            'orientation_len': len(self.orientation),
            'emotion_len': len(self.emotion),
            'image_shape': tuple(self.image.shape[0:2]),
        }

    def th_image(self):
        """
        Return the image in Theano channel-first (3, H, W) order.
        """
        return np.stack([self.image[:, :, channel] for channel in range(3)])

    def th_mask(self):
        """
        Return the mask in Theano channel-first (1, H, W) order.
        """
        return np.stack([self.mask[:, :, 0]])
| mit | Python | |
6f3579e6ac32211779481307f8e508469dde7605 | Add example of how to read from a external GPS outputting NMEA messages. | waterlinked/examples | externalNMEA.py | externalNMEA.py | from __future__ import print_function
import requests
import argparse
import time
import logging
import sys
import serial
import pynmea2
log = logging.getLogger()
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def set_position_master(url, latitude, longitude, orientation):
    """PUT the master's position and heading to the Underwater GPS API,
    logging an error on any non-200 response."""
    body = dict(lat=latitude, lon=longitude, orientation=orientation)
    response = requests.put(url, json=body, timeout=10)
    if response.status_code != 200:
        log.error("Error setting position and orientation: {} {}".format(
            response.status_code, response.text))
def main():
    """Read NMEA sentences from a serial GPS/compass and push the
    resulting position and heading to the Underwater GPS master API.

    GGA sentences update latitude/longitude; HDT sentences update the
    true heading. Each time either is received, the combined state is
    sent to ``/api/v1/external/master``. Runs until interrupted.
    """
    parser = argparse.ArgumentParser(description="Push position and orientation of master to Underwater GPS")
    parser.add_argument('-u', '--url', help='Base URL to use', type=str, default='http://37.139.8.112:8000')
    parser.add_argument('-d', '--source', help='Device to read nmea strings from', type=str, default='/dev/ttyUSB0')
    args = parser.parse_args()

    baseurl = args.url
    log.info("Using baseurl: %s source: %s", args.url, args.source)

    # Bug fix: the pynmea2 class is NMEAStreamReader; the previous
    # "NMEAStremReader" spelling raised AttributeError on startup.
    reader = pynmea2.NMEAStreamReader()
    try:
        com = serial.Serial(args.source, timeout=5.0)
    except serial.SerialException:
        log.warning('Could not connect to %s', args.source)
        log.warning("Exiting")
        sys.exit()

    lat = 0
    lon = 0
    orientation = 0
    gotUpdate = False

    while True:
        try:
            data = com.read()
            for msg in reader.next(data):
                if type(msg) == pynmea2.types.talker.GGA:
                    # Position fix sentence.
                    lat = msg.latitude
                    lon = msg.longitude
                    gotUpdate = True
                elif type(msg) == pynmea2.types.talker.HDT:
                    # True-heading sentence.
                    orientation = msg.heading
                    gotUpdate = True
        except pynmea2.ParseError as e:
            log.warning("Error while parsing NMEA string: {}".format(e))

        if gotUpdate:
            log.info('Sending position and orientation')
            set_position_master('{}/api/v1/external/master'.format(baseurl), lat, lon, orientation)
            gotUpdate = False

if __name__ == "__main__":
    main()
| mit | Python | |
8117e10dc49f20a167d94ae278cd904a0e27188e | Add Class definitions for models for the products calls and variables | oldarmyc/pitchfork,rackerlabs/pitchfork,rackerlabs/pitchfork,rackerlabs/pitchfork,oldarmyc/pitchfork,oldarmyc/pitchfork | pitchfork/models.py | pitchfork/models.py |
from pitchfork import db
import re
class Product:
    """Wraps the submitted mapping that describes one API product."""

    def __init__(self, product):
        """Populate attributes straight from the ``product`` mapping."""
        self.title = product.get('title')
        self.app_url = product.get('app_url')
        self.us_api = product.get('us_api')
        self.uk_api = product.get('uk_api')
        self.doc_url = product.get('doc_url')
        self.db_name = product.get('db_name')
        # Checkbox-style fields arrive as truthy strings or are absent.
        self.require_dc = bool(product.get('require_dc'))
        self.active = bool(product.get('active'))

    def __unicode__(self):
        return self.title

    def set_db_name(self):
        """Derive db_name from the title: lower-cased, trimmed, with
        each run of spaces collapsed to a single underscore."""
        # One pass replacing ' +' with '_' is equivalent to the old
        # two-pass squeeze-spaces-then-replace approach.
        self.db_name = re.sub(' +', '_', str(self.title.lower().strip()))
class Call:
    """Wraps the submitted mapping that describes one API call."""

    def __init__(self, call):
        """Populate attributes straight from the ``call`` mapping."""
        # Mandatory text fields: a missing key fails loudly on .strip().
        self.title = call.get('title').strip().lower().title()
        self.api_uri = call.get('api_uri').strip()
        self.doc_url = call.get('doc_url').strip()
        self.short_description = call.get('short_description')
        self.verb = call.get('verb')
        # Optional custom header pair defaults to empty strings.
        self.custom_header_key = call.get('custom_header_key', '').strip()
        self.custom_header_value = call.get('custom_header_value', '').strip()
        # Checkbox-style flags arrive as truthy strings or are absent.
        self.add_to_header = bool(call.get('add_to_header'))
        self.use_data = bool(call.get('use_data'))
        self.remove_token = bool(call.get('remove_token'))
        self.remove_content_type = bool(call.get('remove_content_type'))
        self.required_key = bool(call.get('required_key'))
        self.tested = bool(call.get('tested'))
        # Remaining optional payload fields.
        self.data_object = call.get('data_object')
        self.required_key_name = call.get('required_key_name', '').strip()
        self.required_key_type = call.get('required_key_type')
        self.variables = call.get('variables', [])

    def __unicode__(self):
        return self.title
class Variable:
    """Wraps the submitted mapping that describes one call variable."""

    def __init__(self, variable):
        """Populate attributes straight from the ``variable`` mapping."""
        self.variable_name = variable.get('variable_name', '').strip()
        self.field_type = variable.get('field_type')
        self.field_display = variable.get('field_display')
        self.field_display_data = variable.get('field_display_data')
        self.description = variable.get('description')
        # Checkbox-style flag arrives as a truthy string or is absent.
        self.required = bool(variable.get('required'))
        # id_value may arrive as a numeric string from the form.
        self.id_value = int(variable.get('id_value'))

    def __unicode__(self):
        return self.variable_name
| apache-2.0 | Python | |
ac0d0b78b7b4eef913460894fca3af1ace222c7f | Test template for the Damage class | Enether/python_wow | tests/test_damage.py | tests/test_damage.py | import unittest
from damage import Damage
class DamageTests(unittest.TestCase):
    """Tests for the Damage value object."""

    def test_init(self):
        # Both damage components should be rounded to one decimal place,
        # and nothing should start out absorbed.
        damage = Damage(phys_dmg=1.34, magic_dmg=1.49391)

        self.assertEqual(damage.phys_dmg, 1.3)
        self.assertEqual(damage.magic_dmg, 1.5)
        self.assertEqual(damage.phys_absorbed, 0)
        self.assertEqual(damage.magic_absorbed, 0)


if __name__ == '__main__':
    unittest.main()
dd5c0e1f66785089e4d996e40f1a8b93308ab42d | Add unittest | touilleMan/marshmallow-mongoengine | tests/test_params.py | tests/test_params.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime as dt
import decimal
from datetime import datetime
import mongoengine as me
import pytest
from marshmallow_mongoengine import ModelSchema
TEST_DB = 'marshmallow_mongoengine-test'
db = me.connect(TEST_DB)
class BaseTest(object):
    """Shared fixture base: wipes the test database before each test."""

    # NOTE(review): pytest normally calls setup_method on the instance;
    # with @classmethod the first argument is bound to the class instead.
    # It still works here because the body only touches module-level
    # state (db, TEST_DB), but confirm the decorator is intentional.
    @classmethod
    def setup_method(self, method):
        # Reset database from previous test run
        db.drop_database(TEST_DB)
class TestParams(BaseTest):
    """Checks how marshmallow-mongoengine honours mongoengine field
    parameters (``required`` and ``default``) when loading/updating."""

    def test_required(self):
        """``required`` is enforced on load but ignored on update."""
        class Doc(me.Document):
            field_not_required = me.StringField()
            field_required = me.StringField(required=True)

        class DocSchema(ModelSchema):
            class Meta:
                model = Doc

        # Missing required field must produce validation errors.
        doc, errors = DocSchema().load({'field_not_required': 'bad_doc'})
        assert errors
        # Now provide the required field
        doc, errors = DocSchema().load({'field_required': 'good_doc'})
        assert not errors
        assert doc.field_not_required is None
        assert doc.field_required == 'good_doc'
        # Update should not take care of the required fields
        doc, errors = DocSchema().update(doc, {'field_not_required': 'good_doc'})
        assert not errors
        assert doc.field_required == 'good_doc'
        assert doc.field_not_required == 'good_doc'

    def test_default(self):
        """Defaults (literal or callable) apply only when no value is
        provided, and never shadow explicitly given values."""
        def generate_default_value():
            return 'default_generated_value'

        class Doc(me.Document):
            field_with_default = me.StringField(default='default_value')
            field_required_with_default = me.StringField(required=True,
                                                         default=generate_default_value)

        class DocSchema(ModelSchema):
            class Meta:
                model = Doc

        # Make sure default doesn't shadow given values
        doc, errors = DocSchema().load({'field_with_default': 'custom_value',
                                        'field_required_with_default': 'custom_value'})
        assert not errors
        assert doc.field_with_default == 'custom_value'
        assert doc.field_required_with_default == 'custom_value'
        # Now use defaults
        doc, errors = DocSchema().load({})
        assert not errors
        assert doc.field_with_default == 'default_value'
        assert doc.field_required_with_default == 'default_generated_value'
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.