commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
706a88810abc1be1fcfa799b7bb46a1c8e774d59 | add pygithub.login_github() | lsst-sqre/sqre-codekit,lsst-sqre/sqre-codekit | codekit/pygithub.py | codekit/pygithub.py | """
pygithub based functions intended to replace the github3.py based functions in
codetools.
"""
import logging
from public import public
from github import Github
import codekit.codetools as codetools
logging.basicConfig()
logger = logging.getLogger('codekit')
@public
def login_github(token_path=None, token=None):
    """Log into GitHub using an existing token.

    Parameters
    ----------
    token_path : str, optional
        Path to the token file. The default token is used otherwise.
    token : str, optional
        Literal token string. If specified, this value is used instead of
        reading from the token_path file.

    Returns
    -------
    gh : :class:`github.GitHub` instance
        A GitHub login instance.
    """
    # Resolve the token (explicit string wins over the token file), then
    # hand it to PyGithub.
    resolved_token = codetools.github_token(token_path=token_path, token=token)
    return Github(resolved_token)
| mit | Python | |
abc527d4e35b2a0946986575fd6b2ae2a87e0556 | Create filter_vcf_deamination.py | ruidlpm/Utils,ruidlpm/Utils | filter_vcf_deamination.py | filter_vcf_deamination.py | #!/usr/bin/python
#
# filter_vcf_deamination.py
# version: 1.1
# Removes potential deamination from vcf file
# optional arguments:
# -h, --help show this help message and exit
# -i VCF_INPUT
# -o VCF_OUTPUT
# usage: filter_vcf_deamination.py [-h] [-i VCF_INPUT] [-o VCF_OUTPUT]
#
# Date: 12/11/2015
# Author: Rui Martiniano
#
# Note: included test
# run this:
# python filter_vcf_deamination.py -i test.vcf -o test_out.vcf
from __future__ import print_function
import argparse
import sys
import time
# Command-line interface. NOTE(review): this module-level `parser` name is
# shadowed by the parser() function defined further down; argument parsing
# runs before that definition is reached, so the script still works.
parser = argparse.ArgumentParser(description="Removes potential deamination from vcf file")
#add options to argparser
parser.add_argument('-i', action="store", dest="vcf_input", type=str)
parser.add_argument('-o', action="store", dest="vcf_output", type=str)
#test parameters
try:
    options=parser.parse_args()
except:
    # Bare except: any parsing failure prints usage and exits cleanly.
    parser.print_help()
    sys.exit(0)
vcf_input=options.vcf_input
vcf_output=options.vcf_output
# Output handle opened eagerly; closed at the bottom of the script.
outfile=open(vcf_output,'w')
def parser(i):
    """Mask potentially deaminated genotypes in one vcf record.

    Takes a snp line (list of tab-separated vcf fields) and, for
    transition SNPs that can result from cytosine deamination, replaces
    the genotype fields that may be artefactual with './.' (missing):

    * C>T / G>A sites: heterozygous ('0/1') and homozygous-alternate
      ('1/1') calls may be deamination artefacts.
    * T>C / A>G sites: the reference allele itself may be damaged, so
      heterozygous ('0/1') and homozygous-reference ('0/0') calls are
      masked instead.

    Returns the (possibly modified) list of fields; non-transition sites
    are returned unchanged.
    """
    ref, alt = i[3], i[4]
    if (ref, alt) in (('C', 'T'), ('G', 'A')):
        masked_prefixes = ('0/1', '1/1')
    elif (ref, alt) in (('T', 'C'), ('A', 'G')):
        masked_prefixes = ('0/1', '0/0')
    else:
        # Not a deamination-prone transition: return the line untouched.
        return i
    newline = []
    for item in i:
        if item.startswith(masked_prefixes):
            newline.append('./.')
        else:
            newline.append(item)
    return newline
start = time.time()
#iterate through each line
counter=0
# NOTE(review): initial_vcf is never used below -- candidate for removal.
initial_vcf=[]
header=[]
with open(vcf_input,'r') as f:
    for line in f:
        #get header lines: copied through to the output unchanged
        if line.startswith('#'):
            headline=line.strip('\n').split("\t")
            header.append(headline)
            outfile.write('\t'.join(headline) + '\n')
        else:
            counter += 1
            #process snp lines with the parser function
            snp=line.strip('\n').split("\t")
            outfile.write('\t'.join(parser(snp)) + '\n')
            # Progress indicator on a single console line.
            sys.stdout.write("SNPs processed: %d \r" % (counter) )
            sys.stdout.flush()
outfile.close()
end = time.time()
elapsed = end - start
print("\n" + str(round(elapsed,2)) + " sec.")
| mit | Python | |
b68c8eab696f5950c4cd528bf60506469c97d08a | Create fixer.py | Godod/utils | fixer.py | fixer.py | from datetime import datetime
from typing import List, TypeVar
import requests
BASE_URL = 'https://api.fixer.io/'
CURRENCY_CHOICE = ["EUR", "AUD", "BGN", "BRL", "CAD", "CHF", "CNY", "CZK",
"DKK", "GBP", "HKD", "HRK", "HUF", "IDR", "ILS",
"INR", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
"PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB",
"TRY", "USD", "ZAR"]
D = TypeVar('D', datetime, str)
class Fixer(object):
    """
    Client for the fixer.io exchange-rate API.

    date:
        Either a date in "yyyy-mm-dd" format (available from 1999),
        a datetime instance, or "latest" for the latest rates.
        default = "latest"
    base:
        A currency in CURRENCY_CHOICE list; sets the base currency for
        conversion. default = "EUR"
    symbols:
        Optional list of currencies to restrict the response to.

    Raises ValueError for unknown currencies or invalid dates.
    """
    def __init__(self, date: str = "latest", base: str = "EUR",
                 symbols: List[str] = None) -> None:
        super(Fixer, self).__init__()
        self.symbols_string = ''
        if self.currency_available(base, "Base currency"):
            self.base = base
        if symbols:
            self.symbols = []
            for cur in symbols:
                if self.currency_available(cur, "Symbols currency"):
                    self.symbols.append(cur)
            self.symbols_string = 'symbols={}'.format(','.join(self.symbols))
        self.check_date(date)

    def currency_available(self, cur: str, method: str = "") -> bool:
        """Return True if *cur* is supported, else raise ValueError."""
        if cur not in CURRENCY_CHOICE:
            # Raise a ValueError exception
            raise ValueError("Currency %s not available through this api" % cur,
                             method)
        return True

    def check_date(self, dt: D) -> None:
        """Validate *dt* and store it on self.date.

        Accepts a datetime instance, the literal string "latest", or a
        "yyyy-mm-dd" string between 1999 and today.
        """
        if type(dt) == datetime:
            self.date = dt
        elif type(dt) == str:
            if dt == "latest":
                # Bug fix: return early so the year/future checks below are
                # never applied to the plain string "latest" (which has no
                # .year attribute and used to raise AttributeError).
                self.date = dt
                return
            try:
                self.date = datetime.strptime(dt, "%Y-%m-%d")
            except ValueError as e:
                raise e
            if not self.date.year >= 1999:
                raise ValueError("Data available from 1999, %s is to old" % self.date.strftime("%Y-%m-%d"))
            if self.date > datetime.now():
                raise ValueError("%s is in the future, data cannot be found" % self.date.strftime("%Y-%m-%d"))
        else:
            raise ValueError("%s does not match required date format" % dt)

    def convert(self) -> str:
        """Fetch the rates from the API and return the decoded JSON payload."""
        # Bug fix: a parsed datetime must appear as yyyy-mm-dd in the URL;
        # interpolating the datetime directly would include a time component
        # that the API does not accept.
        date_part = self.date if isinstance(self.date, str) else self.date.strftime("%Y-%m-%d")
        url = '%s%s?%s&base=%s' % (BASE_URL, date_part, self.symbols_string, self.base)
        r = requests.get(url).json()
        if 'error' in r:
            raise ReferenceError(r['error'])
        return r
| mit | Python | |
16ec8043799c7aac029c5528f1c00f96070434d4 | Move build view names function to utils | praekelt/jmbo-foundry,praekelt/jmbo-foundry,praekelt/jmbo-foundry | foundry/utils.py | foundry/utils.py | from django.conf import settings
def _build_view_names_recurse(url_patterns=None):
    """
    Return a list of (name, name) tuples for all named url patterns,
    suitable for use as field choices.
    """
    # On the first call, resolve the project's root urlconf and start from
    # its top-level pattern list.
    if not url_patterns:
        urlconf = settings.ROOT_URLCONF
        url_patterns = __import__(settings.ROOT_URLCONF, globals(), locals(), \
            ['urlpatterns', ], -1).urlpatterns
    result = []
    for pattern in url_patterns:
        try:
            #result.append((pattern.name, pattern.name.title().replace('_', \
            #    ' ')))
            # Use the raw pattern name for both the stored value and label.
            if pattern.name is not None:
                result.append((pattern.name, pattern.name))
        except AttributeError:
            # If the pattern itself is an include, recursively fetch its
            # patterns. Ignore admin patterns.
            if not pattern.regex.pattern.startswith('^admin'):
                try:
                    result += _build_view_names_recurse(pattern.url_patterns)
                except AttributeError:
                    pass
    return result
def get_view_choices():
    """Return the url pattern name choices in alphabetical order."""
    return sorted(_build_view_names_recurse())
| bsd-3-clause | Python | |
26ef831edbb25deaa7f3497c88d329db7ff8db91 | Add temporary file interpolate-layout.py | fonttools/fonttools,googlefonts/fonttools | Lib/fontTools/varLib/interpolate-layout.py | Lib/fontTools/varLib/interpolate-layout.py | """
Interpolate OpenType Layout tables (GDEF / GPOS / GSUB).
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib import designspace, models, builder
import os.path
def _all_equal(lst):
it = iter(lst)
v0 = next(it)
for v in it:
if v0 != v:
return False
return True
def buildVarDevTable(store, master_values):
    """Build a variation device table for the given per-master values.

    Returns None when all masters agree (no variation is needed).
    NOTE(review): the 0xdeadbeef varIdx below is a placeholder -- the
    computed deltas are not yet registered with *store*.
    """
    if _all_equal(master_values):
        return None
    deltas = master_values
    return builder.buildVarDevTable(0xdeadbeef)
def _merge_OTL(font, model, master_ttfs, axes, base_idx):
    """Merge the masters' GDEF/GPOS/GSUB tables into *font* (prototype).

    NOTE(review): this is experimental code -- it only varies a single,
    hard-coded anchor (lookup 4, subtable 0, mark record 28) and the
    resulting variation store is not yet inserted into GDEF.
    """
    print("Merging OpenType Layout tables")
    GDEFs = [m['GDEF'].table for m in master_ttfs]
    GPOSs = [m['GPOS'].table for m in master_ttfs]
    GSUBs = [m['GSUB'].table for m in master_ttfs]
    # Reuse the base font's tables
    for tag in 'GDEF', 'GPOS', 'GSUB':
        font[tag] = master_ttfs[base_idx][tag]
    GPOS = font['GPOS'].table
    # Hard-coded path to the single anchor being interpolated.
    getAnchor = lambda GPOS: GPOS.LookupList.Lookup[4].SubTable[0].MarkArray.MarkRecord[28].MarkAnchor
    store_builder = builder.OnlineVarStoreBuilder(axes.keys())
    store_builder.setModel(model)
    anchors = [getAnchor(G) for G in GPOSs]
    anchor = getAnchor(GPOS)
    # Build per-axis device tables for the anchor's X and Y coordinates.
    XDeviceTable = buildVarDevTable(store_builder, [a.XCoordinate for a in anchors])
    YDeviceTable = buildVarDevTable(store_builder, [a.YCoordinate for a in anchors])
    if XDeviceTable or YDeviceTable:
        # Anchor format 3 carries device tables for variable coordinates.
        anchor.Format = 3
        anchor.XDeviceTable = XDeviceTable
        anchor.YDeviceTable = YDeviceTable
    store = store_builder.finish()
    # TODO insert in GDEF
def main(args=None):
    """Interpolate layout tables at a designspace location.

    Usage: interpolate-layout <file.designspace> [axis=value ...]
    Masters are located by rewriting the .designspace UFO paths into the
    master_ttf_interpolatable .ttf paths.
    """
    import sys
    if args is None:
        args = sys.argv[1:]
    designspace_filename = args[0]
    locargs = args[1:]
    outfile = os.path.splitext(designspace_filename)[0] + '-instance.ttf'
    finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf')
    # Parse "tag=value" arguments into the requested location.
    loc = {}
    for arg in locargs:
        tag,val = arg.split('=')
        loc[tag] = float(val)
    print("Location:", loc)
    masters, instances = designspace.load(designspace_filename)
    # Exactly one master must be marked with <info copy="1"/> as the base.
    base_idx = None
    for i,m in enumerate(masters):
        if 'info' in m and m['info']['copy']:
            assert base_idx is None
            base_idx = i
    assert base_idx is not None, "Cannot find 'base' master; Add <info> element to one of the masters in the .designspace document."
    from pprint import pprint
    print("Masters:")
    pprint(masters)
    print("Index of base master:", base_idx)
    print("Building GX")
    print("Loading TTF masters")
    basedir = os.path.dirname(designspace_filename)
    master_ttfs = [finder(os.path.join(basedir, m['filename'])) for m in masters]
    master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs]
    #font = master_fonts[base_idx]
    font = TTFont(master_ttfs[base_idx])
    master_locs = [o['location'] for o in masters]
    # All masters must declare exactly the same axis tags.
    axis_tags = set(master_locs[0].keys())
    assert all(axis_tags == set(m.keys()) for m in master_locs)
    print("Axis tags:", axis_tags)
    print("Master positions:")
    pprint(master_locs)
    # Set up axes: (minimum, default, maximum) per tag, the default being
    # the base master's position.
    axes = {}
    for tag in axis_tags:
        default = master_locs[base_idx][tag]
        lower = min(m[tag] for m in master_locs)
        upper = max(m[tag] for m in master_locs)
        axes[tag] = (lower, default, upper)
    print("Axes:")
    pprint(axes)
    loc = models.normalizeLocation(loc, axes)
    # Location is normalized now
    print("Normalized location:", loc)
    # Normalize master locations
    master_locs = [models.normalizeLocation(m, axes) for m in master_locs]
    print("Normalized master positions:")
    print(master_locs)
    # Assume single-model for now.
    model = models.VariationModel(master_locs)
    assert 0 == model.mapping[base_idx]
    print("Building variations tables")
    _merge_OTL(font, model, master_fonts, axes, base_idx)
    print("Saving GX font", outfile)
    font.save(outfile)
if __name__ == "__main__":
    import sys
    # With CLI arguments, run the interpolation; in either case fall
    # through to the doctest run below and exit with its failure count.
    # NOTE(review): `sys` is imported twice in this block (original code).
    if len(sys.argv) > 1:
        main()
    #sys.exit(0)
    import doctest, sys
    sys.exit(doctest.testmod().failed)
| mit | Python | |
f13045b5f933078225b89405a786c14da34d0af5 | Add ClamAV script to analyze HTTPS traffic for viruses | mhils/HoneyProxy,mhils/HoneyProxy,mhils/HoneyProxy,mhils/HoneyProxy | scripts/clamav.py | scripts/clamav.py | import pyclamd
from libmproxy.flow import decoded
#http://www.eicar.org/85-0-Download.html
clamd = pyclamd.ClamdUnixSocket()
try:
# test if server is reachable
clamd.ping()
except AttributeError, pyclamd.ConnectionError:
# if failed, test for network socket
clamd = pyclamd.ClamdNetworkSocket()
clamd.ping() #fails instantly if we dont get a proper connection
print "ClamAV running: %s" % clamd.version()
def response(context, flow):
    """mitmproxy/HoneyProxy hook: virus-scan every HTTP response body.

    If clamd reports a hit, the body is replaced with a block page and the
    response is rewritten to 403 Forbidden.
    """
    with decoded(flow.response):
        # Scan the (content-decoded) body bytes with clamd.
        clamd_result = clamd.scan_stream(flow.response.content)
        if clamd_result:
            print "Virus detected: ",clamd_result
            flow.response.content = "HoneyProxy has detected a virus and stopped this page from loading: %s" % str(clamd_result["stream"])
            # Rewrite headers so the block page renders as plain HTML.
            flow.response.headers["Content-Length"] = [str(len(flow.response.content))]
            flow.response.headers["Content-Type"] = ["text/html"]
            del flow.response.headers["Content-Disposition"]
            del flow.response.headers["Content-Encoding"]
            flow.response.code = 403
            flow.response.msg = "Forbidden"
| mit | Python | |
079b5d26ef01a29a36672495cf794417204d336e | add unit test, check small average mean squared error | jseabold/statsmodels,josef-pkt/statsmodels,jseabold/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,statsmodels/statsmodels,bashtage/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,bashtage/statsmodels,josef-pkt/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,josef-pkt/statsmodels,statsmodels/statsmodels,josef-pkt/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,jseabold/statsmodels,bashtage/statsmodels,statsmodels/statsmodels | statsmodels/nonparametric/tests/test_asymmetric.py | statsmodels/nonparametric/tests/test_asymmetric.py | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 8 16:18:21 2021
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from numpy.testing import assert_array_less
from scipy import stats
import pytest
import statsmodels.nonparametric.kernels_asymmetric as kern
kernels_rplus = [("gamma", 0.1),
("gamma2", 0.1),
("invgamma", 0.02),
("invgauss", 0.01),
("recipinvgauss", 0.1),
("bs", 0.1),
("lognorm", 0.01),
("weibull", 0.1),
]
kernels_unit = [("beta", 0.005),
("beta2", 0.005),
]
class TestKernelsRplus(object):
    """Accuracy checks for asymmetric kernels with support on R+.

    A gamma(2, scale=1.5) sample is drawn once per class; each kernel's
    density and cdf estimates are compared against the data-generating
    process by average mean squared error over a fixed grid.
    """
    @classmethod
    def setup_class(cls):
        b = 2
        scale = 1.5
        np.random.seed(1)
        nobs = 1000
        distr0 = stats.gamma(b, scale=scale)
        rvs = distr0.rvs(size=nobs)
        # Tiny offset keeps the grid strictly inside the support.
        x_plot = np.linspace(0.5, 16, 51) + 1e-13
        cls.rvs = rvs
        cls.x_plot = x_plot
        cls.pdf_dgp = distr0.pdf(x_plot)
        cls.cdf_dgp = distr0.cdf(x_plot)
        cls.amse_pdf = 1e-4  # tol for average mean squared error
        cls.amse_cdf = 5e-4
    @pytest.mark.parametrize('case', kernels_rplus)
    def test_kernels(self, case):
        # case = (kernel-name suffix, bandwidth)
        name, bw = case
        rvs = self.rvs
        x_plot = self.x_plot
        kde = []
        kce = []
        # Resolve e.g. kernel_pdf_gamma / kernel_cdf_gamma by name.
        func_pdf = getattr(kern, "kernel_pdf_" + name)
        func_cdf = getattr(kern, "kernel_cdf_" + name)
        for xi in x_plot:
            kde.append(func_pdf(xi, rvs, bw))
            kce.append(func_cdf(xi, rvs, bw))
        kde = np.asarray(kde)
        kce = np.asarray(kce)
        # average mean squared error
        amse = ((kde - self.pdf_dgp)**2).mean()
        assert_array_less(amse, self.amse_pdf)
        amse = ((kce - self.cdf_dgp)**2).mean()
        assert_array_less(amse, self.amse_cdf)
class TestKernelsUnit(TestKernelsRplus):
    """Same checks for kernels with support on the unit interval [0, 1]."""
    @classmethod
    def setup_class(cls):
        np.random.seed(987456)
        nobs = 1000
        distr0 = stats.beta(2, 3)
        rvs = distr0.rvs(size=nobs)
        x_plot = np.linspace(0, 1, 51)
        cls.rvs = rvs
        cls.x_plot = x_plot
        cls.pdf_dgp = distr0.pdf(x_plot)
        cls.cdf_dgp = distr0.cdf(x_plot)
        # Looser tolerances than the R+ case.
        cls.amse_pdf = 0.01
        cls.amse_cdf = 5e-3
    @pytest.mark.parametrize('case', kernels_unit)
    def test_kernels(self, case):
        # Re-run the shared kernel check with the beta-kernel cases.
        super(TestKernelsUnit, self).test_kernels(case)
| bsd-3-clause | Python | |
805b393c51d9fa82f0dd28aa502378dfcf80924b | Add a binary demo. | mwhoffman/reggie | reggie/demos/binary.py | reggie/demos/binary.py | import os
import numpy as np
import mwhutils.plotting as mp
import mwhutils.grid as mg
import reggie as rg
if __name__ == '__main__':
    # Load the demo data shipped next to this script.
    cdir = os.path.abspath(os.path.dirname(__file__))
    data = np.load(os.path.join(cdir, 'xy.npz'))
    # create the GP and optimize the model
    gp1 = rg.make_gp(0.1, 1.0, 0.1)
    gp1.add_data(data['X'], data['y'])
    gp1.optimize()
    xmin = data['X'].min()
    xmax = data['X'].max()
    # Build a binary (probit) model reusing the fitted GP's kernel/mean and
    # labels sampled through a function drawn from its posterior.
    like = rg.likelihoods.Probit()
    kern = gp1._post.kern.copy()
    mean = gp1._post.mean.copy()
    f = gp1.sample_f(100)
    X = mg.uniform([(xmin, xmax)], 1000)
    Y = like.sample(f.get(X))
    gp2 = rg.GP(like, kern, mean, inference='laplace')
    gp2.add_data(X, Y)
    gp2.optimize()
    # create the figure
    fig = mp.figure(1, 1, 2)
    fig.hold()
    # get the posterior moments for the first model
    n = 500
    x = np.linspace(xmin, xmax, n)
    mu, s2 = gp1.predict(x[:, None])
    fig[0].plot_banded(x, mu, 2*np.sqrt(s2))
    fig[0].scatter(*gp1.data)
    fig[0].xlabel = 'inputs, X'
    fig[0].ylabel = 'outputs, Y'
    fig[0].title = 'Basic GP'
    # get the posterior moments for the second model
    mu, s2 = gp2.predict(x[:, None])
    fig[1].plot_banded(x, mu, 2*np.sqrt(s2))
    fig[1].scatter(*gp2.data)
    fig[1].plot(x, f.get(x[:, None]))
    fig[1].xlabel = 'inputs, X'
    fig[1].title = 'Binary GP\n(with sampled function)'
    # show the figure
    fig.draw()
    mp.show()
| bsd-2-clause | Python | |
def9592885ab4093973e8547de5deac3b7022515 | Create MaxSubarray_003.py | Chasego/codi,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/codirit,cc13ny/Allin,Chasego/codirit,Chasego/cod,Chasego/codi,Chasego/codirit,Chasego/codirit,cc13ny/Allin,cc13ny/algo,cc13ny/algo,cc13ny/Allin,cc13ny/algo,Chasego/codi,Chasego/cod,Chasego/cod,cc13ny/algo,Chasego/cod,cc13ny/Allin,cc13ny/Allin,Chasego/codi,Chasego/codi | leetcode/053-Maximum-Subarray/MaxSubarray_003.py | leetcode/053-Maximum-Subarray/MaxSubarray_003.py | class Solution:
# @param {integer[]} nums
# @return {integer}
def maxSubArray(self, nums):
res, tmp = nums[0], nums[0]
for i in range(1, len(nums)):
tmp = max(tmp + nums[i], nums[i])
res = max(res, tmp)
return res
| mit | Python | |
67300787f1f910065a88396f99f0d4dd25bec2d1 | apply monkeypatch | vaporry/ethereum-buildbot,ethereum/ethereum-buildbot,vaporry/ethereum-buildbot,vaporry/ethereum-buildbot,ethereum/ethereum-buildbot,ethereum/ethereum-buildbot | buildbot.tac | buildbot.tac | import os
# Buildbot master twisted application configuration (.tac file).
# Apply local monkeypatches before any buildbot/twisted machinery runs.
from monkeypatch import apply_patches
apply_patches()
from twisted.application import service
from buildbot.master import BuildMaster
basedir = '.'
rotateLength = 10000000
maxRotatedFiles = 10
configfile = 'master.cfg'
# Default umask for server
umask = None
# if this is a relocatable tac file, get the directory containing the TAC
if basedir == '.':
    import os.path
    basedir = os.path.abspath(os.path.dirname(__file__))
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
# Rotate twistd.log and attach it as the application's log observer.
from twisted.python.logfile import LogFile
from twisted.python.log import ILogObserver, FileLogObserver
logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength,
                               maxRotatedFiles=maxRotatedFiles)
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
# Create the BuildMaster service and hook it into the application.
m = BuildMaster(basedir, configfile, umask)
m.setServiceParent(application)
m.log_rotation.rotateLength = rotateLength
m.log_rotation.maxRotatedFiles = maxRotatedFiles
| import os
# Buildbot master twisted application configuration (.tac file),
# earlier revision without the monkeypatch hook.
from twisted.application import service
from buildbot.master import BuildMaster
basedir = '.'
rotateLength = 10000000
maxRotatedFiles = 10
configfile = 'master.cfg'
# Default umask for server
umask = None
# if this is a relocatable tac file, get the directory containing the TAC
if basedir == '.':
    import os.path
    basedir = os.path.abspath(os.path.dirname(__file__))
# note: this line is matched against to check that this is a buildmaster
# directory; do not edit it.
application = service.Application('buildmaster')
# Rotate twistd.log and attach it as the application's log observer.
from twisted.python.logfile import LogFile
from twisted.python.log import ILogObserver, FileLogObserver
logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength,
                               maxRotatedFiles=maxRotatedFiles)
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)
m = BuildMaster(basedir, configfile, umask)
m.setServiceParent(application)
m.log_rotation.rotateLength = rotateLength
m.log_rotation.maxRotatedFiles = maxRotatedFiles
| mit | Python |
e85bab14ab8058ba14d1f73dd2d47d8c38318c48 | Add db_sqlite.py | tricorder42/chapterman | db_sqlite.py | db_sqlite.py | import sqlite3 | mit | Python | |
266a3a3ddb99afc6fa696bdd2b7d3dc770b921ea | Add enroller talking to redis | pglbutt/spanky,pglbutt/spanky,pglbutt/spanky | spanky/lib/enroll.py | spanky/lib/enroll.py | import redis
class Enroller(object):
    """Registers service instances ("host:port") under a named redis list."""
    def __init__(self, config):
        # config is stored but not otherwise used yet.
        self.config = config
    @property
    def conn(self):
        # Lazily-created redis connection, cached on first access.
        # NOTE(review): connection parameters are hard-coded to localhost:6379.
        if not hasattr(self, '_conn'):
            self._conn = redis.StrictRedis(host='localhost', port=6379, db=0)
        return self._conn
    def join(self, name, host, port):
        # Push "host:port" onto the head of the list for *name*.
        self.conn.lpush(name, '%s:%s' % (host, port))
    def enrolled(self, name):
        # Return every entry currently registered under *name*.
        return self.conn.lrange(name, 0, -1)
def main():
    # Smoke test: enroll one instance and print the resulting list
    # (requires a redis server on localhost:6379).
    e = Enroller({})
    e.join('foo', 'bar', 8080)
    print(e.enrolled('foo'))
if __name__ == '__main__':
    main()
| bsd-3-clause | Python | |
95ceeb0af4e549e0d211b4e1ba6157d26ad5e44d | Fix race between MQ and mongo setting QueuedAt | cgourlay/tapiriik,cheatos101/tapiriik,cmgrote/tapiriik,abhijit86k/tapiriik,dmschreiber/tapiriik,cpfair/tapiriik,abhijit86k/tapiriik,cpfair/tapiriik,mjnbike/tapiriik,abs0/tapiriik,gavioto/tapiriik,mjnbike/tapiriik,cheatos101/tapiriik,brunoflores/tapiriik,marxin/tapiriik,brunoflores/tapiriik,dlenski/tapiriik,abs0/tapiriik,dmschreiber/tapiriik,dmschreiber/tapiriik,abs0/tapiriik,cheatos101/tapiriik,mduggan/tapiriik,mjnbike/tapiriik,abhijit86k/tapiriik,gavioto/tapiriik,marxin/tapiriik,cpfair/tapiriik,cheatos101/tapiriik,marxin/tapiriik,olamy/tapiriik,cmgrote/tapiriik,campbellr/tapiriik,brunoflores/tapiriik,abhijit86k/tapiriik,mduggan/tapiriik,olamy/tapiriik,olamy/tapiriik,olamy/tapiriik,cmgrote/tapiriik,mduggan/tapiriik,mduggan/tapiriik,abs0/tapiriik,cgourlay/tapiriik,niosus/tapiriik,cgourlay/tapiriik,campbellr/tapiriik,mjnbike/tapiriik,brunoflores/tapiriik,dlenski/tapiriik,niosus/tapiriik,gavioto/tapiriik,marxin/tapiriik,cpfair/tapiriik,cgourlay/tapiriik,campbellr/tapiriik,cmgrote/tapiriik,dlenski/tapiriik,campbellr/tapiriik,dmschreiber/tapiriik,niosus/tapiriik,dlenski/tapiriik,gavioto/tapiriik,niosus/tapiriik | sync_scheduler.py | sync_scheduler.py | from tapiriik.database import db
# Scheduler loop: find users due for synchronization, stamp them as queued
# in mongo *before* publishing, then publish one sync message per user.
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
from tapiriik.settings import MONGO_FULL_WRITE_CONCERN
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
    queueing_at = datetime.utcnow()
    # Primary read so scheduling never works from stale secondary data.
    users = db.users.find(
        {
            "NextSynchronization": {"$lte": datetime.utcnow()}
        },
        {
            "_id": True,
            "SynchronizationHostRestriction": True
        },
        read_preference=ReadPreference.PRIMARY
    )
    scheduled_ids = [x["_id"] for x in users]
    # Mark users as queued (full write concern) before any message goes out,
    # so consumers cannot observe a user without QueuedAt set.
    db.users.update({"_id": {"$in": scheduled_ids}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True, w=MONGO_FULL_WRITE_CONCERN)
    # NOTE(review): the list comprehension above already exhausted the
    # cursor; unless users.rewind() is called, this loop appears to publish
    # nothing -- verify against the pymongo version in use.
    for user in users:
        producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
    print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
    time.sleep(1)
| from tapiriik.database import db
# Earlier revision of the scheduler loop: QueuedAt is written to mongo only
# *after* the messages have been published (racy with the MQ consumers).
from tapiriik.messagequeue import mq
from tapiriik.sync import Sync
from datetime import datetime
from pymongo.read_preferences import ReadPreference
import kombu
import time
Sync.InitializeWorkerBindings()
producer = kombu.Producer(Sync._channel, Sync._exchange)
while True:
    queueing_at = datetime.utcnow()
    users = db.users.find(
        {
            "NextSynchronization": {"$lte": datetime.utcnow()}
        },
        {
            "_id": True,
            "SynchronizationHostRestriction": True
        },
        read_preference=ReadPreference.PRIMARY
    )
    scheduled_ids = set()
    # Publish one message per due user, remembering who was scheduled.
    for user in users:
        producer.publish({"user_id": str(user["_id"]), "queued_at": queueing_at.isoformat()}, routing_key=user["SynchronizationHostRestriction"] if "SynchronizationHostRestriction" in user and user["SynchronizationHostRestriction"] else "")
        scheduled_ids.add(user["_id"])
    print("Scheduled %d users at %s" % (len(scheduled_ids), datetime.utcnow()))
    db.users.update({"_id": {"$in": list(scheduled_ids)}}, {"$set": {"QueuedAt": queueing_at}, "$unset": {"NextSynchronization": True}}, multi=True)
    time.sleep(1)
| apache-2.0 | Python |
e532a4a5ba6706974dc1245b269f18fa0e82cb66 | Create duplicates.py | aiskov/storekeeper | module/duplicates.py | module/duplicates.py | import os
import sys
def search(dir):
    """Walk *dir* recursively, printing every directory and file found.

    Note: the parameter name `dir` shadows the builtin of the same name,
    but is kept for interface compatibility.
    """
    # Bug fixes: the original signature was missing its trailing ':' (a
    # syntax error) and the inner print referenced an undefined name `r`
    # instead of `filename`.
    for root, subdirs, files in os.walk(dir):
        print('Dir(%s)' % root)
        for filename in files:
            print('- File(%s)' % filename)
| apache-2.0 | Python | |
c8271b02c3636aa9620cce8b85c823ff0ec35c4a | Add a mobile device test of the Skype website | seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase | examples/test_skype_site.py | examples/test_skype_site.py | """
This is a mobile device test for Chromium-based browsers (such as MS Edge)
Usage: pytest test_skype_site.py --mobile --browser=edge
Default mobile settings for User Agent and Device Metrics if not specifed:
User Agent: --agent="Mozilla/5.0 (Linux; Android 9; Pixel 3 XL)"
CSS Width, CSS Height, Pixel-Ratio: --metrics="411,731,3"
"""
from seleniumbase import BaseCase
class SkypeWebsiteTestClass(BaseCase):
    """SeleniumBase smoke test of skype.com under mobile emulation."""
    def test_skype_website_on_mobile(self):
        # Guard: this flow only makes sense with Chromium mobile emulation.
        if not self.mobile_emulator:
            print("\n This test is only for mobile devices / emulators!")
            print(" (Usage: '--mobile' with a Chromium-based browser.)")
            self.skip_test("Please rerun this test using '--mobile!'!")
        # Landing page: verify the app banner and follow the download link.
        self.open("https://www.skype.com/en/")
        self.assert_text("Install Skype", "div.appInfo")
        self.highlight("div.appBannerContent")
        self.highlight('[itemprop="url"]')
        self.highlight("h1")
        self.highlight_click('[title="Download Skype"]')
        # Download page: verify branding and the platform selector entries.
        self.assert_element('[aria-label="Microsoft"]')
        self.assert_text("Download Skype", "h1")
        self.highlight("div.appBannerContent")
        self.highlight("h1")
        self.assert_text("Skype for Mobile", "h2")
        self.highlight("h2")
        self.highlight("#get-skype-0")
        self.highlight_click('[title*="Select from list"]')
        self.highlight('[data-bi-id*="android"]')
        self.highlight('[data-bi-id*="ios"]')
        self.highlight('[data-bi-id*="windows10"]')
| mit | Python | |
ea54e294d68962ec370dc1dc2381720f53ce6f01 | Add local_settings.py stuff | aapris/django-voikko-experiments,aapris/django-voikko-experiments | voiexp/local_settings_example.py | voiexp/local_settings_example.py | SECRET_KEY = '.uadjgfi67&%€yuhgsdfakjhgayv&/%yugjhdfsc$y53'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Host/domain names this Django site may serve (Host-header validation).
ALLOWED_HOSTS = ['127.0.0.1', 'some.example.com', ]
# Finnish locale and Helsinki timezone defaults.
LANGUAGE_CODE = 'fi-fi'
TIME_ZONE = 'Europe/Helsinki'
| mit | Python | |
8daf4237aa84a6b032e7627afb31b29a44f47ddc | Add another .py file for progress bar | vicyangworld/AutoOfficer | ProgressBar.py | ProgressBar.py | import sys, time
from CmdFormat import CmdFormat
class ProgressBar(CmdFormat):
    """Console progress bar rendered with the colour helpers of CmdFormat.

    Parameters
    ----------
    count, total : int
        Current step and total number of steps.
    width : int
        Bar width in characters.
    bWithheader : bool
        Prefix each update with "count/total:".
    bWithPercent : bool
        Append the completion percentage after the bar.
    barColor : str
        One of 'red', 'green', 'blue', 'yellow'; any other value leaves
        the console colour untouched.
    """

    # Colour name -> attribute bits for CmdFormat.set_cmd_color().
    # (Replaces the four copy-pasted if-chains of the original.)
    _COLOR_BITS = {
        'red': 4 | 8,
        'green': 2 | 8,
        'blue': 1 | 10,
        'yellow': 6 | 8,
    }

    def __init__(self, count=0, total=0, width=80, bWithheader=True,
                 bWithPercent=True, barColor='white'):
        # NOTE(review): super(CmdFormat, self) is kept from the original --
        # it skips CmdFormat.__init__ (resolving to object.__init__).
        # Confirm whether CmdFormat.__init__ must run before changing this
        # to super(ProgressBar, self).
        super(CmdFormat, self).__init__()
        self.count = count
        self.total = total
        self.width = width
        self.bWithheader = bWithheader
        self.bWithPercent = bWithPercent
        self.__barColor = barColor

    def __apply_color(self, color):
        """Validate *color* is a string and switch the console colour.

        Unknown colour names are silently ignored (matching the original
        if-chain behaviour).
        """
        if type(color) != type('a'):
            raise TypeError("Wrong argument type of __Set_bar_color(color) in class ProgressBar!")
        bits = self._COLOR_BITS.get(color)
        if bits is not None:
            self.set_cmd_color(bits)

    def __Set_bar_color(self):
        # Apply the bar colour chosen at construction time.
        self.__apply_color(self.__barColor)

    def Move(self, s):
        """Advance the bar one step, printing message *s* above it."""
        self.count += 1
        # Blank out the previous bar line before printing the message.
        sys.stdout.write(' ' * (self.width + 20) + '\r')
        sys.stdout.flush()
        print(s)
        progress = self.width * self.count / self.total
        if self.bWithheader:
            sys.stdout.write('{0:3}/{1:3}:'.format(self.count, self.total))
        bar = '[' + int(progress) * '>' + int(self.width - progress) * '-' + ']'
        self.__Set_bar_color()
        if self.bWithPercent:
            # Bug fix: the original printed *progress* (0..width) labelled
            # as a percentage; show the true completion percentage instead.
            percent = self.count * 100.0 / self.total
            sys.stdout.write(bar + ' %.2f' % percent + '%' + '\r')
        else:
            sys.stdout.write(bar + '\r')
        self.reset_color()
        if progress == self.width:
            sys.stdout.write('\n')
        sys.stdout.flush()

    def Set_cmd_color(self, color):
        """Explicitly switch the console text colour by name."""
        self.__apply_color(color)
# Demo: render a 15-step progress bar, forcing the text colour to red
# before each update. (A stray "=" token that made this module fail to
# import has been removed here.)
if __name__ == '__main__':
    bar = ProgressBar(total=15, bWithheader=True, bWithPercent=True, barColor='green')
    for i in range(15):
        bar.Set_cmd_color('red')
        bar.Move('sdfds ')
        time.sleep(1)
| mit | Python | |
49070f3ae636c458551ea53b1cb79975dd029a4c | add methods | jfzhang95/lightML | RNN/methods.py | RNN/methods.py | #!usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: James Zhang
@date:
"""
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse
from theano.tensor.shared_randomstreams import RandomStreams
from collections import OrderedDict
import copy
import sys
sys.setrecursionlimit(1000000)  # e.g. raise the recursion limit to one million here
def handle_binary_vector(given_list, k):
    """Binarize *given_list* by marking the positions of its k largest values.

    Returns a tuple:
      [0] an int64 numpy vector with 1 at the indices of the k largest
          entries and 0 elsewhere,
      [1] the original values as a list.

    Bug fixes vs. the original:
      * no longer sorts the caller's list in place (the argument is left
        untouched),
      * duplicate values among the top-k no longer all map to the index of
        the first occurrence -- each slot marks a distinct position.
    """
    values = list(given_list)
    # Indices of the k largest values; stable sort resolves ties by position.
    top_k = sorted(range(len(values)), key=values.__getitem__, reverse=True)[:k]
    binary = np.zeros(len(values), dtype='int64')
    binary[top_k] = 1
    return (binary, values)
def floatX(X):
    """Cast *X* to a numpy array of theano's configured float dtype."""
    return np.asarray(X, dtype=theano.config.floatX)
def random_weights(shape, name=None):
    """Shared variable of *shape*, initialised uniformly in [-0.5, 0.5)."""
    # return theano.shared(floatX(np.random.randn(*shape) * 0.01), name=name)
    return theano.shared(floatX(np.random.uniform(size=shape, low=-0.5, high=0.5)), name=name)
def zeros(shape, name=""):
    """Shared variable of *shape* filled with zeros."""
    return theano.shared(floatX(np.zeros(shape)), name=name)
def softmax(X, temperature=1.0):
    """Row-wise softmax of a 2-D tensor with optional temperature scaling."""
    # Subtracting the row max before exponentiating keeps this numerically stable.
    e_x = T.exp((X - X.max(axis=1).dimshuffle(0, 'x')) / temperature) # dimshuffle(0, 'x') output 2 dim array
    # return prob of each label. prob1+...+probn = 1
    return e_x / e_x.sum(axis=1).dimshuffle(0, 'x') # dimshuffle(0, 'x') output 2 dim array
def sigmoid(X):
    """Elementwise logistic sigmoid."""
    return 1 / (1 + T.exp(-X))
def dropout(X, dropout_prob=0.0):
    """Inverted dropout: zero units with prob *dropout_prob*, rescale the rest.

    NOTE(review): the RandomStreams seed is fixed (1234), so the same mask
    sequence is drawn on every run -- confirm this determinism is intended.
    """
    retain_prob = 1 - dropout_prob
    srng = RandomStreams(seed=1234)
    X *= srng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)
    X /= retain_prob
    return X
# def dropout(x, dropout_prob):
# if dropout_prob < 0. or dropout_prob > 1.:
# raise Exception('Dropout level must be in interval [0, 1]')
# retain_prob = 1. - dropout_prob
# sample=np.random.binomial(n=1, p=retain_prob, size=x.shape)
# x *= sample
# x /= retain_prob
# return x
def rectify(X):
    """ReLU activation: elementwise max(X, 0)."""
    return T.maximum(X, 0.)
def clip(X, epsilon):
    """Clamp X elementwise into the interval [-epsilon, epsilon]."""
    return T.maximum(T.minimum(X, epsilon), -1*epsilon)
def scale(X, max_norm):
    """Rescale X so its L1 norm does not exceed *max_norm*."""
    curr_norm = T.sum(T.abs_(X))
    return ifelse(T.lt(curr_norm, max_norm), X, max_norm * (X / curr_norm))
def SGD(loss, params, learning_rate, lambda2=0.05):
    """Plain SGD updates with L2 weight decay of strength *lambda2*.

    Returns (updates, grads): an OrderedDict mapping each parameter to its
    updated value, and the symbolic gradients of *loss* w.r.t. *params*.
    """
    updates = OrderedDict()
    grads = T.grad(cost=loss, wrt=params)
    for p, g in zip(params, grads):
        # The lambda2 * p term implements L2 regularisation (weight decay).
        updates[p] = p - learning_rate * (g + lambda2 * p)
    return updates, grads
def momentum(loss, params, caches, learning_rate=0.1, rho=0.1, clip_at=0.0, scale_norm=0.0, lambda2=0.0):
    """Gradient descent with momentum, optional gradient clipping/scaling,
    and L2 weight decay.

    NOTE(review): delta = rho*grad + (1-rho)*cache weights the *fresh*
    gradient by rho (default 0.1); classical momentum usually weights the
    cache by rho -- confirm the intended convention. Also note the caches
    themselves are never updated here.
    """
    updates = OrderedDict()
    grads = T.grad(cost=loss, wrt=params)
    for p, c, g in zip(params, caches, grads):
        if clip_at > 0.0:
            # Clamp each gradient component into [-clip_at, clip_at].
            grad = clip(g, clip_at)
        else:
            grad = g
        if scale_norm > 0.0:
            grad = scale(grad, scale_norm)
        delta = rho * grad + (1-rho) * c
        updates[p] = p - learning_rate * (delta + lambda2 * p)
    return updates, grads
def get_params(layers):
    """Collect every parameter from a list of layers into one flat list."""
    params = []
    for layer in layers:
        for param in layer.get_params():
            params.append(param)
    return params
def make_caches(params):
    """Return one shared zero tensor per parameter, matching its shape.

    These act as the per-parameter state (e.g. velocity) consumed by
    momentum().
    """
    caches = []
    for p in params:
        caches.append(theano.shared(floatX(np.zeros(p.get_value().shape))))
    return caches
"""
make_caches的功能:
提供和p(参数)同shape的全0矩阵
用与梯度下降方法
"""
def one_step_updates(layers):
    """Concatenate the update pairs contributed by every layer."""
    return [update for layer in layers for update in layer.updates()]
| mit | Python | |
5e3b2ca14c4cc421e47d2709fe52390ee51eee11 | Create S3toSQS.py | ndchristian/AWS-Lambda-Functions | SQS/S3toSQS.py | SQS/S3toSQS.py | """
Copyright 2016 Nicholas Christian
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Just a note: SQS seems to be not meant to send large amounts of data through it because the number of messages
# per batch are limited to 10 and the maximum size of a batch is 256 KB.
# If you are sending large amounts of data make sure the lambda function settings are reflective of the amount
# of time and resources this needs.
from __future__ import print_function
from gzip import open as g_open
from string import printable
from sys import getsizeof
from urllib import unquote_plus
from boto3 import client
SQS = client('sqs')
S3 = client('s3')
QUEUE_NAME = ""
MESSAGE_RETENTION_PERIOD = '' # In seconds
def memos(event, context):
    """AWS Lambda handler: stream a gzipped S3 object into an SQS queue.

    Triggered by an S3 event; downloads the object, then sends its lines
    to QUEUE_NAME in batches (SQS limits: 10 messages / 256 KB per batch).
    """
    print("Loading Function...")

    # Bucket/key of the object that triggered this invocation.
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = unquote_plus(event['Records'][0]['s3']['object']['key']).decode('utf8')
    S3.download_file(bucket, key, '/tmp/%s' % (key.split('/')[-1]))

    # If queue already exists it will just fetch the url of the queue.
    queue_url = SQS.create_queue(QueueName=QUEUE_NAME,
                                 Attributes={'MessageRetentionPeriod': MESSAGE_RETENTION_PERIOD})['QueueUrl']

    with g_open('/tmp/%s' % (key.split('/')[-1]), 'r+') as f:
        batch_of_mess = []
        for identifier, content in enumerate(f.readlines()):
            # Gets rid of odd unicode characters that SQS does not like and the message would fail to send.
            batch_of_mess.append({'Id': str(identifier),
                                  'MessageBody': ''.join(l for l in content if l in printable)})
            # Maximum size of a batch is 256 KB and/or 10 messages.
            # NOTE(review): sys.getsizeof measures only the list object
            # itself, not the message payloads, so this does not actually
            # bound the 256 KB batch size -- confirm intent.
            if getsizeof(batch_of_mess) >= 225 or len(batch_of_mess) == 10:
                message = SQS.send_message_batch(QueueUrl=queue_url,
                                                 Entries=batch_of_mess)
                # SQS does not throw up an error if a message fails to send.
                if 'Failed' in message:
                    print(message)
                del batch_of_mess[:]

        # Takes the remainder of the messages and sends them to SQS.
        if batch_of_mess:
            last_message = SQS.send_message_batch(QueueUrl=queue_url,
                                                  Entries=batch_of_mess)
            if 'Failed' in last_message:
                print(last_message)

    print("Done!")
| apache-2.0 | Python | |
f0e733a3f62d37dc25d70b334dd3e1e46936477d | Add missing non-important migration | ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display | homedisplay/info_transportation/migrations/0016_auto_20150304_2159.py | homedisplay/info_transportation/migrations/0016_auto_20150304_2159.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: re-declares Line.type as a CharField with
    bus/tram/train/metro choices (default 'bus')."""

    dependencies = [
        ('info_transportation', '0015_line_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='line',
            name='type',
            field=models.CharField(default=b'bus', help_text=b'Liikennev\xc3\xa4linetyyppi', max_length=10, verbose_name=b'Tyyppi', choices=[(b'bus', b'bus'), (b'tram', b'tram'), (b'train', b'train'), (b'metro', b'metro')]),
            preserve_default=True,
        ),
    ]
| bsd-3-clause | Python | |
468a4c181768f0dcfcaa40201c26015b7c94e39e | add random gesture test | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/moz4r/Test/random.py | home/moz4r/Test/random.py | import random
from time import sleep
i01 = Runtime.createAndStart("i01", "InMoov")
i01.startHead("COM3")
sleep(1)
def MoveHeadRandomize():
if IcanMoveHeadRandom==1:
i01.moveHead(random.randint(50,130),random.randint(50,130))
MoveHeadTimer = Runtime.start("MoveHeadTimer","Clock")
MoveHeadTimer.setInterval(1001)
def MoveHead(timedata):
MoveHeadRandomize()
MoveHeadTimer.setInterval(random.randint(600,1200))
def MoveHeadStopped():
if IcanMoveHeadRandom==1:
i01.moveHead(90,90)
HeadSide.moveTo(90)
def MoveHeadStart():
MoveHeadRandomize()
MoveHeadTimer.addListener("pulse", python.name, "MoveHead")
MoveHeadTimer.addListener("clockStopped", python.name, "MoveHeadStopped")
MoveHeadTimer.addListener("clockStarted", python.name, "MoveHeadStart")
MoveHeadTimer.startClock()
#start to move head random 10 seconds
IcanMoveHeadRandom=1
sleep(10)
IcanMoveHeadRandom=0
| apache-2.0 | Python | |
bb63af8be9abf1bcc8f3716bbd1a1a375685533f | Add a new feed bot, abusehelper.contrib.abusech.feodoccbot, for catching abuse.ch's Feodo Tracker RSS feed. | abusesa/abusehelper | abusehelper/contrib/abusech/feodoccbot.py | abusehelper/contrib/abusech/feodoccbot.py | from abusehelper.core import bot
from . import host_or_ip, split_description, AbuseCHFeedBot
class FeodoCcBot(AbuseCHFeedBot):
    """Feed bot for abuse.ch's Feodo Tracker C&C RSS feed."""

    feed_type = "c&c"
    feeds = bot.ListParam(default=["https://feodotracker.abuse.ch/feodotracker.rss"])

    # The timestamp in the title appears to be the firstseen timestamp,
    # skip including it as the "source time".
    parse_title = None

    def parse_description(self, description):
        # Yield (key, value) event pairs parsed from the RSS description.
        got_version = False
        for key, value in split_description(description):
            if key == "version":
                # Tag the malware with its Feodo version, e.g. "feodo.a".
                yield "malware", "feodo." + value.strip().lower()
                got_version = True
            elif key == "host":
                yield host_or_ip(value)
        if not got_version:
            # No version field seen: fall back to the generic tag.
            yield "malware", "feodo"
if __name__ == "__main__":
FeodoCcBot.from_command_line().execute()
| mit | Python | |
94c125925b61a57bd29e9265dc993e1d868f2b7f | Create Selenium_Google.py | christieewen/Scripts,christieewen/Scripts,christieewen/Scripts | Selenium_Google.py | Selenium_Google.py | __author__ = 'Christie'
#
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
browser = webdriver.Firefox()
browser.get('http://www.google.com')
assert 'Google' in browser.title
#browser.get('http://www.yahoo.com')
#assert 'Yahoo' in browser.title
#elem = browser.find_element_by_name('p') # Find the Yahoo search box
elem = browser.find_element_by_name('q') # Find the Google search box
elem.send_keys('seleniumhq' + Keys.RETURN)
browser.quit()
| apache-2.0 | Python | |
b00ae10f9ad841131ead33aa690587b7e2c50976 | Add fetch recipe for fletch | hsharsha/depot_tools,hsharsha/depot_tools,duanwujie/depot_tools,aleonliao/depot_tools,hsharsha/depot_tools,duanwujie/depot_tools,aleonliao/depot_tools,primiano/depot_tools,duongbaoduy/gtools,Midrya/chromium,duongbaoduy/gtools,disigma/depot_tools,azureplus/chromium_depot_tools,CoherentLabs/depot_tools,aleonliao/depot_tools,Midrya/chromium,primiano/depot_tools,duongbaoduy/gtools,CoherentLabs/depot_tools,azureplus/chromium_depot_tools,primiano/depot_tools,disigma/depot_tools,disigma/depot_tools,azureplus/chromium_depot_tools,duanwujie/depot_tools,Midrya/chromium | recipes/fletch.py | recipes/fletch.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Fletch(recipe_util.Recipe):
    """Basic Recipe class for Fletch."""

    @staticmethod
    def fetch_spec(props):
        """Build the gclient_git fetch spec for the Fletch repository."""
        url = 'https://github.com/dart-lang/fletch.git'
        solution = {
            'name': 'fletch',
            'url': url,
            'deps_file': 'DEPS',
            'managed': False,
            'custom_deps': {},
            'safesync_url': '',
        }
        spec = {
            'solutions': [solution],
        }
        # Propagate a comma-separated target_os list into the spec.
        if props.get('target_os'):
            spec['target_os'] = props['target_os'].split(',')
        return {
            'type': 'gclient_git',
            'gclient_git_spec': spec,
        }

    @staticmethod
    def expected_root(_props):
        # The checkout is rooted in a directory named 'fletch'.
        return 'fletch'
def main(argv=None):
    # Delegate command-line handling to the shared Recipe implementation.
    return Fletch().handle_args(argv)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| bsd-3-clause | Python | |
76c25395590aa9dee64ca138633f01b62ac0d26b | Add new provider migration for osf registrations | aaxelb/SHARE,laurenbarker/SHARE,CenterForOpenScience/SHARE,aaxelb/SHARE,zamattiac/SHARE,zamattiac/SHARE,zamattiac/SHARE,laurenbarker/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE,laurenbarker/SHARE,CenterForOpenScience/SHARE | providers/io/osf/registrations/migrations/0001_initial.py | providers/io/osf/registrations/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 16:17
from __future__ import unicode_literals
from django.db import migrations
import share.robot
class Migration(migrations.Migration):
    """Set up the io.osf.registrations harvester: robot user, OAuth token
    and (presumably, via djcelery) its schedule entry."""

    dependencies = [
        ('share', '0001_initial'),
        ('djcelery', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(
            code=share.robot.RobotUserMigration('io.osf.registrations'),
        ),
        migrations.RunPython(
            code=share.robot.RobotOauthTokenMigration('io.osf.registrations'),
        ),
        migrations.RunPython(
            code=share.robot.RobotScheduleMigration('io.osf.registrations'),
        ),
    ]
| apache-2.0 | Python | |
81391212d0e0cecfbce14195e1ca8cd1d96a6671 | Create Euler_2.py | ANoonan93/Python_code | Euler_2.py | Euler_2.py | fib = 1
fib2 = 2
temp = 0
total = 0
while temp <= 4000000:
temp = fib2
if temp % 2 == 0:
total += temp
temp = fib + fib2
fib = fib2
fib2 = temp
print(total)
| mit | Python | |
2c115a1b437aa36b42f74c04136601d9362dd5f6 | add cutflow | rootpy/rootpy,kreczko/rootpy,rootpy/rootpy,ndawe/rootpy,ndawe/rootpy,rootpy/rootpy,ndawe/rootpy,kreczko/rootpy,kreczko/rootpy | rootpy/tree/cutflow.py | rootpy/tree/cutflow.py | import struct
class Cutflow(object):
    """Track pass/fail flags for a fixed, ordered list of cuts and expose
    them as a '0'/'1' bit string or the corresponding integer bitmask."""

    def __init__(self, names):
        self.__names = names
        # Every cut starts out failed ('0').
        self.__dict = {name: '0' for name in names}

    def __setitem__(self, item, value):
        # Store the flag as the character '0' or '1'.
        self.__dict[item] = str(int(bool(value)))

    def bitstring(self):
        """Return the flags as a bit string in original cut order."""
        return ''.join(self.__dict[name] for name in self.__names)

    def int(self):
        """Return the bit string interpreted as a base-2 integer."""
        return int(self.bitstring(), 2)
a5b2db02926573ec1bc338d611af9f0ca363b237 | add convoluving response function | berkeley-stat159/project-iota | convoluving_response.py | convoluving_response.py | import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from scipy.stats import gamma
from stimuli import events2neural
def hrf(times):
    """Canonical haemodynamic response sampled at *times* (seconds).

    Difference of two gamma densities (peak shape 6, undershoot shape 12,
    undershoot weighted by 0.35), rescaled so the maximum equals 0.6.
    """
    signal = gamma.pdf(times, 6) - 0.35 * gamma.pdf(times, 12)
    return signal / np.max(signal) * 0.6
def constructing_convo(fname, n_volx):
    """Convolve the condition onsets in <fname>_cond.txt with the HRF.

    Plots the neural prediction and its convolution, then writes the
    convolved time course to <fname>_conv.txt.  The TR is fixed at 2.5 s.
    """
    tr_times = np.arange(0, 30, 2.5)
    hrf_at_trs = hrf(tr_times)
    neural_prediction = events2neural(fname + '_cond.txt', 2.5, n_volx)
    all_tr_times = np.arange(n_volx) * 2.5
    convolved = np.convolve(neural_prediction, hrf_at_trs)
    # Drop the convolution tail so the output matches the scan length.
    # NOTE(review): this trims len(hrf_at_trs)+1 samples, one more than
    # the usual len(hrf_at_trs)-1 -- confirm the extra trim is intended.
    convolved = convolved[:-len(hrf_at_trs)-1]
    plt.plot(all_tr_times, neural_prediction)
    plt.plot(all_tr_times[0:len(convolved)], convolved)
    plt.show()
    np.savetxt(fname + '_conv.txt', convolved)
if __name__ == '__main__':
from sys import argv
filename = argv[1]
if not filename:
filename = 'ds114_sub009_t2r1'
constructing_convo(filename, 173) | bsd-3-clause | Python | |
7a91235b1d6ed45a5452c455dd86797bbf092d17 | Update S3Session.py | Percona-Lab/mongodb_consistent_backup,Percona-Lab/mongodb_consistent_backup,timvaillancourt/mongodb_consistent_backup,timvaillancourt/mongodb_consistent_backup,Percona-Lab/mongodb_consistent_backup,timvaillancourt/mongodb_consistent_backup | mongodb_consistent_backup/Upload/S3/S3Session.py | mongodb_consistent_backup/Upload/S3/S3Session.py | import logging
import boto
import boto.s3
class S3Session:
    """Wrapper around a boto S3 connection; configures boto's global
    retry/timeout settings on construction."""

    def __init__(self, access_key, secret_key, s3_host='s3.amazonaws.com', secure=True, num_retries=5, socket_timeout=15):
        self.access_key = access_key
        self.secret_key = secret_key
        self.s3_host = s3_host
        self.secure = secure
        self.num_retries = num_retries
        self.socket_timeout = socket_timeout

        # Reset boto's global config, then apply this session's settings.
        for section in boto.config.sections():
            boto.config.remove_section(section)
        boto.config.add_section('Boto')
        boto.config.setbool('Boto', 'is_secure', self.secure)
        boto.config.set('Boto', 'http_socket_timeout', str(self.socket_timeout))
        boto.config.set('Boto', 'num_retries', str(self.num_retries))

        self._conn = None
        self.connect()

    def close(self):
        # NOTE(review): this condition looks inverted -- close() is only
        # attempted when _conn is falsy (which would raise AttributeError
        # on None); a live connection is never actually closed.
        if not self._conn:
            self._conn.close()
        pass

    def connect(self):
        """Return the cached connection, creating it on first use."""
        if not self._conn:
            try:
                logging.debug("Connecting to AWS S3 with Access Key: %s" % self.access_key)
                # NOTE(review): boto exposes S3Connection from
                # boto.s3.connection; confirm boto.s3.S3Connection resolves.
                self._conn = boto.s3.S3Connection(
                    self.access_key,
                    self.secret_key,
                    host=self.s3_host,
                    is_secure=self.secure
                )
                logging.debug("Successfully connected to AWS S3 with Access Key: %s" % self.access_key)
            except Exception, e:
                logging.error("Cannot connect to AWS S3 with Access Key: %s!" % self.access_key)
                raise e
        return self._conn

    def get_bucket(self, bucket_name):
        """Return the named bucket via the current connection."""
        try:
            logging.debug("Connecting to AWS S3 Bucket: %s" % bucket_name)
            return self._conn.get_bucket(bucket_name)
        except Exception, e:
            logging.error("Cannot connect to AWS S3 Bucket: %s!" % bucket_name)
            raise e
| import logging
from boto import config
from boto.s3 import S3Connection
class S3Session:
def __init__(self, access_key, secret_key, s3_host='s3.amazonaws.com', secure=True, num_retries=5, socket_timeout=15):
self.access_key = access_key
self.secret_key = secret_key
self.s3_host = s3_host
self.secure = secure
self.num_retries = num_retries
self.socket_timeout = socket_timeout
for section in config.sections():
config.remove_section(section)
config.add_section('Boto')
config.setbool('Boto', 'is_secure', self.secure)
config.set('Boto', 'http_socket_timeout', str(self.socket_timeout))
config.set('Boto', 'num_retries', str(self.num_retries))
self._conn = None
self.connect()
def close(self):
if not self._conn:
self._conn.close()
def connect(self):
if not self._conn:
try:
logging.debug("Connecting to AWS S3 with Access Key: %s" % self.access_key)
self._conn = S3Connection(
self.access_key,
self.secret_key,
host=self.s3_host,
is_secure=self.secure
)
logging.debug("Successfully connected to AWS S3 with Access Key: %s" % self.access_key)
except Exception, e:
logging.error("Cannot connect to AWS S3 with Access Key: %s!" % self.access_key)
raise e
return self._conn
def get_bucket(self, bucket_name):
try:
logging.debug("Connecting to AWS S3 Bucket: %s" % bucket_name)
return self._conn.get_bucket(bucket_name)
except Exception, e:
logging.error("Cannot connect to AWS S3 Bucket: %s!" % bucket_name)
raise e
| apache-2.0 | Python |
7c755e7839f7c602a6c93b1aa2f5011e89d15c85 | Create command for generating prices for flavors | opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor | nodeconductor/iaas/management/commands/addmissingpricelistflavors.py | nodeconductor/iaas/management/commands/addmissingpricelistflavors.py | from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from nodeconductor.cost_tracking.models import DefaultPriceListItem
from nodeconductor.iaas.models import Flavor, Instance
class Command(BaseCommand):
    """Create missing DefaultPriceListItem rows for existing IaaS flavors."""

    def handle(self, *args, **options):
        instance_content_type = ContentType.objects.get_for_model(Instance)
        self.stdout.write('Checking flavors existance in DefaultPriceListItem table ...')
        for flavor in Flavor.objects.all():
            lookup_kwargs = {'item_type': 'flavor', 'key': flavor.name, 'resource_content_type': instance_content_type}
            # Only create a price list item when none exists for this flavor.
            # NOTE(review): check-then-create is racy under concurrent runs;
            # get_or_create would be safer.
            if not DefaultPriceListItem.objects.filter(**lookup_kwargs).exists():
                item = DefaultPriceListItem(**lookup_kwargs)
                item.name = 'Flavor type: {}'.format(flavor.name)
                item.save()
                self.stdout.write('DefaultPriceListItem was created for flavor {}'.format(flavor.name))
        self.stdout.write('... Done')
| mit | Python | |
2616d8f3ef51a8551ac14a9e83b0298b8165093a | Add work-in-progress script to fixup a standalone plugin library. | frizaro/Veloview,frizaro/Veloview,Kitware/VeloView,Kitware/VeloView,Kitware/VeloView,Kitware/VeloView,frizaro/Veloview,frizaro/Veloview,Kitware/VeloView | Superbuild/Projects/apple/fixup_plugin2.py | Superbuild/Projects/apple/fixup_plugin2.py | #!/usr/bin/env python
import subprocess
import os
plugin = 'libVelodyneHDLPlugin.dylib'
paraviewBuildDir = '/source/paraview/build'
nameprefix = '@executable_path/../Libraries/'
prefix = '@executable_path/../Libraries/'
# The official ParaView OSX binaries are built with hdf5, not vtkhdf5.
# Also, they are built with Python 2.6, not 2.7
namechanges = {
'libvtkhdf5_hl-pv3.98.1.dylib' : 'libhdf5.1.8.9.dylib',
'libvtkhdf5-pv3.98.1.dylib' : 'libhdf5_hl.1.8.9.dylib',
'libvtkWrappingPython27-pv3.98.1.dylib' : 'libvtkWrappingPython26-pv3.98.1.dylib'
}
changePythonFramework = False
def fixupPlugin():
    """Rewrite install names in the plugin dylib so it loads from inside
    a ParaView .app bundle.

    Uses ``otool -L`` to list linked libraries, then ``install_name_tool``
    to: point Qt frameworks at @executable_path/../Frameworks/, optionally
    retarget the Python framework from 2.7 to 2.6, point ParaView
    build-tree libraries at @executable_path/../Libraries/ (applying the
    namechanges table for the official binaries), and finally set the
    plugin's own install name (-id).
    """
    output = subprocess.check_output(['otool', '-L', plugin])
    lines = output.split('\n')
    libs = []
    qtlibs = []
    for l in lines:
        # The first token of each otool output line is the library path.
        l = l.strip().split(' ')[0]
        if l.startswith(paraviewBuildDir):
            libs.append(l)
        if l.startswith('Qt'):
            qtlibs.append(l)

    for qtlib in qtlibs:
        command = 'install_name_tool -change %s @executable_path/../Frameworks/%s %s' % (qtlib, qtlib, plugin)
        subprocess.call(command.split())

    if changePythonFramework:
        command = 'install_name_tool -change /System/Library/Frameworks/Python.framework/Versions/2.7/Python /System/Library/Frameworks/Python.framework/Versions/2.6/Python %s' % (plugin)
        subprocess.call(command.split())

    for lib in libs:
        name = os.path.basename(lib)
        if name in namechanges:
            name = namechanges[name]
        command = 'install_name_tool -change %s %s%s %s' % (lib, prefix, name, plugin)
        subprocess.call(command.split())
        # Warn when the rewritten target is absent from the ParaView bundle.
        pvlib = '/Applications/paraview.app/Contents/Libraries/' + name
        if not os.path.exists(pvlib):
            print 'notfound:', pvlib

    command = 'install_name_tool -id %s%s %s' % (nameprefix, os.path.basename(plugin), plugin)
    subprocess.call(command.split())
if __name__ == '__main__':
fixupPlugin()
| apache-2.0 | Python | |
735135c5570edd38324fe3e94aa2f4c2f3043627 | Migrate data from contact_for_research_via and into contact_for_research_methods many to many field | ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend,ministryofjustice/cla_backend | cla_backend/apps/legalaid/migrations/0023_migrate_contact_for_research_via_field.py | cla_backend/apps/legalaid/migrations/0023_migrate_contact_for_research_via_field.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import Q
def migrate_contact_for_research_via_field_data(apps, schema_editor):
    """Copy the legacy contact_for_research_via value into the new
    contact_for_research_methods M2M field (only when the latter is empty)."""
    ContactResearchMethod = apps.get_model("legalaid", "ContactResearchMethod")
    # Map method name -> pk for quick lookup.
    research_methods = {method.method: method.id for method in ContactResearchMethod.objects.all()}
    PersonalDetails = apps.get_model("legalaid", "PersonalDetails")
    models = PersonalDetails.objects.exclude(Q(contact_for_research_via="") | Q(contact_for_research_via=None))
    for model in models:
        if not model.contact_for_research_methods:
            model.contact_for_research_methods = [research_methods.get(model.contact_for_research_via)]
            model.save()
def rollback_migrate_contact_for_research_via_field_data(apps, schema_editor):
    # Intentional no-op: the forward migration only adds data, so there is
    # nothing that must be undone to restore the previous state.
    pass
class Migration(migrations.Migration):
    """Data migration: backfill contact_for_research_methods from the
    legacy contact_for_research_via field."""

    dependencies = [("legalaid", "0022_default_contact_for_research_methods")]

    operations = [
        migrations.RunPython(
            migrate_contact_for_research_via_field_data, rollback_migrate_contact_for_research_via_field_data
        )
    ]
| mit | Python | |
59ac9745064dd02903e35c1c51781505bad505df | add gunicorn config | gpodder/mygpo,gpodder/mygpo,gpodder/mygpo,gpodder/mygpo | gunicorn.conf.py | gunicorn.conf.py |
bind = "unix:/tmp/mygpo.sock"
workers = 2
worker_class = "gevent"
max_requests = 10000
| agpl-3.0 | Python | |
830a41911c5a2bc3982f35a6c6da38f6c659e78b | Create /pypardot/objects/tests/__init__.py | mneedham91/PyPardot4 | pypardot/objects/tests/__init__.py | pypardot/objects/tests/__init__.py | mit | Python | ||
f740dd60e7a4493269679e469c7f1ee5e24ff5af | add build/errors file | fedora-conary/conary,fedora-conary/conary,fedora-conary/conary,fedora-conary/conary,fedora-conary/conary | conary/build/errors.py | conary/build/errors.py |
class BuildError(Exception):
    """Base class for recipe build errors.

    Carries a single message string; both str() and repr() render it.
    """

    def __init__(self, msg):
        # Also initialise Exception so e.args is populated and the
        # exception pickles/propagates like a standard one.
        Exception.__init__(self, msg)
        self.msg = msg

    def __repr__(self):
        return self.msg

    def __str__(self):
        return repr(self)
class RecipeFileError(BuildError):
    """Build error originating from a recipe file."""
    pass
class RecipeDependencyError(RecipeFileError):
    """RecipeFileError subclass for recipe dependency problems."""
    pass
class BadRecipeNameError(RecipeFileError):
    """RecipeFileError subclass for invalid recipe names."""
    pass
| apache-2.0 | Python | |
18f385de7b287a932192f690cb74ff70a452cf47 | test settings file | jarcoal/django-filepicker-urlfield | fpurlfield/test_settings.py | fpurlfield/test_settings.py | # Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = ()
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
}
# TIME_ZONE = 'America/Chicago'
# LANGUAGE_CODE = 'en-us'
# USE_I18N = True
# USE_L10N = True
# USE_TZ = True
# MEDIA_ROOT = ''
# MEDIA_URL = ''
SECRET_KEY = 'secret'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
TEMPLATE_DIRS = ()
INSTALLED_APPS = (
'fpurlfield',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
} | mit | Python | |
62c85cf12b388411919b86ac498908336bfd5e12 | Create password_checker.py | MaximeKjaer/dailyprogrammer-challenges | Challenge-172/02-Intermediate-2/password_checker.py | Challenge-172/02-Intermediate-2/password_checker.py | #!/usr/bin/python
import hashlib
import uuid
password = 'test123'
f = open('salt.txt')
salt = f.read()
f.close()
f = open('encrypted.txt')
hashed_password = f.read()
f.close()
if hashlib.sha512(password + salt).hexdigest() == hashed_password:
print 'ACCESS GRANTED'
else:
print 'ACCESS DENIED'
| mit | Python | |
a59a2c3cbd9c8f029ab679f386ab61a6bcfb5108 | test py script for multibody | openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro | software/perception/constraint_app/scripts/simulate_mb.py | software/perception/constraint_app/scripts/simulate_mb.py | import sys
import os
# for bottime:
import time
import random
import numpy
import termios, atexit
from select import select
def kbhit():
    """Return True when input is waiting on stdin (non-blocking poll)."""
    # Zero timeout makes select() poll without blocking.  '!=' replaces
    # the Python 2-only '<>' operator; behaviour is identical, and the
    # script keeps working if run under a newer interpreter.
    dr, dw, de = select([sys.stdin], [], [], 0)
    return dr != []
myhome = os.environ.get("HOME")
path1 = myhome + "/drc/software/build/lib/python2.7/site-packages"
path2 = myhome + "/drc/software/build/lib/python2.7/dist-packages"
print path1
print path2
sys.path.append(path1)
sys.path.append(path2)
import lcm
from drc.affordance_collection_t import affordance_collection_t
from drc.affordance_t import affordance_t
from drc.affordance_track_collection_t import affordance_track_collection_t
from drc.affordance_track_t import affordance_track_t
from drc.vector_3d_t import vector_3d_t
def timestamp_now (): return int (time.time () * 1000000)
lc = lcm.LCM()
print 'Number of arguments:', len(sys.argv), 'arguments.'
print 'Argument List:', str(sys.argv)
# 1. Send a fitted affordance to the module
aff_fit = affordance_t()
aff_fit.utime = timestamp_now()
aff_fit.otdf_type = "gate_valve"
aff_fit.uid = 0
aff_fit.nparams = 11
aff_fit.params = [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.2, 4.0, 4.0, 4.0]
aff_fit.param_names = ["x", "y", "z", "roll", "pitch", "yaw", "valve_radius", "tube_radius", "lX", "lY", "lZ"]
print 'Sending fit message'
lc.publish("AFFORDANCE_FIT", aff_fit.encode())
# 2. Send some sample "initial points" (i.e., the fit)
track_ids = [10, 20, 30]
track_means = numpy.matrix([[ 1.2, 1.2, 1.2 ],
[ 0.9, 0.8, 0.9 ],
[ 1.5, 1.0, 1.1 ]])
atc = affordance_track_collection_t()
atc.utime = timestamp_now()
atc.uid = 0
atc.ntracks = 2
for i in range(atc.ntracks):
track = affordance_track_t()
track.segment = ""
track.id = track_ids[i]
v = vector_3d_t()
v.x = track_means[i,0]
v.y = track_means[i,1]
v.z = track_means[i,2]
track.position = v
atc.tracks.append(track)
print 'Sending a bunch of initial points'
lc.publish("AFFORDANCE_TRACK_COLLECTION", atc.encode())
# 3. Send some noisy observations
for j in range(3):
atc = affordance_track_collection_t()
atc.utime = timestamp_now()
atc.uid = 0
count = 0
atc.ntracks = 0
for i in range(len(track_ids)):
if j == 2 and i == 1:
continue
track = affordance_track_t()
track.segment = ""
track.id = track_ids[i]
v = vector_3d_t()
var = 0.01*0.01;
v.x = track_means[i,0] + random.gauss(0.0,var)
v.y = track_means[i,1] + random.gauss(0.0,var)
v.z = track_means[i,2] + random.gauss(0.0,var)
track.position = v
atc.tracks.append(track)
atc.ntracks +=1
print 'queueing observation for segment', track.segment
print 'Sending a bunch of noisy observations'
lc.publish("AFFORDANCE_TRACK_COLLECTION", atc.encode())
print 'waiting...'
while ( not kbhit() ):
pass
sys.stdin.read(1)
| bsd-3-clause | Python | |
446984ad7b102587beac03d4329b5d0c061e2095 | Add preserve_{current_canvas,batch_state} and invisible_canvas context managers | rootpy/rootpy,kreczko/rootpy,ndawe/rootpy,ndawe/rootpy,kreczko/rootpy,rootpy/rootpy,rootpy/rootpy,ndawe/rootpy,kreczko/rootpy | rootpy/context.py | rootpy/context.py | from contextlib import contextmanager
import ROOT
@contextmanager
def preserve_current_canvas():
    """
    Context manager which ensures that the current canvas remains the current
    canvas when the context is left.
    """
    # Capture the currently selected pad before the body runs.
    old = ROOT.gPad.func()
    try:
        yield
    finally:
        # Restore the previous pad, if there was one.
        if old:
            old.cd()
        else:
            # Is it possible to set ROOT.gPad back to None, somehow?
            pass
@contextmanager
def preserve_batch_state():
    """
    Context manager which ensures the batch state is the same on exit as it was
    on entry.
    """
    # Remember whether ROOT was in batch (no-graphics) mode.
    old = ROOT.gROOT.IsBatch()
    try:
        yield
    finally:
        ROOT.gROOT.SetBatch(old)
@contextmanager
def invisible_canvas():
    """
    Context manager yielding a temporary canvas drawn in batch mode, invisible
    to the user. Original state is restored on exit.

    Example use; obtain X axis object without interfering with anything:

        with invisible_canvas() as c:
            efficiency.Draw()
            g = efficiency.GetPaintedGraph()
            return g.GetXaxis()
    """
    # Force batch mode so the temporary canvas never appears on screen,
    # and make sure both the batch flag and the current pad are restored.
    with preserve_batch_state():
        ROOT.gROOT.SetBatch()
        with preserve_current_canvas():
            c = ROOT.TCanvas()
            try:
                c.cd()
                yield c
            finally:
                # Destroy the canvas even if the body raised.
                c.Close()
| bsd-3-clause | Python | |
7cb77ef66cad41e1b5d4907272b899a24a689c2d | Test for #423 | dials/dials,dials/dials,dials/dials,dials/dials,dials/dials | test/algorithms/refinement/tst_dials-423.py | test/algorithms/refinement/tst_dials-423.py | #!/usr/bin/env cctbx.python
#
# Copyright (C) (2017) STFC Rutherford Appleton Laboratory, UK.
#
# Author: David Waterman.
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
""" Test the situation that led to https://github.com/dials/dials/issues/423.
In that case instantiating a Refiner for an experiment list with an I23
detector model caused the panel origins to move before any refinement took
place. This occured because for the input experiments.json the root frame for
the hierarchical detector is on source side of the laboratory frame origin, not
on the detector side. Prior to the fix this resulted in incorrect calculation
of the offsets of all panels from the root frame.
"""
from __future__ import absolute_import, division
import os
import libtbx.load_env # required for libtbx.env.find_in_repositories
from libtbx import phil
from dxtbx.model.experiment_list import ExperimentListFactory
from dials.array_family import flex
from dials.algorithms.refinement import RefinerFactory
class Test(object):
    """Regression test for dials issue #423: Refiner construction must
    not move the panel origins of an I23 hierarchical detector."""

    def __init__(self):
        # Load experiments and a reflection subset from
        # dials_regression/refinement_test_data/dials-423.
        dials_regression = libtbx.env.find_in_repositories(
            relative_path="dials_regression",
            test=os.path.isdir)
        data_dir = os.path.join(dials_regression, "refinement_test_data",
                                "dials-423")
        exp_file = os.path.join(data_dir, 'experiments.json')
        ref_file = os.path.join(data_dir, 'subset.pickle')
        self._reflections = flex.reflection_table.from_pickle(ref_file)
        self._experiments = ExperimentListFactory.from_json_file(exp_file,
            check_format=False)

    def run(self):
        """Test that the detector remains similar after refiner construction"""
        from dials.algorithms.refinement.refiner import phil_scope
        params = phil_scope.fetch(source=phil.parse('')).extract()

        # disable outlier rejection for speed of refiner construction
        params.refinement.reflections.outlier.algorithm = 'null'

        refiner = RefinerFactory.from_parameters_data_experiments(params,
            self._reflections, self._experiments)

        # Constructing the refiner must leave the detector unchanged.
        d1 = self._experiments[0].detector
        d2 = refiner.get_experiments()[0].detector
        assert d1.is_similar_to(d2)
        print "OK"
        return
def run():
    # Skip quietly when the dials_regression data repository is absent.
    if not libtbx.env.has_module("dials_regression"):
        print "Skipping tests in " + __file__ + " as dials_regression not present"
        return

    tst = Test()
    tst.run()
if __name__ == '__main__':
run()
| bsd-3-clause | Python | |
32fcd5393402d868d8741385705f58b9e8eb7703 | Update __init__.py | MycroftAI/mycroft-core,aatchison/mycroft-core,linuxipho/mycroft-core,MycroftAI/mycroft-core,aatchison/mycroft-core,forslund/mycroft-core,linuxipho/mycroft-core,Dark5ide/mycroft-core,forslund/mycroft-core,Dark5ide/mycroft-core | mycroft/version/__init__.py | mycroft/version/__init__.py | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
from genericpath import exists, isfile
from mycroft.util.log import getLogger
__author__ = 'augustnmonteiro'
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 11
# END_VERSION_BLOCK
CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
str(CORE_VERSION_MINOR) + "." +
str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)
class VersionManager(object):
    """Read the locally installed version information from disk."""

    __location = "/opt/mycroft/version.json"

    @staticmethod
    def get():
        """Return the parsed version.json dict, or a dict of None values
        when the file is missing or unreadable."""
        if (exists(VersionManager.__location) and
                isfile(VersionManager.__location)):
            try:
                with open(VersionManager.__location) as f:
                    return json.load(f)
            # Catch Exception rather than a bare except so SystemExit and
            # KeyboardInterrupt still propagate.
            except Exception:
                LOG.error("Failed to load version from '%s'"
                          % VersionManager.__location)
        return {"coreVersion": None, "enclosureVersion": None}
| # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
import json
from genericpath import exists, isfile
from mycroft.util.log import getLogger
__author__ = 'augustnmonteiro'
# The following lines are replaced during the release process.
# START_VERSION_BLOCK
CORE_VERSION_MAJOR = 0
CORE_VERSION_MINOR = 8
CORE_VERSION_BUILD = 10
# END_VERSION_BLOCK
CORE_VERSION_STR = (str(CORE_VERSION_MAJOR) + "." +
str(CORE_VERSION_MINOR) + "." +
str(CORE_VERSION_BUILD))
LOG = getLogger(__name__)
class VersionManager(object):
__location = "/opt/mycroft/version.json"
@staticmethod
def get():
if (exists(VersionManager.__location) and
isfile(VersionManager.__location)):
try:
with open(VersionManager.__location) as f:
return json.load(f)
except:
LOG.error("Failed to load version from '%s'"
% VersionManager.__location)
return {"coreVersion": None, "enclosureVersion": None}
| apache-2.0 | Python |
9cb122793d531690b621b4fa8f91481a105305e3 | Add new module: minion.list | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/minion.py | salt/modules/minion.py | # -*- coding: utf-8 -*-
'''
Module to provide information about minions
'''
# Import Python libs
import os
# Import Salt libs
import salt.utils
import salt.key
def list():
'''
Return a list of accepted, denied, unaccepted and rejected keys.
This is the same output as `salt-key -L`
CLI Example:
.. code-block:: bash
salt 'master' minion.list
'''
pki_dir = globals().get('__salt__')['config.get']('pki_dir', '')
transport = globals().get('__salt__')['config.get']('transport', '')
# We have to replace the minion/master directoryies
pki_dir = pki_dir.replace("minion", "master")
# The source code below is (nearly) a copy of salt.key.Key.list_keys
# We have to differentiate between RaetKey._check_minions_directories
# and Zeromq-Keys. Raet-Keys only have three states while ZeroMQ-keys
# have an additional 'denied' state.
if transport in ('zeromq', 'tcp'):
key_dirs = _check_minions_directories(pki_dir)
else:
key_dirs = _check_minions_directories_raetkey(pki_dir)
ret = {}
for dir_ in key_dirs:
ret[os.path.basename(dir_)] = []
try:
for fn_ in salt.utils.isorted(os.listdir(dir_)):
if not fn_.startswith('.'):
if os.path.isfile(os.path.join(dir_, fn_)):
ret[os.path.basename(dir_)].append(fn_)
except (OSError, IOError):
# key dir kind is not created yet, just skip
continue
return ret
def _check_minions_directories(pki_dir):
'''
Return the minion keys directory paths.
This function is a copy of salt.key.Key._check_minions_directories.
'''
minions_accepted = os.path.join(pki_dir, salt.key.Key.ACC)
minions_pre = os.path.join(pki_dir, salt.key.Key.PEND)
minions_rejected = os.path.join(pki_dir, salt.key.Key.REJ)
minions_denied = os.path.join(pki_dir, salt.key.Key.DEN)
return minions_accepted, minions_pre, minions_rejected, minions_denied
def _check_minions_directories_raetkey(pki_dir):
'''
Return the minion keys directory paths.
This function is a copy of salt.key.RaetKey._check_minions_directories.
'''
accepted = os.path.join(pki_dir, salt.key.RaetKey.ACC)
pre = os.path.join(pki_dir, salt.key.RaetKey.PEND)
rejected = os.path.join(pki_dir, salt.key.RaetKey.REJ)
return accepted, pre, rejected
| apache-2.0 | Python | |
91645e4abf4fa128a59257584ba385c19b642425 | Add @s0undtech's nb_open module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/utils/nb_popen.py | salt/utils/nb_popen.py | # -*- coding: utf-8 -*-
'''
saltcloud.utils.nb_popen
~~~~~~~~~~~~~~~~~~~~~~~~
Non blocking subprocess Popen.
:codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
:copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
'''
# Import python libs
import os
import sys
import fcntl
import logging
import subprocess
log = logging.getLogger(__name__)
class NonBlockingPopen(subprocess.Popen):
def __init__(self, *args, **kwargs):
self.stream_stds = kwargs.pop('stream_stds', False)
super(NonBlockingPopen, self).__init__(*args, **kwargs)
if self.stdout is not None:
fod = self.stdout.fileno()
fol = fcntl.fcntl(fod, fcntl.F_GETFL)
fcntl.fcntl(fod, fcntl.F_SETFL, fol | os.O_NONBLOCK)
self.obuff = ''
if self.stderr is not None:
fed = self.stderr.fileno()
fel = fcntl.fcntl(fed, fcntl.F_GETFL)
fcntl.fcntl(fed, fcntl.F_SETFL, fel | os.O_NONBLOCK)
self.ebuff = ''
log.info('Running command {0!r}'.format(*args))
def poll(self):
poll = super(NonBlockingPopen, self).poll()
if self.stdout is not None:
try:
obuff = self.stdout.read()
self.obuff += obuff
if obuff:
logging.getLogger(
'saltcloud.Popen.STDOUT.PID-{0}'.format(self.pid)
).debug(obuff.rstrip())
if self.stream_stds:
sys.stdout.write(obuff)
except IOError, err:
if err.errno not in (11, 35):
# We only handle Resource not ready properly, any other
# raise the exception
raise
if self.stderr is not None:
try:
ebuff = self.stderr.read()
self.ebuff += ebuff
if ebuff:
logging.getLogger(
'saltcloud.Popen.STDERR.PID-{0}'.format(self.pid)
).debug(ebuff.rstrip())
if self.stream_stds:
sys.stderr.write(ebuff)
except IOError, err:
if err.errno not in (11, 35):
# We only handle Resource not ready properly, any other
# raise the exception
raise
return poll
def __del__(self):
if self.stdout is not None:
try:
fod = self.stdout.fileno()
fol = fcntl.fcntl(fod, fcntl.F_GETFL)
fcntl.fcntl(fod, fcntl.F_SETFL, fol & ~os.O_NONBLOCK)
except ValueError:
# Closed FD
pass
if self.stderr is not None:
try:
fed = self.stderr.fileno()
fel = fcntl.fcntl(fed, fcntl.F_GETFL)
fcntl.fcntl(fed, fcntl.F_SETFL, fel & ~os.O_NONBLOCK)
except ValueError:
# Closed FD
pass
super(NonBlockingPopen, self).__del__()
| apache-2.0 | Python | |
f6e32ae48265232f25866dd9060b7cb80551e333 | Create main.py | e-parkinson/starter-naive-bayes-classifier | main.py | main.py |
def calcProbPos(bPlus,bMinus,cPlus,cMinus):
probPos = ((bPlus/cPlus)*(cPlus/(cPlus+cMinus)))/((bPlus+bMinus)/(cPlus+cMinus))
return probPos
def calcMean(t,i):
m = t/i
return m
print('Enter a statement without punctuation:')
userStatement = input().lower()
print('THINKING...')
userStatement = userStatement.strip('\n').split(' ')
cPlus = 0
cMinus = 0
meanTotal = 0
with open('sampleCorpora.txt') as corpora:
for line in corpora.readlines():
lineArray = line.strip('\n').split(',')
jment = str(lineArray[1])
if jment == "pos":
cPlus += 1
else:
if jment == "neg":
cMinus += 1
lengthInput = len(userStatement)
for n in range(0,lengthInput):
bPlus = 0
bMinus = 0
checkString = str(userStatement[n])
#print(checkString)
with open('sampleCorpora.txt') as corpora:
for line in corpora.readlines():
corporaLine = line.strip('\n').split(',')
checkAgainst = str(corporaLine[0])
if checkString in checkAgainst:
posNegCheck = str(corporaLine[1])
if posNegCheck == "pos":
bPlus += 1
else:
if posNegCheck == "neg":
bMinus += 1
probPos = calcProbPos(bPlus,bMinus,cPlus,cMinus)
meanTotal = meanTotal + probPos
print('RESULT: ')
mean = calcMean(meanTotal,lengthInput)
print(str(mean))
if mean > 0.5:
print('positive')
else:
if mean < 0.5:
print('negative')
else:
if mean == 0.5:
print('neutral')
| mit | Python | |
87a9769af3d201b925a5a4a259ccbd007257b1d3 | add python test: read_pack.py | akalend/hhvm-msgpack,akalend/hhvm-msgpack,akalend/hhvm-msgpack,akalend/hhvm-msgpack | test/read_pack.py | test/read_pack.py | import os
import msgpack
f = open("/tmp/data.bin", "r")
package = f.read(1024)
f.close()
data = msgpack.unpackb(package)
print data | mit | Python | |
3912416390ebe5df3c883b280cc6acac5169c1f7 | Add test to check if elements have at least one owner | amolenaar/gaphor,amolenaar/gaphor | tests/test_elements_have_owner.py | tests/test_elements_have_owner.py | """
For all relevant model elements, check if there is at least one "owner"
("owner" is a derived union).
This is needed to display all elements in the tree view.
"""
import itertools
import pytest
import gaphor.SysML.diagramitems
import gaphor.UML.diagramitems
from gaphor import UML
from gaphor.core.modeling import Element
from gaphor.core.modeling.properties import derived
from gaphor.diagram.support import get_model_element
def all_subset_properties(prop):
for sub in prop.subsets:
if isinstance(sub, derived):
yield from all_subset_properties(sub)
else:
yield sub
def all_presented_elements(module):
return (
get_model_element(getattr(module, name))
for name in dir(module)
if not name.startswith("_") and get_model_element(getattr(module, name))
)
def all_presented_uml_and_sysml_elements():
return itertools.chain(
all_presented_elements(gaphor.UML.diagramitems),
all_presented_elements(gaphor.SysML.diagramitems),
[
UML.ExecutionOccurrenceSpecification,
UML.ExtensionEnd,
UML.InstanceSpecification,
UML.MessageOccurrenceSpecification,
],
)
def concrete_owner_property(class_):
return (
p for p in class_.umlproperties() if p in all_subset_properties(Element.owner)
)
def test_all_presented_uml_and_sysml_elements():
elements = all_presented_uml_and_sysml_elements()
assert all(issubclass(c, Element) for c in elements)
@pytest.mark.parametrize("class_", all_presented_uml_and_sysml_elements())
def test_element_has_concrete_ownable_property(class_):
owners = list(concrete_owner_property(class_))
print(f"{class_}: {list(map(str, owners))}")
assert any(owners)
| lgpl-2.1 | Python | |
5307d1cf69c943f7f5fe9dfd475c93f317e8ebb7 | add import script for West Lancashire | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_west_lancashire.py | polling_stations/apps/data_collection/management/commands/import_west_lancashire.py | from data_collection.management.commands import BaseXpressWebLookupCsvImporter
class Command(BaseXpressWebLookupCsvImporter):
council_id = 'E07000127'
addresses_name = 'West Lancashire - PropertyPostCodePollingStationWebLookup-2017-03-08.TSV'
stations_name = 'West Lancashire - PropertyPostCodePollingStationWebLookup-2017-03-08.TSV'
elections = ['local.lancashire.2017-05-04']
csv_delimiter = '\t'
| bsd-3-clause | Python | |
93997e72f63dd586d1a683475f49a466571a9fb0 | Create index.py | yize1992/yize1992.github.io | index.py | index.py | #!/usr/bin/python
print("Hello, World!");
| apache-2.0 | Python | |
4a48b8dd804f9a287d35b697d851a660eec80a75 | Add tests for simple enums | adepue/richenum,hearsaycorp/richenum | tests/richenum/test_simple_enums.py | tests/richenum/test_simple_enums.py | import unittest
from richenum import EnumConstructionException, enum
Breakfast = enum(
COFFEE=0,
OATMEAL=1,
FRUIT=2)
class SimpleEnumTestSuite(unittest.TestCase):
def test_members_are_accessible_through_attributes(self):
self.assertEqual(Breakfast.COFFEE, 0)
def test_lookup_by_name(self):
self.assertEqual(Breakfast.get_id_by_label('COFFEE'), 0)
def test_lookup_by_value(self):
self.assertEqual(Breakfast.get_label_by_id(0), 'COFFEE')
def test_can_cast_to_list_of_choices(self):
self.assertEqual(
Breakfast.choices,
[(0, 'COFFEE'), (1, 'OATMEAL'), (2, 'FRUIT')])
def test_choices_are_ordered_by_value(self):
Shuffled = enum(FRUIT=2, COFFEE=0, OATMEAL=1)
self.assertEqual(Shuffled.choices, Breakfast.choices)
def test_values_can_be_any_hashable_type(self):
try:
Confused = enum(INT=0, TUPLE=(1, 2), STR='yup')
self.assertEqual(Confused.get_id_by_label('TUPLE'), (1, 2))
except:
self.fail('Simple enums should accept values of any hashable type.')
with self.assertRaisesRegexp(EnumConstructionException, 'hashable'):
Confused = enum(LIST=[1, 2])
| mit | Python | |
92075a04b0835b1209eaa806c2aeb44ca371ff2b | Add harfbuzz 0.9.40 | BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild | packages/harfbuzz.py | packages/harfbuzz.py | Package ('harfbuzz', '0.9.40',
sources = ['http://www.freedesktop.org/software/%{name}/release/%{name}-%{version}.tar.bz2'],
configure_flags = [
'--disable-silent-rules',
'--without-cairo',
'--without-freetype',
'--without-glib',
'--without-graphite2',
'--with-icu',
])
| mit | Python | |
564bf6484347fed1d3346ff42d79e4bba02a3c98 | add firs test | nechepurenko/automation | test_add_group.py | test_add_group.py | # -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
class test_add_group(unittest.TestCase):
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_test_add_group(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
wd.find_element_by_link_text("groups").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("test_name")
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("test_name")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test_header")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("test_header")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("test_footer")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | Python | |
774b59a2bba95c4b617ac49e279bcbe73d6b6f3b | Add a script to plot timing data | nbigaouette/sorting,nbigaouette/sorting,nbigaouette/sorting,nbigaouette/sorting | profiling/plot.py | profiling/plot.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import glob
import numpy as np
import matplotlib.pyplot as plt
csv_files = glob.glob('*.csv')
fig = plt.figure()
ax = fig.add_subplot(111)
colors = iter(plt.cm.rainbow(np.linspace(0,1,len(csv_files))))
for csv_file in csv_files:
data = np.genfromtxt(csv_file, delimiter=',', skip_header=1).transpose()
j = data[0]
N = data[1]
avg = data[2]
std = data[3]
ax.errorbar(N, avg, yerr=std, label=csv_file, color=next(colors), marker='o')
ax.set_xlabel('N')
ax.set_ylabel('Timing [ms]')
ax.legend(loc='best')
plt.show()
| bsd-3-clause | Python | |
48c4a4fe9531123d6ca2b9af18162c916af09cc9 | Create moto_parser.py | aravindvnair99/Motorola-Moto-E-XT1022-condor-unbrick,aravindvnair99/Motorola-Moto-E-XT1022-condor-unbrick,aravindvnair99/Motorola-Moto-E-XT1022-condor-unbrick | Bootloader/moto_parser.py | Bootloader/moto_parser.py | mit | Python | ||
13fdc81cb32842dc5e0f05d2aa84c997cd59daa3 | Add test that, if we failed to open the log file, we don't try to write to it. | ipython/ipython,ipython/ipython | IPython/core/tests/test_logger.py | IPython/core/tests/test_logger.py | """Test IPython.core.logger"""
import nose.tools as nt
_ip = get_ipython()
def test_logstart_inaccessible_file():
try:
_ip.logger.logstart(logfname="/") # Opening that filename will fail.
except IOError:
pass
else:
nt.assert_true(False) # The try block should never pass.
try:
_ip.run_cell("a=1") # Check it doesn't try to log this
finally:
_ip.logger.log_active = False # If this fails, don't let later tests fail
| bsd-3-clause | Python | |
a8b3af76c1a6cbf61887f5721fd10bf2ef24b2f8 | Create A_Salinity_vertical_section_zy_movie.py | Herpinemmanuel/Oceanography | Cas_6/Vertical_sections/A_Salinity_vertical_section_zy_movie.py | Cas_6/Vertical_sections/A_Salinity_vertical_section_zy_movie.py |
plt.figure(2)
ax = plt.subplot(projection=ccrs.PlateCarree());
ds1['S'].where(ds1.hFacC>0)[nt,:,:,280].plot()
plt.title('Vertical Section (yz) of Salinity (XC = 0E)')
plt.text(5,5,nt,ha='center',wrap=True)
ax.coastlines()
gl = ax.gridlines(draw_labels=True, alpha = 0.5, linestyle='--');
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
if (nt < 10):
plt.savefig('Salinity_Vertical_section_xz_Cas6-'+'00'+str(nt)+'.png')
plt.clf()
elif (nt > 9) and (nt < 100):
plt.savefig('Salinity_Vertical_section_xz_Cas6'+'0'+str(nt)+'.png')
plt.clf()
else:
plt.savefig('Salinity_Vertical_section_xz_Cas6'+str(nt)+'.png')
plt.clf()
| mit | Python | |
e653cffcd6711113ceb9ce412149e8155f4d6167 | add the plot file | ComplexNetTSP/Simulation-Files-for-Large-Scale-Model-for-Information-Dissemination-with-Device-to-Device | plot.py | plot.py | # -*- encoding: utf-8 -*-
# -------------------------------------------------------------------------------
# Copyright (c) 2014 Vincent Gauthier Telecom SudParis.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -------------------------------------------------------------------------------
__author__ = """\n""".join(['Vincent Gauthier <vgauthier@luxbulb.org>'])
import pickle as p
import pylab as plt
import numpy as np
import os
import argparse
###############################################################################
#
# Begining of global definition
#
simulation_end_time = 30.0
tau = 1.0/5
###############################################################################
#
# End of global definition
#
def matplotlib_setup(figsize_x=10, figsize_y=6):
import matplotlib as mpl
mpl.rcParams['font.size'] = 9.0
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.labelsize'] = 10
mpl.rcParams['ytick.labelsize'] = 10
mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['axes.labelweight'] = 'bold'
mpl.rcParams['axes.linewidth'] = 0.75
mpl.rcParams['lines.linewidth'] = 2
mpl.rcParams['lines.markersize'] = 8
mpl.rcParams['legend.numpoints'] = 1
# figure size in inch
mpl.rcParams['figure.figsize'] = figsize_x, figsize_y
# figure dots per inch
mpl.rcParams['figure.dpi'] = 300
def plot(I, A, output_dir):
dim = A.shape[0]
matplotlib_setup()
x = np.arange(0, simulation_end_time, tau)
plt.figure()
for i in xrange(dim):
plt.loglog(A[i, :], alpha=0.15)
plt.ylabel('Population')
plt.xlabel('Time in days')
#plt.xlim((10**(-1), 40))
plt.savefig(output_dir + '/diffusion.svg')
plt.savefig(output_dir + '/diffusion.pdf')
plt.figure()
plt.loglog(I, 'r', alpha=0.8)
#plt.xlim((10**(-1), 40))
plt.xlabel('Time in days ')
plt.ylabel('Infectious')
plt.savefig(output_dir + '/diffusion1.svg')
plt.savefig(output_dir + '/diffusion1.pdf')
def load_files(directory):
with open(directory+'/I.p', 'rb') as fp:
I = p.load(fp)
with open(directory+'/A.p', 'rb') as fp:
A = p.load(fp)
return I, A
if __name__ == '__main__':
#
# Parse argument
#
parser = argparse.ArgumentParser(description='Process SIR simulation with latent states.')
parser.add_argument('--output', help='output directory', required=True)
parser.add_argument('--input', help='input directory', required=True)
args = parser.parse_args()
argsdict = vars(args)
if args.output and args.input:
output_dir = argsdict['output']
input_dir = argsdict['input']
# Remove the last backslash of the string if exist
if output_dir.endswith('\\'):
output_dir = output_dir[:-1]
# Remove the last backslash of the string if exist
if input_dir.endswith('\\'):
input_dir = input_dir[:-1]
# if output dir doesn' extist create it
if not os.path.exists(output_dir):
os.makedirs(output_dir)
I, A = load_files(input_dir)
plot(I, A, output_dir)
| mit | Python | |
76e412121b80c39d9facc09a51d9b8aa4cdb9722 | Add Check timeouts functionality | robas/data-scraping,robas/data-scraping | OAB/oab_check_timeouts.py | OAB/oab_check_timeouts.py | #!/usr/bin/python
import argparse
import pycurl
import re
import csv
from StringIO import StringIO
from urllib import urlencode
from sys import exit
# Arguments handling
# Setting output filenames
inputfile = "lalala_nok.csv"
filename_ok = "output_ok.csv"
filename_nok = "output_nok.csv"
# Variable definitions
url = 'https://www2.oabsp.org.br/asp/consultaInscritos/consulta_nr_advogado.asp'
referer = 'https://www2.oabsp.org.br/asp/consultaInscritos/consulta01.asp'
no_results = 'Não há resultados que satisfaçam a busca'
# Building the pyCurl request
c = pycurl.Curl()
c.setopt(pycurl.SSL_VERIFYPEER, 0)
c.setopt(pycurl.SSL_VERIFYHOST, 0)
c.setopt(pycurl.SSLVERSION, 3)
c.setopt(pycurl.CONNECTTIMEOUT, 3)
c.setopt(pycurl.TIMEOUT, 3)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.REFERER, referer)
c.setopt(pycurl.HTTPHEADER, ['Connection: keep-alive'])
# Iterating through oab_codes
with open(inputfile, "r") as oab_codes:
for oab_codex in oab_codes:
oab_code = oab_codex.strip('\n')
post_data = {'pagina': 0,
'tipo_consulta' : 1,
'nr_inscricao' : oab_code,
'cbxadv' : 1,
'id_tipoinscricao': 1,
'parte_nome' : 1,
'idCidade' : 0
}
post_fields = urlencode(post_data)
# print(post_fields)
c.setopt(c.POSTFIELDS, post_fields)
try:
buffer = StringIO()
c.setopt(c.WRITEDATA, buffer)
c.perform()
response = buffer.getvalue()
if(no_results in response):
with open(filename_nok, "a") as output_nok:
output_nok.write(str(oab_code)+",notfound\n")
print str(oab_code)+',notfound'
else:
token=response.split('<li><span>')
name=token[1].replace("</span></li>","")
oab_code_state=re.sub('.* - ', '', token[2]).replace('</li>','')
date=re.sub('.*</span>','',token[3]).replace('</li>','')
subsection=re.sub('.*</span>','',token[4]).replace('</li>','')
status=re.sub('</li>.*','',re.sub('.*</span>','',token[5]))
fields=[str(oab_code),name,status,oab_code_state,subsection,date]
with open(filename_ok, "a") as output_ok:
writer = csv.writer(output_ok)
writer.writerow(fields)
print fields
except:
with open(filename_nok, "a") as output_nok:
output_nok.write(str(oab_code)+",timeout\n")
print str(oab_code) + ',timeout'
c.close()
| mit | Python | |
a1337ca14fe2f21c849bd27132bdee079ac47e59 | Add Session Support | kkstu/Torweb,kkstu/Torweb | app/Session.py | app/Session.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
# Session Support For Tornado
import hashlib
import os
import time
import json
class Session:
def __init__(self,prefix='',session_id=None,expires=7200,redis=None):
self.redis = redis
self.expires = expires
self.prefix = prefix
if session_id:
self.session_id = prefix + session_id
self.data = self.get_data()
if self.data:
self.isGuest = False
else:
self.isGuest = True # Not Login
else:
self.session_id = None
self.data = {} # Null Dict
self.isGuest = True # Not Login
# 生成SessionID
def gen_session_id(self):
sid = hashlib.sha1('%s%s' % (os.urandom(16), time.time())).hexdigest()
self.session_id = self.prefix + sid
return sid
# 获取Session数据
def get_data(self):
session = self.redis.get(self.session_id)
if not session:
return None
session = json.loads(session) # 字符串转字典
return session
# Get
def get(self,name):
if name:
return self.data.get(name,None)
else:
return None
# Set
def set(self,name,value):
self.data[name] = value
def save(self):
if not self.isGuest and self.session_id and self.data:
self.redis.set(self.session_id,json.dumps(self.data),self.expires)
# 销毁Session
def remove(self):
if self.session_id: # SessionID存在
self.redis.delete(self.session_id)
self.session_id = None
self.data = None
self.isGuest = True
| mit | Python | |
f9f5d2b040618bc7d7c26383218fad390bf9dd0a | add unit test_connection_detail_retriever | QualiSystems/vCenterShell,QualiSystems/vCenterShell | tests/test_common/test_cloudshell/test_connection_detail_retriever.py | tests/test_common/test_cloudshell/test_connection_detail_retriever.py | from unittest import TestCase
from mock import Mock
from common.cloudshell.conn_details_retriever import ResourceConnectionDetailsRetriever
class TestConnectionDetailRetriever(TestCase):
def test_connection_detail_retriever(self):
helpers = Mock()
cs_retriever_service = Mock()
session = Mock()
resource_context = Mock()
connection_details = Mock()
helpers.get_resource_context_details = Mock(return_value=resource_context)
helpers.get_api_session = Mock(return_value=session)
cs_retriever_service.getVCenterConnectionDetails = Mock(return_value=connection_details)
retriever = ResourceConnectionDetailsRetriever(helpers, cs_retriever_service)
res = retriever.connection_details()
self.assertEqual(res, connection_details)
self.assertTrue(helpers.get_resource_context_details.called)
self.assertTrue(helpers.get_api_session.called)
self.assertTrue(cs_retriever_service.getVCenterConnectionDetails.called_with(session, resource_context))
| apache-2.0 | Python | |
f5675a1cebfe6aa0f8dda3b94aa30139e2528c49 | Create broadcast.py | WebShark025/TheZigZagProject,WebShark025/TheZigZagProject | plugins/broadcast.py | plugins/broadcast.py | @bot.message_handler(commands=['bc'])
def bc_msg(message):
if message.from_user.id in ADMINS_IDS:
if len(message.text.split()) < 2:
bot.reply_to(message, "What should I broadcast?")
return
bcmsg = message.text.replace("/bc ","")
allmembers = list(redisserver.smembers('zigzag_members'))
for userid in allmembers:
bot.send_message(userid, bcmsg, parse_mode="HTML")
bot.reply_to(message, "Successfully broadcasted!")
else:
bot.send_message(message.chat.id, "You dont have permission.")
| mit | Python | |
d34d1d50b853d3a205cbc60a75dd3911a9253b4e | update backend | kevmo314/canigraduate.uchicago.edu,kevmo314/canigraduate.uchicago.edu,kelly-shen/canigraduate.uchicago.edu,kevmo314/canigraduate.uchicago.edu,kelly-shen/canigraduate.uchicago.edu,kevmo314/canigraduate.uchicago.edu,kelly-shen/canigraduate.uchicago.edu,kelly-shen/canigraduate.uchicago.edu,kevmo314/canigraduate.uchicago.edu | app/scraper.py | app/scraper.py | import collections
import json
import httplib2
from oauth2client.client import GoogleCredentials
from lib import Term
def get_http():
http = httplib2.Http()
GoogleCredentials.get_application_default().create_scoped([
'https://www.googleapis.com/auth/firebase.database',
'https://www.googleapis.com/auth/userinfo.email'
]).authorize(http)
return http
def run():
terms = sorted(list(Term.all()))
while len(terms) > 0:
term = terms.pop(0)
data = collections.defaultdict(dict)
for course, sections in term.courses.items():
for id, section in sections.items():
if data[course.id].get('name', section.name) != section.name:
print('[%s] Conflicting course name for %s: %s, %s' % (term, course.id, data[course.id], section.name))
data[course.id]['name'] = section.name
print(get_http().request(
'https://canigraduate-43286.firebaseio.com/course-info.json',
method='PATCH',
body=json.dumps(data)))
if __name__ == '__main__':
run()
| mit | Python | |
620401abdb33b335452df709a1a1f2c4bc55cd4c | Add challenge day 6 | lemming52/white_pawn,lemming52/white_pawn | leetcode/challenge/day06.py | leetcode/challenge/day06.py | """
Given an array of strings, group anagrams together.
Example:
Input: ["eat", "tea", "tan", "ate", "nat", "bat"],
Output:
[
["ate","eat","tea"],
["nat","tan"],
["bat"]
]
Note:
All inputs will be in lowercase.
The order of your output does not matter.
"""
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
hashes = {}
for s in strs:
sHash = tuple(sorted(s))
if sHash in hashes:
hashes[sHash].append(s)
else:
hashes[sHash] = [s]
return [v for k, v in hashes.items()] | mit | Python | |
7af8cc6d59a1d52e7decc90ecb9472f1c5825aa3 | Create ds_hash_two_sum.py | ngovindaraj/Python | leetcode/ds_hash_two_sum.py | leetcode/ds_hash_two_sum.py | # @file Two Sum
# @brief Given an array and target, find 2 nums in array that sum to target
# https://leetcode.com/problems/two-sum/
'''
Given an array of integers, return indices of the two numbers such that they
add up to a specific target.
You may assume that each input would have exactly one solution.
Example: Given nums = [2, 7, 11, 15], target = 9,
Because nums[0] + nums[1] = 2 + 7 = 9,
return [0, 1].
'''
#Note: Use property that if x + y = target, y = target - x
#Use a dictionary with each value (x) as key and idx as value
#Time Complexity = O(n)
def twoSum(self, nums, target):
dict = {}
for i in range(len(nums)):
x = nums[i]
y = target - x
if(y in dict): return dict.get(y), i
dict[x] = i
| mit | Python | |
6a84ed3872303aa5b05462982406749d7bd447d4 | Create main.py | brennanblue/unit_tests | main.py | main.py | #!/usr/bin/env python
# Command line script to convert a single given number to and from several units
import argparse
from src.convert import kilometers_to_miles, miles_to_kilometers, \
years_to__minutes, minutes_to_years
#parse args
parse = argparse.ArgumentParser()
parse.add_argument('value', type=float, help="Provide the number to be converted")
args.parse.parse_args()
#perform conversions
#km -> miles
to_miles = kilometers_to_miles(args.value)
print("{0} kilometer is {1} miles".format(args.value, to_miles))
#miles -> km
to_km = miles_to_kilometers(args.value)
print("{0} miles is {1} kilometers".format(args.value, to_km))
#years -> minutes
to_minutes = years_to_minutes(args.value)
print("{0} years is {1} minutes".format(args.value, to_minutes))
#minutes -> years
to_years = minutes_to_years(ags.value)
print("{0} minutes is {1} years".format(args.value, to_years))
#fin
| cc0-1.0 | Python | |
7629a1cd27c80c5ebff91c4d01bf648f9d4c9b3c | Create main.py | m-bee/kgetb | main.py | main.py | agpl-3.0 | Python | ||
dbb147018a92426c5c9e19a523e0bd8d4c277035 | Create LED_GPIO.py | jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi,jeonghoonkang/BerePi | setup/gpio/LED_GPIO.py | setup/gpio/LED_GPIO.py |
import time
import lgpio
#17,27,22
LED = 17
# open the gpio chip and set the LED pin as output
h = lgpio.gpiochip_open(0)
lgpio.gpio_claim_output(h, LED)
try:
while True:
# Turn the GPIO pin on
lgpio.gpio_write(h, LED, 1)
time.sleep(1)
# Turn the GPIO pin off
lgpio.gpio_write(h, LED, 0)
time.sleep(1)
except KeyboardInterrupt:
lgpio.gpio_write(h, LED, 0)
lgpio.gpiochip_close(h)
| bsd-2-clause | Python | |
741dac8a1cc80549c74c231a0a7b598748f9fa4b | Create program.py | madhurilalitha/Python-Projects | ProductInventorySystem/program.py | ProductInventorySystem/program.py | from abc import *
class Entity(metaclass = ABCMeta):
@abstractproperty
def id_number(self):
return 0
class Product(Entity):
    """An inventory item with a value, an amount and a unit of measure."""

    id = 0  # class-level counter: next identity number to hand out

    def __init__(self, name=None, value=0, amount=0, scale='kg'):
        self._id = Product.id
        Product.id = Product.id + 1
        self._value = value
        self._amount = amount
        self._scale = scale
        # NOTE: the auto-generated name reads the *class* counter, which has
        # already been advanced past this instance's own id.
        self._name = name if name else "{0}_{1}".format(self.__class__, self.id)

    @property
    def id_number(self):
        """Identity number assigned at construction time."""
        return self._id

    @property
    def name(self):
        """Product name (auto-generated when none was supplied)."""
        return self._name

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_value):
        self._value = new_value

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, new_amount):
        self._amount = new_amount

    @property
    def scale(self):
        return self._scale

    @scale.setter
    def scale(self, new_scale):
        self._scale = new_scale

    def __repr__(self):
        return "{0}: {1}".format(self.__class__.__name__, self._id)

    def __str__(self):
        return "{amount}{scale} of {name} valued at {value}".format(
            amount=self._amount, scale=self._scale, name=self._name,
            value=self._value)
class Inventory(Entity):
    """A collection of Product instances, grouped by product name."""

    id = 0  # class-level counter: next inventory identity number

    def __init__(self):
        self._id = Inventory.id  # take the current class counter as our id
        Inventory.id = Inventory.id + 1
        self._products = {}  # name -> list of products carrying that name

    def product_add(self, *args):
        """Add products; each argument may be a Product or a list/tuple of them.

        Anything that is neither is silently ignored.
        """
        def add_to_category(item):
            try:
                self._products[item.name].append(item)
            except KeyError:  # narrowed from a bare except: only a missing key is expected
                self._products[item.name] = [item]

        for arg in args:
            if isinstance(arg, tuple) or isinstance(arg, list):
                for item in arg:
                    # fixed: this called the misspelled `add__to_category`,
                    # so adding a list/tuple always raised NameError.
                    add_to_category(item)
            elif isinstance(arg, Product):
                add_to_category(arg)
            # if it is not a product, then it will not be added

    @property
    def product_value(self):
        """Sum of the values of every item across all categories."""
        return sum([each.value for category in self._products for each in self._products[category]])

    @property
    def product_count(self):
        """Total number of stored products."""
        return len([each for category in self._products for each in self._products[category]])

    @property
    def product_diff_categories(self):
        """Number of distinct product categories (names)."""
        return len(self._products)

    @property
    def products(self):
        """Mapping of product name -> list of products in that category."""
        return self._products

    @property
    def id_number(self):
        """Identity number of this inventory."""
        return self._id

    def __repr__(self):
        return "{0}:{1}".format(self.__class__.__name__, self._id)
class ObjFactory(metaclass = ABCMeta):
    """Abstract factory; concrete subclasses generate domain objects."""

    @abstractmethod
    def get_object(self):
        """Produce (or yield) newly created objects."""
        return 0

    def __repr__(self):
        # fixed: the old body formatted `self._id`, but factories never set
        # that attribute, so repr() always raised AttributeError.
        return self.__class__.__name__
class InventoryFactory(ObjFactory):
    # Concrete factory: lazily yields `amt` freshly constructed Inventory objects.
    def get_object(self,amt =1):
        for i in range(amt):
            yield Inventory()
class ProductFactory(ObjFactory):
    # Concrete factory: lazily yields `amt` freshly constructed Product objects
    # (each with default value/amount/scale and an auto-generated name).
    def get_object(self,amt =1):
        for i in range(amt):
            yield Product()
# Demo / smoke test: build an inventory, fill it, and print summary stats.
if __name__ =="__main__":
    #create an inventory
    inventory = Inventory()
    #add some products to the inventory
    # Each product gets an auto-generated, unique name, so every product
    # lands in its own category.
    genProd = lambda value:Product(value = value)
    for i in range(1,10):
        inventory.product_add(genProd(value = i))
    for i in range(1,5):
        inventory.product_add(genProd(value = i))
    Total_Products = inventory.product_count
    Total_Value = inventory.product_value
    Total_categories = inventory.product_diff_categories
    for name, info in (("Total Products are",Total_Products),("Total Value of the products is",Total_Value),("Total number of categories",Total_categories)):
        print ("{0}: {1}".format(name,info))
    print (inventory.products)
    for product in inventory.products:
        # NOTE(review): "prob details" is presumably meant to read "product details".
        print (product + "prob details :" + str(inventory.products[product]))
| mit | Python | |
d856ea5597230b3befeb03049c45f3706bec5844 | add kael-crontab cli | 360skyeye/kael | kael/cron/cli.py | kael/cron/cli.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author:
@time: 2017/6/15
"""
import os
import click
@click.group()
def cli():
    # Root of the kael-crontab command-line interface; subcommands attach to it.
    pass
def _check_task_is_py(command):
command = command.strip()
head = command.split(' ')[0]
if 'py' == head.split('.')[-1]:
return True
return False
@cli.command('run', short_help='Run task of cron with env.')
@click.option('-c', help='Command string')
@click.option('-d', help='Absolute directory of task')
@click.option('-p', help='Python interpreter location')
def run(c, d, p):
    """Change into the task directory and execute the command.

    ``.py`` commands are run through the configured (or default) Python
    interpreter; anything else is handed to the shell as-is.
    """
    if not d:
        # fixed: a bare `raise` outside an except block aborts with an
        # unrelated "No active exception" RuntimeError; fail explicitly.
        raise ValueError('task directory (-d) is required')
    if not c:
        raise ValueError('command string (-c) is required')
    os.chdir(d)
    python_env = p if p else 'python'
    if _check_task_is_py(c):
        os.system('{} {}'.format(python_env, c))
    else:
        os.system(c)
def main():
    # console_scripts entry point: delegate to the click group.
    cli()
# Allow running the module directly as a script.
if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
599ed110458d5bcf23b74a95c5c472cc376ed702 | Create field_notes.py | carthage-college/django-djspace,carthagecollege/django-djspace,carthage-college/django-djspace,carthagecollege/django-djspace,carthage-college/django-djspace,carthage-college/django-djspace,carthagecollege/django-djspace | djspace/application/field_notes.py | djspace/application/field_notes.py | # adding all the fields we need for this form..
#
| mit | Python | |
f4b0135a48ee94d8504ddf24dcc16b8036c05f2c | add test file | cogniteev/logup-factory,cogniteev/logup-factory | tests/app_test.py | tests/app_test.py | import os
import app
import unittest
import tempfile
class FlaskrTestCase(unittest.TestCase):
    # Exercises the Flask app against a throwaway temporary database.
    def setUp(self):
        # Fresh temp DB per test; TESTING enables Flask's test behavior.
        self.db_fd, app.app.config['DATABASE'] = tempfile.mkstemp()
        app.app.config['TESTING'] = True
        self.app = app.app.test_client()
        app.init_db()
    def tearDown(self):
        # Close the mkstemp() descriptor and delete the temp DB file.
        os.close(self.db_fd)
        os.unlink(app.app.config['DATABASE'])
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| mit | Python | |
5b83b5e9a4e07af3f3dcd37d4f613039a42336e3 | Add salt.modules.container_resource | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/container_resource.py | salt/modules/container_resource.py | # -*- coding: utf-8 -*-
'''
Common resources for LXC and systemd-nspawn containers
These functions are not designed to be called directly, but instead from the
:mod:`lxc <salt.modules.lxc>` and the (future) :mod:`nspawn
<salt.modules.nspawn>` execution modules.
'''
# Import python libs
from __future__ import absolute_import
import logging
import time
import traceback
# Import salt libs
from salt.exceptions import SaltInvocationError
from salt.utils import vt
log = logging.getLogger(__name__)
def run(name,
        cmd,
        output=None,
        no_start=False,
        stdin=None,
        python_shell=True,
        output_loglevel='debug',
        ignore_retcode=False,
        use_vt=False):
    '''
    Common logic for running shell commands in containers

    Requires the full command to be passed to :mod:`cmd.run
    <salt.modules.cmdmod.run>`/:mod:`cmd.run_all <salt.modules.cmdmod.run_all>`

    name
        Container name (currently unused in this body -- presumably kept for
        signature compatibility with the callers; confirm).
    cmd
        The full command line to execute.
    output
        None returns stdout only (cmd.run); one of 'stdout', 'stderr',
        'retcode', 'all' returns the cmd.run_all-style dict.
    use_vt
        When True, run under a salt.utils.vt pseudo-terminal instead of
        the cmd.* execution functions.
    '''
    valid_output = ('stdout', 'stderr', 'retcode', 'all')
    if output is None:
        cmd_func = 'cmd.run'
    elif output not in valid_output:
        raise SaltInvocationError(
            '\'output\' param must be one of the following: {0}'
            .format(', '.join(valid_output))
        )
    else:
        cmd_func = 'cmd.run_all'
    if not use_vt:
        # Simple path: delegate to the loader-provided cmd.* function.
        ret = __salt__[cmd_func](cmd,
                                 stdin=stdin,
                                 python_shell=python_shell,
                                 output_loglevel=output_loglevel,
                                 ignore_retcode=ignore_retcode)
    else:
        stdout, stderr = '', ''
        try:
            proc = vt.Terminal(cmd,
                               shell=python_shell,
                               log_stdin_level=output_loglevel if
                                               output_loglevel == 'quiet'
                                               else 'info',
                               log_stdout_level=output_loglevel,
                               log_stderr_level=output_loglevel,
                               log_stdout=True,
                               log_stderr=True,
                               stream_stdout=False,
                               stream_stderr=False)
            # Consume output
            while proc.has_unread_data:
                try:
                    cstdout, cstderr = proc.recv()
                    if cstdout:
                        stdout += cstdout
                    if cstderr:
                        # With output=None stderr is folded into stdout so the
                        # caller still sees it in the single returned string.
                        if output is None:
                            stdout += cstderr
                        else:
                            stderr += cstderr
                    time.sleep(0.5)
                except KeyboardInterrupt:
                    break
            # NOTE(review): 'pid' is hard-coded to 2 here and below --
            # presumably a placeholder for proc.pid; confirm upstream.
            ret = stdout if output is None \
                else {'retcode': proc.exitstatus,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        except vt.TerminalException:
            trace = traceback.format_exc()
            log.error(trace)
            # 127 mimics the shell's "command not found" code on failure.
            ret = stdout if output is None \
                else {'retcode': 127,
                      'pid': 2,
                      'stdout': stdout,
                      'stderr': stderr}
        finally:
            # NOTE(review): if vt.Terminal() itself raised, `proc` is unbound
            # and this line raises NameError -- confirm intended.
            proc.terminate()
    return ret
| apache-2.0 | Python | |
a4eb209150385ff2f9fea3722c0256fe7ea20b40 | Add unit test | Appdynamics/python-langutil,Appdynamics/python-langutil | test.py | test.py | from langutil import php
import unittest
class TestPHPScalarStringGeneratorFunctions(unittest.TestCase):
def test_generate_scalar_int(self):
self.assertEqual(php.generate_scalar(2), '2')
def test_generate_scalar_float(self):
self.assertEqual(php.generate_scalar(2.001), '2.001')
def test_generate_scalar_bools(self):
self.assertEqual(php.generate_scalar(True), 'TRUE')
self.assertEqual(php.generate_scalar(False), 'FALSE')
self.assertEqual(php.generate_scalar(True, upper_keywords=False),
'true')
self.assertEqual(php.generate_scalar(False, upper_keywords=False),
'false')
def test_generate_scalar_null(self):
self.assertEqual(php.generate_scalar(None), 'NULL')
self.assertEqual(php.generate_scalar(None, upper_keywords=False),
'null')
def test_nonspecial_string(self):
self.assertEqual(php.generate_scalar('non-special string'),
"'non-special string'")
def test_special_string(self):
self.assertEqual(php.generate_scalar('special\nstring'),
'"special\nstring"')
self.assertEqual(php.generate_scalar('special\rstring'),
'"special\rstring"')
self.assertEqual(php.generate_scalar('\tspecial\rstring'),
'"\tspecial\rstring"')
def _raises_exception_cb(self):
php.generate_scalar([])
def test_nonacceptable_type(self):
self.assertRaises(php.PHPScalarException, self._raises_exception_cb)
class TestPHPArrayStringGeneratorFunction(unittest.TestCase):
def test_generate_simple_array_from_list(self):
expected_ret = """array(
1,
2,
3,
);"""
ret = php.generate_array([1, 2, 3])
self.assertEqual(expected_ret, php.generate_array([1, 2, 3]))
def test_generate_many_lists_recursive_to_array(self):
expected_ret = """array(
1,
array(
3,
4,
),
2,
array(
5,
6,
),
);"""
ret = php.generate_array([1, [3, 4], 2, [5, 6]], indent=4)
self.assertEqual(expected_ret, ret)
def test_generate_simple_dict_to_array(self):
expected_ret = """array(
'a' => 2,
'b' => 3,
);"""
ret = php.generate_array({'a': 2, 'b': 3}, indent=4)
self.assertEqual(expected_ret, ret)
def test_dict_to_array_recursive(self):
expected_ret = """array(
'b' => array(
1,
2,
array(
"key\n" => "special\tstring",
'non' => 'special string',
),
),
'c' => 2,
'd' => array(
array(
array(
),
),
),
);"""
python_val = {
'b': [
1,
2,
{
'key\n': 'special\tstring',
'non': 'special string',
'_order': ['key\n', 'non'],
},
],
'c': 2,
'd': [[{}]],
'_order': ['b', 'c', 'd'],
}
ret = php.generate_array(python_val, indent=4)
self.assertEqual(expected_ret, ret)
class TestPHPSerialize(unittest.TestCase):
def test_serialize(self):
self.assertNotEqual(php.serialize([]), [])
def test_unserialize(self):
self.assertEqual(php.unserialize('a:0:{};'), {})
# This is used to keep compatibility with 2.6
if __name__ == '__main__':
unittest.main()
| mit | Python | |
8c2b90d4d2c9fc8ad759284719eab4dd346ccab2 | Add tests | sot/cxotime,sot/cxotime | test.py | test.py | """
Simple test of CxoTime. The base Time object is extremely well
tested, so this simply confirms that the add-on in CxoTime works.
"""
import pytest
import numpy as np
from cxotime import CxoTime
try:
from Chandra.Time import DateTime
HAS_DATETIME = True
except ImportError:
HAS_DATETIME = False
def test_cxotime_basic():
t = CxoTime(1)
assert t.format == 'secs'
assert t.scale == 'utc'
assert np.allclose(t.secs, 1.0, rtol=1e-10, atol=0)
assert t.tt.date == '1998:001:00:00:01.000'
t = CxoTime('1998:001:00:00:01.000', scale='tt')
assert t.scale == 'tt'
assert np.allclose(t.secs, 1.0, atol=1e-10, rtol=0)
@pytest.mark.skipif('not HAS_DATETIME')
def test_cxotime_vs_datetime():
dates = ('2015-06-30 23:59:60.5', '2015:180:01:02:03.456')
for date in dates:
assert np.allclose(CxoTime(date).secs, DateTime(date).secs,
atol=1e-4, rtol=0)
assert CxoTime(CxoTime(date).secs).date == DateTime(DateTime(date).secs).date
| bsd-2-clause | Python | |
815ef4b4b0dce640077e1f8ecd2fbe95598bf539 | Create existing comments' owners records | kr41/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core | src/ggrc/migrations/versions/20160608132526_170e453da661_add_comments_owners_info.py | src/ggrc/migrations/versions/20160608132526_170e453da661_add_comments_owners_info.py | # Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: peter@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Add comments' owners information.
Create Date: 2016-06-08 13:25:26.635435
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "170e453da661"
down_revision = "7a9b715ec504"
def upgrade():
"""Create owner information for the existing comments.
A comment's owner is assumed to be the user who last edited it, and this
information is added to the object_owners table for all existing comments.
If a record already exists, do nothing (this could happen e.g. on a DB
downgrade and a subsequent another upgrade).
"""
# NOTE: we set the status column's value to "Draft" to be consistent with
# what the application does when a new comment is created
command = """
INSERT IGNORE INTO object_owners (
person_id, ownable_id, ownable_type, modified_by_id,
created_at, updated_at, status
)
SELECT
modified_by_id, id, "Comment", modified_by_id, created_at, updated_at,
"Draft"
FROM comments;
"""
op.execute(command)
def downgrade():
"""Do not delete any comments' owner information to preserve data."""
| apache-2.0 | Python | |
b73c75bbafb53864a86f95949d6a028f9e79f718 | Add Tile class | supermitch/Island-Gen | tile.py | tile.py | from __future__ import division
class Tile(object):
    """A map tile at grid position ``(x, y)`` with a terrain height."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.height = z  # stored as `height`; `z` is only the constructor name

    def __repr__(self):
        """Unambiguous debug representation."""
        return 'Tile(x={0}, y={1}, z={2})'.format(self.x, self.y, self.height)
| mit | Python | |
9cc26c8a95ab4e6ffa9c991b5a575c7e6d62dae4 | add pytest for util.location | tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status | pytests/util/test_location.py | pytests/util/test_location.py | import pytest
import json
import util.location
@pytest.fixture
def urllib_req(mocker):
util.location.reset()
return mocker.patch("util.location.urllib.request")
@pytest.fixture
def primaryLocation():
return {
"country": "Middle Earth",
"longitude": "10.0",
"latitude": "20.5",
"ip": "127.0.0.1",
}
@pytest.fixture
def secondaryLocation():
return {
"country_name": "Rivia",
"longitude": "-10.0",
"latitude": "-23",
"ip": "127.0.0.6",
}
def test_primary_provider(urllib_req, primaryLocation):
urllib_req.urlopen.return_value.read.return_value = json.dumps(primaryLocation)
assert util.location.country() == primaryLocation["country"]
assert util.location.coordinates() == (
primaryLocation["latitude"],
primaryLocation["longitude"],
)
assert util.location.public_ip() == primaryLocation["ip"]
def test_secondary_provider(mocker, urllib_req, secondaryLocation):
urlopen = mocker.MagicMock()
urlopen.read.return_value = json.dumps(secondaryLocation)
urllib_req.urlopen.side_effect = [RuntimeError(), urlopen]
assert util.location.country() == secondaryLocation["country_name"]
assert util.location.coordinates() == (
secondaryLocation["latitude"],
secondaryLocation["longitude"],
)
assert util.location.public_ip() == secondaryLocation["ip"]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| mit | Python | |
c9690cabe3c4d1d02307e3594a2cac505f4a166d | Add new image moments functions | astropy/photutils,larrybradley/photutils | photutils/utils/_moments.py | photutils/utils/_moments.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ..centroids import centroid_com
__all__ = ['_moments_central', '_moments']
def _moments_central(data, center=None, order=1):
"""
Calculate the central image moments up to the specified order.
Parameters
----------
data : 2D array-like
The input 2D array.
center : tuple of two floats or `None`, optional
The ``(x, y)`` center position. If `None` it will calculated as
the "center of mass" of the input ``data``.
order : int, optional
The maximum order of the moments to calculate.
Returns
-------
moments : 2D `~numpy.ndarray`
The central image moments.
"""
data = np.asarray(data)
if data.ndim != 2:
raise ValueError('data must be a 2D array.')
if center is None:
center = centroid_com(data)
indices = np.ogrid[[slice(0, i) for i in data.shape]]
ypowers = (indices[0] - center[1]) ** np.arange(order + 1)
xpowers = np.transpose(indices[1] - center[0]) ** np.arange(order + 1)
return np.dot(np.transpose(xpowers), np.dot(data, ypowers))
def _moments(data, order=1):
    """
    Calculate the raw image moments up to the specified order.

    Parameters
    ----------
    data : 2D array-like
        The input 2D array.

    order : int, optional
        The maximum order of the moments to calculate.

    Returns
    -------
    moments : 2D `~numpy.ndarray`
        The raw image moments.
    """
    # Raw moments are central moments taken about the origin (0, 0).
    return _moments_central(data, center=(0, 0), order=order)
| bsd-3-clause | Python | |
6a3c960640741036c3f444547cada1e1b7a24100 | Add first unit test for api | mitre/multiscanner,mitre/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,jmlong1027/multiscanner,awest1339/multiscanner,awest1339/multiscanner,awest1339/multiscanner,awest1339/multiscanner,jmlong1027/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner,mitre/multiscanner | tests/test_api.py | tests/test_api.py | import os
import sys
import json
import responses
import unittest
CWD = os.path.dirname(os.path.abspath(__file__))
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Allow import of api.py
if os.path.join(MS_WD, 'utils') not in sys.path:
sys.path.insert(0, os.path.join(MS_WD, 'utils'))
# Use multiscanner in ../
sys.path.insert(0, os.path.dirname(CWD))
import multiscanner
import api
HTTP_OK = 200
HTTP_CREATED = 201
class TestURLCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_index(self):
expected_response = {'Message': 'True'}
resp = self.app.get('/')
self.assertEqual(resp.status_code, HTTP_OK)
self.assertEqual(json.loads(resp.data), expected_response)
| mpl-2.0 | Python | |
d0432f1d3d48634c00027b71eb131c5e36827c4b | Add dropdown element located in widget bar | plamut/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core | src/lib/constants/element/widget_bar/dropdown.py | src/lib/constants/element/widget_bar/dropdown.py | SELECTOR = ".inner-nav-item"
CLAUSES = "Clauses"
CONTRACTS = "Contracts"
DATA_ASSETS = "Data Assets"
FACILITIES = "Facilities"
MARKETS = "Markets"
ORG_GROUPS = "Org Groups"
POLICIES = "Policies"
PROCESSES = "Processes"
PRODUCTS = "Products"
PROJECTS = "Projects"
STANDARDS = "Standards"
SYSTEMS = "Systems"
VENDORS = "Vendors"
THREAD_ACTORS = "Thread Actors"
RISKS = "Risks"
TASKS = "Tasks"
| apache-2.0 | Python | |
763680e57b28a9746050206cd63450bf11c3e512 | Fix ProgramEditor permissions to not include Program delete | VinnieJohns/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,uskudnik/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,vladan-m/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core | src/ggrc_basic_permissions/migrations/versions/20131010001257_10adeac7b693_fix_programeditor_pe.py | src/ggrc_basic_permissions/migrations/versions/20131010001257_10adeac7b693_fix_programeditor_pe.py |
"""Fix ProgramEditor permissions
Revision ID: 10adeac7b693
Revises: 8f33d9bd2043
Create Date: 2013-10-10 00:12:57.391754
"""
# revision identifiers, used by Alembic.
revision = '10adeac7b693'
down_revision = '8f33d9bd2043'
import json
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
# Lightweight table stub (no ORM model import) so the migration can issue
# an UPDATE against `roles` directly.
roles_table = table('roles',
    column('id', sa.Integer),
    column('name', sa.String),
    column('permissions_json', sa.Text),
    column('description', sa.Text),
    column('modified_by_id', sa.Integer),
    column('created_at', sa.DateTime),
    column('updated_at', sa.DateTime),
    column('context_id', sa.Integer),
    )
def set_permissions(program_editor_objects):
    """Rewrite the ProgramEditor role's permissions.

    Grants create/read/update on every given object type, and delete on
    every type except 'Program' itself.
    """
    # Work on a copy so the caller's list keeps 'Program'.
    program_editor_delete_objects = list(program_editor_objects)
    program_editor_delete_objects.remove('Program')
    current_datetime = datetime.now()
    op.execute(roles_table.update()\
        .values(
            permissions_json = json.dumps({
                'create': program_editor_objects,
                'read': program_editor_objects,
                'update': program_editor_objects,
                'delete': program_editor_delete_objects,
            }),
            updated_at = current_datetime)\
        .where(roles_table.c.name == 'ProgramEditor'))
def upgrade():
    """Apply the corrected ProgramEditor permission set."""
    set_permissions([
        'Cycle',
        'ObjectDocument',
        'ObjectObjective',
        'ObjectPerson',
        'ObjectSection',
        'Program',
        'ProgramControl',
        'ProgramDirective',
        'Relationship',
        ])
def downgrade():
    # Intentional no-op: the previous permission set is not restored.
    pass
| apache-2.0 | Python | |
da488fa4505de818a5efcec13fdb7963d5051389 | Create util.py | nikohernandiz/TVLineFinder | util.py | util.py | import requests
import logging
def downloadRedditUrl(url):
print "downloadRedditUrl(): Downloading url: {}".format(url)
#assert url.startswith('https://www.reddit.com/r/learnprogramming/')
headers = {
'User-Agent': 'Searching Reddit bot version 1.0',
}
r = requests.get(url,headers = headers)
if r.status_code != 200:
raise Exception("Non-OK status code: {}".format(r.status_code))
return r.text
#Find the TV Line in post
def parseRedditPost(html):
bs = BeautifulSoup(html)
return bs.select('div.usertext-body')[1].text
| mit | Python | |
2beac94eb32fc4adb976c4a10018de8518e4bada | Add wsgi file | phantomii/helix | wsgi.py | wsgi.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
from helix.api import app
from helix.common import config
from helix.common import encoding
from helix.common import log as helix_logging
# Parse CLI/config arguments; warn (but keep going with defaults) when no
# configuration file can be located.
if not config.parse(sys.argv[1:]):
    logging.warning("Unable to find configuration file via the"
                    " default search paths (~/.helix/, ~/, /etc/helix/,"
                    " /etc/) and the '--config-file' option!")

helix_logging.configure()

log = logging.getLogger(__name__)

encoding.set_default_encoding_from_config()

# Module-level WSGI callable picked up by the WSGI server (e.g. gunicorn).
application = app.build_wsgi_application()
| apache-2.0 | Python | |
7c095c82e1b6a16da65b8fcfaf77d9a606321d76 | Create sum67.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/sum67.py | Python/CodingBat/sum67.py | # http://codingbat.com/prob/p108886
def sum67(nums):
sum = 0
i = 0
while i < len(nums):
if nums[i] == 6:
while nums[i] != 7:
i += 1
else:
sum += nums[i]
i += 1
return sum
| mit | Python | |
28f41fcfc80bc562343e510e3e0e5e57d97d27ea | Create Scrap_share_marketdata.py | codepunter/scrap-with-python | Scrap_share_marketdata.py | Scrap_share_marketdata.py | import urllib
import re
#TItile scrap of any website
# regex='<title>(.+?)</title>'
# pattern =re.compile(regex)
# htmlfile = urllib.urlopen("https://www.cnn.com")
# htmltext=htmlfile.read()
# titles=re.findall(pattern,htmltext)
# print titles
# Scrap using finance yahoo.com
# symbolfile=open("symbols.txt")
# symbolslist=symbolfile.read()
# newsymbolslist=symbolslist.split("\n")
# # symbolslist=["APPL","SPY"]
# i=0
# while i<len(newsymbolslist)-1:
# url="http://finance.yahoo.com/q?s="+newsymbolslist[i]+"&ql=1"
# htmlfile=urllib.urlopen(url)
# htmltext=htmlfile.read()
# regex='<span class="time_rtq_ticker">(.+?)</span></span>'
# pattern=re.compile(regex)
# price=re.findall(pattern,htmltext)
# s=price[0]
# print s.split(">",1)[1]
# i+=1
# Scrap using google finance
htmltext=urllib.urlopen("http://www.google.com/finance?q=AAPL")
regex='<span id="ref_[^.]*_l">(.+?)</span>'
pattern=re.compile(regex)
results=re.findall(pattern,htmltext)
print results
| apache-2.0 | Python | |
1ec2f110c16de75503092df873693e2929baa8cd | add the "Cargos Importantes" field | datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit | candidates/migrations/0018_cr_add_important_posts_field.py | candidates/migrations/0018_cr_add_important_posts_field.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
def add_extra_field(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
if settings.ELECTION_APP == 'cr':
ExtraField.objects.create(
key='important_roles',
type='longer-text',
label=u'Important Roles',
)
def remove_extra_field(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
if settings.ELECTION_APP == 'cr':
extra_field = ExtraField.objects.get('important_roles')
extra_field.personextrafieldvalue_set.all().delete()
extra_field.delete()
class Migration(migrations.Migration):
dependencies = [
('candidates', '0017_remove_cv_and_program_fields'),
]
operations = [
migrations.RunPython(add_extra_field, remove_extra_field)
]
| agpl-3.0 | Python | |
0f5a52a215f8f1e16ab5ddf622a541919ab760ce | Fix up language detector. | alephdata/aleph,alephdata/aleph,OpenGazettes/aleph,OpenGazettes/aleph,OpenGazettes/aleph,smmbllsm/aleph,OpenGazettes/aleph,gazeti/aleph,pudo/aleph,gazeti/aleph,alephdata/aleph,pudo/aleph,smmbllsm/aleph,gazeti/aleph,gazeti/aleph,pudo/aleph,alephdata/aleph,alephdata/aleph,smmbllsm/aleph | aleph/analyze/language.py | aleph/analyze/language.py | import logging
import langid
# https://github.com/saffsd/langid.py
from aleph.analyze.analyzer import Analyzer
log = logging.getLogger(__name__)
THRESHOLD = 0.9
CUTOFF = 30
class LanguageAnalyzer(Analyzer):
    # Detects document languages with langid. A text snippet must be at
    # least CUTOFF characters long and classified above THRESHOLD
    # confidence before its language is recorded.

    def analyze_text(self, document, meta):
        # Skip documents whose metadata already carries languages.
        if len(meta.languages):
            return
        languages = set()
        for page in document.pages:
            if not page.text or len(page.text) < CUTOFF:
                continue
            lang, score = langid.classify(page.text)
            if score > THRESHOLD:
                languages.add(lang)
        self.save(document, meta, languages)

    def analyze_tabular(self, document, meta):
        # Same as analyze_text, but classifies each cell value of each record.
        if len(meta.languages):
            return
        languages = set()
        for record in document.records:
            for text in record.data.values():
                if not text or len(text) < CUTOFF:
                    continue
                lang, score = langid.classify(text)
                if score > THRESHOLD:
                    languages.add(lang)
        self.save(document, meta, languages)

    def save(self, document, meta, languages):
        # Only write detected languages when the metadata has none yet.
        existing = meta.get('languages')
        if existing is None or not len(existing):
            meta['languages'] = list(languages)
        super(LanguageAnalyzer, self).save(document, meta)
| import logging
import langid
# https://github.com/saffsd/langid.py
from aleph.analyze.analyzer import Analyzer
log = logging.getLogger(__name__)
THRESHOLD = 0.9
CUTOFF = 30
class LanguageAnalyzer(Analyzer):
    # Older revision of the analyzer: tabular data is walked as
    # document.tables -> rows -> cell values (the newer version iterates
    # document.records instead).

    def analyze_text(self, document, meta):
        # Skip documents whose metadata already carries languages.
        if len(meta.languages):
            return
        languages = set()
        for page in document.pages:
            if not page.text or len(page.text) < CUTOFF:
                continue
            lang, score = langid.classify(page.text)
            if score > THRESHOLD:
                languages.add(lang)
        self.save(document, meta, languages)

    def analyze_tabular(self, document, meta):
        if len(meta.languages):
            return
        languages = set()
        for table in document.tables:
            for row in table:
                for text in row.values():
                    if not text or len(text) < CUTOFF:
                        continue
                    lang, score = langid.classify(text)
                    if score > THRESHOLD:
                        languages.add(lang)
        self.save(document, meta, languages)

    def save(self, document, meta, languages):
        # Only write detected languages when the metadata has none yet.
        existing = meta.get('languages')
        if existing is None or not len(existing):
            meta['languages'] = list(languages)
        super(LanguageAnalyzer, self).save(document, meta)
| mit | Python |
57fe1a44c2285f39cc1454bbd6cfb3ce621348c3 | Add a test to validate the user creation | aligot-project/aligot,aligot-project/aligot,aligot-project/aligot,skitoo/aligot | aligot/tests/test_user.py | aligot/tests/test_user.py | # coding: utf-8
from django.core.urlresolvers import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from ..models import User
class TestUser(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('user-create')).status_code)
self.assertEquals(0, User.objects.count())
def test_create(self):
"""
Create user & wait for 201 response.
"""
data = {
'username': 'test',
'password': 'test',
'email': 'test@mail.com'
}
response = self.client.post(reverse('user-create'), data)
self.assertEqual(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEqual(1, User.objects.count())
# Check the first
user = User.objects.all()[0]
self.assertEqual(user.username, data['username'], 'Username in DB don\'t match')
| mit | Python | |
d63235026ec40857d3cbeef67064879d4b180eeb | add pip_upgrade | HongxuChen/dotfiles,HongxuChen/dotfiles,HongxuChen/dotfiles | _bin/pip_upgrade.py | _bin/pip_upgrade.py | #!/usr/bin/env python
import pip
from subprocess import call
for dist in pip.get_installed_distributions():
call("pip install --upgrade " + dist.project_name, shell=True)
| mit | Python | |
cba5577517659e13511dcd45c996fd292cbd1cf8 | Add Eq typeclass definition | NicolasT/typeclasses | typeclasses/eq.py | typeclasses/eq.py | # typeclasses, an educational implementation of Haskell-style type
# classes, in Python
#
# Copyright (C) 2010 Nicolas Trangez <eikke eikke com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, version 2.1
# of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
'''Definition of the Eq typeclass'''
from typeclasses import function, TypeClass
# Typeclass member declarations. function(1) presumably declares a
# dispatchable member function (the meaning of the arity argument is defined
# in typeclasses.function -- not visible here; TODO confirm).
eq = function(1)
ne = function(1)

# Eq mirrors Haskell's Eq class: each member's default is defined in terms of
# the other, so an instance must override at least one of eq/ne or the
# mutually-recursive defaults would never terminate.
Eq = TypeClass((eq, lambda a, b: not ne(a, b)),
               (ne, lambda a, b: not eq(a, b)))
| lgpl-2.1 | Python | |
2c7a40679e6202446a2e1076e19832589abf9ef9 | Add test mobile flatpage | GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin | geotrek/api/tests/test_mobile_flatpage.py | geotrek/api/tests/test_mobile_flatpage.py | from __future__ import unicode_literals
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.testcases import TestCase
from geotrek.flatpages.factories import FlatPageFactory
from geotrek.flatpages.models import FlatPage
# Keys every flatpage detail response must expose; kept sorted so it can be
# compared order-independently against sorted(response.keys()).
FLATPAGE_DETAIL_PROPERTIES_JSON_STRUCTURE = sorted([
    'id', 'title', 'content'
])
class FlatPageAdministratorTest(TestCase):
    """Exercise the mobile flatpage API list/detail endpoints as a superuser."""

    @classmethod
    def setUpTestData(cls):
        # Two pages total; the first is kept as a handle for detail lookups.
        cls.flatpage = FlatPageFactory.create()
        FlatPageFactory.create()
        cls.administrator = User.objects.create(username="administrator", is_superuser=True,
                                                is_staff=True, is_active=True)
        cls.administrator.set_password('administrator')
        cls.administrator.save()
        cls.administrator.refresh_from_db()

    def get_flatpage_list(self, params=None):
        # French Accept-Language header: the API localizes its responses.
        return self.client.get(reverse('apimobile:flatpage-list'), params, HTTP_ACCEPT_LANGUAGE='fr')

    def get_flatpage_detail(self, id_flatpage, params=None):
        return self.client.get(reverse('apimobile:flatpage-detail', args=(id_flatpage,)),
                               params, HTTP_ACCEPT_LANGUAGE='fr')

    def test_flatpage_list_administrator(self):
        """The list endpoint returns both pages with their titles."""
        self.client.login(username="administrator", password="administrator")
        response = self.get_flatpage_list()
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(len(json_response), 2)
        self.assertEqual(json_response[0].get('title'), FlatPage.objects.first().title)

    def test_flatpage_detail_administrator(self):
        """The detail payload exposes exactly the expected keys and values."""
        self.client.login(username="administrator", password="administrator")
        response = self.get_flatpage_detail(self.flatpage.pk)
        self.assertEqual(response.status_code, 200)
        json_response = json.loads(response.content.decode('utf-8'))
        self.assertEqual(sorted(json_response.keys()),
                         FLATPAGE_DETAIL_PROPERTIES_JSON_STRUCTURE)
        self.assertEqual(json_response.get('content'), self.flatpage.content)
        self.assertEqual(json_response.get('title'), self.flatpage.title)
| bsd-2-clause | Python | |
4afd2553625db404cdfedfcf336079b3d9d723e3 | Add test for auth service pre-run time validation checks. | StackStorm/st2,Plexxi/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,StackStorm/st2,Plexxi/st2,tonybaloney/st2,tonybaloney/st2,nzlosh/st2,nzlosh/st2,tonybaloney/st2 | st2auth/tests/unit/test_validation_utils.py | st2auth/tests/unit/test_validation_utils.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from oslo_config import cfg
from st2auth.validation import validate_auth_backend_is_correctly_configured
from st2tests import config as tests_config
__all__ = [
'ValidationUtilsTestCase'
]
class ValidationUtilsTestCase(unittest2.TestCase):
    """Pre-run-time validation checks for the st2auth backend configuration."""

    def setUp(self):
        super(ValidationUtilsTestCase, self).setUp()
        tests_config.parse_args()

    def test_validate_auth_backend_is_correctly_configured_success(self):
        result = validate_auth_backend_is_correctly_configured()
        self.assertTrue(result)

    def test_validate_auth_backend_is_correctly_configured_invalid_backend(self):
        cfg.CONF.set_override(group='auth', name='mode', override='invalid')

        expected_msg = ('Invalid auth mode "invalid" specified in the config. '
                        'Valid modes are: proxy, standalone')
        # assertRaisesRegexp is a deprecated alias (removed in Python 3.12).
        self.assertRaisesRegex(ValueError, expected_msg,
                               validate_auth_backend_is_correctly_configured)

    def test_validate_auth_backend_is_correctly_configured_backend_doesnt_expose_groups(self):
        # Flat file backend doesn't expose user group membership information, aka the
        # "has group info" capability
        cfg.CONF.set_override(group='auth', name='backend', override='flat_file')
        cfg.CONF.set_override(group='auth', name='backend_kwargs',
                              override='{"file_path": "dummy"}')
        cfg.CONF.set_override(group='rbac', name='enable', override=True)
        cfg.CONF.set_override(group='rbac', name='sync_remote_groups', override=True)

        expected_msg = ('Configured auth backend doesn\'t expose user group information. Disable '
                        'remote group synchronization or')
        self.assertRaisesRegex(ValueError, expected_msg,
                               validate_auth_backend_is_correctly_configured)
| apache-2.0 | Python | |
b413af07917f3555edb4b69c4d4a0e4d5c4a629f | Create boolean_logic_from_scratch.py | Kunalpod/codewars,Kunalpod/codewars | boolean_logic_from_scratch.py | boolean_logic_from_scratch.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Boolean Logic from Scratch
#Problem level: 7 kyu
def func_or(a, b):
    """Logical OR over truthiness.

    Returns a plain bool: True iff at least one operand is truthy,
    regardless of the operands' types (lists, strings, numbers, ...).
    """
    return bool(a) or bool(b)
def func_xor(a, b):
    """Logical XOR over truthiness.

    Returns a plain bool: True iff exactly one operand is truthy
    (i.e. the operands' truth values differ).
    """
    return bool(a) != bool(b)
| mit | Python | |
df9c8b2c2e616937afdbf09fc4a76ac7b821c8a5 | Add test (which we fail at the moment) | openhatch/oh-bugimporters,openhatch/oh-bugimporters,openhatch/oh-bugimporters | bugimporters/tests/test_spider.py | bugimporters/tests/test_spider.py | import os
import bugimporters.main
from mock import Mock
HERE = os.path.dirname(os.path.abspath(__file__))

# Create a global variable that can be referenced both from inside tests
# and from module-level functions. The None entries are stand-ins for
# callbacks a real deployment would supply.
bug_data_transit = {
    'get_fresh_urls': None,
    'update': None,
    'delete_by_url': None,
}

# Trac importers additionally need timeline callbacks: stub the time lookup
# and mock the mutators so call patterns could be asserted if needed.
trac_data_transit = {
    'get_bug_times': lambda url: (None, None),
    'get_timeline_url': Mock(),
    'update_timeline': Mock()
}

# Combined transit mapping in the shape BugImportSpider consumers expect.
importer_data_transits = {'bug': bug_data_transit, 'trac': trac_data_transit}
class TestBaseSpider(object):
    """BugImportSpider must skip misconfigured trackers instead of crashing."""

    def setup_class(cls):
        # pytest invokes setup_class with the class object; no decorator needed.
        cls.spider = bugimporters.main.BugImportSpider()
        # This is sample input data that has an invalid special
        # bug parser name ('fedora-electronic-lab').
        cls.spider.input_data = [
            {'as_appears_in_distribution': u'',
             'documentation_type': u'',
             'existing_bug_urls': [],
             'bug_project_name_format': u'FEL',
             'base_url': u'https://fedorahosted.org/fedora-electronic-lab/report/1',
             'custom_parser': u'fedora-electronic-lab',
             'documentation_text': u'',
             'bitesized_text': u'',
             'bitesized_type': u'',
             'queries': [u'https://fedorahosted.org/fedora-electronic-lab'],
             'get_older_bug_data': None,
             'tracker_name': u'fedora-electronic-lab',
             'bugimporter': u'trac'},
        ]

    def test_get_bugimporters(self):
        # We should get no bugimporters out.
        # In the past, what happened was a crash.
        assert([] == list(self.spider.get_bugimporters()))
| agpl-3.0 | Python | |
cde401e95bef16b3bcc815251187af094240b598 | Create check_linux.py | applicant1844244/mit-decision-check | check_linux.py | check_linux.py | import cups
from twill.commands import *
import html2text
import subprocess
import time
## Emotional word stems searched (case-insensitively) in the decision page
## text to classify the outcome; 'congratulat' matches both
## 'congratulations' and 'congratulate'.
acceptance = ['congratulat', 'enjoy', 'party']
rejection = ['sorry', 'unfortunately', 'disappoint']
##function to login and save the html
def retrieve():
    """Log into the MIT decisions site via twill and save the decision page
    to decision.html in the working directory."""
    go('https://decisions.mit.edu/decision.php')
    fv("1", "username", "username") #replace with the actual username
    fv("1", "password", "password") #replace with the actual password
    formaction('f','https://decisions.mit.edu/decision.php')
    submit()
    save_html('decision.html')
##function to check if the applicant has been accepted
def check():
    """Classify the saved decision page.

    Converts decision.html to plain text (also persisted to decision.txt)
    and scans it for acceptance/rejection word stems.

    Returns the sentence to speak on a match, or -1 when the text could not
    be classified.
    """
    html = open("decision.html").read()
    # Convert once; the same text is both written to disk and scanned.
    converted = html2text.html2text(html)
    with open("decision.txt", "w") as text_file:
        text_file.write(converted)
    lowered = converted.lower()
    if any(word in lowered for word in acceptance):
        #command to be spoken in case of acceptance
        return "Congratulations! You have been admitted to the class of 2020"
    elif any(word in lowered for word in rejection):
        #command to be spoken in case of rejection
        return "I am extremely sorry. Unfortunately, you couldn't be admitted"
    else:
        # print(...) is valid under both Python 2 and Python 3.
        print("Unable to identify")
        return -1
##function to print the decision
def printit():
    """Send decision.txt to a configured CUPS printer."""
    conn = cups.Connection()
    printers = conn.getPrinters()
    # Index 1 assumes at least two printers are configured, and relies on
    # Python 2's keys() returning a list -- TODO confirm.
    printer_name = printers.keys()[1]
    cups.setUser('username') #replace with the computer's account name
    conn.printFile(printer_name, "decision.txt", "",{})
# Poll the decisions site until a verdict is recognized, then print the
# decision text and announce it via speech-dispatcher. check() returns -1
# (not a string) only when no verdict keywords were found.
while True:
    retrieve()
    command=check()
    if command!=-1:
        printit()
        subprocess.call(['speech-dispatcher']) #start speech dispatcher
        subprocess.call(['spd-say', command]) #say the command
        break
    time.sleep(15) #recheck the decision every 15 seconds
| mit | Python | |
fde083c87f0e2582fbf57415e957b93d116ad67a | Create RequestHandler related to GCI. | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | app/soc/modules/gci/views/base.py | app/soc/modules/gci/views/base.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the boiler plate required to construct GCI views.
"""
__authors__ = [
'"Selwyn Jacob" <selwynjacob90@gmail.com>',
]
from soc.views.base import RequestHandler
from soc.modules.gci.views import base_templates
from soc.modules.gci.views.helper import access_checker
from soc.modules.gci.views.helper.request_data import RequestData
from soc.modules.gci.views.helper.request_data import RedirectHelper
class RequestHandler(RequestHandler):
    """Customization required by GCI to handle HTTP requests.

    NOTE: this deliberately shadows the imported soc.views.base.RequestHandler;
    the base-class reference is resolved before the new name is bound.
    """

    def render(self, template_path, context):
        """Renders the page using the specified context.

        See soc.views.base.RequestHandler.

        The context object is extended with the following values:
          header: a rendered header.Header template for the current self.data
          mainmenu: a rendered site_menu.MainMenu template for the current self.data
          footer: a rendered site_menu.Footer template for the current self.data
        """
        context['header'] = base_templates.Header(self.data)
        context['mainmenu'] = base_templates.MainMenu(self.data)
        context['footer'] = base_templates.Footer(self.data)
        super(RequestHandler, self).render(template_path, context)

    def init(self, request, args, kwargs):
        """Populates the request data and selects the access checker/mutator.

        Developers get the permissive Developer* variants; all other users go
        through the regular access checks.
        """
        self.data = RequestData()
        self.redirect = RedirectHelper(self.data, self.response)
        self.data.populate(self.redirect, request, args, kwargs)
        if self.data.is_developer:
            self.mutator = access_checker.DeveloperMutator(self.data)
            self.check = access_checker.DeveloperAccessChecker(self.data)
        else:
            self.mutator = access_checker.Mutator(self.data)
            self.check = access_checker.AccessChecker(self.data)

    def error(self, status, message=None):
        """Renders the GCI error page with the given HTTP status.

        Any partially written response content is reused as the page name and
        then cleared before re-rendering.
        """
        self.response.set_status(status)
        template_path = "v2/modules/gci/error.html"
        context = {
            'page_name': self.response.content,
            'message': message,
        }
        self.response.content = ''
        self.render(template_path, context)
| apache-2.0 | Python | |
e2669eddb9187db9a71095d8ed860f8b25369e78 | add new package (#20106) | iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack | var/spack/repos/builtin/packages/py-catkin-pkg/package.py | var/spack/repos/builtin/packages/py-catkin-pkg/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyCatkinPkg(PythonPackage):
    """Library for retrieving information about catkin packages."""

    homepage = "https://wiki.ros.org/catkin_pkg"
    url = "https://pypi.io/packages/source/c/catkin-pkg/catkin_pkg-0.4.23.tar.gz"

    version('0.4.23', sha256='28ee181cca827c0aabf9397351f58a97e1475ca5ac7c106a5916e3ee191cd3d0')

    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-docutils', type=('build', 'run'))
    depends_on('py-python-dateutil', type=('build', 'run'))
    depends_on('py-pyparsing', type=('build', 'run'))
    # argparse is only needed as a backport on Python <= 2.6.
    depends_on('py-argparse', when='^python@:2.6', type=('build', 'run'))
| lgpl-2.1 | Python | |
0106355df43bc35a75aafc6b9070f78131e89bef | Test for switching to postgres search backend | p/wolis-phpbb,p/wolis-phpbb | tests/search_backend_postgres.py | tests/search_backend_postgres.py | from wolis.test_case import WolisTestCase
class SearchBackendPostgresTest(WolisTestCase):
    """Switch the board's search backend to PostgreSQL fulltext via the ACP."""

    def test_set_search_backend(self):
        # Log into both the board and the admin control panel first.
        self.login('morpheus', 'morpheus')
        self.acp_login('morpheus', 'morpheus')

        self.change_acp_knob(
            link_text='Search settings',
            check_page_text='Here you can define what search backend will be used',
            name='search_type',
            value='phpbb_search_fulltext_postgres',
        )


if __name__ == '__main__':
    import unittest
    unittest.main()
| bsd-2-clause | Python | |
1f12da3d049527f838ab21c042b8f18e1977af49 | Migrate existing platform admin services to not be counted | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0283_platform_admin_not_live.py | migrations/versions/0283_platform_admin_not_live.py | """empty message
Revision ID: 0283_platform_admin_not_live
Revises: 0282_add_count_as_live
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0283_platform_admin_not_live'
down_revision = '0282_add_count_as_live'
from alembic import op
import sqlalchemy as sa
# Set count_as_live for every service whose creator is a platform admin.
# The flag value ('true'/'false') is interpolated via str.format by
# upgrade/downgrade; only those two literals are ever substituted.
STATEMENT = """
UPDATE
services
SET
count_as_live = {count_as_live}
FROM
users
WHERE
services.created_by_id = users.id and
users.platform_admin is true
;
"""
def upgrade():
    # Services created by platform admins should not count towards live totals.
    op.execute(STATEMENT.format(count_as_live='false'))
def downgrade():
    # Restore the previous behaviour of counting these services as live.
    op.execute(STATEMENT.format(count_as_live='true'))
| mit | Python | |
8e0e28c45616479c3d1fea9be78553185126743b | change case_type to location_type to be more clear about what's expected | dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,qedsoftware/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq | corehq/apps/consumption/models.py | corehq/apps/consumption/models.py | from decimal import Decimal
from couchdbkit.ext.django.schema import Document, StringProperty, DecimalProperty
# Discriminator values for DefaultConsumption.type, from broadest to most
# specific scope.
TYPE_DOMAIN = 'domain'
TYPE_PRODUCT = 'product'
TYPE_SUPPLY_POINT_TYPE = 'supply-point-type'
TYPE_SUPPLY_POINT = 'supply-point'
class DefaultConsumption(Document):
    """
    Model for setting the default consumption value of an entity
    """
    # One of the TYPE_* constants; determines which of the fields below are
    # meaningful for this document.
    type = StringProperty()  # 'domain', 'product', 'supply-point-type', 'supply-point'
    domain = StringProperty()
    product_id = StringProperty()
    supply_point_type = StringProperty()
    supply_point_id = StringProperty()
    default_consumption = DecimalProperty()
def get_default_consumption(domain, product_id, location_type, case_id):
    """Return the most specific configured default consumption, or None.

    Keys appear ordered from most to least specific; with descending=True and
    limit=1 the couch view presumably yields only the most specific match --
    TODO confirm against the consumption_index view's collation.
    """
    keys = [
        [domain, product_id, {}, case_id],
        [domain, product_id, location_type, None],
        [domain, product_id, None, None],
        [domain, None, None, None],
    ]
    results = DefaultConsumption.get_db().view(
        'consumption/consumption_index',
        keys=keys, reduce=False, limit=1, descending=True,
    )
    results = results.one()
    # NOTE(review): Decimal(float) would carry binary-float noise; assumes the
    # view emits values Decimal() accepts exactly (str/int) -- confirm.
    return Decimal(results['value']) if results else None
| from decimal import Decimal
from couchdbkit.ext.django.schema import Document, StringProperty, DecimalProperty
# Discriminator values for DefaultConsumption.type, from broadest to most
# specific scope.
TYPE_DOMAIN = 'domain'
TYPE_PRODUCT = 'product'
TYPE_SUPPLY_POINT_TYPE = 'supply-point-type'
TYPE_SUPPLY_POINT = 'supply-point'
class DefaultConsumption(Document):
    """
    Model for setting the default consumption value of an entity
    """
    # One of the TYPE_* constants; determines which of the fields below are
    # meaningful for this document.
    type = StringProperty()  # 'domain', 'product', 'supply-point-type', 'supply-point'
    domain = StringProperty()
    product_id = StringProperty()
    supply_point_type = StringProperty()
    supply_point_id = StringProperty()
    default_consumption = DecimalProperty()
def get_default_consumption(domain, product_id, case_type, case_id):
    """Return the most specific configured default consumption, or None.

    Keys appear ordered from most to least specific; with descending=True and
    limit=1 the couch view presumably yields only the most specific match --
    TODO confirm against the consumption_index view's collation.
    """
    keys = [
        [domain, product_id, {}, case_id],
        [domain, product_id, case_type, None],
        [domain, product_id, None, None],
        [domain, None, None, None],
    ]
    results = DefaultConsumption.get_db().view(
        'consumption/consumption_index',
        keys=keys, reduce=False, limit=1, descending=True,
    )
    results = results.one()
    # NOTE(review): Decimal(float) would carry binary-float noise; assumes the
    # view emits values Decimal() accepts exactly (str/int) -- confirm.
    return Decimal(results['value']) if results else None
| bsd-3-clause | Python |
6857624e9d6633038f0565a520de856ee40def09 | Test with many envs and large groups | lhupfeldt/multiconf | test/many_envs_test.py | test/many_envs_test.py | # Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from .. import ConfigRoot
from ..envs import EnvFactory
ef = EnvFactory()

# Build 16 groups ('g0'..'g15') of 128 envs each (named 'e<group>_<index>'),
# 2048 envs in total, to stress attribute resolution across many envs/groups.
envs = []
groups = []
for ii in range(0, 16):
    local_envs = []
    for jj in range(0, 128):
        local_envs.append(ef.Env('e' + str(ii) + '_' + str(jj)))
    groups.append(ef.EnvGroup('g' + str(ii), *local_envs))
    envs.extend(local_envs)
def test_many_envs():
    """Env-specific setattr overrides resolve correctly among 2048 envs."""
    # The config is instantiated for envs[0] (e0_0), so only the e0_0
    # overrides take effect; every other attribute falls back to its default.
    with ConfigRoot(envs[0], envs) as conf:
        conf.setattr('a', default=None, e0_0=0)
        conf.setattr('b', default=None, e1_7=1)
        conf.setattr('c', default=None, e2_15=2)
        conf.setattr('d', default=None, e3_23=3)
        conf.setattr('e', default=None, e4_31=4)
        conf.setattr('f', default=None, e5_39=5)
        conf.setattr('g', default=None, e6_47=6)
        conf.setattr('h', default=None, e7_55=7)
        conf.setattr('i', default=None, e0_0=10, e15_127=8)

    assert conf.a == 0
    assert conf.b == None
    assert conf.i == 10
def test_many_groups():
    """Group-level setattr overrides resolve correctly with large groups."""
    # This is slow! Group resolution must expand each 128-env group.
    # envs[0] (e0_0) is a member of g0, so g0 overrides apply; the more
    # specific e0_0 value wins over the g15 group value for 'i'.
    with ConfigRoot(envs[0], envs) as conf:
        conf.setattr('a', default=None, g0=0)
        conf.setattr('b', default=None, g1=1)
        conf.setattr('i', default=None, e0_0=10, g15=8)

    assert conf.a == 0
    assert conf.b == None
    assert conf.i == 10
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.