commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
450557e0bfb902de862e5fe42868d3fbf7165600 | Add lc0983_minimum_cost_for_tickets.py from Hotel Schulz Berlin | lc0983_minimum_cost_for_tickets.py | lc0983_minimum_cost_for_tickets.py | """Leetcode 983. Minimum Cost For Tickets
Medium
URL: https://leetcode.com/problems/minimum-cost-for-tickets/
In a country popular for train travel, you have planned some train travelling
one year in advance. The days of the year that you will travel is given as
an array days. Each day is an integer from 1 to 365.
Train tickets are sold in 3 different ways:
- a 1-day pass is sold for costs[0] dollars;
- a 7-day pass is sold for costs[1] dollars;
- a 30-day pass is sold for costs[2] dollars.
The passes allow that many days of consecutive travel.
For example, if we get a 7-day pass on day 2, then we can travel for 7 days:
day 2, 3, 4, 5, 6, 7, and 8.
Return the minimum number of dollars you need to travel every day in the given
list of days.
Example 1:
Input: days = [1,4,6,7,8,20], costs = [2,7,15]
Output: 11
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 1-day pass for costs[0] = $2, which covered day 1.
On day 3, you bought a 7-day pass for costs[1] = $7, which covered days 3, 4, ..., 9.
On day 20, you bought a 1-day pass for costs[0] = $2, which covered day 20.
In total you spent $11 and covered all the days of your travel.
Example 2:
Input: days = [1,2,3,4,5,6,7,8,9,10,30,31], costs = [2,7,15]
Output: 17
Explanation:
For example, here is one way to buy passes that lets you travel your travel plan:
On day 1, you bought a 30-day pass for costs[2] = $15 which covered days 1, 2, ..., 30.
On day 31, you bought a 1-day pass for costs[0] = $2 which covered day 31.
In total you spent $17 and covered all the days of your travel.
Note:
- 1 <= days.length <= 365
- 1 <= days[i] <= 365
- days is in strictly increasing order.
- costs.length == 3
- 1 <= costs[i] <= 1000
"""
class Solution(object):
    def mincostTickets(self, days, costs):
        """Return the minimum total cost to cover every travel day.

        Bottom-up DP over calendar days: dp[d] is the cheapest cost that
        covers all travel days <= d.  On a non-travel day no new pass is
        needed; on a travel day take the cheapest of buying a 1-, 7- or
        30-day pass whose coverage ends on day d.

        Time: O(last_day), Space: O(last_day).

        :type days: List[int]
        :type costs: List[int]
        :rtype: int
        """
        if not days:
            # No travel planned: nothing to buy.
            return 0
        travel = set(days)
        last = days[-1]  # days is strictly increasing per the constraints
        dp = [0] * (last + 1)
        for d in range(1, last + 1):
            if d not in travel:
                dp[d] = dp[d - 1]
            else:
                dp[d] = min(dp[d - 1] + costs[0],
                            dp[max(0, d - 7)] + costs[1],
                            dp[max(0, d - 30)] + costs[2])
        return dp[last]


def main():
    # Run both examples from the problem statement (expected: 11, 17).
    sol = Solution()
    print(sol.mincostTickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))
    print(sol.mincostTickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31],
                             [2, 7, 15]))


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
a737126f8f8bcac1a00999f9e5c2a23bca9efd0d | Create hamming.py | hamming.py | hamming.py | #Python Problem 2
#hamming.py
#Introduction to Bioinformatics Assignment 2
#Purpose:Calculate Hamming Distance
#Your Name: Michael Thomas
#Date: 10/10/15
#stores 3 database sequences
seqList = ["AGGATACAGCGGCTTCTGCGCGACAAATAAGAGCTCCTTGTAAAGCGCCAAAAAAAGCCTCTCGGTCTGTGGCAGCAGCGTTGGCCCGGCCCCGGGAGCGGAGAGCGAGGGGAGGCAGATTCGGAGGAAGGTCTGAAAAG",
"AAAATACAGGGGGTTCTGCGCGACTTATGGGAGCTCCTTGTGCGGCGCCATTTTAAGCCTCACAGACTATGGCAGCAGCGTTGGCCCGGCAAAAGGAGCGGAGAGCGAGGGGAGGCGGAGACGGACGAAGGTCTGAGCAG",
"CCCATACAGCCGCTCCTCCGCGACTTATAAGAGCTCCTTGTGCGGCGCCATTTTAAGCCTCTCGGTCTGTGGCAGCAGCGTTGGCCCGCCCAAAACAGCGGAGAGCGAGGGGAGGCGGAGACGGAGGAAGGTCTGAGCAG"]
#your query sequence
s1 = "AGGATACAGCGGCTTCTGCGCGACTTATAAGAGCTCCTTGTGCGGCGCCATTTTAAGCCTCTCGGTCTGTGGCAGCAGCGTTGGCCCGGCCCCGGGAGCGGAGAGCGAGGGGAGGCGGAGACGGAGGAAGGTCTGAGGAG"
count=[0,0,0]
#outer loop to go through seqList[]
for i in range(len(seqList)):
#save each string to iterate trough on secondary loop
seqi = seqList[i]
#checks for non-matches between s1 and seqi and iterates count
for j in range(len(s1)):
if s1[j] != seqi[j]:
count[i] = count[i] + 1
#Results
#hamming distance for each sequence
print "The Hamming distance dh(s1,seqList[0]) =", count[0]
print "The Hamming distance dh(s1,seqList[1]) = ", count[1]
print "The Hamming distance dh(s1,seqList[2]) = ", count[2]
| Python | 0.000001 | |
290239e45b5a4eae88a5c92304b46bf74a04b616 | update Enclosure.mime docstring [skip ci] | mailthon/enclosure.py | mailthon/enclosure.py | """
mailthon.enclosure
~~~~~~~~~~~~~~~~~~
Implements Enclosure objects- parts that collectively
make up body of the email.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
from email.encoders import encode_base64
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from os.path import basename
from .helpers import guess
from .headers import Headers, content_disposition
class Enclosure(object):
    """
    Common base for all enclosure types.  An enclosure models one part
    of the envelope being sent - it carries a piece of the message
    content together with its own headers.

    :param headers: Iterable of header items, stored internally in an
        RFC-compliant ``Headers`` mapping under the ``headers``
        attribute.
    """

    def __init__(self, headers=()):
        self.headers = Headers(headers)

    def mime_object(self):
        """
        Hook for subclasses: build and return the bare MIME object,
        before any of the internal headers are applied.
        """
        raise NotImplementedError

    def mime(self):
        """
        Return the finalised MIME object with the internal headers
        applied.  Subclasses normally override :meth:`mime_object`
        instead of this method.
        """
        part = self.mime_object()
        self.headers.prepare(part)
        return part
class PlainText(Enclosure):
    """
    Enclosure with a text/plain mimetype.

    :param content: Unicode or bytes string.
    :param encoding: Charset used to serialize the content, or the
        encoding of the content itself.
    :param headers: Optional headers.
    """

    subtype = 'plain'

    def __init__(self, content, encoding='utf-8', **kwargs):
        Enclosure.__init__(self, **kwargs)
        self.encoding = encoding
        self.content = content

    def mime_object(self):
        # MIMEText handles the charset/transfer-encoding details.
        return MIMEText(self.content, self.subtype, self.encoding)
class HTML(PlainText):
    """
    Subclass of PlainText with a text/html mimetype.
    """
    # Only the MIME subtype differs from PlainText.
    subtype = 'html'
class Binary(Enclosure):
    """
    Enclosure holding arbitrary binary content.  If the payload is
    really HTML or plain text, prefer the HTML or PlainText enclosures
    since they expose a simpler interface.

    :param content: A bytes string.
    :param mimetype: Mimetype of the content.
    :param encoding: Optional charset of the content.
    :param encoder: An optional encoder_ function (base64 by default).
    :param headers: Optional headers.

    .. _encoder: https://docs.python.org/2/library/email.encoders.html
    """

    def __init__(self, content, mimetype, encoding=None,
                 encoder=encode_base64, **kwargs):
        Enclosure.__init__(self, **kwargs)
        self.encoder = encoder
        self.encoding = encoding
        self.mimetype = mimetype
        self.content = content

    def mime_object(self):
        part = MIMEBase(*self.mimetype.split('/'))
        part.set_payload(self.content)
        if self.encoding:
            # Replace Content-Type so it carries the charset parameter.
            del part['Content-Type']
            part.add_header('Content-Type', self.mimetype,
                            charset=self.encoding)
        self.encoder(part)
        return part
class Attachment(Binary):
    """
    Binary subclass specialised for attaching files.  Compared to using
    Binary directly, the file contents are fetched lazily and a
    Content-Disposition header is filled in automatically.

    :param path: Absolute/Relative path to the file.
    :param headers: Optional headers.
    """

    def __init__(self, path, headers=()):
        self.path = path
        self.mimetype, self.encoding = guess(path)
        self.encoder = encode_base64
        # Start from an automatic Content-Disposition header and let
        # caller-supplied headers override it.
        merged = dict([content_disposition('attachment', basename(path))])
        merged.update(headers)
        self.headers = Headers(merged)

    @property
    def content(self):
        """
        Lazily read and return the raw bytes of the file.
        """
        with open(self.path, 'rb') as handle:
            return handle.read()
| """
mailthon.enclosure
~~~~~~~~~~~~~~~~~~
Implements Enclosure objects- parts that collectively
make up body of the email.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
from email.encoders import encode_base64
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from os.path import basename
from .helpers import guess
from .headers import Headers, content_disposition
class Enclosure(object):
    """
    Base class for Enclosure objects to inherit from.
    An enclosure is a part of the enclosure in a real
    envelope- it contains part of the content to be
    sent.

    :param headers: Iterable of headers to include,
        stored in an RFC-compliant Headers mapping
        internally under the headers attribute.
    """
    def __init__(self, headers=()):
        self.headers = Headers(headers)

    def mime_object(self):
        """
        To be overriden. Returns the generated MIME
        object, without applying the internal headers.
        """
        raise NotImplementedError

    def mime(self):
        """
        Returns the finalised mime object, after
        applying the internal headers.
        """
        # Build the bare MIME object, then let the Headers mapping
        # stamp its fields onto it.
        mime = self.mime_object()
        self.headers.prepare(mime)
        return mime
class PlainText(Enclosure):
    """
    Enclosure that has a text/plain mimetype.

    :param content: Unicode or bytes string.
    :param encoding: Encoding used to serialize the
        content or the encoding of the content.
    :param headers: Optional headers.
    """
    # MIME subtype; overridden by subclasses (e.g. HTML).
    subtype = 'plain'

    def __init__(self, content, encoding='utf-8', **kwargs):
        Enclosure.__init__(self, **kwargs)
        self.content = content
        self.encoding = encoding

    def mime_object(self):
        # MIMEText handles charset/transfer-encoding details.
        return MIMEText(self.content,
                        self.subtype,
                        self.encoding)
class HTML(PlainText):
    """
    Subclass of PlainText with a text/html mimetype.
    """
    # Only the MIME subtype differs from PlainText.
    subtype = 'html'
class Binary(Enclosure):
    """
    An Enclosure subclass for binary content. If the
    content is HTML or any kind of plain-text then
    the HTML or PlainText Enclosures are receommended
    since they have a simpler interface.

    :param content: A bytes string.
    :param mimetype: Mimetype of the content.
    :param encoding: Optional encoding of the content.
    :param encoder: An optional encoder_ function.
    :param headers: Optional headers.

    .. _encoder: https://docs.python.org/2/library/email.encoders.html
    """
    def __init__(self, content, mimetype, encoding=None,
                 encoder=encode_base64, **kwargs):
        Enclosure.__init__(self, **kwargs)
        self.content = content
        self.mimetype = mimetype
        self.encoding = encoding
        self.encoder = encoder

    def mime_object(self):
        mime = MIMEBase(*self.mimetype.split('/'))
        mime.set_payload(self.content)
        if self.encoding:
            # Re-add Content-Type so the charset parameter is included.
            del mime['Content-Type']
            mime.add_header('Content-Type',
                            self.mimetype,
                            charset=self.encoding)
        self.encoder(mime)
        return mime
class Attachment(Binary):
    """
    Binary subclass for easier file attachments.
    Basically using this class has the advantage
    that fetching the file contents is lazy, which
    may be desired. Else use the Binary class. Also,
    the Content-Disposition header is automatically
    set.

    :param path: Absolute/Relative path to the file.
    :param headers: Optional headers.
    """
    def __init__(self, path, headers=()):
        self.path = path
        self.mimetype, self.encoding = guess(path)
        self.encoder = encode_base64
        # Automatic Content-Disposition header; caller-supplied
        # headers take precedence via update().
        heads = dict([content_disposition('attachment', basename(path))])
        heads.update(headers)
        self.headers = Headers(heads)

    @property
    def content(self):
        """
        Lazily returns the bytes contents of the file.
        """
        with open(self.path, 'rb') as handle:
            return handle.read()
| Python | 0 |
fad2e9d7b05c072b4d25f9f6c51e25c87428f41d | add PyPI publisher, for pushing to packages.python.org | hyde/ext/publishers/pypi.py | hyde/ext/publishers/pypi.py | """
Contains classes and utilities that help publishing a hyde website to
the documentation hosting on http://packages.python.org/.
"""
import os
import getpass
import zipfile
import tempfile
import httplib
import urlparse
from base64 import standard_b64encode
import ConfigParser
from hyde.fs import File, Folder
from hyde.publisher import Publisher
from hyde.util import getLoggerWithNullHandler
logger = getLoggerWithNullHandler('hyde.ext.publishers.pypi')
class PyPI(Publisher):
    """Publisher that uploads the generated site as the project's
    documentation on PyPI (packages.python.org) via the ``doc_upload``
    form action.  Written for Python 2 (print statement, raw_input,
    httplib, urlparse).
    """

    def initialize(self, settings):
        # settings.project is required (the PyPI package name); url,
        # username and password are optional and may instead come from
        # ~/.pypirc or an interactive prompt.
        self.settings = settings
        self.project = settings.project
        self.url = getattr(settings,"url","https://pypi.python.org/pypi/")
        self.username = getattr(settings,"username",None)
        self.password = getattr(settings,"password",None)
        self.prompt_for_credentials()

    def prompt_for_credentials(self):
        """Fill in any missing username/password, first from the
        ``server-login`` section of ~/.pypirc, then by prompting on the
        terminal.  Raises ValueError if either ends up empty.
        """
        pypirc_file = os.path.expanduser("~/.pypirc")
        if not os.path.isfile(pypirc_file):
            pypirc = None
        else:
            pypirc = ConfigParser.RawConfigParser()
            pypirc.read([pypirc_file])
        missing_errs = (ConfigParser.NoSectionError,ConfigParser.NoOptionError)
        # Try to find username in .pypirc
        if self.username is None:
            if pypirc is not None:
                try:
                    self.username = pypirc.get("server-login","username")
                except missing_errs:
                    pass
        # Prompt for username on command-line
        if self.username is None:
            print "Username: ",
            self.username = raw_input().strip()
        # Try to find password in .pypirc
        if self.password is None:
            if pypirc is not None:
                try:
                    self.password = pypirc.get("server-login","password")
                except missing_errs:
                    pass
        # Prompt for password on command-line (hidden input)
        if self.password is None:
            self.password = getpass.getpass("Password: ")
        # Validate the values.
        if not self.username:
            raise ValueError("PyPI requires a username")
        if not self.password:
            raise ValueError("PyPI requires a password")

    def publish(self):
        """Zip up the deployed site and POST it to PyPI as a
        multipart/form-data ``doc_upload`` request, streaming the
        zipfile from a temporary file in 32KiB chunks.
        """
        super(PyPI, self).publish()
        tf = tempfile.TemporaryFile()
        try:
            # Bundle it up into a zipfile
            logger.info("building the zipfile")
            root = self.site.config.deploy_root_path
            zf = zipfile.ZipFile(tf,"w",zipfile.ZIP_DEFLATED)
            try:
                for item in root.walker.walk_files():
                    logger.info(" adding file: %s",item.path)
                    zf.write(item.path,item.get_relative_path(root))
            finally:
                zf.close()
            # Formulate the necessary bits for the HTTP POST.
            # Multipart/form-data encoding. Yuck.
            authz = self.username + ":" + self.password
            authz = "Basic " + standard_b64encode(authz)
            boundary = "-----------" + os.urandom(20).encode("hex")
            sep_boundary = "\r\n--" + boundary
            end_boundary = "\r\n--" + boundary + "--\r\n"
            content_type = "multipart/form-data; boundary=%s" % (boundary,)
            # Simple form fields sent before the file part.
            items = ((":action","doc_upload"),("name",self.project))
            body_prefix = ""
            for (name,value) in items:
                body_prefix += "--" + boundary + "\r\n"
                body_prefix += "Content-Disposition: form-data; name=\""
                body_prefix += name + "\"\r\n\r\n"
                body_prefix += value + "\r\n"
            # Opening of the "content" file part; the zip bytes are
            # streamed between body_prefix and body_suffix below.
            body_prefix += "--" + boundary + "\r\n"
            body_prefix += "Content-Disposition: form-data; name=\"content\""
            body_prefix += "; filename=\"website.zip\"\r\n\r\n"
            body_suffix = "\r\n--" + boundary + "--\r\n"
            content_length = len(body_prefix) + tf.tell() + len(body_suffix)
            # POST it up to PyPI
            logger.info("uploading to PyPI")
            url = urlparse.urlparse(self.url)
            if url.scheme == "https":
                con = httplib.HTTPSConnection(url.netloc)
            else:
                con = httplib.HTTPConnection(url.netloc)
            con.connect()
            try:
                con.putrequest("POST", self.url)
                con.putheader("Content-Type",content_type)
                con.putheader("Content-Length",str(content_length))
                con.putheader("Authorization",authz)
                con.endheaders()
                con.send(body_prefix)
                # Stream the zip payload in 32KiB chunks.
                tf.seek(0)
                data = tf.read(1024*32)
                while data:
                    con.send(data)
                    data = tf.read(1024*32)
                con.send(body_suffix)
                r = con.getresponse()
                try:
                    # PyPI tries to redirect to the page on success.
                    if r.status in (200,301,):
                        logger.info("success!")
                    else:
                        msg = "Upload failed: %s %s" % (r.status,r.reason,)
                        raise Exception(msg)
                finally:
                    r.close()
            finally:
                con.close()
        finally:
            tf.close()
| Python | 0 | |
7b73c957ad52f9b846955b96b7cc6d0938587bb3 | Add 3rd order covariance | src/conventional/cum3est.py | src/conventional/cum3est.py | #!/usr/bin/env python
from __future__ import division
import numpy as np
from scipy.linalg import hankel
import scipy.io as sio
import matplotlib.pyplot as plt
from tools import *
def cum3est(y, maxlag, nsamp, overlap, flag, k1):
    """CUM3EST Third-order cumulants.
    Should be invoked via "CUMEST" for proper parameter checks

    Parameters:
        y: input data vector (column); assumed real-valued here
        maxlag: maximum lag to be computed
        nsamp: samples per segment
        overlap: percentage overlap of segments
        flag : 'biased', biased estimates are computed [default]
               'unbiased', unbiased estimates are computed.
        k1: the fixed lag in c3(m,k1): see below

    Output:
        y_cum: estimated third-order cumulant,
               C3(m,k1)  -maxlag <= m <= maxlag
               (column vector of length 2*maxlag+1)
    """
    (n1, n2) = np.shape(y)
    N = n1 * n2
    overlap = int(np.fix(overlap / 100 * nsamp))
    nrecord = int(np.fix((N - overlap) / (nsamp - overlap)))
    nadvance = nsamp - overlap

    nlags = 2 * maxlag + 1      # output covers lags -maxlag..maxlag
    zlag = maxlag               # zero-based index of the zero lag
    y_cum = np.zeros([nlags, 1])

    if flag == 'biased':
        # Biased estimate: every lag is divided by the segment length.
        scale = np.ones([nlags, 1]) / nsamp
    else:
        # Unbiased estimate: divide each lag by the number of products
        # that actually contribute to it.
        lsamp = nsamp - abs(k1)
        counts = np.concatenate((np.arange(lsamp - maxlag, lsamp + 1),
                                 np.arange(lsamp - 1, lsamp - maxlag - 1, -1)))
        scale = 1.0 / counts.reshape(nlags, 1)

    y = y.ravel(order='F')
    ind = np.arange(nsamp)      # sample indices of the current segment
    for _ in range(nrecord):
        x = y[ind]
        x = x - np.mean(x)
        cx = np.conj(x)

        # Create the "IV" vector: x[n] * conj(x[n + k1]), offset by the
        # fixed second lag k1.
        z = x * 0
        if k1 >= 0:
            z[0:nsamp - k1] = x[0:nsamp - k1] * cx[k1:nsamp]
        else:
            z[-k1:nsamp] = x[-k1:nsamp] * cx[0:nsamp + k1]

        # Accumulate third-order cumulants for every lag m (dot
        # products, not elementwise products).
        y_cum[zlag] += np.dot(z, x)
        for k in range(1, maxlag + 1):
            y_cum[zlag - k] += np.dot(z[k:nsamp], x[0:nsamp - k])
            y_cum[zlag + k] += np.dot(z[0:nsamp - k], x[k:nsamp])

        ind = ind + nadvance

    return y_cum * scale / nrecord
| Python | 0.999372 | |
ded893c34db0c6de521e6d735d6fce30f16f3a51 | Add WSGI file. | noodleamp.wsgi | noodleamp.wsgi | import os
import pwd
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))


def path(*paths):
    # Resolve *paths* relative to the directory containing this file.
    return os.path.join(BASE_DIR, *paths)


# Point the app at its local settings module before importing it.
os.environ['NOODLEAMP_CONFIG'] = path('settings_local.py')
# http://code.google.com/p/modwsgi/wiki/ApplicationIssues#User_HOME_Environment_Variable
os.environ['HOME'] = pwd.getpwuid(os.getuid()).pw_dir
# Activate the project's virtualenv inside this interpreter
# (Python 2 idiom; execfile does not exist on Python 3).
activate_this = path('venv/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
# NOTE(review): BASE_DIR is recomputed here with an equivalent value to
# the one above -- the reassignment looks redundant; confirm and drop.
BASE_DIR = os.path.join(os.path.dirname(__file__))
if BASE_DIR not in sys.path:
    sys.path.append(BASE_DIR)
# mod_wsgi looks for a module-level callable named ``application``.
from noodleamp.server import app as application
| Python | 0 | |
33393fcfcca30edafcf06df53550f4985033c459 | Add numba error module | numba/error.py | numba/error.py | class NumbaError(Exception):
"Some error happened during compilation" | Python | 0 | |
5784158855eba090c24bb93ece991fba3b1e1a67 | Add never_cache to views | radmin/views.py | radmin/views.py | from django.http import HttpResponse
from django.contrib.admin.views.decorators import staff_member_required
from django.utils import simplejson as json
from django.views.decorators.cache import never_cache
from radmin.console import REGISTERED_NAMED_ITEMS, REGISTERED_TO_ALL
from radmin.utils import *
@never_cache
@staff_member_required
def entry_point(request):
    """ This is the entry point for radmin console.

    Returns, as a JSON list, the console controls that apply to the
    admin page the client is currently on: every globally registered
    control, plus any control registered under the current ``location``
    or ``param1`` name.  Only answers AJAX requests from staff users;
    responses are never cached.
    """
    if request.is_ajax():
        # grab get params
        location = request.GET.get('location', None) # where we are in the admin site
        param1 = request.GET.get('param1', None) # usually specifics about location, app_name or model_name etc
        param2 = request.GET.get('param2', None) # and additional field, can carry model id
        controls = []
        # first lets do the globally registered controls
        for key,value in REGISTERED_TO_ALL.items():
            controls.append({'label':value['label'],'target':key})
        # check for admin_index stuff
        if location in REGISTERED_NAMED_ITEMS:
            value = REGISTERED_NAMED_ITEMS[location]
            controls.append({'label':value['label'],'target':location, 'data':param2})
        if param1 in REGISTERED_NAMED_ITEMS:
            value = REGISTERED_NAMED_ITEMS[param1]
            controls.append({'label':value['label'],'target':param1, 'data':param2})
        return HttpResponse(json.dumps(controls), mimetype="application/json")
    # NOTE(review): non-AJAX requests fall through and return None --
    # Django treats that as an error; confirm this is intended.
@never_cache
@staff_member_required
def runner(request):
    """Execute a registered radmin console callback and return JSON.

    Looks ``target`` up first among the named items, then among the
    globally registered ones.  Named items may receive the optional
    ``data`` GET parameter as their single argument.  Only answers
    AJAX requests from staff users; responses are never cached.
    """
    if request.is_ajax():
        target = request.GET.get('target')
        data = request.GET.get('data', None)
        # now we have to do a look up and see if the target exists in commands dict
        if target in REGISTERED_NAMED_ITEMS:
            console_item = REGISTERED_NAMED_ITEMS[target]
            mod = radmin_import(console_item['callback'])
            if mod:
                try:
                    if data:
                        output = mod(data)
                    else:
                        output = mod()
                    result = {'result':'success', 'output':output, 'display_result':console_item['display_result']}
                except Exception as e:
                    # str(e): exception objects themselves are not
                    # JSON-serializable and would make json.dumps fail.
                    result = {'result':'error', 'output':str(e), 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
            else:
                result = {'result':'error', 'output':'No Module Found', 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
        elif target in REGISTERED_TO_ALL:
            console_item = REGISTERED_TO_ALL[target]
            mod = radmin_import(console_item['callback'])
            if mod:
                try:
                    result = {'result':'success', 'output':mod(),'display_result':console_item['display_result']}
                except Exception as e:
                    # str(e): keep the error payload JSON-serializable.
                    result = {'result':'error', 'output':str(e), 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
            else:
                result = {'result':'error', 'output':'No Module Found', 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
    return HttpResponse(json.dumps({'result':'not_found_error'}), mimetype="application/json")
def sample():
    """Trivial demo callback for the radmin console."""
    greeting = "Hi there!"
    return greeting
| from django.http import HttpResponse
from django.contrib.admin.views.decorators import staff_member_required
from django.utils import simplejson as json
from radmin.console import REGISTERED_NAMED_ITEMS, REGISTERED_TO_ALL
from radmin.utils import *
@staff_member_required
def entry_point(request):
    """ This is the entry point for radmin console.

    Returns, as a JSON list, the console controls that apply to the
    admin page the client is currently on: every globally registered
    control, plus any control registered under the current ``location``
    or ``param1`` name.  Only answers AJAX requests from staff users.
    """
    if request.is_ajax():
        # grab get params
        location = request.GET.get('location', None) # where we are in the admin site
        param1 = request.GET.get('param1', None) # usually specifics about location, app_name or model_name etc
        param2 = request.GET.get('param2', None) # and additional field, can carry model id
        controls = []
        # first lets do the globally registered controls
        for key,value in REGISTERED_TO_ALL.items():
            controls.append({'label':value['label'],'target':key})
        # check for admin_index stuff
        if location in REGISTERED_NAMED_ITEMS:
            value = REGISTERED_NAMED_ITEMS[location]
            controls.append({'label':value['label'],'target':location, 'data':param2})
        if param1 in REGISTERED_NAMED_ITEMS:
            value = REGISTERED_NAMED_ITEMS[param1]
            controls.append({'label':value['label'],'target':param1, 'data':param2})
        return HttpResponse(json.dumps(controls), mimetype="application/json")
@staff_member_required
def runner(request):
    """Execute a registered radmin console callback and return JSON.

    Looks ``target`` up first among the named items, then among the
    globally registered ones; named items may receive the optional
    ``data`` GET parameter.  Only answers AJAX requests.
    """
    if request.is_ajax():
        target = request.GET.get('target')
        data = request.GET.get('data', None)
        # now we have to do a look up and see if the target exists in commands dict
        if target in REGISTERED_NAMED_ITEMS:
            console_item = REGISTERED_NAMED_ITEMS[target]
            mod = radmin_import(console_item['callback'])
            if mod:
                try:
                    if data:
                        output = mod(data)
                    else:
                        output = mod()
                    result = {'result':'success', 'output':output, 'display_result':console_item['display_result']}
                except Exception as e:
                    # NOTE(review): exception objects are not JSON
                    # serializable -- json.dumps below may fail; consider
                    # str(e).
                    result = {'result':'error', 'output':e, 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
            else:
                result = {'result':'error', 'output':'No Module Found', 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
        elif target in REGISTERED_TO_ALL:
            console_item = REGISTERED_TO_ALL[target]
            mod = radmin_import(console_item['callback'])
            if mod:
                try:
                    result = {'result':'success', 'output':mod(),'display_result':console_item['display_result']}
                except Exception as e:
                    # NOTE(review): same serializability concern as above.
                    result = {'result':'error', 'output':e, 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
            else:
                result = {'result':'error', 'output':'No Module Found', 'display_result':console_item['display_result']}
                return HttpResponse(json.dumps(result), mimetype="application/json")
    return HttpResponse(json.dumps({'result':'not_found_error'}), mimetype="application/json")
def sample():
    # Trivial demo callback; returns a fixed greeting string.
    return "Hi there!"
| Python | 0 |
e8a5720c6959a3166c1c8a373ef00a390b89ac22 | Add rasp2geotiff script | rasp2geotiff.py | rasp2geotiff.py | #!/usr/bin/env python
import xcsoar
import osr
import gdal
import numpy as np
import os, sys
import math
# Grid index bounds parsed from the RASP "Indexs=" header section.
idx_min_x = idx_max_x = idx_min_y = idx_max_y = 0
# Grid spacing in x and y, from the "Proj=" header section.
spa_x = spa_y = 0
# Lambert projection parameters from the "Proj=" section
# (used to build the proj4 string below).
lat_0 = lat_1 = lon_0 = 0
# Grid centre coordinates.
lat_c = lon_c = 0
# 2-D float32 array holding the parsed grid values (filled by read_data).
raster_data = None
def get_parameters(line):
    """Parse the RASP "Model=" header line, filling in the module-level
    grid/projection globals from its "Indexs=" and "Proj=" sections.
    Only Lambert conformal projections are supported.
    """
    global idx_min_x, idx_max_x, idx_min_y, idx_max_y, spa_x, spa_y, lat_0, lat_1, lon_0, lat_c, lon_c
    splitted = line.split(' ')
    # Scan for the "Indexs=" token; the next four fields are the grid
    # index bounds.
    i = 0
    while splitted[i] != 'Indexs=':
        i += 1
    idx_min_x = int(splitted[i + 1])
    idx_max_x = int(splitted[i + 2])
    idx_min_y = int(splitted[i + 3])
    idx_max_y = int(splitted[i + 4])
    # Scan for the "Proj=" token; the following fields are the
    # projection type plus its parameters.
    i = 0
    while splitted[i] != 'Proj=':
        i += 1
    if splitted[i + 1] != 'lambert':
        print "Error - no lambert projection found..."
        return
    spa_x = float(splitted[i + 2])
    spa_y = float(splitted[i + 3])
    lat_0 = float(splitted[i + 4])
    lat_1 = float(splitted[i + 5])
    lon_0 = float(splitted[i + 6])
    lat_c = float(splitted[i + 7])
    lon_c = float(splitted[i + 8])
def read_data(line, idx):
    """Parse one space-separated grid row (row number *idx*) into
    raster_data, storing rows in reverse vertical order.
    """
    splitted = line.split(' ')
    if len(splitted) != idx_max_x - idx_min_x + 1:
        print "Error - grid resolution wrong?!?"
        return
    for i in range(len(splitted)):
        # Flip the row index so the last input row lands in row 0.
        raster_data[(idx_max_y - idx_min_y) - idx - 1, i] = float(splitted[i])
        #raster_data[idx, i] = float(splitted[i])
# Parse the RASP file given on the command line, then write the grid out
# as a Lambert-projected GeoTIFF next to it.
i = 0
for line in open(sys.argv[1]):
    i += 1
    # NOTE(review): lines from file iteration keep their trailing
    # newline, so this equality test may never match '---' -- confirm.
    if line == '---':
        continue
    if line.startswith('Model='):
        get_parameters(line)
        # NOTE(review): allocated as (x-count, y-count) but read_data
        # indexes it [row, col] and WriteArray below expects
        # (rows, cols); this only lines up for square grids -- verify.
        raster_data = np.zeros((idx_max_x - idx_min_x + 1, idx_max_y - idx_min_y + 1), dtype=np.float32)
    # Data rows start on line 5 of the file.
    if i >= 5:
        read_data(line, i - 5)
# Build the Lambert conformal conic CRS from the parsed parameters.
lcc = osr.SpatialReference()
lcc.ImportFromProj4("+proj=lcc +lat_1=" + str(lat_1) + " +lat_0=" + str(lat_0) + " +lon_0=" + str(lon_0) + " +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs")
epsg4326 = osr.SpatialReference()
epsg4326.ImportFromEPSG(4326)
epsg4326_to_lcc = osr.CoordinateTransformation(epsg4326, lcc)
width = (idx_max_x - idx_min_x) + 1
height = (idx_max_y - idx_min_y) + 1
# Project the grid centre into LCC metres and derive the geotransform
# (origin at the top-left corner, y spacing negative = north-up).
center_lcc = epsg4326_to_lcc.TransformPoint(lon_c, lat_c)
geotransform = [center_lcc[0] - width * spa_x / 2, spa_x, 0, center_lcc[1] + height * spa_y / 2, 0, -spa_y]
driver = gdal.GetDriverByName('GTiff')
dst_ds = driver.Create(sys.argv[1] + ".tiff", width, height, 1, gdal.GDT_Float32)
dst_ds.SetProjection(lcc.ExportToWkt())
dst_ds.SetGeoTransform(geotransform)
dst_ds.GetRasterBand(1).WriteArray(raster_data)
# Setting to None closes/flushes the dataset (GDAL idiom).
dst_ds = None
| Python | 0 | |
c1e801798d3b7e8d4c9ba8a11f79ffa92bf182f5 | Add test cases for the logger | test/test_logger.py | test/test_logger.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
from pingparsing import (
set_logger,
set_log_level,
)
import pytest
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
| Python | 0.000002 | |
c559cdd34a2dc8f3129c1fed5235291f22329368 | install crontab | install_crontab.py | install_crontab.py | #!/usr/bin/python2
from crontab import CronTab
import sys
# Comment used to tag our crontab entries so they can be found/removed.
CRONTAB_TAG = "ubuntu-cleanup-annoifier"

def install_cron():
    """Append a daily (00:00) job to the user's crontab, tagged with
    CRONTAB_TAG so uninstall_cron() can remove it later.
    """
    my_cron = CronTab(user=True)
    # job = my_cron.new(command=executable_path(args))
    # NOTE(review): placeholder command left in -- the commented line
    # above suggests the real executable was intended here; confirm.
    job = my_cron.new(command="dummy123")
    job.minute.on(0)
    job.hour.on(0)
    job.enable()
    job.set_comment(CRONTAB_TAG)
    my_cron.write_to_user( user=True )
def uninstall_cron():
    """Remove every crontab entry previously tagged with CRONTAB_TAG."""
    my_cron = CronTab(user=True)
    my_cron.remove_all(comment=CRONTAB_TAG)
    my_cron.write_to_user( user=True )
if __name__ == "__main__":
    # Dispatch on the first CLI argument: "i" installs the cron job,
    # "u" uninstalls it.  Anything else -- including a missing argument,
    # which previously raised IndexError -- prints usage and exits 1.
    if len(sys.argv) < 2 or sys.argv[1] not in ("i", "u"):
        sys.stderr.write("usage: %s i|u\n" % sys.argv[0])
        sys.exit(1)
    if sys.argv[1] == "i":
        install_cron()
    else:
        uninstall_cron()
| Python | 0.000001 | |
ca8a7320cbec1d4fa71ec5a7f909908b8765f573 | Allow underscores for release tags (#4976) | test_utils/scripts/circleci/get_tagged_package.py | test_utils/scripts/circleci/get_tagged_package.py | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to determine package from tag.
Get the current package directory corresponding to the Circle Tag.
"""
from __future__ import print_function
import os
import re
import sys
TAG_RE = re.compile(r"""
^
(?P<pkg>
(([a-z]+)[_-])*) # pkg-name-with-hyphens-or-underscores (empty allowed)
([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints)
$
""", re.VERBOSE)
TAG_ENV = 'CIRCLE_TAG'
ERROR_MSG = '%s env. var. not set' % (TAG_ENV,)
BAD_TAG_MSG = 'Invalid tag name: %s. Expected pkg-name-x.y.z'
CIRCLE_CI_SCRIPTS_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.realpath(
os.path.join(CIRCLE_CI_SCRIPTS_DIR, '..', '..', '..'))
def main():
    """Get the current package directory.

    Reads the tag name from the CIRCLE_TAG env. var. and prints the
    corresponding package directory so callers can consume it; prints
    the repository root when the tag carries no package prefix.  Exits
    with status 1 (message on stderr) when the variable is missing or
    the tag does not match the expected pattern.
    """
    if TAG_ENV not in os.environ:
        print(ERROR_MSG, file=sys.stderr)
        sys.exit(1)

    tag_name = os.environ[TAG_ENV]
    match = TAG_RE.match(tag_name)
    if match is None:
        print(BAD_TAG_MSG % (tag_name,), file=sys.stderr)
        sys.exit(1)

    pkg_name = match.group('pkg')
    if pkg_name is None:
        print(ROOT_DIR)
    else:
        # Strip the trailing separator -- which may now be an underscore
        # as well as a hyphen -- before mapping the tag to a directory.
        pkg_dir = pkg_name.rstrip('-_').replace('-', '_')
        print(os.path.join(ROOT_DIR, pkg_dir))


if __name__ == '__main__':
    main()
| # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to determine package from tag.
Get the current package directory corresponding to the Circle Tag.
"""
from __future__ import print_function
import os
import re
import sys
TAG_RE = re.compile(r"""
^
(?P<pkg>
(([a-z]+)-)*) # pkg-name-with-hyphens- (empty allowed)
([0-9]+)\.([0-9]+)\.([0-9]+) # Version x.y.z (x, y, z all ints)
$
""", re.VERBOSE)
TAG_ENV = 'CIRCLE_TAG'
ERROR_MSG = '%s env. var. not set' % (TAG_ENV,)
BAD_TAG_MSG = 'Invalid tag name: %s. Expected pkg-name-x.y.z'
CIRCLE_CI_SCRIPTS_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.realpath(
os.path.join(CIRCLE_CI_SCRIPTS_DIR, '..', '..', '..'))
def main():
    """Get the current package directory.

    Reads the tag name from the CIRCLE_TAG env. var. and prints the
    corresponding package directory so callers can consume it; prints
    the repository root when the tag carries no package prefix.  Exits
    with status 1 (message on stderr) on a missing variable or a
    malformed tag.
    """
    if TAG_ENV not in os.environ:
        print(ERROR_MSG, file=sys.stderr)
        sys.exit(1)

    tag_name = os.environ[TAG_ENV]
    match = TAG_RE.match(tag_name)
    if match is None:
        print(BAD_TAG_MSG % (tag_name,), file=sys.stderr)
        sys.exit(1)

    pkg_name = match.group('pkg')
    if pkg_name is None:
        print(ROOT_DIR)
    else:
        # Drop the trailing hyphen, then map remaining hyphens to
        # underscores to get the on-disk package directory name.
        pkg_dir = pkg_name.rstrip('-').replace('-', '_')
        print(os.path.join(ROOT_DIR, pkg_dir))


if __name__ == '__main__':
    main()
| Python | 0 |
ee679b745e955e3d555b49500ae2d09aa3336abb | Add a util function for SNMP | confluent_server/confluent/snmputil.py | confluent_server/confluent/snmputil.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This provides a simplified wrapper around snmp implementation roughly
# mapping to the net-snmp commands
# net-snmp-python was considered as the API is cleaner, but the ability to
# patch pysnmp to have it be eventlet friendly has caused it's selection
# This module simplifies the complex hlapi pysnmp interface
import confluent.exceptions as exc
import eventlet
from eventlet.support.greendns import getaddrinfo
import socket
snmp = eventlet.import_patched('pysnmp.hlapi')
def _get_transport(name):
    """Build a UDP transport target for *name*, picking IPv4 or IPv6.

    pysnmp will not determine the address family on its own, so resolve
    the host first and choose the matching transport class.
    """
    family, _, _, _, sockaddr = getaddrinfo(name, 161, 0, socket.SOCK_DGRAM)[0]
    if family == socket.AF_INET6:
        return snmp.Udp6TransportTarget(sockaddr)
    return snmp.UdpTransportTarget(sockaddr)
def walk(server, oid, secret, username=None, context=None):
    """Walk the children of an OID, yielding each answer.

    Roughly equivalent to snmpwalk, performed as an snmpbulkwalk where
    possible.  With no username the secret is treated as an SNMPv2c
    community string; with a username, SNMPv3 is used with SHA
    authentication and DES privacy, the secret serving as both keys.

    :param server: The network name/address to target
    :param oid: The SNMP object identifier, numeric or ``MIB::field``
    :param secret: The community string or password
    :param username: The username for SNMPv3
    :param context: The SNMPv3 context or index for community string indexing
    """
    # Reasonable defaults are assumed throughout; parameters to override
    # them (e.g. the weak DES cipher, which some devices nonetheless
    # require) may be added later.
    transport = _get_transport(server)
    if '::' in oid:
        mibname, fieldname = oid.split('::')
        target = snmp.ObjectType(snmp.ObjectIdentity(mibname, fieldname))
    else:
        target = snmp.ObjectType(snmp.ObjectIdentity(oid))
    if username is None:
        # No username given: treat the secret as a v2c community string.
        creds = snmp.CommunityData(secret, mpModel=1)
    else:
        creds = snmp.UsmUserData(username, authKey=secret, privKey=secret)
    responses = snmp.bulkCmd(
        snmp.SnmpEngine(), creds, transport, snmp.ContextData(context),
        0, 10, target, lexicographicMode=False)
    for errstr, errnum, erridx, answers in responses:
        if errstr:
            raise exc.TargetEndpointUnreachable(str(errstr))
        elif errnum:
            raise exc.ConfluentException(errnum.prettyPrint())
        for answer in answers:
            yield answer
if __name__ == '__main__':
    import sys
    # Ad-hoc smoke test: walk an OID on a host using the 'public'
    # community string and print each OID/value pair.
    for kp in walk(sys.argv[1], sys.argv[2], 'public'):
        print(str(kp[0]))
        print(str(kp[1]))
| Python | 0 | |
7f09311b2f7825bf1446a6e476f8d1909d501699 | Add threshold helper | driver_station/src/targeting/threshold_helper.py | driver_station/src/targeting/threshold_helper.py | #
# This file is part of KwarqsDashboard.
#
# KwarqsDashboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# KwarqsDashboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KwarqsDashboard. If not, see <http://www.gnu.org/licenses/>.
#
import math
import sys
import cv2
import numpy as np
import target_data
from common import settings
# using python 2.7, get some python 3 builtins
from future_builtins import zip
import logging
logger = logging.getLogger(__name__)
class ThresholdHelper(object):
    """Thresholds camera frames in HSV space to isolate the target area.

    The six threshold attributes (thresh_hue_p/thresh_hue_n,
    thresh_sat_p/thresh_sat_n, thresh_val_p/thresh_val_n) are assigned by
    the owner of this object before processImage is called; they are
    deliberately not initialized here.
    """

    def __init__(self):

        # Cached (height, width) of the last frame; triggers buffer
        # reallocation whenever the camera resolution changes.
        self.size = None

        # debug settings
        self.show_hue = False
        self.show_sat = False
        self.show_val = False
        self.show_bin = False
        self.show_bin_overlay = False

        # thresholds are not initialized here, someone else does it

    def processImage(self, img):
        '''
            Processes an image and thresholds it. Returns the original
            image, and a binary version of the image indicating the area
            that was filtered

            :param img: BGR image as a numpy array (as produced by OpenCV)
            :returns: img, bin
        '''

        # reinitialize any time the image size changes
        if self.size is None or self.size[0] != img.shape[0] or self.size[1] != img.shape[1]:
            h, w = img.shape[:2]
            self.size = (h, w)

            # these are preallocated so we aren't allocating all the time
            self.bin = np.empty((h, w, 1), dtype=np.uint8)
            self.hsv = np.empty((h, w, 3), dtype=np.uint8)
            self.hue = np.empty((h, w, 1), dtype=np.uint8)
            self.sat = np.empty((h, w, 1), dtype=np.uint8)
            self.val = np.empty((h, w, 1), dtype=np.uint8)

            # for overlays
            self.zeros = np.zeros((h, w, 1), dtype=np.bool)

            # these settings should be adjusted according to the image size
            # and noise characteristics

            # TODO: What's the optimal setting for this? For smaller images, we
            # cannot morph as much, or the features blend into each other.

            # TODO: tune kMinWidth

            # Note: if you set k to an even number, the detected
            # contours are offset by some N pixels. Sometimes.

            if w <= 320:
                k = 1
                offset = (0,0)
                self.kHoleClosingIterations = 2 # originally 9

                self.kMinWidth = 2

                # drawing
                self.kThickness = 1
                self.kTgtThickness = 1

                # accuracy of polygon approximation
                self.kPolyAccuracy = 10.0

            elif w <= 480:
                k = 2
                offset = (1,1)
                self.kHoleClosingIterations = 9 # originally 9

                self.kMinWidth = 5

                # drawing
                self.kThickness = 1
                self.kTgtThickness = 2

                # accuracy of polygon approximation
                self.kPolyAccuracy = 15.0

            else:
                k = 3
                offset = (1,1)
                self.kHoleClosingIterations = 6 # originally 9

                self.kMinWidth = 10

                # drawing
                self.kThickness = 1
                self.kTgtThickness = 2

                # accuracy of polygon approximation
                self.kPolyAccuracy = 20.0

            self.morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (k,k), anchor=offset)

            # Use the module-level logger (previously this went through the
            # root logger via logging.info, bypassing this module's logger).
            logger.info("New image size: %sx%s, morph size set to %s, %s iterations", w,h,k, self.kHoleClosingIterations)

        # get this outside the loop
        ih, iw = self.size
        centerOfImageY = ih/2.0

        # convert to HSV
        cv2.cvtColor(img, cv2.cv.CV_BGR2HSV, self.hsv)
        cv2.split(self.hsv, [self.hue, self.sat, self.val])

        # Threshold each component separately

        # Hue
        cv2.threshold(self.hue, self.thresh_hue_p, 255, type=cv2.THRESH_BINARY, dst=self.bin)
        cv2.threshold(self.hue, self.thresh_hue_n, 255, type=cv2.THRESH_BINARY_INV, dst=self.hue)
        cv2.bitwise_and(self.hue, self.bin, self.hue)

        if self.show_hue:
            # overlay green where the hue threshold is non-zero
            img[np.dstack((self.zeros, self.hue != 0, self.zeros))] = 255

        # Saturation
        cv2.threshold(self.sat, self.thresh_sat_p, 255, type=cv2.THRESH_BINARY, dst=self.bin)
        cv2.threshold(self.sat, self.thresh_sat_n, 255, type=cv2.THRESH_BINARY_INV, dst=self.sat)
        cv2.bitwise_and(self.sat, self.bin, self.sat)

        if self.show_sat:
            # overlay blue where the sat threshold is non-zero
            img[np.dstack((self.sat != 0, self.zeros, self.zeros))] = 255

        # Value
        cv2.threshold(self.val, self.thresh_val_p, 255, type=cv2.THRESH_BINARY, dst=self.bin)
        cv2.threshold(self.val, self.thresh_val_n, 255, type=cv2.THRESH_BINARY_INV, dst=self.val)
        cv2.bitwise_and(self.val, self.bin, self.val)

        if self.show_val:
            # overlay red where the val threshold is non-zero
            img[np.dstack((self.zeros, self.zeros, self.val != 0))] = 255

        # Combine the results to obtain our binary image which should for the most
        # part only contain pixels that we care about
        cv2.bitwise_and(self.hue, self.sat, self.bin)
        cv2.bitwise_and(self.bin, self.val, self.bin)

        # Fill in any gaps using binary morphology
        cv2.morphologyEx(self.bin, cv2.MORPH_CLOSE, self.morphKernel, dst=self.bin, iterations=self.kHoleClosingIterations)

        if self.show_bin:
            cv2.imshow('bin', self.bin)

        # overlay the binarized image on the displayed image, instead of a separate picture
        if self.show_bin_overlay:
            img[np.dstack((self.bin, self.bin, self.bin)) != 0] = 255

        return img, self.bin
| Python | 0.000009 | |
74e24debf55b003f1d56d35f4b040d91a0698e0a | Add example for cluster centroids method | example/under-sampling/plot_cluster_centroids.py | example/under-sampling/plot_cluster_centroids.py | """
=================
Cluster centroids
=================
An illustration of the cluster centroids method.
"""
print(__doc__)

import matplotlib.pyplot as plt
import seaborn as sns
sns.set()

# Define some color for the plotting
almost_black = '#262626'
palette = sns.color_palette()

from sklearn.datasets import make_classification
from sklearn.decomposition import PCA

from unbalanced_dataset.under_sampling import ClusterCentroids

# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
                           n_informative=3, n_redundant=1, flip_y=0,
                           n_features=20, n_clusters_per_class=1,
                           n_samples=5000, random_state=10)

# Instanciate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)

# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)

# Apply the random under-sampling
cc = ClusterCentroids()
X_resampled, y_resampled = cc.fit_transform(X, y)
X_res_vis = pca.transform(X_resampled)

# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2)

# Left panel: the full imbalanced dataset projected onto the first two
# principal components.
ax1.scatter(X_vis[y == 0, 0], X_vis[y == 0, 1], label="Class #0", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[0], linewidth=0.15)
ax1.scatter(X_vis[y == 1, 0], X_vis[y == 1, 1], label="Class #1", alpha=0.5,
            edgecolor=almost_black, facecolor=palette[2], linewidth=0.15)
ax1.set_title('Original set')

# Right panel: the same feature space after under-sampling with cluster
# centroids.
ax2.scatter(X_res_vis[y_resampled == 0, 0], X_res_vis[y_resampled == 0, 1],
            label="Class #0", alpha=.5, edgecolor=almost_black,
            facecolor=palette[0], linewidth=0.15)
ax2.scatter(X_res_vis[y_resampled == 1, 0], X_res_vis[y_resampled == 1, 1],
            label="Class #1", alpha=.5, edgecolor=almost_black,
            facecolor=palette[2], linewidth=0.15)
ax2.set_title('Cluster centroids')

plt.show()
| Python | 0.000001 | |
25cd25dab4de9e6963ffa622474b3f0bdcdc1e48 | Create preprocessor.py | interpreter/preprocessor.py | interpreter/preprocessor.py | Python | 0.000006 | ||
0c1ccd5180601d3ed3f5dc98b3330d40c014f7c0 | Add simul. (#3300) | var/spack/repos/builtin/packages/simul/package.py | var/spack/repos/builtin/packages/simul/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Simul(Package):
    """simul is an MPI coordinated test of parallel
       filesystem system calls and library functions. """

    homepage = "https://github.com/LLNL/simul"
    url      = "https://github.com/LLNL/simul/archive/1.16.tar.gz"

    # Release tarballs and their md5 checksums.
    version('1.16', 'd616c1046a170c1e1b7956c402d23a95')
    version('1.15', 'a5744673c094a87c05c6f0799d1f496f')
    version('1.14', 'f8c14f0bac15741e2af354e3f9a0e30f')
    version('1.13', '8a80a62d569557715d6c9c326e39a8ef')

    depends_on('mpi')

    def install(self, spec, prefix):
        # Upstream Makefile has no install target; build the single
        # binary and copy it into place ourselves.
        make('simul')
        mkdirp(prefix.bin)
        install('simul', prefix.bin)
| Python | 0.000027 | |
a4bd7e17d02f83e377287564788a3eda1000029a | add adp tests | pyiid/tests/test_scatter_adp.py | pyiid/tests/test_scatter_adp.py | from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
__author__ = 'christopher'

# Earlier, tighter tolerances kept for reference:
# rtol = 4e-4
# atol = 4e-4
# Relative/absolute tolerances used by every comparison check below.
rtol = 5e-4
atol = 5e-5
# Actual Tests
def check_meta(value):
    """Dispatch a packed test case: value[0] is the check function and the
    remaining items form the parameter tuple it receives."""
    check_func = value[0]
    params = value[1:]
    check_func(params)
def check_scatter_fq(value):
    """
    Check two processor, algorithm pairs against each other for FQ calculation

    :param value: tuple of (atoms, experiment dict, ...,
        ((processor1, algorithm1), (processor2, algorithm2)))
    :return: None; raises AssertionError when the answers disagree
    """
    # set everything up
    atoms, exp = value[:2]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    proc1, alg1 = value[-1][0]
    proc2, alg2 = value[-1][1]

    # run algorithm 1
    scat.set_processor(proc1, alg1)
    ans1 = scat.get_fq(atoms)

    # run algorithm 2
    scat.set_processor(proc2, alg2)
    ans2 = scat.get_fq(atoms)

    # test
    if not stats_check(ans1, ans2, rtol, atol):
        # Dump the failing parameter combination for easier debugging.
        print value
    assert_allclose(ans1, ans2, rtol=rtol, atol=atol)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
    # assert False
def check_scatter_grad_fq(value):
    """
    Check two processor, algorithm pairs against each other for gradient FQ
    calculation

    :param value: tuple of (atoms, experiment dict, ...,
        ((processor1, algorithm1), (processor2, algorithm2)))
    :return: None; raises AssertionError when the answers disagree
    """
    # set everything up
    atoms, exp = value[:2]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    proc1, alg1 = value[-1][0]
    proc2, alg2 = value[-1][1]

    # run algorithm 1
    scat.set_processor(proc1, alg1)
    ans1 = scat.get_grad_fq(atoms)

    # run algorithm 2
    scat.set_processor(proc2, alg2)
    ans2 = scat.get_grad_fq(atoms)

    # test
    if not stats_check(ans1, ans2, rtol, atol):
        # Dump the failing parameter combination for easier debugging.
        print value
    assert_allclose(ans1, ans2, rtol=rtol, atol=atol)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
def check_scatter_sq(value):
    """
    Check two processor, algorithm pairs against each other for SQ calculation

    :param value: tuple of (atoms, experiment dict, ...,
        ((processor1, algorithm1), (processor2, algorithm2)))
    :return: None; raises AssertionError when the answers disagree
    """
    # set everything up
    atoms, exp = value[:2]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    proc1, alg1 = value[-1][0]
    proc2, alg2 = value[-1][1]

    # run algorithm 1
    scat.set_processor(proc1, alg1)
    ans1 = scat.get_sq(atoms)

    # run algorithm 2
    scat.set_processor(proc2, alg2)
    ans2 = scat.get_sq(atoms)

    # test
    # Use the stats_check result (previously discarded) to dump the failing
    # parameter combination, matching check_scatter_fq's behaviour.
    if not stats_check(ans1, ans2, rtol, atol):
        print(value)
    assert_allclose(ans1, ans2, rtol=rtol, atol=atol)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
def check_scatter_iq(value):
    """
    Check two processor, algorithm pairs against each other for IQ calculation

    :param value: tuple of (atoms, experiment dict, ...,
        ((processor1, algorithm1), (processor2, algorithm2)))
    :return: None; raises AssertionError when the answers disagree
    """
    # set everything up
    atoms, exp = value[:2]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    proc1, alg1 = value[-1][0]
    proc2, alg2 = value[-1][1]

    # run algorithm 1
    scat.set_processor(proc1, alg1)
    ans1 = scat.get_iq(atoms)

    # run algorithm 2
    scat.set_processor(proc2, alg2)
    ans2 = scat.get_iq(atoms)

    # test
    # Use the stats_check result (previously discarded) to dump the failing
    # parameter combination, matching check_scatter_fq's behaviour.
    if not stats_check(ans1, ans2, rtol, atol):
        print(value)
    assert_allclose(ans1, ans2, rtol=rtol, atol=atol)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
def check_scatter_pdf(value):
    """
    Check two processor, algorithm pairs against each other for PDF calculation

    :param value: tuple of (atoms, experiment dict, ...,
        ((processor1, algorithm1), (processor2, algorithm2)))
    :return: None; raises AssertionError when the answers disagree
    """
    # set everything up
    atoms, exp = value[:2]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    proc1, alg1 = value[-1][0]
    proc2, alg2 = value[-1][1]

    # run algorithm 1
    scat.set_processor(proc1, alg1)
    ans1 = scat.get_pdf(atoms)

    # run algorithm 2
    scat.set_processor(proc2, alg2)
    ans2 = scat.get_pdf(atoms)

    # test
    # Use the stats_check result (previously discarded) to dump the failing
    # parameter combination, matching check_scatter_fq's behaviour.
    if not stats_check(ans1, ans2, rtol, atol):
        print(value)
    assert_allclose(ans1, ans2, rtol=rtol, atol=atol)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
def check_scatter_grad_pdf(value):
    """
    Check two processor, algorithm pairs against each other for gradient PDF
    calculation

    :param value: tuple of (atoms, experiment dict, ...,
        ((processor1, algorithm1), (processor2, algorithm2)))
    :return: None; raises AssertionError when the answers disagree
    """
    # set everything up
    atoms, exp = value[:2]
    scat = ElasticScatter(exp_dict=exp, verbose=True)
    proc1, alg1 = value[-1][0]
    proc2, alg2 = value[-1][1]

    # run algorithm 1
    scat.set_processor(proc1, alg1)
    ans1 = scat.get_grad_pdf(atoms)

    # run algorithm 2
    scat.set_processor(proc2, alg2)
    ans2 = scat.get_grad_pdf(atoms)

    # test
    # Use the stats_check result (previously discarded) to dump the failing
    # parameter combination, matching check_scatter_fq's behaviour.
    if not stats_check(ans1, ans2, rtol, atol):
        print(value)
    assert_allclose(ans1, ans2, rtol=rtol, atol=atol)
    # make certain we did not give back the same pointer
    assert ans1 is not ans2
# Every pairwise-comparison check to run against each parameter set.
tests = [
    check_scatter_fq,
    check_scatter_sq,
    check_scatter_iq,
    check_scatter_pdf,
    check_scatter_grad_fq,
    check_scatter_grad_pdf
]

# Cartesian product: (check function, atoms, experiment, processor pair).
test_data = list(product(
    tests,
    test_adp_atoms, test_exp, comparison_pro_alg_pairs))
def test_meta():
    """Yield (runner, params) pairs so nose executes every combination."""
    for params in test_data:
        yield check_meta, params
if __name__ == '__main__':
    import nose

    # Run this module's generator tests directly (unbuffered, verbose).
    nose.runmodule(argv=[
        '-s',
        '--with-doctest',
        # '--nocapture',
        '-v',
        # '-x',
    ],
        # env={"NOSE_PROCESSES": 1, "NOSE_PROCESS_TIMEOUT": 599},
        exit=False)
exit=False)
| Python | 0 | |
6cb953dc01a77bc549c53cc325a741d1952ed6b6 | Bump FIDO version to 1.3.12 | fpr/migrations/0025_update_fido_1312.py | fpr/migrations/0025_update_fido_1312.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def data_migration_up(apps, schema_editor):
    """Bump the FIDO identification tool to 1.3.12.

    Corrects a character-spacing issue bug identified in PRONOM94 (again).
    A new IDCommand is cloned from the 1.3.10 one and the old command is
    disabled rather than removed, preserving history.
    """
    IDTool = apps.get_model('fpr', 'IDTool')
    IDCommand = apps.get_model('fpr', 'IDCommand')

    # Point the Fido tool record at the new release.
    IDTool.objects.filter(
        uuid='c33c9d4d-121f-4db1-aa31-3d248c705e44',
    ).update(version='1.3.12', slug='fido-1312')

    # Clone the previous command so the new tool version has one, then
    # retire the original.
    previous_command = IDCommand.objects.get(
        uuid='e586f750-6230-42d7-8d12-1e24ca2aa658')
    IDCommand.objects.create(
        uuid='213d1589-c255-474f-81ac-f0a618181e40',
        description=u'Identify using Fido 1.3.12',
        config=previous_command.config,
        script=previous_command.script,
        script_type=previous_command.script_type,
        tool=IDTool.objects.get(uuid='c33c9d4d-121f-4db1-aa31-3d248c705e44'),
        enabled=True
    )
    previous_command.enabled = False
    previous_command.save()
def data_migration_down(apps, schema_editor):
    """Roll FIDO back to 1.3.10 and re-enable its command."""
    IDTool = apps.get_model('fpr', 'IDTool')
    IDCommand = apps.get_model('fpr', 'IDCommand')

    # Drop the command added by the forwards migration.
    IDCommand.objects.filter(
        uuid='213d1589-c255-474f-81ac-f0a618181e40').delete()

    # Restore the previous tool version and slug.
    IDTool.objects.filter(
        uuid='c33c9d4d-121f-4db1-aa31-3d248c705e44',
    ).update(version='1.3.10', slug='fido-1310')

    # Re-enable the 1.3.10 command.
    IDCommand.objects.filter(
        uuid='e586f750-6230-42d7-8d12-1e24ca2aa658').update(enabled=True)
class Migration(migrations.Migration):
    # Must run after the previous FIDO update so the 1.3.10 rows exist.
    dependencies = [
        ('fpr', '0024_update_fido'),
    ]

    operations = [
        migrations.RunPython(data_migration_up, data_migration_down),
    ]
| Python | 0 | |
8fa9a54c9a5ee683fc9e9d361a4eb7affe5e83ed | Add functions to paint game of life to screen | game_of_life.py | game_of_life.py | #!/usr/bin/env python
from curses import wrapper
from time import sleep
def enumerate_lines(matrix):
    """Yield (row_index, text) pairs for *matrix*, drawing truthy cells
    as '*' and falsy cells as ' '."""
    for index, cells in enumerate(matrix):
        yield index, ''.join('*' if cell else ' ' for cell in cells)
def paint(stdscr, matrix):
    """Redraw the whole curses screen from *matrix*."""
    stdscr.clear()
    for row, text in enumerate_lines(matrix):
        stdscr.addstr(row, 0, text)
    stdscr.refresh()
# Edge length; the grids below are (size+1) x (size+1) booleans.
size = 50

# Pattern 1: an X -- cells on either diagonal are "on".
m1 = [
    [i == j or i == size - j for j in xrange(0, size + 1)]
    for i in xrange(0, size + 1)
]
# Pattern 2: a plus -- cells on the centre row or centre column are "on".
m2 = [
    [i == size / 2 or j == size / 2 for j in xrange(0, size + 1)]
    for i in xrange(0, size + 1)
]
def main(stdscr):
    # Flip between the X and plus patterns every half second, 100 frames.
    for i in xrange(0,100):
        matrix = m1 if i % 2 else m2
        paint(stdscr, matrix)
        sleep(0.5)
    # Block on a keypress so the final frame stays visible.
    stdscr.getkey()

# curses.wrapper sets up and tears down the terminal around main().
wrapper(main)
| Python | 0 | |
ff8cee4f98dde0533751dfd15308c5fdfdec3982 | test file for rapid iteration | tests/quick_test.py | tests/quick_test.py | """
nosetests -sv --nologcapture tests/quick_test.py
"""
import datetime
import os
import random
import sys

# Make this tests/ directory importable (for utils_testing below).
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path

# Tell auto_ml it is running under the test suite, and pin Keras to the
# Theano backend before anything imports it.
os.environ['is_test_suite'] = 'True'
os.environ['KERAS_BACKEND'] = 'theano'

from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model

from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score

import dill
import numpy as np

import utils_testing as utils
# def regression_test():
# # a random seed of 42 has ExtraTreesRegressor getting the best CV score, and that model doesn't generalize as well as GradientBoostingRegressor.
# np.random.seed(0)
# df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
# column_descriptions = {
# 'MEDV': 'output'
# , 'CHAS': 'categorical'
# }
# ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# ml_predictor.train(df_boston_train, model_names=['DeepLearningRegressor'])
# test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
# print('test_score')
# print(test_score)
# assert -3.35 < test_score < -2.8
def classification_test(model_name=None):
    """Train a classifier on the Titanic dataset and check its hold-out score.

    :param model_name: auto_ml model name to train; defaults to
        'DeepLearningClassifier' when not given.  Previously this
        parameter was accepted but silently ignored.
    """
    np.random.seed(0)

    df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()

    column_descriptions = {
        'survived': 'output'
        , 'embarked': 'categorical'
        , 'pclass': 'categorical'
    }

    ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)

    # Honour the caller's model choice instead of always hard-coding it.
    ml_predictor.train(df_titanic_train,
                       model_names=[model_name if model_name is not None
                                    else 'DeepLearningClassifier'])

    test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)

    print('test_score')
    print(test_score)

    assert -0.215 < test_score < -0.17
| Python | 0 | |
34709ee5d69e59fe87bebb51d522e05abaf6ed8d | Add unit test for helper functions | test/unit/test_parser.py | test/unit/test_parser.py | # :coding: utf-8
import pytest
import sphinxcontrib.parser
def test_parse_repository_error():
    """Raise an error if the path is incorrect."""
    # An empty path does not exist, so parsing must fail with OSError.
    with pytest.raises(OSError):
        sphinxcontrib.parser.parse_repository("")
def test_parse_repository_empty(temporary_directory):
    """Return an empty environment for a repository with no sources."""
    # Every environment category is present but empty.
    environment = dict(
        modules={},
        classes={},
        functions={},
        variables={},
        files={}
    )
    assert sphinxcontrib.parser.parse_repository(
        temporary_directory
    ) == environment
@pytest.fixture()
def content_lines_with_docstrings():
    # Sample JavaScript source mixing valid (/** ... */) and invalid
    # (/* ... */, // ...) docstring styles.
    # NOTE(review): not referenced by the tests visible in this file --
    # confirm whether the fixture is still needed.
    return [
        "/**",
        " * An function example.",
        " *",
        " * Detailed description.",
        " */",
        "function sum(a, b) {",
        "    return a+b;",
        "}",
        "",
        "/*",
        " * Incorrect docstring",
        " */",
        "function doSomething() {",
        "    console.log('something');",
        "}",
        "",
        "/*",
        "",
        "   Incorrect docstring",
        "",
        "*/",
        "function doSomethingElse() {",
        "    console.log('something_else');",
        "}",
        "",
        "",
        "/* A cool variable. */",
        "const Data = null",
    ]
@pytest.mark.parametrize(
    ("content_lines", "line_number", "expected"),
    [
        (
            [
                "/**",
                " * An function example.",
                " *",
                " * Detailed description.",
                " */",
                "function sum(a, b) {",
                "    return a+b;",
                "}",
            ],
            6,
            (
                "An function example.\n"
                "\n"
                "Detailed description."
            )
        ),
        (
            [
                "/** A cool variable. */",
                "const Data = null",
            ],
            2,
            (
                "A cool variable."
            )
        ),
        (
            [
                "/*",
                " * Incorrect docstring",
                " */",
                "function doSomething() {",
                "    console.log('something');",
                "}",
            ],
            4,
            None
        ),
        (
            [
                "/*",
                "",
                "   Incorrect docstring",
                "",
                "*/",
                "function doSomethingElse() {",
                "    console.log('something_else');",
                "}",
            ],
            6,
            None
        ),
        (
            [
                "// Incorrect docstring",
                "function doSomethingElse() {",
                "    console.log('something_else');",
                "}",
            ],
            2,
            None
        ),
        (
            [
                "",
                "function doSomethingElse() {",
                "    console.log('something_else');",
                "}",
            ],
            2,
            None
        ),
        (
            [
                "/** A cool variable. */",
                "const Data = null",
            ],
            1,
            None
        )
    ],
    ids=[
        "valid element line number with multiline docstring",
        "valid element line number with one line docstring",
        "valid element line number with incorrect docstring 1",
        "valid element line number with incorrect docstring 2",
        "valid element line number with incorrect docstring 3",
        "valid element line number with no docstring",
        "invalid line_number",
    ]
)
def test_parse_docstrings(content_lines, line_number, expected):
    """Return docstrings from an element's line number."""
    # line_number is 1-based and points at the element itself; the
    # docstring, if any, is expected in the lines immediately above it.
    assert sphinxcontrib.parser.parse_docstring(
        line_number, content_lines
    ) == expected
def test_filter_comments():
    """Remove all comments from content"""
    content = (
        "'use strict' /* a beautiful comment */\n"
        "\n"
        "/*\n"
        "a long comment that can take a lot of places so\n"
        "we put it on several lines.\n"
        "*/\n"
        "\n"
        "// a variable docstring\n"
        "const DATA = 1;\n"
        "\n"
        "/**\n"
        " * Function docstring\n"
        " */\n"
        "function sum(a, b) {\n"
        "    // Return the sum of a and b\n"
        "    return a+b;\n"
        "}\n"
    )

    # Comment text is blanked out but every newline is preserved, so that
    # line numbers still match the original source.
    expected = (
        "'use strict' \n"
        "\n"
        "\n"
        "\n"
        "\n"
        "\n"
        "\n"
        "\n"
        "const DATA = 1;\n"
        "\n"
        "\n"
        "\n"
        "\n"
        "function sum(a, b) {\n"
        "    \n"
        "    return a+b;\n"
        "}\n"
    )

    assert sphinxcontrib.parser.filter_comments(content) == expected
@pytest.mark.parametrize(
    ("content", "expected"),
    [
        (
            "const emptyObject = {};",
            "const emptyObject = {};",
        ),
        (
            "let test = {a: 1, b: 2, c: 3};",
            "let test = {};"
        ),
        (
            (
                "const element = {"
                "    key1: value1,"
                "    key2: value2,"
                "    key3: value3,"
                "};"
                ""
                "function sum(a, b) {"
                "    return a+b"
                "}"
                ""
            ),
            (
                "const element = {}"
                ""
                ""
                ""
                ";"
                ""
                "function sum(a, b) {}"
                ""
                ""
                ""
            )
        ),
        (
            (
                "class AwesomeClass {"
                "    constructor() {"
                "        this.variable = 1;"
                "    }"
                ""
                "    increase() {"
                "        this.variable += 1;"
                "    }"
                "}"
            ),
            (
                "class AwesomeClass {}"
                ""
                ""
                ""
                ""
                ""
                ""
                ""
                ""
            )
        )
    ],
    ids=[
        "empty object",
        "simple object",
        "objects and functions on multiple lines",
        "nested class"
    ]
)
def test_collapse_all(content, expected):
    """Collapse all objects, classes and functions."""
    # Bodies are emptied in place; the surrounding text (and number of
    # string segments) stays untouched.
    assert sphinxcontrib.parser.collapse_all(content) == expected
@pytest.mark.parametrize(
    ("name", "hierarchy_folders", "module_names", "expected"),
    [
        (
            "example",
            ["module", "submodule", "test"],
            [],
            "example"
        ),
        (
            "example",
            ["module", "submodule", "test"],
            ["another_module"],
            "example"
        ),
        (
            "example",
            ["module", "submodule", "test"],
            ["module.submodule.test"],
            "module.submodule.test.example"
        ),
        (
            "example",
            ["module", "submodule", "test"],
            ["submodule.test"],
            "submodule.test.example"
        ),
        (
            "example",
            ["module", "submodule", "test"],
            ["another_module", "submodule.test", "test"],
            "submodule.test.example"
        )
    ],
    ids=[
        "no module",
        "one module not in hierarchy",
        "one module matching entire hierarchy",
        "one module matching part of the hierarchy",
        "several modules"
    ]
)
def test_guess_module_name(name, hierarchy_folders, module_names, expected):
    """Return module name from initial name, hierarchy folders and modules."""
    # Per the cases above, the longest known module matching a suffix of
    # the folder hierarchy wins; with no match the bare name is returned.
    assert sphinxcontrib.parser.guess_module_name(
        name, hierarchy_folders, module_names
    ) == expected
| Python | 0.000001 | |
c98a744f5f436ae2c6266a7bb5d32173cfd0e4a9 | Add a script that scrapes the Socrata catalog, just in case we need that in another format | scripts/socrata_scraper.py | scripts/socrata_scraper.py | #!/usr/bin/python3
"""
This is a basic script that downloads the catalog data from the smcgov.org
website and pulls out information about all the datasets.
This is in python3
There is an optional download_all argument that will allow you to download
all of the datasets individually and in their entirety. I have included this
as a demonstration, but it should not be commonly used because it takes a
while and beats up on the smcgov data portal, which you should avoid.
"""
import sys
import json
import argparse
import collections
import urllib.request
URL = "https://data.smcgov.org/api/catalog?limit=999999999&only=datasets"
def main(args):
    """Summarise the Socrata catalog; optionally probe every dataset for
    location data.

    :param args: parsed argparse namespace with a ``download_all`` flag.
    """
    by_category = collections.defaultdict(list)
    by_domain = collections.defaultdict(list)
    download_urls = []
    located_datasets = []

    with urllib.request.urlopen(URL) as raw_data:
        catalog = json.loads(raw_data.read().decode('utf-8'))
        for entry in catalog['results']:
            classification = entry['classification']
            categories = classification['categories']
            if categories is None or categories == []:
                categories = ['NULL']
            permalink = entry['permalink']
            download_urls.append('{}.json'.format(permalink))
            by_domain[classification['domain_category']].append(permalink)
            for category in categories:
                by_category[category].append(permalink)

    if args.download_all:
        # Demonstration only: fetching every dataset in full is slow and
        # hard on the data portal.
        for download_url in download_urls:
            with urllib.request.urlopen(download_url) as dataset_file:
                print('Downloading {}'.format(download_url))
                dataset = json.loads(dataset_file.read().decode('utf-8'))
                if len(dataset) < 1:
                    continue
                if 'location_1' in dataset[0].keys():
                    # Our best guess on which datasets have location info.
                    located_datasets.append(download_url)

    if args.download_all:
        print('Datasets with location_1 key')
        print(located_datasets)
        print('----------------------------------------------------')

    print('Number of Datasets by Category')
    for name, permalinks in by_category.items():
        print(name, len(permalinks))
    print('----------------------------------------------------')
    print('Number of Datasets by Domain')
    for name, permalinks in by_domain.items():
        print(name, len(permalinks))
if __name__ == '__main__':
    # argparse is already imported at module scope, so the redundant
    # function-level import has been removed.
    parser = argparse.ArgumentParser()
    parser.add_argument('--download_all', help='Download all datasets',
                        action='store_true')
    args = parser.parse_args()
    main(args=args)
| Python | 0 | |
0c11d2740e561586bb4f9d2b67bda2ccc87e146e | Add new command to notify New Relic of deployment | ixdjango/management/commands/newrelic_notify_deploy.py | ixdjango/management/commands/newrelic_notify_deploy.py | """
Management command to enable New Relic notification of deployments
.. moduleauthor:: Infoxchange Development Team <development@infoxchange.net.au>
"""
import pwd
import os
from subprocess import Popen, PIPE
from urllib import urlencode
from httplib2 import Http
from django.conf import settings
from django.core.management.base import NoArgsCommand
import newrelic.agent
class Command(NoArgsCommand):
    """
    Notify New Relic of a deployment of the current code.

    Posts the current git version (``git describe``) and the deploying
    user to the New Relic deployments API so the deployment shows up as
    a marker in the application's graphs.  Does nothing when the agent
    is not in monitor mode.
    """

    # New Relic deployment-notification endpoint.
    URL = 'https://rpm.newrelic.com/deployments.xml'

    def handle_noargs(self, **options):
        # Management-command entry point (no positional arguments).
        newrelic.agent.initialize(
            settings.NEW_RELIC_CONFIG,
            settings.NEW_RELIC_ENV
        )
        config = newrelic.agent.global_settings()

        # Skip entirely when monitoring is disabled (e.g. development).
        if not config.monitor_mode:
            return

        # get the current git version
        git = Popen(('git', 'describe'), stdout=PIPE)
        ver, _ = git.communicate()
        ver = ver.strip()

        # get the current user
        user = pwd.getpwuid(os.getuid())

        headers = {
            'x-api-key': config.license_key
        }
        post = {
            'deployment[app_name]': config.app_name,
            'deployment[revision]': ver,
            'deployment[user]': '%s (%s)' % (user.pw_gecos, user.pw_name),
        }

        print "Informing New Relic...",

        # post this data
        http = Http()
        response, _ = http.request(self.URL, 'POST',
                                   headers=headers,
                                   body=urlencode(post))

        print response['status']
| Python | 0 | |
c0ea919305bcedf080a2213f4c549c68fa4efa2d | test tools | tests/test_tools.py | tests/test_tools.py | import unittest2 as unittest
from fabric.api import run
import tempfile
from mixins import WebServerMixin
from parcel.tools import dl, rpull, rpush
def tempname():
    """Create an empty temporary file and return its path.

    Unlike the previous ``tempfile.mkstemp()[1]``, this closes the file
    descriptor mkstemp opens, so repeated calls no longer leak fds.
    """
    fd, path = tempfile.mkstemp()
    os.close(fd)
    return path
import zlib, os
def crc32(filename):
    """Return the zlib CRC-32 checksum of the file at *filename*,
    computed incrementally over 8 KiB chunks."""
    checksum = 0
    with open(filename, 'rb') as fh:
        for chunk in iter(lambda: fh.read(8192), b''):
            checksum = zlib.crc32(chunk, checksum)
    return checksum
class ToolsTestSuite(unittest.TestCase, WebServerMixin):
    """Tools test cases."""

    def test_dl(self):
        # Serve the fixture webroot locally, download one file, and make
        # sure the copy is byte-for-byte identical to the original.
        self.startWebServer()
        filename = tempname()
        dl("http://localhost:%s/tip.tar.gz"%self.port,filename)

        # there should be no differences between the files
        self.assertEquals(crc32(filename),crc32(os.path.join(self.webroot,'tip.tar.gz')))

        # shutdown webserver
        self.stopWebServer()
| Python | 0.000002 | |
9de5728e5fdb0f7dc606681df685eb084477d8d0 | Add exercise | multiplyTwoNumbers.py | multiplyTwoNumbers.py | #!/usr/bin/env python
def main():
    # Prompt for two numbers and print their product.
    # NOTE(review): in Python 2, input() evaluates the typed text as an
    # expression (arbitrary code); int(raw_input(...)) would be safer --
    # confirm before reusing this beyond an exercise.
    a = input("Enter a number: ")
    b = input("Enter another number: ")
    print "The product of %d and %d is %d" % (a, b, a * b)

main()
| Python | 0.000196 | |
f883edc209928494c45693c5ecfd279bfbb09c97 | Add partfrac1 | timing/partfrac1.py | timing/partfrac1.py | import time
from lcapy import *

# Timing comparison of Lcapy's two partial-fraction methods ('ec' and
# 'sub') over expressions with a mix of simple, repeated and shifted poles.
funcs = [1 / s, 1 / s**2, 1 / (s + 3), 1 / (s + 3)**2, (s + 3) / (s + 4),
         1 / (s + 3)**2 / (s + 4), 1 / (s + 3)**3 / (s + 4),
         1 / (s + 3) / (s + 4) / (s + 5), (s + 6) / (s + 3) / (s + 4) / (s + 5),
         1 / (s + 3)**2 / (s + 4)**2, 1 / (s + 3)**3 / (s + 4)**2,
         s / (s + 3)**2 / (s + 4), s / (s + 3)**3 / (s + 4)]

Ntrials = 10
methods = ('ec', 'sub')
times = {}

# Sanity check: each partial-fraction expansion must compare equal to the
# original expression.
for func in funcs:
    ans1 = func.partfrac(method='ec')
    ans2 = func.partfrac(method='sub')
    if ans1 != func:
        # Fixed message: this branch tests the 'ec' method (was 'eq').
        print('Wrong answer for ec: ', func)
    if ans2 != func:
        print('Wrong answer for sub: ', func)

# Time each method, averaging over Ntrials runs per expression.
for method in methods:
    times[method] = []
    for func in funcs:
        start = time.perf_counter()
        for i in range(Ntrials):
            func.partfrac(method=method)
        stop = time.perf_counter()
        times[method].append((stop - start) / Ntrials)

import numpy as np
from matplotlib.pyplot import subplots, style, savefig, show

index = np.arange(len(funcs))

# Grouped bar chart: one bar pair per expression.
fig, axes = subplots(1)
axes.bar(index, times['ec'], 0.35, label='ec')
axes.bar(index+0.35, times['sub'], 0.35, label='subs')
axes.legend()
axes.set_ylabel('Time (s)')
show()
| Python | 0.999998 | |
e71c232660a7480c2b56f6e76e83fad4c7e9da8a | Add ctm_test.py test for testing CRTC's CTM color matrix property. | py/tests/ctm_test.py | py/tests/ctm_test.py | #!/usr/bin/python3
import sys
import pykms
def ctm_to_blob(ctm, card):
len=9
arr = bytearray(len*8)
view = memoryview(arr).cast("I")
for x in range(len):
i, d = divmod(ctm[x], 1)
if i < 0:
i = -i
sign = 1 << 31
else:
sign = 0
view[x * 2 + 0] = int(d * ((2 ** 32) - 1))
view[x * 2 + 1] = int(i) | sign
#print("%f = %08x.%08x" % (ctm[x], view[x * 2 + 1], view[x * 2 + 0]))
return pykms.Blob(card, arr);
# Demo sequence: apply a series of CTM matrices to a CRTC and pause between
# steps so the channel swaps can be observed on screen.

# Optional CLI argument selects the connector by name.
if len(sys.argv) > 1:
    conn_name = sys.argv[1]
else:
    conn_name = ""

card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector(conn_name)
crtc = res.reserve_crtc(conn)
mode = conn.get_default_mode()

# Full-screen XRGB8888 test pattern makes the channel remapping visible.
fb = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, "XR24");
pykms.draw_test_pattern(fb);

crtc.set_mode(conn, fb, mode)

input("press enter to set normal ctm\n")

# Identity matrix: output channels unchanged.
ctm = [ 1.0, 0.0, 0.0,
        0.0, 1.0, 0.0,
        0.0, 0.0, 1.0 ]

ctmb = ctm_to_blob(ctm, card)

crtc.set_prop("CTM", ctmb.id)

input("press enter to set new ctm\n")

# Rotate channels: r->b, g->r, b->g.
ctm = [ 0.0, 1.0, 0.0,
        0.0, 0.0, 1.0,
        1.0, 0.0, 0.0 ]

ctmb = ctm_to_blob(ctm, card)

crtc.set_prop("CTM", ctmb.id)
print("r->b g->r b->g ctm active\n")

input("press enter to set new ctm\n")

# Rotate channels the other way: r->g, g->b, b->r.
ctm = [ 0.0, 0.0, 1.0,
        1.0, 0.0, 0.0,
        0.0, 1.0, 0.0 ]

ctmb = ctm_to_blob(ctm, card)

crtc.set_prop("CTM", ctmb.id)
input("r->g g->b b->r ctm active\n")

input("press enter to turn off the crtc\n")
crtc.disable_mode()

input("press enter to enable crtc again\n")
# The CTM should survive the off/on cycle; verify visually.
crtc.set_mode(conn, fb, mode)

input("press enter to remove ctm\n")
# Setting the property to 0 detaches the blob.
crtc.set_prop("CTM", 0)

input("press enter to exit\n")
| Python | 0 | |
04477b11bbe7efa1720829691b7d1c3fe2a7a492 | Add __init__ | h2/__init__.py | h2/__init__.py | # -*- coding: utf-8 -*-
"""
h2
~~
A HTTP/2 implementation.
"""
__version__ = '0.1.0'
| Python | 0.000917 | |
95a40b92256374878c7fc6528fcaabf1939d9fce | Add a python script to parse DisplayList benchmarking output (#31266) | testing/benchmark/displaylist_benchmark_parser.py | testing/benchmark/displaylist_benchmark_parser.py | #!/usr/bin/env python3
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages as pdfp
class BenchmarkResult:
  """Timing series for one benchmark function, rendered as matplotlib pages.

  Collects (seed, time) points per family index, then plot() emits one
  figure -- or two (Y-cropped + complete) when any value exceeds yLimit.
  """
  def __init__(self, name, backend, timeUnit, drawCallCount):
    self.name = name
    self.series = {}          # family_index -> {'x': seeds, 'y': times}
    self.seriesLabels = {}    # family_index -> legend label
    self.backend = backend
    self.largeYValues = False # any y above yLimit triggers a second page
    self.yLimit = 200
    self.timeUnit = timeUnit
    self.drawCallCount = drawCallCount  # -1 means "unknown"

  def __repr__(self):
    return 'Name: % s\nBackend: % s\nSeries: % s\nSeriesLabels: % s\n' % (self.name, self.backend, self.series, self.seriesLabels)

  def addDataPoint(self, family, x, y):
    """Append point (x, y) to *family*'s series, tracking large Y values."""
    if family not in self.series:
      self.series[family] = { 'x': [], 'y': [] }

    self.series[family]['x'].append(x)
    self.series[family]['y'].append(y)

    if y > self.yLimit:
      self.largeYValues = True

  def setFamilyLabel(self, family, label):
    """Record the legend label for *family*; re-labelling must be identical."""
    # I'm not keying the main series dict off the family label
    # just in case we get data where the two aren't a 1:1 mapping
    if family in self.seriesLabels:
      assert self.seriesLabels[family] == label
      return

    self.seriesLabels[family] = label

  def _render(self, titleSuffix, cropped):
    """Render one figure with every series; crop the Y axis when *cropped*."""
    fig = plt.figure(dpi=1200, frameon=False, figsize=(11, 8.5))
    for family in self.series:
      plt.plot(self.series[family]['x'], self.series[family]['y'],
               label = self.seriesLabels[family])
    plt.xlabel('Benchmark Seed')
    plt.ylabel('Time (' + self.timeUnit + ')')
    if cropped:
      # Crop the Y axis so that we can see what's going on at the lower end
      plt.ylim((0, self.yLimit))
    title = self.name + ' ' + self.backend + titleSuffix
    if self.drawCallCount != -1:
      title += '\nDraw Call Count: ' + str(int(self.drawCallCount))
    plt.title(title)
    plt.grid(which='both', axis='both')
    plt.legend(fontsize='xx-small')
    plt.plot()
    return fig

  def plot(self):
    """Return the list of figures for this benchmark.

    The original duplicated the whole rendering sequence for the cropped
    and complete pages; that is factored into _render().
    """
    if self.largeYValues:
      return [self._render(' (Cropped)', True),
              self._render(' (Complete)', False)]
    return [self._render('', False)]
def main():
    """Parse CLI arguments and convert Google Benchmark JSON to a PDF."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filename', action='store',
                            help='Path to the JSON output from Google Benchmark')
    arg_parser.add_argument('-o', '--output-pdf', dest='outputPDF',
                            action='store', default='output.pdf',
                            help='Filename to output the PDF of graphs to.')
    parsed = arg_parser.parse_args()
    return processBenchmarkData(parseJSON(parsed.filename), parsed.outputPDF)
def error(message):
    """Print *message* and terminate the process with exit status 1."""
    print(message)
    # sys.exit replaces the site-module exit() helper, which is absent
    # under `python -S` and in frozen interpreters.
    sys.exit(1)
def extractAttributesLabel(benchmarkResult):
    """Comma-join the names of the non-zero attribute counters attached
    to a benchmark entry; missing or zero attributes are omitted."""
    # Possible attribute keys are:
    #   AntiAliasing
    #   HairlineStroke
    #   StrokedStyle
    #   FilledStyle
    attributes = ['AntiAliasing', 'HairlineStroke', 'StrokedStyle', 'FilledStyle']
    present = [attr for attr in attributes
               if benchmarkResult.get(attr, 0) != 0]
    return ', '.join(present)
def processBenchmarkData(benchmarkJSON, outputPDF):
    """Group benchmark entries by function name and write their charts,
    one page per figure, into the PDF at *outputPDF*.

    Benchmark names are expected to look like
    ``<Function>/<variant...>/<backend>/<seed>/real_time``.
    """
    benchmarkResultsData = {}

    for benchmarkResult in benchmarkJSON:
        # Skip aggregate results
        if 'aggregate_name' in benchmarkResult:
            continue

        benchmarkVariant = benchmarkResult['name'].split('/')
        # The final split is always `real_time` and can be discarded
        benchmarkVariant.remove('real_time')

        splits = len(benchmarkVariant)
        # First split is always the benchmark function name
        benchmarkName = benchmarkVariant[0]
        # The last split is always the seeded value into the benchmark
        benchmarkSeededValue = benchmarkVariant[splits-1]
        # The second last split is always the backend
        benchmarkBackend = benchmarkVariant[splits-2]

        # Time taken (wall clock time) for benchmark to run
        benchmarkRealTime = benchmarkResult['real_time']
        benchmarkUnit = benchmarkResult['time_unit']

        benchmarkFamilyIndex = benchmarkResult['family_index']

        # Series label = any middle name segments (between function name and
        # backend) plus the non-zero attribute counters, comma separated.
        benchmarkFamilyLabel = ''
        if splits > 3:
            for i in range(1, splits-2):
                benchmarkFamilyLabel += benchmarkVariant[i] + ', '
        benchmarkFamilyAttributes = extractAttributesLabel(benchmarkResult)
        if benchmarkFamilyAttributes == '':
            # No attributes: strip the trailing ', ' left by the loop above.
            benchmarkFamilyLabel = benchmarkFamilyLabel[:-2]
        else:
            benchmarkFamilyLabel = benchmarkFamilyLabel + benchmarkFamilyAttributes

        # -1 marks "draw call count unknown" downstream in plot titles.
        if 'DrawCallCount' in benchmarkResult:
            benchmarkDrawCallCount = benchmarkResult['DrawCallCount']
        else:
            benchmarkDrawCallCount = -1

        if benchmarkName not in benchmarkResultsData:
            benchmarkResultsData[benchmarkName] = BenchmarkResult(benchmarkName, benchmarkBackend, benchmarkUnit, benchmarkDrawCallCount)

        benchmarkResultsData[benchmarkName].addDataPoint(benchmarkFamilyIndex, benchmarkSeededValue, benchmarkRealTime)
        benchmarkResultsData[benchmarkName].setFamilyLabel(benchmarkFamilyIndex, benchmarkFamilyLabel)

    # One PDF; each benchmark may contribute one or two figure pages.
    pp = pdfp(outputPDF)

    for benchmark in benchmarkResultsData:
        figures = benchmarkResultsData[benchmark].plot()
        for fig in figures:
            pp.savefig(fig)

    pp.close()
def parseJSON(filename):
    """Load the Google Benchmark JSON file and return its 'benchmarks' list.

    Exits via error() when the file cannot be read or is not valid JSON.
    """
    try:
        # `with` closes the file even on failure (the original leaked the
        # handle), and the single try replaces a bare `except:` plus a
        # reference to an unimported JSONDecodeError name (a NameError).
        with open(filename, 'r') as jsonFile:
            jsonData = json.load(jsonFile)
    except OSError:
        error('Unable to load file.')
    except ValueError:
        # json.JSONDecodeError subclasses ValueError.
        error('Invalid JSON. Unable to parse.')

    return jsonData['benchmarks']
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| Python | 0 | |
2e2ad49c7ada145b5a4a81bd8941cf5e72d2d81b | Test case for wordaxe bug | rst2pdf/tests/input/test_180.py | rst2pdf/tests/input/test_180.py | # -*- coding: utf-8 -*-
from reportlab.platypus import SimpleDocTemplate
from reportlab.platypus.paragraph import Paragraph
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.colors import Color
from reportlab.platypus.flowables import _listWrapOn, _FUZZ
from wordaxe.rl.NewParagraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet
def go():
    """Reproduce the wordaxe splitting bug this test file targets.

    Wraps a paragraph at height 701, then prints the fragment counts from
    split() at heights 701 and 700 -- the discrepancy between the two is
    the bug (Python 2 script; wordaxe's Paragraph shadows reportlab's).
    """
    styles = getSampleStyleSheet()
    style=styles['Normal']
    p1 = Paragraph('This is a paragraph', style )
    print p1.wrap(500,701)
    # _cache['avail'] is wordaxe-internal wrap state populated by wrap().
    print p1._cache['avail']
    print len(p1.split(500,701))
    print len(p1.split(500,700))

go()
| Python | 0 | |
698eee3db238189ba066670c4fe4a1193e6a942a | add flask-login | app/user/loginmanager.py | app/user/loginmanager.py | from flask.ext.login import LoginManager
from models import User
# Application-wide Flask-Login manager; initialised against the app elsewhere.
login_manager = LoginManager()

@login_manager.user_loader
def user_loader(user_id):
    """Reload a user object from the id Flask-Login stored in the session."""
    # NOTE(review): Flask-Login passes the stored id back as unicode; this
    # assumes User.query.get() accepts a string primary key -- confirm.
    return User.query.get(user_id)

# Blueprint-relative endpoint that @login_required redirects to.
login_manager.login_view = '.login'
| Python | 0.000001 | |
d5d8e16b5ccbbb65398ce015f020db3839fac409 | add test_rotate.py | tests/transforms_tests/image_tests/test_rotate.py | tests/transforms_tests/image_tests/test_rotate.py | import random
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import flip
from chainercv.transforms import rotate
class TestRotate(unittest.TestCase):

    def test_rotate(self):
        """rotate(img, a) must equal mirror -> rotate(-a) -> mirror."""
        src = np.random.uniform(size=(3, 32, 24))
        angle = random.uniform(0, 180)

        rotated = rotate(src, angle)

        mirrored = flip(src, x_flip=True)
        reference = flip(rotate(mirrored, -1 * angle), x_flip=True)

        np.testing.assert_almost_equal(rotated, reference, decimal=6)
testing.run_module(__name__, __file__)
| Python | 0.000003 | |
de74c933b74d9066984fe040edf026b7d9f87711 | Split problem statement 2 | 69_split_problem_statement_2.py | 69_split_problem_statement_2.py | '''
Open the file sample.txt and read it line by line.
When you find a line that starts with 'From ' (an mbox envelope line) like the following line:
From stephen.marquard@uct.ac.za Sat Jan 5 09:14:16 2008
You will parse the From line using split() and print out the second word in the line
(i.e. the entire address of the person who sent the message).
Then print out a count at the end.
Hint: make sure not to include the lines that start with 'From:'.
'''
fileName = raw_input("Enter file name : ")
if len(fileName) < 1 : fileName = "sample.txt"
openFile = open(fileName)
count = 0
words = list()
for line in openFile:
if not line.startswith("From:"):
continue
count += 1
words = line.split()
print words[1]
print "There were", count, "lines in the file with 'From:' as the first word."
| Python | 0.99994 | |
01029805a6fb3484cf803f0c0abd18232b4ad810 | Add database tools | egoio/tools/db.py | egoio/tools/db.py | def grant_db_access(conn, schema, table, role):
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
grant_str = """GRANT ALL ON TABLE {schema}.{table}
TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
role=role)
conn.execute(grant_str)
def add_primary_key(conn, schema, table, pk_col):
    r"""Add a primary-key constraint on one column of a database table.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    pk_col : str
        Column that primary key is applied to
    """
    # Identifiers cannot be bound as SQL parameters, hence str.format.
    statement = "alter table {schema}.{table} add primary key ({col})".format(
        schema=schema, table=table, col=pk_col)
    conn.execute(statement)
def change_owner_to(conn, schema, table, role):
    r"""Transfer ownership of a database table to a role.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    # Identifiers cannot be bound as SQL parameters, hence str.format.
    statement = """ALTER TABLE {schema}.{table}
                OWNER TO {role};""".format(schema=schema,
                                           table=table,
                                           role=role)
    conn.execute(statement)
bc9072cee7ce880c30af83ee4c239ae9cf1ddbfe | Create NumberofIslandsII_001.py | lintcode/Number-of-Islands-II/NumberofIslandsII_001.py | lintcode/Number-of-Islands-II/NumberofIslandsII_001.py | # Definition for a point.
# class Point:
# def __init__(self, a=0, b=0):
# self.x = a
# self.y = b
class UnionFind:
    """Union-find over an n x m grid tracking connected islands.

    grid marks land cells, fathers maps flat index -> parent index, and
    nsets is the current number of disjoint islands.
    """
    def __init__(self, n, m):
        self.fathers = {}
        self.nsets = 0
        self.grid = [[0 for _ in range(m)] for _ in range(n)]
        self.n = n
        self.m = m

    def build_island(self, i, j):
        """Turn cell (i, j) into land and merge with adjacent islands."""
        # Guard against duplicate operators: re-adding an existing island
        # would reset its parent link and inflate nsets.
        if self.grid[i][j] == 1:
            return
        self.grid[i][j] = 1
        self.fathers[i * self.m + j] = i * self.m + j
        self.nsets += 1

        # Union with each in-bounds land neighbour (4-connectivity).
        nbrs = []
        nbrs.append([i, j - 1])
        nbrs.append([i, j + 1])
        nbrs.append([i - 1, j])
        nbrs.append([i + 1, j])
        for nbr in nbrs:
            if -1 < nbr[0] < self.n and -1 < nbr[1] < self.m:
                if self.grid[nbr[0]][nbr[1]] == 1:
                    idx1 = i * self.m + j
                    idx2 = nbr[0] * self.m + nbr[1]
                    self.union(idx1, idx2)

    def find(self, idx):
        """Return the root of idx's set (with path compression)."""
        return self.compressed_find(idx)

    def compressed_find(self, idx):
        # Recursively resolve the root, re-pointing every node on the
        # path directly at it.
        fidx = self.fathers[idx]
        if fidx != idx:
            self.fathers[idx] = self.find(fidx)
        return self.fathers[idx]

    def union(self, i, j):
        """Merge the sets containing i and j; decrement nsets on a merge."""
        fi = self.find(i)
        fj = self.find(j)
        if fi != fj:
            self.fathers[fi] = fj
            self.nsets -= 1

    def get_nsets(self):
        """Current number of islands."""
        return self.nsets
class Solution:
    # @param {int} n an integer
    # @param {int} m an integer
    # @param {Pint[]} operators an array of point
    # @return {int[]} an integer array
    def numIslands2(self, n, m, operators):
        """Return the island count after each addLand operator.

        Delegates to the UnionFind class defined above.
        """
        # An empty grid supports no islands; return an empty list so the
        # declared int[] return type holds (the original returned the
        # integer 0 here, which is not an array).
        if n == 0 or m == 0:
            return []
        uf, res = UnionFind(n, m), []
        for oper in operators:
            i, j = oper.x, oper.y
            # Out-of-bounds operators are ignored and (as in the original)
            # contribute no entry to the result list.
            if -1 < i < n and -1 < j < m:
                uf.build_island(i, j)
                res.append(uf.get_nsets())
        return res
| Python | 0.000215 | |
bc3f4575c7267db8f7841a82e8f6866c59d15237 | Add some example function tests that use gaeftest | tests/test_functional.py | tests/test_functional.py | #!/usr/bin/python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__authors__ = [
'"Matthew Wilkes" <matthew@matthewwilkes.co.uk>',
]
from gaeftest.test import FunctionalTestCase
from zope.testbrowser import browser
import os.path
class MelangeFunctionalTestCase(FunctionalTestCase):
  """A base class for all functional tests in Melange.

  Tests MUST NOT be defined here, but the superclass requires a path
  attribute that points to the app.yaml. Utility functions MAY be
  declared here to be shared by all functional tests, but any
  overridden unittest methods MUST call the superclass version.
  """
  # Absolute path to the App Engine app.yaml, resolved relative to this file.
  path = os.path.abspath(__file__+"/../../app/app.yaml")
class TestBranding(MelangeFunctionalTestCase):
  """Tests that ensure Melange properly displays attribution.

  Other notices, as required by the project and/or law, are tested
  here as well.
  """

  def test_attribution(self):
    """Ensure that the front page asserts that it is a Melange app.
    """
    # zope.testbrowser drives the dev_appserver started by the base class.
    tb = browser.Browser()
    tb.open("http://127.0.0.1:8080/site/show/site")

    self.assertTrue("Powered by Melange" in tb.contents)
class TestLogin(MelangeFunctionalTestCase):
  """Tests that check the login system is functioning correctly.

  Also tests that users go through the correct registration workflow.
  """

  def test_firstLogin(self):
    """Ensure that new users are prompted to create a profile.

    Also test that only new users are prompted.
    """
    tb = browser.Browser()
    tb.open("http://127.0.0.1:8080")

    tb.getLink("Sign in").click()
    self.assertTrue("login" in tb.url)

    # fill in dev_appserver login form
    tb.getForm().getControl("Email").value = "newuser@example.com"
    tb.getForm().getControl("Login").click()

    # a brand-new user lands on the site front page with a call to action
    # to create their profile
    self.assertTrue(tb.url.endswith("/show/site"))
    self.assertTrue('Please create <a href="/user/create_profile">'
        'User Profile</a> in order to view this page' in tb.contents)

    tb.getLink("User Profile").click()

    # fill in the user profile
    cp = tb.getForm(action="create_profile")
    cp.getControl(name="link_id").value = "exampleuser"
    cp.getControl(name="name").value = "Example user"
    cp.getControl("Save").click()

    # if all is well, we go to the edit page
    self.assertTrue("edit_profile" in tb.url)

    tb.open("http://127.0.0.1:8080")

    # call to action no longer on front page
    self.assertFalse('Please create <a href="/user/create_profile">'
        'User Profile</a> in order to view this page' in tb.contents)
| Python | 0.000001 | |
1ced3a967742783ef649f7c7defecf333050d547 | Update http_endpoint to use convert_xml() | jenkins_jobs/modules/notifications.py | jenkins_jobs/modules/notifications.py | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Notifications module allows you to configure Jenkins to notify
other applications about various build phases. It requires the
Jenkins notification plugin.
**Component**: notifications
:Macro: notification
:Entry Point: jenkins_jobs.notifications
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.modules.helpers import convert_mapping_to_xml
def http_endpoint(registry, xml_parent, data):
    """yaml: http
    Defines an HTTP notification endpoint.
    Requires the Jenkins :jenkins-wiki:`Notification Plugin
    <Notification+Plugin>`.

    :arg str format: notification payload format, JSON (default) or XML
    :arg str event: job events that trigger notifications: started,
        completed, finalized or all (default)
    :arg str url: URL of the endpoint
    :arg str timeout: Timeout in milliseconds for sending notification
        request (30 seconds by default)
    :arg str log: Number lines of log messages to send (0 by default).
        Use -1 for all (use with caution).

    Example:

    .. literalinclude:: \
    /../../tests/notifications/fixtures/http-endpoint002.yaml
       :language: yaml
    """
    endpoint_element = XML.SubElement(xml_parent,
                                      'com.tikal.hudson.plugins.notification.'
                                      'Endpoint')
    supported_formats = ['JSON', 'XML']
    supported_events = ['started', 'completed', 'finalized', 'all']
    # Normalise case before validation so 'json'/'Started' etc. are accepted.
    fmt = data.get('format', 'JSON').upper()
    event = data.get('event', 'all').lower()

    # Rows consumed by convert_mapping_to_xml; fail_required=True makes a
    # missing 'url' an error.  NOTE(review): the first tuple element appears
    # to be the yaml key ('' rows fall back to the precomputed default) and
    # the second the XML tag -- confirm against helpers.convert_mapping_to_xml.
    mapping = [
        ('', 'format', fmt, supported_formats),
        ('', 'protocol', 'HTTP'),
        ('', 'event', event, supported_events),
        ('timeout', 'timeout', 30000),
        ('url', 'url', None),
        ('log', 'loglines', 0)]
    convert_mapping_to_xml(endpoint_element, data, mapping, fail_required=True)
class Notifications(jenkins_jobs.modules.base.Base):
    """Job-property module wiring notification endpoints into the job XML.

    Each entry of a job's ``notifications`` list is dispatched to the
    registered endpoint handler (e.g. ``http`` above).
    """
    sequence = 22

    component_type = 'notification'
    component_list_type = 'notifications'

    def gen_xml(self, xml_parent, data):
        # Ensure the job has a <properties> node to attach the plugin to.
        properties = xml_parent.find('properties')
        if properties is None:
            properties = XML.SubElement(xml_parent, 'properties')

        notifications = data.get('notifications', [])
        if notifications:
            notify_element = XML.SubElement(properties,
                                            'com.tikal.hudson.plugins.'
                                            'notification.'
                                            'HudsonNotificationProperty')
            endpoints_element = XML.SubElement(notify_element, 'endpoints')
            for endpoint in notifications:
                self.registry.dispatch('notification',
                                       endpoints_element, endpoint)
| # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Notifications module allows you to configure Jenkins to notify
other applications about various build phases. It requires the
Jenkins notification plugin.
**Component**: notifications
:Macro: notification
:Entry Point: jenkins_jobs.notifications
"""
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
import jenkins_jobs.modules.base
def http_endpoint(registry, xml_parent, data):
    """yaml: http
    Defines an HTTP notification endpoint.
    Requires the Jenkins :jenkins-wiki:`Notification Plugin
    <Notification+Plugin>`.

    :arg str format: notification payload format, JSON (default) or XML
    :arg str event: job events that trigger notifications: started,
        completed, finalized or all (default)
    :arg str url: URL of the endpoint
    :arg str timeout: Timeout in milliseconds for sending notification
        request (30 seconds by default)
    :arg str log: Number lines of log messages to send (0 by default).
        Use -1 for all (use with caution).

    Example:

    .. literalinclude:: \
    /../../tests/notifications/fixtures/http-endpoint002.yaml
       :language: yaml
    """
    endpoint_element = XML.SubElement(xml_parent,
                                      'com.tikal.hudson.plugins.notification.'
                                      'Endpoint')

    # Payload format: normalise to upper case, then validate.
    valid_formats = ['JSON', 'XML']
    payload_format = data.get('format', 'JSON').upper()
    if payload_format not in valid_formats:
        raise JenkinsJobsException(
            "format must be one of %s" %
            ", ".join(valid_formats))
    XML.SubElement(endpoint_element, 'format').text = payload_format

    XML.SubElement(endpoint_element, 'protocol').text = 'HTTP'

    # Triggering event: normalise to lower case, then validate.
    valid_events = ['started', 'completed', 'finalized', 'all']
    event = data.get('event', 'all').lower()
    if event not in valid_events:
        raise JenkinsJobsException(
            "event must be one of %s" %
            ", ".join(valid_events))
    XML.SubElement(endpoint_element, 'event').text = event

    XML.SubElement(endpoint_element, 'timeout').text = str(data.get('timeout',
                                                                    30000))
    XML.SubElement(endpoint_element, 'url').text = data['url']
    XML.SubElement(endpoint_element, 'loglines').text = str(data.get('log', 0))
class Notifications(jenkins_jobs.modules.base.Base):
    """Job-property module wiring notification endpoints into the job XML.

    Each entry of a job's ``notifications`` list is dispatched to the
    registered endpoint handler (e.g. ``http`` above).
    """
    sequence = 22

    component_type = 'notification'
    component_list_type = 'notifications'

    def gen_xml(self, xml_parent, data):
        # Ensure the job has a <properties> node to attach the plugin to.
        properties = xml_parent.find('properties')
        if properties is None:
            properties = XML.SubElement(xml_parent, 'properties')

        notifications = data.get('notifications', [])
        if notifications:
            notify_element = XML.SubElement(properties,
                                            'com.tikal.hudson.plugins.'
                                            'notification.'
                                            'HudsonNotificationProperty')
            endpoints_element = XML.SubElement(notify_element, 'endpoints')
            for endpoint in notifications:
                self.registry.dispatch('notification',
                                       endpoints_element, endpoint)
| Python | 0.000001 |
4856b426b380d4d46cccc2f5b8ab2212956a96c2 | test of time module. not terribly fancy, but it does touch every function and variable in the module, verifies a few return values and even tests a couple of known error conditions. | Lib/test/test_time.py | Lib/test/test_time.py | import time
time.altzone
time.clock()
t = time.time()
time.asctime(time.gmtime(t))
if time.ctime(t) <> time.asctime(time.localtime(t)):
print 'time.ctime(t) <> time.asctime(time.localtime(t))'
time.daylight
if int(time.mktime(time.localtime(t))) <> int(t):
print 'time.mktime(time.localtime(t)) <> t'
time.sleep(1.2)
tt = time.gmtime(t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'E', 'H', 'I',
'j', 'm', 'M', 'n', 'N', 'o', 'p', 'S', 't',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
time.strftime(format, tt)
time.timezone
time.tzname
# expected errors
try:
time.asctime(0)
except TypeError:
pass
try:
time.mktime((999999, 999999, 999999, 999999,
999999, 999999, 999999, 999999,
999999))
except OverflowError:
pass
| Python | 0 | |
c851501cc8149685a9e9c023aa200b92c17a9078 | Add decoder ida fields name | pida_fields.py | pida_fields.py | def decode_name_fields(ida_fields):
i = -1
stop = len(ida_fields)
while True:
i += 1
if i == stop:
break
count = ord(ida_fields[i]) - 1
if count == 0:
continue
i += 1
yield ida_fields[i:i + count]
i += count - 1
| Python | 0.000001 | |
fab0b43c2f2c60ac9cf701fb275650f05ac0dfa2 | add wrapper class for basic binary gpio | raspi_binary_gpio.py | raspi_binary_gpio.py | """
Author: Nate Levesque <public@thenaterhood.com>
Language: Python3
Filename: raspi_binary_gpio.py
Description:
contains a class for reading/writing from the raspberry pi
GPIO pins as two 8-bit arrays, like in assembly. Note that
this is designed (originally) for writing/reading 8-bit arrays
of switches or LEDs "in one shot" but if the hardware is designed
using shift registers, it could also be used to display data on an
8xN LED display.
The class requires integers which are then converted to binary
in 8 bits. If the output results in more than 8 bits, the program
will display up to the first 8, and set its error field to True.
If the output fits in the output array, then the error field is set
to false. Retrieving data does not set the error field.
The class treats (as it was easier to wire it this way in my
setup) the GPIO pins as one side of the connector is output and
the other side is input. In the setup section this is more clear
as it includes pin numbers. This can be changed by changing
what pins are listed in the inpins and outpins (and if it's safe in
your setup, they can be the same thing)
"""
import RPi.GPIO as io
class gpio():
    """
    Manages IO for the raspberry pi GPIO as two 8-bit registers: one bank
    of input pins (switches) and one bank of output pins (LEDs).

    Fields:
        register -- list of booleans, MSB first, holding the last value
            read from the input pins or written to the output pins
        error -- True when the value written did not fit in the output
            bank; False otherwise (always False after a read)
    """
    __slots__=('register', 'error')

    def __init__( self, TxData=False ):
        """
        Read the input bank (TxData falsy) or write TxData to the output
        bank (TxData truthy).

        Arguments:
            TxData (int): value to display on the LED bank, or False to
                read the switch bank instead.
                NOTE(review): TxData == 0 is indistinguishable from False
                and triggers a read -- confirm intended behaviour.
        """
        io.setmode(io.BOARD)

        # Whether to print the register as it's created, for debug purposes
        debug = False

        # Input and output pins to use (board numbering; one side of the
        # header is input, the other output).
        inpins = [3, 5, 7, 11, 13, 15, 19, 21, 23]
        outpins = [8, 10, 12, 16, 18, 22, 24, 26]

        # Retrieves the data from the incoming pins
        if ( not TxData ):
            RxData = []
            # Setting up and retrieving the state of each pin
            for pin in inpins:
                io.setup(pin, io.IN)
                RxData.append( io.input(pin) )

            self.register = RxData
            # A read can never overflow the register; the original left
            # `error` unset on this path, breaking later attribute access.
            self.error = False
            if debug:
                print( self )

        # writes the data to the output pins
        if ( TxData ):
            # Convert the integer to a boolean array of its binary bits.
            self.register = self.createBinArray( self.DecToBinString( TxData ) )
            if debug:
                print( self )

            # Iterates backwards through the binary data (LSB -> MSB)
            # and outputs it over the designated output pins
            i = len( outpins ) - 1
            while i >= 0:
                io.setup( outpins[i], io.OUT )
                io.output( outpins[i], self.register[i] )
                i -= 1

            # Flag whether the number fit in the output bank.  The original
            # set error to True and then unconditionally overwrote it with
            # False, so overflow was never reported.
            if ( len( outpins ) < len( self.register ) ):
                self.error = True
            else:
                self.error = False

    def DecToBinString( self, integer ):
        """
        Returns a string of the binary representation of an integer
        (no '0b' prefix, no padding).

        Arguments:
            integer (int): an integer

        Returns:
            (str): the string of the binary rep of the integer
        """
        return bin(integer)[2:]

    def createBinArray( self, binary ):
        """
        Creates a binary array from a string of 1's and 0's, left-padded
        with False to a minimum width of 8 bits (MSB first).

        Arguments:
            binary (str): a string representation of a binary number

        Returns:
            binArray (list): a list of boolean values that represent
                the binary number
        """
        binArray = []
        for digit in binary:
            if ( digit == '0' ):
                binArray.append( False )
            if ( digit == '1' ):
                binArray.append( True )

        # pad to the 8-bit register width
        while len( binArray ) < 8:
            binArray.insert(0, False)

        return binArray

    def __str__( self ):
        """
        Returns a graphical representation of the bits stored in the
        class's 'register' field: '@' for a set bit, 'o' for clear.
        """
        r = ''
        for bit in self.register:
            if ( bit ):
                r = r + "@"
            else:
                r = r + "o"
        return r
| Python | 0 | |
015c7f7fbab200084cf08bd1f7e35cbcd61b369e | Axonical hello world. | Sketches/PT/helloworld.py | Sketches/PT/helloworld.py | #!/usr/bin/env python
import time
from Axon.Component import component
from Axon.Scheduler import scheduler
class HelloPusher(component):
    """Axon component that emits a reversed greeting on 'outbox' twice a second."""

    def main(self):
        while True:
            time.sleep(0.5) # normally this would be a bad idea, since the entire scheduler will halt inside this component.
            self.send("\n!ednom ,tulas", 'outbox')
            yield 1
class Reverser(component):
    """Axon component that forwards each 'inbox' string reversed to 'outbox'."""

    def main(self):
        while True:
            if self.dataReady('inbox'):
                item = self.recv('inbox')
                # [::-1] reverses the string
                self.send(item[::-1], 'outbox')
            else: self.pause()  # sleep until new data arrives
            yield 1
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Util.Console import ConsoleEchoer

# Wire pusher -> reverser -> console: the pre-reversed greeting comes out
# the right way round on the terminal.  activate() registers the pipeline;
# runThreads() below actually drives the scheduler (run() would block here).
thepipe = Pipeline(HelloPusher(), Reverser(), ConsoleEchoer()).activate()
# thepipe = Pipeline(HelloPusher(), Reverser(), ConsoleEchoer()).run()

scheduler.run.runThreads()
e869c7ef9e3d19da4c98cda57b5e22fb5a35cba5 | Add first basic unittests using py.test | tests/test_validators.py | tests/test_validators.py | """
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
    """Minimal form stand-in; the validators only need *some* object."""
    pass
class DummyField(object):
    """Field stand-in exposing only the .data attribute validators read."""
    def __init__(self, data):
        self.data = data
form = DummyForm()
def test_email():
    """Well-formed addresses validate; malformed ones raise ValidationError."""
    assert email(form, DummyField('foo@bar.dk')) is None
    assert email(form, DummyField('123@bar.dk')) is None
    assert email(form, DummyField('foo@456.dk')) is None
    assert email(form, DummyField('foo@bar456.info')) is None
    # py.test's raises() returns ExceptionInfo; the original compared each
    # call to None and discarded the result -- dead code, now removed.
    raises(ValidationError, email, form, DummyField('foo'))
    raises(ValidationError, email, form, DummyField('bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@'))
    raises(ValidationError, email, form, DummyField('@bar.dk'))
    raises(ValidationError, email, form, DummyField('foo@bar'))
    raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
    raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
    """min/max bounds accept a 6-char value and reject out-of-range limits."""
    field = DummyField('foobar')
    # `is None` is the idiomatic None check (was `== None`).
    assert length(min=2, max=6)(form, field) is None
    raises(ValidationError, length(min=7), form, field)
    raises(ValidationError, length(max=5), form, field)
def test_url():
    """http URLs with a valid host validate; bare hosts and bad TLDs fail."""
    # `is None` is the idiomatic None check (was `== None`).
    assert url()(form, DummyField('http://foobar.dk')) is None
    assert url()(form, DummyField('http://foobar.dk/')) is None
    assert url()(form, DummyField('http://foobar.dk/foobar')) is None
    raises(ValidationError, url(), form, DummyField('http://foobar'))
    raises(ValidationError, url(), form, DummyField('foobar.dk'))
    raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
    """Non-blank values pass; empty and whitespace-only values fail."""
    # `is None` is the idiomatic None check (was `== None`).
    assert not_empty()(form, DummyField('foobar')) is None
    raises(ValidationError, not_empty(), form, DummyField(''))
    raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
    """Dotted-quad strings pass; malformed octets fail."""
    # `is None` is the idiomatic None check (was `== None`).
    assert ip_address(form, DummyField('127.0.0.1')) is None
    raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
    raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
    raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
| Python | 0 | |
54c358a296733d2a5236a9a776830f1b78682b73 | Add lc040_combination_sum_ii.py | lc040_combination_sum_ii.py | lc040_combination_sum_ii.py | """Leetcode 40. Combination Sum II
Medium
URL: https://leetcode.com/problems/combination-sum-ii/
Given a collection of candidate numbers (candidates) and a target number (target),
find all unique combinations in candidates where the candidate numbers sums to target.
Each number in candidates may only be used once in the combination.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
Example 1:
Input: candidates = [10,1,2,7,6,1,5], target = 8,
A solution set is:
[
[1, 7],
[1, 2, 5],
[2, 6],
[1, 1, 6]
]
Example 2:
Input: candidates = [2,5,2,1,2], target = 5,
A solution set is:
[
[1,2,2],
[5]
]
"""
class Solution(object):
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.004345 | |
7fab8c2d014f013131bd4d6301f5f8e5268d6037 | add leetcode Pow(x, n) | leetcode/powx-n/solution.py | leetcode/powx-n/solution.py | # -*- coding:utf-8 -*-
class Solution:
# @param x, a float
# @param n, a integer
# @return a float
def pow(self, x, n):
if n == 0:
return 1
if n < 0:
neg_flag = True
n = -n
else:
neg_flag = False
ret = 1
while n > 0:
if n % 2 == 1:
ret *= x
x = x * x
n //= 2
if neg_flag:
return 1 / ret
return ret
| Python | 0.000908 | |
736103ea495c89defcae9bf6ab72aa7b89768026 | add start of advisory module | updatebot/advise.py | updatebot/advise.py | #
# Copyright (c) 2008 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
"""
Module for managing/manipulating advisories.
"""
from updatebot.errors import *
class Advisor(object):
"""
Class for managing, manipulating, and distributing advisories.
"""
def __init__(self, cfg, rpmSource):
self._cfg = cfg
self._rpmSource = rpmSource
| Python | 0 | |
27b9727926139ae2cfde6d3cdcdf5746ed28e03d | Add new package arbor (#11914) | var/spack/repos/builtin/packages/arbor/package.py | var/spack/repos/builtin/packages/arbor/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Arbor(CMakePackage):
"""Arbor is a high-performance library for computational neuroscience
simulations."""
homepage = "https://github.com/arbor-sim/arbor/"
url = "https://github.com/arbor-sim/arbor/archive/v0.2.tar.gz"
version('0.2', sha256='43c9181c03be5f3c9820b2b50592d7b41344f37e1200980119ad347eb7bcf4eb')
variant('vectorize', default=False,
description='Enable vectorization of computational kernels')
variant('gpu', default=False, description='Enable GPU support')
variant('mpi', default=False, description='Enable MPI support')
variant('python', default=False,
description='Enable Python frontend support')
variant('unwind', default=False,
description='Enable libunwind for pretty stack traces')
depends_on('cuda', when='+gpu')
depends_on('mpi', when='+mpi')
depends_on('libunwind', when='+unwind')
extends('python@3.6:', when='+python')
depends_on('py-mpi4py', when='+mpi+python', type=('build', 'run'))
depends_on('cmake@3.9:', type='build')
# mentioned in documentation but shouldn't be necessary when
# using the archive
# depends_on('git@2.0:', type='build')
# compiler dependencies
# depends_on(C++14)
# depends_on('gcc@6.1.0:', type='build')
# depends_on('llvm@4:', type='build')
# depends_on('clang-apple@9:', type='build')
# when building documentation, this could be an optional dependency
depends_on('py-sphinx', type='build')
def patch(self):
filter_file(
r'find_library\(_unwind_library_target unwind-\${libunwind_arch}',
r'find_library(_unwind_library_target unwind-${_libunwind_arch}',
'cmake/FindUnwind.cmake'
)
filter_file(
r'target_compile_definitions\(arbor-private-deps ARB_WITH_UNWIND\)', # noqa: E501
r'target_compile_definitions(arbor-private-deps INTERFACE WITH_UNWIND)', # noqa: E501
'CMakeLists.txt'
)
def cmake_args(self):
args = [
'-DARB_VECTORIZE=' + ('ON' if '+vectorize' in self.spec else 'OFF'), # noqa: E501
'-DARB_WITH_GPU=' + ('ON' if '+gpu' in self.spec else 'OFF'),
'-DARB_WITH_PYTHON=' + ('ON' if '+python' in self.spec else 'OFF'),
]
if '+unwind' in self.spec:
args.append('-DUnwind_ROOT_DIR={0}'.format(self.spec['libunwind'].prefix)) # noqa: E501
return args
| Python | 0 | |
ac851c402952cf44b24dfdf5277765ff286dd994 | convert embeddingns to js-friendly format | src/convert_embeddings_to_js.py | src/convert_embeddings_to_js.py | import h5py
import json
import numpy as np
def load_embeddings(path):
f = h5py.File(path, 'r')
nemb = f['nemb'][:]
f.close()
return nemb
def load_vocab(path):
vocab = []
with open(path, 'rb') as f:
for line in f.readlines():
split = line.split(' ')
vocab.append((split[0], int(split[1].rstrip())))
# ignore UNK at position 0
return vocab[1:]
def write_to_js(words, embeddings, path):
word_vecs = {}
for word, embedding in zip(words, embeddings):
word_vecs[word] = embedding.tolist()
with open(path, 'wb') as f:
json.dump(word_vecs, f)
f.write(';')
def main():
nemb = load_embeddings(path='/tmp/embeddings.h5')
vocab = load_vocab('/tmp/vocab.txt')
words = [tup[0] for tup in vocab]
# dont use UNK
words = words[1:]
nemb = nemb[1:]
# lower precision, faster
nemb = nemb.astype(np.float16)
write_to_js(words, nemb[1:], path='../../word2vecjson/data/foodVecs.js')
if __name__ == '__main__':
main()
| Python | 0.999997 | |
11efa5583bbeeee7c7823264f6f73715ea81edc0 | Add trivial test for ECO fetching | luigi/tests/ontologies/eco_test.py | luigi/tests/ontologies/eco_test.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ontologies import eco
def test_can_load_all_eco_terms():
source = eco.TermSources(
quickgo_file='data/quickgo/rna.gpa'
)
assert len(list(eco.to_load(source))) == 6
| Python | 0.000002 | |
ec3b080b2f1922f4989b853db45475d185e314de | add all | examples/gcharttestapp/TestGChart00.py | examples/gcharttestapp/TestGChart00.py |
import GChartTestAppUtil
from pyjamas.chart.GChart import GChart
"""* Empty chart without anything on it except a title and footnotes """
class TestGChart00 (GChart):
def __init__(self):
GChart.__init__(self, 150,150)
self.setChartTitle(GChartTestAppUtil.getTitle(self))
self.setChartFootnotes("Check: Consistent with a 'no data' chart (and it doesn't crash).")
| Python | 0.000308 | |
4fdba8a1a5a2123843cc9eefd8949fb8996f59b2 | Add a wrapper for ChromeOS to call into telemetry. | telemetry/telemetry/unittest/run_chromeos_tests.py | telemetry/telemetry/unittest/run_chromeos_tests.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.unittest import gtest_progress_reporter
from telemetry.unittest import run_tests
from telemetry.core import util
def RunTestsForChromeOS(browser_type, unit_tests, perf_tests):
stream = _LoggingOutputStream()
error_string = ''
logging.info('Running telemetry unit tests with browser_type "%s".' %
browser_type)
ret = _RunOneSetOfTests(browser_type, 'telemetry',
os.path.join('telemetry', 'telemetry'),
unit_tests, stream)
if ret:
error_string += 'The unit tests failed.\n'
logging.info('Running telemetry perf tests with browser_type "%s".' %
browser_type)
ret = _RunOneSetOfTests(browser_type, 'perf', 'perf', perf_tests, stream)
if ret:
error_string = 'The perf tests failed.\n'
return error_string
def _RunOneSetOfTests(browser_type, root_dir, sub_dir, tests, stream):
top_level_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', root_dir)
sub_dir = os.path.join(util.GetChromiumSrcDir(), 'tools', sub_dir)
sys.path.append(top_level_dir)
output_formatters = [gtest_progress_reporter.GTestProgressReporter(stream)]
run_tests.config = run_tests.Config(top_level_dir, [sub_dir],
output_formatters)
return run_tests.RunTestsCommand.main(['--browser', browser_type] + tests)
class _LoggingOutputStream(object):
def __init__(self):
self._buffer = []
def write(self, s):
"""Buffer a string write. Log it when we encounter a newline."""
if '\n' in s:
segments = s.split('\n')
segments[0] = ''.join(self._buffer + [segments[0]])
log_level = logging.getLogger().getEffectiveLevel()
try: # TODO(dtu): We need this because of crbug.com/394571
logging.getLogger().setLevel(logging.INFO)
for line in segments[:-1]:
logging.info(line)
finally:
logging.getLogger().setLevel(log_level)
self._buffer = [segments[-1]]
else:
self._buffer.append(s)
def flush(self): # pylint: disable=W0612
pass
| Python | 0.000003 | |
fcce65daf40bb1c198be7ddadee8769bf6feea9b | Create k-order-test.py | k-order-test.py | k-order-test.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 6 16:41:40 2014
@author: xiao
"""
from k_order import *
#number of items to recommand
p=2
fadress = "/home/xiao/ProjetLibre/matrix/matrixInfo"
readDataFromFile(fadress)
getDu()
recommendationListe = zeros((m,p))
############################################
#### We need to recommend top items ####
############################################
#k=1
#recommend top p items for user u
def recommendItems_u(u, p):
#initialize recommendation items to be -1: null
res = zeros(p)-1
D_bar_u = Omega_comp[u]
r = f_bar_d(D_bar_u, u)
indexOrder = argsort(r)
indexOrder = indexOrder[::-1]
if len(indexOrder) >= p:
res = indexOrder[:p]
else:
res[:len(indexOrder)] = indexOrder
return res
#recommend top p items for all m users
def recommendItems(p):
for u in range(m):
r = recommendItems_u(u, p)
recommendationListe[u,:] = r
def f_test(x):
return x**2 - 3*x
def test():
a = arange(5)
b = f_test(a)
c = argsort(b)
c = c[::-1]
return c
#show
def showRecomms():
for u in range(m):
print "u:", u, ",",recommendationListe[u,:]
k_os_AUC()
recommendItems(p)
showRecomms()
######################################################
#### We need to recommend most relavent users ####
######################################################
######################################################
#### test normal AUC ####
######################################################
######################################################
#### test normal WARP ####
######################################################
######################################################
#### test K-os AUC ####
######################################################
######################################################
#### test k-os WARP ####
######################################################
| Python | 0.000032 | |
d73235dd994d3705178d0cff142293444977d764 | Remove bad imports | odo/backends/tests/conftest.py | odo/backends/tests/conftest.py | import os
import shutil
import pytest
@pytest.fixture(scope='session')
def sc():
pyspark = pytest.importorskip('pyspark')
return pyspark.SparkContext('local[*]', 'odo')
@pytest.yield_fixture(scope='session')
def sqlctx(sc):
pyspark = pytest.importorskip('pyspark')
try:
yield pyspark.HiveContext(sc)
finally:
dbpath = 'metastore_db'
logpath = 'derby.log'
if os.path.exists(dbpath):
assert os.path.isdir(dbpath)
shutil.rmtree(dbpath)
if os.path.exists(logpath):
assert os.path.isfile(logpath)
os.remove(logpath)
| import os
import shutil
import pytest
@pytest.fixture(scope='session')
def sc():
pytest.importorskip('pyspark')
from pyspark import SparkContext
return SparkContext('local[*]', 'odo')
@pytest.yield_fixture(scope='session')
def sqlctx(sc):
pytest.importorskip('pyspark')
from odo.backends.sparksql import HiveContext, SQLContext, SPARK_ONE_TWO
try:
yield HiveContext(sc) if not SPARK_ONE_TWO else SQLContext(sc)
finally:
dbpath = 'metastore_db'
logpath = 'derby.log'
if os.path.exists(dbpath):
assert os.path.isdir(dbpath)
shutil.rmtree(dbpath)
if os.path.exists(logpath):
assert os.path.isfile(logpath)
os.remove(logpath)
| Python | 0.000015 |
4c5a8f018af4377ce3f9367b0c66a51a6cad671b | add __init__.py | eatable/__init__.py | eatable/__init__.py |
from .table import Table
from .row import Row
| Python | 0.00212 | |
2b15d2df8333db5f5cd6fcefaf56f5400baba95e | add test_results_table.py | metaseq/test/test_results_table.py | metaseq/test/test_results_table.py | from metaseq import results_table
import metaseq
import numpy as np
fn = metaseq.example_filename('ex.deseq')
d = results_table.ResultsTable(fn)
def test_dataframe_access():
# different ways of accessing get the same data in memory
assert d.id is d.data.id
assert d['id'] is d.data.id
def test_dataframe_subsetting():
assert all(d[:10].data == d.data[:10])
assert all(d.update(d.data[:10]).data == d.data[:10])
def test_copy():
e = d.copy()
e.id = 'a'
assert e.id[0] == 'a'
assert d.id[0] != 'a'
def smoke_tests():
#smoke test for repr
print repr(d)
def test_db():
# should work
d.attach_db(None)
d.attach_db(metaseq.example_filename('dmel-all-r5.33-cleaned.gff.db'))
| Python | 0.000104 | |
2382c1c9daf2b17799ceb03f42a6917966b3162c | add kattis/cold | Kattis/cold.py | Kattis/cold.py | """
Problem: cold
Link: https://open.kattis.com/problems/cold
Source: Kattis
"""
N = int(input())
A = list(map(int, input().split()))
answer = 0
for i in range(len(A)):
answer += (A[i] < 0)
print(answer)
| Python | 0.999545 | |
dd93b450eb0cc92debd8b5cec82f3127c454d77f | put this back... | TheCannon/infer_labels.py | TheCannon/infer_labels.py | from __future__ import (absolute_import, division, print_function, unicode_literals)
from scipy import optimize as opt
import numpy as np
LARGE = 200.
SMALL = 1. / LARGE
def _get_lvec(labels):
"""
Constructs a label vector for an arbitrary number of labels
Assumes that our model is quadratic in the labels
Parameters
----------
labels: numpy ndarray
pivoted label values for one star
Returns
-------
lvec: numpy ndarray
label vector
"""
nlabels = len(labels)
# specialized to second-order model
linear_terms = labels
quadratic_terms = np.outer(linear_terms,
linear_terms)[np.triu_indices(nlabels)]
lvec = np.hstack((linear_terms, quadratic_terms))
return lvec
def _func(coeffs, *labels):
""" Takes the dot product of coefficients vec & labels vector
Parameters
----------
coeffs: numpy ndarray
the coefficients on each element of the label vector
*labels: numpy ndarray
label vector
Returns
-------
dot product of coeffs vec and labels vec
"""
lvec = _get_lvec(list(labels))
return np.dot(coeffs, lvec)
def _infer_labels(model, dataset):
"""
Uses the model to solve for labels of the test set.
Parameters
----------
model: tuple
Coeffs_all, covs, scatters, chis, chisqs, pivots
dataset: Dataset
Dataset that needs label inference
Returns
-------
errs_all:
Covariance matrix of the fit
"""
print("Inferring Labels")
coeffs_all = model.coeffs
scatters = model.scatters
chisqs = model.chisqs
pivots = model.pivots
nlabels = dataset.tr_label.shape[1]
fluxes = dataset.test_flux
ivars = dataset.test_ivar
nstars = fluxes.shape[0]
labels_all = np.zeros((nstars, nlabels))
MCM_rotate_all = np.zeros((nstars, coeffs_all.shape[1] - 1,
coeffs_all.shape[1]-1.))
errs_all = np.zeros((nstars, nlabels))
for jj in range(nstars):
print(jj)
flux = fluxes[jj,:]
ivar = ivars[jj,:]
flux_piv = flux - coeffs_all[:,0] * 1. # pivot around the leading term
sig = np.sqrt(1./ivar + scatters**2)
coeffs = np.delete(coeffs_all, 0, axis=1) # take pivot into account
try:
labels, covs = opt.curve_fit(_func, coeffs, flux_piv,
p0=np.repeat(1, nlabels),
sigma=sig, absolute_sigma=True)
except TypeError: # old scipy version
labels, covs = opt.curve_fit(_func, coeffs, flux_piv,
p0=np.repeat(1, nlabels), sigma=sig)
# rescale covariance matrix
chi = (flux_piv-_func(coeffs, *labels)) / sig
chi2 = (chi**2).sum()
# FIXME: dof does not seem to be right to me (MF)
dof = len(flux_piv) - nlabels
factor = (chi2 / dof)
covs /= factor
labels = labels + pivots
labels_all[jj,:] = labels
errs_all[jj,:] = covs.diagonal()
dataset.set_test_label_vals(labels_all)
return errs_all
| Python | 0 | |
652a03d96cbc5c06850fa62fa3507fb74ee3deab | Create python_ciphertext.py | Encryption/python_ciphertext.py | Encryption/python_ciphertext.py | #Simply how to make a ciphertext only with 1 line.
>>> #hex_encode = 'summonagus'.encode('hex')
>>> hex_encode = '73756d6d6f6e61677573'
>>> chip = ''.join([ str(int(a)*2) if a.isdigit() and int(a) == 3 else str(int(a)/2) if a.isdigit() and int(a) == 6 else a for a in hex_encode ])
>>>
>>> hex_encode
'73756d6d6f6e61677573'
>>> chip
'76753d3d3f3e31377576'
>>>
>>>
| Python | 0.999975 | |
8add0d44139b527d40aaa9da43d023ddde52c410 | Add string python solution | HackerRank/PYTHON/Strings/alphabet_rangoli.py | HackerRank/PYTHON/Strings/alphabet_rangoli.py | #!/usr/bin/env python3
import sys
from string import ascii_lowercase
def print_rangoli(size):
width = size * 4 - 3
alphabet = (ascii_lowercase[0:size])[::-1]
res = []
for i in range(size):
s = ''
for a in alphabet[0:i+1]:
s = '%s-%s' % (s, a)
temp = s + s[::-1][1:]
if len(temp) == width + 2:
temp = temp[1:-1]
res.append(temp)
else:
res.append(temp.center(width, '-'))
print('\n'.join(res))
print('\n'.join(list(reversed(res[0:size - 1]))))
if __name__ == '__main__':
n = int(input())
print_rangoli(n)
| Python | 0.999999 | |
eac74d731b01f732d23ce21e8132fa0785aa1ab2 | Create visible_elements.py | visible_elements.py | visible_elements.py | # -*- coding: utf-8 -*-
import unittest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
class visible_elements(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome("C://chromedriver/chromedriver.exe")
self.driver.maximize_window()
wait = WebDriverWait(self.driver, 10)
def test_clickelements(self):
self.driver.get("http://localhost/litecart/en/")
rows = self.driver.find_elements_by_xpath("//li[@class='product column shadow hover-light']")
def are_elements_present(self, *args):
return len(self.driver.find_elements(*args)) == 1
are_elements_present(self, By.XPATH, "//div[@class='sticker sale']" and "//div[@class='sticker new']" ) in rows
def tearDown(self):
self.driver.close()
if __name__ == "__main__":
unittest.main()
| Python | 0.000003 | |
eaa45d8a9a8cd26379ea7bd3bcee99cbab08d9e7 | Remove hdf5 ~cxx constraint on netcdf | var/spack/repos/builtin/packages/netcdf/package.py | var/spack/repos/builtin/packages/netcdf/package.py | from spack import *
class Netcdf(Package):
"""NetCDF is a set of software libraries and self-describing, machine-independent
data formats that support the creation, access, and sharing of array-oriented
scientific data."""
homepage = "http://www.unidata.ucar.edu/software/netcdf"
url = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.3.3.tar.gz"
version('4.4.0', 'cffda0cbd97fdb3a06e9274f7aef438e')
version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')
variant('mpi', default=True, description='Enables MPI parallelism')
variant('hdf4', default=False, description='Enable HDF4 support')
depends_on("m4")
depends_on("hdf", when='+hdf4')
# Required for DAP support
depends_on("curl")
# Required for NetCDF-4 support
depends_on("zlib")
depends_on("hdf5+mpi", when='+mpi')
depends_on("hdf5~mpi", when='~mpi')
def install(self, spec, prefix):
# Environment variables
CPPFLAGS = []
LDFLAGS = []
LIBS = []
config_args = [
"--prefix=%s" % prefix,
"--enable-fsync",
"--enable-v2",
"--enable-utilities",
"--enable-shared",
"--enable-static",
"--enable-largefile",
# necessary for HDF5 support
"--enable-netcdf-4",
"--enable-dynamic-loading",
# necessary for DAP support
"--enable-dap"
]
# Make sure Netcdf links against Spack's curl
# Otherwise it may pick up system's curl, which could lead to link errors:
# /usr/lib/x86_64-linux-gnu/libcurl.so: undefined reference to `SSL_CTX_use_certificate_chain_file@OPENSSL_1.0.0'
LIBS.append("-lcurl")
CPPFLAGS.append("-I%s" % spec['curl'].prefix.include)
LDFLAGS.append( "-L%s" % spec['curl'].prefix.lib)
if '+mpi' in spec:
config_args.append('--enable-parallel4')
CPPFLAGS.append("-I%s/include" % spec['hdf5'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['hdf5'].prefix)
# HDF4 support
# As of NetCDF 4.1.3, "--with-hdf4=..." is no longer a valid option
# You must use the environment variables CPPFLAGS and LDFLAGS
if '+hdf4' in spec:
config_args.append("--enable-hdf4")
CPPFLAGS.append("-I%s/include" % spec['hdf'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['hdf'].prefix)
LIBS.append( "-l%s" % "jpeg")
if 'szip' in spec:
CPPFLAGS.append("-I%s/include" % spec['szip'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['szip'].prefix)
LIBS.append( "-l%s" % "sz")
# Fortran support
# In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
# Use the netcdf-fortran package to install Fortran support.
config_args.append('CPPFLAGS=%s' % ' '.join(CPPFLAGS))
config_args.append('LDFLAGS=%s' % ' '.join(LDFLAGS))
config_args.append('LIBS=%s' % ' '.join(LIBS))
configure(*config_args)
make()
make("install")
| from spack import *
class Netcdf(Package):
"""NetCDF is a set of software libraries and self-describing, machine-independent
data formats that support the creation, access, and sharing of array-oriented
scientific data."""
homepage = "http://www.unidata.ucar.edu/software/netcdf"
url = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.3.3.tar.gz"
version('4.4.0', 'cffda0cbd97fdb3a06e9274f7aef438e')
version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')
variant('mpi', default=True, description='Enables MPI parallelism')
variant('hdf4', default=False, description="Enable HDF4 support")
# Dependencies:
depends_on("curl") # required for DAP support
depends_on("hdf", when='+hdf4')
depends_on("hdf5+mpi~cxx", when='+mpi') # required for NetCDF-4 support
depends_on("hdf5~mpi", when='~mpi') # required for NetCDF-4 support
depends_on("zlib") # required for NetCDF-4 support
depends_on("m4")
def install(self, spec, prefix):
# Environment variables
CPPFLAGS = []
LDFLAGS = []
LIBS = []
config_args = [
"--prefix=%s" % prefix,
"--enable-fsync",
"--enable-v2",
"--enable-utilities",
"--enable-shared",
"--enable-static",
"--enable-largefile",
# necessary for HDF5 support
"--enable-netcdf-4",
"--enable-dynamic-loading",
# necessary for DAP support
"--enable-dap"
]
# Make sure Netcdf links against Spack's curl
# Otherwise it may pick up system's curl, which could lead to link errors:
# /usr/lib/x86_64-linux-gnu/libcurl.so: undefined reference to `SSL_CTX_use_certificate_chain_file@OPENSSL_1.0.0'
LIBS.append("-lcurl")
CPPFLAGS.append("-I%s" % spec['curl'].prefix.include)
LDFLAGS.append ("-L%s" % spec['curl'].prefix.lib)
if '+mpi' in spec:
config_args.append('--enable-parallel4')
CPPFLAGS.append("-I%s/include" % spec['hdf5'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['hdf5'].prefix)
# HDF4 support
# As of NetCDF 4.1.3, "--with-hdf4=..." is no longer a valid option
# You must use the environment variables CPPFLAGS and LDFLAGS
if '+hdf4' in spec:
config_args.append("--enable-hdf4")
CPPFLAGS.append("-I%s/include" % spec['hdf'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['hdf'].prefix)
LIBS.append( "-l%s" % "jpeg")
if 'szip' in spec:
CPPFLAGS.append("-I%s/include" % spec['szip'].prefix)
LDFLAGS.append( "-L%s/lib" % spec['szip'].prefix)
LIBS.append( "-l%s" % "sz")
# Fortran support
# In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
# Use the netcdf-fortran package to install Fortran support.
config_args.append('CPPFLAGS=%s' % ' '.join(CPPFLAGS))
config_args.append('LDFLAGS=%s' % ' '.join(LDFLAGS))
config_args.append('LIBS=%s' % ' '.join(LIBS))
configure(*config_args)
make()
make("install")
| Python | 0.000001 |
5343c89686fd05cf251388e1f28bfd4343d4c277 | Add python-based CPU implementation | src/CPU/color_histogram.py | src/CPU/color_histogram.py | from PIL import Image
from collections import defaultdict
import sys
im = Image.open(sys.argv[1])
colors = defaultdict(int)
for pixel in im.getdata():
colors[pixel] += 1
print colors
| Python | 0.000074 | |
f408465521484032631adfe9dced21119ad2bf82 | Revert "Delete old MultiServer implementation" | MultiServer.py | MultiServer.py | from multiprocessing import Process
import subprocess
import GlobalVars
def botInstance(server, channels):
args = ["python", "hubbebot.py"]
args.append(server)
for chan in channels:
args.append(chan)
subprocess.call(args)
if __name__ == "__main__":
for (server,channels) in GlobalVars.connections.items():
p = Process(target=botInstance, args=(server, channels))
p.start()
| Python | 0 | |
2ef9618e705bb293641674ca5e7cc1f14daf3483 | Set default branding for all organisations | migrations/versions/0285_default_org_branding.py | migrations/versions/0285_default_org_branding.py | """empty message
Revision ID: 0285_default_org_branding
Revises: 0284_0283_retry
Create Date: 2016-10-25 17:37:27.660723
"""
# revision identifiers, used by Alembic.
revision = '0285_default_org_branding'
down_revision = '0284_0283_retry'
from alembic import op
import sqlalchemy as sa
BRANDING_TABLES = ('email_branding', 'letter_branding')
def upgrade():
for branding in BRANDING_TABLES:
op.execute("""
UPDATE
organisation
SET
{branding}_id = {branding}.id
FROM
{branding}
WHERE
{branding}.domain in (
SELECT
domain
FROM
domain
WHERE
domain.organisation_id = organisation.id
)
""".format(branding=branding))
def downgrade():
for branding in BRANDING_TABLES:
op.execute("""
UPDATE
organisation
SET
{branding}_id = null
""".format(branding=branding))
| Python | 0 | |
52b870d36370f46fdc33de2948504c2aec8db1a1 | fix field names in network object | planetstack/core/migrations/0002_network_field_case.py | planetstack/core/migrations/0002_network_field_case.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import timezones.fields
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='networktemplate',
old_name='controllerKind',
new_name='controller_kind',
),
migrations.RenameField(
model_name='networktemplate',
old_name='guaranteedBandwidth',
new_name='guaranteed_bandwidth',
),
migrations.RenameField(
model_name='networktemplate',
old_name='sharedNetworkId',
new_name='shared_network_id',
),
migrations.RenameField(
model_name='networktemplate',
old_name='sharedNetworkName',
new_name='shared_network_name',
),
migrations.RenameField(
model_name='networktemplate',
old_name='topologyKind',
new_name='topology_kind',
),
]
| Python | 0.000003 | |
9b584c6d23ad93fd497fb2e71d2343a954cea4e5 | Create PaulFinalproject.py | PaulFinalproject.py | PaulFinalproject.py | Python | 0 | ||
8462466f8a21f25f85b8a06076877361b2545a12 | Add initialize script | PyResis/__init__.py | PyResis/__init__.py | __author__ = 'Yu Cao'
| Python | 0.000002 | |
8fcc727f9a7fbd886bc900f9c24cf2711a0c5b99 | Create Record.py | Record.py | Record.py | """
The MIT License (MIT)
Copyright (c) <2016> <Larry McCaig (aka: Larz60+ aka: Larz60p)>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from collections import namedtuple
import json
class Record(object):
def __init__(self, filename=None):
with open(filename, 'r') as f:
self.j = f.read()
self.record = json.loads(self.j, object_hook=lambda j:
namedtuple('data', j.keys())(*j.values()))
self.recindex = len(self.record)
self.index = 0
def __iter__(self):
self.index = self.recindex
return self
def __next__(self):
if self.index == 0:
raise StopIteration
self.index -= 1
return self.record[self.index]
| Python | 0.000001 | |
1708eb17fb9c232414b0e162754ca31b6fd9366c | Add tests for plagiarism filter command | services/comprehension/main-api/comprehension/tests/management/commands/test_pre_filter_responses.py | services/comprehension/main-api/comprehension/tests/management/commands/test_pre_filter_responses.py | import csv
from io import StringIO
from unittest.mock import call, MagicMock, patch
from django.test import TestCase
from ....views.plagiarism import PlagiarismFeedbackView
from ....management.commands import pre_filter_responses
Command = pre_filter_responses.Command
class TestCommandBase(TestCase):
def setUp(self):
self.command = Command()
class TestPreFilterResponsesCommand(TestCommandBase):
def test_add_arguments(self):
mock_parser = MagicMock()
self.command.add_arguments(mock_parser)
self.assertEqual(mock_parser.add_argument.call_count, 2)
mock_parser.assert_has_calls([
call.add_argument('passage_source', metavar='PASSAGE_SOURCE',
help='The path to the file with the passage'),
call.add_argument('csv_input', metavar='CSV_PATH',
help='The path to the input CSV file'),
])
@patch.object(PlagiarismFeedbackView, '_check_is_plagiarism')
@patch.object(Command, '_retrieve_passage')
@patch.object(csv, 'reader')
@patch.object(csv, 'writer')
@patch(f'{pre_filter_responses.__name__}.open')
def test_extract_create_feedback_kwargs(self, mock_open, mock_writer,
mock_reader, mock_retrieve,
mock_check_plagiarism):
mock_csv_input = 'MOCK_CSV_INPUT'
kwargs = {
'passage_source': 'MOCK_PASSAGE_SOURCE',
'csv_input': mock_csv_input,
}
file_name = 'FAKE FILE NAME'
mock_handler = mock_open.return_value
mock_file_content = StringIO('HEADER\nVALUE')
mock_handler.__enter__.return_value = mock_file_content
mock_reader_row = 'MOCK_ROW'
mock_reader.next.return_value = mock_reader_row
mock_check_plagiarism.return_value = False
self.command.handle(**kwargs)
mock_open.assert_has_calls([
call(mock_csv_input, 'r'),
call().__enter__(),
call(f'filtered_{mock_csv_input}', 'w'),
call().__enter__(),
call().__exit__(None, None, None),
call().__exit__(None, None, None),
])
mock_retrieve.assert_called_with(kwargs['passage_source'])
mock_writer.assert_called()
| Python | 0 | |
06ced5abe2226a234c2e2887fbf84f18dfa7ddc4 | Update timer for new label. Clean up a bit and use more pyglet 1.1 features. | examples/timer.py | examples/timer.py | '''A full-screen minute:second timer. Leave it in charge of your conference
lighting talks.
After 5 minutes, the timer goes red. This limit is easily adjustable by
hacking the source code.
Press spacebar to start, stop and reset the timer.
'''
import pyglet
window = pyglet.window.Window(fullscreen=True)
class Timer(object):
def __init__(self):
self.label = pyglet.text.Label('00:00', font_size=360,
x=window.width//2, y=window.height//2,
valign='center', halign='center')
self.reset()
def reset(self):
self.time = 0
self.running = False
self.label.text = '00:00'
self.label.color = (255, 255, 255, 255)
def update(self, dt):
if self.running:
self.time += dt
m, s = divmod(self.time, 60)
self.label.text = '%02d:%02d' % (m, s)
if m >= 5:
self.label.color = (180, 0, 0, 255)
@window.event
def on_key_press(symbol, modifiers):
if symbol == pyglet.window.key.SPACE:
if timer.running:
timer.running = False
else:
if timer.time > 0:
timer.reset()
else:
timer.running = True
elif symbol == pyglet.window.key.ESCAPE:
window.close()
@window.event
def on_draw():
window.clear()
timer.label.draw()
timer = Timer()
pyglet.clock.schedule_interval(timer.update, 1)
pyglet.app.run()
| from pyglet import window
from pyglet import text
from pyglet import clock
from pyglet import font
w = window.Window(fullscreen=True)
class Timer(text.Label):
    """Label subclass rendering an mm:ss clock (pyglet 1.0-era style).

    Toggled by the spacebar via the ``on_text`` event handler; ``animate``
    is scheduled on the pyglet clock.  The double-underscore attributes
    (``__time``, ``__running``) are name-mangled so they cannot collide
    with ``text.Label`` internals.
    """

    def stop(self):
        # NOTE(review): this only zeroes the elapsed time and does not
        # clear ``__running``, so a "stopped" timer keeps counting --
        # looks like a latent bug in this historical version; confirm
        # against callers before relying on it.
        self.__time = 0

    def reset(self):
        self.__time = 0
        self.__running = False
        self.text = '00:00'

    def animate(self, dt):
        # Clock callback with the frame delta in seconds; only advances
        # while the timer is running.
        if self.__running:
            self.__time += dt
            m, s = divmod(self.__time, 60)
            self.text = '%02d:%02d'%(m, s)

    def on_text(self, text):
        # Window event handler: spacebar toggles run/pause.  Returning
        # True marks the event as handled; False lets it propagate.
        if text == ' ':
            self.__running = not self.__running
            return True
        return False
ft = font.load('', 360)
timer = Timer('00:00', ft, x=w.width//2, y=w.height//2,
valign='center', halign='center')
timer.reset()
clock.schedule(timer.animate)
w.push_handlers(timer)
while not w.has_exit:
w.dispatch_events()
clock.tick()
w.clear()
timer.draw()
w.flip()
| Python | 0 |
064c1a5bd8790c9ea407f62de0428657354e979f | Create jcolor.py | jcolor.py | jcolor.py | # colors
HEADER = '\033[95m'
FAIL = '\033[91m'
FGBLUE2 = '\033[94m'
FGGREEN2 = '\033[92m'
FGORANGE = '\033[93m'
FGGRAY = '\033[30m'
FGRED = '\033[31m'
FGGREEN = '\033[32m'
FGYELLOW = '\033[33m'
FGBLUE = '\033[34m'
FGMAG = '\033[35m'
FGCYAN = '\033[36m'
FGWHITE = '\033[37m'
# FGGRAY = '\033[61m'
BGBLACK = '\033[40m'
BGRED = '\033[41m'
BGGREEN = '\033[42m'
BGYELLOW = '\033[43m'
BGBLUE = '\033[44m'
BGMAG = '\033[45m'
BGCYAN = '\033[46m'
BGWHITE = '\033[47m'
# end color(s)
ENDC = '\033[0m'
# format settings
BOLDON = '\033[1m'
BOLDOFF = '\033[22m'
ITALON = '\033[3m'
ITALOFF = '\033[23m'
UNDLNON = '\033[4m'
UNDLNOFF = '\033[24m'
INVON = '\033[7m'
INVOFF = '\033[27m'
STRKTHRUON = '\033[9m'
STRKTHRUOFF = '\033[29m'
| Python | 0.000001 | |
040911e2343ec6753c767eff44be2cf54eb33ff8 | add file name to fasta sequence headers | add_file_name_to_reads.py | add_file_name_to_reads.py | import os
import sys
from Bio import SeqIO
out = open(sys.argv[2], 'w')
for records in SeqIO.parse(open(sys.argv[1], 'rU'), "fasta"):
records.id = records.id.strip() + '%s' % sys.argv[1].split('.')[0]
records.name = records.id
records.description = records.id
SeqIO.write(records, out, 'fasta')
| Python | 0 | |
c420f6bf996c53fa8958956626c136ac0e9e55f6 | Add sonos updater plugin. | beetsplug/sonosupdate.py | beetsplug/sonosupdate.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
Put something like the following in your config.yaml to configure:
kodi:
host: localhost
port: 8080
user: user
pwd: secret
"""
from __future__ import division, absolute_import, print_function
from beets import config
from beets.plugins import BeetsPlugin
import six
import soco
class SonosUpdate(BeetsPlugin):
    """Trigger a Sonos music library update when the beets library changes.

    Registers for ``database_change`` and, once the CLI is about to exit,
    asks any discoverable Sonos controller to rescan its library.
    """

    def __init__(self):
        super(SonosUpdate, self).__init__()
        self.register_listener('database_change', self.listen_for_db_change)

    def listen_for_db_change(self, lib, model):
        """Listens for beets db change and register the update."""
        # Defer the actual update until the client exits so a batch of
        # changes results in a single Sonos rescan.
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When the client exits, try to send a refresh request to a
        Sonos controller.
        """
        self._log.info(u'Requesting a Sonos library update...')

        # Try to send the update request.  Catch Exception rather than a
        # bare ``except:`` so KeyboardInterrupt/SystemExit still propagate.
        # Also covers ``any_soco()`` returning None (AttributeError).
        try:
            device = soco.discovery.any_soco()
            device.music_library.start_library_update()
        except Exception:
            self._log.warning(u'Sonos update failed')
            return

        self._log.info(u'Sonos update triggered')
| Python | 0 | |
797114781ed4f31c265c58a76e39aa8ff6a16443 | Add missing file from last commit | tensorpack/utils/compatible_serialize.py | tensorpack/utils/compatible_serialize.py | #!/usr/bin/env python
import os
from .serialize import loads_msgpack, loads_pyarrow, dumps_msgpack, dumps_pyarrow
"""
Serialization that has compatibility guarantee (therefore is safe to store to disk).
"""
__all__ = ['loads', 'dumps']
# pyarrow has no compatibility guarantee
# use msgpack for persistent serialization, unless explicitly set from envvar
if os.environ.get('TENSORPACK_COMPATIBLE_SERIALIZE', 'msgpack') == 'msgpack':
loads = loads_msgpack
dumps = dumps_msgpack
else:
loads = loads_pyarrow
dumps = dumps_pyarrow
| Python | 0.000001 | |
2f155e1dafd5302dfbf4607af81bfa979046be8e | add test file | junk/t.py | junk/t.py | def f():
print "hi"
f() | Python | 0.000001 | |
cfb39d7389d63a293dc075d420f80276a34df193 | Add minimal pygstc example to play a video | examples/pygstc/simple_pipeline.py | examples/pygstc/simple_pipeline.py | import time
import sys
from pygstc.gstc import *
from pygstc.logger import *
#Create a custom logger with loglevel=DEBUG
gstd_logger = CustomLogger('simple_pipeline', loglevel='DEBUG')
#Create the client with the logger
gstd_client = GstdClient(logger=gstd_logger)
def printError():
    """Print usage for every sub-command of this demo script.

    Fixes copy-pasted lines that labelled the ``reverse`` and
    ``slow_motion`` commands as "To stop run".
    """
    print("To play run: python3 simple_pipeline.py play VIDEO_PATH")
    print("To stop run: python3 simple_pipeline.py stop")
    print("To play in reverse run: python3 simple_pipeline.py reverse")
    print("To play in slow motion run: python3 simple_pipeline.py slow_motion")
if(len(sys.argv) > 1):
if(sys.argv[1]=="play"):
FILE_SOURCE = sys.argv[2]
#pipeline is the string with the pipeline description
pipeline = "playbin uri=file:"+FILE_SOURCE
#Following instructions create and play the pipeline
gstd_client.pipeline_create ("p0", pipeline)
gstd_client.pipeline_play ("p0")
print("Playing")
# Check this
# reverse and slow motion restart the pipeline
elif(sys.argv[1]== "reverse"):
gstd_client.event_seek("p0", rate=-1.0, format=3, flags=1, start_type=1, start=0, end_type=1, end=-1)
print("Playing in reverse")
elif(sys.argv[1]== "slow_motion"):
gstd_client.event_seek("p0", rate=0.5, format=3, flags=1, start_type=1, start=0, end_type=1, end=-1)
print("Playing in slow motion")
elif(sys.argv[1]== "stop"):
#Following instructions stop and delete the pipeline
gstd_client.pipeline_stop ("p0")
gstd_client.pipeline_delete ("p0")
print("Pipeline deleted")
else:
printError()
else:
printError()
| Python | 0 | |
f8d06f85e896c1098f58667c161d920f6d255d7b | Add utility for sent mail | sendmail/log_mail.py | sendmail/log_mail.py | # -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys

from datetime import datetime
from smtplib import SMTP, SMTP_SSL
# Parameter:
smtp_host = 'smtp.qboxmail.com'
smtp_port = 465
smtp_user = 'account@example.it'  # was 'account@example.it'' -- stray quote was a SyntaxError
smtp_password = 'password'
from_address = 'from@example.it'
to_address = 'dest@example.it'
subject = 'Subject'
body = 'body'

# Send mail.  Port 465 is implicit TLS, so SMTP_SSL is required; a plain
# SMTP connection to 465 would fail the handshake.
smtp = SMTP_SSL(smtp_host, smtp_port)
smtp.set_debuglevel(0)
smtp.login(smtp_user, smtp_password)

# '%d' is day-of-month; the original '%Y-%m-%s' used an invalid directive.
date = datetime.now().strftime('%Y-%m-%d %H:%M')
smtp.sendmail(
    # The original referenced undefined names from_addr/to_addr (NameError);
    # use the variables actually defined above.
    from_address, to_address,
    'From: %s\nTo: %s\nSubject: %s\nDate: %s\n\n%s' % (
        from_address,
        to_address,
        subject,
        date,
        body,
    ),
)
smtp.quit()
| Python | 0 | |
ddac657da2743c7435e8408677406d37eaea5836 | Add migration. | instance/migrations/0041_auto_20160420_1409.py | instance/migrations/0041_auto_20160420_1409.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instance', '0040_auto_20160420_0754'),
]
operations = [
migrations.AlterField(
model_name='openstackserver',
name='status',
field=models.CharField(choices=[('pending', 'Pending'), ('building', 'Building'), ('booting', 'Booting'), ('ready', 'Ready'), ('terminated', 'Terminated'), ('unknown', 'Unknown'), ('failed', 'BuildFailed')], max_length=20, db_index=True, default='pending'),
),
migrations.RunSQL(
[
"UPDATE instance_openstackserver SET status = 'pending' WHERE status = 'new'",
"UPDATE instance_openstackserver SET status = 'building' WHERE status = 'started'",
"UPDATE instance_openstackserver SET status = 'booting' WHERE status = 'active' OR status = 'rebooting'",
"UPDATE instance_openstackserver SET status = 'ready' WHERE status = 'booted' OR status = 'provisioning'",
],
)
]
| Python | 0 | |
4c6964a6043c6c5bb3df7ad184e2c6a5537ca6da | Create __init__.py | intelmq/tests/bots/experts/fqdn2ip/__init__.py | intelmq/tests/bots/experts/fqdn2ip/__init__.py | Python | 0.000429 | ||
ce5ca3ac3268af331150f66865072a049869b3b2 | add abstraction magics | abstraction.py | abstraction.py | """
abstraction magics
let's you turn a cell into a function
In [1]: plot(x, f(y))
...: xlabel('x')
...: ylabel('y')
In [2]: %functionize 1
"""
from IPython.utils.text import indent
def parse_ranges(s):
    """Parse a comma-separated list of history line ranges.

    Each piece is either a single index ("3") or an inclusive span
    ("2-5").  Returns a list of half-open ``(start, stop)`` tuples
    suitable for ``HistoryManager.get_range``.
    """
    result = []
    for piece in s.split(','):
        if '-' in piece:
            lo, hi = piece.split('-')
            span = (int(lo), int(hi) + 1)  # make the upper bound inclusive
        else:
            idx = int(piece)
            span = (idx, idx + 1)
        result.append(span)
    return result
def functionize(line):
    """Line magic: turn previously-run cells into one function definition.

    *line* is ``"<ranges> [args]"``, e.g. ``1-3,5 x, y``: the history
    ranges to include, optionally followed by the new function's
    parameter list.  The generated source is placed in the next input
    cell for review -- it is not executed.
    """
    shell = get_ipython()
    splits = line.split(' ', 1)
    range_str = splits[0]
    # Everything after the first space becomes the def's parameter list.
    args = splits[1] if len(splits) > 1 else ''
    ranges = parse_ranges(range_str)
    get_range = shell.history_manager.get_range
    blocks = ["def cell_function(%s):" % args]
    for start, stop in ranges:
        # Session 0 selects the current IPython session's history.
        cursor = get_range(0, start, stop)
        for session_id, cell_id, code in cursor:
            blocks.append(indent(code))
    code = '\n'.join(blocks)
    # Pre-fill the next prompt so the user can edit before running.
    shell.set_next_input(code)
def load_ipython_extension(ip):
    """IPython extension hook: expose ``%functionize`` as a line magic."""
    ip.magics_manager.register_function(functionize)
9af5c4e79234a47ac26e5d1890e70f741363b18a | Create factorise_test.py | factorise_test.py | factorise_test.py | Python | 0.000001 | ||
425a8e26d371038f6ebf7c80dd7faea0f1dd906e | Add base test for admin endpoints [WAL-883] | nodeconductor/core/tests/unittests/test_admin.py | nodeconductor/core/tests/unittests/test_admin.py | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
User = get_user_model()
class TestAdminEndpoints(TestCase):
def setUp(self):
user, _ = User.objects.get_or_create(username='username', is_staff=True)
self.client.force_login(user)
self.admin_site_name = admin.site.name
def _reverse_url(self, path):
return reverse('%s:%s' % (self.admin_site_name, path))
def test_app_list_ulrs_can_be_queried(self):
app_list_urls = dict()
for model in admin.site._registry:
app_list_url = reverse('%s:%s' % (self.admin_site_name, 'app_list'), args=(model._meta.app_label,))
app_list_urls.update({model._meta.app_label: app_list_url})
for url in app_list_urls.values():
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_base_admin_site_urls_can_be_queried(self):
pages = ['index', 'login', 'logout', 'password_change', 'password_change_done', 'jsi18n']
for name in pages:
url = self._reverse_url(name)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 302])
def test_changelist_urls_can_be_queried(self):
for model in admin.site._registry:
url = self._reverse_url('%s_%s_changelist' % (model._meta.app_label, model._meta.model_name))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_add_urls_can_be_queried(self):
for model in admin.site._registry:
model_fullname = '%s_%s' % (model._meta.app_label, model._meta.model_name)
url = self._reverse_url('%s_add' % model_fullname)
response = self.client.get(url)
self.assertIn(response.status_code, [200, 403])
| Python | 0 | |
90a30ae1b3165c03f6de5458c92f8ecb9d3f948a | Add homework min_three | domaci-zadaci/05/test_min_three.py | domaci-zadaci/05/test_min_three.py | from solutions import min_three
import unittest
import random
class TestMinThree(unittest.TestCase):
    """Compare min_three with the builtin min on random float triples."""

    def test_1000_cases(self):
        for _ in range(1000):
            # Three independent floats uniformly drawn from (-1000, 1000).
            a = (random.random() - 0.5) * 2000
            b = (random.random() - 0.5) * 2000
            c = (random.random() - 0.5) * 2000
            expected = min(a, b, c)
            # Every ordering of the arguments must give the same answer.
            for ordering in ((a, b, c), (a, c, b), (b, a, c),
                             (b, c, a), (c, a, b), (c, b, a)):
                self.assertEqual(expected, min_three(*ordering))
if __name__ == '__main__':
unittest.main()
| Python | 0.00046 | |
aad51679cc2e4e719ed12e3983b54dcf15a2c06f | Update slack.py | graphite_beacon/handlers/slack.py | graphite_beacon/handlers/slack.py | import json
from tornado import gen, httpclient as hc
from graphite_beacon.handlers import AbstractHandler, LOGGER
from graphite_beacon.template import TEMPLATES
class SlackHandler(AbstractHandler):
name = 'slack'
# Default options
defaults = {
'webhook': None,
'channel': None,
'username': 'graphite-beacon',
}
emoji = {
'critical': ':exclamation:',
'warning': ':warning:',
'normal': ':white_check_mark:',
}
def init_handler(self):
self.webhook = self.options.get('webhook')
assert self.webhook, 'Slack webhook is not defined.'
self.channel = self.options.get('channel')
if self.channel and not self.channel.startswith(('#', '@')):
self.channel = '#' + self.channel
self.username = self.options.get('username')
self.client = hc.AsyncHTTPClient()
def get_message(self, level, alert, value, target=None, ntype=None, rule=None):
msg_type = 'slack' if ntype == 'graphite' else 'short'
tmpl = TEMPLATES[ntype][msg_type]
return tmpl.generate(
level=level, reactor=self.reactor, alert=alert, value=value, target=target).strip()
@gen.coroutine
def notify(self, level, *args, **kwargs):
LOGGER.debug("Handler (%s) %s", self.name, level)
message = self.get_message(level, *args, **kwargs)
data = dict()
data['username'] = self.username
data['text'] = message
data['icon_emoji'] = self.emoji.get(level, ':warning:')
if self.channel:
data['channel'] = self.channel
body = json.dumps(data)
yield self.client.fetch(self.webhook, method='POST', body=body)
| import json
from tornado import gen, httpclient as hc
from graphite_beacon.handlers import AbstractHandler, LOGGER
from graphite_beacon.template import TEMPLATES
class SlackHandler(AbstractHandler):
name = 'slack'
# Default options
defaults = {
'webhook': None,
'channel': None,
'username': 'graphite-beacon',
}
emoji = {
'critical': ':exclamation:',
'warning': ':warning:',
'normal': ':white_check_mark:',
}
def init_handler(self):
self.webhook = self.options.get('webhook')
assert self.webhook, 'Slack webhook is not defined.'
self.channel = self.options.get('channel')
if self.channel and not self.channel.startswith('#'):
self.channel = '#' + self.channel
self.username = self.options.get('username')
self.client = hc.AsyncHTTPClient()
def get_message(self, level, alert, value, target=None, ntype=None, rule=None):
msg_type = 'slack' if ntype == 'graphite' else 'short'
tmpl = TEMPLATES[ntype][msg_type]
return tmpl.generate(
level=level, reactor=self.reactor, alert=alert, value=value, target=target).strip()
@gen.coroutine
def notify(self, level, *args, **kwargs):
LOGGER.debug("Handler (%s) %s", self.name, level)
message = self.get_message(level, *args, **kwargs)
data = dict()
data['username'] = self.username
data['text'] = message
data['icon_emoji'] = self.emoji.get(level, ':warning:')
if self.channel:
data['channel'] = self.channel
body = json.dumps(data)
yield self.client.fetch(self.webhook, method='POST', body=body)
| Python | 0 |
34908071bd11470806a84d9f76c630fd3fcc2d4b | test file :-) | tests/gsim/abrahamson_silva_2008_test.py | tests/gsim/abrahamson_silva_2008_test.py | # nhlib: A New Hazard Library
# Copyright (C) 2012 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nhlib.gsim.abrahamson_silva_2008 import AbrahamsonSilva2008
from tests.gsim.utils import BaseGSIMTestCase
# Test data have been generated from Fortran implementation
# of Dave Boore available at:
# http://www.daveboore.com/software_online.html
# Note that the Fortran implementation has been modified not
# to compute the 'Constant Displacement Model' term
class AbrahamsonSilva2008TestCase(BaseGSIMTestCase):
GSIM_CLASS = AbrahamsonSilva2008
def test_mean(self):
self.check('AS08/AS08_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_inter(self):
self.check('AS08/AS08_STD_INTER.csv',
max_discrep_percentage=0.1)
def test_std_intra(self):
self.check('AS08/AS08_STD_INTRA.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('AS08/AS08_STD_TOTAL.csv',
max_discrep_percentage=0.1)
| Python | 0 | |
9fc373bbfa606aeb23c237df9c8d9143e14b60a1 | structure of preprocessing module for lea to fill in | code/python/seizures/preprocessing/preprocessing.py | code/python/seizures/preprocessing/preprocessing.py | import scipy.signal
def preprocess_multichannel_data(matrix):
n_channel,m= matrix.shape
for i in range(n_channel):
preprocess_single_channel(matrix[i,:])
def preprocess_single_channel(x):
x = remove_elec_noise(x)
x = hp_filter(x)
x = remove_dc(x)
return x
def remove_dc():
"""
Remove mean of signal
:return:
"""
pass
def remove_elec_noise():
"""
Bandpass remove:49-51Hz
:return:
"""
pass
def hp_filter():
"""
Anti_aliasing
:return:
"""
pass | Python | 0 | |
3e7429a36532e7c731d5d254b853dd72bdd94c82 | Create test.py | home/crap0101/test/500fup/test.py | home/crap0101/test/500fup/test.py | import operator
import re
import sys
import time
import urlparse
import fhp.api.five_hundred_px as _fh
import fhp.helpers.authentication as _a
from fhp.models.user import User
_TREG = re.compile('^(\d+)-(\d+)-(\d+).*?(\d+):(\d+):(\d+).*')
_URL = 'http://500px.com/'
_HTML_BEGIN = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<HTML>
<HEAD>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<TITLE>following last updates</TITLE>
</HEAD>
<BODY>
'''
_HTML_END = '''</BODY>
</HTML>'''
FMT_TEXT = 'txt'
FMT_HTML = 'html'
def get_time (s):
return time.strptime(' '.join(_TREG.match(s).groups()),
'%Y %m %d %H %M %S')
def get_last_upload_photo (user):
return sorted(user.photos,
reverse=True, key=lambda p: get_time(p.created_at))[0]
def get_sorted_data (user):
return sorted([(u, u.photos[0].created_at) for u in user.friends],
reverse=True, key=operator.itemgetter(1))
def _get_sorted_data (user): # like get_sorted_data but slower :-D
return sorted([(u, get_last_upload_photo(u).created_at) for u in user.friends],
reverse=True, key=operator.itemgetter(1))
def format_info_html (data):
yield _HTML_BEGIN
for user, date in data:
yield '<a href="%s">%s</a> (%s)<p>' % (
urlparse.urljoin(_URL, user.username), user.fullname.strip(), date)
#time.strftime('%Y-%m-%d %H:%M:%S', get_time(date))) # last for debug only
yield _HTML_END
def format_info_txt (data):
for user, date in data:
yield '%s (%s, %s)' % (
urlparse.urljoin(_URL, user.username), user.fullname.strip(), date)
#time.strftime('%Y-%m-%d %H:%M:%S', get_time(date))) # last for debug only
def print_info(data, fmt=FMT_HTML):
if fmt == FMT_HTML:
func = format_info_html
elif fmt == FMT_TEXT:
func = format_info_txt
else:
raise ValueError("unknown format <%s>" % fmt)
for out in func(data):
print out
if __name__ == '__main__':
_f = _fh.FiveHundredPx(_a.get_consumer_key(),
_a.get_consumer_secret(),
_a.get_verify_url())
username = sys.argv[1].encode('utf-8')
me = User(username=username)
sorted_uploads = get_sorted_data(me)
print_info(sorted_uploads)
##############################################
sys.exit()
if 0:
__t = []
#class
for i in range(10):
__t.append(time.localtime())
sleep(1)
me.friends = 8
"""
print type(me), dir(me), me.id
print "------"
print type(f), dir(f)
for i in me.friends:
print i.fullname, i.username, i.id, i.domain, dir(i)
break
if 0:
for p in sorted((x.created_at for x in i.photos), reverse=True, key=lambda s:get_time(s)):
print p
break
break
for p in i.photos:
print p, p.created_at, p.id
break
print list(f.get_user_friends(username))
print type(me), len(me)
print dir(f)
"""
| Python | 0 | |
42c82bc865d69b904ec688aa152caf3a247df1c6 | Create frontdoor.py | home/pi/PirFrontDoor/frontdoor.py | home/pi/PirFrontDoor/frontdoor.py | #!/usr/bin/python
import RPi.GPIO as GPIO
import time
import requests
GPIO.setmode(GPIO.BCM)
PIR_PIN = 22
GPIO.setup(PIR_PIN, GPIO.IN)
def MOTION(PIR_PIN):
    # GPIO callback fired on a rising edge from the PIR motion sensor.
    # Posts an IFTTT Maker webhook; "{Event}" and "{secret key}" in the
    # URL are literal placeholders the user must replace with the real
    # trigger name and key before this can work.
    print "Motion Detected!"
    payload = { 'value1' : 'Someone at Front Door'}
    r = requests.post("https://maker.ifttt.com/trigger/{Event}/with/key/{secret key}", data=payload)
    print r.text
print "PIR Module Test (CTRL+C to exit)"
time.sleep(2)
print "Ready"
try:
GPIO.add_event_detect(PIR_PIN, GPIO.RISING, callback=MOTION)
while 1:
time.sleep(120)
except KeyboardInterrupt:
print "Quit"
GPIO.cleanup()
| Python | 0.000024 | |
1d7451fd6eca8a68832b676ef0a696e8de801533 | Update services_and_index_sync.py | tendrl/node_agent/node_sync/services_and_index_sync.py | tendrl/node_agent/node_sync/services_and_index_sync.py | import json
import etcd
from tendrl.commons.event import Event
from tendrl.commons.message import ExceptionMessage
from tendrl.commons.message import Message
from tendrl.commons.utils import etcd_utils
# TODO(darshan) this has to be moved to Definition file
TENDRL_SERVICES = [
"tendrl-node-agent",
"etcd",
"tendrl-api",
"tendrl-gluster-integration",
"tendrl-ceph-integration",
"glusterd",
"ceph-mon@*",
"ceph-osd@*",
"ceph-installer"
]
def sync(sync_ttl=None):
try:
tags = []
# update node agent service details
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "node_sync, Updating Service data"}
)
)
for service in TENDRL_SERVICES:
s = NS.tendrl.objects.Service(service=service)
if s.running:
service_tag = NS.compiled_definitions.get_parsed_defs()[
'namespace.tendrl'
]['tags'][service.strip("@*")]
tags.append(service_tag)
if service_tag == "tendrl/server":
tags.append("tendrl/monitor")
s.save()
# Try to claim orphan "provisioner_%integration_id" tag
_cluster = NS.tendrl.objects.Cluster(integration_id=NS.tendrl_context.integration_id).load()
try:
if _cluster.is_managed == "yes":
_tag = "provisioner/%s" % _cluster.integration_id
_index_key = "/indexes/tags/%s" % _tag
etcd_utils.read(_index_key)
except etcd.EtcdKeyNotFound:
tags.append(_tag)
# updating node context with latest tags
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "node_sync, updating node context "
"data with tags"
}
)
)
NS.node_context = NS.tendrl.objects.NodeContext().load()
current_tags = list(NS.node_context.tags)
tags += current_tags
NS.node_context.tags = list(set(tags))
NS.node_context.tags.sort()
current_tags.sort()
if NS.node_context.tags != current_tags:
NS.node_context.save()
# Update /indexes/tags/:tag = [node_ids]
for tag in NS.node_context.tags:
index_key = "/indexes/tags/%s" % tag
_node_ids = []
try:
_node_ids = NS._int.client.read(index_key).value
_node_ids = json.loads(_node_ids)
except etcd.EtcdKeyNotFound:
pass
if _node_ids:
if NS.node_context.node_id in _node_ids:
continue
else:
_node_ids += [NS.node_context.node_id]
else:
_node_ids = [NS.node_context.node_id]
_node_ids = list(set(_node_ids))
etcd_utils.write(index_key, json.dumps(_node_ids))
if sync_ttl:
etcd_utils.refresh(index_key, sync_ttl)
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "node_sync, Updating detected "
"platform"
}
)
)
except Exception as ex:
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": "node_sync service and indexes "
"sync failed: " + ex.message,
"exception": ex}
)
)
| import json
import etcd
from tendrl.commons.event import Event
from tendrl.commons.message import ExceptionMessage
from tendrl.commons.message import Message
from tendrl.commons.utils import etcd_utils
# TODO(darshan) this has to be moved to Definition file
TENDRL_SERVICES = [
"tendrl-node-agent",
"etcd",
"tendrl-api",
"tendrl-gluster-integration",
"tendrl-ceph-integration",
"glusterd",
"ceph-mon@*",
"ceph-osd@*",
"ceph-installer"
]
def sync(sync_ttl=None):
try:
tags = []
# update node agent service details
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "node_sync, Updating Service data"}
)
)
for service in TENDRL_SERVICES:
s = NS.tendrl.objects.Service(service=service)
if s.running:
service_tag = NS.compiled_definitions.get_parsed_defs()[
'namespace.tendrl'
]['tags'][service.strip("@*")]
tags.append(service_tag)
if service_tag == "tendrl/server":
tags.append("tendrl/monitor")
s.save()
# updating node context with latest tags
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "node_sync, updating node context "
"data with tags"
}
)
)
NS.node_context = NS.tendrl.objects.NodeContext().load()
current_tags = list(NS.node_context.tags)
tags += current_tags
NS.node_context.tags = list(set(tags))
NS.node_context.tags.sort()
current_tags.sort()
if NS.node_context.tags != current_tags:
NS.node_context.save()
# Update /indexes/tags/:tag = [node_ids]
for tag in NS.node_context.tags:
index_key = "/indexes/tags/%s" % tag
_node_ids = []
try:
_node_ids = NS._int.client.read(index_key).value
_node_ids = json.loads(_node_ids)
except etcd.EtcdKeyNotFound:
pass
if _node_ids:
if NS.node_context.node_id in _node_ids:
continue
else:
_node_ids += [NS.node_context.node_id]
else:
_node_ids = [NS.node_context.node_id]
_node_ids = list(set(_node_ids))
etcd_utils.write(index_key, json.dumps(_node_ids))
if sync_ttl:
etcd_utils.refresh(index_key, sync_ttl)
Event(
Message(
priority="debug",
publisher=NS.publisher_id,
payload={"message": "node_sync, Updating detected "
"platform"
}
)
)
except Exception as ex:
Event(
ExceptionMessage(
priority="error",
publisher=NS.publisher_id,
payload={"message": "node_sync service and indexes "
"sync failed: " + ex.message,
"exception": ex}
)
)
| Python | 0.000001 |
bcb89187a398000d80c7c0b0ac5152e76edd2666 | Remove TRT 4.0 restrictions on int32 test. | tensorflow/python/compiler/tensorrt/test/int32_test.py | tensorflow/python/compiler/tensorrt/test/int32_test.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):
def _ConstOp(self, shape, dtype):
return constant_op.constant(np.random.randn(*shape), dtype=dtype)
def GetParams(self):
"""Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
input_name = 'input'
output_name = 'output'
input_dims = [100, 4]
dtype = dtypes.int32
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
b = self._ConstOp((4, 10), dtype)
x = math_ops.matmul(x, b)
b = self._ConstOp((10,), dtype)
x = nn.bias_add(x, b)
x = array_ops.identity(x, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=[[input_dims]],
output_names=[output_name],
expected_output_dims=[[[100, 10]]])
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
conversion_params = super(ExcludeUnsupportedInt32Test,
self).GetConversionParams(run_params)
return conversion_params._replace(
max_batch_size=100,
maximum_cached_engines=1,
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimentional input.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return []
if __name__ == '__main__':
test.main()
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test conversion of graphs involving INT32 tensors and operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase):
def _ConstOp(self, shape, dtype):
return constant_op.constant(np.random.randn(*shape), dtype=dtype)
def GetParams(self):
"""Test exclusion of ops which are not supported in INT32 mode by TF-TRT"""
input_name = 'input'
output_name = 'output'
input_dims = [100, 4]
dtype = dtypes.int32
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
b = self._ConstOp((4, 10), dtype)
x = math_ops.matmul(x, b)
b = self._ConstOp((10,), dtype)
x = nn.bias_add(x, b)
x = array_ops.identity(x, name=output_name)
return trt_test.TfTrtIntegrationTestParams(
gdef=g.as_graph_def(),
input_names=[input_name],
input_dims=[[input_dims]],
output_names=[output_name],
expected_output_dims=[[[100, 10]]])
def GetConversionParams(self, run_params):
"""Return a ConversionParams for test."""
conversion_params = super(ExcludeUnsupportedInt32Test,
self).GetConversionParams(run_params)
return conversion_params._replace(
max_batch_size=100,
maximum_cached_engines=1,
# Disable layout optimizer, since it will convert BiasAdd with NHWC
# format to NCHW format under four dimentional input.
rewriter_config_template=trt_test.OptimizerDisabledRewriterConfig())
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return []
def ShouldRunTest(self, run_params):
"""Whether to run the test."""
# TODO(aaroey): Trt 4.0 forbids conversion for tensors with rank <3 in int8
# mode, which is a bug. Re-enable this when trt library is fixed.
return not trt_test.IsQuantizationMode(run_params.precision_mode)
if __name__ == '__main__':
test.main()
| Python | 0 |
f94f1f698c8e9473b7c96ec7b1244e84fc4ebe5d | update unittest for MonoMixer | test/src/unittest/standard/test_monomixer_streaming.py | test/src/unittest/standard/test_monomixer_streaming.py | #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import MonoMixer, AudioLoader
class TestMonoMixer_Streaming(TestCase):
left = []
right = []
def clickTrack(self):
size = 100
offset = 10
self.left = [0]*size
self.right = [0]*size
for i in range(offset/2, size, offset):
self.left[i] = 1.0
for i in range(offset, size, offset):
self.right[i] = 1
output = []
for i in range(size):
output.append((self.left[i], self.right[i]))
return array(output)
def testLeft(self):
gen = VectorInput(self.clickTrack())
chGen = VectorInput([2])
mixer = MonoMixer(type='left')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertEqualVector(pool['mix'], self.left)
def testRight(self):
gen = VectorInput(self.clickTrack())
chGen = VectorInput([2])
mixer = MonoMixer(type='right')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertEqualVector(pool['mix'], self.right)
def testMix(self):
gen = VectorInput(self.clickTrack())
chGen = VectorInput([2])
mixer = MonoMixer(type='mix')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertEqual(sum(pool['mix']), 19*0.5)
def testSingle(self):
gen = VectorInput(array([(0.9, 0.5)]))
chGen = VectorInput([2])
mixer = MonoMixer(type='mix')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertAlmostEqual(sum(pool['mix']), (0.9+0.5)*0.5)
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.ogg')
# NOTE: AudioLoader will through exception on "empty.wav" complaining that
# it cannot read stream info, using "empty.ogg" therefore...
loader = AudioLoader(filename=inputFilename)
mixer = MonoMixer(type='left')
pool = Pool()
loader.audio >> mixer.audio
mixer.audio >> (pool, "mix")
loader.numberChannels >> mixer.numberChannels
loader.sampleRate >> None
loader.md5 >> None
run(loader)
self.assertEqualVector(pool.descriptorNames(), [])
def testInvalidParam(self):
self.assertConfigureFails(MonoMixer(), {'type':'unknown'})
suite = allTests(TestMonoMixer_Streaming)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| #!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
from essentia.streaming import MonoMixer, AudioLoader
class TestMonoMixer_Streaming(TestCase):
left = []
right = []
def clickTrack(self):
size = 100
offset = 10
self.left = [0]*size
self.right = [0]*size
for i in range(offset/2, size, offset):
self.left[i] = 1.0
for i in range(offset, size, offset):
self.right[i] = 1
output = []
for i in range(size):
output.append((self.left[i], self.right[i]))
return array(output)
def testLeft(self):
gen = VectorInput(self.clickTrack())
chGen = VectorInput([2])
mixer = MonoMixer(type='left')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertEqualVector(pool['mix'], self.left)
def testRight(self):
gen = VectorInput(self.clickTrack())
chGen = VectorInput([2])
mixer = MonoMixer(type='right')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertEqualVector(pool['mix'], self.right)
def testMix(self):
gen = VectorInput(self.clickTrack())
chGen = VectorInput([2])
mixer = MonoMixer(type='mix')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertEqual(sum(pool['mix']), 19*0.5)
def testSingle(self):
gen = VectorInput(array([(0.9, 0.5)]))
chGen = VectorInput([2])
mixer = MonoMixer(type='mix')
pool = Pool()
gen.data >> mixer.audio
mixer.audio >> (pool, "mix")
chGen.data >> mixer.numberChannels
chGen.push('data', 2)
run(gen)
self.assertAlmostEqual(sum(pool['mix']), (0.9+0.5)*0.5)
def testEmpty(self):
inputFilename = join(testdata.audio_dir, 'generated', 'empty', 'empty.wav')
loader = AudioLoader(filename=inputFilename)
mixer = MonoMixer(type='left')
pool = Pool()
loader.audio >> mixer.audio
mixer.audio >> (pool, "mix")
loader.numberChannels >> mixer.numberChannels
loader.sampleRate >> None
run(loader)
self.assertEqualVector(pool.descriptorNames(), [])
def testInvalidParam(self):
self.assertConfigureFails(MonoMixer(), {'type':'unknown'})
suite = allTests(TestMonoMixer_Streaming)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| Python | 0 |
1307070cfe27ca605bfcc279644b735ee941f627 | Add work for ex21.py. | lpthw/ex31.py | lpthw/ex31.py | print "You enter a dark room with two doors. Do you go through door #1 or #2?"
door = raw_input("> ")
if door == "1":
print "Ther's a giant bear here eating a cheese cake. What do you do?"
print "1. Take the cake."
print "2. Scream at the bear."
bear = raw_input("> ")
if bear == "1":
print "The bear eats your face off. Good job!"
elif bear == "2":
print "The bear ears your legs off. Good job!"
else:
print "Well, doing %s is probably better. Bear runs away." % bear
elif door == "2":
print "You stare into the endless abyss at Cthulhu's retina."
print "1. Blueberries."
print "2. Yellow jacket clothespins."
print "3. Understanding revolvers yelling melodies."
insanity = raw_input("> ")
if insanity == "1" or insanity == "2":
print "Your body survives powered by a mind of jello. Good job!"
else:
print "The insanity rots your eyes into a pool of muck. Good job!"
else:
print "You stumble around and fall on a knife and die. Good job!"
| Python | 0 | |
d3c6c91bc4b6214053b9a1d1d2291a402c164b86 | add file | GridPixelPlot.py | GridPixelPlot.py | import kplr
import numpy as np
import matplotlib.pyplot as plt
qua = 5
client = kplr.API()
# Find the target KOI.
koi = client.koi(282.02)
originStar = koi.star
# Find potential targets by Kepler magnitude
koisOver = client.kois(where="koi_kepmag between %f and %f"%(originStar.kic_kepmag, originStar.kic_kepmag+0.1), sort=("koi_kepmag",1))
koisUnder = client.kois(where="koi_kepmag between %f and %f"%(originStar.kic_kepmag-0.1, originStar.kic_kepmag), sort=("koi_kepmag",1))
koisUnder.reverse()
stars = []
stars.append(originStar.kepid)
#Find 16 stars that are closest to the origin star in terms of Kepler magnitude
i=0
j=0
while len(stars) <17:
while koisOver[i].kepid in stars:
i+=1
tmpOver = koisOver[i].star
while koisUnder[j].kepid in stars:
j+=1
tmpUnder =koisUnder[j].star
if tmpOver.kic_kepmag-originStar.kic_kepmag > originStar.kic_kepmag-tmpUnder.kic_kepmag:
stars.append(tmpUnder.kepid)
j+=1
elif tmpOver.kic_kepmag-originStar.kic_kepmag < originStar.kic_kepmag-tmpUnder.kic_kepmag:
stars.append(tmpOver.kepid)
j+=1
else:
stars.append(tmpUnder.kepid)
stars.append(tmpOver.kepid)
i+=1
j+=1
for tmp in stars:
star = client.star(tmp)
# Get a list of light curve datasets.
tpfs = star.get_target_pixel_files(short_cadence=False)
time, flux = [], []
for tpf in tpfs:
with tpf.open() as f:
hdu_data = f[1].data
time.append(hdu_data["time"])
flux.append(hdu_data["flux"])
t = time[qua]
data = flux[qua]
data = np.nan_to_num(data)
data = np.ma.masked_equal(data,0)
shape = data.shape
td = shape[0]
x = shape[1]
y = shape[2]
# Plot the data
f, axes = plt.subplots(x, y)
for i in range(0,x):
for j in range(0,y):
axes[i,j].plot(t,data[0:td:1,i,j])
plt.setp( axes[i,j].get_xticklabels(), visible=False)
plt.setp( axes[i,j].get_yticklabels(), visible=False)
plt.subplots_adjust(left=None, bottom=None, right=None, top=None,
wspace=0, hspace=0)
plt.suptitle('Kepler %d Quarter %d\n Kepler magnitude %f'%(star.kepid, qua, star.kic_kepmag))
plt.savefig('%d-%d.png'%(star.kepid, qua))
plt.clf()
| Python | 0.000001 | |
8515155d9d0df940eea758121124995320fce6bb | add experimental C/clang plugin | languages/c.py | languages/c.py | import os
from lint.linter import Linter
from lint.util import find
class C(Linter):
language = 'c'
cmd = ('clang', '-xc', '-fsyntax-only', '-std=c99', '-Werror',
'-pedantic')
regex = (
r'^<stdin>:(?P<line>\d+):(?P<col>\d+):'
r'(?:(?P<ranges>[{}0-9:\-]+):)?\s+'
r'(?P<error>.+)'
)
def communicate(self, cmd, code):
includes = []
if self.filename:
parent = os.path.dirname(self.filename)
includes.append('-I' + parent)
inc = find(parent, 'include')
if inc:
includes.append('-I' + inc)
cmd += ('-',) + tuple(includes)
return super(C, self).communicate(cmd, code)
| Python | 0 | |
0026beea95ec26b8763feae270e79872f86de8a5 | Add run_sample_tests for executing sample tests in Travis | stress_test/sample_test_confs/run_sample_tests.py | stress_test/sample_test_confs/run_sample_tests.py | #! /usr/bin/env python3.4
# Copyright (c) 2015 Intracom S.A. Telecom Solutions. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
"""
Runner for sample tests
"""
import os
def run_tests():
"""
Method for running sample tests
"""
pass
if __name__ == '__main__':
run_tests()
| Python | 0 | |
352b17d8139fb0d269e4c17c01fe8ee488961c3a | Create HR_miniMaxSum.py | HR_miniMaxSum.py | HR_miniMaxSum.py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the miniMaxSum function below.
def miniMaxSum(arr):
maxx = max(arr)
minn = min(arr)
mini = arr.copy()
mini.remove(maxx)
maxi = arr.copy()
maxi.remove(minn)
sum_min = sum(mini)
sum_max = sum(maxi)
print(sum_min, sum_max)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
miniMaxSum(arr)
| Python | 0.000003 | |
b8fe92674773c7470c3b47899a8832bbb94771b4 | Add path module | lib/oelite/path.py | lib/oelite/path.py | import os
TOPDIR = os.getcwd()
def init(topdir):
global TOPDIR
TOPDIR = topdir
def relpath(path):
"""Return a relative version of paths compared to TOPDIR."""
global TOPDIR
if path.startswith(TOPDIR):
return path[len(TOPDIR):].lstrip("/")
return path
def which(path, filename, pathsep=os.pathsep):
"""Given a search path, find file."""
if isinstance(path, basestring):
path = path.split(pathsep)
for p in path:
f = os.path.join(p, filename)
if os.path.exists(f):
return os.path.abspath(f)
return '' # TODO: change to None, and fixup the breakage it causes
| Python | 0 | |
8ce2da2ed2e445480ee2e10483a5fae1c7c677a0 | Include self contained method for output to a view | lib/output_view.py | lib/output_view.py | import sublime
import sublime_plugin
###-----------------------------------------------------------------------------
def output_to_view(window,
title,
content,
reuse=True,
syntax=None,
clear=True,
settings=None):
if not isinstance(content, str):
content = "\n".join (content)
view = None
if reuse:
for _view in window.views ():
if _view.name () == title:
view = _view
break
if view is None:
view = window.new_file ()
view.set_scratch (True)
view.set_name (title)
if syntax is not None:
view.assign_syntax (syntax)
else:
view.set_read_only (False)
if clear is True:
view.sel ().clear ()
view.sel ().add (sublime.Region (0, view.size ()))
view.run_command ("left_delete")
if window.active_view () != view:
window.focus_view (view)
if settings is not None:
for setting in settings:
view.settings ().set (setting, settings[setting])
# Sace current buffer size, selection information and view position
saved_size = view.size ()
saved_sel = list(view.sel ())
saved_position = view.viewport_position ()
# Single select, position cursor at end of file, insert the data
view.sel ().clear ()
view.sel ().add (sublime.Region (saved_size, saved_size))
view.run_command ("insert", {"characters": content})
# If the last selection was at the end of the buffer, replace that selection
# with the new end of the buffer so the relative position remains the same.
if sublime.Region (saved_size, saved_size) == saved_sel[-1]:
saved_sel[-1] = sublime.Region (view.size (), view.size ())
# Clear current selection and add original selection back
view.sel ().clear ()
for region in saved_sel:
view.sel ().add (region)
view.set_viewport_position (saved_position, False)
view.set_read_only (True)
###-----------------------------------------------------------------------------
| Python | 0 | |
0d35b502515a9775166e775c3462ca9300fe4517 | add examples | examples/helpers.py | examples/helpers.py | # -*- coding: utf-8 -*-
#
from dolfin import as_backend_type
import matplotlib.pyplot as plt
import scipy.linalg
def show_matrix(A):
A = as_backend_type(A)
A_matrix = A.sparray()
# colormap
cmap = plt.cm.gray_r
A_dense = A_matrix.todense()
# A_r = A_dense[0::2][0::2]
# A_i = A_dense[1::2][0::2]
cmap.set_bad('r')
# im = plt.imshow(
# abs(A_dense), cmap=cmap, interpolation='nearest', norm=LogNorm()
# )
plt.imshow(abs(A_dense), cmap=cmap, interpolation='nearest')
plt.colorbar()
plt.show()
return
def get_eigenvalues(A):
A = as_backend_type(A)
A_matrix = A.sparray()
return scipy.linalg.eigvals(A_matrix.todense())
| Python | 0 | |
e333bc7b23a69a39392899a1d1c8e0bdf3523c3f | remove unused import [ci skip] | corehq/apps/app_manager/management/commands/build_apps.py | corehq/apps/app_manager/management/commands/build_apps.py | import contextlib
import json
from django.core.management.base import BaseCommand
from lxml import etree
import os
from corehq.apps.app_manager.models import Application, RemoteApp
_parser = etree.XMLParser(remove_blank_text=True)
def normalize_xml(xml):
xml = etree.fromstring(xml, parser=_parser)
return etree.tostring(xml, pretty_print=True)
@contextlib.contextmanager
def record_performance_stats(filepath, slug):
from guppy import hpy
import time
hp = hpy()
before = hp.heap()
start = time.clock()
try:
yield
finally:
end = time.clock()
after = hp.heap()
leftover = after - before
with open(filepath, 'a') as f:
f.write('{},{},{}\n'.format(slug, leftover.size, end - start))
class Command(BaseCommand):
args = '<path_to_dir> <build-slug>'
help = """
Pass in a path to a directory (dir, below) with the following layout:
dir/
src/
[app-slug].json
[app-slug].json
...
"""
def handle(self, *args, **options):
path, build_slug = args
app_slugs = []
perfpath = os.path.join(path, '{}-performance.txt'.format(build_slug))
if os.path.exists(perfpath):
os.remove(perfpath)
for name in os.listdir(os.path.join(path, 'src')):
_JSON = '.json'
if name.endswith(_JSON):
app_slugs.append(name[:-len(_JSON)])
for slug in app_slugs:
print 'Fetching %s...' % slug
source_path = os.path.join(path, 'src', '%s.json' % slug)
with open(source_path) as f:
j = json.load(f)
if j['doc_type'] == 'Application':
app = Application.wrap(j)
elif j['doc_type'] == 'RemoteApp':
app = RemoteApp.wrap(j)
app.version = 1
build_path = os.path.join(path, build_slug, slug)
print ' Creating files...'
with record_performance_stats(perfpath, slug):
files = app.create_all_files()
self.write_files(files, build_path)
def write_files(self, files, path):
for filename, payload in files.items():
filepath = os.path.join(path, filename)
dirpath, filename = os.path.split(filepath)
try:
os.makedirs(dirpath)
except OSError:
# file exists
pass
with open(filepath, 'w') as f:
if filepath.endswith('.xml'):
payload = normalize_xml(payload)
f.write(payload) | import contextlib
from functools import wraps
import json
from django.core.management.base import BaseCommand
from lxml import etree
import os
from corehq.apps.app_manager.models import Application, RemoteApp
_parser = etree.XMLParser(remove_blank_text=True)
def normalize_xml(xml):
xml = etree.fromstring(xml, parser=_parser)
return etree.tostring(xml, pretty_print=True)
@contextlib.contextmanager
def record_performance_stats(filepath, slug):
from guppy import hpy
import time
hp = hpy()
before = hp.heap()
start = time.clock()
try:
yield
finally:
end = time.clock()
after = hp.heap()
leftover = after - before
with open(filepath, 'a') as f:
f.write('{},{},{}\n'.format(slug, leftover.size, end - start))
class Command(BaseCommand):
args = '<path_to_dir> <build-slug>'
help = """
Pass in a path to a directory (dir, below) with the following layout:
dir/
src/
[app-slug].json
[app-slug].json
...
"""
def handle(self, *args, **options):
path, build_slug = args
app_slugs = []
perfpath = os.path.join(path, '{}-performance.txt'.format(build_slug))
if os.path.exists(perfpath):
os.remove(perfpath)
for name in os.listdir(os.path.join(path, 'src')):
_JSON = '.json'
if name.endswith(_JSON):
app_slugs.append(name[:-len(_JSON)])
for slug in app_slugs:
print 'Fetching %s...' % slug
source_path = os.path.join(path, 'src', '%s.json' % slug)
with open(source_path) as f:
j = json.load(f)
if j['doc_type'] == 'Application':
app = Application.wrap(j)
elif j['doc_type'] == 'RemoteApp':
app = RemoteApp.wrap(j)
app.version = 1
build_path = os.path.join(path, build_slug, slug)
print ' Creating files...'
with record_performance_stats(perfpath, slug):
files = app.create_all_files()
self.write_files(files, build_path)
def write_files(self, files, path):
for filename, payload in files.items():
filepath = os.path.join(path, filename)
dirpath, filename = os.path.split(filepath)
try:
os.makedirs(dirpath)
except OSError:
# file exists
pass
with open(filepath, 'w') as f:
if filepath.endswith('.xml'):
payload = normalize_xml(payload)
f.write(payload) | Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.