commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
3492ffd5ffa0c7d1dfb5a9f4a587777245044685 | add test cases of ruamel.yaml backend | ssato/python-anyconfig,ssato/python-anyconfig | tests/backend/yaml/ruamel_yaml.py | tests/backend/yaml/ruamel_yaml.py | #
# Copyright (C) - 2018 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
# pylint: disable=missing-docstring,invalid-name,too-few-public-methods
# pylint: disable=ungrouped-imports
from __future__ import absolute_import
import os
import anyconfig.backend.yaml.pyyaml as TT
import tests.backend.common as TBC
from anyconfig.compat import OrderedDict
CNF_S = """
a: 0
b: bbb
c:
- 1
- 2
- 3
sect0: §0
d: ["x", "y", "z"]
sect1:
<<: *sect0
e: true
"""
CNF = OrderedDict((("a", 0), ("b", "bbb"), ("c", [1, 2, 3]),
("sect0", OrderedDict((("d", "x y z".split()), ))),
("sect1", OrderedDict((("d", "x y z".split()),
("e", True))))))
class HasParserTrait(TBC.HasParserTrait):
psr = TT.Parser()
cnf = CNF
cnf_s = CNF_S
opts = dict(typ="rt", pure=True,
preserve_quotes=True,
indent=dict(mapping=4, sequence=4, offset=2))
setattr(psr, "dict_options", opts)
class Test_10(TBC.Test_10_dumps_and_loads, HasParserTrait): # noqa: N801
load_options = dict(ac_safe=True, Loader=TT.yaml.loader.Loader)
dump_options = dict(ac_safe=True)
empty_patterns = [('', {}), (' ', {}), ('[]', []),
("#%s#%s" % (os.linesep, os.linesep), {})]
class Test_20(TBC.Test_20_dump_and_load, HasParserTrait): # noqa: N801
pass
# vim:sw=4:ts=4:et:
| mit | Python | |
d4f3f65a9c6dbe4a7119eb65f872524a45f756a7 | Add codifflib.py | nzre/codifflib | codifflib.py | codifflib.py | import sys
from difflib import SequenceMatcher
from pygments import lex
from pygments.lexers.c_cpp import CLexer
from pygments.styles import get_style_by_name
class CodeDiff:
opcode_style = {
'insert': {'bgcolor': 'eaffea'}, 'replace': {'bgcolor': 'fff68f'},
'delete': {'bgcolor': 'ffaaaa'}, 'equal': {'bgcolor': None},
}
def __init__(self, from_str, to_str):
self.from_str = from_str
self.to_str = to_str
# difflib stuff.
self.sm = SequenceMatcher(None, from_str, to_str)
# Pygments stuff.
# TODO: allow chosing of style.
self.style = get_style_by_name('colorful')
self.style_map = {}
for token_type, style in self.style:
self.style_map[token_type] = style
self.descs = []
pdesc = self.get_pygmentation_desc()
ddesc = self.get_difflib_desc()
while pdesc or ddesc:
# The diff may be larger than the program itself.
if not pdesc:
desc = ddesc.pop(0)
# Update the starting position of the diff fragment to the last
# character descriptor in descriptor list.
desc[1] = self.descs[-1][2]
self.descs.append(desc)
break
# We have an opcode from dstart to dend.
dstyle, dstart, dend = ddesc[0]
# We have an token dstart to dend.
pstyle, pstart, pend = pdesc[0]
# TODO: update() overwrites values for common keys, is this correct?
style = pstyle.copy()
style.update(dstyle)
# If the token fits within the opcode region, merge the two styles.
if dstart <= pstart and pend <= dend:
self.descs.append([style, pstart, pend])
# Next token & remove the diff opcode if necessary.
pdesc.pop(0)
if (pend == dend): ddesc.pop(0)
# If the token doesn't fit within the opcode region, seperate it into two.
elif dstart <= pstart and pend >= dend:
self.descs.append([style, pstart, dend])
# Second part of the token stays in the array.
pdesc[0] = [style, dend, pend]
ddesc.pop(0)
else:
# TODO: missing some cases?
assert False
# Both arrays should be empty when the above loop finishes.
assert (not pdesc) and (not ddesc)
def get_pygmentation_desc(self):
desc = []
token_start = token_end = 0
for token_type, token in lex(self.from_str, CLexer()):
token_start = token_end
token_end += len(token)
desc.append([self.style_map[token_type], token_start, token_end])
return desc
def get_difflib_desc(self):
desc = []
for s in self.sm.get_opcodes():
opcode, from_start, from_end, to_start, to_end = s
desc.append([CodeDiff.opcode_style[opcode], from_start, from_end])
return desc
def to_html(self):
# TODO: don't hardcode output formatting.
html = ""
for style, start, end in self.descs:
span = '<span style="'
if style['bgcolor']:
span += 'background: #' + style['bgcolor'] + ';'
# TODO: maps always containing 'bgcolor' and not necessarily 'color' may
# by a bit confusing.
if ('color' in style) and style['color']:
span += 'color: #' + style['color'] + ';'
span += '">'
fragment = self.from_str[start:end]
fragment = fragment.replace('\n', '<br>')
fragment = fragment.replace(' ', ' ')
span += fragment + '</span>'
html += span
return html
@staticmethod
def from_files(from_file, to_file):
with open(from_file) as f:
from_str = f.read()
with open(to_file) as f:
to_str = f.read()
return CodeDiff(from_str, to_str)
| mit | Python | |
3f9a8ee16e47f4ce0d75a1b856341c05436c2aff | Create sending_email.py | wliu2016/sending_email | sending_email.py | sending_email.py | # -*- coding: utf-8 -*-
"""
Created on Wed May 10 16:32:22 2017
This scripts are used to send out data from Pasture to Wenlong from field.
Key features:
- Parse and send out all .par files
- Send out email at certain intervals: such as one day.
@author: wliu14
"""
from email import encoders
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
import smtplib
import time
import glob
import os
import logging
def _get_files(path):
# parse and find out all the files endwith .par.
files = list()
for filename in glob.glob(os.path.join(path, '*.par')):
files.append(filename)
return files
def send_email(path):
#information of the email address.
from_addr = 'wenlongliu853@gmail.com'
email_password = 'XXXX'
to_addr = 'wenlongliu853@gmail.com'
#send attachment via email.
msg = MIMEMultipart()
msg['From'] = from_addr
msg['To'] = to_addr
msg['Subject'] = 'This is a testing email.'
#Attachement information.
files = _get_files(path)
msg.attach(MIMEText('This is a testing email to send out the Plymouth data', 'plain', 'utf-8'))
for filename in files:
#Adding attachments.
with open(filename, 'rb') as f:
# Set the name and format of the attachment:
mime = MIMEBase('text', 'plain', filename=filename)
# Header information:
mime.add_header('Content-Disposition', 'attachment', filename=filename)
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
# Read in attachment:
mime.set_payload(f.read())
# Decode the information:
encoders.encode_base64(mime)
# Add files into attachment.
msg.attach(mime)
smtp_server = 'smtp.gmail.com'
smtp_port = 587
server = smtplib.SMTP(smtp_server, smtp_port)
server.starttls()
server.set_debuglevel(1)
server.login(from_addr, email_password)
server.sendmail(from_addr, to_addr, msg.as_string())
server.quit()
if __name__ == '__main__':
path = 'C:\\s-canV5.0\\Results\\ORIGINAL'
interval = 600 #Unit: second
while True:
try:
send_email(path)
print('\n Sending one email!\n')
except:
print('\n error, try again later.\n ')
finally:
time.sleep(interval)
| apache-2.0 | Python | |
03f46b0d6867bcb8a88e53b26089705cb1667bbd | Add script to generate images from all samples | megacool/teetime | tools/create_from_sample_texts.py | tools/create_from_sample_texts.py | #!/usr/bin/env python
import teetime
import os
def main():
with open('samples/sample-texts.txt') as fh:
for line in fh:
print line.strip()
path = teetime.create_typography(line.strip(), colors=False)
os.rename(path, os.path.join('samples', os.path.basename(path)))
if __name__ == '__main__':
main()
| mit | Python | |
08c0c68ed52e9644cc92ad8afdc423b43b4c1326 | Add Fractal_Tree.py. | mcsoo/Exercises | Fractal_Tree.py | Fractal_Tree.py | __author__ = "ClaytonBat"
import turtle
def tree(branchLen,t):
if branchLen > 5:
t.forward(branchLen)
t.right(20)
tree(branchLen-15,t)
t.left(40)
tree(branchLen-15,t)
t.right(20)
t.backward(branchLen)
def main():
t = turtle.Turtle()
myWin = turtle.Screen()
t.left(90)
t.up()
t.backward(100)
t.down()
t.color("green")
tree(75,t)
myWin.exitonclick()
main()
| mit | Python | |
1893473729acd938a0657127b82892af1bdb987b | Create AbortMultipartUploads.py | OpenTelekomCloud/docs | obs/cleanUnmergedFragments/AbortMultipartUploads.py | obs/cleanUnmergedFragments/AbortMultipartUploads.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import commands
if __name__ == '__main__':
if len(sys.argv[1:]) > 0:
bucket_nameurl = str(sys.argv[1:][0])
else:
bucket_nameurl = ""
print("bucket name should be specified\nAbortMultipartUploads.py [s3://BucketName]")
sys.exit()
while True:
ls_cmd = "s3cmd multipart %s" % bucket_nameurl
out = commands.getoutput(ls_cmd)
if len(out.splitlines()) < 3:
print("All multiuploads have been aborted.")
sys.exit()
if len(sys.argv[1:]) > 1 and sys.argv[2:][0].lower() == "list":
print("only up to 1000 multiuploads can be displayed !\n")
print(out)
sys.exit()
for line in out.splitlines()[2:]:
obj_uid = line.split("\t")
url = obj_uid[1]
uploadId = obj_uid[2]
print("url:[%s], uploadId:[%s]" % (url, uploadId))
abort_cmd = "s3cmd abortmp %s %s" % (url, uploadId)
out = commands.getoutput(abort_cmd)
print(out)
| apache-2.0 | Python | |
6dca2d95144ebe22f58cb4dafb00a3f8a402316e | add answer for question 4 | pythonzhichan/DailyQuestion,pythonzhichan/DailyQuestion | question_4/heguilong.py | question_4/heguilong.py | #!/usr/bin/env python3
"""
File: heguilong.py
Author: heguilong
Email: hgleagle@gmail.com
Github: https://github.com/hgleagle
Description:
斐波那契数列由0和1开始,之后的斐波那契系数就是由之前的两数相加而得出,例如
斐波那契数列的前10个数是 0, 1, 1, 2, 3, 5, 8, 13, 21, 34。
"""
import sys
import logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s \
- %(message)s')
# solution 1
class Fibonaci():
def __init__(self):
"""TODO: Docstring for __init__.
:arg1: TODO
:returns: TODO
"""
self.fib_dict = {}
def calculate(self, number):
"""TODO: Docstring for calculate.
:f: TODO
:number: TODO
:returns: TODO
"""
# logging.debug("number: {%d}" % number)
if number <= 1:
result = number
else:
result = self.calculate(number - 1) + self.calculate(number - 2)
if number > 0 and number not in self.fib_dict:
self.fib_dict[number] = result
return result
def show_fib_values(self):
print(self.fib_dict.values())
# solution 2
def fib(n):
a, b = 0, 1
while n > 0:
yield b
a, b = b, a + b
n -= 1
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python3 heguilong.py number")
sys.exit()
number = int(sys.argv[1])
if number <= 0:
print("number should be larger than 0")
# solution 1
print("Solution 1:")
fib_obj = Fibonaci()
fib_obj.calculate(number)
fib_obj.show_fib_values()
# solution 2
print("Solution 2:")
for i in fib(number):
print(i)
| mit | Python | |
a50ce7117e4c1300410b74b5511722e4d7d57be4 | Implement script to automate setting scope location and time | bgottula/track,bgottula/track | set_when_and_where.py | set_when_and_where.py | #!/usr/bin/env python
import config
import configargparse
import math
import time
import nexstar
import ephem
parser = configargparse.ArgParser(default_config_files=config.DEFAULT_FILES)
parser.add_argument('--scope', help='serial device for connection to telescope', default='/dev/ttyUSB0')
parser.add_argument('--lat', required=True, help='latitude of observer (+N)')
parser.add_argument('--lon', required=True, help='longitude of observer (+E)')
args = parser.parse_args()
# We want to parse the latitude and longitude exactly the same way as our other
# scripts do: by letting ephem.Angle do the parsing itself. But they explicitly
# disallow us from creating our own Angle objects directly, so we're forced to
# work around that by using an object (Observer) that has Angles in it already.
observer = ephem.Observer()
observer.lat = args.lat
observer.lon = args.lon
# Convert to degrees
lat = observer.lat * 180.0 / math.pi
lon = observer.lon * 180.0 / math.pi
# Shove data into telescope
nexstar = nexstar.NexStar(args.scope)
nexstar.set_location(lat, lon)
nexstar.set_time(time.time())
| mit | Python | |
1212677ac1087498fa83a3d4d9e8ba9d13c35b20 | Add the basic structure for the notification handler. | yiyangyi/cc98-tornado | handler/notification.py | handler/notification.py | class ListHandler(BaseHandler):
| mit | Python | |
7225514cd2e4acb3bf78d6ba1c221caf9c084490 | Add plotting script for correction files | GeoscienceAustralia/PyRate,GeoscienceAustralia/PyRate | utils/plot_correction_files.py | utils/plot_correction_files.py | import numpy as np
from matplotlib import pyplot as plt
import glob
import re
import math
import rasterio as rio
import argparse
import os
"""
This script plots the original interferogram, the corresponding correction file,
and the resulting corrected interferogram from a PyRate directory with already,
processed data. directories are given as user arguments to the script, and the
number of plots is determined by a number range given by user.
Usage: python3 plot_correction_files.py <IFG_DIR> <CORRECTION_DIR> <CORRECTED_DIR> <SAVE_DIR> <FIRST_IFG> <LAST_IFG>
Command-line arguments:
IFG_DIR - full path to uncorrected interferograms in PyRate.
CORRECTION_DIR - full path to correction files in PyRate.
CORRECTED_DIR - full path to corrected interferograms in PyRate.
SAVE_DIR - full path to directory where images will be saved (needs to exist).
FIRST_IFG - first IFG in range of IFGs to plot (e.g. 1 to start plotting at 1st IFG in directory).
LAST_IFG - last IFG in range of IFGs to plot (e.g. 37 will plot up until the 37th IFG in directory).
"""
# Arguments
parser = argparse.ArgumentParser(description="Script to plot orbit correction files with uncorrected and corrected interferogram")
parser.add_argument("IFG_DIR", type=str, help="full path to uncorrected interferograms in PyRate")
parser.add_argument("CORRECTION_FILE_DIR", type=str, help="full path to correction files in PyRate")
parser.add_argument("CORRECTED_IFG_DIR", type=str, help="full path to corrected interferograms in PyRate")
parser.add_argument("SAVE_DIR", type=str, help="full path to directory where images will be saved")
parser.add_argument("FIRST_IFG", type=int, help="first IFG in range of IFGs to plot (e.g. 1 to start plotting at first IFG in directory)")
parser.add_argument("LAST_IFG", type=int, help="last IFG in range of IFGs to plot (e.g. 37 will plot up until the 37th IFG in directory)")
args = parser.parse_args()
# Directories and variable from user agruments
ifg_dir = os.path.abspath(args.IFG_DIR)
corr_dir = os.path.abspath(args.CORRECTION_FILE_DIR)
tempml_dir = os.path.abspath(args.CORRECTED_IFG_DIR)
save_dir = os.path.abspath(args.SAVE_DIR)
first_ifg_num = args.FIRST_IFG - 1
last_ifg_num = args.LAST_IFG - 1
# Create Lists
ifg_list = []
for file in glob.glob(f'{ifg_dir}/*ifg.tif'):
ifg_list.append(file)
corr_list = []
for file in glob.glob(f'{corr_dir}/*.npy'):
corr_list.append(file)
tempml_list = []
for file in glob.glob(f'{tempml_dir}/*ifg.tif'):
tempml_list.append(file)
# Sort
ifg_list.sort()
corr_list.sort()
tempml_list.sort()
for i in range(first_ifg_num, last_ifg_num + 1):
# Read data
with rio.open(ifg_list[i]) as src:
ifg = src.read(1)
ifg[ifg==0.0] = np.nan
mask = np.isnan(ifg)
# convert to mm
ifg = ifg * 1000 * (0.0562356424 / (4 * math.pi))
corr = np.load(corr_list[i])
corr[mask] = np.nan
with rio.open(tempml_list[i]) as src:
ifg_corr = src.read(1)
# Identify Date Pair
date_pair_list = re.findall(r'\d{8}-\d{8}', ifg_list[i])
date_pair_string = date_pair_list[0]
print(f'\nPlotting for {date_pair_string}...\n')
# Plot
climit = 100
fig, ax = plt.subplots(1,3, figsize=(6, 3))
# IFG
s0 = ax[0].imshow(ifg-np.nanmedian(ifg), cmap='bwr', clim=(-1*climit, climit))
ax[0].set_axis_off()
# CORRECTION FILE
s1 =ax[1].imshow(corr-np.nanmedian(corr), cmap='bwr', clim=(-1*climit, climit))
ax[1].set_axis_off()
# IFG CORRECTED
s2 = ax[2].imshow(ifg_corr-np.nanmedian(ifg_corr), cmap='bwr', clim=(-1*climit,climit))
ax[2].set_axis_off()
# Extra
fig.colorbar(s0, ax=ax[0], location='bottom', label='mm')
fig.colorbar(s1, ax=ax[1], location='bottom', label='mm')
fig.colorbar(s2, ax=ax[2], location='bottom', label='mm')
fig.set_facecolor('grey')
fig.tight_layout()
# Title
ax[1].set_title(f'{date_pair_string}', fontsize=10, fontweight='bold')
plt.savefig(f'{save_dir}/{date_pair_string}.png', dpi=300)
plt.close()
i = i + 1
| apache-2.0 | Python | |
77fc04ddf6dbc9cb618b427b36628adb019b2f43 | add import script | sassoftware/mirrorball,sassoftware/mirrorball | scripts/import.py | scripts/import.py | #!/usr/bin/python
#
# Copyright (c) 2008 rPath, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.rpath.com/permanent/licenses/CPL-1.0.
#
# This program is distributed in the hope that it will be useful, but
# without any warranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
import os
import sys
sys.path.insert(0, os.environ['HOME'] + '/hg/rpath-xmllib')
sys.path.insert(0, os.environ['HOME'] + '/hg/conary')
sys.path.insert(0, os.environ['HOME'] + '/hg/mirrorball')
from conary.lib import util
sys.excepthook = util.genExcepthook()
from updatebot import bot, config, log
log.addRootLogger()
cfg = config.UpdateBotConfig()
cfg.read(os.environ['HOME'] + '/hg/mirrorball/config/opensuse/updatebotrc')
obj = bot.Bot(cfg)
obj.create()
import epdb ; epdb.st()
| apache-2.0 | Python | |
42a9c36d711f2550cc68fdba96b6af36d3d31d8d | Create grasshopperDebug.py | NendoTaka/CodeForReference,NendoTaka/CodeForReference,NendoTaka/CodeForReference | CodeWars/8kyu/grasshopperDebug.py | CodeWars/8kyu/grasshopperDebug.py | def weather_info (temp):
c = convertToCelsius(temp)
if (c <= 0):
return (str(c) + " is freezing temperature")
else:
return (str(c) + " is above freezing temperature")
def convertToCelsius (temp):
temp = (((float(temp) - 32) * 5) / 9)
return temp
| mit | Python | |
9ba0ff62572dcfd7912c9b58091b59844f8e1753 | Add script for Helmholtz rates | thomasgibson/tabula-rasa | results/sccg-table.py | results/sccg-table.py | import os
import sys
import pandas as pd
p4_data = "helmholtz-results/helmholtz_conv-d-4.csv"
p5_data = "helmholtz-results/helmholtz_conv-d-5.csv"
p6_data = "helmholtz-results/helmholtz_conv-d-6.csv"
p7_data = "helmholtz-results/helmholtz_conv-d-7.csv"
data_set = [p4_data, p5_data, p6_data, p7_data]
for data in data_set:
if not os.path.exists(data):
print("Cannot find data file '%s'" % data)
sys.exit(1)
table = r"""\begin{tabular}{| l | c | c | c |}
\hline
\multicolumn{4}{|c|}{$H^1$ Helmholtz} \\
\hline
\multirow{2}{*}{$k$} & mesh &
\multicolumn{2}{|c|}{$\norm{p-p_h}_{L^2(\Omega)} \leq \mathcal{O}(h^{k+1})$} \\
\cline{2-4}
& $r$ & $L^2$-error & rate \\
"""
lformat = r"""& {mesh: d} & {L2Errors:.3e} & {ConvRates} \\
"""
def rate(s):
if s == '---':
return s
else:
return "{s:.3f}".format(s=float(s))
for data in data_set:
df = pd.read_csv(data)
df = df.sort_values("Mesh")
degree = df.Degree.values[0]
table += r"""
\hline
\multirow{6}{*}{%d}
""" % degree
for k in df.Mesh:
sliced = df.loc[lambda x: x.Mesh == k]
table += lformat.format(mesh=k,
L2Errors=sliced.L2Errors.values[0],
ConvRates=rate(sliced.ConvRates.values[0]),
Reductions=sliced.ResidualReductions.values[0])
table += r"""\hline
\end{tabular}
"""
print(table)
| mit | Python | |
248c738b31e43ef456d47045bc5f5b2d58d35d98 | add autocomplete with a German-Korean dictionary | Kuniz/alfnaversearch,Kuniz/alfnaversearch | workflow/dedic_naver_search.py | workflow/dedic_naver_search.py | # Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://dedicac.naver.com/ac'
params = dict(
_callback="",
st="11",
r_lt="10",
q=word)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Dedic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("de_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Dedic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
| mit | Python | |
4137bdd36fa4a1b4e194c2c61f803cecdebe8f69 | Implement MySQL driver | romuloceccon/logviewer,romuloceccon/logviewer | lib/mysql_driver.py | lib/mysql_driver.py | import mysql.connector
import sql_driver
import screen_buffer
class MySQLDriver(sql_driver.SqlDriver):
class Factory(object):
def __init__(self, **mysql_conf):
self._mysql_conf = mysql_conf
if 'port' in self._mysql_conf:
self._mysql_conf['port'] = int(self._mysql_conf['port'])
def create_driver(self, state):
return MySQLDriver(self._mysql_conf, level=state.level,
facility=state.facility, host=state.host, program=state.program)
def __init__(self, mysql_conf, **kwargs):
sql_driver.SqlDriver.__init__(self, **kwargs)
self._mysql_conf = mysql_conf
def start_connection(self):
self._connection = mysql.connector.connect(**(self._mysql_conf))
def stop_connection(self):
self._connection.close()
def select(self, cmd):
result = self._connection.cursor()
result.execute(cmd)
return result
def fetch_record(self, query):
rec = query.fetchone()
if rec is None:
query.close()
self._connection.rollback()
return
return { 'id': rec[0], 'facility_num': str(rec[1]),
'level_num': str(rec[2]), 'host': rec[3], 'datetime': rec[4],
'program': rec[5], 'pid': rec[6], 'message': rec[7] }
| mit | Python | |
6a29e9f963af4920b21c64d157ca90b0d7d081c4 | implement parallelForLoop.py | xmementoit/practiseSamples,xmementoit/practiseSamples,xmementoit/practiseSamples,xmementoit/practiseSamples,xmementoit/practiseSamples,xmementoit/practiseSamples,xmementoit/practiseSamples,xmementoit/practiseSamples | pythonPractiseSamples/parallelForLoop.py | pythonPractiseSamples/parallelForLoop.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Damian Ziobro <damian@xmementoit.com>
#
from joblib import Parallel, delayed
import multiprocessing
n=1000000
def squareRoot(i):
return i*i
cpus = multiprocessing.cpu_count()
#cpus = 1
results = Parallel(n_jobs=cpus)(delayed(squareRoot)(i) for i in range(n))
#print results
| apache-2.0 | Python | |
b9711e4fd82441669fdd97b1e5eeb12f03e995a5 | Make srrun use the proper executable on windows | mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge | srrun.py | srrun.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import platform
import subprocess
import sys
mypath = os.path.abspath(__file__)
mydir = os.path.split(mypath)[0]
if platform.system().lower() == 'windows':
srpython = sys.executable
else:
srhome = os.path.join(mydir, '..')
srhome = os.path.abspath(srhome)
srbin = os.path.join(srhome, 'bin')
srpython = os.path.join(srbin, 'python')
srpypath = [mydir, os.path.join(mydir, 'wpr')]
env = copy.copy(os.environ)
env['PYTHONPATH'] = ':'.join(srpypath)
# Set a sane umask for all children
os.umask(022)
sys.exit(subprocess.call([srpython] + sys.argv[1:], env=env))
| #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import copy
import os
import subprocess
import sys
mypath = os.path.abspath(__file__)
mydir = os.path.split(mypath)[0]
srhome = os.path.join(mydir, '..')
srhome = os.path.abspath(srhome)
srbin = os.path.join(srhome, 'bin')
srpython = os.path.join(srbin, 'python')
srpypath = [mydir, os.path.join(mydir, 'wpr')]
env = copy.copy(os.environ)
env['PYTHONPATH'] = ':'.join(srpypath)
# Set a sane umask for all children
os.umask(022)
sys.exit(subprocess.call([srpython] + sys.argv[1:], env=env))
| mpl-2.0 | Python |
5d1da267791456f6c5e386d6e7204d02371c2eb2 | Add tests for gold projects | emawind84/readthedocs.org,kenshinthebattosai/readthedocs.org,Tazer/readthedocs.org,SteveViss/readthedocs.org,atsuyim/readthedocs.org,clarkperkins/readthedocs.org,safwanrahman/readthedocs.org,kenwang76/readthedocs.org,SteveViss/readthedocs.org,sunnyzwh/readthedocs.org,mhils/readthedocs.org,attakei/readthedocs-oauth,rtfd/readthedocs.org,wanghaven/readthedocs.org,hach-que/readthedocs.org,stevepiercy/readthedocs.org,clarkperkins/readthedocs.org,CedarLogic/readthedocs.org,stevepiercy/readthedocs.org,gjtorikian/readthedocs.org,wijerasa/readthedocs.org,davidfischer/readthedocs.org,emawind84/readthedocs.org,pombredanne/readthedocs.org,sunnyzwh/readthedocs.org,espdev/readthedocs.org,clarkperkins/readthedocs.org,atsuyim/readthedocs.org,GovReady/readthedocs.org,fujita-shintaro/readthedocs.org,wanghaven/readthedocs.org,LukasBoersma/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,sunnyzwh/readthedocs.org,sid-kap/readthedocs.org,hach-que/readthedocs.org,mhils/readthedocs.org,atsuyim/readthedocs.org,espdev/readthedocs.org,stevepiercy/readthedocs.org,CedarLogic/readthedocs.org,techtonik/readthedocs.org,sunnyzwh/readthedocs.org,titiushko/readthedocs.org,michaelmcandrew/readthedocs.org,attakei/readthedocs-oauth,tddv/readthedocs.org,gjtorikian/readthedocs.org,soulshake/readthedocs.org,kenshinthebattosai/readthedocs.org,kenshinthebattosai/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,fujita-shintaro/readthedocs.org,mhils/readthedocs.org,safwanrahman/readthedocs.org,titiushko/readthedocs.org,rtfd/readthedocs.org,michaelmcandrew/readthedocs.org,laplaceliu/readthedocs.org,royalwang/readthedocs.org,tddv/readthedocs.org,mhils/readthedocs.org,michaelmcandrew/readthedocs.org,singingwolfboy/readthedocs.org,laplaceliu/readthedocs.org,GovReady/readthedocs.org,VishvajitP/readthedocs.org,kenwang76/readthedocs.org,wijerasa/readt
hedocs.org,istresearch/readthedocs.org,tddv/readthedocs.org,sid-kap/readthedocs.org,kenshinthebattosai/readthedocs.org,hach-que/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,titiushko/readthedocs.org,emawind84/readthedocs.org,SteveViss/readthedocs.org,techtonik/readthedocs.org,kenwang76/readthedocs.org,fujita-shintaro/readthedocs.org,gjtorikian/readthedocs.org,LukasBoersma/readthedocs.org,LukasBoersma/readthedocs.org,davidfischer/readthedocs.org,pombredanne/readthedocs.org,singingwolfboy/readthedocs.org,GovReady/readthedocs.org,titiushko/readthedocs.org,singingwolfboy/readthedocs.org,sid-kap/readthedocs.org,singingwolfboy/readthedocs.org,soulshake/readthedocs.org,CedarLogic/readthedocs.org,CedarLogic/readthedocs.org,istresearch/readthedocs.org,espdev/readthedocs.org,hach-que/readthedocs.org,LukasBoersma/readthedocs.org,clarkperkins/readthedocs.org,emawind84/readthedocs.org,pombredanne/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,VishvajitP/readthedocs.org,VishvajitP/readthedocs.org,fujita-shintaro/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,stevepiercy/readthedocs.org,Tazer/readthedocs.org,techtonik/readthedocs.org,wijerasa/readthedocs.org,wanghaven/readthedocs.org,kenwang76/readthedocs.org,royalwang/readthedocs.org,techtonik/readthedocs.org,davidfischer/readthedocs.org,espdev/readthedocs.org,atsuyim/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,safwanrahman/readthedocs.org,sid-kap/readthedocs.org,laplaceliu/readthedocs.org,royalwang/readthedocs.org,SteveViss/readthedocs.org,GovReady/readthedocs.org,laplaceliu/readthedocs.org,wijerasa/readthedocs.org,Tazer/readthedocs.org,VishvajitP/readthedocs.org,royalwang/readthedocs.org,attakei/readthedocs-oauth | readthedocs/rtd_tests/tests/test_gold.py | readthedocs/rtd_tests/tests/test_gold.py | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django_dynamic_fixture import get
from django_dynamic_fixture import new
from readthedocs.gold.models import GoldUser, LEVEL_CHOICES
from readthedocs.projects.models import Project
def create_user(username, password):
user = new(User, username=username)
user.set_password(password)
user.save()
return user
class GoldViewTests(TestCase):
def setUp(self):
self.user = create_user(username='owner', password='test')
self.project = get(Project, slug='test')
self.golduser = get(GoldUser, user=self.user, level=LEVEL_CHOICES[0][0])
self.client.login(username='owner', password='test')
def test_adding_projects(self):
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': 'test'})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
def test_too_many_projects(self):
self.project2 = get(Project, slug='test2')
self.assertEqual(self.golduser.projects.count(), 0)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.assertEqual(resp.status_code, 302)
resp = self.client.post(reverse('gold_projects'), data={'project': self.project2.slug})
self.assertFormError(
resp, form='form', field=None, errors='You already have the max number of supported projects.'
)
self.assertEqual(resp.status_code, 200)
self.assertEqual(self.golduser.projects.count(), 1)
def test_remove_project(self):
self.assertEqual(self.golduser.projects.count(), 0)
self.client.post(reverse('gold_projects'), data={'project': self.project.slug})
self.assertEqual(self.golduser.projects.count(), 1)
self.client.post(
reverse('gold_projects_remove', args=[self.project.slug]),
)
self.assertEqual(self.golduser.projects.count(), 0)
| mit | Python | |
680ab5562e2b4599c74b9605b688538c1da1479d | add profiler helper function | nikken1/patentprocessor,yngcan/patentprocessor,nikken1/patentprocessor,funginstitute/patentprocessor,yngcan/patentprocessor,nikken1/patentprocessor,funginstitute/patentprocessor,yngcan/patentprocessor,funginstitute/patentprocessor | lib/util/profile.py | lib/util/profile.py | import cProfile
def profile_this(fn):
def profiled_fn(*args, **kwargs):
fpath = fn.__name__ + '.profile'
prof = cProfile.Profile()
ret = prof.runcall(fn, *args, **kwargs)
prof.dump_stats(fpath)
return ret
return profiled_fn
# Just use the following decorator to get a pstat profile of it after runtime
#@profile_this
| bsd-2-clause | Python | |
73479f2efe46623f40ea4a49edfc79de0725a291 | Create cdbhelp.py | ChristinaHammer/Client_Database | cdbhelp.py | cdbhelp.py | """cdbhelp.py
This file will define a window which displays help information.
"""
from tkinter import *
from tkinter import ttk
class cdbHelp:
    """Help window for the client database application.

    Builds a Tk window titled 'Help' holding a Text widget and, for the
    login screen (tag == 'login'), fills it with volunteer instructions.
    """

    def __init__(self, tag):
        # NOTE(review): Tk() creates a brand-new root window; if the main
        # application already owns a Tk root, Toplevel() may be intended.
        self.bgcolor = 'lavender'
        self.helpwin = Tk()
        #self.frame=Frame(self.helpwin).grid()
        self.helpwin.configure(background=self.bgcolor)
        self.helpwin.title('Help')
        self.instruct = Text(self.helpwin, font=('Helvetica', 10))
        self.instruct.grid(row=3, column=1, padx=20, pady=10)
        if tag == 'login':
            self.volunteer_help()

    def volunteer_help(self):
        """Fill the help window with instructions for volunteers."""
        # Fixed: the original concatenation dropped the spaces at two line
        # joins, rendering "proceed to thedatabase" and "makechanges".
        vinstruct = ("Instructions: \n 1. Select your name from the list."
                     "\n2. Press the 'Login' button to proceed to the "
                     "database.\n3. Press the 'View' button to see and make "
                     "changes to your information.")
        self.helpwin.configure(background=self.bgcolor)
        self.instruct.insert('1.0', vinstruct)
        return
| mit | Python | |
426afb06904b2e4ebab380b6d5ea79c2f481cb44 | add text encoder | ethereum/pyrlp,ethereum/pyrlp | rlp/sedes/text.py | rlp/sedes/text.py | from rlp.exceptions import SerializationError, DeserializationError
from rlp.atomic import Atomic
class Text:
    """A sedes object for encoded text data of certain length.

    :param min_length: the minimal length in encoded characters or `None` for no lower limit
    :param max_length: the maximal length in encoded characters or `None` for no upper limit
    :param allow_empty: if true, empty strings are considered valid even if
                        a minimum length is required otherwise
    :param encoding: codec used to serialize/deserialize the text (default: utf8)
    """

    def __init__(self, min_length=None, max_length=None, allow_empty=False, encoding='utf8'):
        self.min_length = min_length or 0
        self.max_length = max_length or float('inf')
        self.allow_empty = allow_empty
        # Fixed: `encoding` was previously discarded although deserialize()
        # reads self.encoding, so any non-default codec crashed.
        self.encoding = encoding

    @classmethod
    def fixed_length(cls, l, allow_empty=False):
        """Create a sedes for text data with exactly `l` encoded characters."""
        return cls(l, l, allow_empty=allow_empty)

    @classmethod
    def is_valid_type(cls, obj):
        return isinstance(obj, str)

    def is_valid_length(self, l):
        return any((self.min_length <= l <= self.max_length,
                    self.allow_empty and l == 0))

    def serialize(self, obj):
        """Encode `obj` to bytes, validating its type and length first."""
        if not self.is_valid_type(obj):
            raise SerializationError('Object is not a serializable ({})'.format(type(obj)), obj)
        if not self.is_valid_length(len(obj)):
            raise SerializationError('Object has invalid length', obj)
        # Use the configured codec instead of a hard-coded 'utf8'.
        return obj.encode(self.encoding)

    def deserialize(self, serial):
        """Decode `serial` (byte data) back to text, validating its length."""
        if not isinstance(serial, Atomic):
            m = 'Objects of type {} cannot be deserialized'
            raise DeserializationError(m.format(type(serial).__name__), serial)
        # Fixed: bytes must be *decoded* back to text; the original called
        # serial.encode(...), which fails on byte strings.
        text_value = serial.decode(self.encoding)
        if self.is_valid_length(len(text_value)):
            return text_value
        else:
            raise DeserializationError('{} has invalid length'.format(type(serial)), serial)


# Default sedes instance for unconstrained utf8 text.
text = Text()
| mit | Python | |
c83e946a2b5205c7246b1cfbde7b6e84759b3876 | add potentiometer script | zpiman/golemScripts | potentiometer.py | potentiometer.py | #from pydcpf.appliances.quido import Device as QuidoDevice
#from pydcpf.appliances.ad4xxx_drak4 import Device as AD4Device
#from pydcpf.appliances.evr116 import Device as EVRDevice
#from pydcpf.appliances.AC250Kxxx import Device as AC250KDevice
# Serial-over-ethernet endpoint of the device (defined but not used below).
TELNET = "telnet 192.168.2.243 10001"
# Timing parameters, in seconds (not used below yet).
time_delay = 0.2
time_step = 0.001
# Output command strings -- presumably the Quido relay protocol where
# *B1OS<n>H drives output <n> high and *B1OS<n>L drives it low; confirm
# against the device manual (see the commented quido import above).
Ion = "*B1OS1H"
Ioff = "*B1OS1L"
IIon = "*B1OS2H"
IIoff = "*B1OS2L"
IIIon = "*B1OS3H"
IIIoff = "*B1OS3L"
def getValue():
return exec("wget -o /dev/null -O - 'http://192.168.2.253/data.xml' | grep -Po '<input id='1'.*val='\K[ 0-9]*'|xargs")
print getValue()
| mit | Python | |
afa810d9b80a5a304395c588455fe421a4a02129 | Add a simple script which monitors the paused domains on a host, checks them against the xapi database, logs anomalies, and optionally destroys the domain if it has been in an error state for longer than a threshold (currently 60s) | ravippandey/xen-api,koushikcgit/xen-api,cheng-z/xen-api,cheng--zhang/xen-api,djs55/xen-api,cheng-z/xen-api,ravippandey/xen-api,cheng--zhang/xen-api,jjd27/xen-api,jjd27/xen-api,simonjbeaumont/xen-api,cheng-z/xen-api,cheng-z/xen-api,thomassa/xen-api,salvocambria/xen-api,anoobs/xen-api,agimofcarmen/xen-api,cheng--zhang/xen-api,robertbreker/xen-api,agimofcarmen/xen-api,koushikcgit/xen-api,srowe/xen-api,robertbreker/xen-api,euanh/xen-api,robertbreker/xen-api,salvocambria/xen-api,anoobs/xen-api,euanh/xen-api,Frezzle/xen-api,euanh/xen-api,anoobs/xen-api,djs55/xen-api,simonjbeaumont/xen-api,vasilenkomike/xen-api,djs55/xen-api,srowe/xen-api,Frezzle/xen-api,koushikcgit/xen-api,cheng-z/xen-api,cheng--zhang/xen-api,ravippandey/xen-api,huizh/xen-api,guard163/xen-api,agimofcarmen/xen-api,thomassa/xen-api,ravippandey/xen-api,koushikcgit/xen-api,robertbreker/xen-api,simonjbeaumont/xen-api,cheng-z/xen-api,cheng-z/xen-api,guard163/xen-api,euanh/xen-api,jjd27/xen-api,huizh/xen-api,agimofcarmen/xen-api,Frezzle/xen-api,anoobs/xen-api,vasilenkomike/xen-api,huizh/xen-api,vasilenkomike/xen-api,anoobs/xen-api,rafalmiel/xen-api,simonjbeaumont/xen-api,jjd27/xen-api,euanh/xen-api,guard163/xen-api,vasilenkomike/xen-api,srowe/xen-api,vasilenkomike/xen-api,Frezzle/xen-api,vasilenkomike/xen-api,agimofcarmen/xen-api,thomassa/xen-api,djs55/xen-api,thomassa/xen-api,thomassa/xen-api,guard163/xen-api,rafalmiel/xen-api,simonjbeaumont/xen-api,srowe/xen-api,koushikcgit/xen-api,ravippandey/xen-api,rafalmiel/xen-api,guard163/xen-api,cheng--zhang/xen-api,salvocambria/xen-api,djs55/xen-api,agimofcarmen/xen-api,salvocambria/xen-api,huizh/xen-api,robertbreker/xen-api,simonjbeaumont/xen-api,rafalmiel/xen-api,huizh/xen-api,jjd27/xen-api,ravi
ppandey/xen-api,jjd27/xen-api,huizh/xen-api,salvocambria/xen-api,srowe/xen-api,rafalmiel/xen-api,guard163/xen-api,cheng--zhang/xen-api,anoobs/xen-api,thomassa/xen-api,koushikcgit/xen-api,euanh/xen-api,salvocambria/xen-api,djs55/xen-api,rafalmiel/xen-api,Frezzle/xen-api,cheng--zhang/xen-api,robertbreker/xen-api | scripts/examples/python/monitor-unwanted-domains.py | scripts/examples/python/monitor-unwanted-domains.py | #!/usr/bin/env python
import subprocess, XenAPI, inventory, time, sys
# Script which monitors the domains running on a host, looks for
# paused domains which don't correspond to VMs which are running here
# or are about to run here, logs them and optionally destroys them.
# Return a list of (domid, uuid) tuples, one per paused domain on this host
def list_paused_domains():
    """Return (domid, uuid) pairs for every paused domain on this host."""
    output = subprocess.Popen(["/opt/xensource/bin/list_domains"], stdout=subprocess.PIPE).communicate()[0]
    paused = []
    # Skip the header row; columns: domid | id | uuid | ... | state.
    for line in output.split("\n")[1:]:
        columns = line.split()
        if columns != []:
            domid = columns[0]
            uuid = columns[2]
            state = columns[4]
            # 'P' in the state field marks a paused domain.
            if 'P' in state:
                paused.append((domid, uuid))
    return paused
# Given localhost's uuid and a (domid, uuid) tuple, return True if the domain
# be somewhere else i.e. we think it may have leaked here
def should_domain_be_somewhere_else(localhost_uuid, (domid, uuid)):
    # A paused domain is considered leaked when xapi says the VM is not
    # resident on this host and no operation (e.g. a migrate) is in flight.
    try:
        x = XenAPI.xapi_local()
        x.xenapi.login_with_password("root", "")
        try:
            try:
                vm = x.xenapi.VM.get_by_uuid(uuid)
                resident_on = x.xenapi.VM.get_resident_on(vm)
                current_operations = x.xenapi.VM.get_current_operations(vm)
                result = current_operations == {} and resident_on <> localhost_uuid
                if result:
                    log("domid %s uuid %s: is not being operated on and is not resident here" % (domid, uuid))
                return result
            except XenAPI.Failure, e:
                if e.details[0] == "UUID_INVALID":
                    # VM is totally bogus
                    log("domid %s uuid %s: is not in the xapi database" % (domid, uuid))
                    return True
                # fail safe for now
                return False
        finally:
            x.xenapi.logout()
    except:
        # Any other failure (e.g. xapi unreachable): err on the side of
        # leaving the domain alone.
        return False
def log(str):
    # Minimal logger: everything goes to stdout.  (The parameter name
    # shadows the builtin `str`; kept unchanged here.)
    print str
# Destroy the given domain
def destroy_domain((domid, uuid)):
    # Forcibly destroy the Xen domain via the xapi debug helper; the uuid
    # element of the tuple is only used for logging.
    log("destroying domid %s uuid %s" % (domid, uuid))
    all = subprocess.Popen(["/opt/xensource/debug/destroy_domain", "-domid", domid], stdout=subprocess.PIPE).communicate()[0]
# Keep track of when a domain first looked like it should be here
domain_first_noticed = {}
# Number of seconds after which we conclude that a domain really shouldn't be here
threshold = 60
if __name__ == "__main__":
    localhost_uuid = inventory.get_localhost_uuid ()
    # Poll once per second; a paused domain must look leaked continuously
    # for `threshold` seconds before we act on it.
    while True:
        time.sleep(1)
        paused = list_paused_domains ()
        # GC the domain_first_noticed map
        # (py2: .keys() returns a list copy, so deleting inside the loop is safe)
        for d in domain_first_noticed.keys():
            if d not in paused:
                log("domid %s uuid %s: looks ok now, forgetting about it" % d)
                del domain_first_noticed[d]
        for d in list_paused_domains():
            if should_domain_be_somewhere_else(localhost_uuid, d):
                # Record the first sighting, then measure how long it persists.
                if d not in domain_first_noticed:
                    domain_first_noticed[d] = time.time()
                noticed_for = time.time() - domain_first_noticed[d]
                if noticed_for > threshold:
                    log("domid %s uuid %s: has been in bad state for over threshold" % d)
                    # Destruction is opt-in via the -destroy flag.
                    if "-destroy" in sys.argv:
                        destroy_domain(d)
| lgpl-2.1 | Python | |
eb3510933b356c5b97e7a0cce9ebad563f21bf3c | Create BinTreeInTraversal_002.py | Chasego/cod,cc13ny/algo,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,Chasego/codirit,Chasego/codi,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codirit,Chasego/codirit,cc13ny/algo,Chasego/codirit,Chasego/cod,Chasego/codi,cc13ny/algo,cc13ny/Allin,Chasego/cod,cc13ny/algo,cc13ny/Allin,cc13ny/algo,Chasego/codi | leetcode/094-Binary-Tree-Inorder-Traversal/BinTreeInTraversal_002.py | leetcode/094-Binary-Tree-Inorder-Traversal/BinTreeInTraversal_002.py | class Solution:
# @param root, a tree node
# @return a list of integers
def iterative_inorder(self, root, list):
stack = []
while root or stack:
if root:
stack.append(root)
root = root.left
else:
root = stack.pop()
list.append(root.val)
root = root.right
return list
def recursive_inorder(self, root, list):
if root:
self.inorder(root.left, list)
list.append(root.val)
self.inorder(root.right, list)
def inorderTraversal(self, root):
list = []
self.iterative_inorder(root, list)
return list
| mit | Python | |
7ac77a2f95bebad6a13e1d538c366c8688c9d0a6 | Create __init__.py | thegreathippo/crispy | crispy/localevents/__init__.py | crispy/localevents/__init__.py | import core
| mit | Python | |
aafdd253bc818d605023dd2a22164d2ac3cdc911 | Add first draft of outdoor map (for issue #3) | BHSPitMonkey/vmflib | examples/outdoor.py | examples/outdoor.py | #!/usr/bin/python
"""Example map generator: Outdoor
This script demonstrates vmflib by generating a map with a 2D skybox and
some terrain (a displacement map).
"""
from vmf import *
from vmf.types import Vertex
from vmf.tools import Block
# Build the room shell: a 1024-unit cube centred on the origin, made of
# six axis-aligned slabs (Block(origin, dimensions)).
m = vmf.ValveMap()
walls = []
# Floor
floor = Block(Vertex(0, 0, -512), (1024, 1024, 64))
# Ceiling
# (the skybox tool texture makes the ceiling render as 2D sky)
ceiling = Block(Vertex(0, 0, 512), (1024, 1024, 64))
ceiling.set_material('tools/toolsskybox2d')
# Left wall
walls.append(Block(Vertex(-512, 0, 0), (64, 1024, 1024)))
# Right wall
walls.append(Block(Vertex(512, 0, 0), (64, 1024, 1024)))
# Forward wall
walls.append(Block(Vertex(0, 512, 0), (1024, 64, 1024)))
# Rear wall
walls.append(Block(Vertex(0, -512, 0), (1024, 64, 1024)))
# Set each wall's material
for wall in walls:
    wall.set_material('PL_BARNBLITZ/WOODWALL_YELLOWWORN002')
# Add walls to world geometry
m.world.children.extend(walls)
m.world.children.extend([floor, ceiling])
# TODO: Define a playerspawn entity
# Write the map to a file
m.write_vmf('outdoor.vmf')
| bsd-2-clause | Python | |
268eb8d8591a529c0ee67e6f37b287956b39eb1b | Copy kafka-patch-review.py from kafka trunk | evvers/kafka-dev-tools | kafka-patch-review.py | kafka-patch-review.py | #!/usr/bin/env python
import argparse
import sys
import os
import time
import datetime
import tempfile
import commands
from jira.client import JIRA
def get_jira():
  """Build an authenticated JIRA client from credentials in ~/jira.ini."""
  options = {'server': 'https://issues.apache.org/jira'}
  # ~/jira.ini holds one "key=value" pair per line ("user=..." / "password=...").
  home_dir = os.getenv('HOME').rstrip('/')
  credentials = {}
  for line in open(home_dir + '/jira.ini'):
    key, value = line.strip().split('=')
    credentials[key] = value
  return JIRA(options, basic_auth=(credentials['user'], credentials['password']))
def cmd_exists(cmd):
  # Despite the name, this returns the command's *exit status* (0 when the
  # command ran successfully, i.e. exists), not a boolean -- callers compare
  # the result against 0.
  status, result = commands.getstatusoutput(cmd)
  return status
def main():
  ''' main(), shut up, pylint '''
  # --- command line -------------------------------------------------------
  popt = argparse.ArgumentParser(description='Kafka patch review tool')
  popt.add_argument('-b', '--branch', action='store', dest='branch', required=True, help='Tracking branch to create diff against')
  popt.add_argument('-j', '--jira', action='store', dest='jira', required=True, help='JIRA corresponding to the reviewboard')
  popt.add_argument('-s', '--summary', action='store', dest='summary', required=False, help='Summary for the reviewboard')
  popt.add_argument('-d', '--description', action='store', dest='description', required=False, help='Description for reviewboard')
  popt.add_argument('-r', '--rb', action='store', dest='reviewboard', required=False, help='Review board that needs to be updated')
  popt.add_argument('-t', '--testing-done', action='store', dest='testing', required=False, help='Text for the Testing Done section of the reviewboard')
  popt.add_argument('-db', '--debug', action='store_true', required=False, help='Enable debug mode')
  opt = popt.parse_args()
  # Locate an installed ReviewBoard client: old post-review, or newer rbt.
  post_review_tool = None
  if (cmd_exists("post-review") == 0):
    post_review_tool = "post-review"
  elif (cmd_exists("rbt") == 0):
    post_review_tool = "rbt post"
  else:
    print "please install RBTools"
    sys.exit(1)
  # Updates to an existing review get a timestamped patch name so earlier
  # attachments on the JIRA are not clobbered.
  patch_file=tempfile.gettempdir() + "/" + opt.jira + ".patch"
  if opt.reviewboard:
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
    patch_file=tempfile.gettempdir() + "/" + opt.jira + '_' + st + '.patch'
  # first check if rebase is needed
  git_branch_hash="git rev-parse " + opt.branch
  p_now=os.popen(git_branch_hash)
  branch_now=p_now.read()
  p_now.close()
  git_common_ancestor="git merge-base " + opt.branch + " HEAD"
  p_then=os.popen(git_common_ancestor)
  branch_then=p_then.read()
  p_then.close()
  # If the tracking branch's tip is not our merge ancestor, HEAD is stale.
  if branch_now != branch_then:
    print 'ERROR: Your current working branch is from an older version of ' + opt.branch + '. Please rebase first by using git pull --rebase'
    sys.exit(1)
  git_configure_reviewboard="git config reviewboard.url https://reviews.apache.org"
  print "Configuring reviewboard url to https://reviews.apache.org"
  p=os.popen(git_configure_reviewboard)
  p.close()
  git_remote_update="git remote update"
  print "Updating your remote branches to pull the latest changes"
  p=os.popen(git_remote_update)
  p.close()
  # --- build and run the ReviewBoard post command -------------------------
  rb_command= post_review_tool + " --publish --tracking-branch " + opt.branch + " --target-groups=kafka --bugs-closed=" + opt.jira
  if opt.debug:
    rb_command=rb_command + " --debug"
  summary="Patch for " + opt.jira
  if opt.summary:
    summary=opt.summary
  rb_command=rb_command + " --summary \"" + summary + "\""
  if opt.description:
    rb_command=rb_command + " --description \"" + opt.description + "\""
  if opt.reviewboard:
    rb_command=rb_command + " -r " + opt.reviewboard
  if opt.testing:
    rb_command=rb_command + " --testing-done=" + opt.testing
  if opt.debug:
    print rb_command
  # Scrape the tool's output for the review URL and known failure banners.
  p=os.popen(rb_command)
  rb_url=""
  for line in p:
    print line
    if line.startswith('http'):
      rb_url = line
    elif line.startswith("There don't seem to be any diffs"):
      print 'ERROR: Your reviewboard was not created/updated since there was no diff to upload. The reasons that can cause this issue are 1) Your diff is not checked into your local branch. Please check in the diff to the local branch and retry 2) You are not specifying the local branch name as part of the --branch option. Please specify the remote branch name obtained from git branch -r'
      p.close()
      sys.exit(1)
    elif line.startswith("Your review request still exists, but the diff is not attached") and not opt.debug:
      print 'ERROR: Your reviewboard was not created/updated. Please run the script with the --debug option to troubleshoot the problem'
      p.close()
      sys.exit(1)
  p.close()
  if opt.debug:
    print 'rb url=',rb_url
  # --- produce the patch file and attach it to the JIRA -------------------
  git_command="git diff " + opt.branch + " > " + patch_file
  if opt.debug:
    print git_command
  p=os.popen(git_command)
  p.close()
  print 'Creating diff against', opt.branch, 'and uploading patch to JIRA',opt.jira
  jira=get_jira()
  issue = jira.issue(opt.jira)
  attachment=open(patch_file)
  jira.add_attachment(issue,attachment)
  attachment.close()
  # Leave a comment linking the review to the tracking branch.
  comment="Created reviewboard "
  if not opt.reviewboard:
    print 'Created a new reviewboard',rb_url,
  else:
    print 'Updated reviewboard',rb_url
    comment="Updated reviewboard "
  comment = comment + rb_url + ' against branch ' + opt.branch
  jira.add_comment(opt.jira, comment)

if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 | Python | |
1c228a8de02c81df8d22bde75ac22b902ee39c77 | Add oedb connection helper | openego/data_processing | data_processing/tools/io.py | data_processing/tools/io.py | from sqlalchemy import create_engine
def oedb_session(section='oedb'):
    """Get SQLAlchemy session object with valid connection to OEDB"""
    # NOTE(review): despite the name this returns an engine/connection
    # object, not an ORM session -- confirm callers expect that.
    # get session object by oemof.db tools (requires .oemof/config.ini
    try:
        # Fixed typo: the package is `oemof`, not `oemofof` -- the old
        # import could never succeed, so the config-file path was dead code.
        from oemof import db
        conn = db.connection(section=section)
    except Exception:
        # No (working) oemof.db config: fall back to asking the user for
        # connection parameters interactively.
        print('Please provide connection parameters to database:\n' +
              'Hit [Enter] to take defaults')
        host = input('host (default oe.iws.cs.ovgu.de): ') or 'oe.iws.cs.ovgu.de'
        port = input('port (default 5432): ') or '5432'
        database = input("database name (default 'oedb'): ") or 'oedb'
        user = input('user (default postgres): ')
        password = input('password: ')
        conn = create_engine(
            'postgresql://' + '%s:%s@%s:%s/%s' % (user,
                                                  password,
                                                  host,
                                                  port,
                                                  database))
return conn | agpl-3.0 | Python | |
f7742c3ffcd86667e86e7cb80977f24eddc5444c | add wrapper for `gr1x` that circumvents entry point | johnyf/gr1experiments | examples/wrapper.py | examples/wrapper.py | #!/usr/bin/env python
"""Wrapper to circumvent the entry point.
Because, if development versions of dependencies are installed,
but `install_requires` contains no local identifiers,
then the entry point raises a `VersionConflict` for its context.
"""
import sys
from tugs import solver
if __name__ == '__main__':
    # Call the solver directly, bypassing the installed console-script
    # entry point (see module docstring), forwarding all CLI arguments.
    solver.command_line_wrapper(args=sys.argv[1:])
| bsd-3-clause | Python | |
00c8c165e3f9a136a8950ca1fb0f2d9ade6731d6 | Add a regression test for whitespace normalization in the BibTeX parser. | chbrown/pybtex,andreas-h/pybtex,andreas-h/pybtex,chbrown/pybtex | pybtex/tests/bibtex_parser_test.py | pybtex/tests/bibtex_parser_test.py | from pybtex.database import BibliographyData
from pybtex.core import Entry
from pybtex.database.input.bibtex import Parser
from cStringIO import StringIO
test_data = [
(
'''
''',
BibliographyData(),
),
(
'''@ARTICLE{
test,
title={Polluted
with {DDT}.
},
}''',
BibliographyData({u'test': Entry('article', {u'title': 'Polluted with {DDT}.'})}),
),
]
def _test(bibtex_input, correct_result):
    """Parse *bibtex_input* and assert the resulting database matches."""
    parser = Parser(encoding='UTF-8')
    parser.parse_stream(StringIO(bibtex_input))
    assert parser.data == correct_result
def test_bibtex_parser():
    # Each test_data entry pairs a BibTeX snippet with its expected database.
    for bibtex_input, expected in test_data:
        _test(bibtex_input, expected)
| mit | Python | |
337928d30d96146cb8033e3ccb15d7d6d0c85d5a | add managed_layer_test | google/neuroglancer,google/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer,janelia-flyem/neuroglancer,google/neuroglancer,google/neuroglancer,janelia-flyem/neuroglancer | python/tests/managed_layer_test.py | python/tests/managed_layer_test.py | import neuroglancer
def test_visible():
    """`visible` round-trips through JSON, serialized only when False."""
    seg_layer = neuroglancer.ManagedLayer('a', {'type': 'segmentation', 'visible': False})
    assert seg_layer.name == 'a'
    assert seg_layer.visible == False
    assert seg_layer.to_json() == {'name': 'a', 'type': 'segmentation', 'visible': False}
    # Visible layers omit the flag entirely (True is the default).
    seg_layer.visible = True
    assert seg_layer.to_json() == {'name': 'a', 'type': 'segmentation'}
| apache-2.0 | Python | |
38903c6b6e4dec9fd2fe73b0c468a8b3f2ab870a | Add multi np array | jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm | python_practice/numpy_exercise2.py | python_practice/numpy_exercise2.py | import numpy as np
Matrix_A = np.array( [[1,1],[0,1]] )
Matrix_B = np.array( [[2,0],[3,4]] )
# '*' on ndarrays is *elementwise* multiplication, not matrix product.
print Matrix_A*Matrix_B
# .dot() and np.dot() both perform true matrix multiplication and
# therefore print the same result.
print Matrix_A.dot(Matrix_B)
print np.dot(Matrix_A, Matrix_B)
| mit | Python | |
46393dca28abe0df421066e76b26f198ad790690 | Create NimGame_001.py | cc13ny/algo,Chasego/codirit,Chasego/codi,Chasego/cod,Chasego/codirit,Chasego/codirit,Chasego/codirit,Chasego/codi,Chasego/codi,cc13ny/algo,Chasego/cod,Chasego/cod,Chasego/cod,Chasego/codirit,cc13ny/Allin,cc13ny/algo,cc13ny/Allin,Chasego/codi,cc13ny/Allin,Chasego/codi,cc13ny/algo,cc13ny/Allin,Chasego/cod,cc13ny/algo,cc13ny/Allin | leetcode/NimGame_001.py | leetcode/NimGame_001.py | class Solution(object):
def canWinNim(self, n):
"""
:type n: int
:rtype: bool
"""
return not (n % 4 == 0)
| mit | Python | |
0aa078c8beb6bad3dd0f30463f2925f01282e353 | add merge script | tobiasrausch/alfred,tobiasrausch/alfred,tobiasrausch/alfred,tobiasrausch/alfred,tobiasrausch/alfred,tobiasrausch/alfred | scripts/merge.py | scripts/merge.py | #! /usr/bin/env python
import gzip
import json
import sys
# Merged result: the "samples" lists of every input file are concatenated
# under one "samples" key (see the loop below).
ret = {
    "samples": []
}
def opn(fn):
    """Open *fn* for reading, transparently handling gzip-compressed files."""
    opener = gzip.open if fn.endswith('.gz') else open
    return opener(fn)
# Concatenate the "samples" arrays of every file named on the command line
# and emit the combined document on stdout.
for file_name in sys.argv[1:]:
    with opn(file_name) as f:
        file_content = json.load(f)
        ret["samples"].extend(file_content["samples"])
print(json.dumps(ret))
| bsd-3-clause | Python | |
9a045ac0c5cfe39689d8e1446674193e8862d269 | add IPLookup | Naught0/qtbot | cogs/ip.py | cogs/ip.py | #!/bin/env python
import discord
from discord.ext import commands
from utils import aiohttp_wrap as aw
class IPLookup:
    """Discord cog that geolocates an IP address or hostname via ip-api.com."""

    def __init__(self, bot):
        self.bot = bot
        self.aio_session = bot.aio_session
        # ip-api.com JSON endpoint; `{}` is filled with the queried host/IP.
        self.api_uri = 'http://ip-api.com/json/{}'

    @commands.command(aliases=['ip'])
    async def iplookup(self, ctx, *, query: str):
        """ Get information about an IP or website """
        # Fixed: the query must be interpolated into the API URL -- the
        # original fetched `query` itself and never used self.api_uri.
        res = await aw.aio_get_json(self.aio_session, self.api_uri.format(query))
        # Check whether successful
        if not res or res['status'] == 'fail':
            return await ctx.send(f"Sorry, I couldn't find any data on `{query}`.")
        em = discord.Embed(title=res['org'], color=discord.Color.dark_magenta())
        em.add_field(name='Location', value=f"{res['city'], res['regionName'], res['country']}")
        em.add_field(name='Coordinates', value=f"({res['lat']:.3f}, {res['lon']:.3f})")
        em.add_field(name='ISP', value=res['isp'])
        await ctx.send(embed=em)
def setup(bot):
bot.add_cog(IPLookup(bot)) | mit | Python | |
2d53172748f1dc7c8462fa79a6a158e4689b2363 | Add ThermalDisplacements test | atztogo/phonopy,atztogo/phonopy,atztogo/phonopy,atztogo/phonopy | test/phonon/test_thermal_displacement.py | test/phonon/test_thermal_displacement.py | import numpy as np
# Temperatures (K) matching the rows of td_ref below.  (Currently unused:
# the test builds its own integer temperature list.)
temps = [100.0 * i for i in range(10)]
# Reference displacement values, one row per temperature; each row repeats
# two values three times each.
td_ref = [[x, x, x, y, y, y] for x, y in [
    (0.00571624, 0.00403776),
    (0.00877353, 0.00654962),
    (0.01513305, 0.01151749),
    (0.02198817, 0.01681392),
    (0.02898107, 0.02220032),
    (0.03603064, 0.02762357),
    (0.04310888, 0.03306543),
    (0.05020359, 0.03851798),
    (0.05730864, 0.04397723),
    (0.06442059, 0.04944096),
]]
def test_ThermalDisplacements(ph_nacl):
    """Thermal displacements on a 5x5x5 mesh must match the stored reference."""
    ph_nacl.init_mesh([5, 5, 5],
                      with_eigenvectors=True,
                      is_mesh_symmetry=False,
                      use_iter_mesh=True)
    ph_nacl.run_thermal_displacements(
        temperatures=[0, 100, 200, 300, 400, 500, 600, 700, 800, 900],
        freq_min=1e-2)
    result = ph_nacl.thermal_displacements.thermal_displacements
    np.testing.assert_allclose(td_ref, result, atol=1e-5)
| bsd-3-clause | Python | |
314fcab1904cd0c5e434789bef09766d33e2d6ef | add synth.py for generation | googleapis/google-cloud-node,googleapis/google-cloud-node,googleapis/google-cloud-node,googleapis/google-cloud-node | packages/google-cloud-asset/synth.py | packages/google-cloud-asset/synth.py | # copyright 2018 google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""this script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import subprocess
import logging
# Fixed: this block had been case-mangled (logging.basicconfig,
# logging.debug as a level, gcp.gapicgenerator, gcp.commontemplates,
# 'readme.md') -- none of those identifiers/paths exist; restore the real
# logging/synthtool names.
logging.basicConfig(level=logging.DEBUG)

gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()
version = "v1"

# Generate the GAPIC client surface for the Cloud Asset API, keeping the
# hand-maintained files out of the copy.
library = gapic.node_library('asset', version, private=True)
s.copy(library, excludes=['src/index.js', 'README.md', 'package.json'])

# Layer the common node.js repository templates on top.
templates = common_templates.node_library(
    package_name="@google-cloud/asset",
    repo_name="googleapis/nodejs-asset",
)
s.copy(templates)

'''
node.js specific cleanup
'''
subprocess.run(['npm', 'ci'])
subprocess.run(['npm', 'run', 'prettier'])
subprocess.run(['npm', 'run', 'lint'])
| apache-2.0 | Python | |
37f7ab9435939a144b08fdbb52e1e519ad139318 | add some field definitions | chronossc/django-ldapdb,UGentPortaal/django-ldapdb-archived,UGentPortaal/django-ldapdb | ldapdb/models/fields.py | ldapdb/models/fields.py | # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (C) 2009 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.db.models import fields, SubfieldBase
class CharField(fields.CharField):
    # LDAP string attribute backed by Django's CharField with a fixed size.
    # NOTE(review): this silently overrides any caller-supplied max_length
    # with 200 -- confirm that is intended.
    def __init__(self, *args, **kwargs):
        kwargs['max_length'] = 200
        super(CharField, self).__init__(*args, **kwargs)
class ImageField(fields.Field):
    # Placeholder for binary image attributes; no behaviour beyond the
    # base Field yet.
    pass
class IntegerField(fields.IntegerField):
    # Direct reuse of Django's IntegerField, presumably re-exported here so
    # all LDAP field types can be imported from this module.
    pass
class ListField(fields.Field):
    # Python 2 metaclass hook: SubfieldBase makes Django call to_python on
    # attribute assignment.
    __metaclass__ = SubfieldBase

    def to_python(self, value):
        """Normalize missing/empty values to an empty list."""
        return value if value else []
| bsd-2-clause | Python | |
24dbbd880142ed0925c30083cd3926b47c7ba90c | add mobilenet | analysiscenter/dataset | dataset/models/tf/mobilenet.py | dataset/models/tf/mobilenet.py | ''' Contains class for MobileNet '''
import numpy as np
import tensorflow as tf
from . import TFModel
from .layers import conv_block
_DEFAULT_BODY = {'strides': [1, 2, 1, 2, 1, 2,
1, 1, 1, 1, 1,
2, 2],
'double_filters': [True, True, False, True, False, True,
False, False, False, False, False,
True, False]}
class MobileNet(TFModel):
    """ MobileNet

    References
    ----------
    Howard A. G. et al. "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
    Arxiv.org `<https://arxiv.org/abs/1704.04861>`_

    **Configuration**

    inputs : dict
        dict with keys 'images' and 'masks' (see :meth:`._make_inputs`)
    input_block : dict
    strides : list of int
        strides in separable convolutions
    double_filters : list of bool
        if True number of filters in 1x1 covolution will be doubled
    width_factor : float
        multiplier for number of channels
    resolution_factor : float
        multiplier for spatial resolution
    head : dict
    """
    def _build_config(self, names=None):
        """Assemble the model configuration from defaults and user settings."""
        if not names:
            names = ['images', 'labels']
        config = super()._build_config(names)

        config['input_block']['inputs'] = self.inputs['images']
        # User-supplied input_block settings override these defaults, and
        # anything already present in config overrides both.
        default_input = {'layout': 'cna', 'filters': 32,
                         'kernel_size' : 3, 'strides': 2}
        user_input = self.get_from_config('input_block', default_input)
        config['input_block'] = {**user_input, **config['input_block']}

        config['default']['data_format'] = self.data_format('images')

        body = config['body']
        body['strides'] = self.get_from_config('strides', _DEFAULT_BODY['strides'])
        body['double_filters'] = self.get_from_config('double_filters',
                                                      _DEFAULT_BODY['double_filters'])
        body['width_factor'] = self.get_from_config('width_factor', 1.0)
        body['resolution_factor'] = self.get_from_config('resolution_factor', 1.0)

        head_defaults = dict(layout='Vf', units=self.num_classes('labels'))
        config['head'] = {**head_defaults, **config['head']}
        return config

    @classmethod
    def body(cls, inputs, strides, double_filters, name='body', **kwargs):
        """ Base layers: a chain of separable-convolution blocks.

        Parameters
        ----------
        inputs : tf.Tensor
            input tensor
        strides : list of int
            stride of each block's separable convolution
        double_filters : list of bool
            whether each block doubles its channel count in the 1x1 convolution
        name : str
            scope name

        Returns
        -------
        tf.Tensor
        """
        with tf.variable_scope(name):
            x = inputs
            for index, stride in enumerate(strides):
                scope = 'block-' + str(index)
                x = cls.block(x, stride, double_filters[index], scope, **kwargs)
        return x

    @classmethod
    def block(cls, inputs, strides, double_filters=False, name=None, **kwargs):
        """ A separable depthwise convolution followed by a 1x1 pointwise convolution.

        Parameters
        ----------
        inputs : tf.Tensor
            input tensor
        strides : int
            stride of the separable convolution
        double_filters : bool
            if True, the pointwise convolution doubles the channel count
        name : str
            scope name

        Returns
        -------
        tf.Tensor
        """
        in_channels = cls.channels_shape(inputs, kwargs.get('data_format'))
        out_channels = in_channels * 2 if double_filters else in_channels
        return conv_block(inputs, [in_channels, out_channels], [3, 1], 'sna cna',
                          name, [strides, 1], **kwargs)
| apache-2.0 | Python | |
c4e83d80a9600b9059619c73bc5756ce7b2a1d6d | add extract images module | nontas/menpo3d,grigorisg9gr/menpo3d,grigorisg9gr/menpo3d,nontas/menpo3d | menpo3d/extractimage.py | menpo3d/extractimage.py | import numpy as np
from menpo3d.rasterize import GLRasterizer, model_to_clip_transform
from menpo.shape import PointCloud
def render_hi_res_shape_image(mesh, render_width=3000):
h, w = mesh.range()[:2]
aspect_ratio = w / h
height = render_width * aspect_ratio
r = GLRasterizer(
projection_matrix=model_to_clip_transform(mesh).h_matrix,
width=render_width, height=height)
return r.model_to_image_transform, r.rasterize_mesh_with_shape_image(mesh)[1]
def per_vertex_occlusion(mesh, err_proportion=0.0001, err_norm='z', render_width=3000):
# Render a high-resolution shape image for visibility testing
model_to_image_transform, shape_image = render_hi_res_shape_image(mesh, render_width=render_width)
# err_proportion=0.01 is 1% deviation of total range of 3D shape
err_scale = mesh.range()[2].sum() if err_norm == 'z' else np.sqrt(
(mesh.range() ** 2).sum())
threshold = err_scale * err_proportion
sample_points_3d = mesh
sample_points_2d = model_to_image_transform.apply(sample_points_3d)
xyz_found = shape_image.as_unmasked().sample(sample_points_2d, order=1).T
err = np.sum((xyz_found - sample_points_3d.points) ** 2, axis=1)
return err < threshold
def per_vertex_occlusion_accurate(mesh):
from menpo3d.vtkutils import trimesh_to_vtk
import vtk
tol = mesh.mean_edge_length() / 1000
min_, max_ = mesh.bounds()
z_min = min_[-1] - 10
z_max = max_[-1] + 10
ray_start = mesh.points.copy()
ray_end = mesh.points.copy()
points = mesh.points
ray_start[:, 2] = z_min
ray_end[:, 2] = z_max
vtk_mesh = trimesh_to_vtk(mesh)
obbTree = vtk.vtkOBBTree()
obbTree.SetDataSet(vtk_mesh)
obbTree.BuildLocator()
vtk_points = vtk.vtkPoints()
vtk_cellIds = vtk.vtkIdList()
bad_val = tuple(ray_start[0])
first_intersects = []
for start, end, point in zip(ray_start, ray_end, points):
start = tuple(start)
end = tuple(end)
obbTree.IntersectWithLine(start, end, vtk_points, vtk_cellIds)
data = vtk_points.GetData()
break
for start, end, point in zip(ray_start, ray_end, points):
start = tuple(start)
end = tuple(end)
#obbTree.IntersectWithLine(start, end, vtk_points, vtk_cellIds)
data = vtk_points.GetData()
if data.GetNumberOfTuples() > 0:
first_intersects.append(data.GetTuple3(0))
else:
first_intersects.append(bad_val)
visible = np.linalg.norm(points - np.array(first_intersects), axis=1) < tol
return visible
def extract_per_vertex_colour(mesh, image):
    """Sample *image* at the (x, y) position of every vertex of *mesh*."""
    xy_vertices = PointCloud(mesh.points[:, :2])
    return image.sample(xy_vertices).T
def extract_per_vertex_colour_with_occlusion(mesh, image, render_width=3000):
    """Sample per-vertex colours and pair them with a visibility mask."""
    sampled_colours = extract_per_vertex_colour(mesh, image)
    visibility_mask = per_vertex_occlusion(mesh, render_width=render_width)
    return sampled_colours, visibility_mask
def extract_per_vertex_features(mesh, image, feature_f, diagonal_range=None):
    """Compute ``feature_f`` on *image* and sample it at each mesh vertex.

    If *diagonal_range* is given, the image is first rescaled so the
    landmark bounding-box diagonal matches that range.
    """
    working = image.copy()
    working.landmarks['mesh_2d'] = mesh.with_dims([0, 1])
    if diagonal_range is not None:
        working = working.rescale_landmarks_to_diagonal_range(
            diagonal_range, group='mesh_2d')
    feature_image = feature_f(working)
    mesh_2d = feature_image.landmarks['mesh_2d'].lms
    return extract_per_vertex_colour(mesh_2d, feature_image)
| bsd-3-clause | Python | |
c5fd2a7bccb45325acdf7f0800843ddb9ad82b64 | split Natura2000 pdf files | eaudeweb/natura2000db,eaudeweb/natura2000db,eaudeweb/natura2000db,eaudeweb/natura2000db | migrations/split_pdf.py | migrations/split_pdf.py | #use http://pybrary.net/pyPdf/
from pyPdf import PdfFileWriter, PdfFileReader
import re
# Natura 2000 site codes embedded in each section, e.g. "ROSCI0123"/"ROSPA0045".
pattern = re.compile(r"RO(SCI|SPA)\d{4}")
# NOTE(review): hard-coded absolute input path; only valid on the author's machine.
source_path = "/Users/cornel/Downloads/2011-10-20_protectia_naturii_RO_SPA_SDF_2011.pdf"
# Python 2 file() builtin; the reader keeps this handle open for the whole run.
pdf = PdfFileReader(file(source_path, "rb"))
def save_pdf(output, name):
    """Write the pages accumulated in *output* to ``<name>.pdf`` in the cwd."""
    out_stream = file("%s.pdf" % name, "wb")
    output.write(out_stream)
    out_stream.close()
# Walk every page; whenever a page starts a new site section
# ("1. IDENTIFICAREA SITULUI"), flush the accumulated pages under the
# previous site's code and start a new output document.
output = PdfFileWriter()
for i in range(0, pdf.getNumPages()):
    page = pdf.getPage(i)
    #skip empty pages
    if page.has_key('/Contents'):
        text = page.extractText()
        if text.find('1. IDENTIFICAREA SITULUI') > 0:
            if i:
                save_pdf(output, name)
                output = PdfFileWriter()
            # NOTE(review): assumes the section page always contains a site
            # code; pattern.search() returning None would raise here.
            match = pattern.search(text)
            name = match.group(0)
        output.addPage(pdf.getPage(i))
#save last pages
save_pdf(output, name)
| bsd-3-clause | Python | |
8ffcf96b5b270fa77026c8c62ee267363ae2e7b1 | Add virtualenv plugin: build python environment. | liwushuo/fapistrano | fapistrano/plugins/virtualenv.py | fapistrano/plugins/virtualenv.py | # -*- coding: utf-8 -*-
"""
The virtualenv plugin provides
2 virtualenv environments:
    1. virtualenvwrapper in /home/deploy/.virtualenvs/%(project_name)s
    2. virtualenv inside each release directory
and 2 pip install steps:
    1. pip
    2. pip wheel
"""
from fabric.api import run, env, prefix, cd
from fabric.contrib.files import exists
from .. import signal
def init():
    """Install plugin defaults on the fabric env and register the deploy hook."""
    defaults = (
        ('virtualenv_type', 'virtualenvwrapper'),
        ('virtualenv_upgrade_pip', True),
    )
    for attr, value in defaults:
        if not hasattr(env, attr):
            setattr(env, attr, value)
    signal.register('deploy.updated', build_python_env)
def build_python_env():
    """Create/activate the virtualenv, optionally upgrade pip, then install
    the project's requirements."""
    builders = {
        'virtualenvwrapper': (_check_virtualenvwrapper_env,
                              _check_virtualenvwrapper_activate),
        'virtualenv': (_check_virtualenv_env,
                       _check_virtualenv_activate),
    }
    for step in builders.get(env.virtualenv_type, ()):
        step()
    if env.virtualenv_upgrade_pip:
        _upgrade_pip()
    _install_requirements()
def _check_virtualenvwrapper_env():
    # Create the shared virtualenvwrapper environment on first deploy.
    if not exists('~/.virtualenvs/%(project_name)s' % env):
        run('source %(virtualenvwrapper_source)s && mkvirtualenv %(project_name)s' % env)
def _check_virtualenv_env():
    # Create a per-release virtualenv inside the new release directory.
    if not exists('%(releases_path)s/%(new_release)s/venv' % env):
        run('virtualenv %(releases_path)s/%(new_release)s/venv' % env)
def _check_virtualenvwrapper_activate():
    # Point env.activate at the shared virtualenvwrapper environment.
    env.activate = 'source ~/.virtualenvs/%(project_name)s/bin/activate' % env
def _check_virtualenv_activate():
    # Point env.activate at the per-release virtualenv.
    env.activate = 'source %(releases_path)s/%(new_release)s/venv/bin/activate' % env
def _upgrade_pip():
    # Upgrade pip/setuptools/wheel; retry verbosely if the quiet run fails.
    with prefix(env.activate):
        run('pip install -q -U pip setuptools wheel || pip install -U pip setuptools wheel')
def _install_requirements():
    """pip-install the project's requirements inside the new release dir."""
    with prefix(env.activate):
        with cd('%(releases_path)s/%(new_release)s' % env):
            # BUGFIX: the original applied "% env" to a string with no
            # format specifiers -- a no-op that would break the moment a
            # literal '%' is added to the command.
            run('pip install -r requirements.txt')
| mit | Python | |
4987e1722f8b55e99fbc9455eafe0210b5973060 | create server bootstrap script | hertzwhenip/virtual-hammond,hertzwhenip/virtual-hammond,hertzwhenip/virtual-hammond,hertzwhenip/virtual-hammond | server/server.py | server/server.py | #! /usr/bin/python
# encoding=utf-8
import os
import cherrypy
from http.router import Router
def bootstrap():
    """Configure CherryPy, mount the API router and serve until shutdown."""
    config_path = os.path.join(os.getcwd(), 'config/api.conf')
    config_path = os.path.abspath(config_path)
    api_router = Router()
    # Register the CORS tool so it runs before every handler.
    cherrypy.tools.CORS = cherrypy.Tool('before_handler', api_router.cors)
    cherrypy.tree.mount(api_router, '/api', config_path)
    cherrypy.engine.start()
    cherrypy.engine.block()


if __name__ == '__main__':
    bootstrap()
| mit | Python | |
45c81e268df7f01fdeb64b053583b2946739eebe | add pactest with pacparser lib | aulphar/gfw_whitelist,aulphar/gfw_whitelist,blog2i2j/gfw_whitelist,amoxicillin/gfw_whitelist,felixonmars/gfw_whitelist,breakwa11/gfw_whitelist,faynwol/gfw_whitelist,zhaiyusci/youknowwhatitis,zhaiyusci/youknowwhatitis,breakwa11/gfw_whitelist,amoxicillin/gfw_whitelist,breakwa11/gfw_whitelist,amoxicillin/gfw_whitelist,felixonmars/gfw_whitelist,faynwol/gfw_whitelist,faynwol/gfw_whitelist,blog2i2j/gfw_whitelist,aulphar/gfw_whitelist,zhaiyusci/youknowwhatitis,blog2i2j/gfw_whitelist,felixonmars/gfw_whitelist | pactest.py | pactest.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
'''
You have to install pacparser before running this script.
You can get pacparser from https://code.google.com/p/pacparser.
'''
import pacparser
import time
def get_pac_result(filename, url, host):
    """Evaluate the PAC file once and return its proxy decision for url/host."""
    pacparser.init()
    pacparser.parse_pac(filename)
    proxy_decision = pacparser.find_proxy(url, host)
    pacparser.cleanup()
    return proxy_decision
def main_test(filename, test_times):
    """Benchmark find_proxy() against *filename* for *test_times* iterations
    and print total plus per-lookup average timings."""
    pacparser.init()
    pacparser.parse_pac(filename)
    beg_time = time.time()
    for i in xrange(test_times):
        ret_str = pacparser.find_proxy('http://www.coding.com', 'www.coding.com') # using the worst case
    end_time = time.time()
    print "%s:\nTotal Time: %s s\nAvg. Time: %s ms\n\n" % (filename, end_time - beg_time, (end_time - beg_time) * 1000.0 / test_times),
    pacparser.cleanup()
def time_test():
    """Run the benchmark on each PAC variant."""
    # Iteration counts differ -- presumably the ip-list PAC is much slower
    # per lookup; confirm before changing.
    main_test("whitelist.pac", 10000)
    main_test("whiteiplist.pac", 100)
    #main_test("flora_pac.pac", 100)
    #main_test("usufu_flora_pac.pac", 100)
def main():
    """Entry point: run the timing benchmarks."""
    time_test()
# NOTE(review): runs on import as well -- there is no __main__ guard.
main()
| mit | Python | |
a68d89a4f351f8df2bfceeac77540b23e29827be | Add failing test for bug #1375 -- no out-of-bounds error for token.nbor() | honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,aikramer2/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,honnibal/spaCy,aikramer2/spaCy,aikramer2/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy | spacy/tests/regression/test_issue1375.py | spacy/tests/regression/test_issue1375.py | from __future__ import unicode_literals
import pytest
from ...vocab import Vocab
from ...tokens.doc import Doc
@pytest.mark.xfail
def test_issue1375():
    '''Test that token.nbor() raises IndexError for out-of-bounds access.'''
    # Marked xfail: the bounds check for bug #1375 is not implemented yet.
    doc = Doc(Vocab(), words=['0', '1', '2'])
    with pytest.raises(IndexError):
        assert doc[0].nbor(-1)
    # In-bounds neighbours on either side must still resolve normally.
    assert doc[1].nbor(-1).text == '0'
    with pytest.raises(IndexError):
        assert doc[2].nbor(1)
    assert doc[1].nbor(1).text == '2'
| mit | Python | |
6418326667f0819a028606dee1683965a0092e0a | add functions to find the data dir | StoDevX/cs251-toolkit,StoDevX/cs251-toolkit,StoDevX/cs251-toolkit,StoDevX/cs251-toolkit | cs251tk/specs/dirs.py | cs251tk/specs/dirs.py | import os
def get_specs_dir():
    """Directory where cs251tk caches its spec files, under the data dir."""
    base = get_data_dir()
    return os.path.join(base, 'cs251tk', 'specs')
def get_data_dir():
    """XDG data directory: $XDG_DATA_HOME, else ~/.local/share."""
    fallback = os.path.join(os.getenv('HOME'), '.local', 'share')
    return os.getenv('XDG_DATA_HOME', fallback)
| mit | Python | |
4af8e31da47b321cfbd84223619379167c9c7d3b | Add config file with list of programs | njbbaer/unicorn-remote,njbbaer/unicorn-remote,njbbaer/unicorn-remote | app/config.py | app/config.py | programs = {
    # Registry of runnable programs: program id -> display title plus the
    # script path that is executed for it.
    "ascii_text": {
        "title": "ASCII Text",
        "path": "programs/ascii_text.py",
    },
    "blink_sun": {
        "title": "Blink Sun",
        "path": "programs/blink_sun.py"
    },
    "cheertree": {
        "title": "Cheertree",
        "path": "programs/cheertree.py"
    },
    "cross": {
        "title": "Cross",
        "path": "programs/cross.py"
    },
    "demo": {
        "title": "Demo",
        "path": "programs/demo.py"
    },
    "dna": {
        "title": "DNA",
        "path": "programs/dna.py"
    },
    "game_of_life": {
        "title": "Game of Life",
        "path": "programs/game_of_life.py"
    },
    "matrix": {
        "title": "Matrix",
        "path": "programs/matrix.py"
    },
    "psychedelia": {
        "title": "Psychedelia",
        "path": "programs/psychedelia.py"
    },
    "rain": {
        "title": "Rain",
        "path": "programs/rain.py"
    },
    "random_blinky": {
        "title": "Random Blinky",
        "path": "programs/random_blinky.py"
    },
    "random_sparkles": {
        "title": "Random Sparkles",
        "path": "programs/random_sparkles.py"
    },
    "simple": {
        "title": "Simple",
        "path": "programs/simple.py"
    },
    "snow": {
        "title": "Snow",
        "path": "programs/snow.py"
    },
    "trig": {
        "title": "Trig",
        "path": "programs/trig.py"
    }
} | mit | Python | |
7749a3531cc2985112b7ef60421dd9c07e742bcb | Add fabfile.py file to project | korniichuk/jupyterhub | fabfile.py | fabfile.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
"""The jupyterhub stack fabric file"""
from fabric.api import local
def git():
    """Re-point the git remotes at the canonical GitHub/Bitbucket repositories."""
    commands = (
        "git remote rm origin",
        "git remote add origin https://korniichuk@github.com/korniichuk/jupyterhub.git",
        "git remote add bitbucket https://korniichuk@bitbucket.org/korniichuk/jupyterhub.git",
    )
    for command in commands:
        local(command)
| unlicense | Python | |
fcea7c42c7b793a84febd29112b50fc89b5fd6f4 | Add fabfile to generate documentation | tarunbhardwaj/trytond-prestashop,prakashpp/trytond-prestashop | fabfile.py | fabfile.py | # -*- coding: utf-8 -*-
"""
fabfile
Fab file to build and push documentation to github
:copyright: © 2013 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import time
from fabric.api import local, lcd
def upload_documentation():
    """
    Build and upload the documentation HTML to github
    """
    # Stage the built HTML in a unique temp dir so the branch switch below
    # can safely wipe the working tree.
    temp_folder = '/tmp/%s' % time.time()
    local('mkdir -p %s' % temp_folder)
    # Build the documentation
    with lcd('docs'):
        local('make html')
        local('mv build/html/* %s' % temp_folder)
    # Checkout to gh-pages branch
    local('git checkout gh-pages')
    # Copy back the files from temp folder
    local('rm -rf *')
    local('mv %s/* .' % temp_folder)
    # Add the relevant files
    local('git add *.html')
    local('git add *.js')
    # NOTE(review): duplicated "git add *.js" -- one of the two was probably
    # meant to be "git add *.css"; confirm and fix.
    local('git add *.js')
    local('git add *.inv')
    local('git add _images')
    local('git add _sources')
    local('git add _static')
    local('git commit -m "Build documentation"')
    local('git push')
    print "Documentation uploaded to Github."
    print "View at: http://openlabs.github.io/trytond-prestashop"
| bsd-3-clause | Python | |
1d19208600c137cc4b27fd4c046d424982c6e4f5 | Rename base class DakotaBase | csdms/dakota,csdms/dakota | dakota/dakota_base.py | dakota/dakota_base.py | #! /usr/bin/env python
"""A base class for all Dakota experiments."""
from abc import ABCMeta, abstractmethod
class DakotaBase(object):

    """Describe features common to all Dakota experiments.

    Concrete experiments call the ``*_block`` methods to assemble the text
    of a Dakota input file.  ``method_block`` additionally requires a
    ``partitions`` attribute and ``variables_block`` requires
    ``upper_bounds``/``lower_bounds``; subclasses must set these first.
    """

    __metaclass__ = ABCMeta  # Python 2 style abstract base class marker.

    @abstractmethod
    def __init__(self):
        """Create a set of default experiment parameters."""
        self.model = None
        # self.input_file = 'dakota.in'
        # self.output_file = 'dakota.out'
        self.data_file = 'dakota.dat'
        self.method = None
        self.variable_type = 'continuous_design'
        self.n_variables = 0
        self.variable_descriptors = []
        self.interface = 'direct'
        self.analysis_driver = 'rosenbrock'
        self.parameters_file = 'params.in'
        self.results_file = 'results.out'
        self.n_responses = 0
        self.is_objective_function = False
        self.response_descriptors = []
        self.response_files = []
        self.response_statistics = []

    def autogenerate_descriptors(self):
        """Quickly make generic descriptors: 'x1', 'x2', ... for variables
        and 'y1', 'y2', ... for responses."""
        self.variable_descriptors = ['x' + str(i+1) for i in
                                     range(self.n_variables)]
        self.response_descriptors = ['y' + str(i+1) for i in
                                     range(self.n_responses)]

    def environment_block(self):
        """Define the environment block of a Dakota input file."""
        s = '# Dakota input file\n' \
            + 'environment\n' \
            + '  tabular_data\n' \
            + '    tabular_data_file = {!r}\n\n'.format(self.data_file)
        return s

    def method_block(self):
        """Define the method block of a Dakota input file.

        Requires ``self.partitions`` to have been set by the subclass.
        """
        s = 'method\n' \
            + '  {}\n'.format(self.method) \
            + '  partitions ='
        for p in self.partitions:
            s += ' {}'.format(p)
        s += '\n\n'
        return s

    def variables_block(self):
        """Define the variables block of a Dakota input file.

        Requires ``self.upper_bounds`` and ``self.lower_bounds``.
        """
        s = 'variables\n' \
            + '  {0} = {1}\n'.format(self.variable_type, self.n_variables) \
            + '    upper_bounds ='
        for b in self.upper_bounds:
            s += ' {}'.format(b)
        s += '\n' \
            + '    lower_bounds ='
        for b in self.lower_bounds:
            s += ' {}'.format(b)
        s += '\n' \
            + '    descriptors ='
        for vd in self.variable_descriptors:
            s += ' {!r}'.format(vd)
        s += '\n\n'
        return s

    def interface_block(self):
        """Define the interface block of a Dakota input file."""
        s = 'interface\n' \
            + '  {}\n'.format(self.interface) \
            + '  analysis_driver = {!r}\n'.format(self.analysis_driver)
        if self.model is not None:
            s += '  analysis_components = {!r}'.format(self.model)
            for pair in zip(self.response_files, self.response_statistics):
                s += ' \'{0[0]}:{0[1]}\''.format(pair)
            s += '\n'
        # BUGFIX: the original used "is not 'direct'" -- identity comparison
        # against a string literal, which only worked through CPython string
        # interning and is a SyntaxWarning on Python 3.8+.
        if self.interface != 'direct':
            s += '  parameters_file = {!r}\n'.format(self.parameters_file) \
                + '  results_file = {!r}\n'.format(self.results_file) \
                + '  work_directory\n' \
                + '    named \'run\'\n' \
                + '    directory_tag\n' \
                + '    directory_save\n' \
                + '    file_save\n'
        s += '\n'
        return s

    def responses_block(self):
        """Define the responses block of a Dakota input file."""
        s = 'responses\n'
        if self.is_objective_function:
            s += '  objective_functions = {}\n'.format(self.n_responses)
        else:
            s += '  response_functions = {}\n'.format(self.n_responses)
        s += '  response_descriptors ='
        for rd in self.response_descriptors:
            s += ' {!r}'.format(rd)
        s += '\n' \
            + '  no_gradients\n' \
            + '  no_hessians\n'
        return s
| mit | Python | |
1e82283cc85b2eb449969849d23c4ffa2c090426 | Add script to batch convert a directory recursively | kinow/pccora | scripts/directory_batch_convert.py | scripts/directory_batch_convert.py | import os
import sys
import re
from pathlib import Path
import argparse
from convert2netcdf4 import parseandconvert
# Command-line interface: both the input and output directory are mandatory.
parser = argparse.ArgumentParser(description='Recursively batch convert Vaisala old-binary format to NetCDF files. Keeps directory structure.')
parser.add_argument('--from', dest='fromdir', help='Input directory', required=True)
parser.add_argument('--to', dest='todir', help='Output directory. Created if not exists. Files will be overwritten.', required=True)
# Sounding files end in ".edt" or a two-digit + "e" extension (e.g. ".40e").
EXTENSION_REGEX = r'.*\.edt$|.*\.[0-9]{2}e$'
def main():
    """Walk the input tree and convert each matching file to NetCDF,
    mirroring the directory structure under the output root."""
    args = parser.parse_args()
    src_root = Path(args.fromdir)
    dst_root = Path(args.todir)
    for dirpath, _dirnames, filenames in os.walk(src_root.as_posix()):
        for filename in filenames:
            if not re.match(EXTENSION_REGEX, filename.lower(), re.M | re.I):
                continue
            src_file = os.path.join(dirpath, filename)
            rel_path = Path(src_file).relative_to(src_root)
            dst_path = dst_root.joinpath(rel_path)
            # Swap the source extension for ".nc".
            dst_file = dst_path.as_posix().replace(dst_path.suffix, '.nc')
            if not dst_path.parent.exists():
                dst_path.parent.mkdir(parents=True, exist_ok=True)
            print(dst_file)
            parseandconvert(src_file, dst_file)
# Script entry point; exits 0 explicitly after a full conversion run.
if __name__ == '__main__':
    main()
sys.exit(0) | mit | Python | |
8a3caf06f146ff9d3cf20d0d739c78cb93c16325 | Add migrations | rapidpro/ureport,Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,rapidpro/ureport,Ilhasoft/ureport,Ilhasoft/ureport,Ilhasoft/ureport | ureport/polls/migrations/0049_auto_20160810_1823.py | ureport/polls/migrations/0049_auto_20160810_1823.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Django-generated migration: redeclares Poll.poll_date as a required
    # DateTimeField carrying the help text below.
    dependencies = [
        ('polls', '0048_populate_age_and_gender_on_poll_results'),
    ]
    operations = [
        migrations.AlterField(
            model_name='poll',
            name='poll_date',
            field=models.DateTimeField(help_text='The date to display for this poll. Leave empty to use flow creation date.'),
        ),
    ]
| agpl-3.0 | Python | |
b69643de7f9ec207949e0054d2b1e98dbb81d898 | Add new package: librelp (#18779) | LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/librelp/package.py | var/spack/repos/builtin/packages/librelp/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Librelp(AutotoolsPackage):
    """Librelp is an easy to use library for the RELP protocol. RELP
    (stands for Reliable Event Logging Protocol) is a general-purpose,
    extensible logging protocol."""
    homepage = "http://www.rsyslog.com/librelp/"
    url = "https://github.com/rsyslog/librelp/archive/v1.7.0.tar.gz"
    # Release tarballs pinned by sha256 checksum.
    version('1.7.0', sha256='ff46bdd74798934663d1388d010270325dc6a6ed6d44358ca69b280a8304b1e9')
    version('1.6.0', sha256='acaaa6b8e295ecd8e9d9b70c1c3c8fb3cc3c95a9ed5ce1689688510d0eecb37e')
    version('1.5.0', sha256='ce7f463944417ba77d7b586590e41e276f7b107d3e35a77ce768cf3889b5e1a6')
    # Autotools toolchain is needed at build time only.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    # TLS support libraries.
    depends_on('openssl')
    depends_on('gnutls@2.0.0:')
| lgpl-2.1 | Python | |
860f8224bf8ef2f1553a17842d1389491f43bfa5 | Add missing migration for wagtail.tests | inonit/wagtail,janusnic/wagtail,chrxr/wagtail,FlipperPA/wagtail,nrsimha/wagtail,benjaoming/wagtail,jordij/wagtail,taedori81/wagtail,takeshineshiro/wagtail,mephizzle/wagtail,hanpama/wagtail,darith27/wagtail,gogobook/wagtail,gasman/wagtail,wagtail/wagtail,timorieber/wagtail,rsalmaso/wagtail,FlipperPA/wagtail,mjec/wagtail,nimasmi/wagtail,mikedingjan/wagtail,kurtrwall/wagtail,marctc/wagtail,benjaoming/wagtail,chrxr/wagtail,marctc/wagtail,mixxorz/wagtail,rv816/wagtail,taedori81/wagtail,Klaudit/wagtail,gogobook/wagtail,kaedroho/wagtail,nutztherookie/wagtail,kurtw/wagtail,mikedingjan/wagtail,iansprice/wagtail,Tivix/wagtail,jordij/wagtail,wagtail/wagtail,janusnic/wagtail,benjaoming/wagtail,nimasmi/wagtail,tangentlabs/wagtail,quru/wagtail,quru/wagtail,nealtodd/wagtail,taedori81/wagtail,davecranwell/wagtail,jorge-marques/wagtail,torchbox/wagtail,iansprice/wagtail,takeflight/wagtail,zerolab/wagtail,bjesus/wagtail,mayapurmedia/wagtail,darith27/wagtail,darith27/wagtail,kaedroho/wagtail,janusnic/wagtail,chimeno/wagtail,hamsterbacke23/wagtail,stevenewey/wagtail,Pennebaker/wagtail,kurtw/wagtail,bjesus/wagtail,Tivix/wagtail,hamsterbacke23/wagtail,thenewguy/wagtail,FlipperPA/wagtail,mikedingjan/wagtail,iansprice/wagtail,WQuanfeng/wagtail,kaedroho/wagtail,taedori81/wagtail,mephizzle/wagtail,inonit/wagtail,WQuanfeng/wagtail,mjec/wagtail,mjec/wagtail,chimeno/wagtail,JoshBarr/wagtail,hanpama/wagtail,rv816/wagtail,nilnvoid/wagtail,mayapurmedia/wagtail,zerolab/wagtail,thenewguy/wagtail,nealtodd/wagtail,chrxr/wagtail,mikedingjan/wagtail,marctc/wagtail,stevenewey/wagtail,nilnvoid/wagtail,kurtrwall/wagtail,jnns/wagtail,JoshBarr/wagtail,hanpama/wagtail,rjsproxy/wagtail,timorieber/wagtail,kurtrwall/wagtail,Pennebaker/wagtail,dresiu/wagtail,nrsimha/wagtail,chrxr/wagtail,jnns/wagtail,zerolab/wagtail,jordij/wagtail,torchbox/wagtail,Toshakins/wagtail,benjaoming/wagtail,serzans/wagtail,mayapurmedia/wagtail,gogobook/
wagtail,jorge-marques/wagtail,zerolab/wagtail,nutztherookie/wagtail,rsalmaso/wagtail,dresiu/wagtail,jorge-marques/wagtail,Pennebaker/wagtail,takeflight/wagtail,wagtail/wagtail,chimeno/wagtail,nrsimha/wagtail,inonit/wagtail,nealtodd/wagtail,Toshakins/wagtail,rv816/wagtail,m-sanders/wagtail,torchbox/wagtail,stevenewey/wagtail,gogobook/wagtail,Klaudit/wagtail,jorge-marques/wagtail,nutztherookie/wagtail,hanpama/wagtail,kaedroho/wagtail,mjec/wagtail,tangentlabs/wagtail,darith27/wagtail,nimasmi/wagtail,dresiu/wagtail,JoshBarr/wagtail,WQuanfeng/wagtail,timorieber/wagtail,m-sanders/wagtail,FlipperPA/wagtail,hamsterbacke23/wagtail,rjsproxy/wagtail,takeflight/wagtail,iansprice/wagtail,takeshineshiro/wagtail,tangentlabs/wagtail,marctc/wagtail,mixxorz/wagtail,jnns/wagtail,inonit/wagtail,WQuanfeng/wagtail,rsalmaso/wagtail,gasman/wagtail,Toshakins/wagtail,thenewguy/wagtail,zerolab/wagtail,iho/wagtail,dresiu/wagtail,Toshakins/wagtail,Pennebaker/wagtail,torchbox/wagtail,serzans/wagtail,tangentlabs/wagtail,gasman/wagtail,stevenewey/wagtail,chimeno/wagtail,kurtw/wagtail,rv816/wagtail,wagtail/wagtail,rjsproxy/wagtail,nilnvoid/wagtail,Tivix/wagtail,KimGlazebrook/wagtail-experiment,m-sanders/wagtail,bjesus/wagtail,Klaudit/wagtail,rjsproxy/wagtail,iho/wagtail,davecranwell/wagtail,thenewguy/wagtail,takeshineshiro/wagtail,thenewguy/wagtail,Klaudit/wagtail,bjesus/wagtail,dresiu/wagtail,chimeno/wagtail,jnns/wagtail,jorge-marques/wagtail,gasman/wagtail,JoshBarr/wagtail,serzans/wagtail,hamsterbacke23/wagtail,quru/wagtail,nilnvoid/wagtail,taedori81/wagtail,gasman/wagtail,iho/wagtail,iho/wagtail,kurtw/wagtail,janusnic/wagtail,timorieber/wagtail,serzans/wagtail,rsalmaso/wagtail,mephizzle/wagtail,takeshineshiro/wagtail,Tivix/wagtail,nrsimha/wagtail,wagtail/wagtail,rsalmaso/wagtail,takeflight/wagtail,kurtrwall/wagtail,KimGlazebrook/wagtail-experiment,KimGlazebrook/wagtail-experiment,davecranwell/wagtail,mixxorz/wagtail,mephizzle/wagtail,quru/wagtail,jordij/wagtail,nealtodd/wagtail,nutztherookie/wag
tail,mixxorz/wagtail,nimasmi/wagtail,kaedroho/wagtail,mixxorz/wagtail,mayapurmedia/wagtail,KimGlazebrook/wagtail-experiment,m-sanders/wagtail,davecranwell/wagtail | wagtail/tests/migrations/0008_auto_20141113_2125.py | wagtail/tests/migrations/0008_auto_20141113_2125.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Django-generated migration: redeclares the page/advert chooser FKs of
    # the wagtail test models with help_text='help text'.
    dependencies = [
        ('tests', '0007_registerdecorator'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pagechoosermodel',
            name='page',
            field=models.ForeignKey(help_text='help text', to='wagtailcore.Page'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='snippetchoosermodel',
            name='advert',
            field=models.ForeignKey(help_text='help text', to='tests.Advert'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause | Python | |
78420caaac5c5055d9264e9905c5e14e9756a064 | Add Cli_server_tcp, just like Cli_server_local, but using TCP socket. | sippy/b2bua,AVOXI/b2bua,AVOXI/b2bua,sippy/b2bua | sippy/Cli_server_tcp.py | sippy/Cli_server_tcp.py | # Copyright (c) 2003-2005 Maxim Sobolev. All rights reserved.
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from twisted.internet.protocol import Factory
from twisted.internet import reactor
from Cli_session import Cli_session
class Cli_server_tcp(Factory):
    """Twisted factory serving the CLI protocol over a TCP socket, the
    TCP counterpart of Cli_server_local."""
    command_cb = None

    def __init__(self, command_cb, address):
        """Listen on the (host, port) *address*; *command_cb* is handed to
        every spawned Cli_session."""
        host, port = address[0], address[1]
        self.command_cb = command_cb
        self.protocol = Cli_session
        reactor.listenTCP(port, self, interface = host)

    def buildProtocol(self, addr):
        """Attach the command callback to each newly built protocol."""
        proto = Factory.buildProtocol(self, addr)
        proto.command_cb = self.command_cb
        return proto
# Manual smoke test: serve the CLI on 127.0.0.1:12345 and echo commands.
if __name__ == '__main__':
    def callback(clm, cmd):
        # Echo the command; presumably False means "not handled" -- confirm
        # against Cli_session's callback contract.
        print cmd
        return False
    laddr = ('127.0.0.1', 12345)
    f = Cli_server_tcp(callback, laddr)
    reactor.run()
| bsd-2-clause | Python | |
057173c48a56c0c858212233ee60dcbb88e22838 | Add tests.py, including one failure | openhatch/oh-mainline,willingc/oh-mainline,jledbetter/openhatch,mzdaniel/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,jledbetter/openhatch,sudheesh001/oh-mainline,campbe13/openhatch,vipul-sharma20/oh-mainline,onceuponatimeforever/oh-mainline,ehashman/oh-mainline,sudheesh001/oh-mainline,jledbetter/openhatch,onceuponatimeforever/oh-mainline,Changaco/oh-mainline,eeshangarg/oh-mainline,sudheesh001/oh-mainline,mzdaniel/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,Changaco/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,SnappleCap/oh-mainline,heeraj123/oh-mainline,nirmeshk/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,mzdaniel/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,mzdaniel/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,mzdaniel/oh-mainline,willingc/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,jledbetter/openhatch,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,Changaco/oh-mainline,sudheesh001/oh-mainline,campbe13/openhatch,eeshangarg/oh-mainline,campbe13/openhatch,sudheesh001/oh-mainline,nirmeshk/oh-mainline,jledbetter/openhatch,onceuponatimeforever/oh-mainline,moijes12/oh-mainline,waseem18/oh-mainline,nirmeshk/oh-mainline,waseem18/oh-mainline,waseem18/oh-mainline,ojengwa/oh-mainline,ojengwa/oh-mainline,SnappleCap/oh-mainline,eeshangarg/oh-mainline,openhatch/oh-mainline,willingc/oh-mainline,openhatch/oh-mainline,heeraj123/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,eeshangarg/oh-mainline,Changaco/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,waseem18/oh-mainline,ehashman/oh-mainline,ojengwa/oh-mainline,mzdaniel/oh-mainline,mzdaniel/oh-main
line,SnappleCap/oh-mainline | mysite/profile/tests.py | mysite/profile/tests.py | import django.test
from search.models import Project
import twill
from twill import commands as tc
from twill.shell import TwillCommandLoop
from django.test import TestCase
from django.core.servers.basehttp import AdminMediaHandler
from django.core.handlers.wsgi import WSGIHandler
from StringIO import StringIO
# FIXME: Later look into http://stackoverflow.com/questions/343622/how-do-i-submit-a-form-given-only-the-html-source
# Functions you'll need:
def twill_setup():
    """Route twill's HTTP traffic to 127.0.0.1:8080 into the Django WSGI app."""
    wsgi_app = AdminMediaHandler(WSGIHandler())
    twill.add_wsgi_intercept("127.0.0.1", 8080, lambda: wsgi_app)
def twill_teardown():
    # Undo the WSGI intercept installed by twill_setup().
    twill.remove_wsgi_intercept('127.0.0.1', 8080)
def make_twill_url(url):
    """Rewrite production URLs so twill hits the local test server instead."""
    live_prefix = "http://openhatch.org/"
    local_prefix = "http://127.0.0.1:8080/"
    return url.replace(live_prefix, local_prefix)
def twill_quiet():
    """Silence twill's normal output; do not call this for an interactive
    session."""
    twill.set_output(StringIO())
class ProfileTests(django.test.TestCase):
    # Browser-level tests driven through twill against the in-process WSGI app.
    def setUp(self):
        twill_setup()
    def tearDown(self):
        twill_teardown()
    def testSlash(self):
        # Smoke test only.
        # NOTE(review): response is unused -- no status-code assertion is made.
        response = self.client.get('/profile/')
    def testAddContribution(self):
        # Fill in and submit the add-contribution form via twill.
        url = 'http://openhatch.org/profile/'
        tc.go(make_twill_url(url))
        tc.fv('add_contrib', 'project', 'Babel')
        tc.fv('add_contrib', 'contrib_text', 'msgctxt support')
        tc.fv('add_contrib', 'url', 'http://babel.edgewall.org/ticket/54')
        tc.submit()
| agpl-3.0 | Python | |
bfc386c1a894811532ccfc65ce45339a964c5ac0 | Create batch_clip.py | jamaps/open_geo_scripts,jamaps/gdal_and_ogr_scripts,jamaps/open_geo_scripts,jamaps/fun_with_gdal,jamaps/shell_scripts,jamaps/shell_scripts,jamaps/gdal_and_ogr_scripts,jamaps/open_geo_scripts,jamaps/fun_with_gdal | batch_clip.py | batch_clip.py | # clips all .shps in folder to a boundary polygon
from subprocess import call
import os
# in dir of to be clipped shps and boundary file
shp_folder = "nrn_rrn_on_shp_en"
clip_poly = "clip_bound.shp"
#output dir name
# os.mkdir("clipped")
# NOTE(review): the "clipped" output directory is never created (the mkdir
# above is commented out); the ogr2ogr calls will fail unless it exists.
c = 0
for subdir, dirs, files in os.walk(shp_folder):
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    for file in files:
        if file.endswith(('.shp')):
            print file
            call(["ogr2ogr", "-clipsrc", clip_poly, "clipped/" + file, shp_folder + '/' + file])
            c += 1
# Number of shapefiles clipped.
print c
| mit | Python | |
d1e403fce1affd0c7da1753fda441dd9a9c1d9ff | copy ISON settings for BBC | olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net,olebole/astrometry.net | net/settings_bbc.py | net/settings_bbc.py | # settings_ison.py
from settings_common import *
# Site-specific overrides applied on top of the shared settings.
# NOTE(review): copied verbatim from settings_ison.py -- the database name,
# session cookie, solver config and sitename below still say 'ison', not
# 'bbc'; confirm whether they should be renamed for the BBC deployment.
TEMPDIR = '/data2/tmp'
DATABASES['default']['NAME'] = 'an-ison'
LOGGING['loggers']['django.request']['level'] = 'WARN'
SESSION_COOKIE_NAME = 'IsonAstrometrySession'
ssh_solver_config = 'an-ison'
sitename = 'ison'
| bsd-3-clause | Python | |
f20ed3b4941fef84e95afce4db0349ed8da3070e | Create Hour_Rain_Map.py | bryansandw/Rain_Gauges | Hour_Rain_Map.py | Hour_Rain_Map.py | #############################################################################
# Name: Elizabeth Rentschlar #
# Assistance from:                                                          #
# Purpose: Use Hourly rain totals condensed in Hours.py to create a series #
# of maps that show the #
# Created: 12/21/15 #
# Copyright: (c) City of Bryan #
# ArcGIS Version: 10.2.2 #
# Python Version: 2.7 #
#############################################################################
#Import arcpy module
import arcpy
import datetime
# set workspace
arcpy.env.overwriteOutput = True
# NOTE(review): these Windows paths use unescaped backslashes; the
# map_output_folder literal below ends in a backslash that escapes the
# closing quote -- a syntax error. Use raw strings (r'...') instead.
work_space = 'G:\GIS_PROJECTS\WATER_SERVICES\Rain_Gauges'
map_doc = '.\Rain_Gauge_Map.mxd'
map_output_folder = '.\Rain_Maps\'
hour_xy = '.\Hour_xy.py'
mapdoc = arcpy.mapping.MapDocument(map_doc)
data_frame = arcpy.mapping.ListDataFrames(mapdoc)[0]
print data_frame.name
print data_frame.scale
legend = arcpy.mapping.ListLayoutElements(mapdoc, "LEGEND_ELEMENT",
    "Legend")[0]
# Hours of the day for which maps will be produced.
hours = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
dates = []
# BUGFIX: only the datetime *module* is imported, so the bare date.today()
# in the original raised NameError; qualify everything explicitly.
today_date = datetime.date.today()
# could not get datetime.datetime(2014,09,24[,00[,00[,00[,00000[,None]]]]]) to work
d = datetime.date(2014,9,24)
t = datetime.time(12,0,0)
first_date = datetime.datetime.combine(d, t)
one_day = datetime.timedelta(days = 1)
# Collect one datetime (at 12:00) for every day from the first survey
# date up to and including today.  BUGFIX: the original compared a
# datetime against a date, which raises TypeError on Python 2.
i = first_date
while i.date() <= today_date:
    dates.append(i)
    new_date = i + one_day
    i = new_date
# Cursor over the condensed hourly rain totals produced by Hours.py.
# NOTE(review): hour_xy points at a .py file, not a table -- confirm the
# intended data source.
s_cur = arcpy.da.SearchCursor(hour_xy,
    ['Day', 'Hour', 'Rain_Total'])
# Update the map's date text element.
# NOTE(review): `district` and `date` are undefined here -- this draft
# loop will raise NameError when executed.
for text in arcpy.mapping.ListLayoutElements(mapdoc, "TEXT_ELEMENT"):
    if text.text == "Date: Text":
        text.text = district + "\n" + date
arcpy.RefreshActiveView()
arcpy.RefreshTOC()
| mit | Python | |
d8ad74b80f214ca313d01533ea6f15082cbb3af2 | Add tests for calibration procedure (draft) | matteobachetti/srt-single-dish-tools | srttools/core/tests/test_calibration.py | srttools/core/tests/test_calibration.py | from ..calibration import CalibratorTable
from ..read_config import read_config
from ..scan import list_scans
import numpy as np
import matplotlib.pyplot as plt
import unittest
from astropy.table import Table
from ..imager import ScanSet
import os
import glob
class Test2_Calibration(unittest.TestCase):
    # Draft end-to-end test of the calibration procedure; the step_* helpers
    # share on-disk state and are run in order from test_all.
    @classmethod
    def setup_class(klass):
        # pytest-style class setup.  NOTE(review): unittest.TestCase would
        # normally use setUpClass -- confirm which runner is intended.
        import os
        global DEBUG_MODE
        DEBUG_MODE = True
        klass.curdir = os.path.dirname(__file__)
        klass.datadir = os.path.join(klass.curdir, 'data')
        klass.config_file = \
            os.path.abspath(os.path.join(klass.datadir, "calibrators.ini"))
        klass.config = read_config(klass.config_file)
    def step_1_calibrate(self):
        """Simple calibration from scans."""
        scan_list = \
            list_scans(self.config['datadir'],
                       self.config['calibrator_directories'])
        scan_list.sort()
        caltable = CalibratorTable()
        caltable.from_scans(scan_list)
        caltable.update()
        caltable.write(os.path.join(self.datadir, 'calibrators.hdf5'),
                       path="config", overwrite=True)
    def step_999_cleanup(self):
        """Clean up the mess."""
        os.unlink(os.path.join(self.datadir, 'calibrators.hdf5'))
        for d in self.config['list_of_directories']:
            hfiles = glob.glob(os.path.join(self.config['datadir'], d, '*.hdf5'))
            print(hfiles)
            for h in hfiles:
                os.unlink(h)
    def test_all(self):
        # Run calibration, then remove every artefact it produced.
        self.step_1_calibrate()
        self.step_999_cleanup()
| bsd-3-clause | Python | |
427f4562dc2a4fa7b805154e3ffd732381ed6e8d | Add dbwriter.py | bcanvural/thesis,bcanvural/thesis | dbwriter.py | dbwriter.py | from pymongo import MongoClient
import os
from pathlib import Path
def main():
    """Load tf-idf CV/category similarity CSVs into MongoDB.

    Each CSV row is expected to hold: cvid, catid, skillName, similarity.
    Only the 'tfidf-cv-category' collection is populated here; the comment
    list below names the collections still to be imported.
    """
    client = MongoClient('localhost', 27017)
    db = client['thesis-database']
    #tfidf-cv-category
    collection = db['tfidf-cv-category']
    path = 'Calculated/tfidf/cv-category'
    for filename in os.listdir(path):
        # Only process CSV files; other entries are ignored.
        if filename[-3:] == "csv":
            fullpath = path + '/' + filename
            q = Path(fullpath)
            with q.open() as f:
                for line in f:
                    # "cvid", "catid", "skillName", "similarity"
                    cvid, catid, skillName, similarity = line.strip().split(',')
                    obj = {"cvid": cvid, "catid": catid, "skillName": skillName, \
                           "similarity": similarity}
                    collection.insert_one(obj)
    #tfidf-job-category
    #tfidf-job-cv
    #tfidf2-cv-category
    #tfidf2-job-category
    #tfidf2-job-cv
    #countvectorizer-cv-category
    #countvectorizer-job-category
    #countvectorizer-job-cv
    #word2vec-cv-category
    #word2vec-cv-category
    #word2vec-job-cv

if __name__ == '__main__':
    main()
| mit | Python | |
ac8fdf7dc902caed2757ea40483fff951af93ac8 | add cu2qu.cli module exporting a main() function for console script | googlefonts/cu2qu,googlefonts/fonttools,googlei18n/cu2qu,fonttools/fonttools | Lib/cu2qu/cli.py | Lib/cu2qu/cli.py | import os
import argparse
import logging
import shutil
import multiprocessing as mp
from contextlib import closing
from functools import partial
import cu2qu
from cu2qu.ufo import font_to_quadratic, fonts_to_quadratic
import defcon
logger = logging.getLogger("cu2qu")
def _cpu_count():
try:
return mp.cpu_count()
except NotImplementedError:
return 1
def _font_to_quadratic(zipped_paths, **kwargs):
    """Convert a single UFO's cubic curves to quadratic.

    zipped_paths is an (input_path, output_path) pair; extra keyword
    arguments are forwarded to cu2qu.ufo.font_to_quadratic.  When the font
    required no conversion, the input is copied unchanged to the output.
    """
    input_path, output_path = zipped_paths
    ufo = defcon.Font(input_path)
    logger.info('Converting curves for %s', input_path)
    if font_to_quadratic(ufo, **kwargs):
        logger.info("Saving %s", output_path)
        ufo.save(output_path)
    else:
        _copytree(input_path, output_path)
def _samepath(path1, path2):
# TODO on python3+, there's os.path.samefile
path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
return path1 == path2
def _copytree(input_path, output_path):
    """Copy a directory tree, replacing any existing output tree.

    A no-op when input and output are the same location (in-place save).
    """
    if _samepath(input_path, output_path):
        logger.debug("input and output paths are the same file; skipped copy")
        return
    # shutil.copytree refuses to write over an existing directory.
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    shutil.copytree(input_path, output_path)
def main(args=None):
    """Command-line entry point: convert UFO cubic curves to quadratic.

    Parses arguments, configures logging, then either converts all inputs
    compatibly (--interpolatable) or converts each font independently,
    optionally across multiple processes (--jobs).
    """
    parser = argparse.ArgumentParser(prog="cu2qu")
    parser.add_argument(
        "--version", action="version", version=cu2qu.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input UFO source file(s).")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    mode_parser = parser.add_mutually_exclusive_group()
    mode_parser.add_argument(
        "-i",
        "--interpolatable",
        action="store_true",
        help="whether curve conversion should keep interpolation compatibility"
    )
    mode_parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        nargs="?",
        default=1,
        const=_cpu_count(),
        help="Convert using multiple processes (default: %(default)s)")
    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=("output filename for the converted UFO. By default fonts are "
              "modified in place. This only works with a single input."))
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted UFOs")

    options = parser.parse_args(args)

    # Map repeated -v flags to logging verbosity.
    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        parser.error("-o/--output-file can't be used with multile inputs")

    # Work out one output path per input font.
    if options.output_dir:
        output_paths = [
            os.path.join(options.output_dir, os.path.basename(p))
            for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        # save in-place
        output_paths = list(options.infiles)

    if options.interpolatable:
        logger.info('Converting curves compatibly')
        ufos = [defcon.Font(infile) for infile in options.infiles]
        if fonts_to_quadratic(ufos, dump_stats=True):
            for ufo, output_path in zip(ufos, output_paths):
                logger.info("Saving %s", output_path)
                ufo.save(output_path)
        else:
            # Nothing changed; just mirror the inputs to the outputs.
            for input_path, output_path in zip(options.infiles, output_paths):
                _copytree(input_path, output_path)
    else:
        jobs = min(len(options.infiles),
                   options.jobs) if options.jobs > 1 else 1
        if jobs > 1:
            func = partial(_font_to_quadratic, dump_stats=False)
            logger.info('Running %d parallel processes', jobs)
            with closing(mp.Pool(jobs)) as pool:
                # can't use Pool.starmap as it's 3.3+ only
                pool.map(func, zip(options.infiles, output_paths))
        else:
            for paths in zip(options.infiles, output_paths):
                _font_to_quadratic(paths, dump_stats=True)
| apache-2.0 | Python | |
afcd636772952c8b6d2cca51e1851af29f5b6707 | Create PostForm. | jambonrose/DjangoUnleashed-1.8,jambonrose/DjangoUnleashed-1.8 | blog/forms.py | blog/forms.py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
    """ModelForm exposing every field of Post; slugs are normalized to lowercase."""

    class Meta:
        model = Post
        fields = '__all__'

    def clean_slug(self):
        # Lower-case the slug so URL lookups are case-insensitive.
        return self.cleaned_data['slug'].lower()
| bsd-2-clause | Python | |
e39290b71299843eff858fb51543b99a06178a1d | Add a simple 8x benchmark script | YosysHQ/nextpnr,SymbiFlow/nextpnr,SymbiFlow/nextpnr,YosysHQ/nextpnr,YosysHQ/nextpnr,YosysHQ/nextpnr,SymbiFlow/nextpnr,SymbiFlow/nextpnr | ice40/picorv32_benchmark.py | ice40/picorv32_benchmark.py | #!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
os.remove("picorv32.json")
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
| isc | Python | |
30081b470fc3522afc4af3a4fc33eb28bc85d6d6 | Add project config manager | polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon | polyaxon_cli/managers/project.py | polyaxon_cli/managers/project.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from polyaxon_schemas.project import ProjectConfig
from polyaxon_cli.managers.base import BaseConfigManager
class ProjectConfigManager(BaseConfigManager):
    """Manages the per-project .plxprojectconfig file.

    NOTE(review): the original docstring mentioned access tokens, which
    looks copied from another config manager -- confirm intended wording.
    """

    # Stored in the project directory rather than the user's global config.
    IS_GLOBAL = False
    CONFIG_FILE_NAME = '.plxprojectconfig'
    # Schema class used to (de)serialize the file contents.
    CONFIG = ProjectConfig
    INIT_COMMAND = True
| apache-2.0 | Python | |
aec80109daf855f64f666f998aa2a96755e653f5 | add rpython-compatible implementation of load_resource() for win32 | Darkman/esky,timeyyy/esky,ccpgames/esky,datalytica/esky,kinnarr/esky | esky/bdist_esky/pypy_winres.py | esky/bdist_esky/pypy_winres.py | """
esky.bdist_esky.pypy_winres: access win32 exe resources in rpython
This module provides some functions for accessing win32 exe resources from
rpython code. It's a trimmed-down version of the esky.winres module with
just enough functionality to get the py2exe compiled bootstrapper working.
"""
from pypy.rlib import libffi
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.rlib import rwin32
LOAD_LIBRARY_AS_DATAFILE = 0x00000002
k32_LoadLibraryExA = rwin32.winexternal("LoadLibraryExA",[rffi.CCHARP,rwin32.HANDLE,rwin32.DWORD],rwin32.HANDLE)
k32_FindResourceExA = rwin32.winexternal("FindResourceExA",[rwin32.HANDLE,rffi.CCHARP,rwin32.DWORD,rwin32.DWORD],rwin32.HANDLE)
k32_SizeofResource = rwin32.winexternal("SizeofResource",[rwin32.HANDLE,rwin32.HANDLE],rwin32.DWORD)
k32_LoadResource = rwin32.winexternal("LoadResource",[rwin32.HANDLE,rwin32.HANDLE],rwin32.HANDLE)
k32_LockResource = rwin32.winexternal("LockResource",[rwin32.HANDLE],rffi.CCHARP)
k32_FreeLibrary = rwin32.winexternal("FreeLibrary",[rwin32.HANDLE],rwin32.BOOL)
def load_resource(filename,resname,resid,reslang):
    """Load the named resource from the given file.

    The filename and resource name must be ascii strings, and the resid and
    reslang must be integers.  Returns the resource payload as an rpython
    string; raises WindowsError on any win32 API failure.
    """
    l_handle = k32_LoadLibraryExA(filename,0,LOAD_LIBRARY_AS_DATAFILE)
    if not l_handle:
        raise WindowsError(rwin32.GetLastError(),"LoadLibraryExW failed")
    try:
        # NOTE(review): win32 FindResourceEx takes (module, type, name, lang);
        # confirm resname/resid are passed in the intended order here.
        r_handle = k32_FindResourceExA(l_handle,resname,resid,reslang)
        if not r_handle:
            raise WindowsError(rwin32.GetLastError(),"FindResourceExA failed")
        r_size = k32_SizeofResource(l_handle,r_handle)
        if not r_size:
            raise WindowsError(rwin32.GetLastError(),"SizeofResource failed")
        r_info = k32_LoadResource(l_handle,r_handle)
        if not r_info:
            raise WindowsError(rwin32.GetLastError(),"LoadResource failed")
        r_ptr = k32_LockResource(r_info)
        if not r_ptr:
            raise WindowsError(rwin32.GetLastError(),"LockResource failed")
        # Copy the payload out before the module is unloaded below.
        return rffi.charpsize2str(r_ptr,r_size)
    finally:
        if not k32_FreeLibrary(l_handle):
            raise WindowsError(rwin32.GetLastError(),"FreeLibrary failed")
def load_resource_pystr(py,filename,resname,resid,reslang):
    """Load the named resource from the given file as a python-level string

    The filename and resource name must be ascii strings, and the resid and
    reslang must be integers.

    This uses the given python dll object to load the data directly into
    a python string, saving a lot of copying and carrying on.
    """
    l_handle = k32_LoadLibraryExA(filename,0,LOAD_LIBRARY_AS_DATAFILE)
    if not l_handle:
        raise WindowsError(rwin32.GetLastError(),"LoadLibraryExW failed")
    try:
        r_handle = k32_FindResourceExA(l_handle,resname,resid,reslang)
        if not r_handle:
            raise WindowsError(rwin32.GetLastError(),"FindResourceExA failed")
        r_size = k32_SizeofResource(l_handle,r_handle)
        if not r_size:
            raise WindowsError(rwin32.GetLastError(),"SizeofResource failed")
        r_info = k32_LoadResource(l_handle,r_handle)
        if not r_info:
            raise WindowsError(rwin32.GetLastError(),"LoadResource failed")
        r_ptr = k32_LockResource(r_info)
        if not r_ptr:
            raise WindowsError(rwin32.GetLastError(),"LockResource failed")
        # Allocate an uninitialised python string and memcpy the resource
        # bytes straight into its buffer (avoids an intermediate copy).
        s = py.String_FromStringAndSize(None,r_size)
        buf = py.String_AsString(s)
        memcpy(buf,r_ptr,r_size)
        return s
    finally:
        if not k32_FreeLibrary(l_handle):
            raise WindowsError(rwin32.GetLastError(),"FreeLibrary failed")
def memcpy(target,source,n):
    """Copy n bytes from source to target via libc memcpy through libffi."""
    # Resolve memcpy(void*, void*, unsigned) each call; return value ignored.
    impl = libffi.CDLL(libffi.get_libc_name()).getpointer("memcpy",[libffi.ffi_type_pointer,libffi.ffi_type_pointer,libffi.ffi_type_uint],libffi.ffi_type_void)
    impl.push_arg(target)
    impl.push_arg(source)
    impl.push_arg(n)
    impl.call(lltype.Void)
| bsd-3-clause | Python | |
8e1a117c0d0bf3614beed0410862d1dc1a91b306 | Create shp2zip.py | datawagovau/fme-workbenches | upload-geospatial-data/python/shp2zip.py | upload-geospatial-data/python/shp2zip.py | import os
import shutil
import zipfile
# Creates a zip file containing the input shapefile
# inShp: Full path to shapefile to be zipped
# Delete: Set to True to delete shapefile files after zip
def ZipShp (inShp, Delete = True):
    """Zip all component files of a shapefile into <basename>.zip.

    inShp:  full path to the .shp file; sibling files sharing its base name
            and a known shapefile extension are included in the archive.
    Delete: when True, remove the original component files after zipping.
    Returns the full path of the created zip archive.
    """
    # Component extensions that may accompany a shapefile.
    extensions = [".shp", ".shx", ".dbf", ".sbn", ".sbx", ".fbn", ".fbx",
                  ".ain", ".aih", ".atx", ".ixs", ".mxs", ".prj", ".xml",
                  ".cpg", ".shp.xml"]

    # Directory and base name of the shapefile.
    inLocation = os.path.dirname(inShp)
    inName = os.path.basename(os.path.splitext(inShp)[0])
    zipfl = os.path.join(inLocation, inName + ".zip")

    delFiles = []
    # Context manager closes the archive even on error (the original leaked
    # the handle on exception and deleted members before closing).
    with zipfile.ZipFile(zipfl, "w") as ZIP:
        for fl in os.listdir(inLocation):
            if any(fl == inName + extension for extension in extensions):
                inFile = os.path.join(inLocation, fl)
                delFiles.append(inFile)
                ZIP.write(inFile, fl)

    # Delete the source components only after the archive is safely closed.
    if Delete:
        for fl in delFiles:
            os.remove(fl)

    return zipfl
| mit | Python | |
06804460f2b0e70b7b08d6657353c1c172a0df4c | add examples/tube-stream-private.py | PabloCastellano/telepathy-python,detrout/telepathy-python,detrout/telepathy-python,max-posedon/telepathy-python,PabloCastellano/telepathy-python,epage/telepathy-python,max-posedon/telepathy-python,freedesktop-unofficial-mirror/telepathy__telepathy-python,freedesktop-unofficial-mirror/telepathy__telepathy-python,epage/telepathy-python | examples/tube-stream-private.py | examples/tube-stream-private.py | import sys
from stream_tube_client import StreamTubeJoinerPrivateClient, \
StreamTubeInitiatorPrivateClient
def usage():
    # Print the four supported invocation forms of this example script.
    print "Usage:\n" \
          "Offer a stream tube to [contact] using the trivial stream server:\n" \
          "\tpython %s [account-file] [contact]\n" \
          "Accept a stream tube from a contact and connect it to the trivial stream client:\n" \
          "\tpython %s [account-file]\n" \
          "Offer a stream tube to [contact] using the UNIX socket [socket]:\n" \
          "\tpython %s [account-file] [contact] [socket]\n" \
          "Accept a stream tube from a contact and wait for connections from an external client:\n" \
          "\tpython %s [account-file] --no-trivial-client\n" \
          % (sys.argv[0], sys.argv[0], sys.argv[0], sys.argv[0])
if __name__ == '__main__':
    args = sys.argv[1:]
    # Dispatch on argument count/flags; usage() documents the four forms.
    if len(args) == 2 and args[1] != '--no-trivial-client':
        client = StreamTubeInitiatorPrivateClient(args[0], contact_id=args[1])
    elif len(args) == 1:
        client = StreamTubeJoinerPrivateClient(args[0], True)
    elif len(args) == 3:
        client = StreamTubeInitiatorPrivateClient(args[0], args[1], args[2])
    elif len(args) == 2 and args[1] == '--no-trivial-client':
        client = StreamTubeJoinerPrivateClient(args[0], False)
    else:
        usage()
        sys.exit(0)
    client.run()
| lgpl-2.1 | Python | |
d304c1cd9b35f92a6f5bb1c739402b8f3a6c22c8 | Create cigar_party.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/cigar_party.py | Python/CodingBat/cigar_party.py | # http://codingbat.com/prob/p195669
def cigar_party(cigars, is_weekend):
    """Return True when the cigar party succeeds.

    Weekends only require at least 40 cigars; on weekdays the count must
    fall between 40 and 60 inclusive.  (Collapses the original redundant
    if/elif/else chain of boolean returns into one expression.)
    """
    return cigars >= 40 and (is_weekend or cigars <= 60)
| mit | Python | |
dbf1d68fd3c3681b1aa7673b1cfd8a2cc3417edf | Create problem5.py | CptDemocracy/Python | Project-Euler/Problem5/problem5.py | Project-Euler/Problem5/problem5.py | """
[ref.href] https://projecteuler.net/problem=5
Smallest multiple.
2520 is the smallest number that can be divided by each
of the numbers from 1 to 10 without any remainder.
What is the smallest positive number that is evenly
divisible by all of the numbers from 1 to 20?
"""
EPS = 1e-6
def IsDivisibleByAll(n, divs):
    """Return True when n is evenly divisible by every value in divs."""
    return all(n % div == 0 for div in divs)
def GCD(n, m, eps = EPS):
    """Euclidean greatest common divisor with a float tolerance.

    Returns 0 when either argument is within eps of zero; remainders
    smaller than eps terminate the loop, absorbing float noise.
    """
    if abs(n) <= eps or abs(m) <= eps:
        return 0
    while n % m > eps:
        n, m = m, n % m
    return m
def LCD(n, m, eps = EPS):
    """Least common multiple computed as n*m/GCD; returns 1 when GCD is ~0."""
    gcd = GCD(n, m, eps)
    if abs(gcd) <= eps:
        return 1
    return n * m / gcd
# Search range: every divisor from 1 through 20.
lowestdiv = 1
highestdiv = 20
divs = range(lowestdiv, highestdiv + 1)
# Folding LCD over the range yields the least common multiple of 1..20.
lcd = reduce(LCD, divs)
print "The smallest positive number that is evenly divisible by " +\
      ", ".join([str(div) for div in divs]) + " is " + str(lcd) + "."
| mit | Python | |
d441cfd92cd8d843f22f181d485786fe1ed8948f | Add herokustatus plugin | Cyanogenoid/smartbot,tomleese/smartbot,Muzer/smartbot,thomasleese/smartbot-old | plugins/herokustatus.py | plugins/herokustatus.py | import urllib.parse
import requests
class Plugin:
    """SmartBot plugin that reports current Heroku platform status on demand."""

    def __call__(self, bot):
        # Register the chat trigger and the help topic with the bot.
        bot.on_respond(r"heroku st(atus)?$", self.on_respond)
        bot.on_help("herokustatus", self.on_help)

    def on_respond(self, bot, msg, reply):
        """Fetch Heroku's current-status API and report both environments."""
        url = "https://status.heroku.com/api/v3/current-status"
        headers = { "User-Agent": "SmartBot" }
        res = requests.get(url, headers=headers).json()
        reply("Production: {0}\nDevelopment: {1}".format(res["status"]["Production"], res["status"]["Development"]))

    def on_help(self, bot, reply):
        reply("Syntax: heroku st[atus]")
| mit | Python | |
0af447c0371bd157c03fc5097ac8c0e0a5873ff7 | Add temporary satellites analysis example. | probcomp/bayeslite,probcomp/bayeslite | examples/satellites_analyze.py | examples/satellites_analyze.py | assert __name__ == '__main__'
import bayeslite.bql as bql
import bayeslite.core as core
import bayeslite.parse as parse
import crosscat.LocalEngine as localengine
import getopt
import sys
# XXX This is wrong -- should be part of bayesdb proper. But it, and
# copypasta of it, will do for now until internals are restructured
# well enough for bdb.execute to work.
def bql_exec(bdb, string):
    """Parse and execute exactly one BQL phrase, echoing it to stderr.

    Raises ValueError when the string contains more than one phrase.
    """
    import sys
    print >>sys.stderr, '--> %s' % (string.strip(),)
    phrases = parse.parse_bql_string(string)
    phrase = phrases.next()
    # Confirm the generator is exhausted after the first phrase.
    done = None
    try:
        phrases.next()
        done = False
    except StopIteration:
        done = True
    if done is not True:
        raise ValueError('>1 phrase: %s' % (string,))
    return bql.execute_phrase(bdb, phrase)
def usage():
    # NOTE(review): usage mentions -v but getopt below only accepts ?him: --
    # confirm whether -v was intended to be handled.
    print >>sys.stderr, 'Usage: %s [-hv] [-i <iter>] [-m <models>]' % \
        (sys.argv[0])
iterations = None
modelnos = None

# Parse flags: -i analysis iterations, -m model-number spec.
try:
    opts, args = getopt.getopt(sys.argv[1:], '?hi:m:', [])
except getopt.GetoptError as e:
    print str(e)
    usage()
if 0 < len(args):
    usage()
for o, a in opts:
    if o in ('-h', '-?'):
        usage()
        sys.exit()
    elif o == '-i':
        iterations = int(a)
    elif o == '-m':
        modelnos = a
    else:
        assert False, 'bad option %s' % (o,)

# Create/refresh the btable and models, then optionally run the analysis.
bdb = core.BayesDB(localengine.LocalEngine(seed=0), pathname='satellites.bdb')
bql_exec(bdb, "create btable if not exists satellites" +
    " from 'satellites.utf8.csv'")
bql_exec(bdb, 'initialize 10 models if not exists for satellites')
if iterations is not None:
    modelspec = 'models %s' % (modelnos,) if modelnos is not None else ''
    bql_exec(bdb, 'analyze satellites %s for %d iterations wait' %
        (modelspec, iterations))
034cb3a0fe2a2c0c8b47fd631ca28bbfa7091902 | add recursive preOrder BST | Daetalus/Algorithms | BST/bst.py | BST/bst.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
class Node(object):
    # Binary-tree node: payload in .value, child links in .left/.right.
    def __init__(self, value):
        self.left = None
        self.right = None
        self.value = value
# recursive
def preOrderRecur(root):
    """Print node values in pre-order (root, left, right).

    The original visited the right subtree before the left, which is not
    pre-order and disagreed with the iterative preOrder() below; fixed.
    """
    if not root:
        return
    print(root.value)
    if root.left:
        preOrderRecur(root.left)
    if root.right:
        preOrderRecur(root.right)
# iterative
def preOrder(root):
    """Return a list of node values in pre-order (root, left, right)."""
    if root is None:
        return []
    stack = [root]
    preorder = []
    while stack:
        node = stack.pop()
        # Node stores its payload in .value; the original read .val, which
        # raises AttributeError for the Node class defined above.
        preorder.append(node.value)
        # Push right first so the left subtree is visited first.
        if node.right:
            stack.append(node.right)
        if node.left:
            stack.append(node.left)
    return preorder
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
class Node(object):
    # Binary-tree node: payload in .value, child links in .left/.right.
    def __init__(self, value):
        self.left = None
        self.right = None
        self.value = value
# iterative
def preOrder(root):
    """Return a list of node values in pre-order (root, left, right)."""
    if root is None:
        return []
    stack = [root]
    preorder = []
    while stack:
        node = stack.pop()
        # Node stores its payload in .value; the original read .val, which
        # raises AttributeError for the Node class defined above.
        preorder.append(node.value)
        # Push right first so the left subtree is visited first.
        if node.right:
            stack.append(node.right)
        if node.left:
            stack.append(node.left)
    return preorder
| unlicense | Python |
022ab3e18660a310545d59a467f6eb9703fb5422 | Add dummy settings for database (which hopefully work). | campovski/beernburger,campovski/beernburger,campovski/beernburger | beernburger/my_settings.py | beernburger/my_settings.py | """
Django settings for beernburger project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
def generate_secret_key(path):
    """Generate a random Django SECRET_KEY and persist it as an importable module.

    Writes ``SECRET_KEY = '<50 random chars>'`` to *path*.
    """
    from django.utils.crypto import get_random_string
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*()-_=+'
    x = get_random_string(50, chars)
    with open(path, 'w') as f:
        # The !r conversion quotes the value; the original wrote it unquoted,
        # producing a syntactically invalid secret_key.py that crashed the
        # re-import below.
        f.write('SECRET_KEY = {0!r}'.format(x))

try:
    from .secret_key import SECRET_KEY
except ImportError:
    # First run: create secret_key.py beside this settings module.
    settings_dir = os.path.abspath(os.path.dirname(__file__))
    generate_secret_key(os.path.join(settings_dir, 'secret_key.py'))
    from .secret_key import SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['84.52.169.242', '127.0.0.1', 'campovski.eu', 'localhost']
# Application definition
INSTALLED_APPS = [
'beernburger',
'beer.apps.BeerConfig',
'burger.apps.BurgerConfig',
'teacoffee.apps.TeacoffeeConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'beernburger.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'beernburger.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'beernburger',
'USER': 'test_user',
'PASSWORD': 'test_user_pwd123',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
| mit | Python | |
1d48cbd2a9882d20b71fe26841a8c8575fbb3ba9 | Add script to generate all attention plots for dataset | google/seq2seq,chunfengh/seq2seq,google/seq2seq,shashankrajput/seq2seq,liyi193328/seq2seq,liyi193328/seq2seq,liyi193328/seq2seq,chunfengh/seq2seq,chunfengh/seq2seq,chunfengh/seq2seq,google/seq2seq,liyi193328/seq2seq,shashankrajput/seq2seq,shashankrajput/seq2seq,liyi193328/seq2seq,google/seq2seq,kontact-chan/seq2seq,shashankrajput/seq2seq,kontact-chan/seq2seq,kontact-chan/seq2seq,kontact-chan/seq2seq | bin/visualize_attention.py | bin/visualize_attention.py | #! /usr/bin/env python
""" Generates model predictions.
"""
import os
import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
from matplotlib import pyplot as plt
from seq2seq import graph_utils
from seq2seq.inference import create_inference_graph, create_predictions_iter
tf.flags.DEFINE_string("source", None, "path to source training data")
tf.flags.DEFINE_string("vocab_source", None, "Path to source vocabulary file")
tf.flags.DEFINE_string("vocab_target", None, "Path to target vocabulary file")
tf.flags.DEFINE_string("model", "AttentionSeq2Seq", "model class")
tf.flags.DEFINE_string("model_dir", None, "directory to load model from")
tf.flags.DEFINE_string("checkpoint_path", None,
"""Full path to the checkpoint to be loaded. If None,
the latest checkpoint in the model dir is used.""")
tf.flags.DEFINE_integer("batch_size", 32, "the train/dev batch size")
tf.flags.DEFINE_integer("beam_width", None,
"Use beam search with this beam width for decoding")
tf.flags.DEFINE_string("output_dir", None,
"Write all attention plots to this directory")
FLAGS = tf.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
def create_figure(predictions_dict):
    """Create and return a matplotlib figure visualizing the attention
    scores for a single model prediction.

    Rows are predicted target tokens (truncated at SEQUENCE_END); columns
    are source tokens (truncated at the recorded source length).
    """
    # Find out how long the predicted sequence is
    target_words = [_.decode() for _ in predictions_dict["target_words"]]
    if "SEQUENCE_END" in target_words:
        prediction_len = target_words.index("SEQUENCE_END") + 1
    else:
        prediction_len = None
    target_words = target_words[:prediction_len]

    # Get source words
    source_len = predictions_dict["source_len"]
    source_words = predictions_dict["source_tokens"][:source_len]
    source_words = [_.decode() for _ in source_words]

    # Plot
    fig = plt.figure(figsize=(8, 8))
    plt.imshow(
        X=predictions_dict["attention_scores"][:prediction_len, :],
        interpolation="nearest",
        cmap=plt.cm.Blues)
    plt.xticks(np.arange(source_len), source_words, rotation=45)
    plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
    fig.tight_layout()
    return fig
def main(_argv):
    """Program entrypoint.

    Builds the inference graph, restores the checkpoint, and writes one
    attention-heatmap PNG per input example into FLAGS.output_dir.
    """
    gfile.MakeDirs(FLAGS.output_dir)

    predictions, _, _ = create_inference_graph(
        model_class=FLAGS.model,
        model_dir=FLAGS.model_dir,
        vocab_source=FLAGS.vocab_source,
        vocab_target=FLAGS.vocab_target,
        input_file=FLAGS.source,
        batch_size=FLAGS.batch_size,
        beam_width=FLAGS.beam_width
    )

    # Expose source tokens/length and decoded target words alongside the raw
    # predictions so the plotting code can label both axes.
    vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
    features = graph_utils.get_dict_from_collection("features")
    predictions["source_tokens"] = features["source_tokens"]
    predictions["source_len"] = features["source_len"]
    predictions["target_words"] = vocab_tables["target_id_to_vocab"].lookup(
        predictions["predictions"])

    saver = tf.train.Saver()
    checkpoint_path = FLAGS.checkpoint_path
    if not checkpoint_path:
        checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir)

    with tf.Session() as sess:
        # Initialize variables
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        sess.run(tf.initialize_all_tables())

        # Restore checkpoint
        saver.restore(sess, checkpoint_path)
        tf.logging.info("Restored model from %s", checkpoint_path)

        # Output predictions
        predictions_iter = create_predictions_iter(predictions, sess)
        for idx, predictions_dict in enumerate(predictions_iter):
            output_path = os.path.join(FLAGS.output_dir, "{:05d}.png".format(idx))
            create_figure(predictions_dict)
            plt.savefig(output_path)
            tf.logging.info("Wrote %s", output_path)

if __name__ == "__main__":
    tf.app.run()
| apache-2.0 | Python | |
aa52332d622e16b3d1524eb2dc12047cec02fb33 | Create calculator.py | Anurag842/my-first-calculator | calculator.py | calculator.py | # my-first-calculator
#Description is in the name
#calculator

# Operation menu.
print "For addition press 1"
print "For subtraction press 2"
print "For multiplication press 3"
print "For division press 4"
print "If you're all done press 5"
# NOTE(review): float(int(...)) truncates any decimal the user types before
# converting to float -- confirm whether fractional input should survive.
cmd=float(int(raw_input("Enter operation number:")))

#Addition
if cmd==1:
    print "Ok begin addition"
    first=float(int(raw_input("Enter first number:")))
    second=float(int(raw_input("Enter second number:")))
    result=first+second
    print first,"+",second,"=",result,"(sum)"
#Subtraction
elif cmd==2:
    print "Ok begin Subtraction:"
    first=float(int(raw_input("Enter first number:")))
    second=float(int(raw_input("Enter second number:")))
    result=first-second
    print first,"-",second,"=",result,"(difference)"
#Multiplication
elif cmd==3:
    print "Ok begin multiplication"
    first=float(int(raw_input("Enter multiplicand:")))
    second=float(int(raw_input("Enter multiplier:")))
    product=first*second
    print first,"X",second,"=",product,"(product)"
#Division
elif cmd==4:
    print "Ok begin division"
    print "For decimal division press 1"
    print "For normal division press 2"
    command=float(int(raw_input("Enter Division type:")))
    if command==1:
        first=float(int(raw_input("Enter Dividend:")))
        second=float(int(raw_input("Enter Divisor:")))
        result=first/second
        print first,"/",second,"=",result,"(quotient)"
    elif command==2:
        # Integer division: quotient plus remainder.
        first=int(raw_input("Enter Dividend:"))
        second=int(raw_input("Enter Divisor:"))
        result1=first/second
        result2=first%second
        print first,"/",second,"=",result1,", remainder =",result2
| mit | Python | |
069a5758b16624ac2b547ede44123b64c89baf96 | Add simple script mapping YTID to KA URLs. | danielhollas/AmaraUpload,danielhollas/AmaraUpload | map_ytids_to_ka_urls.py | map_ytids_to_ka_urls.py | #!/usr/bin/env python3
from kapi import *
from utils import *
import argparse, sys
import time
def read_cmd():
    """Parse command line options.

    Returns an argparse.Namespace with .input_file and .subject.
    """
    desc = "Program for mapping YouTube IDs to KA URLs to Crowdin WYSIWYG editor."
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('input_file',metavar='INPUT_FILE', help='Text file containing YouTube IDs and possibly filenames.')
    parser.add_argument('-s','--subject', dest='subject', default='root', help='Print full tree for a given domain/subject.')
    return parser.parse_args()
def print_children_titles(content_tree):
    # Pretty-print the title of every direct child topic/video.
    for child in content_tree["children"]:
        pprint(child['title'])
def print_dict_without_children(dictionary):
    """Print every key/value pair except the (typically huge) 'children' entry."""
    for key, value in dictionary.items():
        if key != 'children':
            print(key, value)
if __name__ == "__main__":
    opts = read_cmd()
    infile = opts.input_file
    subject_title = opts.subject
    # List ytids may also contain filenames
    ytids = []
    # Reading file with YT id's
    with open(infile, "r") as f:
        for line in f:
            y = line.split()
            if len(y) != 0:
                ytids.append(y[0])
            else:
                # Preserve blank lines as None so output stays aligned.
                ytids.append(None)

    tree = load_ka_tree('video')

    # Restrict the search to one subject subtree unless 'root' was requested.
    if subject_title == 'root':
        subtree = tree
    else:
        subtree = find_ka_topic(tree, subject_title)

    videos = []
    for ytid in ytids:
        if ytid is not None:
            v = find_video_by_youtube_id(subtree, ytid)
            if v:
                videos.append(find_video_by_youtube_id(subtree, ytid) )
            else:
                # Keep the unmatched id so it is echoed back verbatim.
                videos.append(ytid)

    for v in videos:
        # NOTE(review): the bare except silently falls back to printing the
        # raw entry for non-dict items -- confirm no other errors are hidden.
        try:
            print(v['ka_url'].replace('www', 'translate'))
        except:
            print(v)
| mit | Python | |
41265e02f47a55d11dcc921aeeebebba290ed61f | Fix Dee. | CelineBoudier/rapid-router,mikebryant/rapid-router,mikebryant/rapid-router,CelineBoudier/rapid-router,mikebryant/rapid-router,mikebryant/rapid-router,CelineBoudier/rapid-router,CelineBoudier/rapid-router | game/migrations/0008_fix_dee.py | game/migrations/0008_fix_dee.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def fix_dee(apps, schema_editor):
    # Data migration: point the 'Dee' character at the correct front-view SVG.
    # Uses the historical model via `apps` as Django migrations require.
    Character = apps.get_model('game', 'Character')
    dee = Character.objects.get(name='Dee')
    dee.en_face = 'characters/front_view/Dee.svg'
    dee.save()
class Migration(migrations.Migration):
    # Runs after the block-limits migration; applies the fix_dee data fix.
    dependencies = [
        ('game', '0007_added_block__limits'),
    ]
    operations = [
        migrations.RunPython(fix_dee)
    ]
| agpl-3.0 | Python | |
31af59fdfa988924da8a6b02b191ae9ba8ab5f02 | Add initial SDS011 sensor reader script | aapris/VekotinVerstas,aapris/VekotinVerstas | RpiAir/sds011.py | RpiAir/sds011.py | # coding=utf-8
"""
TODO:
check sleep command from here!
http://www.codegists.com/snippet/python/sds011_kadamski_python
"""
from __future__ import print_function
import serial
import struct, sys
import sys
import datetime
class Sds011Reader:
    # Reads PM2.5/PM10 frames from an SDS011 particulate sensor over a serial
    # port.  Frames are 10 bytes: 0xAA 0xC0 <pm25 lo/hi> <pm10 lo/hi> <id lo/hi>
    # <checksum> 0xAB.  The *3 variants are the Python 3 code paths (bytes
    # instead of str).
    def __init__(self, port, baudrate=9600):
        # Open the serial port immediately and drop any stale input.
        self.ser = serial.Serial()
        self.ser.port = port # sys.argv[1]
        self.ser.baudrate = baudrate
        self.ser.open()
        self.ser.flushInput()
        self.byte, self.data = 0, ""
    def process_frame(self, d):
        # Python 2 frame parser: d is the 11-byte str starting at the 0xAA head.
        r = struct.unpack('<HHxxBBB', d[2:])
        pm25 = r[0]/10.0
        pm10 = r[1]/10.0
        # Checksum = low byte of the sum of the 6 data bytes.
        checksum = sum(ord(v) for v in d[2:8]) % 256
        ok = checksum == r[2] and r[3] == 0xab
        return pm25, pm10, ok
    def read_forever(self):
        # Python 2 generator: sync on the 0xAA head byte, then yield
        # (pm25, pm10, checksum_ok) for every command-0xC0 frame.
        while True:
            #print(self.byte)
            while self.byte != "\xaa":
                self.byte = self.ser.read(size=1)
            d = self.ser.read(size=10)
            if d[0] == "\xc0":
                pm25, pm10, ok = self.process_frame(self.byte + d)
                yield pm25, pm10, ok
    def process_frame3(self, d):
        # Python 3 frame parser: iterating bytes yields ints, so no ord().
        r = struct.unpack('<HHxxBBB', d[2:])
        pm25 = r[0]/10.0
        pm10 = r[1]/10.0
        #checksum = sum(ord(v) for v in d[2:8]) % 256
        checksum = sum(v for v in d[2:8]) % 256
        ok = checksum == r[2] and r[3] == 0xab
        return pm25, pm10, ok
    def read_forever3(self):
        # Python 3 generator; NOTE(review): the inner print is leftover debug
        # output and will spam stdout while syncing — consider removing.
        while True:
            while self.byte != b"\xaa":
                print(self.byte, type(self.byte))
                self.byte = self.ser.read(size=1)
            d = self.ser.read(size=10)
            #print("FRAME", d, len(d), type(d), d[0])
            #if d[0] == b"\xc0":
            if d[0] == 192:
                pm25, pm10, ok = self.process_frame3(self.byte + d)
                yield pm25, pm10, ok
def main(port, fname=None):
    # Stream sensor readings to stdout forever; optionally append them to
    # *fname* (timestamped, InfluxDB-line-like format).
    sds011 = Sds011Reader(port)
    if fname is None:
        f = None
    else:
        f = open(fname, 'at')
    if sys.version_info[0] == 2:
        for pm25, pm10, ok in sds011.read_forever():
            ts = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
            print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
            if f is not None:
                msg = '{} pm2_5={} pm10={} ok='.format(ts, pm25, pm10, "OK" if ok else "NOK")
                f.write(msg + '\n')
                f.flush()
    else:
        # NOTE(review): unlike the py2 branch, this branch never writes to f,
        # so `fname` is silently ignored under Python 3 — TODO confirm intent.
        for pm25, pm10, ok in sds011.read_forever3():
            print("PM 2.5: {} μg/m^3 PM 10: {} μg/m^3 CRC={}".format(pm25, pm10, "OK" if ok else "NOK"))
            #print(pm25, pm10, ok)
if __name__ == "__main__":
    # Usage: sds011.py <serial-port> [logfile]
    if len(sys.argv) < 2:
        print("Run me:\n python {} /dev/ttyUSB0".format(sys.argv[0]))
        sys.exit(1)
    # Bug fix: sys.argv[2] was indexed unconditionally, so running with only
    # the port argument raised IndexError before main() ever started.
    fname = sys.argv[2] if len(sys.argv) > 2 else None
    main(sys.argv[1], fname)
# serial port in mac:
# '/dev/cu.wchusbserial1410'
| mit | Python | |
6d0f78ccdb8587d5a35ee297198a664274598747 | Create run_test.py | dschreij/staged-recipes,mariusvniekerk/staged-recipes,chrisburr/staged-recipes,conda-forge/staged-recipes,mcs07/staged-recipes,patricksnape/staged-recipes,shadowwalkersb/staged-recipes,scopatz/staged-recipes,larray-project/staged-recipes,chrisburr/staged-recipes,sodre/staged-recipes,pmlandwehr/staged-recipes,petrushy/staged-recipes,jakirkham/staged-recipes,kwilcox/staged-recipes,rmcgibbo/staged-recipes,chohner/staged-recipes,glemaitre/staged-recipes,stuertz/staged-recipes,chohner/staged-recipes,rmcgibbo/staged-recipes,jjhelmus/staged-recipes,glemaitre/staged-recipes,jochym/staged-recipes,shadowwalkersb/staged-recipes,basnijholt/staged-recipes,sannykr/staged-recipes,birdsarah/staged-recipes,barkls/staged-recipes,kwilcox/staged-recipes,scopatz/staged-recipes,Juanlu001/staged-recipes,ReimarBauer/staged-recipes,ocefpaf/staged-recipes,hadim/staged-recipes,rvalieris/staged-recipes,synapticarbors/staged-recipes,SylvainCorlay/staged-recipes,igortg/staged-recipes,grlee77/staged-recipes,synapticarbors/staged-recipes,jochym/staged-recipes,guillochon/staged-recipes,grlee77/staged-recipes,ceholden/staged-recipes,larray-project/staged-recipes,mcs07/staged-recipes,asmeurer/staged-recipes,jjhelmus/staged-recipes,sannykr/staged-recipes,isuruf/staged-recipes,petrushy/staged-recipes,pmlandwehr/staged-recipes,patricksnape/staged-recipes,goanpeca/staged-recipes,NOAA-ORR-ERD/staged-recipes,igortg/staged-recipes,rvalieris/staged-recipes,SylvainCorlay/staged-recipes,cpaulik/staged-recipes,conda-forge/staged-recipes,mariusvniekerk/staged-recipes,sodre/staged-recipes,NOAA-ORR-ERD/staged-recipes,hadim/staged-recipes,barkls/staged-recipes,cpaulik/staged-recipes,jakirkham/staged-recipes,stuertz/staged-recipes,Cashalow/staged-recipes,goanpeca/staged-recipes,birdsarah/staged-recipes,ceholden/staged-recipes,asmeurer/staged-recipes,ocefpaf/staged-recipes,sodre/staged-recipes,basnijholt/staged-recipes,ReimarBauer/staged-recipes,dschreij
/staged-recipes,johanneskoester/staged-recipes,isuruf/staged-recipes,guillochon/staged-recipes,johanneskoester/staged-recipes,Cashalow/staged-recipes,Juanlu001/staged-recipes | recipes/pytest-sugar/run_test.py | recipes/pytest-sugar/run_test.py | import django
from django.conf import settings
# Minimal Django bootstrap so pytest-sugar's Django integration can be imported
# outside a real project: register the app plus the contrib apps it pulls in.
settings.configure(INSTALLED_APPS=['pytest_sugar', 'django.contrib.contenttypes', 'django.contrib.auth'])
django.setup()
# Smoke test: the import itself is the assertion that the package installed.
import pytest_sugar
| bsd-3-clause | Python | |
e50dc18525e0e4cbbef56cd16ba4e2d9690464f1 | Add solution for problem 34 | cifvts/PyEuler | euler034.py | euler034.py | #!/usr/bin/python
from math import factorial, log

# Project Euler 34: sum all numbers equal to the sum of the factorials of
# their digits.  Factorials of the ten decimal digits are computed once.
digit_factorials = [factorial(d) for d in range(10)]

# 7 * 9! is a safe upper bound: a number with 8 or more digits can never
# equal the sum of the factorials of its digits.
total = 0
for candidate in range(10, factorial(9) * 7):
    if candidate == sum(digit_factorials[int(digit)] for digit in str(candidate)):
        total += candidate

print(total)
| mit | Python | |
427752b4ee3d63d1bf29a9a2a9be011662df8556 | add login handler | SandstoneHPC/sandstone-spawner | jupyterhub_login.py | jupyterhub_login.py | from sandstone.lib.handlers.base import BaseHandler
import requests
import os
class JupyterHubLoginHandler(BaseHandler):
    # Logs a Sandstone user in by validating the spawner's JupyterHub API
    # token against the Hub's authorizations endpoint.
    def get(self):
        # Token injected into the environment by the JupyterHub spawner.
        api_token = os.environ['JUPYTERHUB_API_TOKEN']
        # Ask the Hub who this token belongs to.
        url = '{protocol}://{host}/hub/api/authorizations/token/{token}'.format(
            protocol=self.request.protocol,
            host=self.request.host,
            token=api_token
        )
        res = requests.get(
            url,
            headers={
                'Authorization': 'token %s' % api_token
            }
        )
        # NOTE(review): assumes the Hub returns JSON with a 'name' field;
        # a failed request would raise/KeyError here — confirm handling.
        username = res.json()['name']
        if username:
            # Valid token: set the session cookie and enter the user's server.
            self.set_secure_cookie('user', username)
            self.redirect('/user/{}'.format(username))
        else:
            self.set_status(403)
        self.finish()
| mit | Python | |
8bd8ae1daa432bce9881214c4d326ac8a38e2046 | Correct MAPE loss | MagicSen/keras,xiaoda99/keras,pthaike/keras,OlafLee/keras,DLlearn/keras,tencrance/keras,gavinmh/keras,brainwater/keras,yingzha/keras,nzer0/keras,llcao/keras,xurantju/keras,meanmee/keras,zhangxujinsh/keras,bottler/keras,cvfish/keras,jbolinge/keras,jiumem/keras,jalexvig/keras,kemaswill/keras,zxsted/keras,nehz/keras,navyjeff/keras,jayhetee/keras,Cadene/keras,harshhemani/keras,printedheart/keras,vseledkin/keras,danielforsyth/keras,ekamioka/keras,ledbetdr/keras,bboalimoe/keras,marchick209/keras,LIBOTAO/keras,relh/keras,florentchandelier/keras,jimgoo/keras,jonberliner/keras,pjadzinsky/keras,keras-team/keras,gamer13/keras,DeepGnosis/keras,stephenbalaban/keras,daviddiazvico/keras,zhmz90/keras,Yingmin-Li/keras,abayowbo/keras,dolaameng/keras,hhaoyan/keras,asampat3090/keras,iScienceLuvr/keras,nt/keras,ogrisel/keras,Smerity/keras,why11002526/keras,keskarnitish/keras,happyboy310/keras,cheng6076/keras,kfoss/keras,EderSantana/keras,untom/keras,zxytim/keras,amy12xx/keras,nebw/keras,mikekestemont/keras,wxs/keras,ml-lab/keras,rodrigob/keras,rlkelly/keras,ypkang/keras,3dconv/keras,rudaoshi/keras,kuza55/keras,dribnet/keras,kod3r/keras,fmacias64/keras,keras-team/keras,chenych11/keras,eulerreich/keras,dxj19831029/keras,sjuvekar/keras,JasonTam/keras,johmathe/keras,wubr2000/keras,Aureliu/keras,dhruvparamhans/keras,jasonyaw/keras,iamtrask/keras,saurav111/keras,jhauswald/keras,ashhher3/keras,imcomking/Convolutional-GRU-keras-extension- | keras/objectives.py | keras/objectives.py | from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from six.moves import range
epsilon = 1.0e-9
# Theano-based loss functions; each reduces over the last axis (per sample).
def mean_squared_error(y_true, y_pred):
    return T.sqr(y_pred - y_true).mean(axis=-1)
def mean_absolute_error(y_true, y_pred):
    return T.abs_(y_pred - y_true).mean(axis=-1)
def mean_absolute_percentage_error(y_true, y_pred):
    # |y_true| is clipped to >= epsilon so zero targets cannot divide by zero.
    return T.abs_((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf)).mean(axis=-1) * 100.
def mean_squared_logarithmic_error(y_true, y_pred):
    # Clip to non-negative before log1p-style transform.
    return T.sqr(T.log(T.clip(y_pred, epsilon, np.inf) + 1.) - T.log(T.clip(y_true, epsilon, np.inf) + 1.)).mean(axis=-1)
def squared_hinge(y_true, y_pred):
    return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean(axis=-1)
def hinge(y_true, y_pred):
    return T.maximum(1. - y_true * y_pred, 0.).mean(axis=-1)
def categorical_crossentropy(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes
    '''
    # Clip away exact 0/1 to keep log() finite.
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    # scale preds so that the class probas of each sample sum to 1
    y_pred /= y_pred.sum(axis=-1, keepdims=True)
    cce = T.nnet.categorical_crossentropy(y_pred, y_true)
    return cce
def binary_crossentropy(y_true, y_pred):
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    bce = T.nnet.binary_crossentropy(y_pred, y_true)
    return bce
# aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
from .utils.generic_utils import get_from_module
def get(identifier):
    # Resolve a loss by name (or pass a callable through unchanged).
    return get_from_module(identifier, globals(), 'objective')
| from __future__ import absolute_import
import theano
import theano.tensor as T
import numpy as np
from six.moves import range
epsilon = 1.0e-9
# Theano-based loss functions; each reduces over the last axis (per sample).
def mean_squared_error(y_true, y_pred):
    return T.sqr(y_pred - y_true).mean(axis=-1)
def mean_absolute_error(y_true, y_pred):
    return T.abs_(y_pred - y_true).mean(axis=-1)
def mean_absolute_percentage_error(y_true, y_pred):
    # Clip |y_true| to [epsilon, inf) so targets at or near zero cannot
    # produce division-by-zero / exploding ratios (matches the corrected
    # definition used elsewhere in this module).
    return T.abs_((y_true - y_pred) / T.clip(T.abs_(y_true), epsilon, np.inf)).mean(axis=-1) * 100.
def mean_squared_logarithmic_error(y_true, y_pred):
    # Clip to non-negative before the log transform.
    return T.sqr(T.log(T.clip(y_pred, epsilon, np.inf) + 1.) - T.log(T.clip(y_true, epsilon, np.inf) + 1.)).mean(axis=-1)
def squared_hinge(y_true, y_pred):
    return T.sqr(T.maximum(1. - y_true * y_pred, 0.)).mean(axis=-1)
def hinge(y_true, y_pred):
    return T.maximum(1. - y_true * y_pred, 0.).mean(axis=-1)
def categorical_crossentropy(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes
    '''
    # Clip away exact 0/1 to keep log() finite.
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    # scale preds so that the class probas of each sample sum to 1
    y_pred /= y_pred.sum(axis=-1, keepdims=True)
    cce = T.nnet.categorical_crossentropy(y_pred, y_true)
    return cce
def binary_crossentropy(y_true, y_pred):
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    bce = T.nnet.binary_crossentropy(y_pred, y_true)
    return bce
# aliases
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
from .utils.generic_utils import get_from_module
def get(identifier):
    # Resolve a loss by name (or pass a callable through unchanged).
    return get_from_module(identifier, globals(), 'objective')
| mit | Python |
29aa26f553633fbe4a5ae37721e1da0ecca4139c | Create MyaiBot-Pictures.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/moz4r/MyaiBot-Pictures.py | home/moz4r/MyaiBot-Pictures.py | #PICTURE FIND AND DISPLAY BOT
#LONK AT http://www.myai.cloud/
#FOR SERVER NAME AND BOT STATUS
#IT S A SMALL COMPUTER FOR NOW SORRY IF PROBLEMS
from java.lang import String
import random
import threading
import itertools
# Start the MyRobotLab services this bot uses: HTTP client, chatbot engine,
# speech recognition, web UI, HTML filter, TTS voice and image display.
http = Runtime.createAndStart("http","HttpClient")
Runtime.createAndStart("chatBot", "ProgramAB")
Runtime.createAndStart("ear", "WebkitSpeechRecognition")
Runtime.createAndStart("webGui", "WebGui")
Runtime.createAndStart("htmlFilter", "HtmlFilter")
Runtime.createAndStart("mouth", "AcapelaSpeech")
Runtime.createAndStart("Image", "ImageDisplay")
# English voice by default; the commented pair switches to French.
mouth.setVoice("Ryan")
mouth.setLanguage("EN")
#mouth.setVoice("Antoine")
#mouth.setLanguage("FR")
# Wire chatbot output through the HTML filter into the talk() callback below.
chatBot.startSession( "default", "rachel")
chatBot.addTextListener(htmlFilter)
htmlFilter.addListener("publishText", python.name, "talk")
def talk(data):
    # Speak and log every filtered chatbot reply.
    mouth.speak(data)
    print "chatbot dit :", data
def FindImage(image):
    # Announce, fetch (via the myai.cloud picture-lookup endpoint) and display
    # an image matching the spoken query.
    mouth.speak("I show you "+image)
    #mouth.speak("Voici "+image)
    #PLEASE USE REAL LANGUAGE PARAMETER :
    #lang=XX ( FR/EN/RU/IT etc...)
    #A FAKE LANGUAGE WORKS BUT DATABASE WILL BROKE
    a = String(http.get("http://myai.cloud/bot1.php?pic="+image.replace(" ", "%20")+"&lang=US"))
    #a = String(http.get("http://myai.cloud/bot1.php?pic="+image.replace(" ", "%20")+"&lang=FR"))
    Image.display(a)
| apache-2.0 | Python | |
147374971ad21406d61beb1512b5a702298fc3dc | add a generic seach module (relies on sqlalchemy) | mapfish/mapfish,mapfish/mapfish,mapfish/mapfish | cartoweb/plugins/search.py | cartoweb/plugins/search.py | from sqlalchemy.sql import select
from sqlalchemy.sql import and_
from sqlalchemy.sql import func
from shapely.geometry.point import Point
from shapely.geometry.polygon import Polygon
class Search:
    # Builds SQLAlchemy/PostGIS filter expressions from MapFish-style HTTP
    # request parameters (id in path, lon/lat/radius or box in query string).
    EPSG = 4326
    UNITS = 'degrees'
    def __init__(self, idColumn, geomColumn, epsg=EPSG, units=UNITS):
        # Columns used for id filtering and spatial filtering; epsg/units
        # describe the stored geometry (degrees -> use sphere distance).
        self.idColumn = idColumn
        self.geomColumn = geomColumn
        self.epsg = epsg
        self.units = units
        self.limit = None
    def buildExpression(self, request):
        # Extract a numeric feature id from a trailing "<id>.<ext>" path piece.
        id = None
        path = request.path_info.split("/")
        if len(path) > 1:
            path_pieces = path[-1].split(".")
            if len(path_pieces) > 1 and path_pieces[0].isdigit():
                id = int(path_pieces[0])
        expr = None
        if id is not None:
            expr = self.idColumn == id;
        # Side effect: remember the feature limit for the later query() call.
        if 'maxfeatures' in request.params:
            self.limit = int(request.params['maxfeatures'])
        # SRS of the incoming coordinates (defaults to WGS84).
        epsg = self.EPSG
        if 'epsg' in request.params:
            epsg = request.params['epsg']
        # deal with lonlat query
        if 'lon' in request.params and 'lat' in request.params and 'radius' in request.params:
            # define point from lonlat
            lon = float(request.params['lon'])
            lat = float(request.params['lat'])
            point = Point(lon, lat)
            pgPoint = func.pointfromtext(point.wkt, epsg)
            # Reproject the query point into the table's SRS if they differ.
            if epsg != self.epsg:
                pgPoint = func.transform(pgPoint, self.epsg)
            # build query expression
            if self.units == 'degrees':
                dist = func.distance_sphere(self.geomColumn, pgPoint)
            else:
                dist = func.distance(self.geomColumn, pgPoint)
            e = dist < float(request.params['radius'])
            # update query expression
            if expr is not None:
                expr = and_(expr, e)
            else:
                expr = e
        # deal with box query
        elif 'box' in request.params:
            coords = request.params['box'].split(',')
            # define polygon from box
            pointA = (float(coords[0]), float(coords[1]))
            pointB = (float(coords[0]), float(coords[3]))
            pointC = (float(coords[2]), float(coords[3]))
            pointD = (float(coords[2]), float(coords[1]))
            pointE = pointA
            coords = (pointA, pointB, pointC, pointD, pointE)
            poly = Polygon(coords)
            pgPoly = func.geomfromtext(poly.wkt, epsg)
            if epsg != self.epsg:
                pgPoly = func.transform(pgPoly, self.epsg)
            # build query expression: && is the PostGIS bbox-overlap operator.
            e = self.geomColumn.op('&&')(pgPoly)
            # update query expression
            if expr is not None:
                expr = and_(expr, e)
            else:
                expr = e
        return expr
    def query(self, session, obj, tableObj, expr):
        # Run the assembled expression, honoring the maxfeatures limit.
        return session.query(obj).from_statement(
            select([tableObj], expr).limit(self.limit)).all()
| bsd-3-clause | Python | |
6078617684edbc7f264cfe08d60f7c3d24d2898f | add test for handle_conversation_before_save | SkygearIO/chat,lakoo/chat,lakoo/chat,rickmak/chat,lakoo/chat,SkygearIO/chat | plugin/test/test_handle_conversation_before_save.py | plugin/test/test_handle_conversation_before_save.py | import unittest
import copy
from unittest.mock import Mock
import chat_plugin
from chat_plugin import handle_conversation_before_save
class TestHandleConversationBeforeSave(unittest.TestCase):
    # Validation tests for the conversation before-save hook: participant and
    # admin requirements, plus direct-message constraints.
    def setUp(self):
        self.conn = None
        # Pretend the request is made by user1.
        chat_plugin.current_user_id = Mock(return_value="user1")
    def record(self):
        # A fresh, valid conversation record for each test.
        return {
            'participant_ids': ['user1', 'user2'],
            'admin_ids': ['user1']
        }
    def original_record(self):
        # The "previous" version of the record, identical to record().
        return {
            'participant_ids': ['user1', 'user2'],
            'admin_ids': ['user1']
        }
    def test_with_valid_record(self):
        # A well-formed update must pass without raising.
        handle_conversation_before_save(
            self.record(), self.original_record(), self.conn)
    def test_no_participants(self):
        record = self.record()
        record['participant_ids'] = []
        with self.assertRaises(Exception) as cm:
            handle_conversation_before_save(
                record, self.original_record(), self.conn)
    def test_no_admins(self):
        record = self.record()
        record['admin_ids'] = []
        with self.assertRaises(Exception) as cm:
            handle_conversation_before_save(
                record, self.original_record(), self.conn)
    def test_create_direct_message_for_others(self):
        # Creating a DM that excludes the current user must be rejected.
        record = self.record()
        record['participant_ids'] = ['user2', 'user3']
        record['is_direct_message'] = True
        with self.assertRaises(Exception) as cm:
            handle_conversation_before_save(
                record, None, self.conn)
    def test_create_direct_message_with_three_participants(self):
        # A DM is strictly two-party.
        record = self.record()
        record['participant_ids'] = ['user1', 'user2', 'user3']
        record['is_direct_message'] = True
        with self.assertRaises(Exception) as cm:
            handle_conversation_before_save(
                record, None, self.conn)
    def test_direct_message_should_have_no_admin(self):
        # The hook clears admin_ids on direct messages.
        record = self.record()
        record['is_direct_message'] = True
        handle_conversation_before_save(record, None, self.conn)
        self.assertTrue(record['admin_ids'] == [])
| apache-2.0 | Python | |
943e162eee203f05b5a2d5b19bcb4a9c371cc93b | Add new script to get comet velocity from kymograph | hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_tools | plugins/Scripts/Plugins/Kymograph_Comet_Velocity.py | plugins/Scripts/Plugins/Kymograph_Comet_Velocity.py | # @Float(label="Time Interval (s)", value=1) dt
# @Float(label="Pixel Length (um)", value=1) pixel_length
# @Boolean(label="Do you want to save results files ?", required=False) save_results
# @Boolean(label="Do you want to save ROI files ?", required=False) save_roi
# @ImageJ ij
# @ImagePlus img
# @Dataset data
# @StatusService status
import os
import math
from ij.plugin.frame import RoiManager
from ij.measure import ResultsTable
def main():
    """Measure comet runs from straight-line ROIs drawn on a kymograph.

    For every line ROI in the ROI Manager the run length (um), duration (s)
    and mean speed (um/s) are computed from the line end points, collected in
    a ResultsTable, shown, and optionally saved next to the source image.

    Fixes: the declared script parameters were ignored — `save_results` never
    gated the CSV write, and `dt` / `pixel_length` were unused so the columns
    labelled um/s actually held raw pixels/frames.
    """
    # Get image processor and imgplus
    imp = img.getProcessor()
    imgp = data.getImgPlus()
    fname = data.getSource()
    name = os.path.basename(fname)

    # Bail out (with a status-bar hint) when no ROIs are available.
    rm = RoiManager.getInstance()
    if not rm:
        status.warn("Use ROI Manager tool (Analyze>Tools>ROI Manager...).")
        return False
    if len(rm.getRoisAsArray()) == 0:
        status.warn("ROI Manager does not have any ROI.")
        return False

    if save_roi:
        roi_path = os.path.splitext(fname)[0] + ".ROI.zip"
        rm.runCommand("Save", roi_path)

    rt = ResultsTable()
    for i, roi in enumerate(rm.getRoisAsArray()):
        x1, y1 = roi.x1, roi.y1
        x2, y2 = roi.x2, roi.y2
        # Normalize so the line always runs left (earlier time) to right.
        if x1 > x2:
            x1, x2 = x2, x1
            y1, y2 = y2, y1

        # Convert pixel / frame distances into physical units.
        run_length = (y1 - y2) * pixel_length
        run_duration = (x2 - x1) * dt
        run_speed = run_length / run_duration

        rt.incrementCounter()
        rt.addValue("Track ID", i + 1)
        rt.addValue("Track Length (um)", run_length)
        rt.addValue("Track duration (s)", run_duration)
        rt.addValue("Track speed (um/s)", run_speed)

    # Only write the CSV when the user asked for it.
    if save_results:
        results_path = os.path.splitext(fname)[0] + ".Results.csv"
        rt.save(results_path)
    rt.show('Comet Analysis Results for "%s"' % name)
main() | bsd-3-clause | Python | |
9934db8d079cf283f177daa55cb9e21e3f12dae2 | add sunburst graph python module | Etsukata/d3js_trace,Etsukata/d3js_trace | sbgraph.py | sbgraph.py | #!/usr/bin/python
import sys
import re
# Parser state: all complete traces, the trace being accumulated, and the
# deduplicated (trace, count) samples built from them.
stack_traces = []
stack_trace = []
stack_samples = []
def stack_sample_to_dict(sample):
    """Convert one counted stack trace into a nested name/children dict.

    The outermost frame becomes the root node and each deeper frame becomes
    its single child; the leaf carries the sample count as 'size'.
    """
    frames = sample['stack_trace']
    count = sample['count']
    if len(frames) == 1:
        return {'name': frames[0], 'size': count}
    child = stack_sample_to_dict({'stack_trace': frames[1:], 'count': count})
    return {'name': frames[0], 'children': [child]}
def add_root(d):
    """Wrap tree *d* under a synthetic 'All' root node."""
    return {'name': 'All', 'children': [d]}
def merge_dict(d1, d2):
    """Recursively merge call-tree dict *d2* into a (shallow) copy of *d1*.

    Both arguments must share the same root 'name'.  *d2* is expected to be a
    single-path tree (one child per level), as produced by
    stack_sample_to_dict()/add_root().  A child with a matching name is merged
    recursively; otherwise d2's child is appended.  A childless d2 node is
    folded in as an anonymous ('') leaf carrying its 'size'.

    On a root-name mismatch both trees are dumped and the process exits,
    preserving the original fail-fast behaviour.

    Fixes: ``pprint`` was referenced but never imported (NameError on the
    error path), and ``dict.has_key`` (Python-2-only) is replaced by ``in``.
    """
    from pprint import pprint  # was used below without ever being imported

    def has_same_child(parent, single):
        # Does *parent* already contain a child named like single's only child?
        target = single['children'][0]['name']
        for child in parent['children']:
            if child['name'] == target:
                return True
        return False

    if d1['name'] != d2['name']:
        print('error on merge_dict(): root is not same.')
        print('d1:')
        pprint(d1)
        print('d2:')
        pprint(d2)
        sys.exit(1)
    if 'children' not in d1:
        # Put the childless tree on the d2 side and recurse once, swapped.
        return merge_dict(d2, d1)
    ret = d1.copy()
    if 'children' in d2:
        if has_same_child(ret, d2):
            children = ret['children']
            for i in range(0, len(children)):
                if children[i]['name'] == d2['children'][0]['name']:
                    children[i] = merge_dict(children[i], d2['children'][0])
        else:
            ret['children'].append(d2['children'][0])
    else:
        # Leaf on d2's side: keep its sample count as an unnamed child.
        dummy = {
            'name': '',
            'size': d2['size']
        }
        ret['children'].append(dummy)
    return ret
def strip_func(f):
    """Strip perf decorations from a stack-trace line, returning the symbol.

    Removes the "([kernel.kallsyms])" / "([unknown])" / "()" suffixes, then
    drops the leading address token and joins whatever remains.
    """
    cleaned = f
    for pattern in (r'\(\[kernel.kallsyms\]\.init\.text\)',
                    r'\(\[kernel.kallsyms\]\)',
                    r'\(\[unknown\]\)',
                    r'\(\)'):
        cleaned = re.sub(pattern, '', cleaned)
    return ''.join(cleaned.split()[1:])
# Parse `perf script`-style output from stdin: comment lines are skipped,
# tab-indented lines are frames, and a blank line terminates a trace.
for line in sys.stdin:
    if line[0] == '#':
        continue
    if line[0] == '\n':
        if not stack_trace == []:
            stack_traces.append(stack_trace)
            stack_trace = []
        continue
    if line[0] == '\t':
        stack_trace.append(line.strip())
        continue
# Deduplicate traces into counted samples (root-first order, symbols cleaned).
# NOTE(review): the "+ 1" overcounts, since st itself is already included in
# stack_traces.count(st) — confirm whether that offset is intentional.
for st in stack_traces:
    count = stack_traces.count(st) + 1
    st.reverse()
    st = map(strip_func, st)
    stack_sample = {
        'stack_trace' : st,
        'count' : count
    }
    if not st in map(lambda x: x['stack_trace'], stack_samples):
        stack_samples.append(stack_sample)
# Merge every sample into a single 'All'-rooted tree (Python 2 script:
# map() returns lists and print is a statement).
root = {
    'name' : 'All',
    'children' : []
}
for ss in stack_samples:
    d = stack_sample_to_dict(ss)
    d = add_root(d)
    root = merge_dict(root, d)
# Emit the tree as a JavaScript variable for the d3 sunburst page.
print "var test_json = ", root
| apache-2.0 | Python | |
c426c773ee36d2872f79ff01d3bed615245e61b3 | add nbconvert.utils.pandoc | ipython/ipython,ipython/ipython | IPython/nbconvert/utils/pandoc.py | IPython/nbconvert/utils/pandoc.py | """Utility for calling pandoc"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013 the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import sys
import subprocess
# IPython imports
from IPython.utils.py3compat import cast_bytes
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def pandoc(source, fmt, to, encoding='utf-8'):
    """Convert *source* from format *fmt* to format *to* via the pandoc binary.

    Raises if pandoc is not installed; any pandoc error output goes to the
    inherited stderr.

    Parameters
    ----------
    source : string
        Input text, assumed to be valid in format *fmt*.
    fmt : string
        Name of the input format (markdown, etc.).
    to : string
        Name of the output format (html, etc.).

    Returns
    -------
    out : unicode
        Pandoc's output, with its final character (normally the trailing
        newline) removed.
    """
    command = ['pandoc', '-f', fmt, '-t', to]
    proc = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    stdout, _ = proc.communicate(cast_bytes(source, encoding))
    text = stdout.decode(encoding, 'replace')
    return text[:-1]
| bsd-3-clause | Python | |
5b951c91a7d054958d819bf19f97a5b33e21ff2d | add in some forms | muffinresearch/solitude,muffinresearch/solitude | lib/buyers/forms.py | lib/buyers/forms.py | from django import forms
from .models import Buyer
class BuyerValidation(forms.ModelForm):
    # ModelForm exposing all Buyer fields for create/update validation.
    class Meta:
        model = Buyer
class PreapprovalValidation(forms.Form):
    # Validates a preapproval request: an active date window plus the URLs
    # the payment flow returns/cancels to.
    start = forms.DateField()
    end = forms.DateField()
    return_url = forms.URLField()
    cancel_url = forms.URLField()
| bsd-3-clause | Python | |
4d47d14e2f630652c36765abf5907d6800a8012d | Revert "Revert "Initial python to find public APIs in Hadoop and compare them to outp…"" (#73) (cherry picked from commit a24236206b35744835781d42ff1dededbc685721) | mbalassi/bigtop,apache/bigtop,panagiotisl/bigtop,youngwookim/bigtop,sekikn/bigtop,youngwookim/bigtop,juju-solutions/bigtop,welikecloud/bigtop,welikecloud/bigtop,Guavus/bigtop,apache/bigtop,sekikn/bigtop,apache/bigtop,Guavus/bigtop,welikecloud/bigtop,youngwookim/bigtop,JunHe77/bigtop,panagiotisl/bigtop,mbalassi/bigtop,youngwookim/bigtop,juju-solutions/bigtop,JunHe77/bigtop,arenadata/bigtop,Guavus/bigtop,panagiotisl/bigtop,mbalassi/bigtop,alaunay/bigtop,alaunay/bigtop,Guavus/bigtop,sekikn/bigtop,arenadata/bigtop,JunHe77/bigtop,sekikn/bigtop,apache/bigtop,panagiotisl/bigtop,welikecloud/bigtop,juju-solutions/bigtop,JunHe77/bigtop,juju-solutions/bigtop,mbalassi/bigtop,sekikn/bigtop,apache/bigtop,alaunay/bigtop,arenadata/bigtop,arenadata/bigtop,alaunay/bigtop | bigtop-tests/spec-tests/runtime/src/test/python/find-public-apis.py | bigtop-tests/spec-tests/runtime/src/test/python/find-public-apis.py | #!/usr/bin/python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import re
import warnings
from optparse import OptionParser
def main():
    """Warn about @InterfaceAudience.Public Hadoop classes that appear in an
    API-compatibility report (Python 2 script)."""
    parser = OptionParser()
    parser.add_option("-d", "--directory", help="Top level directory of source tree")
    parser.add_option("-r", "--report", help="API compatibility report file, in HTML format")
    (options, args) = parser.parse_args()
    # Get the ATS endpoint if it's not given.
    if options.directory == None:
        print "You must specify a top level directory of the source tree"
        return 1
    if options.report == None:
        print "You must specify the report to check against"
        return 1
    # Pass 1: walk the source tree collecting "package/ClassName" strings for
    # every class annotated @InterfaceAudience.Public.
    publicClasses = set()
    for directory in os.walk(options.directory):
        for afile in directory[2]:
            if re.search("\.java$", afile) != None:
                handle = open(os.path.join(directory[0], afile))
                # Figure out the package we're in
                pre = re.search("org/apache/hadoop[\w/]*", directory[0])
                if pre == None:
                    warnings.warn("No package for " + directory[0])
                    continue
                package = pre.group(0)
                # `expecting` == 1 between seeing the annotation and the next
                # class declaration.
                expecting = 0
                for line in handle:
                    if re.search("@InterfaceAudience.Public", line) != None:
                        expecting = 1
                    classname = re.search("class (\w*)", line)
                    if classname != None and expecting == 1:
                        publicClasses.add(package + "/" + classname.group(1))
                        expecting = 0
                handle.close()
    # Pass 2: scan the HTML report for mangled class names; warn once per
    # changed class that is public.
    handle = open(options.report)
    haveChecked = set()
    for line in handle:
        classre = re.search("mangled: <b>(org/apache/hadoop[\w/]+)", line)
        if classre != None:
            classname = classre.group(1)
            if classname not in haveChecked:
                if classname in publicClasses:
                    print "Warning, found change in public class " + classname
                haveChecked.add(classname)
    handle.close()
main()
| apache-2.0 | Python | |
e63d18f9ef70e7a42344cf13322676efa2226fa2 | Create largest_rectangle_in_histogram.py | py-in-the-sky/challenges,py-in-the-sky/challenges,py-in-the-sky/challenges | largest_rectangle_in_histogram.py | largest_rectangle_in_histogram.py | """
https://www.youtube.com/watch?v=VNbkzsnllsU
"""
def largest_rectangle_in_histogram(histogram):
    """Return the area of the largest rectangle under *histogram* (O(n)).

    Classic monotonic-stack sweep: ``positions``/``heights`` track, for every
    currently open rectangle height, the left-most column it extends from.

    Bug fix: when a lower bar closes taller rectangles, the lower height must
    be re-opened at the left-most popped position; the original dropped it,
    so e.g. [3, 2, 3] returned 3 instead of 6.
    """
    assert all(height >= 0 for height in histogram)
    largest = 0
    positions = [0]   # left index where the matching open height starts
    heights = [0]     # strictly increasing open heights (0 is a sentinel)
    for i, height in enumerate(histogram):
        if height > heights[-1]:
            # Track new tallest rectangle.
            positions.append(i)
            heights.append(height)
        elif height < heights[-1]:
            # Close every rectangle taller than the current bar.
            while height < heights[-1]:
                h, p = heights.pop(), positions.pop()
                largest = max(largest, h * (i - p))
            if height > heights[-1]:
                # Re-open the current height from the left-most closed column.
                positions.append(p)
                heights.append(height)
    i = len(histogram)
    while heights:
        # Close the rectangles still open at the right edge.
        h, p = heights.pop(), positions.pop()
        largest = max(largest, h * (i - p))
    return largest
def tests():
    # Smoke tests (Python 2: map() returns a list here, and print is a
    # statement).  Note: never invoked automatically — call tests() manually.
    assert largest_rectangle_in_histogram([]) == 0
    assert largest_rectangle_in_histogram([4]) == 4
    assert largest_rectangle_in_histogram([1, 3, 2, 1, 2]) == 5
    assert largest_rectangle_in_histogram(map(lambda x: x * 10000000, [1, 3, 2, 1, 2])) == 5 * 10000000
    assert largest_rectangle_in_histogram([1, 2, 1, 3, 2, 0, 1]) == 5
    print 'tests pass!'
| mit | Python | |
f2854aff3dde6439d990f8fd7d69e70dd4664b93 | Add tags app admin | opps/opps,opps/opps,jeanmask/opps,YACOWS/opps,YACOWS/opps,opps/opps,williamroot/opps,jeanmask/opps,opps/opps,williamroot/opps,YACOWS/opps,williamroot/opps,jeanmask/opps,YACOWS/opps,jeanmask/opps,williamroot/opps | opps/core/tags/admin.py | opps/core/tags/admin.py | # -*- encoding: utf-8 -*-
from django.contrib import admin
from .models import Tag
class TagAdmin(admin.ModelAdmin):
    # Admin for Tag: searchable by name, slug auto-filled from name.
    list_display = ('name', 'date_insert')
    search_fields = ('name',)
    prepopulated_fields = {"slug": ["name"]}
    fieldsets = [(None, {'fields': ('name', 'slug',)})]
    class Meta:
        model = Tag
admin.site.register(Tag, TagAdmin)
| mit | Python | |
c3d37914777ee9e2356bfa691361351423b0615a | make a nova server instance return it's host's hostname | luos/nova-latency-scheduler,luos/nova-latency-scheduler | heat/network_aware_resources.py | heat/network_aware_resources.py | from heat.engine.resources.openstack.nova.server import Server as NovaServer
from oslo_log import log as logging
import traceback
LOG = logging.getLogger(__name__)
class NetworkAwareServer(NovaServer):
    # Nova server resource that also exposes a "host" attribute resolving to
    # the hypervisor hostname via the OS-EXT-SRV-ATTR extension.
    OS_EXT_HOST_KEY = 'OS-EXT-SRV-ATTR:host'
    def get_attribute(self, key, *path):
        if key == "host":
            # Fetch live data from Nova; the extension key holds the hostname.
            server, data = self.get_live_resource_data()
            return data.get(self.OS_EXT_HOST_KEY)
        # Everything else behaves exactly like OS::Nova::Server.
        return super(NetworkAwareServer, self).get_attribute(key, *path)
def resource_mapping():
    # Heat plugin hook: register the custom resource type name.
    return {
        'OS::NetworkAware::Server': NetworkAwareServer,
    }
| mit | Python | |
425363751244d5ff75e61126fd1481094c941129 | Create luhn.py for pypi package | garwoodpr/LuhnAlgorithmProof,garwoodpr/LuhnAlgorithmProof | luhn/luhn.py | luhn/luhn.py | #!/usr/bin/env python3
# Python 3.4 Implementation of the Luhn Algorithm
# Checks to see if 14, 15 or 16 digit account number is Luhn Compliant.
# See https://en.wikipedia.org/wiki/Luhn_algorithm for formula details.
# This file is suitable for unittest testing
# CardNumber is an account number (for example) received as a string
# Code is intentionally verbose to demonstrate each step clearly.
class aLuhn(object):
    """Luhn (mod 10) validator for 14/15/16-digit account numbers.

    See https://en.wikipedia.org/wiki/Luhn_algorithm for the formula.
    """

    @staticmethod
    def doLuhn(cardNumber):
        """Return True when *cardNumber* (a digit string) is Luhn compliant.

        Non-numeric input and lengths other than 14, 15 or 16 return False.

        Fix: the original method omitted ``self``, so it only worked when
        called through the class; ``@staticmethod`` keeps the existing
        ``aLuhn.doLuhn(number)`` call style working and makes instance calls
        work too.  The hand-written per-length index tables are replaced by
        equivalent generic offset loops.
        """
        try:
            int(cardNumber)
        except ValueError:
            return False

        n = len(cardNumber)
        if n not in (14, 15, 16):
            return False

        digits = [int(c) for c in cardNumber]
        checksum = 0
        # Double every second digit counting from the right (skipping the
        # check digit); a doubled value >= 10 contributes its digit sum,
        # which equals value - 9.
        for offset in range(2, n + 1, 2):
            doubled = digits[-offset] * 2
            checksum += doubled - 9 if doubled > 9 else doubled
        # The remaining digits (still excluding the check digit) add as-is.
        for offset in range(3, n + 1, 2):
            checksum += digits[-offset]
        # Luhn check-digit formula: (9 * sum) mod 10 must equal the
        # right-most digit.
        return (checksum * 9) % 10 == digits[-1]
| mit | Python | |
be271f41103efdc26aadbc2cf3e39446bf2a05bc | Define Application class. | soasme/axe | taxe/__init__.py | taxe/__init__.py | # -*- coding: utf-8 -*-
from functools import wraps
from werkzeug.wrappers import Request, Response
class Application(object):
    """Minimal Werkzeug-based WSGI application skeleton (Python 2 syntax).

    ``route`` is a placeholder decorator factory and ``__call__`` is the
    WSGI entry point; every request currently receives the same response.
    """
    def route(self, url):
        # Decorator factory; ``url`` is captured by the closures below.
        # NOTE(review): no URL registration/dispatch happens yet -- the
        # wrapper only prints ``self`` and ``url`` before delegating to
        # the wrapped function.
        def deco(function):
            @wraps(function)
            def _(*args, **kwargs):
                print self, url
                return function(*args, **kwargs)
            return _
        return deco
    # Request.application adapts WSGI's (environ, start_response) calling
    # convention: it builds the Request object passed in here and expects
    # a Response object back.
    @Request.application
    def __call__(self, request):
        return Response('Hello World')
if __name__ == '__main__':
    # Dev entry point: serve this WSGI app on http://localhost:4000 using
    # Werkzeug's built-in (non-production) development server.
    from werkzeug.serving import run_simple
    application = Application()
    run_simple('localhost', 4000, application)
| mit | Python | |
d0c7dfad3e7769b6f89828733414a4a68677696a | Create UnorderedList.py | prashantas/MyDataScience | Python/GenPythonProblems/UnorderedList.py | Python/GenPythonProblems/UnorderedList.py | ## http://interactivepython.org/runestone/static/pythonds/BasicDS/ImplementinganUnorderedListLinkedLists.html
class Node:
    """A singly linked list node: a data payload plus a next-node pointer."""

    def __init__(self, initdata):
        self.data = initdata  # payload stored in this node
        self.next = None      # following Node in the chain, or None at the tail

    def getData(self):
        """Return the payload stored in this node."""
        return self.data

    def getNext(self):
        """Return the next node in the chain (None at the tail)."""
        return self.next

    def setData(self, newdata):
        """Replace this node's payload."""
        self.data = newdata

    def setNext(self, newnext):
        """Point this node at a new successor."""
        self.next = newnext


class UnorderedList:
    """Unordered collection backed by a head-referenced singly linked list.

    New items are pushed at the head, so ``add`` is O(1); ``size``,
    ``search`` and ``remove`` walk the chain and are O(n).
    """

    def __init__(self):
        self.head = None  # first Node, or None when the list is empty

    def isEmpty(self):
        """Return True when the list holds no items."""
        return self.head is None

    def add(self, item):
        """Prepend *item* to the list in constant time."""
        temp = Node(item)
        temp.setNext(self.head)
        self.head = temp

    def size(self):
        """Return the number of items by traversing the chain."""
        current = self.head
        count = 0
        while current is not None:
            count += 1
            current = current.getNext()
        return count

    def search(self, item):
        """Return True if *item* occurs anywhere in the list."""
        current = self.head
        while current is not None:
            if current.getData() == item:
                return True
            current = current.getNext()
        return False

    def remove(self, item):
        """Remove the first occurrence of *item*; a missing item is a no-op.

        Bug fix: the original looped ``while not found`` and dereferenced
        ``current`` unconditionally, raising AttributeError on None when the
        item was not present.  The traversal now stops at the tail.
        """
        current = self.head
        previous = None
        while current is not None:
            if current.getData() == item:
                break
            previous = current
            current = current.getNext()
        if current is None:
            return  # item not found -- nothing to unlink
        if previous is None:
            # Match was the head node: advance the head past it.
            self.head = current.getNext()
        else:
            previous.setNext(current.getNext())
# Demo for the linked-list classes above: build a list, then exercise
# size(), search() and remove(), printing each intermediate result.
mylist = UnorderedList()
mylist.add(31)
mylist.add(77)
mylist.add(17)
mylist.add(93)
mylist.add(26)
mylist.add(54)
# Six items pushed: expect 6, True (93 present), False (100 absent).
print(mylist.size())
print(mylist.search(93))
print(mylist.search(100))
# After adding 100, the previously failed search succeeds and size is 7.
mylist.add(100)
print(mylist.search(100))
print(mylist.size())
# Each remove() drops one item, so the printed size decreases by one;
# the final search for the removed 93 prints False.
mylist.remove(54)
print(mylist.size())
mylist.remove(93)
print(mylist.size())
mylist.remove(31)
print(mylist.size())
print(mylist.search(93))
| bsd-2-clause | Python | |
6ac4db0b9bfc638d708fd7341b0f3e1437ce8f97 | add dir cmmbbo to hold code for docker scheduler | zam121118/mao-mbbo,zam121118/mao-mbbo,zam121118/mao-mbbo,zam121118/mao-mbbo | cmbbo/main.py | cmbbo/main.py | #coding: utf-8
| bsd-2-clause | Python | |
4ab45fc2dee8676566467706c0a433315c8fe3c8 | Add test | corburn/scikit-bio,colinbrislawn/scikit-bio,SamStudio8/scikit-bio,averagehat/scikit-bio,colinbrislawn/scikit-bio,anderspitman/scikit-bio,johnchase/scikit-bio,xguse/scikit-bio,johnchase/scikit-bio,kdmurray91/scikit-bio,demis001/scikit-bio,Achuth17/scikit-bio,Achuth17/scikit-bio,gregcaporaso/scikit-bio,jairideout/scikit-bio,averagehat/scikit-bio,jdrudolph/scikit-bio,jensreeder/scikit-bio,Kleptobismol/scikit-bio,wdwvt1/scikit-bio,jdrudolph/scikit-bio,jensreeder/scikit-bio,anderspitman/scikit-bio,xguse/scikit-bio,jairideout/scikit-bio,demis001/scikit-bio,wdwvt1/scikit-bio,Kleptobismol/scikit-bio,SamStudio8/scikit-bio,corburn/scikit-bio,Kleptobismol/scikit-bio,kdmurray91/scikit-bio,gregcaporaso/scikit-bio | skbio/util/tests/test_testing.py | skbio/util/tests/test_testing.py | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import numpy.testing as npt
from skbio.util.testing import get_data_path
def test_get_data_path():
    """get_data_path should resolve *fn* inside this module's data/ dir."""
    filename = 'parrot'
    here = os.path.dirname(os.path.abspath(__file__))
    expected = os.path.join(here, 'data', filename)
    observed = get_data_path(filename)
    npt.assert_string_equal(observed, expected)
| bsd-3-clause | Python | |
81032ffdae2d4bd02f3e9a6460b022079bf3cee8 | Create about.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/hairygael/GESTURES/about.py | home/hairygael/GESTURES/about.py | def about():
sleep(2)
ear.pauseListening()
sleep(2)
i01.setArmSpeed("right", 0.1, 0.1, 0.2, 0.2);
i01.setArmSpeed("left", 0.1, 0.1, 0.2, 0.2);
i01.setHeadSpeed(0.2,0.2)
i01.moveArm("right", 64, 94, 10, 10);
i01.mouth.speakBlocking("I am the first life size humanoid robot you can 3D print and animate")
i01.moveHead(65,66)
i01.moveArm("left", 64, 104, 10, 11);
i01.moveArm("right", 44, 84, 10, 11);
i01.mouth.speakBlocking("my designer creator is Gael Langevin a French sculptor, model maker")
i01.moveHead(75,86)
i01.moveArm("left", 54, 104, 10, 11);
i01.moveArm("right", 64, 84, 10, 20);
i01.mouth.speakBlocking("who has released my files to the opensource 3D world.")
i01.moveHead(65,96)
i01.moveArm("left", 44, 94, 10, 20);
i01.moveArm("right", 54, 94, 20, 11);
i01.mouth.speakBlocking("this is where my builder downloaded my files.")
i01.moveHead(75,76)
i01.moveArm("left", 64, 94, 20, 11);
i01.moveArm("right", 34, 94, 10, 11);
i01.mouth.speakBlocking("after five hundred hours of printing, four kilos of plastic, twenty five hobby servos, blood and sweat.I was brought to life") # should be " i was borght to life."
i01.moveHead(65,86)
i01.moveArm("left", 24, 94, 10, 11);
i01.moveArm("right", 24, 94, 10, 11);
i01.mouth.speakBlocking("so if You have a 3D printer, some building skills, then you can build your own version of me") # mabe add in " alot of money"
i01.moveHead(85,86)
i01.moveArm("left", 5, 94, 20, 30);
i01.moveArm("right", 24, 124, 10, 20);
i01.mouth.speakBlocking("and if enough people build me, some day my kind could take over the world") # mabe add in " alot of money"
i01.moveHead(75,96)
i01.moveArm("left", 24, 104, 10, 11);
i01.moveArm("right", 5, 94, 20, 30);
i01.mouth.speakBlocking("I'm just kidding. i need some legs to get around, and i have to over come my pyro-phobia, a fear of fire") # mabe add in " alot of money"
i01.moveHead(75,96)
i01.moveArm("left", 5, 94, 10, 11)
i01.moveArm("right", 4, 94, 10, 11);
i01.mouth.speakBlocking("so, until then. i will be humankind's humble servant")
i01.rest()
i01.setArmSpeed("right", 1, 1, 1, 1);
i01.setArmSpeed("left", 1, 1, 1, 1);
i01.setHeadSpeed(1,1)
sleep(2)
ear.resumeListening()
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.