commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
52e282b8c51c71db61cb0163df02caf2dce63b45 | add pretty function repr extension | extensions/pretty_func_repr.py | extensions/pretty_func_repr.py | """
Trigger pinfo (??) to compute text reprs of functions, etc.
Requested by @katyhuff
"""
import types
from IPython import get_ipython
def pinfo_function(obj, p, cycle):
    """Render *obj* through IPython's inspector, exactly as ``obj?`` would.

    Parameters
    ----------
    obj:
        The object being formatted.
    p:
        The pretty-printer instance to write the text into.
    cycle:
        True when a formatting cycle was detected (ignored here).
    """
    p.text(get_ipython().inspector._format_info(obj, detail_level=1))
_save_types = {}
def load_ipython_extension(ip):
    """Register pinfo_function as the plain-text repr for function types."""
    plain = ip.display_formatter.formatters['text/plain']
    function_types = (
        types.FunctionType,
        types.BuiltinMethodType,
        types.BuiltinFunctionType,
    )
    for ftype in function_types:
        # Remember whatever formatter was installed before so the
        # unload hook can restore it.
        _save_types[ftype] = plain.for_type(ftype, pinfo_function)
def unload_ipython_extension(ip):
    """Unregister pinfo_function, restoring the saved formatters."""
    plain = ip.display_formatter.formatters['text/plain']
    while _save_types:
        ftype, previous = _save_types.popitem()
        plain.for_type(ftype, previous)
| Python | 0.000001 | |
48ee097349b4315b9f3c726b734aa20e878b2288 | Add binary-numbers-small resource | csunplugged/resources/views/binary_cards_small.py | csunplugged/resources/views/binary_cards_small.py | """Module for generating Binary Cards (Small) resource."""
import os.path
from PIL import Image, ImageDraw, ImageFont
from utils.retrieve_query_parameter import retrieve_query_parameter
def resource_image(request, resource):
    """Create an image for the Binary Cards (Small) resource.

    Args:
        request: HTTP request object.
        resource: Object of resource data.

    Returns:
        A list of Pillow image objects: one card sheet per group of
        4 bits requested, plus an all-black sheet when a black back
        is requested.
    """
    BASE_IMAGE_PATH = "static/img/resources/binary-cards-small/"
    IMAGE_SIZE_X = 2480
    IMAGE_SIZE_Y = 3044
    # (sheet filename, highest bit number covered by that sheet)
    IMAGE_DATA = [
        ("binary-cards-small-1.png", 4),
        ("binary-cards-small-2.png", 8),
        ("binary-cards-small-3.png", 12),
    ]

    # Retrieve parameters (each call validates against the allowed values)
    requested_bits = retrieve_query_parameter(request, "number_bits", ["4", "8", "12"])
    dot_counts = retrieve_query_parameter(request, "dot_counts", ["yes", "no"])
    black_back = retrieve_query_parameter(request, "black_back", ["yes", "no"])

    if dot_counts == "yes":
        font_path = "static/fonts/PatrickHand-Regular.ttf"
        font = ImageFont.truetype(font_path, 200)
        # Text anchor points, one per card on a 4-card sheet.
        TEXT_COORDS = [
            (525, 1341),
            (1589, 1341),
            (525, 2889),
            (1589, 2889),
        ]

    images = []
    for (image_path, image_bits) in IMAGE_DATA:
        requested_bits = int(requested_bits)
        # Only include sheets up to the requested number of bits.
        if image_bits <= requested_bits:
            image = Image.open(os.path.join(BASE_IMAGE_PATH, image_path))
            if dot_counts == "yes":
                draw = ImageDraw.Draw(image)
                # Label each of the sheet's four cards with its power of
                # two, centred on the card's anchor point.
                for number in range(image_bits - 4, image_bits):
                    text = str(pow(2, number))
                    text_width, text_height = draw.textsize(text, font=font)
                    coord_x = TEXT_COORDS[number % 4][0] - (text_width / 2)
                    coord_y = TEXT_COORDS[number % 4][1] - (text_height / 2)
                    draw.text(
                        (coord_x, coord_y),
                        text,
                        font=font,
                        fill="#000"
                    )
            images.append(image)

    if black_back == "yes":
        # Mode "1" is a 1-bit image; the default fill of 0 renders black.
        black_card = Image.new("1", (IMAGE_SIZE_X, IMAGE_SIZE_Y))
        images.append(black_card)

    return images
def subtitle(request, resource):
    """Return the subtitle string of the resource.

    Used after the resource name in the filename, and also printed on
    the resource image.

    Args:
        request: HTTP request object.
        resource: Object of resource data.

    Returns:
        Text for the subtitle (string).
    """
    if retrieve_query_parameter(request, "dot_counts") == "yes":
        dot_text = "with dot counts"
    else:
        dot_text = "without dot counts"
    back_text = ("with black back"
                 if retrieve_query_parameter(request, "black_back") == "yes"
                 else "without black back")
    return "{} bits - {} - {} - {}".format(
        retrieve_query_parameter(request, "number_bits"),
        dot_text,
        back_text,
        retrieve_query_parameter(request, "paper_size")
    )
def valid_options():
    """Provide a dictionary of all valid parameters.

    This excludes the header text parameter.

    Returns:
        All valid options (dict).
    """
    return {
        "number_bits": ["4", "8", "12"],
        "dot_counts": ["yes", "no"],
        "black_back": ["yes", "no"],
        "paper_size": ["a4", "letter"],
    }
| Python | 0.006224 | |
864bf2bb3bdb731d0725cc33891145f2a7da17d3 | Add initialization functions for database connection | db/common.py | db/common.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm.session import sessionmaker
from sqlalchemy.schema import MetaData
from sqlalchemy.ext.declarative import declarative_base
from utils import get_connection_string_from_config_file
cfg_src = os.path.join(os.path.dirname(__file__), "..", r"_config.ini")
conn_string = get_connection_string_from_config_file(cfg_src, 'db_conn_3')
Engine = create_engine(conn_string, echo=False, pool_size=5)
Session = sessionmaker(bind=Engine)
Base = declarative_base(metadata=MetaData(schema='nhl', bind=Engine))
@contextmanager
def session_scope():
    """Provide a transactional scope around a series of DB operations.

    Yields a new SQLAlchemy session bound to the module-level engine.
    Any exception triggers a rollback (and is re-raised); the session
    is always closed on exit.
    """
    session = Session()
    try:
        yield session
    except:
        # Roll back before re-raising so the connection returns to the
        # pool in a clean state.
        session.rollback()
        raise
    finally:
        session.close()
| Python | 0.000001 | |
ba0e1d90f5f33ed63c56c2788873624731a7a0b5 | add file | regxtest.py | regxtest.py | '''
((abc){4})
[1-5]{5}
5+
5*
5?
'''
EQUL = 1
COUNT = 2
ANY = 3
TREE = 4
class Node:
    """A node of the parsed regular-expression tree.

    BUG FIX: the original ``class Node`` was missing the trailing colon,
    which made the whole module a SyntaxError.

    Attributes:
        type: one of the module node kinds (EQUL, COUNT, ANY, TREE).
        c: the character payload for this node (None until assigned).
        children: child nodes (populated for TREE nodes).
        parent: enclosing TREE node, or None for the root.
    """

    def __init__(self, ntype, parent=None):
        self.type = ntype
        self.c = None
        self.children = []
        self.parent = parent
class RegX:
    """Parse a tiny regular-expression subset into a Node tree.

    Supported syntax (per the module docstring): literal characters,
    groups ``( )``, character classes ``[ ]`` and the count suffixes
    ``{n}``, ``?``, ``+`` and ``*``.

    BUG FIXES versus the original sketch: ``parseany`` had no body,
    ``elif:`` was a SyntaxError, the group recursion dropped ``self``,
    literals were never consumed and the ``while True`` loop never
    terminated.  COUNT nodes are still inserted *before* the token they
    modify, preserving the original convention.
    """

    def __init__(self, regstr):
        self.curnode = Node(TREE)
        self.tokens = self.curnode.children
        self.parseregx(regstr)

    def parseany(self, regstr, idx):
        """Consume a ``[...]`` character class starting at *idx*.

        The characters of the class are stored on the ANY node that
        parseregx just appended.  Returns the index of the first
        character after the closing ``]``.
        """
        node = self.tokens[-1]
        idx += 1  # skip '['
        chars = []
        while idx < len(regstr) and regstr[idx] != ']':
            chars.append(regstr[idx])
            idx += 1
        node.c = ''.join(chars)
        return idx + 1  # skip ']'

    def parseregx(self, regstr, idx=0):
        """Parse *regstr* from *idx*, appending nodes to the current tree."""
        regstr_len = len(regstr)
        while idx < regstr_len:
            ch = regstr[idx]
            if ch == '[':
                self.tokens.append(Node(ANY, self.curnode))
                idx = self.parseany(regstr, idx)
            elif ch == '{':
                # A counted repetition applies to the preceding token.
                newnode = Node(COUNT, self.curnode)
                end = regstr.index('}', idx)
                newnode.c = regstr[idx + 1:end]
                self.tokens.insert(-1, newnode)
                idx = end + 1
            elif ch == '(':
                newnode = Node(TREE, self.curnode)
                self.tokens.append(newnode)
                self.curnode = newnode
                self.tokens = newnode.children
                idx += 1
            elif ch == ')':
                self.curnode = self.curnode.parent
                self.tokens = self.curnode.children
                idx += 1
            elif ch in '?+*':
                newnode = Node(COUNT, self.curnode)
                newnode.c = ch
                self.tokens.insert(-1, newnode)
                idx += 1
            else:
                # Plain literal character (including '.').
                newnode = Node(EQUL, self.curnode)
                newnode.c = ch
                self.tokens.append(newnode)
                idx += 1
        return idx
0100a3468dbada1e7ec3cbeaebda7ee11874ab8b | find similarly related words | relation.py | relation.py | #!/usr/bin/env python
"""Given phrases p1 and p2, find nearest neighbors to both and rank
pairs of neighbors by similarity to vec(p2)-vec(p1) in given word
representation.
The basic idea is a straightforward combination of nearest neighbors
and analogy as in word2vec (https://code.google.com/p/word2vec/).
"""
import sys
import os
import numpy
import wvlib
from distance import process_options, get_query
def process_query(wv, query, options=None):
try:
vectors = [wv.words_to_vector(q) for q in query]
except KeyError, e:
print >> sys.stderr, 'Out of dictionary word: %s' % str(e)
return False
words = [w for q in query for w in q]
if not options.quiet:
for w in words:
print '\nWord: %s Position in vocabulary: %d' % (w, wv.rank(w))
nncount = 100 # TODO: add CLI parameter
nearest = [wv.nearest(v, n=nncount, exclude=words) for v in vectors]
nearest = [[(n[0], n[1], wv[n[0]]) for n in l] for l in nearest]
assert len(nearest) == 2, 'internal error'
pairs = [(n1, n2,
numpy.dot(wvlib.unit_vector(vectors[1]-vectors[0]+n2[2]), n1[2]))
for n1 in nearest[0] for n2 in nearest[1] if n1[0] != n2[0]]
pairs.sort(lambda a, b: cmp(b[2], a[2]))
nncount = options.number if options else 10
for p in pairs[:nncount]:
print '%s\t---\t%s\t%f' % (p[1][0], p[0][0], p[2])
return True
def query_loop(wv, options):
while True:
try:
query = get_query(options.prompt, options.multiword,
options.exit_word, 3)
except EOFError:
return 0
if not query:
continue
if options.echo:
print query
if len(query) < 2:
print >> sys.stderr, 'Enter two words/phrases'
continue
if len(query) > 2:
print >> sys.stderr, 'Ignoring words/phrases after the second'
query = query[:3]
process_query(wv, query, options)
def main(argv=None):
    """Entry point: parse options, then run the interactive query loop."""
    if argv is None:
        argv = sys.argv
    try:
        wv, options = process_options(argv[1:])
    except Exception, e:
        print >> sys.stderr, 'Error: %s' % str(e)
        return 1
    return query_loop(wv, options)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| Python | 0.999999 | |
11380e7db081960757cbde2c4d2e69b695648782 | Add routine to calculate density. | density.py | density.py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.DENSITY
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
def density(r, norm, rs, alpha, beta, gamma):
    """
    Density profile of a generalised Hernquist model.

    INPUTS
      r     : radial variable (requires unit)
      norm  : normalisation (requires unit)
      rs    : scale radius of model (requires unit)
      alpha : sharpness of transition between inner and outer
      beta  : outer logarithmic slope
      gamma : inner logarithmic slope
    """
    x = r / rs
    return norm * x**(-gamma) * (1 + x**alpha)**((gamma - beta) / alpha)
| Python | 0 | |
ec853b51c26e9a0e36a9a213fbf6a47c679c3b12 | Add cleaning pipeline for dictionaries | scripts/dcleaner.py | scripts/dcleaner.py | #!/usr/bin/env python3
# Author = Thamme Gowda tg@isi.edu
# Date = August 9th, 2017
# Title = Dictionary Cleaner
# Description =
# This script has a dictionary cleaning pipeline.
# Note: You have to setup your rules in get_rules() function
# TODO: Do spelling correction of english words using edit distance (for glosbe)
from argparse import ArgumentParser
from codecs import open as copen
import sys
import re
import string
import unicodedata
# Ascii punctuations, keeping apostrophe/period/comma which carry meaning.
ascii_puncts = set(string.punctuation) - set('\'.,')
# Unicode Punctuations: every code point in a "P*" Unicode category.
unicode_puncts = {ch for ch in map(chr, range(sys.maxunicode)) if unicodedata.category(ch).startswith('P')}
# exclude apostrophe - it has special meaning
unicode_puncts -= set("',-፣") # do not mark these as bad characters from OCR
# NOTE(review): set(range(9)) adds the *ints* 0-8, which can never compare
# equal to characters of a string; chr() (control chars \x00-\x08) was
# probably intended -- confirm before relying on this filter.
OCR_gibber = unicode_puncts | set(range(9))
unicode_puncts |= set(",") # remove any left over comma at the end
# str.translate table mapping every punctuation code point to deletion.
unicode_punct_tab = {i: None for i in map(ord, unicode_puncts)}
# Back quote and right single quotation mark both become an apostrophe.
normalize_punct_tab = {ord('`'): '\'', ord('’'): '\''}
stop_words = {'be', 'get'}
# Cleaning functions START
# args:
# foreign_word : unicode string
# eng_word : unicode string
# returns:
# list of (f_word, e_word)
# List can have any number of records, it shall ne empty when the record needs to be dropped
#
def small_parenthesis(f, e):
    """Strip short parenthesised tags (length <= 2) such as ``(n)``/``(v)``.

    These are usually part-of-speech markers.  Returns the cleaned pair,
    or an empty list when either side becomes empty.
    """
    pos_tag = r'\s*\([^\\)]{,2}\)'
    cleaned_f = re.sub(pos_tag, '', f)
    cleaned_e = re.sub(pos_tag, '', e)
    if (cleaned_f, cleaned_e) != (f, e):
        print(">Remove POS: %s -> %s \t %s -> %s" % (f, cleaned_f, e, cleaned_e))
    if cleaned_f and cleaned_e:
        return [(cleaned_f, cleaned_e)]
    return []
def normalize_puncts(f, e, t_table=normalize_punct_tab):
    """Replace fancy punctuation with its plain ASCII equivalent.

    For example the back quote and the right single quotation mark are
    both mapped to a plain apostrophe.
    """
    normalized = (f.translate(t_table), e.translate(t_table))
    return [normalized]
def ocr_gibberish_clean(f, e, bad_chars=OCR_gibber):
    """Drop the record entirely when either side contains OCR gibberish.

    A record survives only when no character of either word appears in
    *bad_chars*.
    """
    if not (set(f) & bad_chars) and not (set(e) & bad_chars):
        return [(f, e)]
    print(">OCR GIBBERISH:", f, e)
    return []
def comma_swap(f, e):
    """Swap two words separated by a comma, then drop stop words.

    For example: 'away, get' -> 'get away' -> 'away'.
    """
    if ',' in e:
        halves = e.split(',')
        if len(halves) == 2:
            print(">SWAP_COMMA:", e)
            e = '%s %s' % (halves[1], halves[0])
    # Re-tokenise to normalise whitespace and remove stop words.
    kept = [tok for tok in e.split() if tok not in stop_words]
    e = ' '.join(kept)
    if f and e:
        return [(f, e)]
    return []
def comma_synonyms(f, e):
    """Expand a comma-separated gloss into one record per synonym.

    For example ('house, home') -> ('house'), ('home').
    """
    if ',' in e:
        synonyms = [part.strip() for part in e.split(',') if part.strip()]
        if len(synonyms) > 1:
            print(">COMMA_SYNS", f, e, '-->', synonyms)
            return [(f, syn) for syn in synonyms]
    return [(f, e)]
def abbreviations_expand(f, e):
    """Expand a parenthesised abbreviation in *f* as an extra synonym.

    'United Nations (UN)' yields records for both 'United Nations ' and
    'UN', each paired with the same gloss.
    """
    matched = re.match(r'(.+)\(([a-zA-Z0-9\\. ]+)\)', f)
    if matched is None:
        return [(f, e)]
    print(">Splitting %s into %s" % (f, matched.groups()))
    return [(grp, e) for grp in matched.groups() if grp]
def parenthesis_inner_drop(f, e):
    """Remove an inner parenthesised word from the foreign entry.

    For example: 'take off (slowly)' -> 'take off '.
    """
    matched = re.match(r'(.+)\([a-zA-Z0-9\\. ]+\)(.*)', f)
    if matched:
        print(">Splitting %s into %s" % (f, matched.groups()))
        # BUG FIX: the parenthesis-stripped result belongs to the
        # matched foreign word f, not to the English gloss e (the
        # original overwrote e with pieces of f).
        f = ' '.join(filter(lambda x: x, matched.groups()))
    return [(f, e)]
def remove_puncts(f, e, t_table=unicode_punct_tab):
    """Strip all punctuation; drop the record if either side empties."""
    stripped_f = f.translate(t_table)
    stripped_e = e.translate(t_table)
    if stripped_f and stripped_e:
        return [(stripped_f, stripped_e)]
    return []
# Cleaning Functions END
pre_mappers = [lambda k, v: [(k.strip(), v.strip())], # white space remover
small_parenthesis, normalize_puncts]
post_mappers = [remove_puncts]
rules_cache = {}
def get_rules(tag):
    """Build (and memoise) the cleaning pipeline for a dictionary *tag*.

    Every pipeline starts with the shared pre_mappers and ends with the
    shared post_mappers; tag-specific rules go in between.
    """
    cached = rules_cache.get(tag)
    if cached is not None:
        return cached
    rules = list(pre_mappers)
    if 'OCR' in tag:
        rules.append(ocr_gibberish_clean)
    if 'tg-ebookOCR' in tag:
        rules.append(comma_swap)
        rules.append(comma_synonyms)
    if 'tg-glosbe-eng_il5' == tag:
        rules.append(comma_synonyms)
    if 'rpi-ne-il6_eng' == tag:
        rules.append(parenthesis_inner_drop)
    if 'il6' in tag:
        rules.append(abbreviations_expand)
    rules.extend(post_mappers)
    rules_cache[tag] = rules
    return rules
# transformation pipeline, where mappers can produce zero or more records
def transform(tag, fgn_word, eng_word, mappers, i=0):
if i >= len(mappers):
yield (tag, fgn_word, eng_word)
else:
for fw, ew in mappers[i](fgn_word, eng_word):
yield from transform(tag, fw, ew, mappers, i+1)
def cleanup(src_file):
    """Stream cleaned records from a tab-separated source file.

    Each input line is ``tag<TAB>foreign<TAB>english``; every record is
    pushed through the tag-specific rule pipeline from get_rules().
    The trailing newline on eng_word is removed later by the
    whitespace-stripping pre_mapper.
    """
    with copen(src_file, 'r', encoding='utf-8') as inp:
        for line in inp:
            tag, fgn_word, eng_word = line.split('\t')
            rules = get_rules(tag)
            yield from transform(tag, fgn_word, eng_word, rules)
def dump_stream(recs, out=None):
    """Write (tag, foreign, english) records as TSV lines.

    *out* may be None (write to stdout), a path string (opened as UTF-8
    and closed afterwards), or an already-open file object.
    """
    close_after = False
    if out is None:
        out = sys.stdout
    elif type(out) is str:
        out = copen(out, 'w', encoding='utf-8')
        close_after = True
    for rec in recs:
        out.write("%s\t%s\t%s\n" % rec)
    if close_after:
        out.close()
if __name__ == '__main__':
    # CLI entry point: read the raw dictionary file, clean it with the
    # tag-specific pipelines, and dump the result as TSV.
    parser = ArgumentParser(description='Dictionary Cleaner')
    parser.add_argument('-i', '--in', help='Input CSV file', required=True)
    parser.add_argument('-o', '--out', help='Output TSV File. Default=STDOUT', required=False)
    # vars() gives dict access, needed because 'in' is a Python keyword
    # and so cannot be an attribute lookup on the namespace.
    args = vars(parser.parse_args())
    recs = cleanup(args['in'])
    dump_stream(recs, args['out'])
| Python | 0 | |
3fb3662e58e35ccb283074c1078e1c9e7aaf88ed | Add live test for session | LendingClub/tests/live_session_test.py | LendingClub/tests/live_session_test.py | #!/usr/bin/env python
import sys
import unittest
import getpass
from logger import TestLogger
sys.path.insert(0, '.')
sys.path.insert(0, '../')
sys.path.insert(0, '../../')
from LendingClub import session
class LiveTestSession(unittest.TestCase):
    """Interactive live tests for LendingClub session authentication.

    test_login prompts the operator for real credentials, so this suite
    is meant to be run manually, not in CI.
    """

    http = None       # unused placeholder
    session = None    # fresh session.Session per test
    logger = None     # TestLogger capturing session output

    def setUp(self):
        self.logger = TestLogger()
        self.session = session.Session(logger=self.logger)

    def tearDown(self):
        pass

    def test_login(self):
        """ test_valid_login
        Test login with credentials from the user
        """
        # Credentials are read interactively; getpass hides the password.
        print '\n\nEnter a valid LendingClub account information...'
        email = raw_input('Email:')
        password = getpass.getpass()
        self.assertTrue(self.session.authenticate(email, password))
        print 'Authentication successful'

    def test_invalid_login(self):
        """ test_invalid_password
        Test login with the wrong password
        """
        self.assertRaises(
            session.AuthenticationError,
            lambda: self.session.authenticate('test@test.com', 'wrongsecret')
        )


if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
9e4858e652fba57f767a9c6d921853a6487301bd | Add a test for the version string parsing code | epsilon/test/test_version.py | epsilon/test/test_version.py | """
Tests for turning simple version strings into twisted.python.versions.Version
objects.
"""
from epsilon import asTwistedVersion
from twisted.trial.unittest import SynchronousTestCase
class AsTwistedVersionTests(SynchronousTestCase):
    """Tests for epsilon.asTwistedVersion."""

    def test_simple(self):
        """
        A simple version string can be turned into a Version object.
        """
        version = asTwistedVersion("package", "1.2.3")
        self.assertEqual(version.package, "package")
        self.assertEqual(version.major, 1)
        self.assertEqual(version.minor, 2)
        self.assertEqual(version.micro, 3)
| Python | 0.00002 | |
dff8d43edd0e831605f1b1c3b2d261fcf05dca9a | Add wordpress guid replace script | script/wordpress/guid.py | script/wordpress/guid.py | import MySQLdb
import urlparse
poe = "https://wordpress.wordpress"
db = MySQLdb.connect(db="wordpress",user="",passwd="")
c = db.cursor()
sql = "SELECT ID,guid from wp_posts;"
c.execute(sql)
records = c.fetchall()
for record in records:
o = urlparse.urlparse(record[1])
url = poe + o.path
if o.query:
url = url + "?" + o.query
print "UPDATE wp_posts SET guid ='" + url + "' where ID = '" + str(record[0]) + "';"
| Python | 0 | |
c48ec87b3e1c672864fc8c5bfe1aa551c01846ee | add basic tcp server | Server.py | Server.py | """
File: Server.py
Author: Daniel Schauenberg <schauend@informatik.uni-freiburg.de>
Description: class for implementing a search engine web server
"""
import socket
from operator import itemgetter
class Webserver:
""" class for implementing a web server, serving the
inverted index search engine to the outside
(or inside) world
"""
def __init__(self, host='', port=3366):
""" constructor method to set the webserver basic settings
"""
self.host = host
self.port = port
self.socket = None
def bind_to_port(self):
""" simple method to make the port binding easier
"""
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.bind((self.host,self.port))
# number of queued connections
self.socket.listen(3)
# create endless loop waiting for connections
# can be interrupted via CTRL-C
try:
while True:
# get socket object and client address
connection, clientsock = self.socket.accept()
print "Client %s connected with port %s." % (itemgetter(0)(clientsock),itemgetter(1)(clientsock))
while True:
data = connection.recv(8192)
if not data: break
#connection.sendall(data)
print data
connection.close()
print clientaddr
finally:
# don't leave socket open when going home
self.socket.close()
def main():
    # Instantiate the server on the default host/port and start serving.
    foo = Webserver()
    foo.bind_to_port()

if __name__ == '__main__':
    main()
| Python | 0.000001 | |
94403aedd21947c30b5d8159fcd42288050afc3a | Create 6kyu_personalized_brand_list.py | Solutions/6kyu/6kyu_personalized_brand_list.py | Solutions/6kyu/6kyu_personalized_brand_list.py | from collections import OrderedDict
def sorted_brands(history):
    """Return brand names from *history* ordered by descending frequency.

    history is an iterable of dicts, each carrying a 'brand' key.
    Ties keep first-appearance order: Counter preserves insertion order
    and most_common() keeps equal counts in the order first encountered,
    matching the original stable-sort behaviour.
    """
    from collections import Counter
    counts = Counter(entry['brand'] for entry in history)
    return [brand for brand, _ in counts.most_common()]
| Python | 0 | |
48cac034e7b402e2d4b3cb52d2cae51b44928e0b | add Faster R-CNN | examples/faster_rcnn/eval.py | examples/faster_rcnn/eval.py | from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import voc_detection_label_names
from chainercv.datasets import VOCDetectionDataset
from chainercv.evaluations import eval_detection_voc
from chainercv.links import FasterRCNNVGG16
from chainercv.utils import apply_detection_link
def main():
    """Evaluate a pretrained Faster R-CNN (VGG16) on VOC2007 test."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    args = parser.parse_args()

    model = FasterRCNNVGG16(pretrained_model='voc07')

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()

    model.use_preset('evaluate')

    dataset = VOCDetectionDataset(
        year='2007', split='test', use_difficult=True, return_difficult=True)
    iterator = iterators.SerialIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)

    start_time = time.time()
    processed = 0

    def hook(
            pred_bboxes, pred_labels, pred_scores, gt_values):
        # BUG FIX: `processed` lives in main()'s scope, so the hook must
        # declare it nonlocal (the old `global processed` referred to a
        # module-level name that never existed).  It is also an int, so
        # the old len(processed) calls raised TypeError.
        nonlocal processed
        processed += len(pred_bboxes)
        fps = processed / (time.time() - start_time)
        sys.stdout.write(
            '\r{:d} of {:d} images, {:.2f} FPS'.format(
                processed, len(dataset), fps))
        sys.stdout.flush()

    pred_bboxes, pred_labels, pred_scores, gt_values = \
        apply_detection_link(model, iterator, hook=hook)
    gt_bboxes, gt_labels, gt_difficults = gt_values

    eval_ = eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        use_07_metric=True)

    print()
    print('mAP: {:f}'.format(eval_['map']))
    for l, name in enumerate(voc_detection_label_names):
        if l in eval_:
            print('{:s}: {:f}'.format(name, eval_[l]['ap']))
        else:
            print('{:s}: -'.format(name))


if __name__ == '__main__':
    main()
| Python | 0.000384 | |
b098f2ad30339c0efb9728741b796fe9f2db7f74 | Make sure mopidy startup doesn't block | mycroft/skills/playback_control/mopidy_service.py | mycroft/skills/playback_control/mopidy_service.py | from mycroft.messagebus.message import Message
from mycroft.util.log import getLogger
from mycroft.skills.audioservice import AudioBackend
from os.path import dirname, abspath, basename
import sys
import time
logger = getLogger(abspath(__file__).split('/')[-2])
__author__ = 'forslund'
sys.path.append(abspath(dirname(__file__)))
Mopidy = __import__('mopidypost').Mopidy
class MopidyService(AudioBackend):
    """Mycroft audio-service backend driving a Mopidy music server."""

    def _connect(self, message):
        """Try to reach the Mopidy server; on failure, retry via the bus.

        The retry re-emits 'MopidyServiceConnect' (after a sleep) so the
        next attempt happens on a later bus callback instead of blocking
        Mycroft's startup.
        """
        url = 'http://localhost:6680'
        if self.config is not None:
            url = self.config.get('url', url)
        try:
            self.mopidy = Mopidy(url)
        except:
            # Only log the first failure to keep the log quiet while the
            # server comes up.
            if self.connection_attempts < 1:
                logger.debug('Could not connect to server, will retry quietly')
            self.connection_attempts += 1
            time.sleep(10)
            self.emitter.emit(Message('MopidyServiceConnect'))
            return

        logger.info('Connected to mopidy server')

    def __init__(self, config, emitter, name='mopidy'):
        self.connection_attempts = 0
        self.emitter = emitter
        self.config = config
        self.name = name
        self.mopidy = None  # set by _connect() once the server is reachable
        self.emitter.on('MopidyServiceConnect', self._connect)
        # Connect through the message bus rather than calling _connect()
        # directly, so construction itself never blocks.
        self.emitter.emit(Message('MopidyServiceConnect'))

    def supported_uris(self):
        # Nothing is playable until the server connection exists.
        if self.mopidy:
            return ['file', 'http', 'https', 'local', 'spotify', 'gmusic']
        else:
            return []

    def clear_list(self):
        self.mopidy.clear_list()

    def add_list(self, tracks):
        self.mopidy.add_list(tracks)

    def play(self):
        self.mopidy.play()

    def stop(self):
        # Empty the tracklist, then stop playback.
        self.mopidy.clear_list()
        self.mopidy.stop()

    def pause(self):
        self.mopidy.pause()

    def resume(self):
        self.mopidy.resume()

    def next(self):
        self.mopidy.next()

    def previous(self):
        self.mopidy.previous()

    def lower_volume(self):
        self.mopidy.lower_volume()

    def restore_volume(self):
        self.mopidy.restore_volume()

    def track_info(self):
        """Return name/artist/album of the currently playing track."""
        info = self.mopidy.currently_playing()
        ret = {}
        ret['name'] = info.get('name', '')
        if 'album' in info:
            ret['artist'] = info['album']['artists'][0]['name']
            ret['album'] = info['album'].get('name', '')
        else:
            ret['artist'] = ''
            ret['album'] = ''
        return ret
| from mycroft.messagebus.message import Message
from mycroft.util.log import getLogger
from mycroft.skills.audioservice import AudioBackend
from os.path import dirname, abspath, basename
import sys
import time
logger = getLogger(abspath(__file__).split('/')[-2])
__author__ = 'forslund'
sys.path.append(abspath(dirname(__file__)))
Mopidy = __import__('mopidypost').Mopidy
class MopidyService(AudioBackend):
def _connect(self, message):
logger.debug('Could not connect to server, will retry quietly')
url = 'http://localhost:6680'
if self.config is not None:
url = self.config.get('url', url)
try:
self.mopidy = Mopidy(url)
except:
if self.connection_attempts < 1:
logger.debug('Could not connect to server, will retry quietly')
self.connection_attempts += 1
time.sleep(10)
self.emitter.emit(Message('MopidyServiceConnect'))
return
logger.info('Connected to mopidy server')
def __init__(self, config, emitter, name='mopidy'):
self.connection_attempts = 0
self.emitter = emitter
self.config = config
self.name = name
self.mopidy = None
self.emitter.on('MopidyServiceConnect', self._connect)
self._connect(None)
def supported_uris(self):
if self.mopidy:
return ['file', 'http', 'https', 'local', 'spotify', 'gmusic']
else:
return []
def clear_list(self):
self.mopidy.clear_list()
def add_list(self, tracks):
self.mopidy.add_list(tracks)
def play(self):
self.mopidy.play()
def stop(self):
self.mopidy.clear_list()
self.mopidy.stop()
def pause(self):
self.mopidy.pause()
def resume(self):
self.mopidy.resume()
def next(self):
self.mopidy.next()
def previous(self):
self.mopidy.previous()
def lower_volume(self):
self.mopidy.lower_volume()
def restore_volume(self):
self.mopidy.restore_volume()
def track_info(self):
info = self.mopidy.currently_playing()
ret = {}
ret['name'] = info.get('name', '')
if 'album' in info:
ret['artist'] = info['album']['artists'][0]['name']
ret['album'] = info['album'].get('name', '')
else:
ret['artist'] = ''
ret['album'] = ''
return ret
| Python | 0 |
7a6add647200e4fb1cb4506f7ec40a4f4424b43d | Create Human_tracker_arduino.py | Human_tracker_arduino.py | Human_tracker_arduino.py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Created during Winter Semester 2015
OpenCV Human face tracker combined with arduino powered bot to
follow humans.
@authors:
Yash Chandak Ankit Dhall
TODO:
convert frame specific values to percentages
-------------------------------------------------------------------------------
"""
import numpy as np
import sys
import time
"""
PySerial library required for arduino connection
OpenCV library requierd for face tracking
"""
import serial
import cv2
"""
Arduino connected at port No. COM28,
Confirm and change this value accordingly from control panel
Baud Rate = 9600
"""
arduino = serial.Serial('COM28', 9600)
time.sleep(2) # waiting the initialization...
print("initialised")
#gets the direction for Arduino serial
def direction(bound, initArea=40000):
    """
    Direction control Index:
    '<' , '>' are the frame check bits for serial communication
    Numbers represent the direction to be moved as per their position on numpad
    1: Back Left
    2: Back
    3: Back right
    4: Left
    5: Stay still
    6: Right
    7: Front Left
    8: Forward
    9: Forward right
    """
    # bound is the tracked face rectangle (x, y, w, h); initArea is the
    # face area treated as "comfortable distance" from the subject.
    #anchor the centre position of the image
    # (assumes a 640x480 frame -- TODO confirm against the capture size)
    center=(320, 240)
    #current rectangle center
    curr = (bound[0] + bound[2]/2, bound[1]+bound[3]/2)
    out=0
    flag=0  # NOTE(review): flag is never used below
    fb = 0 #0-stay 1-fwd 2-bwd
    lr = 0 #0-stay 1-left 2-right
    #if the object is coming closer i.e. it's size is increasing then move bwd
    if bound[2]*bound[3] > (initArea+5000) or bound[1]<50 :
        fb = 2
    #if the object os moving away i.e. it's size is decreasing then move towards it
    elif bound[2]*bound[3] < (initArea-5000) or (bound[1]+bound[3])>430 :
        fb = 1
    else :
        fb = 0
    #move right
    if curr[0] > (center[0] + 100):
        lr = 2
    #move left
    elif curr[0] < (center[0] - 100):
        lr = 1
    else:
        lr = 0
    # Combine the fwd/bwd and left/right decisions into the numpad-style
    # code documented in the docstring above.
    if lr == 0 and fb == 0:
        out = 5
        print "stay"
    elif lr == 0 and fb == 1:
        out =8
        print "fwd"
    elif lr == 0 and fb == 2:
        out = 2
        print "back"
    elif lr == 1 and fb == 0:
        out = 4
        print "left"
    elif lr == 1 and fb == 1:
        out = 7
        print "fwd left"
    elif lr == 1 and fb == 2:
        out = 1
        print "left back"
    elif lr == 2 and fb == 0:
        out = 6
        print "right"
    elif lr == 2 and fb == 1:
        out = 9
        print "fwd right"
    elif lr == 2 and fb == 2:
        out = 3
        print "bwd right"
    else :
        out = 5
        print "Stay Still"
    #Write the encoded direction value on the serial communication line
    print out
    arduino.write('<')
    arduino.write(str(out))
    arduino.write('>')
def detectAndDisplay(frame):
    """Detect the largest face in *frame*, drive the bot toward it, and
    show the annotated frame in the 'frame' window."""
    #use OpenCV HAAR face detection algorithm to detect faces
    faces = cascade.detectMultiScale(frame, scaleFactor=1.1, minNeighbors=3,
                                     minSize=(30, 30),maxSize=(500,500),
                                     flags=cv2.cv.CV_HAAR_SCALE_IMAGE)
    #if any face is detected then process else continue searching
    if(len(faces)!=0):
        #If number of faces in the image is more than 1
        #Then choose the one with maximum size
        max_area=-1
        i=0
        for (x,y,w,h) in faces:
            if w*h > max_area:
                max_area=w*h
                pos=i
            i=i+1
        RECT=faces[pos]
        #Mark the face being tracked on the image display
        cv2.rectangle(frame, (RECT[0], RECT[1]), (RECT[0]+RECT[2], RECT[1]+RECT[3]), (0, 255, 0), 2)
        #draw_str(frame, (RECT[0], RECT[3]+16), 'x: %.2f y: %.2f size: %.2f' % (RECT[2]-RECT[0])/2 % (RECT[3]-RECT[1])/2 % RECT[2]*RECT[3])
        #Put the text details (centre x, centre y, area) about the ROI on the display
        cv2.putText(frame, `RECT[0] + RECT[2]/2`+' '+`RECT[1]+RECT[3]/2`+' '+`RECT[2]*RECT[3]`, (RECT[0],RECT[1]+RECT[3]), cv2.FONT_HERSHEY_SIMPLEX , 1, (0,0,255));
        #compute direction for the arduino bot to be moved.
        direction(RECT)
    else:
        # No face found: send "stay still" (code 5) while searching.
        print 'Search...'
        arduino.write('<')
        arduino.write(str(5))
        arduino.write('>')
    cv2.imshow('frame',frame)
cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

# Camera index 1 (external webcam); grab once so retrieve() has a frame.
cap = cv2.VideoCapture(1)
cap.grab()
ret, frame = cap.retrieve()
cv2.namedWindow('frame')

#Run the tracker in infinite loop
while(1):
    #grab the frames from web camera
    # NOTE(review): retrieve() without a fresh grab() may keep returning
    # the frame from the initial grab -- confirm on hardware.
    ret, frame = cap.retrieve()
    if ret ==0:
        print "frame not loaded"
    if ret==True:
        #Resize the frame for faster computation
        #cv2.resize(frame,(240,320))
        #Process the frame and pass data to arduino
        detectAndDisplay(frame)
    #cv2.imshow('input',frame)
    #press ESC to exit program
    ch = cv2.waitKey(1)
    if ch==27:
        break

#Free up memory on exit
cap.release()
cv2.destroyAllWindows()
arduino.close()
| Python | 0.000008 | |
874e2c35bb0aea38a1161d96b8af484a69336ea6 | Add htpasswd.py to the contrib tree as it may be useful more generally than just for the Testing branch | contrib/htpasswd.py | contrib/htpasswd.py | #!/usr/bin/python
"""Replacement for htpasswd"""
import os
import random
try:
import crypt
except ImportError:
import fcrypt as crypt
from optparse import OptionParser
def salt():
    """Return a two-character salt for crypt().

    Characters are drawn from the traditional DES-crypt salt alphabet
    [a-zA-Z0-9./] (this resolves the old FIXME about additional legal
    characters).

    NOTE: random is not cryptographically strong; a salt only needs
    uniqueness, matching the original behaviour, but the secrets module
    would be the more defensive choice.
    """
    letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789./'
    return random.choice(letters) + random.choice(letters)
class HtpasswdFile:
    """In-memory model of an htpasswd file (``username:crypt-hash`` lines)."""

    def __init__(self, filename, create=False):
        """Load *filename*; unless create=True, it must already exist."""
        self.entries = []
        self.filename = filename
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def load(self):
        """(Re)read all username/hash entries from the file."""
        self.entries = []
        # `with` closes the handle even on parse errors (the original
        # left the file object dangling).
        with open(self.filename, 'r') as handle:
            for line in handle:
                # split(':', 1) keeps any ':' inside the hash intact.
                username, hash = line.split(':', 1)
                self.entries.append([username, hash.rstrip()])

    def save(self):
        """Write every entry back as username:hash lines."""
        with open(self.filename, 'w') as handle:
            handle.writelines(
                ["%s:%s\n" % (entry[0], entry[1]) for entry in self.entries])

    def update(self, username, password):
        """Set (or add) the crypt() hash for *username*."""
        hash = crypt.crypt(password, salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = hash
        else:
            self.entries.append([username, hash])

    def delete(self, username):
        """Remove every entry for *username*."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]
def main():
    """%prog [-c] -b filename username password
    Create or update an htpasswd file"""
    # For now, we only care about the use cases that affect tests/functional.py
    parser = OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
                      help='Batch mode; password is passed on the command line IN THE CLEAR.')
    parser.add_option('-c', action='store_true', dest='create', default=False,
                      help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user', default=False,
                      help='Remove the given user from the password file.')
    options, args = parser.parse_args()
    if not options.batch:
        # Replaces the old bare ``assert(options.batch)``: asserts are
        # stripped under ``python -O`` and give an unhelpful traceback.
        parser.error('only batch mode (-b) is supported')
    if len(args) < 2 or (not options.delete_user and len(args) < 3):
        # Fail with a usage message instead of an IndexError/ValueError.
        parser.error('expected: filename username [password]')
    # Non-option arguments
    filename, username = args[:2]
    password = None if options.delete_user else args[2]
    passwdfile = HtpasswdFile(filename, create=options.create)
    if options.delete_user:
        passwdfile.delete(username)
    else:
        passwdfile.update(username, password)
    passwdfile.save()


if __name__ == '__main__':
    main()
| Python | 0.000003 | |
88eb8887bd71702fbf0c5095d8c2d637876de4b8 | Add the upload_file_test | examples/upload_file_test.py | examples/upload_file_test.py | from seleniumbase import BaseCase
class FileUploadButtonTests(BaseCase):
    """ The main purpose of this is to test the self.choose_file() method. """

    def test_file_upload_button(self):
        # W3Schools "Tryit" page hosting a bare <input type="file"> demo.
        self.open("https://www.w3schools.com/jsref/tryit.asp"
                  "?filename=tryjsref_fileupload_get")
        # Wait for the Google-ads containers, then strip them from the DOM.
        self.wait_for_element('[id*="google_ads"]')
        self.remove_elements('[id*="google_ads"]')
        # The demo itself renders inside the result iframe.
        self.switch_to_frame('iframeResult')
        # Enlarge the tiny file-input widget so demo runs are easier to watch.
        self.add_css_style(
            'input[type="file"]{zoom: 1.5;-moz-transform: scale(1.5);}')
        self.highlight('input[type="file"]')
        self.choose_file('input[type="file"]', "example_logs/screenshot.png")
        self.demo_mode = True  # Adds highlighting to the assert statement
        self.assert_element('input[type="file"]')
| Python | 0.000018 | |
a98ba6efa109383ecc1dfeb07691dc0a4a4e2a5b | Update migrations | django_afip/migrations/0002_auto_20150909_1837.py | django_afip/migrations/0002_auto_20150909_1837.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Applies on top of the app's initial schema migration.
    dependencies = [
        ('afip', '0001_initial'),
    ]

    # Redefine ``amount`` on both models as a 15-digit, 2-decimal-place
    # DecimalField; verbose_name is Spanish for "amount" ("cantidad").
    operations = [
        migrations.AlterField(
            model_name='tax',
            name='amount',
            field=models.DecimalField(decimal_places=2, max_digits=15, verbose_name='cantidad'),
        ),
        migrations.AlterField(
            model_name='vat',
            name='amount',
            field=models.DecimalField(decimal_places=2, max_digits=15, verbose_name='cantidad'),
        ),
    ]
| Python | 0.000001 | |
9668580633a1a8baaa59030e5a52d2478222cbd2 | Add cost tracking file to openstack | nodeconductor/openstack/cost_tracking.py | nodeconductor/openstack/cost_tracking.py | from . import models
from nodeconductor.cost_tracking import CostTrackingBackend
class OpenStackCostTrackingBackend(CostTrackingBackend):
    """Cost-tracking hooks for OpenStack resources."""

    @classmethod
    def get_monthly_cost_estimate(cls, resource):
        """Delegate the monthly estimate to the resource's own backend."""
        return resource.get_backend().get_monthly_cost_estimate()
| Python | 0 | |
4be42297e48421cf41275b67aecfee691ca10e9e | Create grab_junos.py | grab_junos.py | grab_junos.py | #!/usr/bin/python
# Author: Scott Reisinger
# Date: 06062015
# Purpose: Automate config grab for JUNOS devices (Or any other vendor but Juniper is the best so..)
#
# You will need to install the SSH utilities from paramiko
# If you are on a mac there is an easy tutorial here:
# http://osxdaily.com/2012/07/10/how-to-install-paramiko-and-pycrypto-in-mac-os-x-the-easy-way/
#Import some good tools this so reminds me of C++
from sys import argv
import os
import getpass
import exceptions
import logging
import subprocess
import time
# Root-logger default config so paramiko's log output has a handler.
logging.basicConfig()
#These are needed for the paramiko routine
import sys
import socket
import pprint
import paramiko
# Run timestamp (MMDDYYHHMMSS); used to name the per-device output files.
mytime = time.strftime("%m%d%y%H%M%S")
print "START TIME", mytime
# One shared SSH client for the whole run; unknown host keys auto-accepted.
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# Sub routine get_password
def get_password():
    # Prompt for a password with terminal echo disabled via stty (POSIX only).
    # NOTE(review): reads the module-level ``username`` that is only assigned
    # further down in this script -- calling this earlier raises NameError.
    #print "Please enter your password:"
    os.system("stty -echo")
    #local_password = raw_input( getpass.getuser() + "'s password")
    local_password = raw_input( username + "'s password> ")
    os.system("stty echo")
    # Emit a newline because the user's Enter key was not echoed.
    print "\n"
    return local_password
# Check TCP 22 connection
def Check_SSH(IP):
    """Return True when a TCP connection to port 22 on ``IP`` succeeds.

    Uses a 3 second timeout.  On failure a diagnostic is printed and False
    is returned.  The socket is now always closed (the original leaked it
    on the failure path) and the bare ``except:`` -- which also swallowed
    KeyboardInterrupt -- is narrowed to socket errors.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(3)
    try:
        s.connect((IP, 22))
        s.shutdown(2)
        return True
    except socket.error:
        print("%s SSH connection failed" % (IP))
        return False
    finally:
        s.close()
def IPCMD(ipAddress, command, username, password):
    # Run one command on one device over SSH; stdout goes to a per-device
    # file named <ip>_<timestamp>.txt, stderr to Error_Log.txt.
    if Check_SSH(ipAddress):
        try:
            buff_size = 2048  # NOTE(review): unused
            print "Running: %s on %s" % (command, ipAddress)
            ssh.connect(ipAddress, username=username, password=password, timeout=3)
            stdin, stdout, stderr = ssh.exec_command(command)
            # NOTE(review): readlines() consumes the stream, so the second
            # call below always prints an empty list.
            if len(stderr.readlines()) > 0:
                print stderr.readlines()
            # For verbose print, if not desired, comment out next 3 lines
            #output = stdout.readlines()
            #output = sys.stdout
            #output = map(lambda s: s.strip().encode("utf-8"), output)
            #filename
            fout = str(ipAddress + '_' + mytime + '.txt')
            ssh.close()
        except paramiko.AuthenticationException:
            print "%s Authentication failed" % (ipAddress)
        finally:
            # NOTE(review): if authentication failed above, ``fout`` and
            # ``stdout`` were never assigned and this block raises NameError.
            with open(fout, 'w') as output:
                output.write(''.join(stdout))
            with open('Error_Log.txt', 'w') as output:
                output.write(''.join(stderr))
# Lets grab the SSH credentials right from the start
print "Please enter your username: :",
username = raw_input("> ")
print "Please enter your password: :",
password = get_password()
# Now lets figure out if this is going to be a single ip or a list and a single command or a list
print "Do you want to run this for a (S)ingle IP or a (L)ist of IPs (S/L)?: ",
ipSource = raw_input("> ")
if ipSource == 'S' or ipSource == 's':
    print "Please enter the IP address you want to query: ",
    ipAddress = raw_input("> ")
elif ipSource == 'L' or ipSource == 'l':
    print "Please enter the filename of the IP List (e.g. /home/scott/IPLIST): ",
    ipList = raw_input("> ")
else:
    # NOTE(review): execution continues after this error message and the
    # dispatch below then hits NameError (ipAddress/ipList undefined).
    print "\t ERROR: S and L are the only valid responses for this prompt"
# What is our source for the commands, single or file
print "\n\nDo you want to run a (S)ingle command or a (L)ist of commands (S/L)?: ",
cmdSource = raw_input("> ")
if cmdSource == 'S' or cmdSource == 's':
    print "Please enter the command you want to run on the device(s): "
    command = raw_input("> ")
elif cmdSource == 'L' or cmdSource == 'l':
    print "Please enter the filename of the command List (e.g. /home/scott/JUNOSCMDS): ",
    cmdFile = raw_input("> ")
    COMMANDS = open(cmdFile, 'r')  # NOTE(review): handle is never used
else:
    print "\t ERROR: S and L are the only valid responses for this prompt"
def IP1CMDS(ipAddress, cmdFile):
    # One device, many commands: run each line of cmdFile against ipAddress.
    # Uses the module-level ``username``/``password`` captured above.
    fcmds = open(cmdFile, 'r')
    for CMD in fcmds.readlines():
        CMD = CMD.rstrip()
        IPCMD(ipAddress, CMD, username, password)
def IPSCMD1(ipList, command):
    # Many devices, one command.
    # NOTE(review): the loop variable shadows the file handle ``ip``.
    ip = open(ipList, 'r')
    for ip in ip.readlines():
        ip = ip.rstrip()
        IPCMD(ip, command, username, password)
def rewind(f):
    """Reset the read position of an open file object to the beginning."""
    f.seek(0, os.SEEK_SET)
def IPSCMDS(ipList, cmdFile):
    # Many devices x many commands.  The command file is rewound before each
    # device so that every device sees the full command list.
    # NOTE(review): the loop variable shadows the file handle ``ip``.
    ip = open(ipList, 'r')
    fcmds = open(cmdFile)
    for ip in ip.readlines():
        ip = ip.rstrip()
        rewind(fcmds)
        for CMD in fcmds.readlines():
            CMD = CMD.rstrip()
            print ip
            print CMD
            IPCMD(ip, CMD, username, password)
# Dispatch on the (ip source, command source) combination chosen above.
if ((ipSource == 'S' or ipSource == 's') and (cmdSource == 'S' or cmdSource == 's')):
    IPCMD(ipAddress, command, username, password)
elif ((ipSource == 'S' or ipSource == 's') and (cmdSource == 'L' or cmdSource == 'l')):
    IP1CMDS(ipAddress, cmdFile)
elif ((ipSource == 'L' or ipSource == 'l') and (cmdSource == 'S' or cmdSource == 's')):
    IPSCMD1(ipList, command)
elif ((ipSource == 'L' or ipSource == 'l') and (cmdSource == 'L' or cmdSource == 'l')):
    IPSCMDS(ipList, cmdFile)
else:
    print "You entered some weird combo. S and L are the only valid choices"
| Python | 0.00007 | |
2dfa68eb458cfc7d6166ede8a222b1d11b9577a0 | Create grabscreen.py | grabscreen.py | grabscreen.py | # Done by Frannecklp
import cv2
import numpy as np
import win32gui, win32ui, win32con, win32api
def grab_screen(region=None):
    """Capture the Windows desktop (or a sub-region) as an RGB numpy array.

    Parameters
    ----------
    region : tuple, optional
        ``(left, top, x2, y2)`` inclusive pixel bounds; the whole virtual
        screen is captured when omitted.

    Returns
    -------
    numpy.ndarray
        ``(height, width, 3)`` array in RGB channel order.
    """
    hwin = win32gui.GetDesktopWindow()
    if region:
        left, top, x2, y2 = region
        width = x2 - left + 1
        height = y2 - top + 1
    else:
        # Full virtual screen (spans all monitors).
        width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN)
        height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN)
        left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
        top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)

    # Blit the requested screen area into an in-memory bitmap.
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    bmp.CreateCompatibleBitmap(srcdc, width, height)
    memdc.SelectObject(bmp)
    memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY)

    signedIntsArray = bmp.GetBitmapBits(True)
    # np.frombuffer replaces the long-deprecated np.fromstring (removed for
    # binary input in modern NumPy); .copy() makes the array writable and
    # independent of the bitmap buffer released below.
    img = np.frombuffer(signedIntsArray, dtype='uint8').reshape(height, width, 4).copy()

    # Release all GDI resources before returning.
    srcdc.DeleteDC()
    memdc.DeleteDC()
    win32gui.ReleaseDC(hwin, hwindc)
    win32gui.DeleteObject(bmp.GetHandle())
    return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
| Python | 0 | |
6bbea60eb3eac4da75ac4e590c5729056b05d63b | test imgs retrieval | get_exp_imgs.py | get_exp_imgs.py | import argparse
import os
import itertools
import random
from math import log10
import scipy.stats as stats
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import Variable, grad
from models.iv_model import *
from data.proData import CreateDataLoader
parser = argparse.ArgumentParser()
parser.add_argument('--datarootC', required=True, help='path to colored dataset')
parser.add_argument('--datarootS', required=True, help='path to sketch dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=512, help='the height / width of the input image to network')
parser.add_argument('--cut', type=int, default=1, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--zero_mask', action='store_true', help='finetune?')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=1234')
parser.add_argument('--baseGeni', type=int, default=2500, help='start base of pure pair L1 loss')
parser.add_argument('--stage', type=int, required=True, help='training stage')
# The three options below are read later in this script (opt.geni, opt.advW,
# opt.advW2) but were never declared, so the first attribute access raised
# AttributeError at runtime.  Defaults restore runnability;
# TODO(review): confirm the intended default values.
parser.add_argument('--geni', type=int, default=0, help='resume generator iteration count')
parser.add_argument('--advW', type=float, default=1.0, help='adversarial loss weight, first half batch')
parser.add_argument('--advW2', type=float, default=1.0, help='adversarial loss weight, second half batch')
opt = parser.parse_args()
print(opt)
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# NOTE(review): requires a --geni option to be declared on the parser;
# none is visible in this file -- confirm.
gen_iterations = opt.geni
try:
    os.makedirs(opt.outf)
except OSError:
    # Output directory already exists -- keep going.
    pass
# random seed setup # !!!!!
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
    torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end
dataloader = CreateDataLoader(opt)
netG = torch.nn.DataParallel(def_netG(ngf=opt.ngf))
# Replace the hint head with a 5-input-channel conv -- the `hint` tensor
# assembled below concatenates masked color, mask and noise planes.
netG.module.toH = nn.Sequential(nn.Conv2d(5, opt.ngf, kernel_size=7, stride=1, padding=3), nn.LeakyReLU(0.2, True))
if opt.netG != '':
    netG.load_state_dict(torch.load(opt.netG))
print(netG)
netI = torch.nn.DataParallel(def_netI())
print(netI)
criterion_L1 = nn.L1Loss()
criterion_MSE = nn.MSELoss()
L2_dist = nn.PairwiseDistance(2)
one = torch.FloatTensor([1])
mone = one * -1
half_batch = opt.batchSize // 2
# NOTE(review): opt.advW / opt.advW2 are read here but no --advW/--advW2
# options are declared on the parser in this file -- confirm.
zero_mask_advW = torch.FloatTensor([opt.advW] * half_batch + [opt.advW2] * half_batch).view(opt.batchSize, 1)
noise = torch.Tensor(opt.batchSize, 1, opt.imageSize // 4, opt.imageSize // 4)
fixed_sketch = torch.FloatTensor()
fixed_hint = torch.FloatTensor()
fixed_sketch_feat = torch.FloatTensor()
if opt.cuda:
    # Move everything to the GPU up front.
    netG = netG.cuda()
    netI = netI.cuda().eval()
    fixed_sketch, fixed_hint, fixed_sketch_feat = fixed_sketch.cuda(), fixed_hint.cuda(), fixed_sketch_feat.cuda()
    criterion_L1 = criterion_L1.cuda()
    criterion_MSE = criterion_MSE.cuda()
    one, mone = one.cuda(), mone.cuda()
    zero_mask_advW = Variable(zero_mask_advW.cuda())
    noise = noise.cuda()
def mask_gen(zero_mask):
    """Sample a batch of random binary hint masks of size maskS x maskS.

    Each image keeps a pixel where uniform noise >= a threshold drawn from
    the module-level truncated normal ``X`` (threshold near 1, so masks are
    sparse).  With ``zero_mask`` the second half of the batch is all-zero
    (no hints at all).

    NOTE(review): not called anywhere in this excerpt; relies on the
    module-level ``maskS``, ``X`` and ``opt`` defined nearby.
    """
    if zero_mask:
        mask1 = torch.cat(
            [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize // 2)],
            0).cuda()
        mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(opt.batchSize // 2)],
                          0).cuda()
        mask = torch.cat([mask1, mask2], 0)
    else:
        mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],
                         0).cuda()
    return mask
flag = 1
# Truncated normal on [0, 1], tightly peaked just below 1: used as the
# per-image threshold when sampling sparse color-hint masks below.
lower, upper = 0, 1
mu, sigma = 1, 0.005
maskS = opt.imageSize // 4
X = stats.truncnorm(
    (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
data_iter = iter(dataloader)
# Fixed: iterators have no .next() method on Python 3 -- use next().
data = zip(*[next(data_iter) for _ in range(16 // opt.batchSize)])
real_cim, real_vim, real_sim = [torch.cat(dat, 0) for dat in data]
if opt.cuda:
    real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
# First 8 samples get a random sparse hint mask, the last 8 get no hints.
mask1 = torch.cat(
    [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(8)],
    0).cuda()
mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(8)],
                  0).cuda()
mask = torch.cat([mask1, mask2], 0)
# Hint tensor: masked color image + the mask plane + gaussian noise plane.
hint = torch.cat(
    (real_vim * mask, mask,
     torch.Tensor(16, 1, opt.imageSize // 4, opt.imageSize // 4).normal_().cuda()), 1)
with torch.no_grad():
    feat_sim = netI(Variable(real_sim)).data
fixed_sketch.resize_as_(real_sim).copy_(real_sim)
fixed_hint.resize_as_(hint).copy_(hint)
fixed_sketch_feat.resize_as_(feat_sim).copy_(feat_sim)
with torch.no_grad():
    fake = netG(Variable(fixed_sketch), Variable(fixed_hint), Variable(fixed_sketch_feat), opt.stage)
vutils.save_image(real_cim.mul(0.5).add(0.5),
                  '%s/color_samples' % opt.outf + '.png')
vutils.save_image(fake.data.mul(0.5).add(0.5),
                  '%s/colored_samples' % opt.outf + '.png')
# NOTE(review): ``np`` is never imported in this file -- presumably it leaks
# in via ``from models.iv_model import *``; confirm.
np.save('%s/color_samples' % opt.outf, real_cim.cpu().numpy())
# Fixed: the second array was previously saved to 'color_samples' as well,
# clobbering the line above; the name now matches the 'colored_samples' PNG.
np.save('%s/colored_samples' % opt.outf, fake.data.cpu().numpy())
| Python | 0.000005 | |
0486e02bbaefea63a2dff9983be51623a184dc66 | test python interpreter | test/test_interpreter_layer.py | test/test_interpreter_layer.py | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
import cocos
from cocos.director import director
import pyglet
if __name__ == "__main__":
director.init()
interpreter_layer = cocos.layer.InterpreterLayer()
main_scene = cocos.scene.Scene(interpreter_layer)
director.run(main_scene)
| Python | 0.000062 | |
76baf574ba5a4ff9e835412e27fd2ebc634a9992 | add Cython register test | new_pymtl/translation_tools/verilator_sim_test.py | new_pymtl/translation_tools/verilator_sim_test.py | from verilator_sim import get_verilated
from new_pmlib.regs import Reg
from new_pymtl import SimulationTool
def test_reg():
    # Verilate a 16-bit register model and check registered (one-clock-edge
    # delayed) input-to-output behaviour through the simulator.
    model = Reg(16)
    print "BEGIN"
    vmodel = get_verilated( model )
    print "END"
    vmodel.elaborate()
    sim = SimulationTool( vmodel )
    sim.reset()
    assert vmodel.out == 0
    vmodel.in_.value = 10
    sim.cycle()
    assert vmodel.out == 10
    vmodel.in_.value = 12
    # Input changes must not propagate until the next clock edge.
    assert vmodel.out == 10
    sim.cycle()
    assert vmodel.out == 12
| Python | 0 | |
214aa96b5e816ad6386fc20fed684152ac8181d1 | add migration for ip to generic ip field change | newsletters/migrations/0003_auto_20150701_1840.py | newsletters/migrations/0003_auto_20150701_1840.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Applies on top of the previous newsletters migration.
    dependencies = [
        ('newsletters', '0002_auto_20150630_0009'),
    ]

    # Switch Subscription.ip to GenericIPAddressField (accepts IPv4 and IPv6,
    # unlike the legacy IPAddressField).
    operations = [
        migrations.AlterField(
            model_name='subscription',
            name='ip',
            field=models.GenericIPAddressField(),
        ),
    ]
| Python | 0 | |
f90a9e585b5de36b3abc11cf454cde75a44a1a6b | Include Overlay Utils | evaluation/overlay_utils.py | evaluation/overlay_utils.py | #!/usr/bin/env python
"""Utility functions for segmentation tasks."""
from PIL import Image
import scipy.ndimage
import numpy as np
def replace_colors(segmentation, color_changes):
    """
    Replace the values in segmentation to the values defined in color_changes.

    Parameters
    ----------
    segmentation : numpy array
        Two dimensional
    color_changes : dict
        The key is the original color, the value is the color to change to.
        The key 'default' is used when the color is not in the dict.
        If default is not defined, no replacement is done.
        Each color has to be a tuple (r, g, b) with r, g, b in {0, 1, ..., 255}

    Returns
    -------
    PIL.Image.Image
        The recolored segmentation as an RGBA image (not a numpy array as
        the original docstring claimed -- toimage/convert return PIL images).
    """
    # NOTE(review): this module imports scipy.ndimage but calls
    # scipy.misc.toimage, which is neither imported here nor present in
    # SciPy >= 1.2 (removed).  Confirm the pinned SciPy version or port this
    # to PIL's Image.fromarray.
    width, height = segmentation.shape
    output = scipy.misc.toimage(segmentation)
    output = output.convert('RGBA')
    # putpixel takes (col, row), hence the (y, x) swap relative to the array.
    for x in range(0, width):
        for y in range(0, height):
            if segmentation[x, y] in color_changes:
                output.putpixel((y, x), color_changes[segmentation[x, y]])
            elif 'default' in color_changes:
                output.putpixel((y, x), color_changes['default'])
    return output
def overlay_segmentation(image, segmentation, color_dict):
    """
    Overlay original_image with segmentation_image.

    Colors every pixel whose segmentation value appears in ``color_dict``
    (falling back to ``color_dict['default']`` when present) and pastes the
    colored layer over ``image``, using that layer as its own alpha mask.

    Parameters
    ----------
    image : array-like
        The background image (passed to toimage -- presumably a 2-D/3-D
        numpy array; confirm with callers).
    segmentation : numpy array
        Two dimensional
    color_dict : dict
        Maps segmentation values to RGBA color tuples.

    Returns
    -------
    np.array
        The composited image.
    """
    # NOTE(review): scipy.misc.toimage is not imported here and was removed
    # from SciPy >= 1.2 -- same issue as replace_colors above.
    width, height = segmentation.shape
    output = scipy.misc.toimage(segmentation)
    output = output.convert('RGBA')
    for x in range(0, width):
        for y in range(0, height):
            if segmentation[x, y] in color_dict:
                output.putpixel((y, x), color_dict[segmentation[x, y]])
            elif 'default' in color_dict:
                output.putpixel((y, x), color_dict['default'])
    background = scipy.misc.toimage(image)
    background.paste(output, box=None, mask=output)
    return np.array(background)
| Python | 0 | |
c37452e7cd4401bd7cbb8e855af65c26d730187c | add web_utl for crawler | crawler/web_util.py | crawler/web_util.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
chrome有个功能,对于请求可以直接右键copy as curl,然后在命令行里边用curl
模拟发送请求。现在需要把此curl字符串处理成requests库可以传入的参数格式,
http://stackoverflow.com/questions/23118249/whats-the-difference-between-request-payload-vs-form-data-as-seen-in-chrome
"""
import re
from functools import wraps
import traceback
import requests
def encode_to_dict(encoded_str):
    """Split a url-encoded query string into a dict.

    >>> encode_to_dict('name=foo')
    {'name': 'foo'}
    >>> sorted(encode_to_dict('name=foo&val=bar').items())
    [('name', 'foo'), ('val', 'bar')]

    Fixes over the original: the doctest expected outputs were wrong
    ({'name': foo'} / 'var'); a pair without '=' crashed with IndexError
    (now yields an empty value); values containing '=' were truncated
    (now split only on the first '=').
    """
    d = {}
    for pair in encoded_str.split('&'):
        if pair:
            key, _, val = pair.partition('=')
            d[key] = val
    return d
def parse_curl_str(s):
    """Convert a Chrome "copy as cURL" string to (url, headers dict, data)."""
    # Chrome single-quotes every value; splitting on the quoted groups
    # yields alternating option / value tokens.
    quoted = re.compile("'(.*?)'")
    tokens = [part.strip() for part in re.split(quoted, s)]
    url = ''
    headers = {}
    data = ''
    idx = 0
    while idx < len(tokens) - 1:
        opt, value = tokens[idx], tokens[idx + 1]
        if opt.startswith('curl'):
            url = value
        elif opt.startswith('-H'):
            parts = value.split(':', 1)
            headers[parts[0].strip()] = parts[1].strip()
        elif opt.startswith('--data'):
            data = value
        idx += 2
    return url, headers, data
def retry(retries=3):
    """Decorator that retries a failed HTTP request.

    (For something more featureful, see the ``retrying`` package:
    pip install retrying
    https://github.com/rholder/retrying)

    :param retries: number int of retry times.
    """
    def _retry(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            response = None
            attempt = 0
            while attempt < retries:
                attempt += 1
                try:
                    response = func(*args, **kwargs)
                    status = response.status_code
                    if status == 404:
                        # Not found: no point retrying.
                        print(404)
                        break
                    elif status != 200:
                        # Transient-looking failure: log the code and retry.
                        print(status)
                        continue
                    else:
                        break
                except Exception:
                    traceback.print_exc()
                    response = None
            return response
        return _wrapper
    return _retry
# Keep a reference to the real requests.get before monkey-patching it below.
_get = requests.get


@retry(5)
def get(*args, **kwds):
    """requests.get wrapper: 10s default timeout, spider User-Agent, 5 retries."""
    if 'timeout' not in kwds:
        kwds['timeout'] = 10
    if 'headers' not in kwds:
        headers = {
            'User-Agent': 'Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)',
        }
        kwds['headers'] = headers
    return _get(*args, **kwds)


# Monkey-patch so every requests.get call in this process gets the defaults.
requests.get = get
| Python | 0 | |
9733b08f8e9837e4f4246ad18b89b689cfe816dc | Test shape manipulation of Representations | astropy/coordinates/tests/test_representation_methods.py | astropy/coordinates/tests/test_representation_methods.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import numpy as np
from ... import units as u
from .. import SphericalRepresentation, Longitude, Latitude
from ...tests.helper import pytest
from ...utils.compat.numpycompat import NUMPY_LT_1_9
class TestManipulation():
    """Manipulation of Representation shapes.

    Checking that attributes are manipulated correctly.

    Even more exhaustive tests are done in time.tests.test_methods
    """

    def setup(self):
        # 6 longitudes x 7 latitudes.
        lon = Longitude(np.arange(0, 24, 4), u.hourangle)
        lat = Latitude(np.arange(-90, 91, 30), u.deg)
        # With same-sized arrays
        self.s0 = SphericalRepresentation(
            lon[:, np.newaxis] * np.ones(lat.shape),
            lat * np.ones(lon.shape)[:, np.newaxis],
            np.ones(lon.shape + lat.shape) * u.kpc)
        # With unequal arrays -> these will be broadcast.
        self.s1 = SphericalRepresentation(lon[:, np.newaxis], lat, 1. * u.kpc)
        # For completeness on some tests, also a cartesian one
        self.c0 = self.s0.to_cartesian()

    def test_ravel(self):
        # Contiguous components should be views after ravel.
        s0_ravel = self.s0.ravel()
        assert s0_ravel.shape == (self.s0.size,)
        assert np.all(s0_ravel.lon == self.s0.lon.ravel())
        assert np.may_share_memory(s0_ravel.lon, self.s0.lon)
        assert np.may_share_memory(s0_ravel.lat, self.s0.lat)
        assert np.may_share_memory(s0_ravel.distance, self.s0.distance)
        # Since s1 was broadcast, ravel needs to make a copy.
        s1_ravel = self.s1.ravel()
        assert s1_ravel.shape == (self.s1.size,)
        assert np.all(s1_ravel.lon == self.s1.lon.ravel())
        assert not np.may_share_memory(s1_ravel.lat, self.s1.lat)

    def test_flatten(self):
        s0_flatten = self.s0.flatten()
        assert s0_flatten.shape == (self.s0.size,)
        assert np.all(s0_flatten.lon == self.s0.lon.flatten())
        # Flatten always copies.
        assert not np.may_share_memory(s0_flatten.distance, self.s0.distance)
        s1_flatten = self.s1.flatten()
        assert s1_flatten.shape == (self.s1.size,)
        assert np.all(s1_flatten.lon == self.s1.lon.flatten())
        assert not np.may_share_memory(s1_flatten.lat, self.s1.lat)

    def test_transpose(self):
        # Transpose is a view for both the regular and the broadcast case.
        s0_transpose = self.s0.transpose()
        assert s0_transpose.shape == (7, 6)
        assert np.all(s0_transpose.lon == self.s0.lon.transpose())
        assert np.may_share_memory(s0_transpose.distance, self.s0.distance)
        s1_transpose = self.s1.transpose()
        assert s1_transpose.shape == (7, 6)
        assert np.all(s1_transpose.lat == self.s1.lat.transpose())
        assert np.may_share_memory(s1_transpose.lat, self.s1.lat)
        # Only one check on T, since it just calls transpose anyway.
        # Doing it on the CartesianRepresentation just for variety's sake.
        c0_T = self.c0.T
        assert c0_T.shape == (7, 6)
        assert np.all(c0_T.x == self.c0.x.T)
        assert np.may_share_memory(c0_T.y, self.c0.y)

    def test_diagonal(self):
        s0_diagonal = self.s0.diagonal()
        assert s0_diagonal.shape == (6,)
        assert np.all(s0_diagonal.lat == self.s0.lat.diagonal())
        # diagonal() only returns a (read-only) view from numpy >= 1.9.
        if not NUMPY_LT_1_9:
            assert np.may_share_memory(s0_diagonal.lat, self.s0.lat)

    def test_swapaxes(self):
        s1_swapaxes = self.s1.swapaxes(0, 1)
        assert s1_swapaxes.shape == (7, 6)
        assert np.all(s1_swapaxes.lat == self.s1.lat.swapaxes(0, 1))
        assert np.may_share_memory(s1_swapaxes.lat, self.s1.lat)

    def test_reshape(self):
        s0_reshape = self.s0.reshape(2, 3, 7)
        assert s0_reshape.shape == (2, 3, 7)
        assert np.all(s0_reshape.lon == self.s0.lon.reshape(2, 3, 7))
        assert np.all(s0_reshape.lat == self.s0.lat.reshape(2, 3, 7))
        assert np.all(s0_reshape.distance == self.s0.distance.reshape(2, 3, 7))
        assert np.may_share_memory(s0_reshape.lon, self.s0.lon)
        assert np.may_share_memory(s0_reshape.lat, self.s0.lat)
        assert np.may_share_memory(s0_reshape.distance, self.s0.distance)
        s1_reshape = self.s1.reshape(3, 2, 7)
        assert s1_reshape.shape == (3, 2, 7)
        assert np.all(s1_reshape.lat == self.s1.lat.reshape(3, 2, 7))
        assert np.may_share_memory(s1_reshape.lat, self.s1.lat)
        # For reshape(3, 14), copying is necessary for lon, lat, but not for d
        s1_reshape2 = self.s1.reshape(3, 14)
        assert s1_reshape2.shape == (3, 14)
        assert np.all(s1_reshape2.lon == self.s1.lon.reshape(3, 14))
        assert not np.may_share_memory(s1_reshape2.lon, self.s1.lon)
        assert s1_reshape2.distance.shape == (3, 14)
        assert np.may_share_memory(s1_reshape2.distance, self.s1.distance)

    def test_squeeze(self):
        # Length-1 axes inserted by reshape are dropped again by squeeze.
        s0_squeeze = self.s0.reshape(3, 1, 2, 1, 7).squeeze()
        assert s0_squeeze.shape == (3, 2, 7)
        assert np.all(s0_squeeze.lat == self.s0.lat.reshape(3, 2, 7))
        assert np.may_share_memory(s0_squeeze.lat, self.s0.lat)

    def test_add_dimension(self):
        # np.newaxis indexing adds a length-1 axis without copying.
        s0_adddim = self.s0[:, np.newaxis, :]
        assert s0_adddim.shape == (6, 1, 7)
        assert np.all(s0_adddim.lon == self.s0.lon[:, np.newaxis, :])
        assert np.may_share_memory(s0_adddim.lat, self.s0.lat)

    def test_take(self):
        s0_take = self.s0.take((5, 2))
        assert s0_take.shape == (2,)
        assert np.all(s0_take.lon == self.s0.lon.take((5, 2)))
| Python | 0 | |
a12dd320df30404df8c8ec196e21067376cc1e2c | Add tests of table and column pickling | astropy/table/tests/test_pickle.py | astropy/table/tests/test_pickle.py | import cPickle as pickle
import numpy as np
import pytest
from ...table import Table, Column, MaskedColumn
@pytest.fixture(params=[0, 1, -1])
def protocol(request):
    """
    Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
    """
    # -1 always selects the highest protocol available to this Python.
    return request.param
def test_pickle_column(protocol):
    """Round-trip a plain Column through pickle at the given protocol.

    Fixed: ``protocol`` came from the fixture but was never passed to
    ``pickle.dumps``, so every parametrized run used the default protocol.
    """
    c = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)
    assert np.all(cp == c)
    assert cp.attrs_equal(c)
def test_pickle_masked_column(protocol):
    """Round-trip a MaskedColumn (data, mask and fill_value) through pickle.

    Fixed: ``protocol`` is now actually passed to ``pickle.dumps``.
    """
    c = MaskedColumn(data=[1, 2], name='a', format='%05d', description='col a', unit='cm',
                     meta={'a': 1})
    c.mask[1] = True
    c.fill_value = -99
    cs = pickle.dumps(c, protocol=protocol)
    cp = pickle.loads(cs)
    assert np.all(cp._data == c._data)
    assert np.all(cp.mask == c.mask)
    assert cp.attrs_equal(c)
    assert cp.fill_value == -99
def test_pickle_table(protocol):
    """Round-trip a two-column Table (data, attributes, meta) through pickle.

    Fixed: ``protocol`` is now actually passed to ``pickle.dumps``.
    """
    a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
               meta={'b': 1})
    t = Table([a, b], meta={'a': 1})
    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)
    assert np.all(tp['a'] == t['a'])
    assert np.all(tp['b'] == t['b'])
    assert tp['a'].attrs_equal(t['a'])
    assert tp['b'].attrs_equal(t['b'])
    assert tp.meta == t.meta
def test_pickle_masked_table(protocol):
    """Round-trip a masked Table (mask and fill_value included) through pickle.

    Fixes: ``protocol`` is now passed to ``pickle.dumps``, and the
    attribute loop compared the unpickled table with *itself* (a
    tautology); it now compares against the original table.
    """
    a = Column(data=[1, 2], name='a', format='%05d', description='col a', unit='cm', meta={'a': 1})
    b = Column(data=[3.0, 4.0], name='b', format='%05d', description='col b', unit='cm',
               meta={'b': 1})
    t = Table([a, b], meta={'a': 1}, masked=True)
    t['a'].mask[1] = True
    t['a'].fill_value = -99
    ts = pickle.dumps(t, protocol=protocol)
    tp = pickle.loads(ts)
    for colname in ('a', 'b'):
        for attr in ('_data', 'mask', 'fill_value'):
            assert np.all(getattr(tp[colname], attr) == getattr(t[colname], attr))
    assert tp['a'].attrs_equal(t['a'])
    assert tp['b'].attrs_equal(t['b'])
    assert tp.meta == t.meta
| Python | 0 | |
fb5f6b5db2e2701692dd0a35dfad36d7b6dd4f2d | Create example file | example.py | example.py | from blender_wrapper.api import Scene
from blender_wrapper.api import Camera
from blender_wrapper.api import SunLamp
from blender_wrapper.api import ORIGIN
def main():
    # 1500x1000 render, output written under ~/Desktop/.
    scene = Scene(1500, 1000, filepath="~/Desktop/")
    scene.setup()
    # Camera at (1, 0, 1) with rotation (90, 0, 0) -- presumably degrees;
    # confirm against the blender_wrapper API.
    camera = Camera((1, 0, 1), (90, 0, 0), view_align=True)
    camera.add_to_scene()
    # Sun lamp of strength 10 at (0, 0, 3), aimed at the scene ORIGIN.
    lamp = SunLamp(10, (0, 0, 3), ORIGIN)
    lamp.add_to_scene()
    scene.render(resolution_percentage=100)


# Execute running:
# blender --background -P ./test.py
if __name__ == "__main__":
    main()
| Python | 0.000001 | |
62032986f4e57c85f842c16fdb916b0a19bdbd0e | Create _webui.py | marionette_tg/plugins/_webui.py | marionette_tg/plugins/_webui.py | import flask
import gnupg, base64
#https://gist.github.com/dustismo/6203329 / apt-get install libleveldb1 libleveldb-dev && pip install plyvel
#import plyvel #leveldb, very fast, you can even run the database in ram if you want
#import MySQLdb #if you want mysql
from os import urandom
from base64 import b64decode
import datetime
import sys
from functools import wraps
datab = marionette_tg.conf.get("server.database")
if datab == 'leveldb':
import plyvel
elif datab == 'mysql':
import MySQLdb
else:
print 'error'
#webui for layerprox
lp = flask.Flask(__name__)
dbplace = '' #database directory, test: '/tmp/testdb/'
def add_response_headers(headers=None):
    """Decorator factory: merge *headers* into every response of the view.

    The default used to be a mutable ``{}`` (shared-mutable-default
    anti-pattern); ``None`` is used instead and normalized here.
    """
    headers = headers or {}

    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            resp = flask.make_response(f(*args, **kwargs))
            h = resp.headers
            for header, value in headers.items():
                h[header] = value
            return resp
        return decorated_function
    return decorator
#general trolling bots
@lp.before_request
def blockuseragentreq():
    """Redirect known scanner/scraper user agents to the decoy page."""
    # .get() instead of [] so a request without a User-Agent header no
    # longer raises KeyError (which surfaced as a server error).
    useragent = flask.request.headers.get('User-Agent', '')
    # 'curl' was deliberately disabled in the original (commented out).
    blocked = ('sqlmap', 'wget', 'w3af', 'Scanner')
    for marker in blocked:
        if marker in useragent:
            return flask.redirect(flask.url_for('dummy'))
    # Returning None lets the request proceed normally.
#root@box:~# curl -I http://127.0.0.1:80/
#HTTP/1.0 200 OK
#Content-Type: text/html; charset=utf-8
#Content-Length: 198
#Server: amiIN9vf36G1T3xzpg==
#Date: Sat, 26 Nov 2016 10:24:22 GMT
#protection against server fingerprinting
def antifingerprint(f):
    # Random bogus Server header, generated ONCE per decorated view at
    # import time -- every view advertises its own (static) fake server
    # string.  NOTE(review): if per-request randomness was intended, the
    # generation would need to move inside decorated_function.
    # NOTE(review): under Python 3, b64encode returns bytes and
    # .replace("==", "") with str arguments would raise TypeError; this
    # module is Python 2 (see the ``print`` statement above).
    jibbrish = base64.b64encode(urandom(13))
    jibbrish = jibbrish.replace("==", "")
    @wraps(f)
    @add_response_headers({'Server': jibbrish})
    def decorated_function(*args, **kwargs):
        return f(*args, **kwargs)
    return decorated_function
@lp.route('/')
@antifingerprint
def firstpage():
    """Landing page identifying this as a LayerProx server."""
    return '''
<html>
<head>
<title>LayerProx</title>
</head>
<body>
<h2>
<center>This is a LayerProx server
</h2>
<br>
<br>
<t>get encrypted by goin to /getproxied</t>
</center>
</body>
</html>
'''
#@lp.route('', methods=['GET'])
@lp.route('/getproxied', methods=['GET', 'POST'])
@antifingerprint
def get_registerd():
    """Registration endpoint (work in progress).

    Fixes: GET previously fell through and returned ``None`` (a server
    error) even though the route accepts GET; and ``urandom()`` was called
    with no size argument, a guaranteed TypeError on POST.
    """
    if flask.request.method == 'POST':
        day = gen_day()    # expiry date for the new registration
        # 16 bytes is a guess for the token size -- TODO(review): confirm.
        h1 = urandom(16)
        fingerprint = 'x'  # placeholder; registration logic is unfinished
    return '''
<html>
<head>
<title> LayerProx</title>
</head>
<body>
</body>
</html>
'''
#choice a serve
#choice a serverr
#if sucess redirect to
@lp.route('/welcome')
@antifingerprint
def wel():
    """Post-registration welcome page."""
    return '''
<html>
<body>
<center>
<h1>Welcome to the LayerProx network</h1>
</body>
</html>
'''
#make the bots read 1984
@lp.route('/nobots')
@antifingerprint
def dummy():
    """Decoy target for blocked clients: bounce them to Orwell's 1984 PDF."""
    return flask.redirect('http://msxnet.org/orwell/1984.pdf', code=302)
#tell noone pub info, only dax
@lp.route('/robots.txt')
@antifingerprint
def robots():
    """robots.txt: hide everything from all crawlers except DuckDuckBot.

    Fixed: the directive was misspelled "Dissallow"; crawlers ignore
    unknown directives, so nothing was actually being blocked.
    """
    return '''
User-agent: *
Disallow: /
User-agent: DuckDuckBot/1.1
Disallow:
User-agent: DuckDuckBot/1.0
Disallow:
'''
def check_db():
    """Purge entries from the LevelDB store based on their date-prefixed keys."""
    db = plyvel.DB(dbplace, create_if_missing=True)
    #db.put(b'20', b'value')
    today = datetime.date.today()
    today = str(today)  # ISO dates (YYYY-MM-DD) compare correctly as strings
    #key prefix is date + hmac, one value is date
    # NOTE(review): ``start=today`` iterates keys >= today, i.e. this deletes
    # *current and future* entries.  If the intent is to expire old records,
    # this should probably be ``stop=today`` -- confirm before relying on it.
    for key, value in db.iterator(start=today):
        if key:
            db.delete(key)
        else:
            pass
#datetime syntax
#>>> today = datetime.date.today()
#>>> print today
#2016-11-02
#>>> today = datetime.date.today()
#>>> EndDate = today + timedelta(days=10)
#>>> print EndDate
#2016-11-12
#day generation system
def gen_day():
    """Return today's date plus a random day offset (0-255 days ahead).

    The original implementation indexed ``test2[test]`` into a list of
    exactly ``test`` elements, so it *always* raised IndexError.  This
    version keeps the outward contract -- a ``datetime.date`` a random
    number of days in the future -- using one random byte as the offset.
    bytearray indexing yields ints on both Python 2 and 3.
    """
    offset = bytearray(urandom(1))[0]  # uniform 0-255
    today = datetime.date.today()
    return today + datetime.timedelta(days=offset)
#mysql - db
#create database lp;
#create table layerprox(
#fingerprint hmac h1 h2 to_date
#)
if __name__ == '__main__':
    # Debug disabled; binding port 80 requires elevated privileges.
    lp.run(debug=False,port=80) #host=0.0.0.0
| Python | 0.000001 | |
68e16ca50bec3802184e098548aa2c2584c352b2 | Add main example code | signal_decorator.py | signal_decorator.py | #!/usr/bin/python
__author__ = 'Neil Parley'
from functools import wraps
import signal
import sys
def catch_sig(f):
    """Decorator that installs SIGTERM/SIGINT handlers before calling *f*.

    The handler prints a short message and exits the process cleanly.

    :param f: Function to wrap.
    :return: Wrapper that registers the handlers, then delegates to *f*.
    """
    def _on_signal(*_unused):
        # Invoked asynchronously when SIGTERM or SIGINT arrives.
        print('Got killed')
        sys.exit(0)

    @wraps(f)
    def wrapper(*args, **kwargs):
        for sig in (signal.SIGTERM, signal.SIGINT):
            signal.signal(sig, _on_signal)
        return f(*args, **kwargs)

    return wrapper
@catch_sig
def test():
    # Demo: sleep long enough to send the process SIGINT/SIGTERM by hand
    # and watch the decorator's handler fire.
    import time
    print("Waiting")
    time.sleep(60)
if __name__ == "__main__":
    test()
bec85af38596c2a4c38b8a53e3960a9ba375fe6f | remove sklearn.test() | sklearn/__init__.py | sklearn/__init__.py | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
__version__ = '0.14-git'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
    def test(*args, **kwargs):
        """Deprecated stub: ``sklearn.test()`` no longer runs the suite.

        Kept only so existing callers get a pointer to nosetests instead
        of an AttributeError.
        """
        import warnings
        # Not using a DeprecationWarning, as they are turned off by
        # default
        warnings.warn("""sklearn.test() is no longer supported to run the
            scikit-learn test suite.
            After installation, you can launch the test suite from outside the
            source directory (you will need to have nosetests installed)::
            $ nosetests --exe sklearn
            See the web page http://scikit-learn.org/stable/install.html#testing
            for more information.
           """, stacklevel=2)
__all__ = ['cross_validation', 'cluster', 'covariance',
'datasets', 'decomposition', 'feature_extraction',
'feature_selection', 'semi_supervised',
'gaussian_process', 'grid_search', 'hmm', 'lda', 'linear_model',
'metrics', 'mixture', 'naive_bayes', 'neighbors', 'pipeline',
'preprocessing', 'qda', 'svm', 'clone',
'cross_decomposition',
'isotonic', 'pls']
def setup_module(module):
    """Fixture for the tests to assure globally controllable seeding of RNGs
    """
    import os
    import random
    import numpy as np
    # The seed may be pinned via the environment; otherwise draw one.
    seed = os.environ.get('SKLEARN_SEED', None)
    if seed is None:
        seed = np.random.uniform() * (2 ** 31 - 1)
    seed = int(seed)
    print("I: Seeding RNGs with %r" % seed)
    np.random.seed(seed)
    random.seed(seed)
| """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
__version__ = '0.14-git'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
try:
from numpy.testing import nosetester
class _NoseTester(nosetester.NoseTester):
""" Subclass numpy's NoseTester to add doctests by default
"""
def test(self, label='fast', verbose=1, extra_argv=['--exe'],
doctests=True, coverage=False):
"""Run the full test suite
Examples
--------
This will run the test suite and stop at the first failing
example
>>> from sklearn import test
>>> test(extra_argv=['--exe', '-sx']) #doctest: +SKIP
"""
return super(_NoseTester, self).test(label=label,
verbose=verbose,
extra_argv=extra_argv,
doctests=doctests,
coverage=coverage)
try:
test = _NoseTester(raise_warnings="release").test
except TypeError:
# Older versions of numpy do not have a raise_warnings argument
test = _NoseTester().test
del nosetester
except:
pass
__all__ = ['cross_validation', 'cluster', 'covariance',
'datasets', 'decomposition', 'feature_extraction',
'feature_selection', 'semi_supervised',
'gaussian_process', 'grid_search', 'hmm', 'lda', 'linear_model',
'metrics', 'mixture', 'naive_bayes', 'neighbors', 'pipeline',
'preprocessing', 'qda', 'svm', 'test', 'clone',
'cross_decomposition',
'isotonic', 'pls']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs
"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| Python | 0 |
950bdd0f528fc61175c39dc2ade6abb9d46d767a | Change plan on book | contacts/migrations/0027_auto_20170106_0627.py | contacts/migrations/0027_auto_20170106_0627.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-06 06:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0026_auto_20161231_2045'),
]
operations = [
migrations.AlterField(
model_name='book',
name='plan',
field=models.CharField(blank=True, choices=[('team_monthly', 'Team Monthly Subscription'), ('basic_yearly', 'Basic Yearly Subscription'), ('basic_monthly', 'Basic Monthly Subscription'), ('family_monthly', 'Family Monthly Subscription'), ('family_yearly', 'Family Yearly Subscription'), ('team_yearly', 'Team Yearly Subscription')], max_length=100),
),
migrations.AlterField(
model_name='historicalbook',
name='plan',
field=models.CharField(blank=True, choices=[('team_monthly', 'Team Monthly Subscription'), ('basic_yearly', 'Basic Yearly Subscription'), ('basic_monthly', 'Basic Monthly Subscription'), ('family_monthly', 'Family Monthly Subscription'), ('family_yearly', 'Family Yearly Subscription'), ('team_yearly', 'Team Yearly Subscription')], max_length=100),
),
]
| Python | 0 | |
97c87237de87c91d66a92c1cacc362a7b831b8ef | add script to install python modules with pip | install_py_modules.py | install_py_modules.py | # this will install most necessary packages for this project
# that you may not already have on your system
import pip
def install(package):
pip.main(['install', package])
# Example
if __name__ == '__main__':
# for scraping akc.org for a list of breed names and pics
install('Scrapy')
# for calculating Haralick textures
install('mahotas')
# image operations convenience functions
install('imutils')
# plotting package
install('seaborn')
# data operations
install('pandas')
# machine learning lib
install('scikit-learn')
# image processing
install('scikit-image')
# eta and % completion of tasks
install('progressbar') | Python | 0 | |
98e822a78722e735b31817e74cc5e310fcb43c9a | add missed migration (HomeBanner verbose texts) | brasilcomvc/portal/migrations/0005_homebanner_verbose.py | brasilcomvc/portal/migrations/0005_homebanner_verbose.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('portal', '0004_homebanner_image_upload_to'),
]
operations = [
migrations.AlterModelOptions(
name='homebanner',
options={'verbose_name_plural': 'Banners da Home', 'verbose_name': 'Banner da Home'},
),
]
| Python | 0 | |
7fc947fec85b1bb621c7014b2008e1c3c0cce28c | add epg code (based on Brian Hargreaves' Matlab code) | epg/epg.py | epg/epg.py | #!/usr/bin/python
# EPG Simulation code, based off of Matlab scripts from Brian Hargreaves <bah@stanford.edu>
# 2015 Jonathan Tamir <jtamir@eecs.berkeley.edu>
import numpy as np
from numpy import pi, cos, sin, exp, conj
from warnings import warn
def epg_rf(FpFmZ, alpha, phi):
    """Apply an RF pulse of flip angle *alpha* and phase *phi* (radians)
    to a set of EPG configuration states.

    Rotation matrix from Weigel et al., JMR 205 (2010) 276-285, Eq. 8.

    Parameters
    ----------
    FpFmZ : 3xN array of F+, F- and Z states.
    alpha : flip angle (radians).
    phi : RF phase (radians).

    Returns
    -------
    (FpFmZ, RR) : rotated states and the 3x3 rotation matrix.
    """
    if abs(alpha) > 2 * pi:
        warn('epg_rf: Flip angle should be in radians!')
    # Precompute the recurring trig/phase factors once.
    ca2 = cos(alpha / 2.) ** 2
    sa2 = sin(alpha / 2.) ** 2
    sa = sin(alpha)
    ep = exp(1j * phi)
    em = exp(-1j * phi)
    RR = np.array([
        [ca2,            ep * ep * sa2,  -1j * ep * sa],
        [em * em * sa2,  ca2,            1j * em * sa],
        [-0.5j * em * sa, 0.5j * ep * sa, cos(alpha)],
    ])
    return np.dot(RR, FpFmZ), RR
def epg_relax(FpFmZ, T1, T2, T):
    """Apply T1/T2 relaxation over an interval *T* to EPG states.

    Parameters
    ----------
    FpFmZ : 3xN array of F+, F- and Z states.
    T1, T2 : relaxation times (same units as T).
    T : time interval (same units as T1, T2).

    Returns
    -------
    (FpFmZ, EE) : relaxed states and the decay matrix diag([E2, E2, E1]).
    """
    E1 = exp(-T / T1)
    E2 = exp(-T / T2)
    EE = np.diag([E2, E2, E1])
    # Pure decay of every state...
    out = np.dot(EE, FpFmZ)
    # ...plus Mz regrowth, which affects only the undephased Z0 state.
    out[2, 0] += 1 - E1
    return out, EE
def epg_grad(FpFmZ, noadd=False):
    """Dephase EPG states by one "unit" gradient area.

    F+ states shift to one higher order, F- states to one lower; Z states
    are unaffected.  Unless *noadd* is True, a new zeroed highest order is
    appended first (noadd speeds things up at the cost of accuracy).

    Returns the updated 3xN (or 3x(N+1)) state array.
    """
    if not noadd:
        pad = np.zeros((3, 1), dtype=FpFmZ.dtype)
        FpFmZ = np.hstack((FpFmZ, pad))     # room for a new dephased order
    FpFmZ[0, :] = np.roll(FpFmZ[0, :], 1)   # F+ -> higher order
    FpFmZ[1, :] = np.roll(FpFmZ[1, :], -1)  # F- -> lower order
    FpFmZ[1, -1] = 0                        # the highest F- has no source
    FpFmZ[0, 0] = conj(FpFmZ[1, 0])         # F+(0) is the conjugate of F-(0)
    return FpFmZ
def FSE_signal(angles_rad, TE, T1, T2):
    """Simulate a fast spin echo train with the given refocusing flips.

    Parameters
    ----------
    angles_rad : flip angles in radians, one per echo.
    TE : echo spacing.
    T1, T2 : relaxation times in the same units as TE.

    Returns
    -------
    Tx1 complex array of the signal at each echo time.
    """
    etl = len(angles_rad)
    echoes = np.zeros((etl, 1), dtype=complex)
    # Start at equilibrium Mz and apply the 90-degree excitation about y.
    P = epg_rf(np.array([[0], [0], [1]]), pi / 2, pi / 2)[0]
    for k in range(etl):
        # TE/2 of relaxation + dephasing, refocusing pulse, then TE/2 more.
        P = epg_grad(epg_relax(P, T1, T2, TE / 2.)[0])
        P = epg_rf(P, angles_rad[k], 0)[0]
        P = epg_grad(epg_relax(P, T1, T2, TE / 2.)[0])
        echoes[k] = P[0, 0]
    return echoes
if __name__ == "__main__":
    # Demo: plot the echo decay for a pure 180-degree CPMG train.
    import matplotlib.pyplot as plt
    T1 = 1000e-3
    T2 = 200e-3
    TE = 5e-3
    N = 100
    angles = 180 * np.ones((N,))
    angles_rad = angles * pi / 180.
    S = FSE_signal(angles_rad, TE, T1, T2)
    S2 = abs(S)
    plt.plot(TE*1000*np.arange(1, N+1), S2)
    plt.xlabel('time (ms)')
    plt.ylabel('signal')
    plt.title('T1 = %.2f ms, T2 = %.2f ms' % (T1 * 1000, T2 * 1000))
    plt.show()
35310a8fa136b5b6e094401a8289f5eabeb28cbc | Create batterylevel.py | home/hairygael/batterylevel.py | home/hairygael/batterylevel.py | def batterylevel():
power_now = subprocess.call ("WMIC PATH Win32_Battery Get EstimatedChargeRemaining", "r".readline())
ANSWER = float(power_now) * 100 , "%"
i01.mouth.speak(str(ANSWER))
| Python | 0.000063 | |
bd301eebd91a5dcca00d2b17b95f1e82bd8a572f | Add unit tests for show_server and list_servers | tempest/tests/services/compute/test_servers_client.py | tempest/tests/services/compute/test_servers_client.py | # Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.compute.json import servers_client
from tempest.tests import fake_auth_provider
from tempest.tests.services.compute import base
class TestServersClient(base.BaseComputeServiceTest):
    """Unit tests for ServersClient list/show/delete deserialization.

    Each test mocks the underlying ServiceClient transport method and
    verifies the client returns the canned payload (as str or bytes).
    """
    # Canned API response bodies the mocked transport returns.
    FAKE_SERVERS = {'servers': [{
        "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
        "links": [
            {
                "href": "http://os.co/v2/616fb98f-46ca-475e-917e-2563e5a8cd19",
                "rel": "self"
            },
            {
                "href": "http://os.co/616fb98f-46ca-475e-917e-2563e5a8cd19",
                "rel": "bookmark"
            }
        ],
        "name": u"new\u1234-server-test"}]
    }
    FAKE_SERVER_GET = {'server': {
        "accessIPv4": "",
        "accessIPv6": "",
        "addresses": {
            "private": [
                {
                    "addr": "192.168.0.3",
                    "version": 4
                }
            ]
        },
        "created": "2012-08-20T21:11:09Z",
        "flavor": {
            "id": "1",
            "links": [
                {
                    "href": "http://os.com/openstack/flavors/1",
                    "rel": "bookmark"
                }
            ]
        },
        "hostId": "65201c14a29663e06d0748e561207d998b343e1d164bfa0aafa9c45d",
        "id": "893c7791-f1df-4c3d-8383-3caae9656c62",
        "image": {
            "id": "70a599e0-31e7-49b7-b260-868f441e862b",
            "links": [
                {
                    "href": "http://imgs/70a599e0-31e7-49b7-b260-868f441e862b",
                    "rel": "bookmark"
                }
            ]
        },
        "links": [
            {
                "href": "http://v2/srvs/893c7791-f1df-4c3d-8383-3caae9656c62",
                "rel": "self"
            },
            {
                "href": "http://srvs/893c7791-f1df-4c3d-8383-3caae9656c62",
                "rel": "bookmark"
            }
        ],
        "metadata": {
            u"My Server Nu\1234me": u"Apau\1234che1"
        },
        "name": u"new\u1234-server-test",
        "progress": 0,
        "status": "ACTIVE",
        "tenant_id": "openstack",
        "updated": "2012-08-20T21:11:09Z",
        "user_id": "fake"}
    }
    server_id = FAKE_SERVER_GET['server']['id']
    def setUp(self):
        super(TestServersClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = servers_client.ServersClient(
            fake_auth, 'compute', 'regionOne')
    def test_list_servers_with_str_body(self):
        self._test_list_servers()
    def test_list_servers_with_bytes_body(self):
        self._test_list_servers(bytes_body=True)
    def _test_list_servers(self, bytes_body=False):
        # Mock the transport 'get' and check the canned body round-trips.
        self.check_service_client_function(
            self.client.list_servers,
            'tempest.common.service_client.ServiceClient.get',
            self.FAKE_SERVERS,
            bytes_body)
    def test_show_server_with_str_body(self):
        self._test_show_server()
    def test_show_server_with_bytes_body(self):
        self._test_show_server(bytes_body=True)
    def _test_show_server(self, bytes_body=False):
        self.check_service_client_function(
            self.client.show_server,
            'tempest.common.service_client.ServiceClient.get',
            self.FAKE_SERVER_GET,
            bytes_body,
            server_id=self.server_id
        )
    def test_delete_server(self, bytes_body=False):
        # NOTE(review): bytes_body is unused -- delete returns 204 with an
        # empty body.
        self.check_service_client_function(
            self.client.delete_server,
            'tempest.common.service_client.ServiceClient.delete',
            {},
            status=204,
            server_id=self.server_id
        )
| Python | 0.000001 | |
c7851b61268848cf1b02d9e5c845a846ded4c2a7 | Update __init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | tendrl/node_agent/objects/cluster_message/__init__.py | from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| from tendrl.commons import etcdobj
from tendrl.commons.message import Message as message
from tendrl.commons import objects
class ClusterMessage(objects.BaseObject, message):
internal = True
def __init__(self, **cluster_message):
self._defs = {}
message.__init__(self, **cluster_message)
objects.BaseObject.__init__(self)
self.value = 'clusters/%s/Messages/%s'
self._etcd_cls = _ClusterMessageEtcd
class _ClusterMessageEtcd(etcdobj.EtcdObj):
"""Cluster message object, lazily updated
"""
__name__ = 'clusters/%s/Messages/%s'
_tendrl_cls = ClusterMessage
def render(self):
self.__name__ = self.__name__ % (
self.cluster_id, self.message_id
)
return super(_ClusterMessageEtcd, self).render()
| Python | 0.000072 |
7fddacd1a751c095f70693bb703bb9959a706ae1 | Add an example with end to end data | example.py | example.py | """
Example script for getting events over a Zaqar queue.
To run:
$ export IDENTITY_API_VERSION=3
$ source ~/devstack/openrc
$ python example.py
"""
import json
import os
import uuid
import requests
import websocket
from keystoneauth1.identity import v3
from keystoneauth1 import session
client_id = str(uuid.uuid4())
def authenticate(ws, token, project_id):
    """Send a Zaqar websocket 'authenticate' request; return the raw reply."""
    payload = {
        'action': 'authenticate',
        'headers': {'X-Auth-Token': token,
                    'Client-ID': client_id,
                    'X-Project-ID': project_id},
    }
    ws.send(json.dumps(payload))
    return ws.recv()
def send_message(ws, project_id, action, body=None):
    """Issue a Zaqar websocket request and return the decoded JSON reply."""
    headers = {'Client-ID': client_id, 'X-Project-ID': project_id}
    request = {'action': action, 'headers': headers}
    if body:
        request['body'] = body
    ws.send(json.dumps(request))
    return json.loads(ws.recv())
def main():
    # End-to-end demo: authenticate with Keystone, create a Nabu
    # subscription, then listen for events over a Zaqar websocket queue.
    auth_url = os.environ.get('OS_AUTH_URL')
    user = os.environ.get('OS_USERNAME')
    password = os.environ.get('OS_PASSWORD')
    project = os.environ.get('OS_PROJECT_NAME')
    auth = v3.Password(auth_url=auth_url,
                       username=user,
                       user_domain_name='default',
                       password=password,
                       project_name=project,
                       project_domain_name='default')
    sess = session.Session(auth=auth)
    token = auth.get_token(sess)
    project_id = auth.get_project_id(project)
    # Subscribe compute notifications to the 'nabu_queue' Zaqar target.
    nabu_url = auth.get_endpoint(sess, service_type='subscription')
    requests.post('%s/v1/subscription' % (nabu_url,),
                  data=json.dumps({'source': 'compute',
                                   'target': 'nabu_queue'}),
                  headers={'X-Auth-Token': token,
                           'Content-Type': 'application/json'})
    ws_url = auth.get_endpoint(sess, service_type='messaging-websocket')
    ws = websocket.create_connection(ws_url.replace('http', 'ws'))
    authenticate(ws, token, project_id)
    send_message(ws, project_id, 'queue_create', {'queue_name': 'nabu_queue'})
    send_message(ws, project_id, 'subscription_create',
                 {'queue_name': 'nabu_queue', 'ttl': 3000})
    # Block forever, draining incoming event frames.
    while True:
        ws.recv()
if __name__ == '__main__':
    main()
| Python | 0.00006 | |
e1907624a143d0733cd89e5458d104ed0a4fee43 | Add simple tasks | fabfile.py | fabfile.py | # Simple Tasks
def hello():
print 'Hello ThaiPy!'
def hi(name='Kan'):
print 'Hi ' + name
| Python | 0.999917 | |
50769229ce8ef4e84f345184b0aebf036bc0e179 | add fabfile | fabfile.py | fabfile.py | from fabric.api import local, put, run, cd, sudo
def status():
run("systemctl status web")
def restart():
sudo("systemctl restart web")
def deploy():
local('tar -czf cydev_web.tgz web static/')
put("cydev_web.tgz", "~/cydev.ru")
with cd("~/cydev.ru"):
run("tar -xvf cydev_web.tgz")
restart()
status()
| Python | 0.000002 | |
fe0d8aa2e8293a14c9f2b0ac9fe9c51a99b75f16 | Make gallery images a bit smaller. | docs/source/notebook_gen_sphinxext.py | docs/source/notebook_gen_sphinxext.py | #
# Generation of RST from notebooks
#
import glob
import os
import os.path
import warnings
warnings.simplefilter('ignore')
from nbconvert.exporters import rst
def setup(app):
    """Sphinx extension entry point: regenerate notebook RST at build start."""
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # Run generate_rst once the builder is initialized.
    app.connect('builder-inited', generate_rst)
    return dict(
        version='0.1',
        parallel_read_safe=True,
        parallel_write_safe=True
    )
notebook_source_dir = '../../examples/notebooks'
generated_source_dir = 'examples/generated'
def nb_to_rst(nb_path):
    """Convert one notebook to reStructuredText.

    Returns (rst_text, resources); resources['metadata'] is extended with
    basename/name/imgdir consumed by write_nb().  Fix: the notebook file
    handle is now closed via a context manager instead of being leaked by
    a bare open() call.
    """
    exporter = rst.RSTExporter()
    with open(nb_path) as nb_file:
        out, resources = exporter.from_file(nb_file)
    basename = os.path.splitext(os.path.basename(nb_path))[0]
    imgdir = basename + '_files'
    img_prefix = os.path.join(imgdir, basename + '_')
    resources['metadata']['basename'] = basename
    resources['metadata']['name'] = basename.replace('_', ' ')
    resources['metadata']['imgdir'] = imgdir
    base_url = ('http://nbviewer.ipython.org/github/metpy/MetPy/blob/master/'
                'examples/notebooks/')
    # Link back to the runnable notebook, then rewrite image paths so each
    # notebook's outputs live in their own <basename>_files directory.
    out_lines = ['`Notebook <%s>`_' % (base_url + os.path.basename(nb_path))]
    for line in out.split('\n'):
        if line.startswith('.. image:: '):
            line = line.replace('output_', img_prefix)
        out_lines.append(line)
    out = '\n'.join(out_lines)
    return out, resources
def write_nb(dest, output, resources):
    # Write the generated RST (plus its output images) for one notebook
    # under *dest*, using the metadata populated by nb_to_rst().
    if not os.path.exists(dest):
        os.makedirs(dest)
    rst_file = os.path.join(dest,
                            resources['metadata']['basename'] + resources['output_extension'])
    name = resources['metadata']['name']
    with open(rst_file, 'w') as rst:
        # NOTE(review): bytes are written to a text-mode handle -- this is
        # Python 2 style and would raise TypeError under Python 3.
        header = '=' * len(name)
        rst.write(header.encode('utf-8') + b'\n')
        rst.write(name.encode('utf-8') + b'\n')
        rst.write(header.encode('utf-8') + b'\n')
        rst.write(output.encode('utf-8'))
    imgdir = os.path.join(dest, resources['metadata']['imgdir'])
    if not os.path.exists(imgdir):
        os.makedirs(imgdir)
    basename = resources['metadata']['basename']
    for filename in resources['outputs']:
        img_file = os.path.join(imgdir, filename.replace('output_', basename + '_'))
        with open(img_file, 'wb') as img:
            img.write(resources['outputs'][filename])
def generate_rst(app):
    # Convert every example notebook, then build examples/index.rst as a
    # thumbnail gallery linking to each generated page.
    for fname in glob.glob(os.path.join(app.srcdir, notebook_source_dir, '*.ipynb')):
        write_nb(os.path.join(app.srcdir, generated_source_dir), *nb_to_rst(fname))
    with open(os.path.join(app.srcdir, 'examples', 'index.rst'), 'w') as test:
        test.write('==============\n''MetPy Examples\n''==============\n'
                   '.. toctree::\n    :glob:\n    :hidden:\n\n    generated/*\n\n')
        no_images = []
        # NOTE(review): `dir` and `file` shadow builtins here.
        for fname in glob.glob(os.path.join(app.srcdir, generated_source_dir, '*.rst')):
            filepath, filename = os.path.split(fname)
            target = filename.replace('.rst', '.html')
            dir = os.listdir(os.path.join(app.srcdir, generated_source_dir, filename.replace('.rst', '_files')))
            if dir:
                # Use the notebook's first output image as the thumbnail.
                file = dir[0]
                test.write('.. image:: generated/' + filename.replace('.rst', '_files') + '/' + file +
                           '\n    :width: 220px'
                           '\n    :target: generated/' + target + '\n\n')
            else:
                no_images.append(target)
        # Notebooks without images get plain text links at the bottom.
        for filename in no_images:
            test.write('`' + filename.replace('_', ' ').replace('.html', '') +
                       ' <generated/' + filename + '>`_\n\n')
| #
# Generation of RST from notebooks
#
import glob
import os
import os.path
import warnings
warnings.simplefilter('ignore')
from nbconvert.exporters import rst
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
app.connect('builder-inited', generate_rst)
return dict(
version='0.1',
parallel_read_safe=True,
parallel_write_safe=True
)
notebook_source_dir = '../../examples/notebooks'
generated_source_dir = 'examples/generated'
def nb_to_rst(nb_path):
"""convert notebook to restructured text"""
exporter = rst.RSTExporter()
out, resources = exporter.from_file(open(nb_path))
basename = os.path.splitext(os.path.basename(nb_path))[0]
imgdir = basename + '_files'
img_prefix = os.path.join(imgdir, basename + '_')
resources['metadata']['basename'] = basename
resources['metadata']['name'] = basename.replace('_', ' ')
resources['metadata']['imgdir'] = imgdir
base_url = ('http://nbviewer.ipython.org/github/metpy/MetPy/blob/master/'
'examples/notebooks/')
out_lines = ['`Notebook <%s>`_' % (base_url + os.path.basename(nb_path))]
for line in out.split('\n'):
if line.startswith('.. image:: '):
line = line.replace('output_', img_prefix)
out_lines.append(line)
out = '\n'.join(out_lines)
return out, resources
def write_nb(dest, output, resources):
if not os.path.exists(dest):
os.makedirs(dest)
rst_file = os.path.join(dest,
resources['metadata']['basename'] + resources['output_extension'])
name = resources['metadata']['name']
with open(rst_file, 'w') as rst:
header = '=' * len(name)
rst.write(header.encode('utf-8') + b'\n')
rst.write(name.encode('utf-8') + b'\n')
rst.write(header.encode('utf-8') + b'\n')
rst.write(output.encode('utf-8'))
imgdir = os.path.join(dest, resources['metadata']['imgdir'])
if not os.path.exists(imgdir):
os.makedirs(imgdir)
basename = resources['metadata']['basename']
for filename in resources['outputs']:
img_file = os.path.join(imgdir, filename.replace('output_', basename + '_'))
with open(img_file, 'wb') as img:
img.write(resources['outputs'][filename])
def generate_rst(app):
for fname in glob.glob(os.path.join(app.srcdir, notebook_source_dir, '*.ipynb')):
write_nb(os.path.join(app.srcdir, generated_source_dir), *nb_to_rst(fname))
with open(os.path.join(app.srcdir, 'examples', 'index.rst'), 'w') as test:
test.write('==============\n''MetPy Examples\n''==============\n'
'.. toctree::\n :glob:\n :hidden:\n\n generated/*\n\n')
no_images = []
for fname in glob.glob(os.path.join(app.srcdir, generated_source_dir, '*.rst')):
filepath, filename = os.path.split(fname)
target = filename.replace('.rst', '.html')
dir = os.listdir(os.path.join(app.srcdir, generated_source_dir, filename.replace('.rst', '_files')))
if dir:
file = dir[0]
test.write('.. image:: generated/' + filename.replace('.rst', '_files') + '/' + file +
'\n :height: 300px'
'\n :width: 375px'
'\n :target: generated/' + target + '\n\n')
else:
no_images.append(target)
for filename in no_images:
test.write('`' + filename.replace('_', ' ').replace('.html', '') +
' <generated/' + filename + '>`_\n\n')
| Python | 0 |
2af8c695c1463c080ce8c4bff7e3d81662a49c81 | implement generic decorator and register function | dispatk.py | dispatk.py | """
This function is inspired by singledispatch of Python 3.4+ (PEP 443),
but the dispatch happens on the key extracted fro the arguments values.
from dispatk import dispatk
@dispatk(lambda n: int(n))
def fib(n):
return fib(n-1) + fib(n-2)
@fib.register(0)
def _(n):
return 0
@fib.register(1, 2)
def _(n):
return 1
@fib.register(41)
def _(n):
return 165580141
*register* accepts one or more keys.
@fib.register(1, 2)
def _(n):
return 1
is equivalent to
@fib.register(1)
@fib.register(2)
def _(n):
return 1
"""
from functools import wraps
__all__ = ('dispatk',)
def dispatk(keyer):
    """Decorator for the generic function.  Its single argument *keyer*
    is called with the same arguments as the function call and must
    return a hashable object (int, tuple, etc.).

    The generic function gains a *register* method used to decorate the
    implementation for one or more specific keys; *register* returns the
    decorated function unchanged.
    """
    calls = {}
    def _dispatk(main):
        def register(*keys):
            def _register(spec):
                for key in keys:
                    if key in calls:
                        # BUG FIX: the original format string had a single
                        # %r but a two-item tuple, so raising this error
                        # itself raised TypeError.
                        raise ValueError(
                            "%s: function already registered for %r"
                            % (main.__name__, key))
                    calls[key] = spec
                return spec
            return _register
        @wraps(main)
        def run(*args, **kwargs):
            # Dispatch on the computed key; fall back to the generic impl.
            return calls.get(keyer(*args, **kwargs), main)(*args, **kwargs)
        run.register = register
        return run
    return _dispatk
| Python | 0 | |
e823c55f62c8aa1d72ec3bf2b58288b3dd413561 | Create radix_sort.py | sorts/radix_sort.py | sorts/radix_sort.py | def radixsort(lst):
RADIX = 10
maxLength = False
tmp , placement = -1, 1
while not maxLength:
maxLength = True
# declare and initialize buckets
buckets = [list() for _ in range( RADIX )]
# split lst between lists
for i in lst:
tmp = i / placement
buckets[tmp % RADIX].append( i )
if maxLength and tmp > 0:
maxLength = False
# empty lists into lst array
a = 0
for b in range( RADIX ):
buck = buckets[b]
for i in buck:
lst[a] = i
a += 1
# move to next
placement *= RADIX
| Python | 0.000003 | |
2b810eb1900ca96c7fb2d8b63b70b7b0df8b9ed5 | Create find_digits.py | algorithms/implementation/python3/find_digits.py | algorithms/implementation/python3/find_digits.py | #!/bin/python3
import sys
t = int(input().strip())
for a0 in range(t):
n = int(input().strip())
count = 0
digits = str(n)
for digit in digits:
if int(digit) != 0:
if n % int(digit) == 0:
count += 1
print(count)
| Python | 0.998631 | |
c723865ae8013020f6f0a28cd41592c3dc900968 | add a second test for process_dc_env. | tests/process_dc_env_test_2.py | tests/process_dc_env_test_2.py | #!/usr/bin/env python
import sys
import os
import argparse
# There is a PEP8 warning about this next line not being at the top of the file.
# The better answer is to append the $dcUTILS/scripts directory to the sys.path
# but I wanted to illustrate it here...so your mileage may vary how you want
from process_dc_env import pythonGetEnv
# ==============================================================================
"""
This script provides an example of how to use the process_dc_env.py in a python
script. In a python script, the pythonGetEnv is imported from the
process_dc_env script and then called directly in the script. That function will
do the necessary handling of some of the arguments on behalf of the python
script. Any other arguments passed in are ignored by the process_dc_env script
and it is expected that the python script would handle the rest of them. The
pythonGetEnv will return a environment list presented in a dictionary with the
environment variable set as the key and the value, is, well, the value.
Note that the argparse statement for processing arguments needs to be a bit
different than what you probably normally use. We need to ignore some of the
commands that are processed in the proces_dc_env.py (ie appName, env and
workspaceName if used). to do this use parse_known_args instead of parse_args
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
def checkArgs():
    """Parse the options this script owns; return (foo, workspaceName).

    parse_known_args() is used instead of parse_args() so the arguments
    consumed by process_dc_env.py (appName, env, ...) are silently
    ignored here rather than treated as errors.
    """
    parser = argparse.ArgumentParser(
        description='Script that provides a facility to watch for file ' +
        'changes and then perform actions based upon the files' +
        ' that change.')
    parser.add_argument('-f', '--foo', help='foo option',
                        required=False)
    parser.add_argument('-w', '--workspaceName', help='The alternate ' +
                        'directory name to find the application env files ' +
                        'in. This will not change the .dcConfig/' +
                        'baseDiretory file but will read it for the ' +
                        'alternate path and use it directly',
                        required=False)
    known, _unknown = parser.parse_known_args()
    return (known.foo, known.workspaceName)
def main(argv):
    # NOTE(review): Python 2 print statements below -- this script targets
    # Python 2.
    # for manageApp.py only ... or designed to only be used by manageApp.py
    # retVals = pythonGetEnv(initialCreate=True)
    # normal call for all other python scripts
    try:
        (foo, workspaceName) = checkArgs()
    except SystemExit:
        # argparse exits on bad options; still run pythonGetEnv so its
        # own usage/handling is exercised, then propagate a failure code.
        pythonGetEnv()
        sys.exit(1)
    retVals = pythonGetEnv()
    print "=>{}<=".format(retVals)
    print "foo={}".format(foo)
    print "workspaceName={}".format(workspaceName)
    print "CUSTOMER_APP_NAME=" + retVals["CUSTOMER_APP_NAME"]
    print "ENV=" + retVals["ENV"]
if __name__ == "__main__":
    main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| Python | 0 | |
51aefefc3cdcd131678e921a29b5acd5b9601b81 | add a unit-tests that essentially import the the python python file in src/dynamic_graph/ | tests/python/python_imports.py | tests/python/python_imports.py | #!/usr/bin/env python
import importlib
import unittest
class PythonImportTest(unittest.TestCase):
def test_math_small_entities(self):
try:
import dynamic_graph.sot.core.math_small_entities
except ImportError as ie:
self.fail(str(ie))
def test_feature_position_relative(self):
try:
import dynamic_graph.sot.core.feature_position_relative
except ImportError as ie:
self.fail(str(ie))
def test_feature_position(self):
try:
import dynamic_graph.sot.core.feature_position
except ImportError as ie:
self.fail(str(ie))
def test_matrix_util(self):
try:
import dynamic_graph.sot.core.matrix_util
except ImportError as ie:
self.fail(str(ie))
def test_meta_task_6d(self):
try:
import dynamic_graph.sot.core.meta_task_6d
except ImportError as ie:
self.fail(str(ie))
def test_meta_task_posture(self):
try:
import dynamic_graph.sot.core.meta_task_posture
except ImportError as ie:
self.fail(str(ie))
def test_meta_task_visual_point(self):
try:
import dynamic_graph.sot.core.meta_task_visual_point
except ImportError as ie:
self.fail(str(ie))
def test_meta_tasks_kine_relative(self):
try:
import dynamic_graph.sot.core.meta_tasks_kine_relative
except ImportError as ie:
self.fail(str(ie))
def test_meta_tasks_kine(self):
try:
import dynamic_graph.sot.core.meta_tasks_kine
except ImportError as ie:
self.fail(str(ie))
def test_meta_tasks(self):
try:
import dynamic_graph.sot.core.meta_tasks
except ImportError as ie:
self.fail(str(ie))
def test_attime(self):
try:
import dynamic_graph.sot.core.utils.attime
except ImportError as ie:
self.fail(str(ie))
def test_history(self):
try:
import dynamic_graph.sot.core.utils.history
except ImportError as ie:
self.fail(str(ie))
def test_thread_interruptible_loop(self):
try:
import dynamic_graph.sot.core.utils.thread_interruptible_loop
except ImportError as ie:
self.fail(str(ie))
def test_viewer_helper(self):
try:
import dynamic_graph.sot.core.utils.viewer_helper
except ImportError as ie:
self.fail(str(ie))
def test_viewer_loger(self):
try:
import dynamic_graph.sot.core.utils.viewer_loger
except ImportError as ie:
self.fail(str(ie))
if __name__ == '__main__':
    # Allow running this file directly as well as through a test runner.
    unittest.main()
| Python | 0 | |
606118fa4c7b203d986f37d061777beb843b278b | add consistency checker | catalog/model/check_consistency.py | catalog/model/check_consistency.py | from toolz.curried import operator
from api.eoss_api import Api
from dateutil.parser import parse
import datetime
import requests, grequests
import time
import logging
import click
from utilities import chunks
logger = logging.getLogger()
def append_data(file, data):
    """Append every item of *data* to *file*, one item per line."""
    with open(file, "a") as sink:
        sink.writelines(item + '\n' for item in data)
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version='1.0.0')
def cli(*args, **kwargs):
    # Root click group: subcommands such as `check_consistency` attach to it.
    # The docstring below doubles as the CLI help text shown by click.
    """
    EOSS catalog
    consistency checker
    check if registered external URLs exist (e.g. quicklooks, metadata or zip archives)
    """
@click.option('--api_endpoint', nargs=1, default='http://api.eoss.cloud')
@click.argument('sensor', nargs=1)
@click.argument('year', nargs=1, type=click.INT)
@cli.command('check_consistency', short_help='update catalog with exported sentinel2 metadata file')
def main(sensor, year, api_endpoint):
api = Api(api_endpoint)
aoi_nw = (-180, 90)
aoi_se = (180, -90)
aoi_ne = (aoi_se[0], aoi_nw[1])
aoi_sw = (aoi_nw[0], aoi_se[1])
aoi = [aoi_nw, aoi_ne, aoi_se, aoi_sw, aoi_nw]
for delta_day in range(293, 0, -1):
start_time = time.time()
start_date = parse('%d-01-01'% year) + datetime.timedelta(days=delta_day)
end_date = start_date + datetime.timedelta(days=1)
logger.info('Checking consistencty for %s between %s and %s' % (sensor, start_date.isoformat(), end_date.isoformat()))
# Object representation
results = api.search_dataset(aoi, 100, start_date, end_date, sensor, full_objects=False)
url_resources = list()
missing_urls = list()
missing_types = list()
wrong_urls = list()
for r in results:
if r['resources']['s3public']['zip'] != None:
url_resources.append(r['resources']['s3public']['zip'])
else:
missing_urls.append('%s:%s' % (r['tile_identifier'], r['entity_id']))
missing_types.append('zip')
if r['resources']['metadata']!= None:
url_resources.append(r['resources']['metadata'])
else:
missing_urls.append('%s:%s' % (r['tile_identifier'], r['entity_id']))
missing_types.append('metadata')
if r['resources']['quicklook'] != None:
url_resources.append(r['resources']['quicklook'])
else:
missing_urls.append('%s:%s' % (r['tile_identifier'], r['entity_id']))
missing_types.append('quicklook')
logger.info('total scans: %d' %len(url_resources))
logger.info('already missed resources: %d' %len(missing_urls))
if False:
for counter, res in enumerate(url_resources):
req = requests.head(res)
if req.status_code != requests.codes.ok:
print res, req.status_code
missing_urls.append(res)
print res
if (counter % 25) == 0:
print counter
else:
counter = 0
for url_parts in chunks(url_resources, 500):
counter+=1
rs = (grequests.head(u) for u in url_parts)
res = grequests.map(rs)
for req in res:
if req.status_code != requests.codes.ok:
print res, req.status_code
wrong_urls.append(res)
missing_types.append('zip_registered')
#print missing_urls
if len(wrong_urls) > 0:
print wrong_urls
append_data('/tmp/wrong_urls.txt', wrong_urls)
if len(missing_urls) > 0:
append_data('/tmp/missing_urls.txt', missing_urls)
if len(missing_types) > 0:
for type in ['zip_registered', 'quicklook', 'metadata', 'zip']:
logger.info('%d:%s' % (operator.countOf(missing_types, type), type))
logger.info('Executed in %f secs.' % (time.time()-start_time))
if __name__ == '__main__':
    # Hand control to the click command group.
    cli()
| Python | 0.000001 | |
f8067853546a9c25716aef6bc9f255591cb65626 | Add migration to change the project results report URL | akvo/rsr/migrations/0125_auto_20180315_0829.py | akvo/rsr/migrations/0125_auto_20180315_0829.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
# URL template stored on the project-results report before this migration.
ORIGINAL_URL = '/en/reports/project_results/{project}?format={format}&download=true'
# Same template extended with the start/end date query parameters.
NEW_URL = ORIGINAL_URL + '&p_StartDate={start_date}&p_EndDate={end_date}'
# Primary key of the "project results" Report row being migrated.
REPORT_ID = 1
def add_start_end_dates_report_url(apps, schema):
    """Forward migration: store the report URL that carries start/end dates."""
    report_model = apps.get_model('rsr', 'Report')
    report = report_model.objects.get(id=REPORT_ID)
    report.url = NEW_URL
    report.save()
def remove_start_end_dates_report_url(apps, schema):
    """Backward migration: restore the report URL without date parameters."""
    report_model = apps.get_model('rsr', 'Report')
    report = report_model.objects.get(id=REPORT_ID)
    report.url = ORIGINAL_URL
    report.save()
class Migration(migrations.Migration):
    """Data migration: add start/end date parameters to the results report URL."""
    # Must run after 0124 so the Report row exists in its expected state.
    dependencies = [
        ('rsr', '0124_auto_20180309_0923'),
    ]
    # Reversible: RunPython pairs the forward and backward URL rewrites.
    operations = [
        migrations.RunPython(add_start_end_dates_report_url, remove_start_end_dates_report_url)
    ]
| Python | 0 | |
2aa0990746b71086b4c31ee81ac8874436c63e32 | Add a few tests (close #4) | tests/test_crosslinking_bot.py | tests/test_crosslinking_bot.py | from datetime import datetime
from datetime import date, timedelta
import pytest
from crosslinking_bot import crosslinking_bot as cb
class TestParseDate:
    """parse_date renders day offsets as human-readable relative strings."""
    def test_return_today(self):
        assert 'today' == cb.parse_date(datetime.today().date())
    def test_return_1_day_ago(self):
        one_day_back = date.today() - timedelta(1)
        assert '1 day ago' == cb.parse_date(one_day_back)
    def test_return_2_days_ago(self):
        two_days_back = date.today() - timedelta(2)
        assert '2 days ago' == cb.parse_date(two_days_back)
class TestPrepareComment:
    """prepare_comment must link each related HN story and pluralize correctly.

    BUG FIX: the original test methods were missing ``self``, so the test
    instance landed in the ``hn_hits`` parameter and the fixture *function*
    was invoked directly (``hn_hits.hn_hits()``), which pytest forbids.
    Fixtures are now injected properly via the method arguments.
    """
    @pytest.fixture
    def hn_hits(self):
        # Two fake Algolia HN hits, newest first.
        return [{
            'objectID': 12135399,
            'created_at_i': 1469823139,
        },
        {
            'objectID': 12135398,
            'created_at_i': 1469821139,
        },
        ]
    def test_one_hit_contains_right_url(self, hn_hits):
        hits = hn_hits[:1]
        hn_url = cb.HN_STORY.format(hits[0]['objectID'])
        assert hn_url in cb.prepare_comment(hits)
    def test_two_hits_contain_second_url(self, hn_hits):
        hn_url = cb.HN_STORY.format(hn_hits[1]['objectID'])
        assert hn_url in cb.prepare_comment(hn_hits)
    def test_two_hits_contain_plural_form(self, hn_hits):
        assert 'discussions' in cb.prepare_comment(hn_hits)
| Python | 0.000001 | |
bf97c20edc50cbe245f49bf867406eecc843404b | Add the wsgi app handler for serverless (amending last commit) | drift/contrib/aws/lambdawsgiapp.py | drift/contrib/aws/lambdawsgiapp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module converts an AWS API Gateway proxied request to a WSGI request.
Pretty much copied verbatim from https://github.com/logandk/serverless-wsgi
"""
import base64
import os
import sys
import logging
from werkzeug.datastructures import Headers
from werkzeug.wrappers import Response
from werkzeug.urls import url_encode
from werkzeug._compat import BytesIO, string_types, to_bytes, wsgi_encoding_dance
# List of MIME types that should not be base64 encoded. MIME types within `text/*`
# are included by default.
TEXT_MIME_TYPES = [
"application/json",
"application/javascript",
"application/xml",
"application/vnd.api+json",
]
# The logger is already configured at this point by the lambda thunker so we need to reset it.
root = logging.getLogger()
if root.handlers:
    # Drop the handlers the Lambda runtime pre-installed so the basicConfig
    # call below actually takes effect.
    for handler in root.handlers:
        root.removeHandler(handler)
logging.basicConfig(format='[%(levelname)s] %(name)s: %(message)s', level=logging.INFO)
# Log out import and app factory exceptions explicitly with traceback because the AWS
# host is not terribly keen on doing so.
try:
    from drift.flaskfactory import drift_app
    app = drift_app()
except Exception:
    # NOTE(review): `app` is left unbound on failure, so handler() would then
    # raise NameError on the first request -- presumably intentional, confirm.
    logging.exception("Can't create Drift app object.")
def handler(event, context):
    """AWS Lambda entry point: proxy the API Gateway event through the WSGI app."""
    return handle_request(app, event, context)
def all_casings(input_string):
    """Yield every upper/lower-case permutation of *input_string*.

    Characters without case (digits, punctuation) contribute one variant.
    A pretty algorithm, via @Amber:
    http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
    """
    if not input_string:
        yield ""
        return
    head, tail = input_string[0], input_string[1:]
    # An uncased head has a single form; a cased one has two, lower-case
    # first, which preserves the original emission order.
    variants = [head] if head.lower() == head.upper() else [head.lower(), head.upper()]
    for suffix in all_casings(tail):
        for variant in variants:
            yield variant + suffix
def handle_request(app, event, context):
    """Translate an API Gateway proxy *event* into a WSGI request, invoke
    *app*, and convert the WSGI response into the API Gateway response dict.
    :param app: the WSGI application object.
    :param event: the API Gateway proxy-integration event.
    :param context: the Lambda context (exposed to the app via ``environ``).
    :returns: dict with ``statusCode``, ``headers`` and optionally ``body``.
    """
    # Warm-up pings are not real HTTP requests; answer with an empty dict.
    if event.get("source") in ["aws.events", "serverless-plugin-warmup"]:
        return {}
    headers = Headers(event[u"headers"])
    # On the default amazonaws.com domain the deployment stage prefixes the
    # path, so it becomes SCRIPT_NAME; custom domains carry no stage prefix.
    if u"amazonaws.com" in headers.get(u"Host", u""):
        script_name = "/{}".format(event[u"requestContext"].get(u"stage", ""))
    else:
        script_name = ""
    # If a user is using a custom domain on API Gateway, they may have a base
    # path in their URL. This allows us to strip it out via an optional
    # environment variable.
    path_info = event[u"path"]
    base_path = os.environ.get("API_GATEWAY_BASE_PATH", "")
    if base_path:
        script_name = "/" + base_path
        if path_info.startswith(script_name):
            path_info = path_info[len(script_name) :]
    # API Gateway base64-encodes binary request bodies; WSGI wants raw bytes.
    body = event[u"body"] or ""
    if event.get("isBase64Encoded", False):
        body = base64.b64decode(body)
    if isinstance(body, string_types):
        body = to_bytes(body, charset="utf-8")
    environ = {
        "API_GATEWAY_AUTHORIZER": event[u"requestContext"].get(u"authorizer"),
        "CONTENT_LENGTH": str(len(body)),
        "CONTENT_TYPE": headers.get(u"Content-Type", ""),
        "PATH_INFO": path_info,
        "QUERY_STRING": url_encode(event.get(u"queryStringParameters") or {}),
        "REMOTE_ADDR": event[u"requestContext"]
        .get(u"identity", {})
        .get(u"sourceIp", ""),
        "REMOTE_USER": event[u"requestContext"]
        .get(u"authorizer", {})
        .get(u"principalId", ""),
        "REQUEST_METHOD": event[u"httpMethod"],
        "SCRIPT_NAME": script_name,
        "SERVER_NAME": headers.get(u"Host", "lambda"),
        "SERVER_PORT": headers.get(u"X-Forwarded-Port", "80"),
        "SERVER_PROTOCOL": "HTTP/1.1",
        "event": event,
        "context": context,
        "wsgi.errors": sys.stderr,
        "wsgi.input": BytesIO(body),
        "wsgi.multiprocess": False,
        "wsgi.multithread": False,
        "wsgi.run_once": False,
        "wsgi.url_scheme": headers.get(u"X-Forwarded-Proto", "http"),
        "wsgi.version": (1, 0),
    }
    # WSGI requires environ string values in the platform's "native string"
    # encoding; werkzeug's wsgi_encoding_dance handles the conversion.
    for key, value in environ.items():
        if isinstance(value, string_types):
            environ[key] = wsgi_encoding_dance(value)
    # Request headers become HTTP_* environ keys; content type/length are
    # excluded because they use the dedicated CONTENT_* keys above.
    for key, value in headers.items():
        key = "HTTP_" + key.upper().replace("-", "_")
        if key not in ("HTTP_CONTENT_TYPE", "HTTP_CONTENT_LENGTH"):
            environ[key] = value
    response = Response.from_app(app, environ)
    # If there are multiple Set-Cookie headers, create case-mutated variations
    # in order to pass them through APIGW. This is a hack that's currently
    # needed. See: https://github.com/logandk/serverless-wsgi/issues/11
    # Source: https://github.com/Miserlou/Zappa/blob/master/zappa/middleware.py
    new_headers = [x for x in response.headers if x[0] != "Set-Cookie"]
    cookie_headers = [x for x in response.headers if x[0] == "Set-Cookie"]
    if len(cookie_headers) > 1:
        for header, new_name in zip(cookie_headers, all_casings("Set-Cookie")):
            new_headers.append((new_name, header[1]))
    elif len(cookie_headers) == 1:
        new_headers.extend(cookie_headers)
    returndict = {u"statusCode": response.status_code, u"headers": dict(new_headers)}
    # Text-like, uncompressed bodies pass through as text; everything else is
    # base64-encoded and flagged so API Gateway decodes it for the client.
    if response.data:
        mimetype = response.mimetype or "text/plain"
        if (
            mimetype.startswith("text/") or mimetype in TEXT_MIME_TYPES
        ) and not response.headers.get("Content-Encoding", ""):
            returndict["body"] = response.get_data(as_text=True)
        else:
            returndict["body"] = base64.b64encode(response.data).decode("utf-8")
            returndict["isBase64Encoded"] = "true"
    return returndict
| Python | 0 | |
3a178c100cbf64b8ab60954a9b9ea5a01640f842 | Integrate LLVM at llvm/llvm-project@852d84e36ed7 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.
    Pins llvm-project to the commit in LLVM_COMMIT (integrity-checked against
    LLVM_SHA256) and layers the local Bazel build/toolchain patches on top.
    """
    LLVM_COMMIT = "852d84e36ed7a3db0ff4719f44a12b6bc09d35f3"
    LLVM_SHA256 = "3def20f54714c474910e5297b62639121116254e9e484ccee04eee6815b5d58c"
    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # The TensorFlow mirror is tried first; GitHub is the fallback.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:llvm.BUILD",
        patch_file = [
            "//third_party/llvm:build.patch",
            "//third_party/llvm:mathextras.patch",
            "//third_party/llvm:toolchains.patch",
            "//third_party/llvm:temporary.patch",  # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
        ],
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "0128f8016770655fe7a40d3657f00853e6badb93"
LLVM_SHA256 = "f90705c878399b7dccca9cf9b28d695a4c6f8a0e12f2701f7762265470fa6c22"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:mathextras.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| Python | 0.000001 |
52f49543dd7bf01a2a24db435d8461b7c8921789 | Integrate LLVM at llvm/llvm-project@9a764ffeb6f0 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "9a764ffeb6f06a87c7ad482ae39f8a38b3160c5e"
LLVM_SHA256 = "8f000d6541d64876de8ded39bc140176c90b74c3961b9ca755b1fed44423c56b"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "72136d8ba266eea6ce30fbc0e521c7b01a13b378"
LLVM_SHA256 = "54d179116e7a79eb1fdf7819aad62b4d76bc0e15e8567871cae9b675f7dec5c1"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| Python | 0.000001 |
9365d95e8f739c8c13bf0520ac20ad07a3387a42 | Avoid building blink_heap_unittests to unblock the Blink roll | public/all.gyp | public/all.gyp | #
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'includes': [
'../Source/build/features.gypi',
],
'targets': [
{
'target_name': 'all_blink',
'type': 'none',
'dependencies': [
'../Source/testing/testing.gyp:TestRunner_resources',
# FIXME: This test doesn't link properly. Commenting it out to
# unblock the Blink roll. See crbug.com/332220.
#'../Source/heap/blink_heap_tests.gyp:blink_heap_unittests',
'../Source/platform/blink_platform_tests.gyp:blink_platform_unittests',
'../Source/web/web_tests.gyp:webkit_unit_tests',
'../Source/wtf/wtf_tests.gyp:wtf_unittests',
],
'conditions': [
# Special target to wrap a gtest_target_type==shared_library
# webkit_unit_tests into an android apk for execution. See
# base.gyp for TODO(jrg)s about this strategy.
['OS=="android" and android_webview_build==0 and gtest_target_type == "shared_library"', {
'dependencies': [
'../Source/web/web_tests.gyp:webkit_unit_tests_apk',
],
}],
],
},
],
}
| #
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
{
'includes': [
'../Source/build/features.gypi',
],
'targets': [
{
'target_name': 'all_blink',
'type': 'none',
'dependencies': [
'../Source/testing/testing.gyp:TestRunner_resources',
'../Source/heap/blink_heap_tests.gyp:blink_heap_unittests',
'../Source/platform/blink_platform_tests.gyp:blink_platform_unittests',
'../Source/web/web_tests.gyp:webkit_unit_tests',
'../Source/wtf/wtf_tests.gyp:wtf_unittests',
],
'conditions': [
# Special target to wrap a gtest_target_type==shared_library
# webkit_unit_tests into an android apk for execution. See
# base.gyp for TODO(jrg)s about this strategy.
['OS=="android" and android_webview_build==0 and gtest_target_type == "shared_library"', {
'dependencies': [
'../Source/web/web_tests.gyp:webkit_unit_tests_apk',
],
}],
],
},
],
}
| Python | 0.998657 |
45254d35def51a5e8936fe649f8c3fc089cd4a6d | add `schemas.py` | todo/schemas.py | todo/schemas.py | """Request/Response Schemas are defined here"""
# pylint: disable=invalid-name
from marshmallow import Schema, fields
from marshmallow_enum import EnumField
from todo.enums import Status
class TaskSchema(Schema):
    """Request/response serialization schema for todo Task objects.
    NOTE(review): the previous docstring referenced api.portal.models.Panel,
    which looks like a copy/paste leftover from another schema.
    """
    id = fields.Int(required=True)
    title = fields.Str(required=True)
    description = fields.Str(required=True)
    status = EnumField(Status, required=True)  # enum handled by marshmallow_enum
    created_at = fields.DateTime(required=True)
    updated_at = fields.DateTime(required=True)
| Python | 0.000001 | |
4f9660704445e6da62fc4e893d93fc84288303d4 | Integrate LLVM at llvm/llvm-project@aec908f9b248 | third_party/llvm/workspace.bzl | third_party/llvm/workspace.bzl | """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "aec908f9b248b27cb44217081c54e2c00604dff7"
LLVM_SHA256 = "c88b75b4d60b960c7da65b7bacfdf8c5cf4c7846ab85a334f1ff18a8b50f2d98"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| """Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "5dcd6afa20881490b38f3d88c4e59b0b4ff33551"
LLVM_SHA256 = "86f64f78ba3b6c7e8400fe7f5559b3dd110b9a4fd9bfe9e5ea8a4d27301580e0"
tf_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
build_file = "//third_party/llvm:llvm.BUILD",
patch_file = [
"//third_party/llvm:build.patch",
"//third_party/llvm:toolchains.patch",
"//third_party/llvm:temporary.patch", # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
],
link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
)
| Python | 0.000001 |
735dee2da41bf8df8519d516bd9b231ff440f5f9 | Create globals.system module for Python & system related settings | source/globals/system.py | source/globals/system.py | # -*- coding: utf-8 -*-
## \package globals.system
# MIT licensing
# See: LICENSE.txt
import sys
# Components of the running interpreter's version, split out for convenience.
PY_VER_MAJ, PY_VER_MIN, PY_VER_REL = sys.version_info[:3]
# Dotted "major.minor.release" form, e.g. u'3.10.12'.
PY_VER_STRING = u'{}.{}.{}'.format(PY_VER_MAJ, PY_VER_MIN, PY_VER_REL)
| Python | 0 | |
b83b09f937f91a870165d88730a36faaee8a5261 | add a parser of metadata | retsmeta.py | retsmeta.py | # -*- coding: utf-8 -*-
from xml.etree import ElementTree
class MetaParser(object):
    """Abstract interface for reading RETS metadata; see StandardXmlMetaParser."""
    def GetResources(self):
        """Return the resources described by the metadata (stub: returns None)."""
        pass
    def GetRetsClass(self, resource):
        """Return the classes belonging to *resource* (stub: returns None)."""
        pass
    def GetTables(self, resource, rets_class):
        """Return the table definitions for *rets_class* (stub: returns None)."""
        pass
    def GetLookUp(self, resource, rets_class):
        """Return the lookup definitions for *rets_class* (stub: returns None)."""
        pass
class StandardXmlMetaParser(MetaParser):
    """MetaParser implementation backed by a standard RETS metadata XML file."""
    def __init__(self, filepath):
        with open(filepath, 'r') as f:
            xml_str = f.read()
        self.meta_xml = ElementTree.fromstring(xml_str)
    def _resource_elements(self):
        # All <Resource> elements under METADATA/METADATA-SYSTEM/SYSTEM/METADATA-RESOURCE.
        return self.meta_xml.find('METADATA').find('METADATA-SYSTEM').find('SYSTEM').find('METADATA-RESOURCE').findall('Resource')
    def GetResources(self):
        """Return a RetsResource for every <Resource> element in the metadata."""
        resource_list = []
        for resource_xml in self._resource_elements():
            resource = RetsResource()
            resource.resource_id = resource_xml.find('ResourceID').text
            resource_list.append(resource)
        return resource_list
    def GetRetsClass(self, resource):
        """Return the RetsClass objects of the resource whose ResourceID equals *resource*.
        Fixes two defects in the original: it compared the <ResourceID> Element
        object (not its text) against *resource*, and the inner loop body was
        left empty, which is a syntax error.
        """
        class_list = []
        for resource_xml in self._resource_elements():
            if resource_xml.find('ResourceID').text == resource:
                for class_xml in resource_xml.findall('Class'):
                    rets_class = RetsClass()
                    # NOTE(review): assumes the class name lives in a <ClassName>
                    # element, per the standard RETS metadata layout -- confirm.
                    rets_class.rets_classname = class_xml.find('ClassName').text
                    class_list.append(rets_class)
        return class_list
    def GetTables(self, resource, rets_class):
        # Not implemented yet (stub behavior preserved: returns None).
        pass
    def GetLookUp(self, resource, rets_class):
        # Not implemented yet (stub behavior preserved: returns None).
        pass
class RetsResource(object):
    """Value object for one RETS <Resource> entry."""
    def __init__(self):
        self.resource_id = None  # text of the <ResourceID> element
class RetsClass(object):
    """Value object for one RETS <Class> entry."""
    def __init__(self):
        self.rets_classname = None  # class name, filled in by the parser
class RetsTable(object):
    """Value object for one RETS table (field) entry."""
    def __init__(self):
        self.system_name = None  # system name of the field, filled in by the parser
| Python | 0.000118 | |
f2e5c56297a00ebf4b5029b702f8441adca83a8e | Update 'systemd' module from oslo-incubator | cinder/openstack/common/systemd.py | cinder/openstack/common/systemd.py | # Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import logging
import os
import socket
import sys
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
    """Send notification to Systemd that service is ready.
    For details see
    http://www.freedesktop.org/software/systemd/man/sd_notify.html
    """
    # unset_env=False: NOTIFY_SOCKET is kept, so this may be called repeatedly.
    _sd_notify(False, 'READY=1')
def notify_once():
    """Send notification once to Systemd that service is ready.
    Systemd sets NOTIFY_SOCKET environment variable with the name of the
    socket listening for notifications from services.
    This method removes the NOTIFY_SOCKET environment variable to ensure
    notification is sent only once.
    """
    # unset_env=True: _sd_notify drops NOTIFY_SOCKET after a successful send.
    _sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
    """Wait for a systemd-style readiness notification on *notify_socket*.
    :param notify_socket: local socket address
    :type notify_socket:  string
    :param timeout:       socket timeout
    :type timeout:        float
    :returns:             0 service ready
                          1 service not ready
                          2 timeout occurred
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    sock.settimeout(timeout)
    sock.bind(_abstractify(notify_socket))
    try:
        message = sock.recv(512)
    except socket.timeout:
        return 2
    finally:
        sock.close()
    return 0 if 'READY=1' in message else 1
if __name__ == '__main__':
    # simple CLI for testing
    if len(sys.argv) == 1:
        # No arguments: send a readiness notification.
        notify()
    elif len(sys.argv) >= 2:
        # With a timeout argument: wait for a notification instead and exit
        # with the onready() status code.
        timeout = float(sys.argv[1])
        notify_socket = os.getenv('NOTIFY_SOCKET')
        if notify_socket:
            retval = onready(notify_socket, timeout)
            sys.exit(retval)
| # Copyright 2012-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper module for systemd service readiness notification.
"""
import os
import socket
import sys
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _abstractify(socket_name):
if socket_name.startswith('@'):
# abstract namespace socket
socket_name = '\0%s' % socket_name[1:]
return socket_name
def _sd_notify(unset_env, msg):
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
sock.connect(_abstractify(notify_socket))
sock.sendall(msg)
if unset_env:
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
LOG.debug("Systemd notification failed", exc_info=True)
finally:
sock.close()
def notify():
"""Send notification to Systemd that service is ready.
For details see
http://www.freedesktop.org/software/systemd/man/sd_notify.html
"""
_sd_notify(False, 'READY=1')
def notify_once():
"""Send notification once to Systemd that service is ready.
Systemd sets NOTIFY_SOCKET environment variable with the name of the
socket listening for notifications from services.
This method removes the NOTIFY_SOCKET environment variable to ensure
notification is sent only once.
"""
_sd_notify(True, 'READY=1')
def onready(notify_socket, timeout):
"""Wait for systemd style notification on the socket.
:param notify_socket: local socket address
:type notify_socket: string
:param timeout: socket timeout
:type timeout: float
:returns: 0 service ready
1 service not ready
2 timeout occurred
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.settimeout(timeout)
sock.bind(_abstractify(notify_socket))
try:
msg = sock.recv(512)
except socket.timeout:
return 2
finally:
sock.close()
if 'READY=1' in msg:
return 0
else:
return 1
if __name__ == '__main__':
# simple CLI for testing
if len(sys.argv) == 1:
notify()
elif len(sys.argv) >= 2:
timeout = float(sys.argv[1])
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
retval = onready(notify_socket, timeout)
sys.exit(retval)
| Python | 0 |
bbb445b691f7370059c7bf9c94e2e9c6f4155273 | update to latest | tasks/base.py | tasks/base.py | import os
from invoke import run
class BaseTest(object):
    """Shared helpers: fetch the mspec/rubyspec checkouts next to this repo."""
    def download_mspec(self):
        # Clone ruby/mspec into ../mspec unless it is already present.
        if not os.path.isdir("../mspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/mspec")
    def download_rubyspec(self):
        # Clone ruby/spec into ../rubyspec (renamed from "spec") unless present.
        if not os.path.isdir("../rubyspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/spec")
            run("mv spec rubyspec")
| import os
from invoke import run
class BaseTest(object):
    """Shared helpers for test tasks: fetch the mspec and rubyspec repos."""

    def download_mspec(self):
        """Clone ruby/mspec into the parent directory, pinned to v1.6.0."""
        if not os.path.isdir("../mspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/mspec")
            run("cd ../mspec && git checkout v1.6.0")

    def download_rubyspec(self):
        """Clone ruby/spec into ../rubyspec if not present."""
        if not os.path.isdir("../rubyspec"):
            run("cd .. && git clone --depth=100 --quiet https://github.com/ruby/spec")
            # BUG FIX: the clone lands in the parent directory, so the rename
            # must happen there too; a bare "mv spec rubyspec" ran in the
            # current directory and failed.
            run("cd .. && mv spec rubyspec")
| Python | 0 |
50843d6a2c93be4e05a0a2da338e4b0e0d99d294 | Add tls proxy helper | jujuxaas/tls_proxy.py | jujuxaas/tls_proxy.py | import copy
import select
import socket
import ssl
import sys
import threading
import logging
logger = logging.getLogger(__name__)
class TlsProxyConnection(object):
    """One proxied connection: shuttles bytes between an accepted plaintext
    socket and a TLS connection to the upstream address.

    Runs its pump loop on a daemon thread so the server's accept loop is
    never blocked.
    """

    def __init__(self, server, inbound_socket, inbound_address, outbound_address):
        self.server = server
        self.inbound_socket = inbound_socket
        self.inbound_address = inbound_address
        self.outbound_socket = None
        self.outbound_address = outbound_address
        self.thread = None

    def start(self):
        """Start pumping data on a background daemon thread."""
        self.thread = threading.Thread(target=self._proxy)
        self.thread.daemon = True
        self.thread.start()

    def _proxy(self):
        """Copy data in both directions until either side closes."""
        try:
            self.outbound_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.outbound_socket = self.server._wrap_ssl(self.outbound_socket)
            self.outbound_socket.connect(self.outbound_address)
            logger.debug("Proxy for %s: connected to remote", self.inbound_address)

            pairs = {
                self.inbound_socket: self.outbound_socket,
                self.outbound_socket: self.inbound_socket,
            }
            selectors = [self.inbound_socket, self.outbound_socket]
            closed = False
            while not closed:
                ready, _, _ = select.select(selectors, [], [])
                for s in ready:
                    data = s.recv(8192)
                    if len(data) == 0:
                        # Peer closed.  BUG FIX: the old "break" only exited
                        # the inner for-loop, so the outer while kept
                        # select()ing on the half-closed pair and busy-looped
                        # forever; flag the outer loop to stop instead.
                        closed = True
                        break
                    else:
                        other = pairs[s]
                        other.send(data)
        except Exception:
            # BUG FIX: a bare "except:" also swallowed KeyboardInterrupt and
            # SystemExit; only ordinary errors should be logged here.
            logger.warn("Proxy for %s: error: %s", self.inbound_address, sys.exc_info())
        finally:
            logger.debug("Proxy for %s: closing", self.inbound_address)
            self.inbound_socket.close()
            if self.outbound_socket:
                self.outbound_socket.close()
class TlsProxy(object):
    """Accept plaintext TCP connections on listen_address and forward each
    one over TLS to forward_address.

    ``ssl_context`` is a dict of keyword arguments for ssl.wrap_socket
    (certfile, keyfile, ...), not an ssl.SSLContext object.
    """

    def __init__(self, ssl_context, listen_address, forward_address):
        self.listen_address = listen_address
        self.forward_address = forward_address
        self.ssl_context = ssl_context
        # Initialize the thread slot here so the attribute always exists
        # (previously it was only created inside start()).
        self.thread = None
        self._ready = threading.Event()

    def _serve(self):
        """Accept loop; runs on a daemon thread started by start()."""
        server = None
        try:
            server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server.bind(self.listen_address)
            server.listen(50)
            # Signal start() that the listening socket is bound and ready.
            self._ready.set()
            while True:
                client, client_address = server.accept()
                proxy = TlsProxyConnection(self, client, client_address,
                                           self.forward_address)
                proxy.start()
        finally:
            # BUG FIX: if bind()/listen() raised, _ready was never set and
            # start() blocked forever on _ready.wait(); always wake it up.
            self._ready.set()
            if server:
                server.close()

    def start(self):
        """Start the accept loop and block until the socket is listening."""
        self.thread = threading.Thread(target=self._serve)
        self.thread.daemon = True
        self.thread.start()
        self._ready.wait()

    def _wrap_ssl(self, socket):
        # Copy so the shared kwargs dict is never mutated per connection.
        options = copy.copy(self.ssl_context)
        options['sock'] = socket
        return ssl.wrap_socket(**options)
| Python | 0 | |
420c14d38fdddc3ed5d646a99c355b707be011fc | Add tests for ansible module | instance/tests/test_ansible.py | instance/tests/test_ansible.py | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Ansible - Tests
"""
# Imports #####################################################################
import yaml
from unittest.mock import call, patch
from django.test import TestCase
from instance import ansible
# Tests #######################################################################
class YAMLTestCase(TestCase):
    """
    Test cases for YAML helper functions
    """
    def setUp(self):
        # Two overlapping variable sets (including non-ASCII values) used to
        # exercise merge semantics: values from the second dict must win.
        self.yaml_dict1 = {
            'testa': 'firsta with unicode «ταБЬℓσ»',
            'testb': 'firstb',
            'test_dict': {
                'foo': 'firstfoo',
                'bar': 'firstbar',
            },
        }
        self.yaml_dict2 = {
            'testb': 'secondb with unicode «ταБЬℓσ2»',
            'testc': 'secondc',
            'test_dict': {
                'foo': 'secondfoo',
                'other': 'secondother',
            }
        }
        self.yaml_str1 = yaml.dump(self.yaml_dict1)
        self.yaml_str2 = yaml.dump(self.yaml_dict2)

    def test_yaml_merge(self):
        """
        Merge of two yaml strings with overlapping variables
        """
        yaml_result_str = ansible.yaml_merge(self.yaml_str1, self.yaml_str2)
        # FIX: use assertEqual -- assertEquals is a deprecated alias.
        self.assertEqual(yaml.load(yaml_result_str), {
            'testa': 'firsta with unicode «ταБЬℓσ»',
            'testb': 'secondb with unicode «ταБЬℓσ2»',
            'testc': 'secondc',
            'test_dict': {
                'foo': 'secondfoo',
                'bar': 'firstbar',
                'other': 'secondother',
            }
        })

    def test_yaml_merge_with_none(self):
        """
        Merge of a yaml string with None
        """
        self.assertEqual(ansible.yaml_merge(self.yaml_str1, None), self.yaml_str1)
class AnsibleTestCase(TestCase):
    """
    Test cases for ansible helper functions & wrappers
    """
    def test_string_to_file_path(self):
        """
        Store a string in a temporary file
        """
        test_str = 'My kewl string\nwith unicode «ταБЬℓσ», now 20% off!'
        file_path = ansible.string_to_file_path(test_str)
        # Round-trip: the file on disk must contain exactly the input string.
        with open(file_path) as fp:
            self.assertEqual(fp.read(), test_str)

    @patch('subprocess.Popen')
    @patch('instance.ansible.mkdtemp')
    @patch('instance.ansible.string_to_file_path')
    def test_run_playbook(self, mock_string_to_file_path, mock_mkdtemp, mock_popen):
        """
        Run the ansible-playbook command
        """
        # Pin the temp-file/temp-dir helpers so the expected shell command
        # below is deterministic.
        mock_string_to_file_path.return_value = '/test/str2path'
        mock_mkdtemp.return_value = '/test/mkdtemp'
        ansible.run_playbook(
            '/requirements/path.txt',
            "INVENTORY: 'str'",
            "VARS: 'str2'",
            '/play/book',
            'playbook_name_str',
        )
        # run_playbook is expected to: create a virtualenv, install the
        # requirements into it, then invoke ansible-playbook with the
        # inventory and extra-vars files produced by string_to_file_path.
        run_playbook_cmd = (
            'virtualenv -p /usr/bin/python /test/mkdtemp && '
            '/test/mkdtemp/bin/python -u /test/mkdtemp/bin/pip install -r /requirements/path.txt && '
            '/test/mkdtemp/bin/python -u /test/mkdtemp/bin/ansible-playbook -i /test/str2path '
            '-e @/test/str2path -u root playbook_name_str'
        )
        self.assertEqual(
            mock_popen.mock_calls,
            [call(run_playbook_cmd, bufsize=1, stdout=-1, cwd='/play/book', shell=True)]
        )
| Python | 0 | |
7d7fd5b167528654b9fed5b0c971c2b8110d93ea | Create wrapper_exploit.py | wrapper_exploit.py | wrapper_exploit.py | # Author: Chris Duffy
# Date: May 2015
# Purpose: An sample exploit for testing UDP services
import sys, socket, strut, subprocess
program_name = 'C:\exploit_writing\vulnerable.exe'
fill ="A"*####
eip = struct.pack('<I',0x########)
offset = "\x90"*##
available_shellcode_space = ###
shell =() #Code to insert
# NOPs to fill the remaining space
exploit = fill + eip + offset + shell
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.sendto(exploit, (rhost, rport))
subprocess.call([program_name, exploit])
| Python | 0 | |
ef24797a12e8a8919ddb11c7b6763154c5c3aad1 | transform DR script to observe exceptions | transform_DR.py | transform_DR.py | __author__ = 'kuhn'
__author__ = 'kuhn'
# FIX: __author__ was assigned twice on consecutive lines; once is enough.

from batchxslt import processor
from batchxslt import cmdiresource  # kept: imported for its side effects elsewhere
import codecs
import os

# DGD metadata sources (corpus description, events, speakers) from the
# SVN checkout at revision 1233.
dgd_corpus = "/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/corpora/extern"
dgd_events = "/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/events/extern"
dgd_speakers = "/home/kuhn/Data/IDS/svn_rev1233/dgd2_data/metadata/speakers/extern"

# XSL stylesheets converting each DGD document type to CMDI.
corpus_xsl = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdCorpus2cmdi.xsl"
event_xsl = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdEvent2cmdi.xsl"
speaker_xsl = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/xslt/dgdSpeaker2cmdi.xsl"

saxon_jar = "/home/kuhn/Data/IDS/svn/dgd2_data/dgd2cmdi/dgd2cmdi/saxon/saxon9he.jar"

# Inputs for the DR corpus only.
pf_corpus = os.path.join(dgd_corpus, 'DR--_extern.xml')
pf_events = os.path.join(dgd_events, 'DR')
pf_speakers = os.path.join(dgd_speakers, 'DR')

xsl_processor = processor.XSLBatchProcessor(saxon_jar)

xsl_processor.transform(corpus_xsl, pf_corpus, "cmdi_", '/tmp/cmdi/corpus/')
xsl_processor.transform(event_xsl, pf_events, "cmdi_", '/tmp/cmdi/events/DR/')
xsl_processor.transform(speaker_xsl, pf_speakers, "cmdi_", '/tmp/cmdi/speakers/DR/')
| Python | 0 | |
60b5228818c92f4d13b0a054956a5f834c7f7549 | Implement remove.py | programs/genesis_util/remove.py | programs/genesis_util/remove.py | #!/usr/bin/env python3
import argparse
import json
import sys
def dump_json(obj, out, pretty):
    """Serialize *obj* as JSON (keys sorted) to the stream *out*.

    pretty=True produces 2-space-indented output; otherwise the most
    compact separators are used.
    """
    options = {"indent": 2} if pretty else {"separators": (",", ":")}
    json.dump(obj, out, sort_keys=True, **options)
    return
def main():
    """CLI entry point: strip the given assets (and their balances) from a
    genesis snapshot read from --input, writing the result to --output."""
    parser = argparse.ArgumentParser(description="Remove entities from snapshot")
    parser.add_argument("-o", "--output", metavar="OUT", default="-", help="output filename (default: stdout)")
    parser.add_argument("-i", "--input", metavar="IN", default="-", help="input filename (default: stdin)")
    parser.add_argument("-a", "--asset", metavar="ASSETS", nargs="+", help="list of asset(s) to delete")
    parser.add_argument("-p", "--pretty", action="store_true", default=False, help="pretty print output")
    opts = parser.parse_args()

    # "-" means stdin/stdout, mirroring common Unix tool conventions.
    if opts.input == "-":
        genesis = json.load(sys.stdin)
    else:
        with open(opts.input, "r") as f:
            genesis = json.load(f)

    if opts.asset is None:
        opts.asset = []

    rm_asset_set = set(opts.asset)

    # Drop matching asset definitions, counting removals per symbol for the
    # summary printed to stderr below.
    removed_asset_entries = {aname : 0 for aname in opts.asset}
    new_initial_assets = []
    for asset in genesis["initial_assets"]:
        symbol = asset["symbol"]
        if symbol not in rm_asset_set:
            new_initial_assets.append(asset)
        else:
            removed_asset_entries[symbol] += 1
    genesis["initial_assets"] = new_initial_assets

    # Drop balances denominated in the removed assets, keeping the removed
    # entries so their amounts can be totalled in the report.
    removed_balance_entries = {aname : [] for aname in opts.asset}
    new_initial_balances = []
    for balance in genesis["initial_balances"]:
        symbol = balance["asset_symbol"]
        if symbol not in rm_asset_set:
            new_initial_balances.append(balance)
        else:
            removed_balance_entries[symbol].append(balance)
    genesis["initial_balances"] = new_initial_balances
    # TODO: Remove from initial_vesting_balances
    # Per-asset removal summary goes to stderr so stdout stays clean JSON.
    for aname in opts.asset:
        sys.stderr.write(
         "Asset {sym} removed {acount} initial_assets, {bcount} initial_balances totaling {btotal}\n".format(
        sym=aname,
        acount=removed_asset_entries[aname],
        bcount=len(removed_balance_entries[aname]),
        btotal=sum(int(e["amount"]) for e in removed_balance_entries[aname]),
        ))
    if opts.output == "-":
        dump_json( genesis, sys.stdout, opts.pretty )
        sys.stdout.flush()
    else:
        with open(opts.output, "w") as f:
            dump_json( genesis, f, opts.pretty )
    return
if __name__ == "__main__":
    # Allow use both as a script and as an importable module.
    main()
| Python | 0.0001 | |
6e43f611420068f0829fc64c1963ee51931b0099 | change name of data.py | node-interactions.py | node-interactions.py | import operator
from os import listdir
from os.path import isfile, join
import sys
def get_dict_of_all_contacts(datapath='flu-data/moteFiles'):
    """Parse every mote file under *datapath*.

    Each file is named "mote-<id>"; each data line has at least 5 fields:
    the peer node id first and the contact timestamp last.  Only the FIRST
    contact time per peer is kept.

    :param datapath: directory containing the mote files (parameterized for
                     testability; defaults to the original hard-coded path).
    :returns: {node_id: {peer_id: first_contact_time}}
    """
    dict_of_all_contacts = dict()
    for datafile in listdir(datapath):
        if not isfile(join(datapath, datafile)):
            continue
        node_contacts = dict()
        # BUG FIX: the old readline()-driven loop did "continue" on short
        # lines without reading the next line, spinning forever on the same
        # line.  Iterating the file directly always advances.  The "with"
        # block also guarantees the file is closed on any error.
        with open(join(datapath, datafile), 'r') as f:
            for line in f:
                numlist = line.split()
                if len(numlist) < 5:
                    continue
                node = numlist[0]
                time = int(numlist[-1])
                if node not in node_contacts:
                    node_contacts[node] = time
        # Strip the "mote-" prefix (5 chars) to obtain the node id.
        nodename = datafile[5:]
        dict_of_all_contacts[nodename] = node_contacts
    return dict_of_all_contacts
# Seed the outbreak from node '1': every node that node 1 ever met becomes
# infected at the time of its first contact with node 1.
# (Python 2 code: print statements and dict.iteritems().)
dict_of_all_contacts = get_dict_of_all_contacts()
node1 = dict_of_all_contacts['1']
infected = {}
for k, v in node1.iteritems():
    infected[k] = v
final_infected = infected.copy()
# Second hop: contacts of the first-generation infected nodes.
for k,v in infected.iteritems():
    current_node = dict_of_all_contacts[k]
    for k, v in current_node.iteritems():
        if k not in infected:
            final_infected[k] = v
        else:
            # Keep the earlier infection time for already-infected nodes.
            # NOTE(review): this compares against the first-generation time
            # infected[k] rather than final_infected[k] -- confirm intended.
            if infected[k] > v:
                final_infected[k] = v
print len(final_infected)
# Report all infected nodes ordered by infection time.
sorted_infected = sorted(final_infected.iteritems(), key=operator.itemgetter(1))
print sorted_infected
| Python | 0.000016 | |
4b9925a429692175ad1e0a89859a67117cbba9fe | Create pirates_of_the_caribbean.py | extras/pirates_of_the_caribbean.py | extras/pirates_of_the_caribbean.py | #This makes the coding of the song easier
def note(n):
    """Return the frequency in Hz for scale position *n* (1-9); 0 otherwise."""
    frequencies = {
        1: 880,
        2: 987.77,
        3: 1046.5,
        4: 1174.66,
        5: 1318.51,
        6: 1396.91,
        7: 1567.98,
        8: 1760.00,
        9: 932.33,
    }
    return frequencies.get(n, 0)
def Pirates( time, octave):
beep(time, note(1)*octave)
beep(time, note(3)*octave)
beep(time, note(4)*octave)
wait(time)
beep(time, note(4)*octave)
wait(time)
beep(time, note(4)*octave)
beep(time, note(5)*octave)
beep(time, note(6)*octave)
wait(time)
beep(time, note(6)*octave)
wait(time)
beep(time, note(6)*octave)
beep(time, note(7)*octave)
beep(time, note(5)*octave)
wait(time)
beep(time, note(5)*octave)
wait(time)
beep(time, note(4)*octave)
beep(time, note(3)*octave)
beep(time, note(3)*octave)
beep(time*2, note(4)*octave)
wait(time)
beep(time, note(1)*octave)
beep(time, note(3)*octave)
beep(time, note(4)*octave)
wait(time)
beep(time, note(4)*octave)
wait(time)
beep(time, note(4)*octave)
beep(time, note(5)*octave)
beep(time, note(6)*octave)
wait(time)
beep(time, note(6)*octave)
wait(time)
beep(time, note(6)*octave)
beep(time, note(7)*octave)
beep(time, note(5)*octave)
wait(time)
beep(time, note(5)*octave)
wait(time)
beep(time, note(4)*octave)
beep(time, note(3)*octave)
beep(time*2, note(4)*octave)
wait(time*2)
beep(time, note(1)*octave)
beep(time, note(3)*octave)
beep(time, note(4)*octave)
wait(time)
beep(time, note(4)*octave)
wait(time)
beep(time, note(4)*octave)
beep(time, note(6)*octave)
beep(time, note(7)*octave)
wait(time)
beep(time, note(7)*octave)
wait(time)
beep(time, note(7)*octave)
beep(time, note(1)*2*octave)
beep(time, note(9)*2*octave)
wait(time)
beep(time, note(9)*2*octave)
wait(time)
beep(time, note(1)*2*octave)
beep(time, note(7)*octave)
beep(time, note(1)*2*octave)
beep(time, note(4)*octave)
wait(time*2)
beep(time, note(4)*octave)
beep(time, note(5)*octave)
beep(time, note(6)*octave)
wait(time)
beep(time, note(6)*octave)
wait(time)
beep(time, note(7)*octave)
wait(time)
beep(time, note(1)*2*octave)
beep(time, note(4)*octave)
wait(time*2)
beep(time, note(4)*octave)
beep(time, note(6)*octave)
beep(time, note(5)*octave)
wait(time)
beep(time, note(5)*octave)
wait(time)
beep(time, note(6)*octave)
beep(time, note(4)*octave)
beep(time*3, note(5)*octave)
from Myro import *
# Start the Myro simulator (use a robot port name instead for real hardware).
init("sim")
#This is the song part of the program
Pirates(.15,1) #takes in duration of each note, and the octave to use
| Python | 0.999871 | |
45064a2b6279cfe303c978929daeaa027a001ce0 | add a script to create posts, elections, area types, etc. | elections/cr/management/commands/cr_create_basic_site.py | elections/cr/management/commands/cr_create_basic_site.py | # -*- coding: utf-8 -*-
from datetime import date
from django.core.management.base import BaseCommand
from django.db import transaction
from django.utils.text import slugify
from candidates.models import (
AreaExtra, OrganizationExtra, PostExtra, PartySet
)
from elections.models import AreaType, Election
from popolo.models import Area, Organization, Post
import requests
class Command(BaseCommand):
    """Management command that bootstraps the Costa Rica 2016 municipal
    elections site: area types, elections, organizations, areas (from MapIt)
    and one Post per (election, area) pair."""

    def get_or_create_organization(self, slug, name):
        """Return the Organization for *slug*, updating its name, or create
        it (with its OrganizationExtra) if it does not exist yet."""
        try:
            org_extra = OrganizationExtra.objects.get(slug=slug)
            org = org_extra.base
            org.name = name
            org.save()
        except OrganizationExtra.DoesNotExist:
            org = Organization.objects.create(name=name)
            org_extra = OrganizationExtra.objects.create(base=org, slug=slug)
        return org

    def handle(self, *args, **options):
        """Create/refresh all base data inside a single transaction so a
        failure leaves the database untouched."""
        with transaction.atomic():
            # Create all the AreaType objects first:
            area_type, created = AreaType.objects.get_or_create(
                name='CRCANTON',
                defaults={'source': 'MapIt'},
            )
            # Now the Election objects (and the organizations they're
            # associated with).  Each dict carries the election-specific
            # fields; fields shared by both elections are merged in below.
            elections = []
            for election_data in [
                    {
                        'slug': 'mun-al-2016',
                        'for_post_role': 'Alcalde',
                        'name': u'Elección de Alcaldes 2016',
                        'organization_name': 'Alcaldía Municipal',
                        'organization_slug': 'alcaldia-municipal',
                        'party_lists_in_use': False,
                    },
                    {
                        'slug': 'mun-re-2016',
                        'for_post_role': 'Regidor',
                        'name': u'Elección de Regidores 2016',
                        'organization_name': 'Consejo Municipal',
                        'organization_slug': 'consejo-municipal',
                        'party_lists_in_use': True,
                        'default_party_list_members_to_show': 3,
                    },
            ]:
                org = self.get_or_create_organization(
                    election_data['organization_slug'],
                    election_data['organization_name'],
                )
                # The organization_* keys are consumed here; the remaining
                # keys must match Election model fields.
                del election_data['organization_name']
                del election_data['organization_slug']
                election_data['organization'] = org
                consistent_data = {
                    'candidate_membership_role': 'Candidate',
                    'election_date': date(2016, 2, 7),
                    'current': True,
                    'use_for_candidate_suggestions': False,
                    'area_generation': 2,
                    'organization': org,
                }
                election_slug = election_data.pop('slug')
                election_data.update(consistent_data)
                election, created = Election.objects.update_or_create(
                    slug=election_slug,
                    defaults=election_data,
                )
                election.area_types.add(area_type)
                elections.append(election)
            # Now create all the Area objects, fetched live from MapIt:
            areas = []
            for area_id, area_data in requests.get(
                    'http://international.mapit.mysociety.org/areas/CRCANTON'
            ).json().items():
                area, created = Area.objects.update_or_create(
                    identifier=area_data['id'],
                    defaults={
                        'name': area_data['name'],
                        'classification': area_data['type_name'],
                    }
                )
                AreaExtra.objects.update_or_create(
                    base=area,
                    defaults={'type': area_type}
                )
                areas.append(area)
            # Now create all the Post objects, one per (election, area):
            for election in elections:
                for area in areas:
                    organization = election.organization
                    # Party sets are assumed to exist already with this
                    # canton-derived slug; PartySet.DoesNotExist aborts the
                    # transaction.
                    party_set_slug = '2016_canton_' + slugify(area.name)
                    party_set = PartySet.objects.get(slug=party_set_slug)
                    post_role = election.for_post_role
                    post_prefix = {
                        'Alcalde': 'al-',
                        'Regidor': 're-',
                    }[post_role]
                    post_label = {
                        'Alcalde': u'Alcalde de {area_name}',
                        'Regidor': u'Regidor de {area_name}',
                    }[post_role].format(area_name=area.name)
                    post_slug = post_prefix + str(area.identifier)
                    try:
                        post_extra = PostExtra.objects.get(slug=post_slug)
                        post = post_extra.base
                    except PostExtra.DoesNotExist:
                        post = Post.objects.create(
                            label=post_label,
                            organization=organization,
                        )
                        post_extra = PostExtra.objects.create(
                            base=post,
                            slug=post_slug,
                        )
                    post.role = post_role
                    post.label = post_label
                    post.organization = organization
                    post.save()
                    post_extra.party_set = party_set
                    post_extra.save()
                    # NOTE(review): .all().delete() on the related manager
                    # deletes the related objects themselves rather than just
                    # clearing the relation -- confirm that is intended.
                    post_extra.elections.all().delete()
                    post_extra.elections.add(election)
| Python | 0 | |
f8afd9d77a61f2baae15fec841817b0f97e573f9 | add redone twitterminal.py script | twitterminal.py | twitterminal.py | #!/usr/bin/env python3
#
# Tweet from the shell.
#
# Requires the following pip packages:
# * simplejson
# * twitter (NOT python-twitter, aka Python Twitter Tools)
#
import sys, os, argparse, subprocess, logging
# twitter requires a json module
# simplejson is updated more and may be faster
# see: http://stackoverflow.com/questions/712791
import simplejson
import twitter
class Twitterminal:
    """Tweet from the shell using Python Twitter Tools.

    Credentials live in two dotfiles in the user's home directory: the
    application (consumer) tokens and the user OAuth tokens.
    """
    # os.path.expanduser is robust when $HOME is unset; the previous
    # os.getenv('HOME') + "..." raised TypeError in that case.
    CREDS_FILE = os.path.expanduser("~") + "/.twitterminal_creds"
    APP_CREDS_FILE = os.path.expanduser("~") + "/.twitterminal_appcreds"
    ERR_ARGS = 1
    ERR_OAUTH = 2

    ## CLI-related {{{
    def __init_logging(self):
        """Create a stderr logger named after the executable."""
        self.logger = logging.getLogger(os.path.basename(sys.argv[0]))
        lh = logging.StreamHandler()
        lh.setFormatter(logging.Formatter("%(name)s: %(levelname)s: %(message)s"))
        self.logger.addHandler(lh)

    def __parse_args(self):
        """Parse CLI arguments and set the log level from -v/-q."""
        self.parser = argparse.ArgumentParser(description="Tweet from the shell.")
        self.parser.add_argument("-v", "--verbose", help="be verbose", action="count", default=0)
        self.parser.add_argument("-q", "--quiet", help="be quiet (overrides -v)", action="count", default=0)
        self.parser.add_argument("message", help="text to tweet")
        self.args = self.parser.parse_args()

        if self.args.verbose == 0:
            self.logger.setLevel(logging.INFO)
        elif self.args.verbose >= 1:
            self.logger.setLevel(logging.DEBUG)
        if self.args.quiet >= 1:
            self.logger.setLevel(logging.NOTSET)

        if len(self.args.message) == 0:
            # BUG FIX: this called the *builtin* exit() with two arguments
            # (TypeError) and referenced ERR_ARGS unqualified (NameError);
            # use the method and the class attribute instead.
            self.exit("message needs to be longer than 0 characters", self.ERR_ARGS)

    def run(self):
        """Run from CLI: parse arguments, try to tweet."""
        self.__init_logging()
        self.__parse_args()
        self.tweet(self.args.message)
    ## }}}

    def __init__(self):
        self.__init_client()

    def exit(self, msg, ret):
        """Exit with explanation."""
        # The logger only exists once run() has initialised it; fall back
        # to stderr when bailing out early (e.g. from __init_client()).
        if hasattr(self, "logger"):
            self.logger.error(msg)
        else:
            print(msg, file=sys.stderr)
        sys.exit(ret)

    def get_shell(self, args):
        """Run a shell command and return the exit code."""
        return subprocess.run(args).returncode

    def __init_client(self):
        """Initialise the Twitter client."""
        # get application OAuth tokens
        with open(Twitterminal.APP_CREDS_FILE) as f:
            api_tokens = [line.strip() for line in f]
        if len(api_tokens) != 2:
            # BUG FIX: same builtin-exit()/bare-constant problem as above.
            self.exit("app creds key incorrectly formatted", self.ERR_OAUTH)

        # get consumer OAuth tokens
        # TODO: the oauth dance if required
        #twitter.oauth_dance("twitterminal.py", api_tokens[0], api_tokens[1], Twitterminal.CREDS_FILE)
        oauth_token, oauth_secret = twitter.read_token_file(Twitterminal.CREDS_FILE)
        self.client = twitter.Twitter(auth=twitter.OAuth(oauth_token,
            oauth_secret, api_tokens[0], api_tokens[1]))

    def tweet(self, msg):
        """Tweet a message."""
        self.client.statuses.update(status=msg)
if __name__ == "__main__":
    # Constructing the client reads the credential files; run() parses the
    # CLI arguments and sends the tweet.
    twat = Twitterminal()
    twat.run()
| Python | 0 | |
0c6becaa179aba9408def1b3cce61d5ec1509942 | Load the simul module and run a simulation | python/main.py | python/main.py | from simul import *
if __name__ == '__main__':
# create a new simulation
s = Simulation(Re=5)
# initial conditions psi(0) = 0, Omega(0) = 0
s.psi.initial("null")
s.omega.initial("null")
# T_n(t=0) = sin(pi*k*dz) & T_0(t=0) = 1-k*dz
s.T.initial(lambda n, k: T_0(n,k,s))
# main loop over time
while s.step():
s.T.step()
s.psi.step()
s.omega.step()
del s
| Python | 0 | |
eb40e609122787e9e82479905479d955568e3f36 | add test for constants configuration | astropy/constants/tests/test_constants_config.py | astropy/constants/tests/test_constants_config.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import os
import pytest
import tempfile
from astropy.tests.helper import catch_warnings
from astropy.config import paths
def write_test_config(dir, physical_constants=None,
                      astronomical_constants=None):
    """Write constants configuration items to *dir*/astropy/astropy.cfg.

    Parameters:
    -----------
    dir: `str`
        path to directory to write configuration (in astropy subdirectory)
    physical_constants: None or `str`
        value of config item for physical constants. If None, comment out.
    astronomical_constants: `str`
        value of config item for astronomical constants. If None, comment out.
    """
    # Decide each config line up front; a None input leaves the item
    # commented out so the package default applies.
    if isinstance(physical_constants, str):
        physical_line = "physical_constants = '{}'\n".format(physical_constants)
    else:
        physical_line = "# physical_constants = 'codata2014'\n"
    if isinstance(astronomical_constants, str):
        astronomical_line = ("astronomical_constants = '{}'\n"
                             .format(astronomical_constants))
    else:
        astronomical_line = "# astronomical_constants = 'iau2015'\n"
    config_dir = os.path.join(dir, 'astropy')
    os.makedirs(config_dir, exist_ok=True)
    with open(os.path.join(config_dir, 'astropy.cfg'), 'w') as cfgfile:
        cfgfile.write("# -*- coding: utf-8 -*-\n\n")
        cfgfile.write("[constants]\n\n")
        cfgfile.write(physical_line)
        cfgfile.write(astronomical_line)
def test_prior_config():
    """Setting prior constants versions in the config file must change the
    values exposed by astropy.constants after a reload."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        write_test_config(tmpdirname, physical_constants='codata2010',
                          astronomical_constants='iau2012')
        with paths.set_temp_config(tmpdirname):
            from astropy.constants import conf
            with catch_warnings():
                # Re-read the config and rebuild the constants module so
                # the prior versions actually take effect.
                conf.reload()
                import astropy.constants as const
                importlib.reload(const)
            assert conf.physical_constants == 'codata2010'
            h = const.h
            # check that the value is the CODATA2010 value
            assert abs(h.value - 6.62606957e-34) < 1e-43
            assert abs(h.si.value - 6.62606957e-34) < 1e-43
            assert abs(h.cgs.value - 6.62606957e-27) < 1e-36
            from astropy.constants.codata2014 import h as h_2014
            # Check it is different from the current value
            assert abs(h.value - h_2014.value) > 4e-42
            assert conf.astronomical_constants == 'iau2012'
            R_earth = const.R_earth
            assert R_earth.value == 6.378136e6
            from astropy.constants.iau2015 import R_earth as R_earth_2015
            # Check it is different from the current value
            assert abs(R_earth.value - R_earth_2015.value) > 10.0
        # Test setting by version
        write_test_config(tmpdirname, physical_constants='astropyconst13',
                          astronomical_constants='astropyconst13')
        with paths.set_temp_config(tmpdirname):
            from astropy.constants import conf
            with catch_warnings():
                conf.reload()
                import astropy.constants as const
                importlib.reload(const)
            assert conf.physical_constants == 'astropyconst13'
            h = const.h
            # check that the value is the CODATA2010 value
            assert abs(h.value - 6.62606957e-34) < 1e-43
            assert abs(h.si.value - 6.62606957e-34) < 1e-43
            assert abs(h.cgs.value - 6.62606957e-27) < 1e-36
            from astropy.constants.codata2014 import h as h_2014
            # Check it is different from the current value
            assert abs(h.value - h_2014.value) > 4e-42
            assert conf.astronomical_constants == 'astropyconst13'
            R_earth = const.R_earth
            assert R_earth.value == 6.378136e6
            from astropy.constants.iau2015 import R_earth as R_earth_2015
            # Check it is different from the current value
            assert abs(R_earth.value - R_earth_2015.value) > 10.0
    # reset state of constants (in part to prevent failures of later tests)
    with catch_warnings():
        conf.reload()
        importlib.reload(const)
    assert conf.physical_constants == 'codata2014'
    h = const.h
    assert abs(h.value - h_2014.value) < 4e-42
    assert conf.astronomical_constants == 'iau2015'
    R_earth = const.R_earth
    assert abs(R_earth.value - R_earth_2015.value) < 0.01
    # check current constants are consistent with units
    import astropy.units as u
    assert (abs((const.M_earth / u.M_earth).to(u.dimensionless_unscaled)
                - 1) < 1.e-6)
def assert_config_outputs(physical_in, physical_out,
                          astronomical_in, astronomical_out):
    """Write inputs to temporary config and assert the outputs

    Parameters:
    -----------
    physical_in: `str` or None
        input physical constants version
    physical_out: `str`
        output physical constants version
    astronomical_in: `str` or None
        input astronomical constants version
    astronomical_out: `str`
        output astronomical constants version
    """
    import astropy.constants as const
    from astropy.constants import conf
    with tempfile.TemporaryDirectory() as tmpdirname:
        write_test_config(tmpdirname, physical_constants=physical_in,
                          astronomical_constants=astronomical_in)
        with paths.set_temp_config(tmpdirname):
            with catch_warnings():
                # Invalid versions raise here (ValueError), before the
                # assertions are reached.
                conf.reload()
                importlib.reload(const)
            assert conf.physical_constants == physical_out
            assert conf.astronomical_constants == astronomical_out
def test_invalid_config():
    """Test invalid config items"""
    # An unknown physical-constants version must be rejected with ValueError.
    with pytest.raises(ValueError):
        assert_config_outputs('cooldata2014', 'codata2014',
                              'iau2012', 'iau2012')
    # Same for an unknown astronomical-constants version.
    with pytest.raises(ValueError):
        assert_config_outputs('codata2010', 'codata2010',
                              'allen1976', 'iau2015')
def test_valid_config():
    """Test valid config items"""
    # Each case: (physical in, physical out, astronomical in,
    # astronomical out); None inputs fall back to the defaults.
    cases = [
        ('codata2014', 'codata2014', 'iau2015', 'iau2015'),
        ('codata2010', 'codata2010', 'iau2015', 'iau2015'),
        ('codata2014', 'codata2014', 'iau2012', 'iau2012'),
        ('codata2010', 'codata2010', 'iau2012', 'iau2012'),
        ('astropyconst13', 'astropyconst13', 'astropyconst20', 'astropyconst20'),
        ('astropyconst20', 'astropyconst20', 'astropyconst20', 'astropyconst20'),
        ('astropyconst20', 'astropyconst20', 'astropyconst13', 'astropyconst13'),
        (None, 'codata2014', None, 'iau2015'),
    ]
    for case in cases:
        assert_config_outputs(*case)
| Python | 0.000001 | |
90169095a9e1adbc23e1efa35ea0e1a9a09259de | Solve Code Fights sortByHeight problem | Problems/sortByHeight.py | Problems/sortByHeight.py | #!/usr/local/bin/python
# Code Fights Arcade Mode
def sortByHeight(a):
trees = [i for i, t in enumerate(a) if t == -1]
humans = sorted([h for h in a if h != -1])
for tree in trees:
humans.insert(tree, -1)
return humans
def main():
a = [-1, 150, 190, 170, -1, -1, 160, 180]
new = sortByHeight(a)
print(new)
if __name__ == '__main__':
main()
| Python | 0.999278 | |
5820a2b6130ea7be9eb86341aa6b3b69861a9a36 | Create example.py | example.py | example.py |
from lxmlmate import ObjectifiedElementProxy
print("#To create a brand new xml:")
p = ObjectifiedElementProxy( rootag='Person' )
p.name = 'peter'
p.age = 13
print( p )
print('''
##<Person>
## <name>peter</name>
## <age>13</age>
##</Person>
''')
print('===================')
print( p.name )
print('''
##<name>peter</name>
''')
print('===================')
#To retrieve peter's name and age:
peter = p.name.pyval
age = p.age.pyval
print('#To create from xml string:')
p = ObjectifiedElementProxy( xmlStr="<Person><name>peter</name><age>13</age></Person>" )
print( p )
print('''
##<Person>
## <name>peter</name>
## <age>13</age>
##</Person>
''')
#Multiple levels' example:
r = ObjectifiedElementProxy()
r.person.name = 'jack'
r.person.age = 10
print('===================')
print('''#To insert descedants like '<person><name>peter</name><age>13</age></person>':''')
r.insert( 'person' )('name','peter')('age',13)
p = r('person').person[-1]
p.name = 'david'
p.age = 16
print( r )
print('''
##<root>
## <person>
## <name>jack</name>
## <age>10</age>
## </person>
## <person>
## <name>peter</name>
## <age>13</age>
## </person>
## <person>
## <name>david</name>
## <age>16</age>
## </person>
##</root>
''')
print('===================')
print( r.person[1].name.pyval )
print('##peter')
##To retrieve the last person:
r.person[-1]
##To insert a new tag with attrib:
r.insert( 'person', attrib={ 'height' : "185cm" } )
##To modify a tag's attrib:
r.person[0].attrib['height'] = "170cm"
##You can use lxml.ObjectifiedElement's methods directly like this:
r.addattr( 'kkk','vvv' )
##To modify tag:
r.person[-1].tag = 'person_new'
print('===================')
print( r.person[-1] )
print('''
##<person_new>
## <name>david</name>
## <age>16</age>
##</person_new>
''')
print('===================')
print('#To insert a new tag with attrib:')
r.insert( 'person', attrib={ 'height':'185cm'} )("name","joe")
print( r.person[-1] )
print('''
##<person height="185cm">
## <name>joe</name>
##</person>
''')
##To dump to xml document:
r.dump( 'person.xml' )
| Python | 0.000001 | |
1a97d686ed5afd9a97083bc09f6c4bfb4ef124fc | Add quick helpers to get a client | helpers.py | helpers.py | from zaqarclient.queues import client
import os
conf = {
'auth_opts': {
'backend': 'keystone',
'options': {
'os_username': os.environ.get('OS_USERNAME'),
'os_password': os.environ.get('OS_PASSWORD'),
'os_project_name': os.environ.get('OS_PROJECT_NAME', 'admin'),
'os_auth_url': os.environ.get('OS_AUTH_URL') + '/v2.0/',
'insecure': '',
},
},
}
client = client.Client(url='http://192.168.122.58:8888', version=2, conf=conf)
| Python | 0 | |
7aee3720617aa3442245e2d0bf3de7393e4acb01 | Add lc0133_clone_graph.py | lc0133_clone_graph.py | lc0133_clone_graph.py | """Leetcode 133. Clone Graph
Medium
URL: https://leetcode.com/problems/clone-graph/
Given a reference of a node in a connected undirected graph, return a deep copy
(clone) of the graph. Each node in the graph contains a val (int) and a list
(List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},
{"$id":"3","neighbors":[{"$ref":"2"},{"$id":"4","neighbors":[{"$ref":"3"},
{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
- The number of nodes will be between 1 and 100.
- The undirected graph is a simple graph, which means no repeated edges and no
self-loops in the graph.
- Since the graph is undirected, if node p has node q as neighbor, then node q
must have node p as neighbor too.
- You must return the copy of the given node as a reference to the cloned graph.
"""
# Definition for a Node.
class Node(object):
    """A graph node holding an integer value and its adjacency list."""
    def __init__(self, val, neighbors):
        # val: the integer value carried by this node.
        self.val = val
        # neighbors: list of adjacent Node objects (undirected edges).
        self.neighbors = neighbors
class Solution(object):
    def cloneGraph(self, node):
        """Return a deep copy of the connected undirected graph at *node*.

        Iterative DFS; runs in O(V + E) time and O(V) extra space.

        :type node: Node
        :rtype: Node
        """
        if not node:
            return None
        # Map original node -> its clone.  The map doubles as the visited
        # set, so each node is copied exactly once even though the graph
        # contains cycles (it is undirected).
        clones = {node: type(node)(node.val, [])}
        stack = [node]
        while stack:
            cur = stack.pop()
            for neighbor in cur.neighbors:
                if neighbor not in clones:
                    # type(neighbor) instead of a hard-coded Node reference
                    # keeps this working with any compatible node class.
                    clones[neighbor] = type(neighbor)(neighbor.val, [])
                    stack.append(neighbor)
                clones[cur].neighbors.append(clones[neighbor])
        return clones[node]
def main():
    # Placeholder driver: the LeetCode judge calls Solution.cloneGraph
    # directly, so there is nothing to do when run as a script.
    pass
if __name__ == '__main__':
    main()
| Python | 0.000002 | |
ef63c538aff066230030aaf02981933b652830e4 | Create module_posti.py | pyfibot/modules/module_posti.py | pyfibot/modules/module_posti.py | # -*- encoding: utf-8 -*-
"""
Get package tracking information from the Finnish postal service
"""
from __future__ import unicode_literals, print_function, division
from bs4 import BeautifulSoup
import requests
from datetime import datetime, timedelta
lang = 'en'  # response language: 'en' for English, anything else -> Finnish
def command_posti(bot, user, channel, args):
    """Parse the package status page"""
    args = args.strip()  # args holds the shipment/tracking ID
    if not args:
        return bot.say(channel, 'Need a tracking ID as argument.')
    url = 'http://www.itella.fi/itemtracking/itella/search_by_shipment_id'
    params = {
        'ShipmentId': args,
        'lang': lang,
        'LOTUS_hae': 'Hae',
        'LOTUS_side': '1'
    }
    r = requests.post(url, params=params)
    bs = BeautifulSoup(r.content)
    try:
        # [1]: presumably the newest event row (row 0 being a header) --
        # depends on the page markup; any parse failure means "not found".
        status_table = bs.find('table', {'id': 'shipment-event-table'}).find_all('tr')[1]
    except:
        # NOTE(review): bare except also hides real errors (network, markup
        # changes), not just missing shipments.
        if lang == 'en':
            return bot.say(channel, 'Item not found.')
        return bot.say(channel, 'Lähetystä ei löytynyt.')
    try:
        event = status_table.find('div', {'class': 'shipment-event-table-header'}).text.strip()
    except:
        event = '???'  # keep going with a placeholder if the header is missing
    location = '???'
    dt = timedelta(0, 0, 0)  # age of the event; stays zero if no timestamp is found
    now = datetime.now()
    for x in status_table.find_all('div', {'class': 'shipment-event-table-row'}):
        try:
            row_label = x.find('span', {'class': 'shipment-event-table-label'}).text.strip()
            row_data = x.find('span', {'class': 'shipment-event-table-data'}).text.strip()
        except:
            continue  # skip malformed rows
        if lang == 'en':
            # English timestamps look like "dd.mm.YYYY HH:MM:SS".
            if row_label == 'Registration:':
                dt = now - datetime.strptime(row_data, '%d.%m.%Y %H:%M:%S')
            if row_label == 'Location:':
                location = row_data
        else:
            # Finnish timestamps carry a "klo" (o'clock) separator.
            if row_label == 'Rekisteröinti:':
                dt = now - datetime.strptime(row_data, '%d.%m.%Y klo %H:%M:%S')
            if row_label == 'Paikka:':
                location = row_data
    # Render the event age as e.g. "1d 2h 3m"; seconds are computed but dropped.
    agestr = []
    if dt.days > 0:
        agestr.append('%dd' % dt.days)
    secs = dt.seconds
    hours, minutes, seconds = secs // 3600, secs // 60 % 60, secs % 60
    if hours > 0:
        agestr.append('%dh' % hours)
    if minutes > 0:
        agestr.append('%dm' % minutes)
    if lang == 'en':
        return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' ago', event, location))
    return bot.say(channel, '%s - %s - %s' % (' '.join(agestr) + ' sitten', event, location))
| Python | 0.000001 | |
fbfdc979b5fbb7534a625db390b92856714dcfe1 | add basic tests for model_utils | pysat/tests/test_model_utils.py | pysat/tests/test_model_utils.py | import numpy as np
import sys
from nose.tools import assert_raises, raises
import pandas as pds
import pysat
from pysat import model_utils as mu
class TestBasics():
    """Nose-style tests for pysat.model_utils.collect_inst_model_pairs
    argument validation (each missing argument must raise ValueError)."""
    def setup(self):
        """Runs before every method to create a clean testing setup."""
        self.testInst = pysat.Instrument(platform='pysat',
                                         name='testing',
                                         clean_level='clean')
        self.start = pysat.datetime(2009, 1, 1)
        self.stop = pysat.datetime(2009, 1, 1)
    def teardown(self):
        """Runs after every method to clean up previous testing."""
        del self.testInst, self.start, self.stop
    @raises(ValueError)
    def test_collect_inst_model_pairs_wo_date(self):
        """Try to run without start or stop dates"""
        # ``match`` is intentionally unused: @raises only cares that the
        # call itself raises ValueError.
        match = mu.collect_inst_model_pairs(inst=self.testInst)
    @raises(ValueError)
    def test_collect_inst_model_pairs_wo_inst(self):
        """Try to run without an instrument"""
        match = mu.collect_inst_model_pairs(start=self.start, stop=self.stop)
    @raises(ValueError)
    def test_collect_inst_model_pairs_wo_model(self):
        """Try to run without a model"""
        match = mu.collect_inst_model_pairs(start=self.start, stop=self.stop,
                                            inst=self.testInst)
| Python | 0 | |
458a4a3e5759c4ea1e5b33349288012a86d0d97d | revert syntax object instantiation change as it appears to be buggy. use an optional, dedicated value mangling methods instead. | pysnmp/entity/rfc3413/mibvar.py | pysnmp/entity/rfc3413/mibvar.py | # MIB variable pretty printers/parsers
import types
from pyasn1.type import univ
from pysnmp.smi.error import NoSuchObjectError
# Name
def mibNameToOid(mibView, name):
    """Resolve *name* into an (oid, instance-id suffix) pair.

    *name* is either a plain OID sequence, or a sequence whose first
    element is a ('ModuleName', 'symbolName') tuple followed by index
    values.  NOTE: Python 2 only (types.TupleType, apply()).
    """
    if type(name[0]) == types.TupleType:
        # The defaulted lambda tolerates a leading tuple of 0, 1 or 2 items.
        modName, symName = apply(lambda x='',y='': (x,y), name[0])
        if modName: # load module if needed
            mibView.mibBuilder.loadModules(modName)
        else:
            mibView.mibBuilder.loadModules() # load all (slow)
        if symName:
            oid, label, suffix = mibView.getNodeNameByDesc(symName, modName)
        else:
            oid, label, suffix = mibView.getFirstNodeName(modName)
        suffix = name[1:]
    else:
        oid, label, suffix = mibView.getNodeNameByOid(name)
    modName, symName, _s = mibView.getNodeLocation(oid)
    mibNode, = mibView.mibBuilder.importSymbols(
        modName, symName
        )
    if hasattr(mibNode, 'createTest'): # table column XXX
        # Table column: turn the index values into an instance identifier
        # via the enclosing row node (the OID's parent).
        modName, symName, _s = mibView.getNodeLocation(oid[:-1])
        rowNode, = mibView.mibBuilder.importSymbols(modName, symName)
        return oid, apply(rowNode.getInstIdFromIndices, suffix)
    else: # scalar or incomplete spec
        return oid, suffix
__scalarSuffix = (univ.Integer(0),)
def oidToMibName(mibView, oid):
    """Map *oid* back to ((symName, modName), index part).

    Table columns decode their indices through the row node; a scalar
    returns an empty or ``.0`` suffix; anything else raises
    NoSuchObjectError.
    """
    _oid, label, suffix = mibView.getNodeNameByOid(tuple(oid))
    modName, symName, __suffix = mibView.getNodeLocation(_oid)
    mibNode, = mibView.mibBuilder.importSymbols(
        modName, symName
        )
    if hasattr(mibNode, 'createTest'): # table column
        __modName, __symName, __s = mibView.getNodeLocation(_oid[:-1])
        rowNode, = mibView.mibBuilder.importSymbols(__modName, __symName)
        return (symName, modName), rowNode.getIndicesFromInstId(suffix)
    elif not suffix: # scalar
        return (symName, modName), suffix
    elif suffix == (0,): # scalar
        # Normalise the trailing .0 to the shared scalar-instance suffix.
        return (symName, modName), __scalarSuffix
    else:
        raise NoSuchObjectError(
            str='No MIB info for %s (closest parent %s)' %
            (oid, mibNode.name)
            )
# Value
def cloneFromMibValue(mibView, modName, symName, value):
    """Return *value* cast into the SMI syntax of (modName, symName).

    Identifier nodes carry no ``syntax`` object; for those None is returned.
    """
    (node,) = mibView.mibBuilder.importSymbols(modName, symName)
    if not hasattr(node, 'syntax'):
        return None  # identifier
    return node.syntax.clone(value)  # scalar
| # MIB variable pretty printers/parsers
import types
from pyasn1.type import univ
from pysnmp.smi.error import NoSuchObjectError
# Name
def mibNameToOid(mibView, name):
    """Resolve *name* (OID sequence or ('Module', 'symbol') + indices)
    into an (oid, instance-id suffix) pair.  Python 2 only."""
    if type(name[0]) == types.TupleType:
        # Defaulted lambda tolerates a leading tuple of 0, 1 or 2 items.
        modName, symName = apply(lambda x='',y='': (x,y), name[0])
        if modName: # load module if needed
            mibView.mibBuilder.loadModules(modName)
        else:
            mibView.mibBuilder.loadModules() # load all (slow)
        if symName:
            oid, label, suffix = mibView.getNodeNameByDesc(symName, modName)
        else:
            oid, label, suffix = mibView.getFirstNodeName(modName)
        suffix = name[1:]
    else:
        oid, label, suffix = mibView.getNodeNameByOid(name)
    modName, symName, _s = mibView.getNodeLocation(oid)
    mibNode, = mibView.mibBuilder.importSymbols(
        modName, symName
        )
    if hasattr(mibNode, 'createTest'): # table column XXX
        # Convert index values into an instance id via the row node.
        modName, symName, _s = mibView.getNodeLocation(oid[:-1])
        rowNode, = mibView.mibBuilder.importSymbols(modName, symName)
        return oid, apply(rowNode.getInstIdFromIndices, suffix)
    else: # scalar or incomplete spec
        return oid, suffix
__scalarSuffix = (univ.Integer(0),)
def oidToMibName(mibView, oid):
    """Map *oid* back to ((symName, modName), index part); raises
    NoSuchObjectError when the suffix is neither empty, ``.0`` nor a
    table-column instance."""
    _oid, label, suffix = mibView.getNodeNameByOid(tuple(oid))
    modName, symName, __suffix = mibView.getNodeLocation(_oid)
    mibNode, = mibView.mibBuilder.importSymbols(
        modName, symName
        )
    if hasattr(mibNode, 'createTest'): # table column
        __modName, __symName, __s = mibView.getNodeLocation(_oid[:-1])
        rowNode, = mibView.mibBuilder.importSymbols(__modName, __symName)
        return (symName, modName), rowNode.getIndicesFromInstId(suffix)
    elif not suffix: # scalar
        return (symName, modName), suffix
    elif suffix == (0,): # scalar
        # Normalise the trailing .0 to the shared scalar-instance suffix.
        return (symName, modName), __scalarSuffix
    else:
        raise NoSuchObjectError(
            str='No MIB info for %s (closest parent %s)' %
            (oid, mibNode.name)
            )
# Value
def cloneFromMibValue(mibView, modName, symName, value):
    """Return *value* cast into the SMI syntax of (modName, symName),
    or None for identifier nodes (which have no syntax object)."""
    mibNode, = mibView.mibBuilder.importSymbols(
        modName, symName
        )
    if hasattr(mibNode, 'syntax'): # scalar
        # Clone from the prototype syntax object instead of instantiating
        # mibNode.syntax.__class__ directly: clone() preserves the tag set
        # and subtype constraints attached to the prototype instance.
        return mibNode.syntax.clone(value)
    else:
        return # identifier
| Python | 0 |
6cd8b4c733de5a4ed39e3d3ba3d06e78b04dbb4b | read a value from a file that is in ConfigObj format - no section check | python/2.7/read_config_value.py | python/2.7/read_config_value.py | #!/usr/bin/env python
from configobj import ConfigObj
import argparse
import os
import sys
def read_config(fname, skey):
    """Return the value stored under *skey* in the ConfigObj file *fname*."""
    parsed = ConfigObj(fname, raise_errors=True)
    return parsed[skey]
def main():
parser = argparse.ArgumentParser(description='read a value from a ConfigObj file', prog=os.path.basename(__file__))
parser.add_argument('-f', '--file', help='input file', type=str)
parser.add_argument('-k', '--key', help='key to read', type=str)
args = parser.parse_args()
if args.file is None:
parser.print_usage()
return
if args.key is None:
parser.print_usage()
return
if os.path.isfile(args.file):
try:
value = read_config(args.file, args.key)
print value
except:
print >> sys.stderr, '[e] unable to read key:', args.key
return
else:
print >> sys.stderr, '[e] unable to access file:', args.file
if __name__ == '__main__':
main()
| Python | 0 | |
837d1f26ad339fbe4338ef69c947f83042daba9f | add prelim script for looking at incident data | Scripts/fire_incident.py | Scripts/fire_incident.py | #Weinschenk
#12-14
from __future__ import division
import numpy as np
import pandas as pd
from pylab import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
incident = pd.read_csv('../Data/arlington_incidents.csv', header=0)
total_incidents = len(incident['incident_class_code'])
total_fires = 0
for i in incident['incident_class_code']:
if i == 1:
total_fires = total_fires + 1
print 100*(total_fires/total_incidents)
| Python | 0.000001 | |
ab00f54344e4aa39503a59551e87db2ed4be9c3d | Create print_rectangle.py | python3/print_rectangle.py | python3/print_rectangle.py | while 1:
m, n = input().split()# m:height, n:width
if m == "0" and n == "0":
breaku
for i in range(int(m)):
print("#" * int(n))
print()
| Python | 0.001609 | |
989a94c81f74a17707e66f126960b6bb45e9b4d5 | Add index to cover testgroup_details (previous runs) | migrations/versions/3042d0ca43bf_index_job_project_id.py | migrations/versions/3042d0ca43bf_index_job_project_id.py | """Index Job(project_id, status, date_created) where patch_id IS NULL
Revision ID: 3042d0ca43bf
Revises: 3a3366fb7822
Create Date: 2014-01-03 15:24:39.947813
"""
# revision identifiers, used by Alembic.
revision = '3042d0ca43bf'
down_revision = '3a3366fb7822'
from alembic import op
def upgrade():
    # Partial index covering the "previous runs" query (project_id/status
    # filter, ordered by date_created); raw SQL is used so the
    # ``WHERE patch_id IS NULL`` predicate can be expressed.
    op.execute('CREATE INDEX idx_job_previous_runs ON job (project_id, status, date_created) WHERE patch_id IS NULL')
def downgrade():
    # Arguments are (index_name, table_name).
    op.drop_index('idx_job_previous_runs', 'job')
| Python | 0 | |
b96f39b3527cef7fd9766315fbdf7b87b6315ec8 | add watch file which generated by scratch | src/car_control_manual/scratch/watch_file.py | src/car_control_manual/scratch/watch_file.py | from __future__ import print_function
"""Watch File generated by Scratch
1. save Scratch file *.sb2 into the same directory or specify with path
2. change name *.sb2 to *.zip
3. unzip *.zip file and read json data from project.json
"""
import sys, time, logging, os, zipfile
import watchdog
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
class MyFileMonitor(watchdog.events.FileSystemEventHandler):
    """Watchdog handler that invokes *callback* for each newly created
    (non-directory) file whose extension matches *suffix*."""
    def __init__(self, suffix, callback):
        super(MyFileMonitor, self).__init__()
        self.callback = callback
        # Store the extension without its leading dot for easy comparison.
        self.suffix = suffix[1:] if suffix.startswith('.') else suffix
    def on_created(self, event):
        super(MyFileMonitor, self).on_created(event)
        extension = event.src_path.split('.')[-1]
        if not event.is_directory and extension == self.suffix:
            # Hand the newly created file over to the registered callback.
            self.callback(event.src_path)
class WatchFile(object):
    """Watch ``path`` for newly created files with ``suffix`` and extract
    the Scratch ``project.json`` payload from each one."""
    def __init__(self, *argv, **kargv):
        # dict.get() instead of the Python-2-only dict.has_key().
        self.path = kargv.get('path', '.')
        self.suffix = kargv.get('suffix', '*')  # star represents any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)
    def run(self):
        """Block forever dispatching filesystem events until Ctrl-C."""
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()
    def get_data(self, filename):
        """Monitor callback: return the project.json text of *filename*."""
        return self._unpack(filename)
    def _unpack(self, filename):
        """Rename the .sb2 archive to .zip and return its project.json text.

        Returns an empty string when the archive contains no project.json.
        """
        # A Scratch .sb2 file is a zip archive.  os.path.splitext handles
        # paths on every platform (the old manual split('.') mangled them).
        root, _ext = os.path.splitext(filename)
        new_name = root + '.zip'
        os.rename(filename, new_name)
        with zipfile.ZipFile(new_name, 'r') as archive:
            for name in archive.namelist():
                if name == "project.json":
                    # Read the member from the archive itself; the previous
                    # code opened "project.json" from the working directory
                    # and also leaked the zip/file handles.
                    return archive.read(name).decode('utf-8')
        return ""
if __name__ == "__main__":
    # Watch the current directory for newly created Scratch (.sb2) files.
    wd = WatchFile(suffix=".sb2")
    wd.run()
| Python | 0 | |
46b3c0c024dd0d8dbb80911d04848571b3176be7 | add yaml config reader | config.py | config.py | # -*- coding: utf-8 -*-
import os
import sys

import yaml
class Settings(dict):
    """Dict subclass exposing keys as attributes, with recursive wrapping.

    Nested plain dicts are converted to Settings instances so that
    ``settings.section.option`` works; missing keys resolve to ``None``
    instead of raising.  Aliased/cyclic sub-dicts are wrapped only once.
    """
    def __init__(self, data=None):
        super(Settings, self).__init__()
        if data:
            self.__update(data, {})
    def __update(self, data, did):
        # ``did`` maps id(original dict) -> wrapped Settings so aliased or
        # cyclic sub-dicts are wrapped exactly once and cycles terminate.
        dataid = id(data)
        did[dataid] = self
        for k in data:
            dkid = id(data[k])
            # ``in`` instead of dict.has_key(), which was removed in Python 3.
            if dkid in did:
                self[k] = did[dkid]
            elif isinstance(data[k], Settings):
                self[k] = data[k]
            elif isinstance(data[k], dict):
                obj = Settings()
                obj.__update(data[k], did)
                self[k] = obj
            else:
                self[k] = data[k]
    def __getitem__(self, item):
        return self.__getattr__(item)
    def __getattr__(self, key):
        # Missing keys yield None rather than AttributeError/KeyError.
        return self.get(key, None)
    def __setattr__(self, key, value):
        if isinstance(value, dict):
            self[key] = Settings(value)
        else:
            self[key] = value
    def update(self, *args):
        """Overwrite-style update; plain dict values are wrapped.  Returns
        self for chaining."""
        for obj in args:
            for k in obj:
                if isinstance(obj[k], dict):
                    self[k] = Settings(obj[k])
                else:
                    self[k] = obj[k]
        return self
    def merge(self, *args):
        """Deep merge: nested mappings merge recursively, lists extend,
        and clashing scalars collect into a list.  Returns self."""
        for obj in args:
            for k in obj:
                if k in self:
                    if isinstance(self[k], list) and isinstance(obj[k], list):
                        self[k] += obj[k]
                    elif isinstance(self[k], list):
                        self[k].append(obj[k])
                    elif isinstance(obj[k], list):
                        self[k] = [self[k]] + obj[k]
                    elif isinstance(self[k], Settings) and isinstance(obj[k], Settings):
                        self[k].merge(obj[k])
                    elif isinstance(self[k], Settings) and isinstance(obj[k], dict):
                        self[k].merge(obj[k])
                    else:
                        self[k] = [self[k], obj[k]]
                else:
                    if isinstance(obj[k], dict):
                        self[k] = Settings(obj[k])
                    else:
                        self[k] = obj[k]
        return self
def load(config_file):
    """Load the YAML file *config_file* and wrap it in a Settings tree.

    An empty file yields an empty Settings instance.
    """
    # safe_load avoids constructing arbitrary Python objects from YAML tags
    # and keeps working on PyYAML >= 6.0, where load() without an explicit
    # Loader argument is an error.
    with open(config_file) as fd:
        data = yaml.safe_load(fd) or dict()
    return Settings(data)
if __name__ == '__main__':
    ''' '''
    # Smoke test: load the sibling config.yaml into a Settings tree.
    settings = load('./config.yaml')
| Python | 0 | |
8b9fe74976d77df32d73792f74ef4ddea1eb525f | Add Config.get() to skip KeyErrors | config.py | config.py | #! /usr/bin/env python
import os
import warnings
import yaml
class Config(object):
    """Attribute-style access to a YAML configuration file."""
    config_fname = "configuration.yaml"
    def __init__(self, config_fname=None):
        config_fname = config_fname or self.config_fname
        # ``with`` closes the handle even when reading fails; safe_load is
        # required on PyYAML >= 6.0 (load() without a Loader raises) and
        # never constructs arbitrary Python objects from the file.
        with open(config_fname, "r") as fo:
            blob = fo.read()
        self.config = yaml.safe_load(blob)
    def __getattr__(self, attrname):
        # Only fires for names not found through normal lookup, so real
        # attributes such as ``config`` are unaffected.
        if attrname == "slack_name":
            warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" %
                          self.config_fname, DeprecationWarning)
        return self.config[attrname]
    def get(self, attrname, fallback=None):
        """Like attribute access, but returns *fallback* on a missing key."""
        try:
            return self.config[attrname]
        except KeyError:
            return fallback
# This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME
# (instantiating Config() reads configuration.yaml from disk, which should
# only happen when the environment variable is absent).
SLACK_NAME = os.getenv("SLACK_NAME")
if SLACK_NAME is None:
    SLACK_NAME = Config().slack_name
| #! /usr/bin/env python
import os
import warnings
import yaml
class Config(object):
    """Attribute-style access to a YAML configuration file."""
    config_fname = "configuration.yaml"
    def __init__(self, config_fname=None):
        config_fname = config_fname or self.config_fname
        # ``with`` closes the handle even when reading fails; safe_load is
        # required on PyYAML >= 6.0 (load() without a Loader raises) and
        # never constructs arbitrary Python objects from the file.
        with open(config_fname, "r") as fo:
            blob = fo.read()
        self.config = yaml.safe_load(blob)
    def __getattr__(self, attrname):
        # Only fires for names not found through normal lookup.
        if attrname == "slack_name":
            warnings.warn("The `slack_name` key in %s is deprecated in favor of the `SLACK_NAME` environment variable" %
                          self.config_fname, DeprecationWarning)
        return self.config[attrname]
# This deliberately isn't a `getenv` default so `.slack_name` isn't tried if there's a SLACK_NAME
# (instantiating Config() reads configuration.yaml from disk, which should
# only happen when the environment variable is absent).
SLACK_NAME = os.getenv("SLACK_NAME")
if SLACK_NAME is None:
    SLACK_NAME = Config().slack_name
| Python | 0 |
7dde102dd51db08f9021234fa3d8f11ab165b210 | add custom_preprocess.py | src/custom_preprocess.py | src/custom_preprocess.py | import unittest
import csv
from datetime import datetime, timedelta
def load_raw_data_and_split_by_dt(path, output_dir):
    """Split the raw click-log CSV at *path* into one file per day.

    Column 2 of each row holds a YYMMDDHH timestamp; rows are routed to
    <output_dir>/<YYMMDD>.csv with only the hour kept in that column.
    The input header line is skipped.  Days covered: 141021..141030.
    """
    base_datetime = datetime.strptime('141021', '%y%m%d')
    output_file_dict = {(base_datetime + timedelta(days=x)).strftime('%y%m%d'): open(
        output_dir + '/' + (base_datetime + timedelta(days=x)).strftime('%y%m%d') + '.csv', 'w') for x in range(0, 10)}
    try:
        # NOTE(review): binary mode is the Python-2 csv convention -- on
        # Python 3 this would need mode 'r' with newline='' instead.
        with open(path, 'rb') as csvfile:
            header = csvfile.readline()  # discard the header row
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                hour_column = row[2]
                dt = hour_column[:6]
                hour = hour_column[6:]
                output_file_dict[dt].write(",".join(row[:2] + [hour] + row[3:]) + "\n")
    finally:
        # The original version leaked all ten handles; always close them so
        # buffered rows are flushed even when a row is malformed.
        for handle in output_file_dict.values():
            handle.close()
class TestCustomPreprocess(unittest.TestCase):
    def test_load_raw_data_and_split_by_dt(self):
        # Smoke test: relies on a checked-in fixture file and only verifies
        # that the split runs without raising.
        load_raw_data_and_split_by_dt('../fixtures/train.thumb', '../fixtures')
if __name__ == '__main__':
    unittest.main()
| Python | 0.000001 | |
81c55f35fdbaaf0892402345719cd89fde51e160 | Create test_trove_utils.py | unit_tests/test_trove_utils.py | unit_tests/test_trove_utils.py | import mock
import unittest
import reactive.designate_utils as dutils
# Canned ``designate`` CLI output (id / name / serial columns) used as
# fixtures by TestTroveUtils below.  DOMAIN_LIST must stay defined: it is
# referenced by test_get_domains, and the half-commented version left a
# stray closing triple-quote that broke the module.
DOMAIN_LIST = b"""
b78d458c-2a69-47e7-aa40-a1f9ff8809e3 frodo.com. 1467534540
fa5111a7-5659-45c6-a101-525b4259e8f0 bilbo.com. 1467534855
"""
SERVER_LIST = b"""
77eee1aa-27fc-49b9-acca-3faf68126530 ns1.www.example.com.
"""
class TestTroveUtils(unittest.TestCase):
    """Unit tests for reactive.designate_utils built on mock patching.

    Uses assertEqual throughout: assertEquals is a deprecated alias that
    was removed in Python 3.12.
    """
    def setUp(self):
        self._patches = {}
        self._patches_start = {}
    def tearDown(self):
        # Stop every patch started via self.patch and drop the references.
        for k, v in self._patches.items():
            v.stop()
            setattr(self, k, None)
        self._patches = None
        self._patches_start = None
    def patch(self, obj, attr, return_value=None):
        """Patch obj.attr for this test and expose the mock as self.<attr>."""
        mocked = mock.patch.object(obj, attr)
        self._patches[attr] = mocked
        started = mocked.start()
        started.return_value = return_value
        self._patches_start[attr] = started
        setattr(self, attr, started)
    def test_run_command(self):
        self.patch(dutils, 'get_environment')
        self.patch(dutils.subprocess, 'Popen')
        process_mock = mock.Mock()
        attrs = {
            'communicate.return_value': ('ouput', 'error'),
            'returncode': 0}
        process_mock.configure_mock(**attrs)
        self.Popen.return_value = process_mock
        self.Popen.returncode.return_value = 0
        dutils.run_command(['ls'])
        # -1 for stderr/stdout is the value of subprocess.PIPE.
        self.Popen.assert_called_once_with(
            ['ls'],
            env=None,
            stderr=-1,
            stdout=-1)
    def test_run_command_fail(self):
        self.patch(dutils, 'get_environment')
        self.patch(dutils.subprocess, 'Popen')
        process_mock = mock.Mock()
        attrs = {
            'communicate.return_value': ('ouput', 'error'),
            'returncode': 1}
        process_mock.configure_mock(**attrs)
        self.Popen.return_value = process_mock
        self.Popen.returncode.return_value = 0
        with self.assertRaises(RuntimeError):
            dutils.run_command(['ls'])
    def test_get_environment(self):
        text_file_data = '\n'.join(["export a=b", "export c=d"])
        with mock.patch('builtins.open',
                        mock.mock_open(read_data=text_file_data),
                        create=True) as m:
            m.return_value.__iter__.return_value = text_file_data.splitlines()
            with open('filename', 'rU'):
                self.assertEqual(
                    dutils.get_environment({}),
                    {'a': 'b', 'c': 'd'})
    def test_get_server_id(self):
        self.patch(dutils, 'get_servers')
        self.get_servers.return_value = {'server1': {'id': 'servid1'}}
        self.assertEqual(dutils.get_server_id('server1'), 'servid1')
        self.assertEqual(dutils.get_server_id('server2'), None)
    def test_get_domain_id(self):
        self.patch(dutils, 'get_domains')
        self.get_domains.return_value = {'domain1': {'id': 'domainid1'}}
        self.assertEqual(dutils.get_domain_id('domain1'), 'domainid1')
        self.assertEqual(dutils.get_domain_id('domain2'), None)
    def test_create_server(self):
        # First lookup returns None (server absent), second the created id.
        _server_ids = ['servid1', None]
        self.patch(dutils, 'get_server_id')
        self.patch(dutils, 'display')
        self.get_server_id.side_effect = lambda x: _server_ids.pop()
        self.patch(dutils, 'run_command')
        self.run_command.return_value = ('out', 'err')
        dutils.create_server('server1')
        cmd = [
            'designate', 'server-create',
            '--name', 'server1',
            '-f', 'value',
        ]
        self.run_command.assert_called_with(cmd)
        self.display.assert_called_with('servid1')
    def test_create_domain(self):
        _domain_ids = ['domainid1', None]
        self.patch(dutils, 'get_domain_id')
        self.patch(dutils, 'display')
        self.get_domain_id.side_effect = lambda x: _domain_ids.pop()
        self.patch(dutils, 'run_command')
        self.run_command.return_value = ('out', 'err')
        dutils.create_domain('dom1', 'email1')
        cmd = [
            'designate', 'domain-create',
            '--name', 'dom1',
            '--email', 'email1',
            '-f', 'value',
        ]
        self.run_command.assert_called_with(cmd)
        self.display.assert_called_with('domainid1')
    def test_delete_domain(self):
        self.patch(dutils, 'get_domain_id', return_value='dom1')
        self.patch(dutils, 'run_command')
        dutils.delete_domain('dom1')
        self.run_command.assert_called_with(['domain-delete', 'dom1'])
    def test_get_domains(self):
        self.patch(dutils, 'run_command')
        self.run_command.return_value = (DOMAIN_LIST, 'err')
        expect = {
            'bilbo.com.':
            {
                'id': 'fa5111a7-5659-45c6-a101-525b4259e8f0',
                'serial': '1467534855'},
            'frodo.com.':
            {
                'id': 'b78d458c-2a69-47e7-aa40-a1f9ff8809e3',
                'serial': '1467534540'}}
        self.assertEqual(dutils.get_domains(), expect)
        self.run_command.assert_called_with(
            ['designate', 'domain-list', '-f', 'value'])
    def test_get_servers(self):
        self.patch(dutils, 'run_command')
        self.run_command.return_value = (SERVER_LIST, 'err')
        expect = {
            'ns1.www.example.com.': {
                'id': '77eee1aa-27fc-49b9-acca-3faf68126530'}}
        self.assertEqual(dutils.get_servers(), expect)
        self.run_command.assert_called_with(
            ['designate', 'server-list', '-f', 'value'])
| Python | 0.000004 | |
1f5134b36846cf0e5e936888a4fe51a2012e0d78 | Create alternate_disjoint_set.py (#2302) | data_structures/disjoint_set/alternate_disjoint_set.py | data_structures/disjoint_set/alternate_disjoint_set.py | """
Implements a disjoint set using Lists and some added heuristics for efficiency
Union by Rank Heuristic and Path Compression
"""
class DisjointSet:
    """Union-find over pre-sized sets with union-by-rank and path
    compression; tracks the size of the largest set in ``max_set``."""

    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        self.ranks = [1 for _ in set_counts]
        self.parents = [i for i, _ in enumerate(set_counts)]

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using Union by rank heuristic
        Return True if successful
        Merge two disjoint sets
        >>> A = DisjointSet([1, 1, 1])
        >>> A.merge(1, 2)
        True
        >>> A.merge(0, 2)
        True
        >>> A.merge(0, 1)
        False
        """
        root_a, root_b = self.get_parent(src), self.get_parent(dst)
        if root_a == root_b:
            return False
        # Attach the lower-ranked root under the higher-ranked one; on a
        # tie the destination root wins and its rank grows by one.
        if self.ranks[root_b] >= self.ranks[root_a]:
            if self.ranks[root_b] == self.ranks[root_a]:
                self.ranks[root_b] += 1
            winner, loser = root_b, root_a
        else:
            winner, loser = root_a, root_b
        self.set_counts[winner] += self.set_counts[loser]
        self.set_counts[loser] = 0
        self.parents[loser] = winner
        self.max_set = max(self.max_set, self.set_counts[winner])
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the Parent of a given set
        >>> A = DisjointSet([1, 1, 1])
        >>> A.merge(1, 2)
        True
        >>> A.get_parent(0)
        0
        >>> A.get_parent(1)
        2
        """
        root = disj_set
        while self.parents[root] != root:
            root = self.parents[root]
        # Second pass: point every node on the walked path straight at root.
        node = disj_set
        while node != root:
            node, self.parents[node] = self.parents[node], root
        return root
| Python | 0 | |
8c078a12e7915ea91a2345bd0f6093ae0ee3df18 | Add ConfigRegistrar class for loading and registering pack configs. | st2common/st2common/bootstrap/configsregistrar.py | st2common/st2common/bootstrap/configsregistrar.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from oslo_config import cfg
from st2common import log as logging
from st2common.content import utils as content_utils
from st2common.constants.meta import ALLOWED_EXTS
from st2common.bootstrap.base import ResourceRegistrar
from st2common.models.api.pack import ConfigAPI
from st2common.persistence.pack import Config
from st2common.exceptions.db import StackStormDBObjectNotFoundError
__all__ = [
'ConfigsRegistrar'
]
LOG = logging.getLogger(__name__)
class ConfigsRegistrar(ResourceRegistrar):
    """
    Class for loading and registering pack configs located in
    /opt/stackstorm/configs/<pack name>.yaml
    """
    ALLOWED_EXTENSIONS = ALLOWED_EXTS
    def register_configs_for_all_packs(self, base_dirs):
        """
        Register configs for all the available packs.

        :param base_dirs: Directories to search for packs.
        :return: Number of configs successfully registered.
        """
        # Register packs first
        self.register_packs(base_dirs=base_dirs)
        registered_count = 0
        packs = self._pack_loader.get_packs(base_dirs=base_dirs)
        pack_names = packs.keys()
        for pack_name in pack_names:
            config_path = self._get_config_path_for_pack(pack_name=pack_name)
            if not os.path.isfile(config_path):
                # Config for that pack doesn't exist
                LOG.debug('No config found for pack "%s" (file "%s" is not present).', pack_name,
                          config_path)
                continue
            try:
                self._register_config_for_pack(pack=pack_name, config_path=config_path)
            except Exception as e:
                # A failure aborts the whole run only when fail_on_failure
                # was requested; otherwise it is logged and skipped.
                if self._fail_on_failure:
                    raise e
                LOG.exception('Failed to register config for pack "%s": %s', pack_name, str(e))
            else:
                registered_count += 1
        return registered_count
    def register_config_for_pack(self, pack_dir):
        """
        Register config for a provided pack.

        :param pack_dir: Path to the pack directory (its basename is used
                         as the pack name).
        :return: 1 if a config was registered, 0 if the pack has no config.
        """
        pack_dir = pack_dir[:-1] if pack_dir.endswith('/') else pack_dir
        _, pack_name = os.path.split(pack_dir)
        # Register pack first
        self.register_pack(pack_name=pack_name, pack_dir=pack_dir)
        config_path = self._get_config_path_for_pack(pack_name=pack_name)
        if not os.path.isfile(config_path):
            return 0
        self._register_config_for_pack(pack=pack_name, config_path=config_path)
        return 1
    def _get_config_path_for_pack(self, pack_name):
        # Configs live at <system.base_path>/configs/<pack_name>.yaml.
        configs_path = os.path.join(cfg.CONF.system.base_path, 'configs/')
        config_path = os.path.join(configs_path, '%s.yaml' % (pack_name))
        return config_path
    def _register_config_for_pack(self, pack, config_path):
        # Build a ConfigAPI from the parsed file and upsert it into the DB.
        content = {}
        values = self._meta_loader.load(config_path)
        content['pack'] = pack
        content['values'] = values
        config_api = ConfigAPI(**content)
        # TODO: Validate config against schema here
        config_api.validate()
        config_db = ConfigAPI.to_model(config_api)
        try:
            # Reuse the existing document id so add_or_update overwrites it.
            config_db.id = Config.get_by_pack(config_api.pack).id
        except StackStormDBObjectNotFoundError:
            LOG.debug('Config for pack "%s" not found. Creating new entry.', pack)
        try:
            config_db = Config.add_or_update(config_db)
            extra = {'config_db': config_db}
            LOG.audit('Config for pack "%s" is updated.', config_db.pack, extra=extra)
        except Exception:
            LOG.exception('Failed to config for pack %s.', pack)
            raise
        return config_db
def register_configs(packs_base_paths=None, pack_dir=None, use_pack_cache=True,
                     fail_on_failure=False):
    """Register pack configs, either for one pack or for all packs.

    When *pack_dir* is provided only that pack is processed; otherwise all
    packs under *packs_base_paths* (defaulting to the system pack search
    path) are.  Returns the number of configs registered.
    """
    if packs_base_paths:
        assert isinstance(packs_base_paths, list)
    else:
        packs_base_paths = content_utils.get_packs_base_paths()
    registrar = ConfigsRegistrar(use_pack_cache=use_pack_cache,
                                 fail_on_failure=fail_on_failure)
    if pack_dir:
        return registrar.register_config_for_pack(pack_dir=pack_dir)
    return registrar.register_configs_for_all_packs(base_dirs=packs_base_paths)
| Python | 0 | |
4c53ffbd9b23238b3402752f33fcabb2724921f4 | Add dunder init for lowlevel. | astrodynamics/lowlevel/__init__.py | astrodynamics/lowlevel/__init__.py | # coding: utf-8
from __future__ import absolute_import, division, print_function
| Python | 0 | |
ae79ca36e3cfca362414f2293a4c6d295c6db38b | Create addroundkey.py | research/aes/addroundkey.py | research/aes/addroundkey.py | import sys
sys.path.append("../..")
import pyrtl
from pyrtl import *
import keyexpansion
from keyexpansion import *
""" AddRoundKey round of AES.
Input: 128-bit state array.
Output: 128-bit state array.
"""
def _addroundkey(state, expanded_key, index):
    """XOR ``state`` with one 128-bit round key and return the result wire.
    ``index`` is 1-based: index 1 (the initial key addition) uses the top
    128 bits of ``expanded_key`` ([1280:1408]) and each later index shifts
    the slice down by 128 bits, so index 11 (round 10) uses [0:128].
    Wire names keep the original ``input_wire_N``/``new_N`` numbering so
    existing traces stay readable.
    """
    input_wire = pyrtl.WireVector(bitwidth=128, name='input_wire_%d' % index)
    input_wire <<= state
    new = pyrtl.WireVector(bitwidth=128, name='new_%d' % index)
    lo = 1408 - 128 * index
    new <<= state ^ expanded_key[lo:lo + 128]
    return new
def addroundkey_initial(state, expanded_key):
    """Initial AddRoundKey (applied before round 1)."""
    return _addroundkey(state, expanded_key, 1)
def addroundkey_1(state, expanded_key):
    return _addroundkey(state, expanded_key, 2)
def addroundkey_2(state, expanded_key):
    return _addroundkey(state, expanded_key, 3)
def addroundkey_3(state, expanded_key):
    return _addroundkey(state, expanded_key, 4)
def addroundkey_4(state, expanded_key):
    return _addroundkey(state, expanded_key, 5)
def addroundkey_5(state, expanded_key):
    return _addroundkey(state, expanded_key, 6)
def addroundkey_6(state, expanded_key):
    return _addroundkey(state, expanded_key, 7)
def addroundkey_7(state, expanded_key):
    return _addroundkey(state, expanded_key, 8)
def addroundkey_8(state, expanded_key):
    return _addroundkey(state, expanded_key, 9)
def addroundkey_9(state, expanded_key):
    return _addroundkey(state, expanded_key, 10)
def addroundkey_10(state, expanded_key):
    """Final round's AddRoundKey (key words [0:128])."""
    return _addroundkey(state, expanded_key, 11)
# Hardware build.
aes_input = pyrtl.Input(bitwidth=128, name='aes_input')
aes_output = pyrtl.Output(bitwidth=128, name='aes_output')
# NOTE(review): ``addroundkey_x`` is not defined anywhere in this module,
# and every addroundkey_* function above takes (state, expanded_key), so
# this line raises NameError as written.  Presumably a concrete round
# function plus an expanded key were intended here -- confirm before use.
aes_output <<= addroundkey_x(aes_input)
print pyrtl.working_block()
print
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
# Single-cycle simulation driving the input with a constant test vector.
for cycle in range(1):
    sim.step({aes_input: 0xff})
sim_trace.render_trace(symbol_len=5, segment_size=5)
| Python | 0.000002 | |
94f922c77ee89a5b54b99e135a5045f450badb0e | add new script to dump nice looking release notes like. Borrowed from antlr. | scripts/github_release_notes.py | scripts/github_release_notes.py | # Get github issues / PR for a release
# Exec with "python github_release_notes.py YOUR_GITHUB_API_ACCESS_TOKEN 1.19"
import sys
from collections import Counter
from github import Github
TOKEN=sys.argv[1]
MILESTONE=sys.argv[2]
g = Github(login_or_token=TOKEN)
# Then play with your Github objects:
org = g.get_organization("antlr")
repo = org.get_repo("intellij-plugin-v4")
milestone = [x for x in repo.get_milestones() if x.title==MILESTONE]
milestone = milestone[0]
issues = repo.get_issues(state="closed", milestone=milestone, sort="created", direction="desc")
# dump bugs fixed
print()
print("## Issues fixed")
for x in issues:
labels = [l.name for l in x.labels]
if x.pull_request is None and not ("type:improvement" in labels or "type:feature" in labels):
print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join([l.name for l in x.labels])))
# dump improvements closed for this release (issues or pulls)
print()
print("## Improvements, features")
for x in issues:
labels = [l.name for l in x.labels]
if ("type:enhancement" in labels or "type:feature" in labels):
print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join(labels)))
# dump PRs closed for this release
print()
print("## Pull requests")
for x in issues:
labels = [l.name for l in x.labels]
if x.pull_request is not None:
print("* [%s](%s) (%s)" % (x.title, x.html_url, ", ".join(labels)))
# dump contributors
print()
print("## Contributors")
user_counts = Counter([x.user.login for x in issues])
users = {x.user.login:x.user for x in issues}
for login,count in user_counts.most_common(10000):
name = users[login].name
logins = f" ({users[login].login})"
if name is None:
name = users[login].login
logins = ""
print(f"* {count:3d} items: [{name}]({users[login].html_url}){logins}")
| Python | 0 | |
fe08ce77958c637539b24817ffca45587fa31a7e | Implement shared API | platformio/shared.py | platformio/shared.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-import
from platformio.device.filters.base import DeviceMonitorFilterBase
from platformio.device.list import list_serial_ports
from platformio.fs import to_unix_path
from platformio.platform.base import PlatformBase
from platformio.project.config import ProjectConfig
from platformio.project.helpers import load_build_metadata
from platformio.test.result import TestCase, TestCaseSource, TestStatus
from platformio.test.runners.base import TestRunnerBase
from platformio.test.runners.doctest import DoctestTestCaseParser
from platformio.test.runners.googletest import GoogletestTestRunner
from platformio.test.runners.unity import UnityTestRunner
from platformio.util import get_systype
| Python | 0.00001 | |
42297354f575e2c82346cf033202c5dfad5ddd99 | Add python class for writing out xyz files of trajectory coordinates | lib/examples/nacl_amb/utils.py | lib/examples/nacl_amb/utils.py | #!/usr/bin/env python
import numpy
class TrajWriter(object):
    '''
    Write a trajectory trace out as an ``.xyz`` file for subsequent
    visualization.

    Each frame contains two atoms, labelled SOD and CLA, taken from the
    per-iteration ``coord`` auxiliary data addressed through ``w``.
    The file is written immediately on construction.
    '''

    def __init__(self, trace, w, filename='trace.xyz'):
        # trace: object with parallel ``iteration`` and ``seg_id`` sequences.
        # w: data interface whose ``iteration`` attribute selects the current
        #    iteration and whose ``current.auxdata['coord']`` holds coordinates
        #    per segment -- assumed shape (timepoints, 2 atoms, 3) per segment;
        #    TODO confirm against the caller.
        self.trace = trace
        self.w = w
        self.filename = filename
        self._write()

    def _get_coords(self, iteration, seg_id):
        """Return the coordinate array for one segment of one iteration.

        Side effect: repoints ``self.w`` at ``iteration``.
        """
        self.w.iteration = iteration
        return self.w.current.auxdata['coord'][seg_id]

    def _write(self):
        """Collect coordinates along the trace and write them as .xyz frames."""
        all_coords = []
        starting_iteration = self.w.iteration
        try:
            for i, iteration in enumerate(self.trace.iteration):
                seg_id = self.trace.seg_id[i]
                coords = self._get_coords(iteration, seg_id)
                # The last timepoint of one iteration is the same as the first
                # timepoint of the next, so skip the last timepoint of each
                # iteration to avoid duplicate frames.
                all_coords.append(coords[:-1])
        finally:
            # Robustness fix: restore the caller's iteration even when a
            # lookup above raises, instead of leaving ``w`` repointed.
            self.w.iteration = starting_iteration
        all_coords = numpy.concatenate(all_coords)
        with open(self.filename, 'w') as outfile:
            for i, frame in enumerate(all_coords):
                # .xyz frame: atom count, comment line (frame index), atoms.
                outfile.write("2\n")
                outfile.write("{0}\n".format(i))
                outfile.write("SOD {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[0,0]), float(frame[0,1]), float(frame[0,2])))
                outfile.write("CLA {0:9.5f} {1:9.5f} {2:9.5f}\n".format(
                    float(frame[1,0]), float(frame[1,1]), float(frame[1,2])))
| Python | 0.000072 | |
d5125205801b9771115a052162ee700f64601557 | Create frequency.py | frequency.py | frequency.py | import sys
import csv
csv.field_size_limit(sys.maxsize)
from pymystem3 import Mystem
import time
import cProfile
from collections import defaultdict
class CsvHandler:
    """Build a document-frequency dictionary of content-word lemmas from a
    tab-separated wiki dump.

    Each input row's third column is an article text; the text is lemmatised
    with pymystem3 and every distinct lemma counts once per article.
    """

    INPUTFILE = 'wiki_noxml_full.txt'
    OUTPUTFILE = 'my_frequency_list.csv'

    def __init__(self):
        self.file_name = self.INPUTFILE
        self.csvlength = 0
        self.lemmatiser = Mystem()
        # lemma -> number of articles containing it.
        # BUG FIX: was ``defaultdict(dict)`` plus a manual membership test;
        # the values are plain int counts, so ``defaultdict(int)`` matches the
        # actual usage and lets get_freq_dict simply increment.
        self.fd = defaultdict(int)

    def do_cprofile(func):
        """Decorator that runs ``func`` under cProfile and prints the stats.

        NOTE(review): written as a plain function in the class body (no
        ``self``), presumably for use as ``@do_cprofile`` on the methods
        below; it is currently unused.
        """
        def profiled_func(*args, **kwargs):
            profile = cProfile.Profile()
            try:
                profile.enable()
                result = func(*args, **kwargs)
                profile.disable()
                return result
            finally:
                profile.print_stats()
        return profiled_func

    def get_freq_dict(self, filename):
        """Count, for every lemma, how many articles in ``filename`` use it."""
        t0 = time.time()
        print("Start freq dict")
        with open(filename, 'r') as csvfile:
            datareader = csv.reader(csvfile, delimiter='\t')
            for ln, row in enumerate(datareader):
                if ln % 100 == 0:
                    print(ln, "articles processed")
                # Column 2 holds the article body.
                input_text = row[2]
                for lemma in self.get_lem_set(input_text):
                    self.fd[lemma] += 1
        t1 = time.time()
        # Dump the whole dictionary for inspection (kept from the original).
        for a, b in self.fd.items():
            print(a, b)
        print("Finished. Get input file processing time %2.2f secs, whoosh !" % (t1 - t0))

    def get_lem_set(self, text):
        """Return the set of content-word lemmas (length > 1) found in ``text``.

        Keeps only adjectives ('A='), nouns ('S,') and verbs ('V=') as
        identified by the first two characters of Mystem's grammar string.
        """
        content_pos = ('A=', 'S,', 'V=')
        return_set = set()
        for el in self.lemmatiser.analyze(text):
            analysis = el.get('analysis', None)
            if analysis:
                if analysis[0].get('gr')[0:2] in content_pos and len(analysis[0].get('lex')) > 1:
                    return_set.add(analysis[0].get('lex'))
        return return_set

    def output_dict(self, filename, output_dictionary, threshold):
        """Write entries with count > ``threshold`` to ``filename`` as CSV.

        Keys are expected to be 'first:::second' word pairs -- TODO confirm:
        get_freq_dict stores single lemmas, whose split would yield only one
        element and make ``words[1]`` raise IndexError.
        """
        t0 = time.time()
        with open(filename, 'w', newline='', encoding="UTF-8") as csv_file:
            csv_writer = csv.writer(csv_file, dialect='excel')
            csv_writer.writerow(["First word", "Second word", "Frequency"])
            for key, count in output_dictionary.items():
                if count > threshold:
                    words = key.split(':::')
                    csv_writer.writerow([words[0], words[1], count])
            # The ``with`` block flushes and closes the file; the explicit
            # flush()/close() calls of the original were redundant.
        t1 = time.time()
        print("Finished. Get output file processing time %2.2f secs, whoosh !" % (t1 - t0))

    def process(self):
        """Run the pipeline: build the frequency dictionary from the input file."""
        self.get_freq_dict(self.file_name)
if __name__ == '__main__':
    # Run the whole pipeline and report total wall-clock time.
    print("Start")
    c = CsvHandler()
    t0 = time.time()
    c.process()
    t1 = time.time()
    print("Finished. Total processing time %2.2f secs, whoosh !" % (t1 - t0))
| Python | 0.00011 | |
0c719d59b6155ed50692810fab57814370fde1bb | Create fcp_xml2csv.py | fcp_xml2csv.py | fcp_xml2csv.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################
# FoodCheckPeel XML2CSV Converter
#
# This script converts the FoodCheckPeel XML
# file to the Comma Separated Values file type
# so it's easier to import to common spreadsheet
# applications.
# Based on the script posted here:
# http://www.bearpanther.com/2013/09/14/converting-xml-to-csv-with-python/
#
# Programmed with Python 2.7 + lxml 3.2.4
# Kevin Farrugia
############################################
__author__ = "Kevin Farrugia"
__copyright__ = "(c) 2014 Kevin Farrugia"
__license__ = "MIT"
__credits__ = "Derek Swingley (author of Bear Panther)"
import csv
from lxml import etree
print "Welcome to the FoodCheck Peel XML to CSV Conversion utility!\n"
print "============================================================\n"
print "\tPlease make sure that the XML file is the latest\n\t version so that your data isn't out of date.\n"
print "Authored by Kevin Farrugia - Last updated January 2014\n"
print "------------------------------------------------------------\n"
outFileName = raw_input("What would you like the output file to be named (this defaults to the correct FoodCheckPeel.csv)? ")
if outFileName.strip() is "":
outFileName = "FoodCheckPeel.csv"
outputData = []
# File parser
# Recover set to True to allow it to try to work through broken XML
# remove_blank_text set to True so that it removes tailing whitespace
fileParse = etree.XMLParser(recover=True, remove_blank_text=True)
#The input XML file name - in our case, it is static and predictable so we don't allow users to change it
fileName = "FoodCheckPeel.XML"
#Parse the XML
root = etree.parse(fileName, fileParse)
#Names of elements that will be carried over (in this case, all of them)
headers = [ "FACILITY_NUMBER", "FACILITY_NAME", "FACILITY_TYPE", "STREET_NUMBER", "STREET_NAME", "STREET_DIR", "CITY", "X", "Y", "LAT", "LON", "INSPECTION_DATE", "STATUS", "INSPECTION_ID", "INSPECTION_TYPE", "INFRACTION_ID", "INFRACTION_TYPE" ]

#Here is where we grab information for each FCP location and parse it out
def getInfo(p):
    """Return one CSV row (a value per entry of ``headers``) for a <ROW> element.

    BUG FIXES relative to the original:
    - ``node == "LAT"`` compared an Element to a string (always False), so
      coordinates were never rounded; test the attribute name instead.
    - A missing element crashed on ``node.text``; emit an empty cell so the
      columns stay aligned.
    - A dangling ``for``/``else`` appended a stray "" to every row; each row
      now has exactly ``len(headers)`` cells.
    """
    rowData = []
    for attribute in headers:
        node = p.find(attribute)
        if node is None or node.text is None:
            # Element missing or empty: keep the column with an empty cell.
            rowData.append("")
        elif attribute in ("LAT", "LON"):
            # Round coordinates to 5 decimal places so the file size isn't as
            # large: 5 decimals is ~0.79 m of accuracy error E/W at 45 degrees.
            rowData.append(round(float(node.text), 5))
        else:
            rowData.append(node.text.encode("utf-8"))
    return rowData
print "\nReading the Food Check Peel XML..."
print "\n\t...please be patient while it reads and writes the files..."

# Convert every <ROW> element into one CSV row.
location = root.findall("ROW")
for p in location:
    locationStatus = getInfo(p)
    if locationStatus:
        outputData.append(locationStatus)

print "\n...finished parsing the XML, starting to write the file..."

# "wb" because Python 2's csv module expects a binary-mode file.
outputFile = open(outFileName, "wb")

#This writes the CSV using Python's CSV plugin
# quoting = QUOTE_MINIMAL is used for a couple of reasons:
#   (1) It quotes text only where there are special characters that would interfere with the correct use of the CSV
#   (2) It keeps the file size to a minimum and allows the end user more control over how the field types are interpreted
# As an alternate, QUOTE_NONNUMERIC could be used to quote all fields that contain text. This, however, makes non-quoted fields of type float (for better or worse)
# See http://docs.python.org/2/library/csv.html for more options and info
writeCSV = csv.writer(outputFile, quoting=csv.QUOTE_MINIMAL)

writeCount = 0
for row in outputData:
    writeCSV.writerow(row)
    writeCount += 1
outputFile.close()

print "\n------------------------------------------------------------"
print "\nWrote " + str(writeCount) + " rows out to the " + str(outFileName) + " output file."
print "Great success! Double check the output, though!"
88bd6466940d21d52c0d5235ace10b6a97d69d46 | Create emailtoHIBP.py | emailtoHIBP.py | emailtoHIBP.py | #!/usr/bin/python
#EmailtoHIBP.py
#Author: Sudhanshu Chauhan - @Sudhanshu_C
#This Script will retrieve the Domain(s) at which the specified account has been compromised
#It uses the API provided by https://haveibeenpwned.com/
#Special Thanks to Troy Hunt - http://www.troyhunt.com/
#For MaltegoTransform library and Installation guidelines go to http://www.paterva.com/web6/documentation/developer-local.php
from MaltegoTransform import *
import sys
import urllib2
mt = MaltegoTransform();
mt.parseArguments(sys.argv);
email=mt.getValue();
mt = MaltegoTransform()
hibp="https://haveibeenpwned.com/api/breachedaccount/"
getrequrl=hibp+email
response = urllib2.urlopen(getrequrl)
for rep in response:
mt.addEntity("maltego.Phrase","Pwned at " + rep)
mt.returnOutput()
| Python | 0 | |
cd2e95c157f5ea09000a540fc4689a2aa3e82006 | Add Applebee's. Closes #108. | locations/spiders/applebees.py | locations/spiders/applebees.py | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from locations.items import GeojsonPointItem
class ApplebeesSpider(scrapy.Spider):
name = "applebees"
allowed_domains = ["restaurants.applebees.com"]
start_urls = (
'http://restaurants.applebees.com/sitemap.html',
)
def store_hours(self, store_hours):
day_groups = []
this_day_group = None
for line in store_hours.split('m '):
match = re.search(r'^(Su|Mo|Tu|We|Th|Fr|Sa) (\d{1,2}):(\d{2})(a|p)m-(\d{1,2}):(\d{2})(a|p)m?$', line)
(day, f_hr, f_min, f_ampm, t_hr, t_min, t_ampm) = match.groups()
f_hr = int(f_hr)
if f_ampm == 'p':
f_hr += 12
elif f_ampm == 'a' and f_hr == 12:
f_hr = 0
t_hr = int(t_hr)
if t_ampm == 'p':
t_hr += 12
elif t_ampm == 'a' and t_hr == 12:
t_hr = 0
hours = '{:02d}:{}-{:02d}:{}'.format(
f_hr,
f_min,
t_hr,
t_min,
)
if not this_day_group:
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] != hours:
day_groups.append(this_day_group)
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
opening_hours = '24/7'
else:
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
opening_hours += '{hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def address(self, address):
if not address:
return None
addr_tags = {
"addr:full": address['streetAddress'],
"addr:city": address['addressLocality'],
"addr:state": address['addressRegion'],
"addr:postcode": address['postalCode'],
"addr:country": address['addressCountry'],
}
return addr_tags
def parse(self, response):
base_url = get_base_url(response)
urls = response.xpath('//ul[@class="store-list"]/li/a/@href').extract()
for path in urls:
yield scrapy.Request(urljoin_rfc(base_url, path))
if urls:
return
json_data = response.xpath('//head/script[@type="application/ld+json"]/text()')
data = json.loads(json_data[0].extract())
properties = {
'name': response.xpath('//div[@itemprop="name"]/text()')[0].extract(),
'phone': data['telephone'],
'website': data['url'],
'ref': data['url'],
'opening_hours': self.store_hours(data['openingHours'])
}
address = self.address(data['address'])
if address:
properties.update(address)
lon_lat = [
float(data['geo']['longitude']),
float(data['geo']['latitude']),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.