commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
de4e54e1de5905600d539df781994612f03e0672 | Add files via upload | matrix.py | matrix.py | import numpy as np
def parse_to_matrix(input_file_path, div = '\t', data_type = int):
input_file = open(input_file_path, 'r')
matrix = [ map(data_type,line.strip().split('%s' % div)) for line in input_file if line.strip() != "" ]
input_file.close()
return np.array(matrix)
def parse_to_vectors(input_file_path, div = '\t', data_type = int):
input_file = open(input_file_path, 'r')
matrix = [ map(data_type,line.strip().split('%s' % div)) for line in input_file if line.strip() != "" ]
input_file.close()
return np.array(matrix)
def write_matrix_into_file(matrix, output_file_path):
output = open(output_file_path, 'w')
size = len(matrix)
for row_i in range(size):
vec = matrix[row_i]
output.write('%s' % ' '.join(str(i) for i in vec))
output.write('\n')
output.close()
def write_matrix_into_file(matrix, heads, output_file_path):
output = open(output_file_path, 'w')
size = len(matrix)
for row_i in range(size):
vec = matrix[row_i]
head = heads[row_i]
output.write('%s' % head)
output.write(' ')
output.write('%s' % ' '.join(str(i) for i in vec))
output.write('\n')
output.close()
| Python | 0 | |
42e485b7367e1a707a73b834f39fc6d3f356b61d | remove valid config check | verbs/gdb.py | verbs/gdb.py | """implement 'gdb' verb (debugs a single target with gdb)
gdb
gdb [target]
gdb [target] [config]
"""
import subprocess
from mod import log, util, config, project, settings
#-------------------------------------------------------------------------------
def gdb(fips_dir, proj_dir, cfg_name, target=None) :
"""debug a single target with gdb"""
# prepare
proj_name = util.get_project_name_from_dir(proj_dir)
util.ensure_valid_project_dir(proj_dir)
# load the config(s)
configs = config.load(fips_dir, proj_dir, cfg_name)
if configs :
for cfg in configs :
# check if config is valid
config_valid, _ = config.check_config_valid(fips_dir, cfg, print_errors = True)
if config_valid :
deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg)
log.colored(log.YELLOW, "=== gdb: {}".format(cfg['name']))
cmdLine = ['gdb', target]
subprocess.call(args = cmdLine, cwd = deploy_dir)
else :
log.error("Config '{}' not valid in this environment".format(cfg['name']))
else :
log.error("No valid configs found for '{}'".format(cfg_name))
return True
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
"""debug a single target with gdb"""
if not util.is_valid_project_dir(proj_dir) :
log.error('must be run in a project directory')
tgt_name = None
cfg_name = None
if len(args) > 0 :
tgt_name = args[0]
if len(args) > 1:
cfg_name = args[1]
if not cfg_name :
cfg_name = settings.get(proj_dir, 'config')
if not tgt_name :
tgt_name = settings.get(proj_dir, 'target')
if not tgt_name :
log.error('no target specified')
gdb(fips_dir, proj_dir, cfg_name, tgt_name)
#-------------------------------------------------------------------------------
def help() :
"""print 'gdb' help"""
log.info(log.YELLOW +
"fips gdb\n"
"fips gdb [target]\n"
"fips gdb [target] [config]\n" + log.DEF +
" debug a single target in current or named config")
| """implement 'gdb' verb (debugs a single target with gdb)
gdb
gdb [target]
gdb [target] [config]
"""
import subprocess
from mod import log, util, config, project, settings
#-------------------------------------------------------------------------------
def gdb(fips_dir, proj_dir, cfg_name, target=None) :
"""debug a single target with gdb"""
# prepare
proj_name = util.get_project_name_from_dir(proj_dir)
util.ensure_valid_project_dir(proj_dir)
# load the config(s)
configs = config.load(fips_dir, proj_dir, cfg_name)
if configs :
for cfg in configs :
# check if config is valid
config_valid, _ = config.check_config_valid(fips_dir, cfg, print_errors = True)
if config_valid :
deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg)
log.colored(log.YELLOW, "=== gdb: {}".format(cfg['name']))
cmdLine = ['gdb', target]
subprocess.call(args = cmdLine, cwd = deploy_dir)
else :
log.error("Config '{}' not valid in this environment".format(cfg['name']))
else :
log.error("No valid configs found for '{}'".format(cfg_name))
if num_valid_configs != len(configs) :
log.error('{} out of {} configs failed!'.format(len(configs) - num_valid_configs, len(configs)))
return False
else :
log.colored(log.GREEN, '{} configs built'.format(num_valid_configs))
return True
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, args) :
"""debug a single target with gdb"""
if not util.is_valid_project_dir(proj_dir) :
log.error('must be run in a project directory')
tgt_name = None
cfg_name = None
if len(args) > 0 :
tgt_name = args[0]
if len(args) > 1:
cfg_name = args[1]
if not cfg_name :
cfg_name = settings.get(proj_dir, 'config')
if not tgt_name :
tgt_name = settings.get(proj_dir, 'target')
if not tgt_name :
log.error('no target specified')
gdb(fips_dir, proj_dir, cfg_name, tgt_name)
#-------------------------------------------------------------------------------
def help() :
"""print 'gdb' help"""
log.info(log.YELLOW +
"fips gdb\n"
"fips gdb [target]\n"
"fips gdb [target] [config]\n" + log.DEF +
" debug a single target in current or named config")
| Python | 0.000001 |
0118316df964c09198747255f9f3339ed736066d | Create test.py | test/test.py | test/test.py | # TweetPy
# Test
import unittest
import tweet
class SampleTestClass(unittest.TestCase):
def sampleTest(self):
#do something
a = 1
if __name__ == '__main__':
unittest.main()
| Python | 0.000005 | |
ba180a1798296346116a7c11557ddbe4aa40da8b | Solving p20 | p020.py | p020.py | sum([int(digit) for digit in str(math.factorial(100))]) | Python | 0.999485 | |
1e96ec2104e6e30af3bcf7c9dd61bd8f157b7519 | Solving p028 | p028.py | p028.py | def spiral_corners(size):
yield 1
for x in range(3, size+1, 2):
base_corner = x**2
corner_diff = x-1
for corner in (3,2,1,0):
yield base_corner-corner_diff*corner
def solve_p026():
return sum(spiral_corners(1001))
if __name__ == '__main__':
print solve_p026()
| Python | 0.999362 | |
fa4b01102d1226ccc3dcf58119053bbc8839c36e | add ex42 | lpthw/ex42.py | lpthw/ex42.py | #!/usr/bin/env python
# Exercise 42: Is-A, Has-A, Objects, and Classes
## Animal is-a object (yes, sort of confusing) look at the extra credit
class Animal(object):
pass
## ??
class Dog(Animal):
def __init__(self, name):
## ??
self.name = name
## ??
class Cat(Animal):
def __init__(self, name):
## ??
self.name = name
## ??
class Person(object):
def __init__(self, name):
## ??
self.name = name
## Person has-a pet of some kind
self.pet = None
## ??
class Employee(Person):
def __init__(self, name, salary):
## ?? hmm what is this strange magic?
super(Employee, self).__init__(name)
## ??
self.salary = salary
## ??
class Fish(object):
pass
## ??
class Salmon(Fish):
pass
## ??
class Halibut(Fish):
pass
## rover is-a Dog
rover = Dog("Rover")
## ??
satan = Cat("Satan")
## ??
mary = Person("Mary")
## ??
mary.pet = satan
## ??
frank = Employee("Frank", 120000)
## ??
frank.pet = rover
## ??
flipper = Fish()
## ??
crouse = Salmon()
## ??
harry = Halibut()
| Python | 0.998437 | |
94bc0d6596aba987943bf40e2289f34240081713 | Add lc0041_first_missing_positive.py | lc0041_first_missing_positive.py | lc0041_first_missing_positive.py | """Leetcode 41. First Missing Positive
Hard
URL: https://leetcode.com/problems/first-missing-positive/
Given an unsorted integer array, find the smallest missing positive integer.
Example 1:
Input: [1,2,0]
Output: 3
Example 2:
Input: [3,4,-1,1]
Output: 2
Example 3:
Input: [7,8,9,11,12]
Output: 1
Note:
Your algorithm should run in O(n) time and uses constant extra space.
"""
class Solution(object):
def firstMissingPositive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.998416 | |
1aba0e5ba6aa91c2aa608c2c94411c59e4a3eca5 | Create stack_plot.py | stack_plot.py | stack_plot.py | # -*- coding: utf-8 -*-
"""
Includes a function for visualization of data with a stack plot.
"""
from matplotlib import pyplot as plt
from matplotlib import ticker
import random
def stack(number_of_topics, TopicTitles, X, Y):
"""Creates a stack plot for the number of papers published from 2002 to 2014
for each topic"""
# random colors as RGB
colors = [(random.randint(0,255),random.randint(0,255),random.randint(0,255)) for i in range(number_of_topics)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(colors)):
r, g, b = colors[i]
colors[i] = (r / 255., g / 255., b / 255.)
plt.figure(num=1,figsize=(30,27))
ax1 = plt.subplot()
x_formatter = ticker.ScalarFormatter(useOffset=False)
y_formatter = ticker.ScalarFormatter(useOffset=False)
ax1.yaxis.set_major_formatter(y_formatter)
ax1.xaxis.set_major_formatter(x_formatter)
ax1.set_ylabel('Number of Papers')
ax1.set_xlabel('Year of Publication')
polys = ax1.stackplot(X, Y, colors=colors)
legendProxies = []
for poly in polys:
legendProxies.append(plt.Rectangle((0, 0), 1, 1, fc=poly.get_facecolor()[0]))
plt.legend(legendProxies,TopicTitles,prop={'size':8})
plt.tight_layout(pad=1.08, h_pad=None, w_pad=None, rect=None)
plt.show()
| Python | 0.000081 | |
d4ac57f3a328dd98b76f6c8924ddc9d735c32c04 | Add py-sphinxcontrib-qthelp package (#13275) | var/spack/repos/builtin/packages/py-sphinxcontrib-qthelp/package.py | var/spack/repos/builtin/packages/py-sphinxcontrib-qthelp/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySphinxcontribQthelp(PythonPackage):
"""sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp
document."""
homepage = "http://sphinx-doc.org/"
url = "https://pypi.io/packages/source/s/sphinxcontrib-qthelp/sphinxcontrib-qthelp-1.0.2.tar.gz"
version('1.0.2', sha256='79465ce11ae5694ff165becda529a600c754f4bc459778778c7017374d4d406f')
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
def test(self):
# Requires sphinx, creating a circular dependency
pass
| Python | 0 | |
6b5587fc7856b5d03b3605e1a31234ff98df88e2 | add L3 quiz - Expressions | lesson3/quizExpressions/unit_tests.py | lesson3/quizExpressions/unit_tests.py | import re
is_correct = False
brace_regex = "{{.*}}"
color_regex = "(?:brick.)?color"
size_regex = "(?:brick.)?size"
price_regex = "(?:brick.)?price"
heading = widget_inputs["text1"]
brick_color = widget_inputs["text2"]
brick_size = widget_inputs["text3"]
brick_price = widget_inputs["text4"]
brick_description = widget_inputs["text5"]
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if heading == '':
is_correct = True
else:
commentizer("Do you think the heading should change if you use a different brick? Why would a different brick make the heading change?")
#check the brick's color matches a RegEx
if re.search( color_regex, brick_color ):
if not re.search( brace_regex, brick_color ):
is_correct = False
commentizer("What you entered into the color field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The color field is not correct.")
#check the brick's size matches a RegEx
if re.search( size_regex, brick_size ):
if not re.search( brace_regex, brick_size ):
is_correct = False
commentizer("What you entered into the size field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The size field is not correct.")
#check the brick's price matches a RegEx
if re.search( price_regex, brick_price ):
if not re.search( brace_regex, brick_price ):
is_correct = False
commentizer("What you entered into the price field is correct, but it's still regular text. How do you create an expression in Angular?")
else:
is_correct = is_correct and True
else:
is_correct = False
commentizer("The price field is not correct.")
# if they're all unchecked
if not any([heading, brick_color, brick_size, brick_price, brick_description]):
is_correct = False
comments = []
comments.append('At least one of these should be converted into an expression.\n\nLook at the data in the template and ask yourself, "Will this change if I use a different brick?" If the answer is yes, then enter the expression into the appropriate field.')
if is_correct:
commentizer("Great job!")
grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
| Python | 0.000001 | |
6855bfdc910c0c74743906f195f430817f2399b3 | Add rel-fra creation | omwg/fra2tab.py | omwg/fra2tab.py | #!/usr/share/python
# -*- encoding: utf-8 -*-
#
# Extract synset-word pairs from the WOLF (Wordnet Libre du Français)
# Remap 'b' to 'r'
# Some clean up (remove ' ()', '|fr.*')
import sys, re
import codecs, collections
### Change this!
wndata = "../wordnet/"
wnname = "WOLF (Wordnet Libre du Français)"
wnurl = "http://alpage.inria.fr/~sagot/wolf-en.html"
wnlang = "fra"
wnlicense = "CeCILL-C"
wnversion = "1.0b"
#
# header
#
outfile = "wn-data-%s.tab" % wnlang
o = codecs.open(outfile, "w", "utf-8" )
o.write("# %s\t%s\t%s\t%s\n" % (wnname, wnlang, wnurl, wnlicense))
#
# Data is in the file wolf-1.0b.xml
#<?xml version="1.0" encoding="utf-8"?>
#<!DOCTYPE WN SYSTEM "debvisdic-strict.dtd">
#<WN>
#<SYNSET><ILR type="near_antonym">eng-30-00002098-a</ILR><ILR type="be_in_state">eng-30-05200169-n</ILR><ILR type="be_in_state">eng-30-05616246-n</ILR><ILR type="eng_derivative">eng-30-05200169-n</ILR><ILR type="eng_derivative">eng-30-05616246-n</ILR><ID>eng-30-00001740-a</ID><SYNONYM><LITERAL lnote="2/2:fr.csbgen,fr.csen">comptable</LITERAL></SYNONYM><DEF>(usually followed by `to') having the necessary means or skill or know-how or authority to do something</DEF><USAGE>able to swim</USAGE><USAGE>she was able to program her computer</USAGE><USAGE>we were at last able to buy a car</USAGE><USAGE>able to get a grant for the project</USAGE><BCS>3</BCS><POS>a</POS></SYNSET>
synset = str()
lemma = str()
### need to do some cleanup, so store once to remove duplicates
wn = collections.defaultdict(set)
hyper = collections.defaultdict(list)
f = codecs.open(wndata + 'wolf-1.0b4.xml', 'rb', encoding='utf-8')
for l in f:
m = re.search(r'<ID>eng-30-(.*-[avnrb])<\/ID>',l)
if(m):
synset = m.group(1).strip().replace('-b', '-r')
i = re.finditer(r"<LITERAL[^>]*>([^<]+)<",l)
for m in i:
lemma = m.group(1).strip()
#lemma = re.sub(r'[ _]\(.*\)', ' ', lemma).strip()
#lemma = re.sub(r'\|fr.*$', '', lemma).strip()
if lemma != '_EMPTY_':
wn[synset].add(lemma)
i = re.finditer(r"<ILR type=\"hypernym\">([^<]+)<", l)
for m in i:
if lemma != '_EMPTY_':
key = m.group(1).strip().replace('-b', '-r').split('-')[2] + '-' + m.group(1).strip().replace('-b', '-r').split('-')[3]
hyper[key].append(synset)
for synset in sorted(wn):
for lemma in wn[synset]:
o.write("%s\t%s:%s\t%s\n" % (synset, wnlang, 'lemma', lemma))
rels = open('rels-fra.csv', 'a')
for key in hyper.keys():
for g in wn[key]:
for g2 in hyper[key]:
for w in wn[g2]:
g = str(g)
w = str(w)
g = g.replace(" ", "_")
w = w.replace(" ", "_")
rels.write(str(key)+str(g)+'fra' + '\t' + str(g2)+str(w)+'fra' + '\t' + str(g) + '\t' + str(w) + '\tHYPER' + '\n')
rels.write(str(g2)+str(w)+'fra' + '\t' +str(key)+str(g)+'fra' + '\t' + str(w) + '\t' + str(g) + '\tHYPO' + '\n')
| Python | 0 | |
4731e99882d035a59555e5352311d00c4e122f09 | Print useful information about a GTFS feed | onestop/info.py | onestop/info.py | """Provide useful information about a GTFS file."""
import argparse
import geohash
import gtfs
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='GTFS Information')
parser.add_argument('filename', help='GTFS File')
parser.add_argument('--debug', help='Show helpful debugging information', action='store_true')
args = parser.parse_args()
g = gtfs.GTFSReader(args.filename)
stops_centroid = g.stops_centroid()
stops_centroid_geohash = g.stops_geohash(debug=args.debug)
print "==== GTFS:", g.filename
print "Stops centroid:",stops_centroid
print "Stops centroid geohash:", geohash.encode(stops_centroid)
print "Stops centroid geohash with all stops in neighbors:", stops_centroid_geohash
| Python | 0 | |
335881f4644a6bb2b5f2abb5b193f39d304dbc71 | Fix user agent for the bnn_ sites | pages_scrape.py | pages_scrape.py | import logging
import requests
def scrape(url, extractor):
"""
Function to request and parse a given URL. Returns only the "relevant"
text.
Parameters
----------
url : String.
URL to request and parse.
extractor : Goose class instance.
An instance of Goose that allows for parsing of content.
Returns
-------
text : String.
Parsed text from the specified website.
meta : String.
Parsed meta description of an article. Usually equivalent to the
lede.
"""
logger = logging.getLogger('scraper_log')
try:
headers = {'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.107 Safari/537.36"}
page = requests.get(url, headers=headers)
try:
article = extractor.extract(raw_html=page.content)
text = article.cleaned_text
meta = article.meta_description
return text, meta
#Generic error catching is bad
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem scraping URL: {}. {}.'.format(url, e))
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem requesting url: {}. {}'.format(url, e))
| import logging
import requests
def scrape(url, extractor):
"""
Function to request and parse a given URL. Returns only the "relevant"
text.
Parameters
----------
url : String.
URL to request and parse.
extractor : Goose class instance.
An instance of Goose that allows for parsing of content.
Returns
-------
text : String.
Parsed text from the specified website.
meta : String.
Parsed meta description of an article. Usually equivalent to the
lede.
"""
logger = logging.getLogger('scraper_log')
try:
page = requests.get(url)
try:
article = extractor.extract(raw_html=page.content)
text = article.cleaned_text
meta = article.meta_description
return text, meta
#Generic error catching is bad
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem scraping URL: {}. {}.'.format(url, e))
except Exception, e:
print 'There was an error. Check the log file for more information.'
logger.warning('Problem requesting url: {}. {}'.format(url, e))
| Python | 0.999201 |
dddf634f8445fac66aa25265c7f7e859dab4c000 | add test file for python | test/test.py | test/test.py | # Highlighter Demo
class Person:
def __init__(self, x):
self.x = x
def show(self):
print(self.x)
person = Person("Ken")
person.show()
| Python | 0.000001 | |
abcbe6443492ba2f011dec0132a0afb3b8cc9b0b | Create __init__.py | hello-world/__init__.py | hello-world/__init__.py | Python | 0.000429 | ||
8d36c444fe379b5901692485c2850e86ed714f89 | Add sql connection tester | sql_connection_test.py | sql_connection_test.py | import mysql.connector
import json
with open("config.json") as f:
config = json.load(f)
try:
conn = mysql.connector.connect(
user=config["database_connection"]["username"],
password=config["database_connection"]["password"],
host=config["database_connection"]["host"],
database=config["database_connection"]["database"])
cursor = conn.cursor()
cursor.close()
print "Connection success"
except mysql.connector.errors.ProgrammingError as err:
print "Error connecting to database: \n{}".format(err)
| Python | 0.000007 | |
05741f17ffac95d66290d2ec705cbfb66fc74ff9 | Add dummpy documentation/stats/plot_sky_locations.py | documentation/stats/plot_sky_locations.py | documentation/stats/plot_sky_locations.py | from bokeh.plotting import figure, output_file, show
output_file("example.html")
x = [1, 2, 3, 4, 5]
y = [6, 7, 6, 4, 5]
p = figure(title="example", plot_width=300, plot_height=300)
p.line(x, y, line_width=2)
p.circle(x, y, size=10, fill_color="white")
show(p)
| Python | 0 | |
fe4b226b9b3d6fbc7be7d545c185ed7950f3a5fd | Add Python benchmark | lib/node_modules/@stdlib/math/base/dist/beta/logpdf/benchmark/python/benchmark.scipy.py | lib/node_modules/@stdlib/math/base/dist/beta/logpdf/benchmark/python/benchmark.scipy.py | #!/usr/bin/env python
"""Benchmark scipy.stats.beta.logpdf."""
import timeit
name = "beta:logpdf"
repeats = 3
iterations = 1000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = iterations / elapsed
print(" ---")
print(" iterations: " + str(iterations))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from scipy.stats import beta; from random import random;"
stmt = "y = beta.logpdf(random(), 100.56789, 55.54321)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in xrange(repeats):
print("# python::" + name)
elapsed = t.timeit(number=iterations)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(repeats, repeats)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| Python | 0.000138 | |
c36e390910b62e1ad27066a0be0450c81a6f87c6 | Add context manager for logging | d1_common_python/src/d1_common/logging_context.py | d1_common_python/src/d1_common/logging_context.py | # -*- coding: utf-8 -*-
"""Context manager that enables temporary changes in logging level.
Note: Not created by DataONE.
Source: https://docs.python.org/2/howto/logging-cookbook.html
"""
import logging
import sys
class LoggingContext(object):
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
| Python | 0 | |
4e36e520cb8fef8f07b545a3109e8507789e64bf | add tests, most are still stubbed out | tests/test.py | tests/test.py | import unittest
import urlparse
import sys
import os
import time
from datetime import datetime
FILE_PATH = os.path.dirname(os.path.realpath(__file__))
ROOT_PATH = os.path.abspath(os.path.join(FILE_PATH, '../'))
sys.path.append(ROOT_PATH)
from module.util import JSONTemplate
from module.graphite_utils import GraphStyle, graphite_time
class TestGraphiteTarget(unittest.TestCase):
pass
class TestGraphiteURL(unittest.TestCase):
pass
class TestGraphiteMetric(unittest.TestCase):
pass
class TestGraphiteTime(unittest.TestCase):
def test_unixtime_0(self):
self.assertEqual(graphite_time(0), '17:00_19691231')
def test_unixtime_now(self):
self.assertEqual(graphite_time(time.time()), datetime.now().strftime('%H:%M_%Y%m%d'))
def test_string(self):
self.assertEqual(graphite_time('test'), 'test')
class TestGraphiteStyle(unittest.TestCase):
def test_base(self):
style = GraphStyle()
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['308'], 'fontSize': ['8']})
def test_width(self):
style = GraphStyle(width=10)
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['10'], 'height': ['308'], 'fontSize': ['8']})
with self.assertRaises(ValueError):
GraphStyle(width='test')
def test_height(self):
style = GraphStyle(height=7)
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['7'], 'fontSize': ['8']})
with self.assertRaises(ValueError):
GraphStyle(height='test')
def test_font(self):
style = GraphStyle(font_size=16)
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['308'], 'fontSize': ['16']})
with self.assertRaises(ValueError):
GraphStyle(font_size='test')
def test_line_style(self):
style = GraphStyle(line_style='connected')
style = urlparse.parse_qs(str(style))
self.assertEqual(style, {'width': ['586'], 'height': ['308'], 'fontSize': ['8'], 'lineMode': ['connected']})
class TestJSONTemplate(unittest.TestCase):
data = [
{
"width": 586,
"height": 308,
"title": "Response Time on {{host}}",
"min": 0,
"targets": [
{
"target": "legendValue(alias({{host}}.{{service}}.rta,\"Response Time\"),\"last\")"
}
]
},
{
"width": 586,
"height": 308,
"title": "Packet Loss Percentage on {{host}}",
"min": 0,
"max": 100,
"targets": [
{
"target": "legendValue(alias({{host}}.{{service}}.pl,\"Packet loss percentage\"),\"last\")"
}
]
}
]
def test_load_file_path(self):
file_path = os.path.join(ROOT_PATH, 'tempaltes', 'graphite', 'check-host-alive.graph')
template = JSONTemplate(file_path)
self.assertEqual(template.data, self.data)
class TestGraphFactory(unittest.TestCase):
pass
if __name__ == '__main__':
unittest.main() | Python | 0 | |
0ab60ca22a41db85632e39065d142f3de081b0b9 | Create thesaurus.py | thesaurus.py | thesaurus.py | """
First, we must develop an API for thesaurus.com, as all the current ones are crap.
When searching a word (ex: "small"), the synonyms are grouped into three
categories, differing in presentation by a change in the background color
of their <span> object.
The categories are as follows:
relevant-1
relevant-2
relevant-3
The higher the integer suffix, clearly, the more relevant it is.
As for the antonyms, they are grouped similarly- however they are of value:
relevant--1
relevant--2
relevant--3
...wherein (-3) is the best matching antonym.
"""
# external libraries
import requests
from bs4 import BeautifulSoup
def findRankedSynonyms(inputWord,rank):
# set up the soup of beauty
url = "http://www.thesaurus.com/browse/" + inputWord
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
# check to see if there are actually synonyms for the entry.
errorTag = soup.select("#words-gallery-no-results")
if errorTag != []:
errorText = [item.text for item in errorTag][0]
print(errorText)
else:
# initialize a dictionary to hold all the synonyms
wordDict = {}
wordDict['syn3'] = []
wordDict['syn2'] = []
wordDict['syn1'] = []
wordTags = soup.select("span.text")
for word in wordTags:
relevanceLevel = word.parent.attrs["data-category"].rsplit("name\": \"")[1].rsplit("\",")[0]
if relevanceLevel == "relevant-3":
wordDict['syn3'].append(str(word.text)) # using str() to remove unicode u''
# print(word.text)
elif relevanceLevel == "relevant-2":
wordDict['syn2'].append(str(word.text))
# print(word.text)
elif relevanceLevel == "relevant-1":
wordDict['syn1'].append(str(word.text))
# print(word.text)
else:
break
return wordDict['syn' + str(rank)]
def findRankedAntonyms(inputWord,rank):
# set up the soup of beauty
url = "http://www.thesaurus.com/browse/" + inputWord
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
# check to see if there are actually synonyms for the entry.
errorTag = soup.select("#words-gallery-no-results")
if errorTag != []:
errorText = [item.text for item in errorTag][0]
print(errorText)
else:
# initialize a dictionary to hold all the antonyms
wordDict = {}
wordDict['ant3'] = []
wordDict['ant2'] = []
wordDict['ant1'] = []
wordTags = soup.select("span.text")
for word in wordTags:
relevanceLevel = word.parent.attrs["data-category"].rsplit("name\": \"")[1].rsplit("\",")[0]
if relevanceLevel == "relevant--3":
wordDict['ant3'].append(str(word.text)) # using str() to remove unicode u''
# print(word.text)
elif relevanceLevel == "relevant--2":
wordDict['ant2'].append(str(word.text))
# print(word.text)
elif relevanceLevel == "relevant--1":
wordDict['ant1'].append(str(word.text))
# print(word.text)
return wordDict['ant' + str(rank)]
def findSynonyms(inputWord):
# set up the soup of beauty
url = "http://www.thesaurus.com/browse/" + inputWord
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
# check to see if there are actually synonyms for the entry.
errorTag = soup.select("#words-gallery-no-results")
if errorTag != []:
errorText = [item.text for item in errorTag][0]
print(errorText)
else:
# initialize a list to hold all the synonyms
synonyms = []
wordTags = soup.select("span.text")
for word in wordTags:
relevanceLevel = word.parent.attrs["data-category"].rsplit("name\": \"")[1].rsplit("\",")[0]
print(word.text + " " + str(relevanceLevel))
# if relevanceLevel == ("relevant-3" or "relevant-2" or "relevant-1"):
# synonyms.append(str(word.text)) # using str() to remove unicode u''
return synonyms
def findAntonyms(inputWord):
# set up the soup of beauty
url = "http://www.thesaurus.com/browse/" + inputWord
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
# check to see if there are actually synonyms for the entry.
errorTag = soup.select("#words-gallery-no-results")
if errorTag != []:
errorText = [item.text for item in errorTag][0]
print(errorText)
else:
# initialize a list to hold all the antonyms
wordDict = {}
wordDict['ant3'] = []
wordDict['ant2'] = []
wordDict['ant1'] = []
wordTags = soup.select("span.text")
for word in wordTags:
relevanceLevel = word.parent.attrs["data-category"].rsplit("name\": \"")[1].rsplit("\",")[0]
if relevanceLevel == "relevant--3":
wordDict['ant3'].append(str(word.text)) # using str() to remove unicode u''
# print(word.text)
elif relevanceLevel == "relevant--2":
wordDict['ant2'].append(str(word.text))
# print(word.text)
elif relevanceLevel == "relevant--1":
wordDict['ant1'].append(str(word.text))
# print(word.text)
return wordDict
synonyms = findSynonyms("big")
for word in synonyms:
print word
# antonyms = findAntonyms("big")
# print(antonyms)
rankedSyn = findRankedSynonyms("big",1)
print rankedSyn
| Python | 0 | |
6c7a927e2fc0a054470c2a87fa98d07e993657ac | Add tests | test/test.py | test/test.py | import os
import unittest
try:
import directio
except ImportError:
import sys
sys.exit("""
Please install directio:
take a look at directio/README""")
class TestDirectio(unittest.TestCase):
def setUp(self):
super(TestDirectio, self).setUp()
flags = os.O_RDWR | os.O_DIRECT | os.O_SYNC | os.O_CREAT | os.O_TRUNC
self.file = os.open('test.txt', flags, 0o666)
self.buffer = bytearray(512)
self.msg = b'It just works!'
self.buffer[:len(self.msg)] = self.msg
def tearDown(self):
super(TestDirectio, self).tearDown()
os.close(self.file)
def test_read_after_write(self):
# can write only immutable buffer, so we buffer wrap in bytes
written = directio.write(self.file, bytes(self.buffer))
self.assertEqual(written, len(self.buffer))
os.lseek(self.file, 0, os.SEEK_SET)
got = directio.read(self.file, len(self.buffer))
self.assertEqual(got, self.buffer)
def test_fails_to_write_not_multiple_of_512(self):
self.assertRaises(ValueError, directio.write, self.file, self.msg)
def test_fails_to_read_not_multiple_of_512(self):
os.lseek(self.file, 0, os.SEEK_SET)
self.assertRaises(ValueError, directio.read, self.file, 511)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | |
45148b72cb69c49b2a6ef6e278f23d63328a7942 | Clean up and docs | testQuery.py | testQuery.py | #!/usr/bin/python
import unittest
import omniture
import sys
import os
import pprint
# API credentials come from the environment so they never live in the repo.
creds = {}
creds['username'] = os.environ['OMNITURE_USERNAME']
creds['secret'] = os.environ['OMNITURE_SECRET']
class QueryTest(unittest.TestCase):
    """Integration tests for building and running Omniture report queries.

    These tests hit the live Omniture API using the credentials read from
    the environment above, so they require network access.
    """
    def setUp(self):
        self.analytics = omniture.authenticate(creds['username'], creds['secret'])
        reportdef = self.analytics.suites[0].report
        queue = []
        queue.append(reportdef)
        self.report = omniture.sync(queue)
    def test_ranked(self):
        # A single-element report should come back typed as "ranked".
        basic_report = self.analytics.suites[0].report.element("page")
        queue = []
        queue.append(basic_report)
        response = omniture.sync(queue)
        for report in response:
            self.assertEqual(report.elements[0].id, "page", "The element is wrong")
            self.assertEqual(len(report.elements), 1, "There are too many elements")
            self.assertEqual(report.type, "ranked", "This is the wrong type of report it should be ranked")
    def test_report_run(self):
        self.assertIsInstance(self.analytics.suites[0].report.run(), omniture.Report, "The run method doesn't work to create a report")
    def test_bad_element(self):
        # Unknown element names should raise rather than silently no-op.
        self.assertRaises(KeyError,self.analytics.suites[0].report.element, "pages")
    def test_overtime(self):
        # Metric + granularity with no element produces an overtime report.
        basic_report = self.analytics.suites[0].report.metric("orders").granularity("hour")
        queue = []
        queue.append(basic_report)
        response = omniture.sync(queue)
    def test_double_element(self):
        basic_report = self.analytics.suites[0].report.element("page").element("browser")
        queue = []
        queue.append(basic_report)
        response = omniture.sync(queue)
        for report in response:
            self.assertEqual(report.elements[0].id,"page", "The 1st element is wrong")
            self.assertEqual(report.elements[1].id,"browser", "The 2nd element is wrong")
            self.assertEqual(len(report.elements), 2, "The number of elements is wrong")
            self.assertEqual(report.type, "ranked", "This is the wrong type of report it should be ranked")
    def test_double_metric(self):
        basic_report = self.analytics.suites[0].report.metric("pageviews").metric("visits")
        queue = []
        queue.append(basic_report)
        response = omniture.sync(queue)
        for report in response:
            self.assertEqual(report.metrics[0].id,"pageviews", "The 1st element is wrong")
            self.assertEqual(report.metrics[1].id,"visits", "The 2nd element is wrong")
            self.assertEqual(len(report.metrics), 2, "The number of elements is wrong")
            self.assertEqual(report.type, "overtime", "This is the wrong type of report it should be overtime")
    def test_element_paratmers(self):
        """Test the top and startingWith parameters
        This isn't a conclusive test. I really should run to two reports and compare the results to make sure it is corrent
        However, these tests need to be able run on any report suite and some reports suites (like ones that are currenly being
        used) don't have 10 items in the page name
        """
        basic_report = self.analytics.suites[0].report.element("page", top=5, startingWith=5)
        queue = []
        queue.append(basic_report)
        response = omniture.sync(queue)
        for report in response:
            self.assertEqual(report.elements['page'].id, "page" ,"The parameters might have screwed this up")
    @unittest.skip("don't have this one done yet")
    def test_anamoly_detection(self):
        basic_report = self.analytics.suites[0].report.metric("pageviews").range('2014-05-1', '2014-05-07').anomaly_detection()
        queue = []
        queue.append(basic_report)
        response = omniture.sync(queue)
        for report in response:
            self.assertEqual(report.metrics, "upper bound" ,"Anomaly Detection isn't working")
    def test_sortBy(self):
        """ Make sure sortBy gets put in report description """
        basic_report = self.analytics.suites[0].report.element('page').metric('pageviews').metric('visits').sortBy('visits')
        self.assertEqual(basic_report.raw['sortBy'], "visits")
    def test_current_data(self):
        """ Make sure the current data flag gets set correctly """
        basic_report = self.analytics.suites[0].report.element('page').metric('pageviews').metric('visits').currentData()
        self.assertEqual(basic_report.raw['currentData'], True)
if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
96a3fc178c9da5a8f917378e40454a0702d746e5 | Initialize construction module | drydock/construction.py | drydock/construction.py | """DryDock container construction."""
def construct(spec):
    """Construct the containers described by *spec*.

    Not yet implemented; this is a placeholder entry point.
    """
    pass
4c33fe7a927cde83aa53374e9fcaedfa18e51e77 | Add function to delete collection | utilities.py | utilities.py | def delete_collection(ee, id):
    # Resolve a relative asset id against the caller's first asset root.
    if 'users' not in id:
        root_path_in_gee = ee.data.getAssetRoots()[0]['id']
        id = root_path_in_gee + '/' + id
    params = {'id': id}
    # Delete every child asset first; a non-empty collection cannot be removed.
    items_in_collection = ee.data.getList(params)
    for item in items_in_collection:
        ee.data.deleteAsset(item['id'])
    # Finally delete the (now empty) collection itself.
    ee.data.deleteAsset(id)
4d9d286ec96e834fcb9acf1f1f52876e81668996 | Test script | tools/test.py | tools/test.py | from fakturo.billingstack.client import Client
# Smoke test: authenticate against a local billingstack instance and list
# merchants.  NOTE(review): hard-coded credentials are fine for a local-only
# test script, but should come from the environment in anything shared.
client = Client('http://localhost:9090/billingstack', username='ekarlso', password='secret0')
merchants = client.merchant.list()
| Python | 0.000001 | |
fabaea5e9f7b1e78887f91572540412182b8228c | add generic DTW algorithm | pyannote/algorithms/alignment/dtw.py | pyannote/algorithms/alignment/dtw.py | #!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2014 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Authors
# Hervé BREDIN (http://herve.niderb.fr)
"""Generic dynamic time warping (DTW) algorithm"""
from __future__ import unicode_literals
import numpy as np
class DynamicTimeWarping(object):
    """Align two sequences with dynamic time warping.

    Parameters
    ----------
    vsequence, hsequence : iterable
        (vertical and horizontal) sequences to be aligned.
    vcost : float, optional
        Cost for vertical paths (i, j) --> (i+1, j)
    hcost : float, optional
        Cost for horizontal paths (i, j) --> (i, j+1)
    dcost : float, optional
        Cost for diagonal paths (i, j) --> (i+1, j+1)
    distance_func : func, optional
        Function (vitem, hitem) --> distance between items
    precomputed : np.array, optional
        (H, W)-shaped array with pre-computed distances

    Raises
    ------
    ValueError
        If neither `distance_func` nor `precomputed` is provided.
    """

    def __init__(self, vsequence, hsequence,
                 distance_func=None, precomputed=None,
                 vcost=1., hcost=1., dcost=1.):

        super(DynamicTimeWarping, self).__init__()

        # sequences to be aligned
        self.vsequence = vsequence
        self.hsequence = hsequence

        # cost for elementary paths
        self.vcost = vcost  # vertical
        self.hcost = hcost  # horizontal
        self.dcost = dcost  # diagonal

        # precomputed distance matrix
        if precomputed is not None:
            self._distance = precomputed

        # on-the-fly distance computation (distances are cached lazily in
        # self._distance; NaN marks "not computed yet")
        elif distance_func is not None:
            H, W = len(vsequence), len(hsequence)
            self._distance = np.empty((H, W))
            # np.nan (not the np.NAN alias, removed in NumPy 2.0)
            self._distance[:] = np.nan
            self.distance_func = distance_func

        # any other case is not valid
        else:
            raise ValueError(
                'either distance_func or precomputed must be provided')

    def _get_distance(self, v, h):
        """Return distance between vsequence[v] and hsequence[h] (cached)."""
        # if distance is not computed already, do it once and for all
        if np.isnan(self._distance[v, h]):
            vitem = self.vsequence[v]
            hitem = self.hsequence[h]
            self._distance[v, h] = self.distance_func(vitem, hitem)
        return self._distance[v, h]

    def _get_cost(self):
        """Build the accumulated-cost matrix by dynamic programming."""
        height = len(self.vsequence)
        width = len(self.hsequence)
        cost = np.inf * np.ones((height, width))

        # initialize first row and first column
        cost[0, 0] = self._get_distance(0, 0)
        for v in range(1, height):
            cost[v, 0] = cost[v - 1, 0] + self.vcost * self._get_distance(v, 0)
        for h in range(1, width):
            cost[0, h] = cost[0, h - 1] + self.hcost * self._get_distance(0, h)

        # each cell extends the cheapest of the three incoming paths
        for v in range(1, height):
            for h in range(1, width):
                d = self._get_distance(v, h)
                dv = cost[v - 1, h] + self.vcost * d
                dh = cost[v, h - 1] + self.hcost * d
                dd = cost[v - 1, h - 1] + self.dcost * d
                cost[v, h] = min(dv, dh, dd)

        return cost

    def get_path(self):
        """Get lowest cost path

        Returns
        -------
        path : [(0, 0), ..., [(height-1, width-1)]
        """

        # compute cost matrix
        cost = self._get_cost()

        # initialize path at bottom/right
        height, width = len(self.vsequence), len(self.hsequence)
        v, h = height - 1, width - 1
        path = [(v, h)]

        # backtrack from bottom/right to top/left
        while v > 0 or h > 0:

            # backtrack one step
            # (py3 fix: tuple parameters in lambdas are Python-2-only syntax)
            v, h = min(
                # go left, go up or both?
                [(v - 1, h), (v, h - 1), (v - 1, h - 1)],
                # use cost matrix to choose among allowed paths
                key=lambda ij: (np.inf if ij[0] < 0 or ij[1] < 0
                                else cost[ij[0], ij[1]])
            )
            path.append((v, h))

        # reverse path so that it goes from top/left to bottom/right
        return path[::-1]
| Python | 0.000002 | |
583b520a6dada6e7a8bf984469fd6d2e9d8eaf28 | add general methods to instruments | pysat/instruments/methods/general.py | pysat/instruments/methods/general.py | # -*- coding: utf-8 -*-
"""Provides generalized routines for integrating instruments into pysat.
"""
from __future__ import absolute_import, division, print_function
import pandas as pds
import pysat
import logging
logger = logging.getLogger(__name__)
def list_files(tag=None, sat_id=None, data_path=None, format_str=None,
supported_tags=None, fake_daily_files_from_monthly=False,
two_digit_year_break=None):
"""Return a Pandas Series of every file for chosen satellite data.
This routine is intended to be used by pysat instrument modules supporting
a particular NASA CDAWeb dataset.
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are <tag strings>.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : (dict or NoneType)
keys are sat_id, each containing a dict keyed by tag
where the values file format template strings. (default=None)
fake_daily_files_from_monthly : bool
Some CDAWeb instrument data files are stored by month, interfering
with pysat's functionality of loading by day. This flag, when true,
appends daily dates to monthly files internally. These dates are
used by load routine in this module to provide data by day.
two_digit_year_break : int
If filenames only store two digits for the year, then
'1900' will be added for years >= two_digit_year_break
and '2000' will be added for years < two_digit_year_break.
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Examples
--------
::
fname = 'cnofs_vefi_bfield_1sec_{year:04d}{month:02d}{day:02d}_v05.cdf'
supported_tags = {'dc_b': fname}
list_files = functools.partial(nasa_cdaweb.list_files,
supported_tags=supported_tags)
fname = 'cnofs_cindi_ivm_500ms_{year:4d}{month:02d}{day:02d}_v01.cdf'
supported_tags = {'': fname}
list_files = functools.partial(mm_gen.list_files,
supported_tags=supported_tags)
"""
if data_path is not None:
if format_str is None:
try:
format_str = supported_tags[sat_id][tag]
except KeyError as estr:
raise ValueError('Unknown sat_id or tag: ' + estr)
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if (not out.empty) and fake_daily_files_from_monthly:
out.loc[out.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
return out
else:
estr = ''.join(('A directory must be passed to the loading routine ',
'for <Instrument Code>'))
raise ValueError(estr)
| Python | 0.000001 | |
6bf12f844bb67c0e97adab2b3a17f3c02f04259b | fix test runner compatibility with old pythons and weird tests (PY-1976) | python/helpers/pycharm/tcunittest.py | python/helpers/pycharm/tcunittest.py | import traceback, sys
from unittest import TestResult
import datetime
from pycharm.tcmessages import TeamcityServiceMessages
def strclass(cls):
    """Return the fully qualified dotted name of *cls* (module.ClassName)."""
    return "{0.__module__}.{0.__name__}".format(cls)
class TeamcityTestResult(TestResult):
    """unittest TestResult that reports progress via TeamCity service messages."""
    def __init__(self, stream=sys.stdout):
        TestResult.__init__(self)
        self.output = stream
        self.messages = TeamcityServiceMessages(self.output, prepend_linebreak=True)
        # Class of the suite currently being reported; None before the first test.
        self.current_suite = None
    def formatErr(self, err):
        # err is the (exctype, value, traceback) triple from sys.exc_info().
        exctype, value, tb = err
        return ''.join(traceback.format_exception(exctype, value, tb))
    def getTestName(self, test):
        # Fall back to str(test) for test objects without _testMethodName
        # (e.g. doctests or failed-to-load placeholders).
        if hasattr(test, '_testMethodName'):
            return test._testMethodName
        else:
            return str(test)
    def getTestId(self, test):
        return test.id
    def addSuccess(self, test):
        TestResult.addSuccess(self, test)
    def addError(self, test, err):
        TestResult.addError(self, test, err)
        err = self.formatErr(err)
        self.messages.testFailed(self.getTestName(test),
                                 message='Error', details=err)
    def addFailure(self, test, err):
        TestResult.addFailure(self, test, err)
        err = self.formatErr(err)
        self.messages.testFailed(self.getTestName(test),
                                 message='Failure', details=err)
    def addSkip(self, test, reason):
        self.messages.testIgnored(self.getTestName(test), message=reason)
    def startTest(self, test):
        # Emit suite-finished/suite-started messages whenever the test's
        # class changes, so TeamCity groups tests by suite.
        suite = test.__class__
        if suite != self.current_suite:
            if self.current_suite:
                self.messages.testSuiteFinished(strclass(self.current_suite))
            self.current_suite = suite
            self.messages.testSuiteStarted(strclass(self.current_suite), location="python_uttestid://" + strclass(self.current_suite))
        setattr(test, "startTime", datetime.datetime.now())
        self.messages.testStarted(self.getTestName(test), location="python_uttestid://" + str(test.id()))
    def stopTest(self, test):
        start = getattr(test, "startTime", datetime.datetime.now())
        d = datetime.datetime.now() - start
        # Elapsed time in milliseconds, assembled from the timedelta fields.
        duration=d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
        self.messages.testFinished(self.getTestName(test), duration=int(duration))
    def endLastSuite(self):
        # Close the suite that was still open when the run finished.
        if self.current_suite:
            self.messages.testSuiteFinished(strclass(self.current_suite))
            self.current_suite = None
class TeamcityTestRunner:
    """Minimal test runner that drives tests through TeamcityTestResult."""
    def __init__(self, stream=sys.stdout):
        # NOTE(review): sys.stdout is captured at definition time here, so
        # later redirection of sys.stdout is not picked up by the default.
        self.stream = stream
    def _makeResult(self):
        return TeamcityTestResult(self.stream)
    def run(self, test):
        result = self._makeResult()
        test(result)
        result.endLastSuite()
        return result
| import traceback, sys
from unittest import TestResult
import datetime
from pycharm.tcmessages import TeamcityServiceMessages
def strclass(cls):
    """Return the fully qualified dotted name of *cls*."""
    return "%s.%s" % (cls.__module__, cls.__name__)
class TeamcityTestResult(TestResult):
    """unittest TestResult that reports progress via TeamCity service messages."""
    def __init__(self, stream=sys.stdout):
        TestResult.__init__(self)
        self.output = stream
        self.messages = TeamcityServiceMessages(self.output, prepend_linebreak=True)
        self.current_suite = None
    def formatErr(self, err):
        # err is the (exctype, value, traceback) triple from sys.exc_info().
        exctype, value, tb = err
        return ''.join(traceback.format_exception(exctype, value, tb))
    def getTestName(self, test):
        # NOTE(review): assumes every test object has _testMethodName; this
        # breaks for doctests and load-failure placeholders.
        return test._testMethodName
    def getTestId(self, test):
        return test.id
    def addSuccess(self, test):
        TestResult.addSuccess(self, test)
    def addError(self, test, err):
        TestResult.addError(self, test, err)
        err = self.formatErr(err)
        self.messages.testFailed(self.getTestName(test),
                                 message='Error', details=err)
    def addFailure(self, test, err):
        TestResult.addFailure(self, test, err)
        err = self.formatErr(err)
        self.messages.testFailed(self.getTestName(test),
                                 message='Failure', details=err)
    def addSkip(self, test, reason):
        self.messages.testIgnored(self.getTestName(test), message=reason)
    def startTest(self, test):
        # Group tests by suite: emit finished/started messages on class change.
        suite = test.__class__
        if suite != self.current_suite:
            if self.current_suite:
                self.messages.testSuiteFinished(strclass(self.current_suite))
            self.current_suite = suite
            self.messages.testSuiteStarted(strclass(self.current_suite), location="python_uttestid://" + strclass(self.current_suite))
        setattr(test, "startTime", datetime.datetime.now())
        self.messages.testStarted(self.getTestName(test), location="python_uttestid://" + str(test.id()))
    def stopTest(self, test):
        start = getattr(test, "startTime", datetime.datetime.now())
        d = datetime.datetime.now() - start
        # Elapsed time in milliseconds.
        duration=d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
        self.messages.testFinished(self.getTestName(test), duration=int(duration))
    def endLastSuite(self):
        if self.current_suite:
            self.messages.testSuiteFinished(strclass(self.current_suite))
            self.current_suite = None
class TeamcityTestRunner:
    """Minimal test runner that drives tests through TeamcityTestResult."""
    def __init__(self, stream=sys.stdout):
        self.stream = stream
    def _makeResult(self):
        return TeamcityTestResult(self.stream)
    def run(self, test):
        result = self._makeResult()
        test(result)
        result.endLastSuite()
        return result
| Python | 0 |
79a236133ea00fa1d1af99426380392fe51ec0f4 | Create iis_shortname.py | middileware/iis/iis_shortname.py | middileware/iis/iis_shortname.py | #!/usr/bin/env python
# encoding: utf-8
from t import T
import re
import urllib2,requests,urllib2,json,urlparse
requests.packages.urllib3.disable_warnings()
class P(T):
    """Check for the IIS short-name (8.3 tilde) enumeration vulnerability."""

    def __init__(self):
        T.__init__(self)

    def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo='',verify=False):
        """Probe ip:port with the tilde trick.

        IIS servers vulnerable to 8.3 short-name enumeration answer 404 for
        ``/*~1****/a.aspx`` but 400 for a longer invalid short name.

        Returns a dict with ``result`` (bool) and, when vulnerable, a
        ``VerifyInfo`` sub-dict describing the finding.
        """
        timeout = 5
        protocal = "https" if int(port) == 443 else "http"
        target_url = protocal + "://" + ip + ":" + str(port)
        result = {}
        result['result'] = False
        try:
            status_1 = requests.get(url=target_url + '/*~1****/a.aspx',
                                    timeout=timeout, verify=verify,
                                    allow_redirects=False).status_code
            status_2 = requests.get(url=target_url + '/l1j1e*~1****/a.aspx',
                                    timeout=timeout, verify=verify,
                                    allow_redirects=False).status_code
            if status_1 == 404 and status_2 == 400:
                result['result'] = True
                result['VerifyInfo'] = {}
                result['VerifyInfo']['type'] = 'iis short name Vulnerability'
                result['VerifyInfo']['URL'] = target_url
                result['VerifyInfo']['payload'] = 'null'
                # Bug fix: the old code read ``r.content`` here, but ``r``
                # was never assigned (always None), crashing on the success
                # path.  Record the observed status codes instead.
                result['VerifyInfo']['result'] = 'status codes: %s/%s' % (
                    status_1, status_2)
        except Exception:
            # Network/HTTP errors just mean "could not verify"; the old
            # handler itself crashed (``e.text`` on arbitrary exceptions).
            pass
        return result
if __name__ == '__main__':
    # py3 fix: print as a function (the original used a py2 print statement)
    print(P().verify(ip='cos.99.com', port='80'))
| Python | 0.000002 | |
4683bb493358452df35268508e2abdc44d6fd330 | Add UDp decoder | malware/core/decoder.py | malware/core/decoder.py | import dpkt
import socket
import binascii
def hexify(x):
    """Return a hex dump of *x* as space-separated byte pairs, e.g. '01 ab'."""
    # decode: binascii.hexlify returns bytes on Python 3, which cannot be
    # joined with a str separator; decoding keeps the function py2/py3-safe.
    h = binascii.hexlify(x).decode("ascii")
    return " ".join(h[i:i + 2] for i in range(0, len(h), 2))
def truncate_dns(x):
    """Drop the fixed 36-character header and 12-character trailer from a
    hex-dumped DNS payload, keeping only the variable middle section."""
    header_len, trailer_len = 36, 12
    return x[header_len:len(x) - trailer_len]
def _udp_iterator(pc):
    """Yield (src_ip, sport, dst_ip, dport, payload) for each UDP packet in *pc*.

    *pc* is a dpkt.pcap.Reader; truncated frames and non-IP/non-UDP packets
    are silently skipped.
    """
    for ts, pkt in pc:
        try:
            eth = dpkt.ethernet.Ethernet(pkt)
        except dpkt.dpkt.NeedData:
            # Truncated capture record; skip it.
            continue
        if eth.type == dpkt.ethernet.ETH_TYPE_IP:
            ip = eth.data
            if ip.p == dpkt.ip.IP_PROTO_UDP:
                udp = ip.data
                yield (ip.src, udp.sport, ip.dst, udp.dport, udp.data)
            else:
                pass
                # Not UDP packets
        else:
            pass
            # Not ether packets
    return
def _tcp_iterator(pc):
    """Yield (src_ip, sport, dst_ip, dport, payload) for each TCP packet in *pc*.

    Mirrors _udp_iterator but filters for TCP segments instead.
    """
    for ts, pkt in pc:
        try:
            eth = dpkt.ethernet.Ethernet(pkt)
        except dpkt.dpkt.NeedData:
            continue
        if eth.type == dpkt.ethernet.ETH_TYPE_IP:
            ip = eth.data
            if ip.p == dpkt.ip.IP_PROTO_TCP:
                tcp = ip.data
                yield (ip.src, tcp.sport, ip.dst, tcp.dport, tcp.data)
            else:
                pass
                # Not TCP packets
        else:
            pass
            # Not ether packets
    return
def decode_dns_qd_name(pcap_path):
    """Map each DNS query's connection tuple to [query name, hex payload].

    Reads the pcap at *pcap_path* and, for every UDP packet to port 53,
    records ``(src, sport, dst, dport) -> [qd name, truncated hex dump]``.
    Payloads that fail to parse as DNS (possibly malicious traffic) are
    stored under an ``'UNKNOWN_DNS'`` marker with a counter appended to the
    key so entries stay unique.
    """
    conn = {}
    unknown_opcode_counter = 0
    # 'rb' fix: pcap files are binary; text mode corrupts the stream on
    # Python 3 (and on Windows).  The with-block also guarantees the file
    # is closed even if parsing raises.
    with open(pcap_path, 'rb') as fp:
        pc = dpkt.pcap.Reader(fp)
        for (src, sport, dst, dport, data) in _udp_iterator(pc):
            if dport == 53:
                # UDP/53 is a DNS query
                key = (src, sport, dst, dport)
                try:
                    dns = dpkt.dns.DNS(data)
                    conn[key] = [dns.qd[0].name, truncate_dns(hexify(data))]
                except (dpkt.dpkt.UnpackError, IndexError):
                    # An unknown opcode maybe malicious traffic
                    unknown_opcode_counter += 1
                    key = (src, sport, dst, dport, unknown_opcode_counter)
                    conn[key] = ['UNKNOWN_DNS', truncate_dns(hexify(data))]
    return conn
def decode_http_req_header(pcap_path):
    """Map each HTTP request's connection tuple to [Host, URI, User-Agent].

    Reads the pcap at *pcap_path* and, for every non-empty TCP payload to
    port 80, parses the HTTP request and records
    ``(src, sport, dst, dport) -> [host, uri, user-agent]``.
    """
    # Bug fix: ``conn`` was never initialised in the original, so the first
    # matching packet raised NameError (as did the final ``return conn``).
    conn = {}
    # 'rb': pcap files are binary; with-block guarantees the file is closed.
    with open(pcap_path, 'rb') as fp:
        pc = dpkt.pcap.Reader(fp)
        for (src, sport, dst, dport, data) in _tcp_iterator(pc):
            if dport == 80 and len(data) > 0:
                key = (src, sport, dst, dport)
                http_req = dpkt.http.Request(data)
                conn[key] = [http_req.headers['host'],
                             http_req.uri,
                             http_req.headers['user-agent']]
    return conn
if __name__ == '__main__':
    # Quick manual check against a sample capture.
    conn = decode_dns_qd_name('./2a.pcap')
    # py3 fix: print as a function (the original used py2 print statements)
    print(len(conn))
    conn2 = decode_http_req_header('./2a.pcap')
    print(conn2)
| Python | 0.000004 | |
50af4f518912f758e7961055342642c9d31832a0 | Create 6-pwm2.py | Code/6-pwm2.py | Code/6-pwm2.py | # CamJam EduKit 3 - Robotics
# Worksheet 6 – Varying the speed of each motor with PWM
import RPi.GPIO as GPIO # Import the GPIO Library
import time # Import the Time library
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set variables for the GPIO motor pins
# (each motor has one "forwards" and one "backwards" pin on the driver board)
pinMotorAForwards = 10
pinMotorABackwards = 9
pinMotorBForwards = 8
pinMotorBBackwards = 7
# How many times to turn the pin on and off each second
Frequency = 20
# How long the pin stays on each cycle, as a percent
# (per-motor duty cycles allow compensating for motors that run at
# slightly different speeds)
DutyCycleA = 30
DutyCycleB = 30
# Settng the duty cycle to 0 means the motors will not turn
Stop = 0
# Set the GPIO Pin mode to be Output
GPIO.setup(pinMotorAForwards, GPIO.OUT)
GPIO.setup(pinMotorABackwards, GPIO.OUT)
GPIO.setup(pinMotorBForwards, GPIO.OUT)
GPIO.setup(pinMotorBBackwards, GPIO.OUT)
# Set the GPIO to software PWM at 'Frequency' Hertz
pwmMotorAForwards = GPIO.PWM(pinMotorAForwards, Frequency)
pwmMotorABackwards = GPIO.PWM(pinMotorABackwards, Frequency)
pwmMotorBForwards = GPIO.PWM(pinMotorBForwards, Frequency)
pwmMotorBBackwards = GPIO.PWM(pinMotorBBackwards, Frequency)
# Start the software PWM with a duty cycle of 0 (i.e. not moving)
pwmMotorAForwards.start(Stop)
pwmMotorABackwards.start(Stop)
pwmMotorBForwards.start(Stop)
pwmMotorBBackwards.start(Stop)
def _set_motors(a_forwards, a_backwards, b_forwards, b_backwards):
    # Apply one duty cycle per direction pin; Stop (0) disables a direction.
    # Shared helper removes the duplication across the five moves below.
    pwmMotorAForwards.ChangeDutyCycle(a_forwards)
    pwmMotorABackwards.ChangeDutyCycle(a_backwards)
    pwmMotorBForwards.ChangeDutyCycle(b_forwards)
    pwmMotorBBackwards.ChangeDutyCycle(b_backwards)

# Turn all motors off
def StopMotors():
    _set_motors(Stop, Stop, Stop, Stop)

# Turn both motors forwards
def Forwards():
    _set_motors(DutyCycleA, Stop, DutyCycleB, Stop)

# Turn both motors backwards
def Backwards():
    _set_motors(Stop, DutyCycleA, Stop, DutyCycleB)

# Turn left (motor A backwards, motor B forwards)
def Left():
    _set_motors(Stop, DutyCycleA, DutyCycleB, Stop)

# Turn Right (motor A forwards, motor B backwards)
def Right():
    _set_motors(DutyCycleA, Stop, Stop, DutyCycleB)
# Your code to control the robot goes below this line
# Drive a short demo route: forward, left, forward, right, reverse, stop.
Forwards()
time.sleep(1) # Pause for 1 second
Left()
time.sleep(0.5) # Pause for half a second
Forwards()
time.sleep(1)
Right()
time.sleep(0.5)
Backwards()
time.sleep(0.5)
StopMotors()
# Release the GPIO pins so other programs can use them.
GPIO.cleanup()
| Python | 0.000013 | |
2d25c2329a9ae4d084671ab99cf53290fe7547ab | add tests for cython script | streams/simulation/tests/test_integrate_lm10.py | streams/simulation/tests/test_integrate_lm10.py | # coding: utf-8
"""
Test the Cython integrate code
"""
from __future__ import absolute_import, unicode_literals, division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import glob
import time
# Third-party
import numpy as np
import pytest
import astropy.units as u
import matplotlib.pyplot as plt
from .._integrate_lm10 import lm10_acceleration, leapfrog_lm10
from ...potential import LawMajewski2010
from ...integrate import leapfrog
def test_cython_vs_python1():
    """Benchmark: the Cython acceleration routine should beat pure Python."""
    r = np.random.random((100,3))
    a = time.time()
    for ii in range(10000):
        lm10_acceleration(r, 2, 1.6, 1.6, 1.69, 0.121)
    cython = (time.time() - a) / 10000.
    lm10 = LawMajewski2010()
    a = time.time()
    for ii in range(10000):
        lm10.acceleration_at(r)
    pure_python = (time.time() - a) / 10000.
    # NOTE(review): timing assertions are machine-dependent and can flake.
    assert cython < pure_python
def test_cython_vs_python2():
    """Benchmark: the Cython leapfrog integrator vs. the pure-Python one."""
    r = np.random.random((100,3))
    v = np.random.random((100,3))
    t = np.arange(0, 7000, 10.)
    a = time.time()
    for ii in range(10):
        leapfrog_lm10(r, v, 1.6, 1.6, 1.69, 0.121, t=t)
    cython = (time.time() - a) / 10.
    lm10 = LawMajewski2010()
    a = time.time()
    for ii in range(10):
        leapfrog(lm10.acceleration_at, r, v, t)
    pure_python = (time.time() - a) / 10.
    print(cython, pure_python)
    #assert cython < pure_python
| Python | 0 | |
af3333906125e9bde3cc5b3ebdb7209c25bcf6ff | Add pinger script | pinger.py | pinger.py | #!/usr/bin/python3
import requests
import datetime
import time
# Ping the site roughly twice an hour during waking hours, forever.
while True:
    hour = datetime.datetime.now().hour
    if hour > 7:
        try:
            # timeout: without it a hung connection would stall this loop
            # forever; a failed ping should not kill the daemon either.
            requests.get('https://biblion.se', timeout=30)
        except requests.RequestException:
            pass  # best effort -- retry on the next cycle
    time.sleep(60*29)
e140c21cd0b7d5b0e7cbe7895096476105d03f91 | Create update_sql.py | update_sql.py | update_sql.py | __author__ = 'userme865'
# ver 0.1
import MySQLdb
def update_db():
    """Collapse the ``indexer`` table so each word has a single row.

    Rebuilds a temporary ``dataexchange`` table that groups ``indexer`` by
    word with comma-separated page/pagewords lists, then recreates
    ``indexer`` from it.  MySQL errors are reported and swallowed.
    """
    try:  # start msql and creat stable at first time
        conn = MySQLdb.connect(host='localhost', user='root', passwd='', port=3306)
        cur = conn.cursor()
        conn.select_db('python')
        # Rebuild dataexchange as the grouped view of indexer.
        cur.execute('DROP TABLE dataexchange')
        cur.execute(
            "CREATE TABLE dataexchange SELECT indexer.words, group_concat(indexer.pages ORDER BY indexer.words SEPARATOR ',') AS 'pages',group_concat(indexer.pagewords ORDER BY indexer.words SEPARATOR ',') AS 'pagewords' from indexer GROUP BY indexer.words")
        # Replace indexer with the grouped copy.
        cur.execute("DROP TABLE indexer")
        cur.execute("CREATE TABLE indexer SELECT* FROM dataexchange")
        conn.commit()
        cur.close()
        conn.close()
    except MySQLdb.Error as e:
        # py3 fix: 'except X, e' and the print statement were py2-only syntax
        print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
| Python | 0.000002 | |
3921f1522851767444644d1dc3c126521476d9dc | add util script to help troll autoplot feature ideas | scripts/util/list_stale_autoplots.py | scripts/util/list_stale_autoplots.py | """Look into which autoplots have not been used in a while"""
import psycopg2
import re
import pandas as pd
# Extracts the autoplot id from appurls like /plotting/auto/?q=123
QRE = re.compile("q=([0-9]+)")

pgconn = psycopg2.connect(database='mesosite', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""SELECT valid, appurl from feature WHERE appurl is not null
    and appurl != ''
    """)
# Track, per autoplot id, the most recent feature date that used it.
q = {}
for row in cursor:
    appurl = row[1]
    valid = row[0]
    if not appurl.startswith("/plotting/auto/"):
        continue
    tokens = QRE.findall(appurl)
    if not tokens:
        print("appurl: %s valid: %s failed RE" % (appurl, valid))
        continue
    appid = int(tokens[0])
    res = q.setdefault(appid, valid)
    if res < valid:
        q[appid] = valid

# Oldest (most stale) autoplots sort to the top.
df = pd.DataFrame.from_dict(q, orient='index')
df.columns = ['valid']
df.sort_values(by='valid', inplace=True)
# py3 fix: the original ended with a Python-2 print statement, inconsistent
# with the print() calls above and a SyntaxError on Python 3.
print(df.head())
| Python | 0 | |
faa6872cf008171afa3db6687d23c1bcc9b6dbac | Add views to the main files | Druid/views.py | Druid/views.py | from django.shortcuts import render
from gfx.models import Material
from django.template import RequestContext
def home( request ):
    """Render the Druid landing page with the request context applied.

    NOTE(review): ``context_instance`` is deprecated/removed in modern
    Django; ``render(request, template)`` already applies RequestContext.
    """
    rc = RequestContext(request)
    return render( request, 'Druid/index.html', context_instance=rc )
fc23860b1adbf7c75dfd53dc213c24a65b455597 | Create ExtractData.py | ExtractData.py | ExtractData.py | Python | 0.000001 | ||
ff3b36b4d64af54b6bd22f107a9d5dd5cf4f4473 | solve problem no.1152 | 1152/answer.py | 1152/answer.py | from sys import stdin
def count_words(line):
    """Return the number of whitespace-separated words in *line*.

    ``str.split()`` with no argument collapses runs of whitespace and
    ignores leading/trailing blanks, so "  a  b " counts as 2 -- more
    robust than the original space-counting loop, and it no longer
    shadows the ``input`` builtin.
    """
    return len(line.split())


if __name__ == "__main__":
    # BOJ 1152: read one line from stdin and print its word count
    # (split() already yields 0 for a blank line).
    print(count_words(stdin.readline()))
f5706084caca2c6f6235914cb70e79c16438e1a0 | Create OverlappingAMR.py | src/Python/CompositeData/OverlappingAMR.py | src/Python/CompositeData/OverlappingAMR.py | #!/usr/bin/env python
import vtk
def MakeScalars(dims, origin, spacing, scalars):
    """Fill *scalars* with an implicit sphere sampled on a uniform grid.

    The grid has *dims* points along x/y/z, anchored at *origin* with the
    given *spacing*; values are written in row-major (x fastest) order.
    """
    # Implicit function used to compute scalars
    sphere = vtk.vtkSphere()
    sphere.SetRadius(3)
    sphere.SetCenter(5, 5, 5)
    nx, ny, nz = dims
    scalars.SetNumberOfTuples(nx * ny * nz)
    index = 0
    for k in range(nz):
        z = origin[2] + spacing[2] * k
        for j in range(ny):
            y = origin[1] + spacing[1] * j
            for i in range(nx):
                x = origin[0] + spacing[0] * i
                scalars.SetValue(index, sphere.EvaluateFunction(x, y, z))
                index += 1
def main():
    """Build a two-level overlapping AMR dataset and render its outline
    plus an iso-surface at value 10."""
    # Create and populate the AMR dataset
    # The dataset should look like
    # Level 0
    #   uniform grid, dimensions 11, 11, 11, AMR box (0, 0, 0) - (9, 9, 9)
    # Level 1 - refinement ratio : 2
    #   uniform grid, dimensions 11, 11, 11, AMR box (0, 0, 0) - (9, 9, 9)
    #   uniform grid, dimensions 11, 11, 11, AMR box (10, 10, 10) - (19, 19, 19)
    # Use MakeScalars() above to fill the scalar arrays

    amr = vtk.vtkOverlappingAMR()
    blocksPerLevel = [1, 2]
    amr.Initialize(2, blocksPerLevel)

    origin = [0.0, 0.0, 0.0]
    spacing = [1.0, 1.0, 1.0]
    dims = [11, 11, 11]

    ug1 = vtk.vtkUniformGrid()
    # Geometry
    ug1.SetOrigin(origin)
    ug1.SetSpacing(spacing)
    ug1.SetDimensions(dims)

    # Data
    scalars = vtk.vtkFloatArray()
    ug1.GetPointData().SetScalars(scalars)
    MakeScalars(dims, origin, spacing, scalars)

    # NOTE(review): the lo/hi bounds below are computed but never passed to
    # the vtkAMRBox instances (default-constructed boxes are used instead).
    lo = [0, 0, 0]
    hi = [9, 9, 9]
    box1 = vtk.vtkAMRBox()
    amr.SetAMRBox(0, 0, box1)
    amr.SetDataSet(0, 0, ug1)

    # Level 1, block 0: same origin, half the spacing (refinement ratio 2).
    spacing2 = [0.5, 0.5, 0.5]
    ug2 = vtk.vtkUniformGrid()
    # Geometry
    ug2.SetOrigin(origin)
    ug2.SetSpacing(spacing2)
    ug2.SetDimensions(dims)

    # Data
    scalars = vtk.vtkFloatArray()
    ug2.GetPointData().SetScalars(scalars)
    MakeScalars(dims, origin, spacing2, scalars)

    lo2 = [0, 0, 0]
    hi2 = [9, 9, 9]
    box2 = vtk.vtkAMRBox()
    amr.SetAMRBox(1, 0, box2)
    amr.SetDataSet(1, 0, ug2)

    # Level 1, block 1: offset origin so this block covers the second octant.
    origin3 = [5, 5, 5]
    ug3 = vtk.vtkUniformGrid()

    # Geometry
    ug3.SetOrigin(origin3)
    ug3.SetSpacing(spacing2)
    ug3.SetDimensions(dims)

    # Data
    scalars = vtk.vtkFloatArray()
    ug3.GetPointData().SetScalars(scalars)
    MakeScalars(dims, origin3, spacing2, scalars)

    lo3 = [10, 10, 10]
    hi3 = [19, 19, 19]
    box3 = vtk.vtkAMRBox()
    amr.SetAMRBox(1, 1, box3)
    amr.SetDataSet(1, 1, ug3)

    amr.SetRefinementRatio(0, 2)

    # Render the amr data here.
    of = vtk.vtkOutlineFilter()
    of.SetInputData(amr)

    geomFilter = vtk.vtkCompositeDataGeometryFilter()
    geomFilter.SetInputConnection(of.GetOutputPort())

    # Create an iso-surface - at 10.
    cf = vtk.vtkContourFilter()
    cf.SetInputData(amr)
    cf.SetNumberOfContours(1)
    cf.SetValue(0, 10.0)

    geomFilter2 = vtk.vtkCompositeDataGeometryFilter()
    geomFilter2.SetInputConnection(cf.GetOutputPort())

    # Create the render window, renderer, and interactor.
    aren = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(aren)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    # Associate the geometry with a mapper and the mapper to an actor.
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(geomFilter.GetOutputPort())
    actor1 = vtk.vtkActor()
    actor1.SetMapper(mapper)

    # Associate the geometry with a mapper and the mapper to an actor.
    mapper2 = vtk.vtkPolyDataMapper()
    mapper2.SetInputConnection(geomFilter2.GetOutputPort())
    actor2 = vtk.vtkActor()
    actor2.SetMapper(mapper2)

    # Add the actor to the renderer and start handling events.
    aren.AddActor(actor1)
    aren.AddActor(actor2)
    renWin.Render()
    iren.Start()

if __name__ == '__main__':
    main()
| Python | 0 | |
82bfe668b11ac76159f2a599734ba33c4ef57026 | Add another views_graph_service file | portal/views_graph_service.py | portal/views_graph_service.py | from flask import (flash, redirect, render_template, request,
session, url_for)
import requests
from portal import app, datasets
from portal.decorators import authenticated
from portal.utils import get_portal_tokens
@app.route('/graph', methods=['GET', 'POST'])
@authenticated
def graph():
    """Show the dataset-selection form (GET) or run the graph service (POST)."""
    if request.method == 'GET':
        return render_template('graph.jinja2', datasets=datasets)

    # POST: require at least one dataset and a year before calling the service.
    dataset_ids = request.form.getlist('dataset')
    year = request.form.get('year')
    if not (dataset_ids and year):
        flash("Please select at least one dataset and a year to graph.")
        return redirect(url_for('graph'))

    token = get_portal_tokens()['service']
    url = '{}/{}'.format(app.config['SERVICE_URL_BASE'], 'api/doit')
    headers = dict(Authorization='Bearer {}'.format(token))
    payload = dict(datasets=dataset_ids,
                   year=year,
                   user_identity_id=session.get('primary_identity'),
                   user_identity_name=session.get('primary_username'))

    response = requests.post(url, headers=headers, data=payload,
                             verify=False)
    response.raise_for_status()
    body = response.json()

    dest_ep = body.get('dest_ep')
    dest_path = body.get('dest_path')
    dest_name = body.get('dest_name')
    graph_count = body.get('graph_count')
    flash("%d-file SVG upload to %s on %s completed!" %
          (graph_count, dest_path, dest_name))
    return redirect(url_for('browse', endpoint_id=dest_ep,
                            endpoint_path=dest_path.lstrip('/')))
@app.route('/graph/clean-up', methods=['POST'])
@authenticated
def graph_cleanup():
    """Ask the graph service to delete the user's processed graphs.

    Flashes a confirmation containing the service task id and redirects
    back to the graph page.
    """
    service_token = get_portal_tokens()['service']
    service_url = '{}/{}'.format(app.config['SERVICE_URL_BASE'], 'api/cleanup')
    req_headers = dict(Authorization='Bearer {}'.format(service_token))
    resp = requests.post(service_url,
                         headers=req_headers,
                         data=dict(
                             user_identity_name=session['primary_username']
                         ),
                         verify=False)
    resp.raise_for_status()
    # BUG FIX: resp_data was read below but never assigned, raising
    # NameError on every successful cleanup.
    resp_data = resp.json()
    task_id = resp_data['task_id']
    msg = '{} ({}).'.format('Your existing processed graphs have been removed',
                            task_id)
    flash(msg)
    return redirect(url_for('graph'))
| Python | 0 | |
3684e8be098300006b09c6677a2805e10d623acd | Add GYP file tld_cleanup tool. | net/tools/tld_cleanup/tld_cleanup.gyp | net/tools/tld_cleanup/tld_cleanup.gyp | # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../../../build/common.gypi',
],
'targets': [
{
'target_name': 'tld_cleanup',
'type': 'executable',
'dependencies': [
'../../../base/base.gyp:base',
'../../../build/temp_gyp/googleurl.gyp:googleurl',
],
'sources': [
'tld_cleanup.cc',
],
},
],
}
| Python | 0.000001 | |
5f9c7d10957c7b0b0da46b031120fe2434315d0d | Test of new persistence layer. | ndtable/persistence/simple.py | ndtable/persistence/simple.py | from ndtable.carray import carray, cparams
from bloscpack import pack_list, unpack_file
from numpy import array, frombuffer
def test_simple():
    """Round-trip raw (uncompressed) carray chunks through bloscpack."""
    filename = 'output'
    # hackish, just experimenting!
    arr = carray(xrange(10000)).chunks
    ca = [bytes(chunk.viewof) for chunk in arr]
    pack_list(ca, {}, filename, {'typesize': 8, 'clevel': 0, 'shuffle': False})
    # CONSISTENCY FIX: read back through the `filename` variable instead of a
    # second hard-coded 'output' literal.
    out_list, meta_info = unpack_file(filename)
    assert out_list[0] == ca[0]
    assert out_list[1] == ca[1]
def test_compressed():
    """Round-trip blosc-compressed carray chunks through bloscpack."""
    filename = 'output'
    # hackish, just experimenting!
    arr = carray(xrange(10000), cparams(clevel=5, shuffle=True)).chunks
    ca = [bytes(chunk.viewof) for chunk in arr]
    pack_list(ca, {}, filename, {'typesize': 8, 'clevel': 5, 'shuffle': True})
    # CONSISTENCY FIX: read back through the `filename` variable instead of a
    # second hard-coded 'output' literal.
    out_list, meta_info = unpack_file(filename)
    assert out_list[0] == ca[0]
    assert out_list[1] == ca[1]
| Python | 0 | |
e6a4863d9663791fabc4bd6ccdf0ab45ba2a86eb | Add standalone benchmark runner | remote_bench.py | remote_bench.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Standalone benchmark runner
"""
import cProfile
import pstats
import profile
import numpy as np
print("Running Rust and Pyproj benchmarks\n")
# calibrate
# Average five calibration runs to estimate the profiler's per-call overhead.
pr = profile.Profile()
calibration = np.mean([pr.calibrate(100000) for x in xrange(5)])
# add the bias
# Set globally so both benchmark profiles subtract the same overhead.
profile.Profile.bias = calibration
# NOTE(review): Python 2 only -- cProfile.run() is handed an open file
# object, which the py2 exec machinery accepts; `xrange` above confirms py2.
cProfile.run(open('benches/cprofile_rust.py', 'rb'), 'benches/output_stats_rust')
rust = pstats.Stats('benches/output_stats_rust')
cProfile.run(open('benches/cprofile_pyproj.py', 'rb'), 'benches/output_stats_pyproj')
pyproj_ = pstats.Stats('benches/output_stats_pyproj')
print("Rust Benchmark\n")
rust.sort_stats('cumulative').print_stats(5)
print("Pyproj Benchmark\n")
pyproj_.sort_stats('cumulative').print_stats(5)
| Python | 0.000001 | |
b4042f23d02e77c45d772fe64ae5e98db8b5e4e4 | Add new package: re2 (#18302) | var/spack/repos/builtin/packages/re2/package.py | var/spack/repos/builtin/packages/re2/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Re2(CMakePackage):
    """RE2 is a fast, safe, thread-friendly alternative to backtracking
    regular expression engines like those used in PCRE, Perl, and Python."""

    homepage = "https://github.com/google/re2"
    url = "https://github.com/google/re2/archive/2020-08-01.tar.gz"

    # sha256 checksums of the dated release tarballs fetched from `url`.
    version('2020-08-01', sha256='6f4c8514249cd65b9e85d3e6f4c35595809a63ad71c5d93083e4d1dcdf9e0cd6')
    version('2020-04-01', sha256='98794bc5416326817498384a9c43cbb5a406bab8da9f84f83c39ecad43ed5cea')
| Python | 0.00009 | |
9c0bcd4e0317aa8b76ebbf3c9ecae82d1b90027d | Create initial night sensor code for Pi | night_sensor/night_feature.py | night_sensor/night_feature.py | """
@author: Sze "Ron" Chau
@e-mail: chaus3@wit.edu
@source: https://github.com/wodiesan/sweet-skoomabot
@desc Night sensor-->RPi for Senior Design 1
"""
import logging
import os
import RPi.GPIO as GPIO
import serial
import subprocess
import sys
import time
import traceback
# GPIO pins. Uses the BCM numbering system based on RPi B+ board.
IR1 = 26
IR2 = 19
IR3 = 13
IR4 = 6
def init_serial():
    """Initialize and open the serial connection to the light sensor.

    Returns the opened serial.Serial (57600 baud on /dev/ttyUSB0); exits
    the process if the port cannot be opened.
    """
    ser = serial.Serial()
    #ser.port = "\\.\COM4"    # Windows
    ser.port = "/dev/ttyUSB0"  # Linux
    ser.baudrate = 57600
    try:
        ser.open()
    except Exception as e:  # 'as' form is valid on Python 2.6+ and 3.
        # BUG FIX: the module never defined `logger`, so the old
        # `logger.info(...)` raised NameError inside this error path.
        logging.getLogger(__name__).info("Possible open serial port: " + str(e))
        print('Check the serial USB port.')
        exit()
    return ser
def init_leds():
    """Initial setup for light sensor and IR LEDs, then start the warning thread.

    Currently uses the BCM numbering system based on RPi B+ board.
    Returns the started (non-daemon) thread.
    """
    import threading  # BUG FIX: `threading` was used below but never imported.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(IR1, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(IR2, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(IR3, GPIO.OUT, initial=GPIO.HIGH)
    GPIO.setup(IR4, GPIO.OUT, initial=GPIO.HIGH)
    # TODO(review): `warnings` is not defined anywhere in this file; the
    # thread target must be supplied before init_leds() can actually run.
    thread = threading.Thread(target=warnings)
    thread.daemon = False
    thread.start()
    return thread
| Python | 0 | |
2aa2400678ac039a448d529b919c44694912ca2e | Add a useful method for hnadling configuration | conveyor/config.py | conveyor/config.py | import errno
import imp
import importlib
import os
import six
class Config(dict):
    """
    Works exactly like a dict but provides ways to fill it from files
    or special dictionaries. There are two common patterns to populate the
    config.
    Either you can fill the config from a config file::
        app.config.from_pyfile('yourconfig.cfg')
    Or alternatively you can define the configuration options in the
    module that calls :meth:`from_object` or provide an import path to
    a module that should be loaded. It is also possible to tell it to
    use the same module and with that provide the configuration values
    just before the call::
        DEBUG = True
        SECRET_KEY = 'development key'
        app.config.from_object(__name__)
    In both cases (loading from any Python file or loading from modules),
    only uppercase keys are added to the config. This makes it possible to use
    lowercase values in the config file for temporary values that are not added
    to the config or to define the config keys in the same file that implements
    the application.
    Probably the most interesting way to load configurations is from an
    environment variable pointing to a file::
        app.config.from_envvar('YOURAPPLICATION_SETTINGS')
    In this case before launching the application you have to set this
    environment variable to the file you want to use. On Linux and OS X
    use the export statement::
        export YOURAPPLICATION_SETTINGS='/path/to/config/file'
    On windows use `set` instead.
    :param root_path: path to which files are read relative from. When the
                      config object is created by the application, this is
                      the application's :attr:`~flask.Flask.root_path`.
    :param defaults: an optional dictionary of default values
    """

    def __init__(self, root_path, defaults=None):
        dict.__init__(self, defaults or {})
        self.root_path = root_path

    def from_envvar(self, variable_name, silent=False):
        """Loads a configuration from an environment variable pointing to
        a configuration file. This is basically just a shortcut with nicer
        error messages for this line of code::
            app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
        :param variable_name: name of the environment variable
        :param silent: set to `True` if you want silent failure for missing
                       files.
        :return: bool. `True` if able to load config, `False` otherwise.
        """
        rv = os.environ.get(variable_name)
        if not rv:
            if silent:
                return False
            raise RuntimeError('The environment variable %r is not set '
                               'and as such configuration could not be '
                               'loaded. Set this variable and make it '
                               'point to a configuration file' %
                               variable_name)
        return self.from_pyfile(rv, silent=silent)

    def from_pyfile(self, filename, silent=False):
        """Updates the values in the config from a Python file. This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.
        :param filename: the filename of the config. This can either be an
                         absolute filename or a filename relative to the
                         root path.
        :param silent: set to `True` if you want silent failure for missing
                       files.
        .. versionadded:: 0.7
           `silent` parameter.
        """
        filename = os.path.join(self.root_path, filename)
        # Execute the config file as a throwaway module so it can use
        # __file__ and arbitrary Python syntax.
        d = imp.new_module('config')
        d.__file__ = filename
        try:
            execfile(filename, d.__dict__)  # NOTE: Python 2 builtin.
        except IOError, e:
            # A missing file/directory is only tolerated when silent=True.
            if silent and e.errno in (errno.ENOENT, errno.EISDIR):
                return False
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
        return True

    def from_object(self, obj):
        """Updates the values from the given object. An object can be of one
        of the following two types:
        - a string: in this case the object with that name will be imported
        - an actual object reference: that object is used directly
        Objects are usually either modules or classes.
        Just the uppercase variables in that object are stored in the config.
        Example usage::
            app.config.from_object('yourapplication.default_config')
            from yourapplication import default_config
            app.config.from_object(default_config)
        You should not use this function to load the actual configuration but
        rather configuration defaults. The actual config should be loaded
        with :meth:`from_pyfile` and ideally from a location not within the
        package because the package might be installed system wide.
        :param obj: an import name or object
        """
        if isinstance(obj, six.string_types):
            # Dotted path: import the module and pull the named attribute.
            modname, objname = obj.rsplit(".", 1)
            mod = importlib.import_module(modname)
            obj = getattr(mod, objname)
        # Only UPPERCASE attributes become configuration keys.
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
| Python | 0.000014 | |
83d3f01c9f18c687a0348638431fba24d68db636 | Split components out of PCBBase. | Components.py | Components.py | import numpy as np
from PCBBase import PCBDrawer, PCBFeature
# From https://www.digikey.com/Web%20Export/Supplier%20Content/Vishay_8026/PDF/VishayBeyschlag_SolderPad.pdf?redirected=1
# Keys are four-digit imperial case-size codes; each tuple holds the
# recommended solder-pad geometry (G, Y, X, Z) from the Vishay document
# linked above -- presumably in mm (the consuming code converts the case
# code to mm via * 0.254). TODO confirm units.
resistorsParams = {
    "0102": (0.65, 1.10, 1.40, 2.85),
    "0204": (1.50, 1.25, 1.75, 4.00),
    "0207": (2.80, 2.20, 2.20, 7.20),
    "0402": (0.25, 0.60, 0.55, 1.45),
    "0603": (0.50, 0.95, 0.95, 2.40),
    "0805": (0.65, 1.10, 1.40, 2.85),
    "1206": (1.50, 1.25, 1.75, 4.00)}
class StdSMAResistor(PCBFeature):
    """Two-pad footprint for a standard SMA chip resistor, sized by case code."""

    def __init__(self, codeString):
        PCBFeature.__init__(self)
        # MRG TODO: MM assertion
        self._g, self._y, self._x, self._z = resistorsParams[codeString]
        self.width = int(codeString[0:2]) * 0.254
        self.height = int(codeString[2:4]) * 0.254
        self.setLayerArtist("Top", "Mask", self.drawTopMask)
        self.setLayerArtist("Top", "Layer", self.drawTopLayer)

    def _computePadCoords(self, buff=0):
        # Closed rectangle for the right-hand pad, optionally grown by `buff`.
        left = (self._g / 2) - buff
        right = (self._z / 2) + buff
        bottom = (-self._x / 2) - buff
        top = (self._x / 2) + buff
        ring = [(left, bottom), (left, top), (right, top),
                (right, bottom), (left, bottom)]
        return np.array(ring).T

    def _drawPadPair(self, gerberWriter, buff):
        """Draw the right pad and its y-axis mirror (the left pad)."""
        xs1, ys1 = self._computePadCoords(buff=buff)
        xs2, ys2 = -xs1.copy(), ys1.copy()
        xs1, ys1 = self.transform(xs1, ys1)
        xs2, ys2 = self.transform(xs2, ys2)
        gerberWriter.simplePolygon(xs1, ys1)
        gerberWriter.simplePolygon(xs2, ys2)

    def drawTopMask(self, gerberWriter):
        self._drawPadPair(gerberWriter, buff=0)

    def drawTopLayer(self, gerberWriter):
        self._drawPadPair(gerberWriter, buff=0.1)
class ThroughHoleLED(PCBFeature):
    """Through-hole LED footprint: pads and drills on both sides of the board."""

    def __init__(self):
        PCBFeature.__init__(self)
        self.setLayerArtist("Top", "Layer", self.drawTopLayer)
        self.setLayerArtist("Top", "Mask", self.drawTopMask)
        self.setLayerArtist("Bottom", "Layer", self.drawBottomLayer)
        self.setLayerArtist("Bottom", "Mask", self.drawBottomMask)
        self.setLayerArtist("Drill", "Drill", self.drawDrills)

    def computePadCenters(self):
        ccSpacing = 2.54  # 0.1in lead pitch -- presumably mm, TODO confirm
        # NOTE(review): both x-coordinates are -ccSpacing / 2, so the two pad
        # centres coincide; the second entry was presumably meant to be
        # +ccSpacing / 2. Confirm before relying on this footprint.
        xs = [-ccSpacing / 2, -ccSpacing / 2]
        ys = [0, 0]
        return self.transform(xs, ys)

    def drawLayer(self, gerberWriter, size):
        # Flash a round aperture of radius `size` at every pad centre.
        gerberWriter.defineAperature(size, True)
        for px, py in self.computePadCenters():
            gerberWriter.flashAt(px, py)

    def drawTopLayer(self, gerberWriter):
        self.drawLayer(gerberWriter, 1.1 / 2)

    def drawBottomLayer(self, gerberWriter):
        self.drawLayer(gerberWriter, 1.1 / 2)

    def drawTopMask(self, gerberWriter):
        self.drawLayer(gerberWriter, 1.0 / 2)

    def drawBottomMask(self, gerberWriter):
        self.drawLayer(gerberWriter, 1.0 / 2)

    def drawDrills(self, drillWriter):
        for px, py in self.computePadCenters():
            drillWriter.addHole(px, py, 0.5 * np.sqrt(2) + 0.15)
class TraceSegment(PCBFeature):
    """Straight copper trace from sPt to ePt drawn with the given width."""

    def __init__(self, sPt, ePt, width):
        PCBFeature.__init__(self)
        self.sPt = sPt
        self.ePt = ePt
        self.w = width

    def drawSegment(self, gerberWriter):
        # NOTE(review): drawSegment is never registered via setLayerArtist in
        # __init__, and self.transform() is called here with a single point
        # while other features in this file pass separate xs/ys sequences --
        # confirm the intended transform signature before using this class.
        gerberWriter.defineAperature(self.w, setAsCurrent=True)
        gerberWriter.moveTo(self.transform(self.sPt))
        gerberWriter.lineTo(self.transform(self.ePt))
class AppolonianTest(PCBFeature):
    """Loads an Apollonian-gasket layout (one radius, x, y triple per row)
    from gasket.csv."""

    def __init__(self, scalar):
        # BUG FIX: the base-class initializer was never called.
        PCBFeature.__init__(self)
        self.scalar = scalar
        # BUG FIX: np.loadtxt has no 'delimit' keyword (it is 'delimiter'),
        # and unpacking the three CSV columns into separate arrays requires
        # unpack=True.
        self.rs, self.xs, self.ys = np.loadtxt("gasket.csv", skiprows=1,
                                               delimiter=",", unpack=True)
class TestFeature(PCBFeature):
    """Diagnostic feature: a triangle-plus-circle marker on each of the six
    copper/mask/overlay layers, plus a ring of drill hits."""

    def __init__(self):
        PCBFeature.__init__(self)
        angles = np.linspace(0, 2 * np.pi, 7)[:-1]
        cx = np.cos(angles) * 3
        cy = np.sin(angles) * 3
        layer_pairs = (("Top", "Mask"), ("Top", "Layer"), ("Top", "Overlay"),
                       ("Bottom", "Mask"), ("Bottom", "Layer"),
                       ("Bottom", "Overlay"))
        # One marker per layer, stepped around a circle of radius 3.
        for (side, sub), x, y in zip(layer_pairs, cx, cy):
            self.setLayerArtist(side, sub, self.drawTriFactory(x, y, 2))
        self.setLayerArtist("Drill", "Drill", self.drawDrills)

    def drawTriFactory(self, x, y, s):
        """Build an artist that draws a triangle and a circle of size s at (x, y)."""
        def artist(gWrit):
            angles = np.linspace(0, 2 * np.pi, 4)
            gWrit.simplePolygon((np.cos(angles) * s) + x,
                                (np.sin(angles) * s) + y)
            gWrit.defineAperature(0.1, True)
            gWrit.circle(x, y, s)
        return artist

    def drawDrills(self, drillWriter):
        ring = np.linspace(0, 2 * np.pi, 7)[:-1]
        for px, py in zip(np.cos(ring) * 3, np.sin(ring) * 3):
            drillWriter.addHole(px, py, 0.5 * np.sqrt(2) + 0.15)
if __name__ == "__main__":
    # Demo/smoke test: draw a single TestFeature on a small board, write the
    # output and preview it. The commented lines exercise the other features.
    pcb = PCBDrawer("testing")
    # r1 = ThroughHoleLED()
    # r1.setRotation(np.pi / 7)
    # pcb.addFeature(r1)
    # r2 = StdSMAResistor("0805")
    # for phi in range(5):
    #     r2.setCentroid(0, -7 + 3 * phi)
    #     pcb.addFeature(r2)
    pcb.addFeature(TestFeature())
    extent = 5.1  # half-width of the square board outline
    pcb.drawOutline((-extent, -extent), (extent, extent))
    pcb.finalize()
    pcb.visualize()
| Python | 0 | |
b7e1e05bfe5aa7a8d91a4d8ee786e61b4aa7bd1b | Add ArrayQueue | ArrayQueue.py | ArrayQueue.py | class ArrayQueue:
    def __init__(self, max=10):
        """Create an empty circular FIFO queue with capacity `max`.

        (`max` shadows the builtin but is kept for interface compatibility.)
        """
        self._data = [None] * max   # backing ring buffer
        self._size = 0              # number of stored elements
        self._front = 0             # index of the oldest element
        self._max = max             # fixed capacity
def enqueue(self, e):
self._data[(self._front + self._size) % self._max] = e
self._size += 1
def dequeue(self):
rst, self._data[self._front] = self._data[self._front], None
self._front = (self._front + 1) % self._max
self._size -= 1
return rst
def __len__(self):
return self._size | Python | 0.000001 | |
898d3ffe027a10ff91c0df66494dbddfcaee41d4 | add expect_column_values_to_be_valid_imei (#4753) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_imei.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_imei.py | """
This is a template for creating custom ColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_column_map_expectations
"""
import json
from typing import Optional
from stdnum import imei
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_imei(imei_num: str) -> bool:
    """Return True when stdnum accepts *imei_num* as a valid IMEI."""
    try:
        imei.validate(imei_num)
    except Exception:
        return False
    else:
        return True
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidImei(ColumnMapMetricProvider):
    """Map metric: per-value IMEI validity, delegated to is_valid_imei()."""

    # This is the id string that will be used to reference your metric.
    condition_metric_name = "column_values.to_be_valid_imei"

    # This method implements the core logic for the PandasExecutionEngine
    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # Row-wise apply: one boolean per column value.
        return column.apply(lambda x: is_valid_imei(x))

    # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
    # @column_condition_partial(engine=SqlAlchemyExecutionEngine)
    # def _sqlalchemy(cls, column, _dialect, **kwargs):
    #     raise NotImplementedError

    # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
    # @column_condition_partial(engine=SparkDFExecutionEngine)
    # def _spark(cls, column, **kwargs):
    #     raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidImei(ColumnMapExpectation):
    """Expect column values to be valid IMEI (International Mobile Equipment Identity)"""

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                "all_valid": [
                    "35686800-004141-20",
                    "3568680000414120",
                    "35686800-004141-20",
                    "354178036859789",
                    "35-686800-004141-8",
                ],
                "some_other": [
                    "35686800-004141-20",
                    "3568680000414120",
                    "35686800-004141-20",
                    "354178036859789",
                    "35-417803-685978-1",
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "all_valid"},
                    "out": {
                        "success": True,
                    },
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "some_other", "mostly": 1},
                    "out": {
                        "success": False,
                    },
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    map_metric = "column_values.to_be_valid_imei"

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        # NOTE(review): the annotation and docstring say this returns None,
        # yet the method returns True; the `configuration` local assigned
        # above is also unused afterwards -- confirm against the base-class
        # contract.
        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))
        return True

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",
        "tags": [
            "hackathon-22",
            "experimental",
            "typed-entities",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@szecsip",  # Don't forget to add your github handle here!
        ],
        "requirements": ["python-stdnum"],
    }
if __name__ == "__main__":
ExpectColumnValuesToBeValidImei().print_diagnostic_checklist()
| Python | 0 | |
55b6d19fc8c80e3d4ff7842f20d284879f5ea151 | Create BubbleSort.py | BubbleSort.py | BubbleSort.py | """
Bubble sort.
Original version: start with i = 0 and compare/swap it against every later
element j = i + 1; then i = 1, ... -- a value laboriously swapped toward the
front can be knocked straight back toward the end by the minimum at the tail.
Updated version here: for each i, walk j backwards from length-2, comparing
j with j+1, so small values bubbled toward the front stay put -- see p. 381.
"""
def bubble_sort(lists):
    """Sort `lists` in place (ascending) via backward bubble passes; returns it."""
    n = len(lists)
    for start in range(n):
        # Walk backwards so the smallest remaining value bubbles to `start`.
        for j in reversed(range(start, n - 1)):
            if lists[j] > lists[j + 1]:
                lists[j], lists[j + 1] = lists[j + 1], lists[j]
    return lists
| Python | 0.000001 | |
17966b6af3039aa6d6308e1592c14527513c70c1 | apply oa start date from journals to relative update requests - script | portality/migrate/3053_oa_start_date_from_journals_to_urs/migrate.py | portality/migrate/3053_oa_start_date_from_journals_to_urs/migrate.py | """
This script copies the OA (open access) start date from every journal that has
one onto the journal's current update request (application), and writes a CSV
report of the journals processed.
```
python migrate.py -o report.csv
```
"""
import csv
import esprit
from portality.core import es_connection
from portality.util import ipt_prefix
from portality import models
# Elasticsearch query: every journal with a bibjson.oa_start value set
# (legacy "filtered" query syntax); size is large enough to cover the
# whole index.
JOURNALS_WITH_OA_START_DATE = {
    "query": {
        "filtered": {
            "filter": {
                "exists" : {
                    "field" : "bibjson.oa_start"
                }
            },
            "query": {
                "match_all": {}
            }
        }
    },
    "size": 200000
}
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--out", help="output file path")
    args = parser.parse_args()

    if not args.out:
        print("Please specify an output file path with the -o option")
        parser.print_help()
        exit()

    conn = es_connection

    with open(args.out, "w", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["ID", "OA Start Date", "Current Application ID", "Application found"])

        # Scroll over every journal that carries an OA start date and copy
        # that date onto its current update request, if one exists.
        for j in esprit.tasks.scroll(conn, ipt_prefix('journal'),
                                     q=JOURNALS_WITH_OA_START_DATE,
                                     page_size=100, keepalive='1m'):
            journal = models.Journal(_source=j)
            bibjson = journal.bibjson()
            if journal.current_application is not None:
                ur = models.Application.pull(journal.current_application)
                # BUG FIX: the flag was inverted (True when the application was
                # missing, False when found), and a missing application then
                # crashed on ur.bibjson(). Only update when it actually exists.
                application_found = ur is not None
                if application_found:
                    urb = ur.bibjson()
                    urb.oa_start = bibjson.oa_start
                    ur.save()
                try:
                    writer.writerow(
                        [journal.id, bibjson.oa_start, journal.current_application, application_found])
                except AttributeError:
                    print("Error reading attributes for journal {0}".format(j['id']))
| Python | 0 | |
ab50818c18b4275c205419c4c844bfc9ecb7a4c8 | add rename.py | FileUtils/rename.py | FileUtils/rename.py | import os
import sys
import re
# Work out of the directory that contains this script.
dirname, filename = os.path.split(os.path.abspath(sys.argv[0]))
os.chdir(dirname)
fileList = os.listdir(dirname)
print dirname
# Prefix to prepend to every file that does not already carry it.
name='edge_effect_'
for fileItem in fileList:
    dotIndex = fileItem.rfind('.')
    fileName = fileItem[: dotIndex]
    fileExt = fileItem[dotIndex : ]
    print fileName,fileExt
    #m=re.search("[^qd]\w+",fileName)
    # Skip files already prefixed, and anything named like this script
    # ("rename*") so it doesn't rename itself.
    if fileName.find(name)<0 and fileName.find("rename")<0:
        # NOTE(review): leftover debug print below.
        print "111"
        os.rename(fileItem,name+fileName+fileExt)
        pass
#print 'm.group:'m.group(0)
68cc3555c510d835ded5cd9cbffc69129a8f7e64 | Add nrfconnect_firmware_utils.py (#5666) | scripts/flashing/nrfconnect_firmware_utils.py | scripts/flashing/nrfconnect_firmware_utils.py | #!/usr/bin/env python3
# Copyright (c) 2020-2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flash an NRF5 device.
This is layered so that a caller can perform individual operations
through an `Flasher` instance, or operations according to a command line.
For `Flasher`, see the class documentation. For the parse_command()
interface or standalone execution:
usage: nrfconnect_firmware_utils.py [-h] [--verbose] [--erase]
[--application FILE]
[--verify_application] [--reset]
[--skip_reset] [--nrfjprog FILE]
[--snr SERIAL] [--family FAMILY]
Flash NRF5 device
optional arguments:
-h, --help show this help message and exit
configuration:
--verbose, -v Report more verbosely
--nrfjprog FILE File name of the nrfjprog executable
--snr SERIAL, --serial SERIAL, -s SERIAL
Serial number of device to flash
--family FAMILY NRF5 device family
operations:
--erase Erase device
--application FILE Flash an image
--verify_application, --verify-application
Verify the image after flashing
--reset Reset device after flashing
--skip_reset, --skip-reset
Do not reset device after flashing
"""
import errno
import os
import sys
import firmware_utils
# Additional options that can be use to configure an `Flasher`
# object (as dictionary keys) and/or passed as command line options.
NRF5_OPTIONS = {
# Configuration options define properties used in flashing operations.
'configuration': {
# Tool configuration options.
'nrfjprog': {
'help': 'File name of the nrfjprog executable',
'default': 'nrfjprog',
'argparse': {
'metavar': 'FILE'
},
'command': [
'{nrfjprog}',
{'optional': 'family'},
{'optional': 'snr'},
()
],
'verify': ['{nrfjprog}', '--version'],
'error':
"""\
Unable to execute {nrfjprog}.
Please ensure that this tool is installed and
available. See the NRF5 example README for
installation instructions.
""",
},
'snr': {
'help': 'Serial number of device to flash',
'default': None,
'alias': ['--serial', '-s'],
'argparse': {
'metavar': 'SERIAL'
},
},
# Device configuration options.
'family': {
'help': 'NRF5 device family',
'default': None,
'argparse': {
'metavar': 'FAMILY'
},
},
},
}
class Flasher(firmware_utils.Flasher):
    """Manage nrf5 flashing.

    Wraps the nrfjprog tool (configured via NRF5_OPTIONS) for the
    erase/flash/verify/reset operations, driven either individually or
    through actions().
    """

    def __init__(self, **options):
        super().__init__(platform='NRF5', module=__name__, **options)
        self.define_options(NRF5_OPTIONS)

    def erase(self):
        """Perform nrfjprog --eraseall"""
        return self.run_tool('nrfjprog', ['--eraseall'], name='Erase all')

    def verify(self, image):
        """Verify image."""
        return self.run_tool('nrfjprog',
                             ['--quiet', '--verify', image],
                             name='Verify',
                             pass_message='Verified',
                             fail_message='Not verified',
                             fail_level=2)

    def flash(self, image):
        """Flash image."""
        return self.run_tool('nrfjprog',
                             ['--program', image, '--sectoranduicrerase'],
                             name='Flash')

    def reset(self):
        """Reset the device by enabling and then applying a pin reset."""
        # BUG FIX: the original returned after the '--pinresetenable' call,
        # leaving the '--pinreset' (apply) step unreachable, so the device
        # was never actually reset.
        res = self.run_tool('nrfjprog', ['--pinresetenable'],
                            name='Enable pin reset')
        if res.err:
            return res
        return self.run_tool('nrfjprog', ['--pinreset'], name='Apply pin reset')

    def actions(self):
        """Perform actions on the device according to self.option."""
        self.log(3, 'Options:', self.option)
        if self.option.erase:
            if self.erase().err:
                return self
        application = self.optional_file(self.option.application)
        if application:
            if self.flash(application).err:
                return self
            if self.option.verify_application:
                if self.verify(application).err:
                    return self
        if self.option.reset is None:
            # Default to resetting after a flash unless explicitly skipped.
            self.option.reset = True
        if self.option.reset:
            if self.reset().err:
                return self
        return self
### Mobly integration
class Nrf5Platform:
    """Mobly adapter: wraps a Flasher so Mobly can flash from the working dir."""

    def __init__(self, flasher_args):
        self.flasher = Flasher(**flasher_args)

    def flash(self):
        # flash_command() takes an argv-style list; the CWD stands in for
        # the program name (argv[0]).
        self.flasher.flash_command([os.getcwd()])
def verify_platform_args(platform_args):
    """Validate that all required flasher arguments are present.

    Args:
        platform_args: mapping of flasher option names to values.

    Raises:
        ValueError: if a required argument (currently 'application') is missing.
    """
    required_args = ['application']
    for r in required_args:
        if r not in required_args and False:
            pass
        if r not in platform_args:  # idiomatic 'not in' (was 'not r in')
            raise ValueError("Required argument %s missing" % r)
def create_platform(platform_args):
    """Build an Nrf5Platform from a one-element list of flasher argument dicts."""
    verify_platform_args(platform_args[0])
    return Nrf5Platform(platform_args[0])
### End of Mobly integration
if __name__ == '__main__':
sys.exit(Flasher().flash_command(sys.argv))
| Python | 0.000024 | |
dc993796fc15e3670c8a702f43fcb9a5d9b4c84e | Add forgotten file. | astrobin_apps_donations/utils.py | astrobin_apps_donations/utils.py | from subscription.models import UserSubscription
def user_is_donor(user):
    """Return True if `user` has a UserSubscription named 'AstroBin Donor'."""
    if user.is_authenticated:
        # .exists() lets the database stop at the first match instead of
        # counting every row just to test membership.
        return UserSubscription.objects.filter(
            user=user, subscription__name='AstroBin Donor').exists()
    return False
| Python | 0 | |
d459c39e00295664e184d4fcddbb0739ec6c77f2 | Add class for working with .lhe files | test/lheReader.py | test/lheReader.py | import xml.etree.ElementTree as ET
class lheReader(object):
ORIGINAL = 'original'
def __init__(self,fn=None):
self.tree = None
self.root = None
self.events = []
self.lhacode_map = {} # Maps the integer lhacode to the corresponding newcoup string name
self.skip = False # Flags the sample as having no re-weighted points
if fn is not None:
self.tree = ET.parse(fn)
self.root = self.tree.getroot()
self.events = self.root.findall('./event')
def parseModelFile(self,np_param_path):
#NOTE: This possibly should be placed somewhere else, as it doesn't directly involve the lhe file
"""Maps the newcoup lhacode to its corresponding name
Ex: self.lhacode_map = {11:'cA',22:'cuB',23:'cuW'}
"""
self.lhacode_map = {}
in_block = False
with open(np_param_path) as f:
for l in f.readlines():
l = l.strip()
if l.lower() == "block newcoup":
# Entering newcoup block
in_block = True
continue
if in_block:
if l == "":
# Exiting newcoup block
in_block = False
break
sp_l = l.split("#",1) # Split the line into non-comment and comment sections
base = sp_l[0].strip()
tail = sp_l[1].strip() if len(sp_l) > 1 else None
if len(base.split()) != 2:
print "ERROR: Failed to parse model file!"
return
lhacode = int(base.split()[0])
self.lhacode_map[lhacode] = tail
def getCouplingName(self,lhacode):
"""Attempts to map the specified lhacode to a newcoup name
"""
if self.lhacode_map.has_key(lhacode):
return self.lhacode_map[lhacode]
else:
return lhacode
def getWeightMap(self):
#TODO: Figure out a better name for this function...
"""Get a dictionary mapping each weight id to a set of coefficient strengths
Ex:
{'mg_reweight_1':
11: 0.1,
23: -0.3,
24: 0.2
}
"""
weight_map = {}
if self.tree is None or self.root is None:
return weight_map
# Map the initial point first
slha = self.root.find('./header/slha')
if slha is None:
print "WARNING: No slha tag found!"
return weight_map
weight_map[self.ORIGINAL] = {}
in_block = False
for l in slha.text.strip().split('\n'):
l = l.strip()
if l.lower() == "block newcoup":
# Entering newcoup block
in_block = True
continue
if in_block:
if l == "":
# Exiting newcoup block
in_block = False
break
sp_l = l.split("#",1) # Split the line into non-comment and comment sections
base = sp_l[0].strip()
tail = sp_l[1].strip() if len(sp_l) > 1 else None
if len(base) > 0:
coeff_id,val = base.split()
weight_map[self.ORIGINAL][int(coeff_id)] = float(val)
# Map the re-weighted points next
for weight in self.root.iter('weight'):
wgt_id = weight.attrib.get('id')
weight_map[wgt_id] = {}
for l in weight.text.split('\n')[:-1]:
coeff_id,val = l.split()[3:5]
weight_map[wgt_id][int(coeff_id)] = float(val)
return weight_map
def getEvent(self,i):
    """Get a specific event.

    Returns ``self.events[i]``, or None when no tree is loaded or *i*
    is past the end of the event list.
    """
    if self.tree is None:
        return None
    if i >= len(self.events):
        # Parenthesised single-argument print is valid on both Python 2
        # and 3 (the rest of this module is otherwise Python 2 only).
        print("ERROR: Event index out of range!")
        return None
    return self.events[i]
def getEventWeights(self,event_num):
    """Get original weight and re-weighted values for a specific event
    Ex: {
        'original': 0.01,
        'mg_reweight_1': 0.23,
        'mg_reweight_2': 0.03,
    }
    Returns None when the event cannot be retrieved.
    """
    event = self.getEvent(event_num)
    if event is None:
        return None
    event_weights = {}
    # The original XWGTUP weight is always included, keyed by self.ORIGINAL.
    event_weights[self.ORIGINAL] = self.getOriginalEventWeight(event_num)
    if self.skip:
        # A previous call already found no <rwgt> data; don't warn again.
        return event_weights
    elif event.find('rwgt') is None:
        print "WARNING: LHE file doesn't have re-weighted points!"
        self.skip = True
        return event_weights
    # Collect every <wgt id=...> re-weighted value for this event.
    for wgt in event.find('rwgt').iter('wgt'):
        wgt_id = wgt.attrib.get('id')
        event_weights[wgt_id] = float(wgt.text.strip())
    return event_weights
def getOriginalEventWeight(self,event_num):
    """Return the original event weight (XWGTUP) for *event_num*.

    The weight is the third whitespace-separated field on the first
    line of the event body; returns None when the event cannot be
    fetched.
    """
    event = self.getEvent(event_num)
    if event is None:
        return None
    first_line, _, _remainder = event.text.strip().partition('\n')
    return float(first_line.split()[2])
def getWeightBounds(self):
    """Returns the largest and smallest weights in the entire sample, for all weightings

    Returns (lo, hi); (None, None) when the first event's weight cannot
    be read (e.g. no events loaded).
    """
    init_wgt = self.getOriginalEventWeight(0)
    if init_wgt is None:
        return None,None
    # Seed both bounds with the first event's original weight.
    lo = init_wgt
    hi = init_wgt
    for idx in range(len(self.events)):
        event_wgts = self.getEventWeights(event_num=idx)
        # Scan every weighting (original and re-weighted) of every event.
        for wgt_id,wgt_val in event_wgts.iteritems():
            if wgt_val < lo:
                lo = wgt_val
            if wgt_val > hi:
                hi = wgt_val
    return lo,hi
def getCrossSections(self):
    #TODO: Include calculated error
    """Re-calculates the cross sections by summing over event weights

    Returns a dict mapping each known weight id to the sum of that
    weighting's value over all events.
    """
    # Start every weighting known to the weight map at zero so ids with
    # no contributing events still appear in the result.
    xsecs = {key: 0.0 for key in self.getWeightMap().keys()}
    for idx in range(len(self.events)):
        event_wgts = self.getEventWeights(event_num=idx)
        for wgt_id,wgt_val in event_wgts.iteritems():
            xsecs[wgt_id] += wgt_val
    return xsecs
if __name__ == "__main__":
    # Example usage: parse a reweighted LHE file and print weight bounds
    # and re-summed cross sections per weighting.
    sandbox = "reweight_v4"
    lhe_path = "%s/processtmp/Events/run_01/unweighted_events.lhe" % (sandbox)
    np_model_path = "%s/models/HEL_UFO/restrict_no_b_mass.dat" % (sandbox)
    print "Parsing tree..."
    lhe_tree = lheReader(lhe_path)
    coeff_pts = lhe_tree.getWeightMap()
    n_events = len(lhe_tree.events)  # NOTE(review): computed but not used below
    lhe_tree.parseModelFile(np_model_path)
    print "Getting bounds..."
    low,high = lhe_tree.getWeightBounds()
    print "Low Weight : %s" % (low)
    print "High Weight: %s" % (high)
    print "Getting cross sections..."
    xsecs = lhe_tree.getCrossSections()
    # Print each weighting's cross section, together with its coefficient
    # values when the weight id is present in the weight map.
    for wgt_id in sorted(xsecs.keys()):
        if coeff_pts.has_key(wgt_id):
            print "%s: %s (%s)" % (wgt_id,xsecs[wgt_id],coeff_pts[wgt_id])
        else:
            print "%s: %s" % (wgt_id,xsecs[wgt_id])
e51f3869b4a047489b9bb1e4b88af0e0bdc3078b | Add a command to list all the documents. | paper_to_git/commands/list_command.py | paper_to_git/commands/list_command.py | """
List the Documents and Folders
"""
from paper_to_git.commands.base import BaseCommand
from paper_to_git.models import PaperDoc, PaperFolder
__all__ = [
'ListCommand',
]
class ListCommand(BaseCommand):
    """List the PaperDocs and Folders
    """
    name = 'list'
    def add(self, parser, command_parser):
        """Register the ``list`` sub-command's arguments."""
        self.parser = parser
        command_parser.add_argument('-d', '--docs',
                                    default=False, action='store_true',
                                    help=("""\
List all the documents currently stored."""))
        command_parser.add_argument('-fd', '--folders',
                                    default=False, action='store_true',
                                    help=("""List all folders in Dropbox Paper"""))
    def process(self, args):
        """Print docs and/or folders depending on which flags were given."""
        if args.docs:
            for doc in PaperDoc.select():
                print(doc)
        if args.folders:
            for folder in PaperFolder.select():
                print(folder)
                # Indent each document beneath its containing folder.
                for doc in folder.docs:
                    print('|----{}'.format(doc))
        if not (args.docs or args.folders):
            print("Please provide atleast one of the --docs or --folders flags")
| Python | 0 | |
a797de9014a3d466bb10e9bc318c3e2edec328be | add base for rendering widgets | packages/SCIRun/renderbase.py | packages/SCIRun/renderbase.py | from core import system
from core.modules.module_registry import registry
from packages.spreadsheet.basic_widgets import SpreadsheetCell, CellLocation
class Render(SpreadsheetCell):
    """Base spreadsheet cell for render widgets (registered abstract below)."""
    def compute(self):
        # Intentionally a no-op; concrete render modules supply behaviour.
        pass
def registerRender():
    """Register the Render base class with the module registry as abstract."""
    registry.add_module(Render, abstract=True)
| Python | 0 | |
7beed056f9a44e6d99cb632b2ecb34bd6182cf06 | Create GeneratorDB.py | GeneratorDB.py | GeneratorDB.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
import re
from glob import glob
from operator import itemgetter
from multiprocessing import Process
from sys import argv
def get_genomas_locus():
    """Write one ``genome|locus`` line per locus tag to genomas_locus.csv.

    Works column-by-column over the module-global ``lista`` (the parsed
    Roary table): element 0 of each column tuple is the genome name from
    the header row, the rest are that genome's locus cells.
    """
    out_csv = open('genomas_locus.csv', 'w')
    print('Starting get_genomas_locus')
    # One tuple per genome column (columns 14+); empty cells are dropped
    # by filter(None, ...).
    genomas_locus = [tuple(filter(None, tuple(map(itemgetter(i), lista))))
                     for i in range(14, len(lista[0]))]
    for genoma in genomas_locus:
        for cell in genoma[1:]:  # genoma[0] is the genome name
            # BUG fix: the old multi-locus branch iterated an undefined
            # name (``loci``) and raised NameError.  Splitting the cell
            # handles the one-locus and many-locus cases uniformly.
            for locus in cell.split():
                print('{}|{}'.format(genoma[0], locus), file=out_csv)
    out_csv.close()
    print('END get_genomas_locus')
def get_pangenoma():
    """Write the 14 per-gene metadata columns of the Roary table to
    pangenoma.csv as pipe-separated lines.

    Reads the module-global ``lista`` (parsed gene_presence_absence.csv,
    header row included) and emits one line per gene.
    """
    out_csv = open('pangenoma.csv', 'w')
    print('Starting get_pangenoma')
    for row in lista[1:]:  # skip the CSV header row
        # Columns 0-13 are Gene, Non-unique Gene name, Annotation, ...,
        # Avg group size nuc.  csv.reader yields strings, so joining the
        # columns reproduces the original '{}|{}|...' formatting exactly
        # (indexing per column keeps the original IndexError behaviour
        # on malformed short rows).
        print('|'.join(row[col] for col in range(14)), file=out_csv)
    out_csv.close()
    print('END get_pangenoma')
def get_pangenoma_locus():
    """Write one ``gene|locus`` line per locus tag to pangenoma_locus.csv.

    Columns 14+ of each row of the module-global ``lista`` hold the
    gene's locus tags, possibly several per cell separated by whitespace.
    """
    out_csv = open('pangenoma_locus.csv', 'w')
    print('Starting get_pangenoma_locus')
    for row in lista[1:]:  # skip the CSV header row
        gene = row[0]
        for cell in row[14:]:
            # str.split() with no argument drops empty cells and splits
            # multi-locus cells, so one loop covers both the single-locus
            # and the many-locus case of the original code.
            for locus in cell.split():
                print('{}|{}'.format(gene, locus), file=out_csv)
    out_csv.close()
    print('END get_pangenoma_locus')
def get_locus_sequence():
    """Write ``locus|sequence`` pairs to locus_sequence.csv.

    Reads every PROKKA ``*.ffn`` file under the directory given as
    argv[1] and matches each fasta locus id against the locus list
    previously written by get_genomas_locus() (genomas_locus.csv must
    already exist).
    """
    out_csv = open('locus_sequence.csv', 'w')
    print('Starting get_locus_sequence')
    # All PROKKA .ffn sequence files under the directory given on the CLI.
    ffns = glob('{}/*.ffn'.format(argv[1]))
    p = re.compile(r'>(\w+).*')  # matches a fasta header and captures the locus id
    # genomas_locus.csv: one "genome|locus" pair per line (must exist).
    genomas_locus = open('genomas_locus.csv')
    reader = csv.reader(genomas_locus, delimiter='|')
    lista_genomas_locus = [row for row in reader]
    for ffn in ffns:
        archivo = open(ffn)
        reader = archivo.readlines()
        parsed = []
        # File code: the .ffn basename without its extension.
        codigo = ffn.split('/')[-1].split('.')[0]
        # All known locus ids whose genome name contains this file code.
        db = [x[1] for x in lista_genomas_locus if codigo in x[0]]
        # Flatten each record's sequence onto a single line: keep header
        # lines as-is, strip newlines from sequence lines, then re-insert
        # exactly one newline before every '>' header.
        for linea in reader:
            if '>' in linea:
                parsed.append(linea)
            else:
                parsed.append(linea.strip())
        string = p.sub(r'\n>\1', ''.join(parsed))
        # -> list of [locus_id, sequence] pairs; element [0] is the empty
        #    chunk before the first '>', skipped below.
        lista_locus = string.split('>')
        lista_locus = [x.split() for x in lista_locus]
        for locus in lista_locus[1:]:
            # Search for this locus id inside each stored locus name.
            codp = re.compile(locus[0])
            search = [codp.search(x) for x in db]
            search = tuple(filter(None, search))  # drop the misses
            if len(search) == 1:
                # Unique hit: emit the stored locus name and the sequence.
                search = search[0].string
                print('{}|{}'.format(search, locus[-1]), file=out_csv)
            elif len(search) == 0:
                pass  # locus not present in the database
            else:
                # Ambiguous match: dump the offending record and abort.
                print(locus)
                raise
    out_csv.close()
    print('END get_locus_sequence')
if __name__ == '__main__':
    # NOTE(review): when no argument is given this raises IndexError
    # before the message can print; ``argv[1]`` is only falsy when ''.
    if not argv[1]:
        print('Se necesita pasar el directorio de los ffns como primer y unico argumento')
        exit()
    # Load the whole Roary gene_presence_absence.csv into memory; the
    # worker functions all read this module-global ``lista``.
    csvfile = open('gene_presence_absence.csv')
    reader = csv.reader(csvfile)
    lista = [row for row in reader]
    LISTA_GENOMAS = tuple(lista[0][14:])  # genome names from the header row
    APIGFF = 'https://www.patricbrc.org/portal/portal/patric/Genome?cType=genome&cId={}'
    process_get_genomas_locus = Process(target=get_genomas_locus)
    process_get_pangenoma = Process(target=get_pangenoma)
    process_get_pangenoma_locus = Process(target=get_pangenoma_locus)
    process_get_locus_sequence = Process(target=get_locus_sequence)
    process_get_pangenoma.start()
    process_get_pangenoma_locus.start()
    process_get_genomas_locus.start()
    # get_locus_sequence depends on genomas_locus.csv, so wait for that
    # worker to finish before starting it; the other two run freely.
    process_get_genomas_locus.join()
    process_get_locus_sequence.start()
| Python | 0.000001 | |
6a9fae290c8ce1618a7207efe669347b9503e3be | Add missing logparse file. | python/spinn/util/logparse.py | python/spinn/util/logparse.py | """
Really easy log parsing.
"""
try:
    from parse import *
except ImportError:
    # The third-party ``parse`` package is optional at import time (only
    # read_file() needs it).  The previous bare ``except:`` swallowed
    # every exception, including KeyboardInterrupt; catch ImportError only.
    pass
import json
FMT_TRAIN = "Train-Format: "
FMT_TRAIN_EXTRA = "Train-Extra-Format: "
FMT_EVAL = "Eval-Format: "
FMT_EVAL_EXTRA = "Eval-Extra-Format: "
IS_TRAIN = "Acc:"
IS_TRAIN_EXTRA = "Train Extra:"
IS_EVAL = "Eval acc:"
IS_EVAL_EXTRA = "Eval Extra:"
START_TRAIN = "Step:"
START_TRAIN_EXTRA = "Train Extra:"
START_EVAL = "Step:"
START_EVAL_EXTRA = "Eval Extra:"
def get_format(filename, prefix):
    """Return the text that follows *prefix* on the first line containing it.

    Raises Exception when no line of *filename* contains *prefix*.
    """
    with open(filename) as log:
        for raw_line in log:
            marker = raw_line.find(prefix)
            if marker >= 0:
                return raw_line[marker + len(prefix):].strip()
    raise Exception("Format string not found.")
def get_json_data(filename, prefix):
    """Parse and return the JSON payload following *prefix* on the first
    line of *filename* that contains it.

    Raises Exception when no line contains *prefix*.
    """
    with open(filename) as log:
        for raw_line in log:
            marker = raw_line.find(prefix)
            if marker >= 0:
                payload = raw_line[marker + len(prefix):].strip()
                return json.loads(payload)
    raise Exception("Format string not found.")
def parse_flags(filename):
    """Return the JSON flag dump that follows the "Flag Values:" line.

    Collects every line after the marker up to and including the first
    line containing the closing brace, then parses that span as JSON.
    """
    start_marker = "Flag Values:\n"
    end_marker = "}\n"
    collecting = False
    chunks = []
    with open(filename) as log:
        for raw_line in log:
            if collecting:
                chunks.append(raw_line)
                if end_marker in raw_line:
                    break
            # Checked after the append, as in the original: the marker
            # line itself is never part of the JSON payload.
            if start_marker in raw_line:
                collecting = True
    return json.loads("".join(chunks))
def is_train(line):
    # A training stats line: contains the train accuracy marker but is
    # not the line that declares the train format string.
    return line.find(FMT_TRAIN) < 0 and line.find(IS_TRAIN) >= 0
def is_train_extra(line):
    # Same idea for the extra training stats lines.
    return line.find(FMT_TRAIN_EXTRA) < 0 and line.find(IS_TRAIN_EXTRA) >= 0
def is_eval(line):
    # An evaluation stats line (and not the eval format declaration).
    return line.find(FMT_EVAL) < 0 and line.find(IS_EVAL) >= 0
def is_eval_extra(line):
    # Extra evaluation stats line (and not its format declaration).
    return line.find(FMT_EVAL_EXTRA) < 0 and line.find(IS_EVAL_EXTRA) >= 0
def read_file(filename):
    """Parse a training log into (train, train_extra, eval, eval_extra, flags).

    The first four results are lists of objects returned by ``parse``
    (from the third-party ``parse`` package), obtained by matching each
    classified log line against the format strings the log itself
    declares; ``flags`` is the JSON flag dump from the log header.
    """
    flags = parse_flags(filename)
    # The log declares its own line formats; fetch all four of them.
    train_str, train_extra_str = get_format(filename, FMT_TRAIN), get_format(filename, FMT_TRAIN_EXTRA)
    eval_str, eval_extra_str = get_format(filename, FMT_EVAL), get_format(filename, FMT_EVAL_EXTRA)
    dtrain, dtrain_extra, deval, deval_extra = [], [], [], []
    with open(filename) as f:
        for line in f:
            line = line.strip()
            # Classify each line, then parse it from its start marker onward.
            if is_train(line):
                dtrain.append(parse(train_str, line[line.find(START_TRAIN):].strip()))
            elif is_train_extra(line):
                dtrain_extra.append(parse(train_extra_str, line[line.find(START_TRAIN_EXTRA):].strip()))
            elif is_eval(line):
                deval.append(parse(eval_str, line[line.find(START_EVAL):].strip()))
            elif is_eval_extra(line):
                deval_extra.append(parse(eval_extra_str, line[line.find(START_EVAL_EXTRA):].strip()))
    return dtrain, dtrain_extra, deval, deval_extra, flags
if __name__ == '__main__':
    # Demo driver: parse the log named by --path and print a short
    # summary of the flags plus per-step accuracies (Python 2 prints).
    import gflags
    import sys
    FLAGS = gflags.FLAGS
    gflags.DEFINE_string("path", "scripts/sample.log", "")
    FLAGS(sys.argv)
    dtrain, dtrain_extra, deval, deval_extra, flags = read_file(FLAGS.path)
    print "Flags:"
    print "Model={model_type}\nLearning_Rate={learning_rate}".format(**flags)
    print
    print "Train:"
    for d in dtrain:
        print("Step: {} Acc: {}".format(d['step'], d['class_acc']))
    print
    print "Eval:"
    for d in deval:
        print("Step: {} Acc: {}".format(d['step'], d['class_acc']))
| Python | 0 | |
8b1e6b226d925d7f2ef4890463122ec8046aa07a | add test | sensor/test_compass.py | sensor/test_compass.py | #! /usr/bin/python
from Adafruit_LSM303 import LSM303
lsm = LSM303()
# Poll the LSM303 sensor forever, dumping raw readings (Python 2 print).
while 1:
    print lsm.read()
a5dbda3f429d0a1e6cb4fc28b2a620dc2b40fd59 | Resolve import dependency in consoleauth service | nova/cmd/consoleauth.py | nova/cmd/consoleauth.py | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VNC Console Proxy Server."""
import sys
from oslo.config import cfg
from nova import config
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import version
CONF = cfg.CONF
def main():
    """Entry point for the nova-consoleauth service."""
    config.parse_args(sys.argv)
    logging.setup("nova")
    # Register all nova versioned objects before the service starts.
    objects.register_all()
    gmr.TextGuruMeditation.setup_autorun(version)
    server = service.Service.create(binary='nova-consoleauth',
                                    topic=CONF.consoleauth_topic)
    service.serve(server)
    service.wait()
| # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VNC Console Proxy Server."""
import sys
from oslo.config import cfg
from nova import config
from nova.openstack.common import log as logging
from nova.openstack.common.report import guru_meditation_report as gmr
from nova import service
from nova import version
CONF = cfg.CONF
def main():
config.parse_args(sys.argv)
logging.setup("nova")
gmr.TextGuruMeditation.setup_autorun(version)
server = service.Service.create(binary='nova-consoleauth',
topic=CONF.consoleauth_topic)
service.serve(server)
service.wait()
| Python | 0.000014 |
23b2578fadd8a7ee0885e9956a10667d647acaf8 | add basic test for bist | test/test_bist.py | test/test_bist.py | #!/usr/bin/env python3
from litex.soc.tools.remote import RemoteClient
# Connect to the target over the litex remote (wishbone) bridge.
wb = RemoteClient(csr_data_width=8)
wb.open()
regs = wb.regs
# # #
test_size = 128*1024*1024  # 128 MiB exercised by the BIST
# Run the BIST generator over the range starting at address 0.
regs.generator_reset.write(1)
regs.generator_reset.write(0)
regs.generator_base.write(0)
# Length is given in 128-bit words: bytes * 8 bits / 128.
regs.generator_length.write((test_size*8)//128)
regs.generator_shoot.write(1)
while(not regs.generator_done.read()):
    pass  # busy-wait for the generator to finish
# Run the checker over the same range and report mismatches.
regs.checker_reset.write(1)
regs.checker_reset.write(0)
regs.checker_base.write(0)
regs.checker_length.write((test_size*8)//128)
regs.checker_shoot.write(1)
while(not regs.checker_done.read()):
    pass  # busy-wait for the checker to finish
print("errors: {:d}".format(regs.checker_error_count.read()))
# # #
wb.close()
| Python | 0.00044 | |
f070b3c9a97b16aebc8500af703ed713e170f519 | Fix Dask-on-Ray test: Python 3 dictionary .values() is a view, and is not indexable (#13945) | python/ray/tests/test_dask_scheduler.py | python/ray/tests/test_dask_scheduler.py | import dask
import numpy as np
import dask.array as da
import pytest
import ray
from ray.util.dask import ray_dask_get
def test_ray_dask_basic(ray_start_regular_shared):
    """Ray and Dask interoperate in both directions.

    Dask-delayed functions may call Ray APIs, and Ray tasks may compute
    Dask graphs via the ray_dask_get scheduler.
    """
    @ray.remote
    def stringify(x):
        return "The answer is {}".format(x)
    zero_id = ray.put(0)
    def add(x, y):
        # Can retrieve ray objects from inside Dask.
        zero = ray.get(zero_id)
        # Can call Ray methods from inside Dask.
        return ray.get(stringify.remote(x + y + zero))
    add = dask.delayed(add)
    @ray.remote
    def call_add():
        z = add(2, 4)
        # Can call Dask graphs from inside Ray.
        return z.compute(scheduler=ray_dask_get)
    ans = ray.get(call_add.remote())
    assert ans == "The answer is 6", ans
def test_ray_dask_persist(ray_start_regular_shared):
    """dask.array persist() works with the Ray scheduler.

    ``next(iter(...))`` fetches the first graph value because dict views
    are not indexable on Python 3.
    """
    arr = da.ones(5) + 2
    result = arr.persist(scheduler=ray_dask_get)
    np.testing.assert_array_equal(
        next(iter(result.dask.values())),
        np.ones(5) + 2)
if __name__ == "__main__":
    import sys
    # Allow running this test module directly with pytest's runner.
    sys.exit(pytest.main(["-v", __file__]))
| import dask
import numpy as np
import dask.array as da
import pytest
import ray
from ray.util.dask import ray_dask_get
def test_ray_dask_basic(ray_start_regular_shared):
@ray.remote
def stringify(x):
return "The answer is {}".format(x)
zero_id = ray.put(0)
def add(x, y):
# Can retrieve ray objects from inside Dask.
zero = ray.get(zero_id)
# Can call Ray methods from inside Dask.
return ray.get(stringify.remote(x + y + zero))
add = dask.delayed(add)
@ray.remote
def call_add():
z = add(2, 4)
# Can call Dask graphs from inside Ray.
return z.compute(scheduler=ray_dask_get)
ans = ray.get(call_add.remote())
assert ans == "The answer is 6", ans
def test_ray_dask_persist(ray_start_regular_shared):
arr = da.ones(5) + 2
result = arr.persist(scheduler=ray_dask_get)
np.testing.assert_array_equal(result.dask.values()[0], np.ones(5) + 2)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| Python | 0.000001 |
8e91c1fa76382f3b2568c425b41339f5597f9268 | Add bound and brake solver (initial raw implementation) | solvers/BoundAndBrake.py | solvers/BoundAndBrake.py | #!/usr/bin/env python
# encoding: utf-8
from collections import deque
from copy import deepcopy
from itertools import permutations
from random import shuffle
from base_solver import BaseSolver
INF = float('inf')
class PartialSolution(object):
    """A partially-built route plus its lower/upper distance bounds."""

    lower_bound = INF
    upper_bound = INF
    done = False

    def __init__(self, partial_route=None):
        # BUG fix: the default used to be a mutable ``[]`` shared between
        # every instance created without an argument; use a None sentinel
        # so each instance gets its own fresh list.
        self.partial_route = [] if partial_route is None else partial_route

    def build(self, task, ancestor, next_stop):
        """Extend *ancestor*'s route with *next_stop* and recompute bounds."""
        # Copy the ancestor's route and insert the new stop just before
        # the final (finish) node.  (A stray no-op ``self.partial_route``
        # expression statement was removed here.)
        self.partial_route = ancestor.partial_route[:]
        self.partial_route.insert(-1, next_stop)
        # Lower bound: distance of the committed part of the route.
        self.lower_bound = task.get_path_distance(self.partial_route)
        # Upper bound: append every not-yet-used node (in arbitrary set
        # order) before the finish node and measure that full route.
        upper_bound_route = (
            self.partial_route[:-1] +
            list(set(task.all_nodes.keys()) - set(self.partial_route)) +
            [self.partial_route[-1]]
        )
        self.upper_bound = task.get_path_distance(upper_bound_route)
        # When the bounds coincide (no free nodes remain) the solution is
        # treated as complete.
        if self.lower_bound == self.upper_bound:
            self.done = True
class BoundAndBrakeDeepFitstSearch(BaseSolver):
    """Branch-and-bound depth-first search over partial routes."""
    deterministic = False  # actually it's distance is deterministic,
    # but time isn't (and the random initial solution varies per run).

    # helper: key function ordering partial solutions by their upper
    # bound.  Accessed as the bound method ``self.sort_key``, so it acts
    # as a one-argument key for sort().
    sort_key = lambda self, x: x.upper_bound
    cycles = 0  # number of partial solutions expanded so far

    def __init__(self, *args, **kwargs):
        super(BoundAndBrakeDeepFitstSearch, self).__init__(*args, **kwargs)

    def run_search(self):
        """Run the search; returns (best_route, best_distance, cycles)."""
        # Seed the incumbent with a random full route so pruning has a
        # score to compare against from the start.
        self.current_best = self.get_random_solution()
        self.current_score = self.task.get_path_distance(self.current_best)
        # Root partial solution: just the start and finish nodes.
        solution = PartialSolution([self.task.start.name, self.task.finish.name])
        solution.lower_bound = self.current_score
        self.best_upper = solution
        self.to_check = deque([solution,])
        self.traverse()
        return self.current_best, self.current_score, self.cycles

    def traverse(self):
        """Depth-first expansion of the ``to_check`` stack with pruning."""
        while 1:
            try:
                solution = self.to_check.pop()
            except IndexError:
                # all solutions have been checked - this is the end
                break
            # check if this solution is still worth checking (the
            # incumbent may have improved since it was queued)
            if not (solution.lower_bound <= self.current_score
                    and solution.lower_bound < self.best_upper.upper_bound):
                # if not, then continue...
                continue
            self.cycles += 1
            partials = []
            # iterate over unused stops...
            for stop in (set(self.task.all_nodes.keys()) - set(solution.partial_route)):
                # and create partial solutions
                partial = PartialSolution()
                partial.build(self.task, solution, stop)
                # check if this is a full solution...
                if partial.done:
                    # ... and if it is the best so far
                    if partial.lower_bound < self.current_score:
                        self.current_best = partial.partial_route
                        self.current_score = partial.lower_bound
                # if solutions lower bound is lower then current_best, and lower
                # then best partial solutions upper bound...
                elif (partial.lower_bound < self.current_score
                      and partial.lower_bound < self.best_upper.upper_bound):
                    # ...then add it to the list of potential best solutions
                    partials.append(partial)
                # otherwise - forget about it
                else:
                    pass
            # Queue the survivors ordered by upper bound before the next
            # pop() from the right end of the deque.
            partials.sort(key=self.sort_key)
            self.to_check.extend(partials)

    def get_random_solution(self):
        """Return a random full route: start + shuffled mid nodes + finish."""
        route = [n.name for n in self.task.mid_nodes]
        shuffle(route)
        route = [self.task.start.name, ] + route
        route.append(self.task.finish.name)
        return route
| Python | 0 | |
5e008ac92016a092c1ce9c9590a79d72f4cf1cf6 | Initialize tests | tests/__main__.py | tests/__main__.py | import unittest
if __name__ == '__main__':
    # Run unittest's CLI entry point when executed directly.
    unittest.main()
| Python | 0.000001 | |
0e3effc3a7402d3b4c1b2c91539c4d1004c5b0e3 | Add test_traitscli.py | test_traitscli.py | test_traitscli.py | import unittest
from traits.api import Event, Callable, Type
from traitscli import TraitsCLIBase
from sample import SampleCLI
class TestingCLIBase(TraitsCLIBase):
    """CLI base whose run step snapshots every trait value.

    ``do_run`` stores a name -> value dict in ``self.attributes`` so the
    tests can compare parsed CLI results against expectations.
    """
    def do_run(self):
        # Get trait attribute names
        names = self.class_trait_names(
            # Avoid 'trait_added' and 'trait_modified'
            # (See also `HasTraits.traits`):
            trait_type=lambda t: not isinstance(t, Event))
        self.attributes = dict((n, getattr(self, n)) for n in names)
class TestCaseBase(unittest.TestCase):
    """Base test case: run ``cliclass.cli(args)`` and check its traits."""
    cliclass = None
    """Subclass of `TraitsCLIBase`."""
    def assert_attributes(self, attributes, args=[]):
        # NOTE(review): ``args=[]`` is a mutable default; harmless here
        # since it is never mutated, but a None sentinel would be safer.
        ret = self.cliclass.cli(args)
        self.assertEqual(ret.attributes, attributes)
class TestSampleCLI(TestCaseBase):
    """End-to-end checks of SampleCLI argument parsing."""
    class cliclass(TestingCLIBase, SampleCLI):
        pass
    def test_empty_args(self):
        # With no arguments every trait keeps its default value.
        self.assert_attributes(dict(
            yes=False,
            no=True,
            fnum=0.0,
            inum=0,
            string='',
            choice='a',
            not_configurable_from_cli=False,
        ))
    def test_full_args(self):
        # Each CLI option sets the corresponding trait; the
        # non-configurable trait keeps its default.
        self.assert_attributes(
            dict(
                yes=True,
                no=False,
                fnum=0.2,
                inum=2,
                string='some string',
                choice='b',
                not_configurable_from_cli=False,
            ),
            ['--yes', '--no',
             '--fnum', '0.2',
             '--inum', '2',
             '--string', 'some string',
             '--choice', 'b',
             ])
    def test_invalid_type_int(self):
        # Unparsable values make the CLI exit with SystemExit.
        self.assertRaises(SystemExit, self.cliclass.cli, ['--inum', 'x'])
    def test_invalid_type_float(self):
        self.assertRaises(SystemExit, self.cliclass.cli, ['--fnum', 'x'])
    def test_invalid_type_enum(self):
        self.assertRaises(SystemExit, self.cliclass.cli, ['--choice', 'x'])
class TestEvalType(TestCaseBase):
    """Check that Callable and Type traits are evaluated from CLI strings."""
    class cliclass(TestingCLIBase):
        callable = Callable(config=True)
        type = Type(config=True)
    def test_full_args(self):
        # '--callable id' / '--type int' should resolve to the builtins.
        self.assert_attributes(
            dict(
                callable=id,
                type=int,
            ),
            ['--callable', 'id',
             '--type', 'int',
             ])
| Python | 0.000004 | |
31622652980f603ddc308dff514eae65635eb318 | Add serializers to serialize Image to: - A PIL image (optionally resized) - A binary object (optionally resized) | app/grandchallenge/retina_api/serializers.py | app/grandchallenge/retina_api/serializers.py | from io import BytesIO
import SimpleITK as sitk
from PIL import Image as PILImage
from django.http import Http404
from rest_framework import serializers
class PILImageSerializer(serializers.BaseSerializer):
    """
    Read-only serializer that returns a PIL image from a Image instance.
    If "width" and "height" are passed as extra serializer content, the
    PIL image will be resized to those dimensions.
    """
    def to_representation(self, instance):
        image_itk = instance.get_sitk_image()
        if image_itk is None:
            raise Http404
        pil_image = self.convert_itk_to_pil(image_itk)
        try:
            # Resize in place; thumbnail() preserves the aspect ratio.
            pil_image.thumbnail(
                (self.context["width"], self.context["height"]),
                PILImage.ANTIALIAS,
            )
        except KeyError:
            # No width/height in the serializer context: keep full size.
            pass
        return pil_image
    @staticmethod
    def convert_itk_to_pil(image_itk):
        # Convert a SimpleITK image to a PIL image via a numpy array.
        depth = image_itk.GetDepth()
        image_nparray = sitk.GetArrayFromImage(image_itk)
        if depth > 0:
            # Get center slice of image if 3D
            image_nparray = image_nparray[depth // 2]
        return PILImage.fromarray(image_nparray)
class BytesImageSerializer(PILImageSerializer):
    """
    Read-only serializer that returns a BytesIO image from an Image instance.
    """
    def to_representation(self, instance):
        # Reuse the PIL serialization (including any optional resizing),
        # then encode it to PNG bytes.
        image_pil = super().to_representation(instance)
        return self.create_thumbnail_as_bytes_io(image_pil)
    @staticmethod
    def create_thumbnail_as_bytes_io(image_pil):
        # NOTE(review): despite the name, this returns the raw PNG bytes
        # (BytesIO.getvalue()), not the BytesIO object itself.
        buffer = BytesIO()
        image_pil.save(buffer, format="png")
        return buffer.getvalue()
| Python | 0.000002 | |
101a4c1288ddadbad6dbe0186adde3921ef2546f | add ctrl-c handler | lib/ctrlc.py | lib/ctrlc.py | import sys
import time
import signal
class CtrlC:
    """SIGINT bookkeeping: first press requests shutdown, second forces exit."""

    # Set to True once the handler has seen a first Ctrl-C.
    pressed = False

    @classmethod
    def handle(cls, signal, frame):
        """Signal-handler callback (standard ``(signum, frame)`` signature)."""
        print('Ctrl-C pressed, will exit soon')
        if not cls.pressed:
            # First press: just record it and let the program wind down.
            cls.pressed = True
            return
        # Second press: bail out immediately.
        print('Ctrl-C pressed twice. Committing violent suicide.')
        sys.exit(1)
# Install the handler for SIGINT as soon as this module is imported.
signal.signal(signal.SIGINT, CtrlC.handle)
if __name__ == '__main__':
    # Manual test: press Ctrl-C during the first sleep, then watch 'yay'.
    time.sleep(2)
    if CtrlC.pressed:
        print('yay')
    time.sleep(2)
| Python | 0.000036 | |
1298cf9c7a40ce73d46067035ded2318c62f7380 | Add simple tests for DrsSymbol and DrsIndexed | tests/drs_test.py | tests/drs_test.py | """Tests for drudge scripts."""
from sympy import Symbol, IndexedBase
from drudge.drs import DrsSymbol
from drudge.utils import sympy_key
#
# Unit tests for the utility classes and functions
# ------------------------------------------------
#
def test_basic_drs_symb():
    """Test the symbol class for basic operations.

    A DrsSymbol must be interchangeable with the SymPy Symbol of the
    same name: equality, hashing, dict lookup and sort key all agree.
    """
    name = 'a'
    ref = Symbol(name)
    dict_ = {ref: 1}
    # The first (drudge) argument is irrelevant for these checks.
    symbs = [
        DrsSymbol(None, name),
        DrsSymbol([], name)
    ]
    for i in symbs:
        assert isinstance(i, DrsSymbol)
        assert ref == i
        assert i == ref
        assert hash(ref) == hash(i)
        assert dict_[i] == 1
        assert sympy_key(ref) == sympy_key(i)
    # A differently-named symbol must disagree on all of the above.
    ref = Symbol(name + 'x')
    for i in symbs:
        assert ref != i
        assert i != ref
        assert hash(ref) != hash(i)
        assert sympy_key(ref) != sympy_key(i)
def test_basic_drs_indexed():
    """Test basic properties of drudge script indexed object."""
    base_name = 'a'
    orig_base = IndexedBase(base_name)
    for drudge in [None, []]:
        # Pairs of (sympy indices, equivalent DrsSymbol indices), for a
        # single index and for an index tuple.
        matching_indices = [
            (Symbol('x'), DrsSymbol(drudge, 'x')),
            (
                (Symbol('x'), Symbol('y')),
                (DrsSymbol(drudge, 'x'), DrsSymbol(drudge, 'y'))
            )
        ]
        drs_base = DrsSymbol(drudge, base_name)
        for orig_indices, drs_indices in matching_indices:
            ref = orig_base[orig_indices]
            # Every combination of base/index flavours must produce the
            # same indexed object (equality, hash and sort key).
            for i in [
                orig_base[drs_indices],
                drs_base[orig_indices],
                drs_base[drs_indices]
            ]:
                assert ref == i
                assert hash(ref) == hash(i)
                assert sympy_key(ref) == sympy_key(i)
| Python | 0 | |
a412295b09481113d6f42565520d03ce8bfd36b8 | Create ECIScraper.py | ECIScraper.py | ECIScraper.py | from bs4 import BeautifulSoup as bs
import httplib
class ECIScrapper:
def __init__(self, url):
self.url = url.split("/")[0]
self.getrequest = '/'.join(url.split('/')[1:])
print self.url, self.getrequest
self.connection = httplib.HTTPConnection(self.url)
self.connection.request("GET", '/'+self.getrequest)
self.response = self.connection.getresponse()
self.page = self.response.read()
self.soup = bs(self.page)
print self.soup.find_all('table', style="margin: auto; width: 100%; font-family: Verdana; border: solid 1px black;font-weight:lighter")
style = "margin: auto; width: 100%; font-family: Verdana; border: solid 1px black;font-weight:lighter"
def getData(self):
print url;
if __name__=="__main__":
    # Example: a constituency-wise results page (host/path, no scheme).
    url = "eciresults.ap.nic.in/ConstituencywiseS2653.htm?ac=53"
    ECIScrapper(url)
| Python | 0 | |
67018fa6dc38f0035b1ce17dee4a7840f37cab30 | Move documentation to Sphinx/RST | doc/conf.py | doc/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# libslax documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 10 10:18:55 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'slax'
# General information about the project.
project = 'libslax'
copyright = '2017, Juniper Networks'
author = 'Phil Shafer'
default_role = 'code'
primary_domain = 'c'
smart_quotes = False
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.22.0'
# The full version, including alpha/beta/rc tags.
release = '0.22.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
"sidebarwidth": 320,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
alabaster_html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
# (Fixed: these output names were left over from libxo's conf.py
# boilerplate; this project is libslax.)
htmlhelp_basename = 'libslax-manual'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'libslax.tex', 'libslax Documentation',
     'Phil Shafer', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'libslax', 'libslax Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): description updated from the libxo boilerplate; confirm
# the preferred project wording.
texinfo_documents = [
    (master_doc, 'libslax', 'libslax Documentation',
     author, 'libslax', 'An implementation of the SLAX language (an alternate syntax for XSLT)',
     'Miscellaneous'),
]
| Python | 0 | |
1753de3492b76d9c13d72bde7f13c0f696499e3a | Add configuration of pytest with some fixtures related to tests based on fantasy example | tests/conftest.py | tests/conftest.py | import json
import socket
import uuid
import docker as libdocker
import pathlib
import invoke
import psycopg2
import pytest
import time
from jsonschema import Draft4Validator
DSN_FORMAT = 'postgresql://{user}:{password}@{host}:{port}/{dbname}'
@pytest.fixture(scope='session')
def session_id():
return str(uuid.uuid4())
@pytest.fixture(scope='session')
def docker():
return libdocker.APIClient()
@pytest.fixture(scope='session')
def unused_port():
def f():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(('127.0.0.1', 0))
return s.getsockname()[1]
return f
@pytest.fixture(scope='session')
def here():
return pathlib.Path(__file__).parent
@pytest.yield_fixture(scope='session')
def pg_server(unused_port, session_id, docker):
docker_image = 'postgres:10-alpine'
database = 'example'
user = 'example'
password = 'somepassword'
port = unused_port()
host_config_options = {'port_bindings': {5432: port}}
host_config = dict(
tmpfs={'/var/lib/postgresql/data': ''},
**host_config_options
)
docker.pull(docker_image)
container = docker.create_container(
image=docker_image,
name=f'test-fantasy-example-{session_id}',
ports=[5432],
detach=True,
environment={
'POSTGRES_USER': user,
'POSTGRES_PASSWORD': password
},
host_config=docker.create_host_config(**host_config)
)
docker.start(container=container['Id'])
host = '0.0.0.0'
pg_params = dict(dbname=database,
user=user,
password=password,
host=host,
port=port,
connect_timeout=2)
delay = 0.001
for i in range(20):
try:
conn = psycopg2.connect(**pg_params)
conn.close()
break
except psycopg2.Error:
time.sleep(delay)
delay *= 2
else:
pytest.fail("Cannot start postgres server")
inspection = docker.inspect_container(container['Id'])
container['host'] = inspection['NetworkSettings']['IPAddress']
container['port'] = 5432
container['pg_params'] = pg_params
yield container
docker.kill(container=container['Id'])
docker.remove_container(container['Id'])
@pytest.fixture(scope='session')
def pg_params(pg_server):
    """Fresh copy of the session Postgres connection parameters."""
    params = pg_server['pg_params']
    return dict(**params)
@pytest.fixture(scope='session')
def populated_db(here, pg_params):
    # Load the fantasy-database fixture data into the session Postgres
    # instance via the project's invoke task.  Runs once per session;
    # returns nothing -- fixtures depend on it purely for the side effect.
    from examples.fantasy.tasks import populate_db
    populate_db(
        invoke.context.Context(),
        data_folder=here.parent / 'examples' / 'fantasy' / 'fantasy-database',
        dsn=DSN_FORMAT.format(**pg_params)
    )
@pytest.fixture(scope='session')
def jsonapi_validator(here):
    """Draft-4 JSON Schema validator built from the bundled schema file."""
    schema_path = here / 'spec' / 'schema.dms'
    with open(schema_path) as schema_file:
        schema = json.load(schema_file)
    # Fail fast if the schema document itself is malformed.
    Draft4Validator.check_schema(schema)
    return Draft4Validator(schema)
@pytest.fixture
async def fantasy_app(loop, pg_params, populated_db):
    # Function-scoped application wired to the populated session database.
    # `loop` is an injected fixture -- presumably the aiohttp/pytest event
    # loop; confirm against the project's test plugins.
    from examples.fantasy.main import init
    return await init(DSN_FORMAT.format(**pg_params), loop=loop)
| Python | 0 | |
82617f295ed21c179bab6ad3c3c2af5c417f40ba | Install pandas and scipy from Anaconda as part of upgrade process. Provides final installation fix for burden testing code. #167 #191 | gemini/gemini_update.py | gemini/gemini_update.py | """Perform in-place updates of gemini and databases when installed into virtualenv.
"""
import os
import subprocess
import sys
import gemini.config
def release(parser, args):
"""Update gemini to the latest release, along with associated data files.
"""
url = "https://raw.github.com/arq5x/gemini/master/requirements.txt"
# update locally isolated python
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
activate_bin = os.path.join(os.path.dirname(sys.executable), "activate")
conda_bin = os.path.join(os.path.dirname(sys.executable), "conda")
if os.path.exists(conda_bin):
pkgs = ["cython", "distribute", "ipython", "nose", "numpy",
"pip", "pycrypto", "pyparsing", "pysam", "pyyaml",
"pyzmq", "pandas", "scipy"]
subprocess.check_call([conda_bin, "install", "--yes"] + pkgs)
elif os.path.exists(activate_bin):
subprocess.check_call([pip_bin, "install", "--upgrade", "distribute"])
else:
raise NotImplementedError("Can only upgrade gemini installed in anaconda or virtualenv")
# update libraries
#subprocess.check_call([pip_bin, "install", "-r", url])
# update datafiles
config = gemini.config.read_gemini_config()
install_script = os.path.join(os.path.dirname(__file__), "install-data.py")
subprocess.check_call([sys.executable, install_script, config["annotation_dir"]])
print "Gemini upgraded to latest version"
# update tests
test_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(pip_bin))),
"gemini")
if os.path.exists(test_dir) and os.path.exists(os.path.join(test_dir, "master-test.sh")):
os.chdir(test_dir)
subprocess.check_call(["git", "pull", "origin", "master"])
print "Run test suite with: cd %s && bash master-test.sh" % test_dir
| """Perform in-place updates of gemini and databases when installed into virtualenv.
"""
import os
import subprocess
import sys
import gemini.config
def release(parser, args):
"""Update gemini to the latest release, along with associated data files.
"""
url = "https://raw.github.com/arq5x/gemini/master/requirements.txt"
# update locally isolated python
pip_bin = os.path.join(os.path.dirname(sys.executable), "pip")
activate_bin = os.path.join(os.path.dirname(sys.executable), "activate")
conda_bin = os.path.join(os.path.dirname(sys.executable), "conda")
if os.path.exists(conda_bin):
pkgs = ["cython", "distribute", "ipython", "nose", "numpy",
"pip", "pycrypto", "pyparsing", "pysam", "pyyaml", "pyzmq"]
subprocess.check_call([conda_bin, "install", "--yes"] + pkgs)
elif os.path.exists(activate_bin):
subprocess.check_call([pip_bin, "install", "--upgrade", "distribute"])
else:
raise NotImplementedError("Can only upgrade gemini installed in anaconda or virtualenv")
# update libraries
#subprocess.check_call([pip_bin, "install", "-r", url])
# update datafiles
config = gemini.config.read_gemini_config()
install_script = os.path.join(os.path.dirname(__file__), "install-data.py")
subprocess.check_call([sys.executable, install_script, config["annotation_dir"]])
print "Gemini upgraded to latest version"
# update tests
test_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(pip_bin))),
"gemini")
if os.path.exists(test_dir) and os.path.exists(os.path.join(test_dir, "master-test.sh")):
os.chdir(test_dir)
subprocess.check_call(["git", "pull", "origin", "master"])
print "Run test suite with: cd %s && bash master-test.sh" % test_dir
| Python | 0 |
7925c0a4536a221adc5c76eaccc0a1c79c9a7efa | Add bower module | lib/ansible/modules/extras/packaging/bower.py | lib/ansible/modules/extras/packaging/bower.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Michael Warkentin <mwarkentin@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bower
short_description: Manage bower packages with bower
description:
- Manage bower packages with bower
version_added: 1.7
author: Michael Warkentin
options:
name:
description:
- The name of a bower package to install
required: false
offline:
description:
- Install packages from local cache, if the packages were installed before
required: false
default: no
choices: [ "yes", "no" ]
path:
description:
- The base path where to install the bower packages
required: true
state:
description:
- The state of the bower package
required: false
default: present
choices: [ "present", "absent", "latest" ]
version:
description:
- The version to be installed
required: false
'''
EXAMPLES = '''
description: Install "bootstrap" bower package.
- bower: name=bootstrap
description: Install "bootstrap" bower package on version 3.1.1.
- bower: name=bootstrap version=3.1.1
description: Remove the "bootstrap" bower package.
- bower: name=bootstrap state=absent
description: Install packages based on bower.json.
- bower: path=/app/location
description: Update packages based on bower.json to their latest version.
- bower: path=/app/location state=latest
'''
class Bower(object):
    """Thin wrapper around the ``bower`` command line tool for Ansible."""

    def __init__(self, module, **kwargs):
        # module: the AnsibleModule driving this run (check mode, run_command)
        self.module = module
        self.name = kwargs['name']
        self.offline = kwargs['offline']
        self.path = kwargs['path']
        self.version = kwargs['version']
        # "name#version" pins a specific release when a version was given
        if kwargs['version']:
            self.name_version = self.name + '#' + self.version
        else:
            self.name_version = self.name

    def _exec(self, args, run_in_check_mode=False, check_rc=True):
        # Run ``bower <args> [name[#version]] [--offline]`` inside self.path.
        # Returns stdout, or '' when skipped because of Ansible check mode.
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = ["bower"] + args
            if self.name:
                cmd.append(self.name_version)
            if self.offline:
                cmd.append('--offline')
            # If path is specified, cd into that path and run the command.
            cwd = None
            if self.path:
                if not os.path.exists(self.path):
                    os.makedirs(self.path)
                if not os.path.isdir(self.path):
                    self.module.fail_json(msg="path %s is not a directory" % self.path)
                cwd = self.path
            rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
            return out
        return ''

    def list(self):
        # Parse ``bower list --json`` into three lists of package names:
        # (installed-and-current, missing, outdated-or-incompatible).
        cmd = ['list', '--json']
        installed = list()
        missing = list()
        outdated = list()
        # run even in check mode (read-only), and tolerate non-zero rc
        data = json.loads(self._exec(cmd, True, False))
        if 'dependencies' in data:
            for dep in data['dependencies']:
                if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
                    missing.append(dep)
                elif data['dependencies'][dep]['pkgMeta']['version'] != data['dependencies'][dep]['update']['latest']:
                    outdated.append(dep)
                elif 'incompatible' in data['dependencies'][dep] and data['dependencies'][dep]['incompatible']:
                    outdated.append(dep)
                else:
                    installed.append(dep)
        # Named dependency not installed
        else:
            missing.append(self.name)
        return installed, missing, outdated

    def install(self):
        return self._exec(['install'])

    def update(self):
        return self._exec(['update'])

    def uninstall(self):
        return self._exec(['uninstall'])
def main():
    # Ansible module entry point: declare the accepted options, compare the
    # desired state against what `bower list` reports, and install/update/
    # uninstall accordingly.  Exits via module.exit_json / fail_json.
    arg_spec = dict(
        name=dict(default=None),
        offline=dict(default='no', type='bool'),
        path=dict(required=True),
        state=dict(default='present', choices=['present', 'absent', 'latest', ]),
        version=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=arg_spec
    )
    name = module.params['name']
    offline = module.params['offline']
    path = module.params['path']
    state = module.params['state']
    version = module.params['version']

    # state=absent only makes sense for a specific package
    if state == 'absent' and not name:
        module.fail_json(msg='uninstalling a package is only available for named packages')

    bower = Bower(module, name=name, offline=offline, path=path, version=version)

    changed = False
    if state == 'present':
        # install only if something is missing
        installed, missing, outdated = bower.list()
        if len(missing):
            changed = True
            bower.install()
    elif state == 'latest':
        # update if anything is missing or behind the latest release
        installed, missing, outdated = bower.list()
        if len(missing) or len(outdated):
            changed = True
            bower.update()
    else:  # Absent
        installed, missing, outdated = bower.list()
        if name in installed:
            changed = True
            bower.uninstall()

    module.exit_json(changed=changed)
# Import module snippets
from ansible.module_utils.basic import *
main()
| Python | 0 | |
7259a1ea9f0d32249e96581ecc78bcdd81197f2e | add maxmind-importer (#3) | Ip-maxmind.py | Ip-maxmind.py | import base64
import codecs
from collections import namedtuple
import csv
import os
import io
import sys
import zipfile
import requests
import iptools
IP_VERSION = {'ipv4': 'IPv4', 'ipv6': 'IPv6'}
# Include or Exclude
ei_action = None
def error(text):
    """Print *text* to stderr and terminate with a non-zero exit status.

    All usage/validation failures in this script funnel through here.
    """
    sys.stderr.write(text + "\n")
    # sys.exit(1) instead of the bare site builtin exit(): it is always
    # available (exit() is absent under ``python -S``) and reports failure
    # to the shell instead of exiting with status 0.
    sys.exit(1)
archive = requests.get('http://geolite.maxmind.com/download/geoip/database/GeoLite2-City-CSV.zip')
with zipfile.ZipFile(io.BytesIO(archive.content)) as ziped_data:
# get parent folder name
zip_name = ziped_data.namelist()[0].split('/')[0]
# get list of available csv languages
available_languages = [x.replace('GeoLite2-City-Locations-', '').replace('.csv', '').replace(zip_name+'/', '')
for x in ziped_data.namelist() if 'GeoLite2-City-Locations' in x]
# arg testing
# args: lang ip_ver path include-exclude ISO_CODEs
args = sys.argv[1:]
if len(args) < 3:
error("\nUsage:python3 Ip-maxmind.py language ip_ver path -optional(include or exclude one or more country)\n"
"\nExample:python3 Ip-maxmind.py ru ipv4 /full/path/to/file.txt"
"\n\nExample:python3 Ip-maxmind.py ru ipv4 /full/path/to/file.txt include RU UA BY"
"\navailable_languages:" + ' '.join(available_languages))
if args[0] not in available_languages:
error('Unsuported language:' + args[0])
filename = args[2]
if len(args) > 3:
if args[3] != 'include' and args[3] != 'exclude':
error('4rd argument must be a include or exclude, not ' + args[3])
if len(args) <= 4:
error('nothing to ' + args[3])
else:
ei_action = args[3]
iso_codes = args[4:] # for include or exclude
if IP_VERSION.get(args[1]):
ipver = IP_VERSION.get(args[1])
else:
error('ip_version can be ipv4 or ipv6 not ' + args[1])
with ziped_data.open(os.path.join(zip_name, 'GeoLite2-City-Blocks-' + ipver + '.csv')) as zip_blocks:
city_blocks = [i for i in csv.reader(codecs.iterdecode(zip_blocks, 'utf-8'), delimiter=',')]
# field names from 1st row of csv file
Block_IP = namedtuple('Block_'+ipver, ' '.join(city_blocks[0]))
ip_blocks = [Block_IP(*x) for x in city_blocks[1:]]
with ziped_data.open(os.path.join(zip_name, 'GeoLite2-City-Locations-'+args[0]+'.csv')) as zip_city_locations:
city_locations = [i for i in csv.reader(codecs.iterdecode(zip_city_locations, 'utf-8'), delimiter=',')]
# field names from 1st row of csv file
CityLocations = namedtuple('CityLocations', ' '.join(city_locations[0]))
city_locations = [CityLocations(*x) for x in city_locations[1:]]
def get_ip_range(cidr):
    """Render an IPv4 CIDR block as a "first-last" dotted-quad range string."""
    block = iptools.ipv4.cidr2block(cidr)
    return '%s-%s' % (str(block[0]), str(block[-1]))
def join_data(ip, city):
    # Join per-network block rows (ip) with city location rows (city) on
    # geoname_id, producing one flat 'MMJoinedInfo' namedtuple per network.
    # Blocks whose geoname_id is empty (no location data) are dropped.
    city_dict = {x.geoname_id: x for x in city if x.geoname_id is not None}
    # union of both row types' field names defines the joined record
    join_fields = set(ip[0]._fields + city[0]._fields)
    jd = namedtuple('MMJoinedInfo', ' '.join(join_fields))
    joined_data = []
    for x in ip:
        if x.geoname_id:
            joined_data.append(jd(
                network=x.network,
                geoname_id=x.geoname_id,
                registered_country_geoname_id=x.registered_country_geoname_id,
                represented_country_geoname_id=x.represented_country_geoname_id,
                is_anonymous_proxy=x.is_anonymous_proxy,
                is_satellite_provider=x.is_satellite_provider,
                postal_code=x.postal_code,
                latitude=x.latitude,
                longitude=x.longitude,
                accuracy_radius=x.accuracy_radius,
                # data from city_locations
                locale_code=city_dict[x.geoname_id].locale_code,
                continent_code=city_dict[x.geoname_id].continent_code,
                continent_name=city_dict[x.geoname_id].continent_name,
                country_iso_code=city_dict[x.geoname_id].country_iso_code,
                country_name=city_dict[x.geoname_id].country_name,
                subdivision_1_iso_code=city_dict[x.geoname_id].subdivision_1_iso_code,
                subdivision_1_name=city_dict[x.geoname_id].subdivision_1_name,
                subdivision_2_iso_code=city_dict[x.geoname_id].subdivision_2_iso_code,
                subdivision_2_name=city_dict[x.geoname_id].subdivision_2_name,
                city_name=city_dict[x.geoname_id].city_name,
                metro_code=city_dict[x.geoname_id].metro_code,
                time_zone=city_dict[x.geoname_id].time_zone,
            ))
    return joined_data
data_set = join_data(ip_blocks, city_locations)
os.makedirs(os.path.dirname(filename), exist_ok=True)
# Creating data for output
# for ipv4
if ipver == 'IPv4':
if ei_action == 'include':
output = '\n'.join('{} {};'.format(get_ip_range(data.network),
base64.b64encode(bytes(data.city_name, 'utf-8')).decode('utf-8'))
for data in data_set
if data.country_iso_code in iso_codes and get_ip_range(data.network) and data.city_name)
elif ei_action == 'exclude':
output = '\n'.join('{} {};'.format(get_ip_range(data.network),
base64.b64encode(bytes(data.city_name, 'utf-8')).decode('utf-8'))
for data in data_set
if data.country_iso_code not in iso_codes and get_ip_range(data.network) and data.city_name)
else:
output = '\n'.join('{} {};'.format(get_ip_range(data.network),
base64.b64encode(bytes(data.city_name, 'utf-8')).decode('utf-8'))
for data in data_set
if get_ip_range(data.network) and data.city_name)
# for ipv6
elif ipver == 'IPv6':
if ei_action == 'include':
output = '\n'.join('{} {};'.format(data.network, base64.b64encode(bytes(data.city_name, 'utf-8')).decode('utf-8'))
for data in data_set
if data.country_iso_code in iso_codes and data.network and data.city_name)
elif ei_action == 'exclude':
output = '\n'.join('{} {};'.format(data.network, base64.b64encode(bytes(data.city_name, 'utf-8')).decode('utf-8'))
for data in data_set
if data.country_iso_code not in iso_codes and data.network and data.city_name)
else:
output = '\n'.join('{} {};'.format(data.network, base64.b64encode(bytes(data.city_name, 'utf-8')).decode('utf-8'))
for data in data_set
if data.network and data.city_name)
with open(filename, 'w') as file:
file.write(output)
| Python | 0 | |
41553e2c2a9ad7f2396e8492ce11d053c2fe5c7a | Add a console application template | basic/template.py | basic/template.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
A template for writing Python application with MIT license.
Latest version can be found at https://github.com/letuananh/pydemo
References:
Python documentation:
https://docs.python.org/
argparse module:
https://docs.python.org/2/howto/argparse.html
PEP 257 - Python Docstring Conventions:
https://www.python.org/dev/peps/pep-0257/
@author: Le Tuan Anh <tuananh.ke@gmail.com>
'''
# Copyright (c) 2015, Le Tuan Anh <tuananh.ke@gmail.com>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
__author__ = "Le Tuan Anh <tuananh.ke@gmail.com>"
__copyright__ = "Copyright 2015, pydemo"
__credits__ = [ "Le Tuan Anh" ]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Le Tuan Anh"
__email__ = "<tuananh.ke@gmail.com>"
__status__ = "Prototype"
########################################################################
import sys
import os
import argparse
########################################################################
def echo(input_str):
    """Write the given string, followed by a newline, to standard output."""
    sys.stdout.write(input_str + '\n')
########################################################################
def main():
    '''Main entry of this demo application.

    Builds the argparse CLI, prints help when invoked with no arguments,
    and otherwise echoes the positional input (honoring -v / -q).
    '''
    # It's easier to create a user-friendly console application by using argparse
    # See reference at the top of this script
    parser = argparse.ArgumentParser(description="Display a line of text.")
    # Positional argument(s)
    parser.add_argument('input', help='The string to be printed.')
    # Optional argument(s): -v and -q may not be combined
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-v", "--verbose", action="store_true")
    group.add_argument("-q", "--quiet", action="store_true")
    # Main script
    if len(sys.argv) == 1:
        # User didn't pass any value in, show help
        parser.print_help()
    else:
        # Parse input arguments
        args = parser.parse_args()
        # Now do something ...
        if args.verbose:
            print("You have activated my talkative mode ...")
        if args.input:
            echo(args.input)
        elif not args.quiet:
            # only reached when the positional argument is an empty string
            print("Eh, I have nothing to print (You can shut me up by passing in the option -q) ...")
        if args.verbose:
            print("Bye sweetie ...")
    pass
if __name__ == "__main__":
main()
| Python | 0.000001 | |
f69de6e6cf63f9b3770ffdf4da32ca2149006a2e | add fit test for record, test is renamed so nose doesn't run it | scipy/stats/tests/test_fit.py | scipy/stats/tests/test_fit.py | # NOTE: contains only one test, _est_cont_fit, that is renamed so that
# nose doesn't run it
# I put this here for the record and for the case when someone wants to
# verify the quality of fit
# with current parameters:
import numpy.testing as npt
import numpy as np
from scipy import stats
from test_continuous_basic import distcont
# this is not a proper statistical test for convergence, but only
# verifies that the estimate and true values don't differ by too much
n_repl1 = 1000 # sample size for first run
n_repl2 = 5000 # sample size for second run, if first run fails
thresh_percent = 0.25 # percent of true parameters for fail cut-off
thresh_min = 0.75 # minimum difference estimate - true to fail test
#distcont = [['genextreme', (3.3184017469423535,)]]
def test_cont_fit():
    """Yield one (check, distname, args) case per continuous distribution.

    Nose-style generator test checking closeness of fitted parameters to
    the true ones.  Slow; some distributions need samples > 10000 to
    converge.
    """
    for entry in distcont:
        yield (check_cont_fit,) + tuple(entry)
def check_cont_fit(distname,arg):
    # Fit the named scipy.stats distribution to its own random sample and
    # require the estimate of (shapes..., loc, scale) to be near the truth.
    # Not a proper statistical test: thresholds are heuristic percent /
    # absolute cut-offs (see module constants).  Python 2 syntax.
    distfn = getattr(stats, distname)
    rvs = distfn.rvs(size=n_repl1,*arg)
    est = distfn.fit(rvs) #,*arg) # start with default values
    # true loc=0.0 and scale=1.0 are appended to the shape parameters
    truearg = np.hstack([arg,[0.0,1.0]])
    diff = est-truearg
    txt = ''
    # per-parameter tolerance: max(percent of true value, absolute floor)
    diffthreshold = np.max(np.vstack([truearg*thresh_percent,
                           np.ones(distfn.numargs+2)*thresh_min]),0)
    # threshold for location
    diffthreshold[-2] = np.max([np.abs(rvs.mean())*thresh_percent,thresh_min])
    if np.any(np.isnan(est)):
        raise AssertionError, 'nan returned in fit'
    else:
        if np.any((np.abs(diff) - diffthreshold) > 0.0):
            # first sample too far off: retry once with a larger sample
            ## txt = 'WARNING - diff too large with small sample'
            ## print 'parameter diff =', diff - diffthreshold, txt
            rvs = np.concatenate([rvs,distfn.rvs(size=n_repl2-n_repl1,*arg)])
            est = distfn.fit(rvs) #,*arg)
            truearg = np.hstack([arg,[0.0,1.0]])
            diff = est-truearg
            if np.any((np.abs(diff) - diffthreshold) > 0.0):
                txt = 'parameter: %s\n' % str(truearg)
                txt += 'estimated: %s\n' % str(est)
                txt += 'diff : %s\n' % str(diff)
                raise AssertionError, 'fit not very good in %s\n' % distfn.name + txt
if __name__ == "__main__":
import nose
#nose.run(argv=['', __file__])
nose.runmodule(argv=[__file__,'-s'], exit=False)
| Python | 0.000002 | |
6908f6cb06ed1d15510bc51780d4109f5bdb7423 | Add cs2cs_test.py to excercise the cs2cs binary via subprocess | python/third_party/proj/cs2cs_test.py | python/third_party/proj/cs2cs_test.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Proj cs2cs command line application."""
import os
import subprocess
import unittest
from pyglib import flags
from pyglib import resources
FLAGS = flags.FLAGS
class Cs2CsTest(unittest.TestCase):
    """Exercises the proj ``cs2cs`` binary via subprocess."""

    def setUp(self):
        # Locate the cs2cs binary inside the test resource tree.
        self.cs2cs = os.path.join(resources.GetARootDirWithAllResources(),
                                  'third_party/proj4/cs2cs')

    def testHelp(self):
        # No arguments: the usage text goes to stderr, hence the redirect.
        cmd = [self.cs2cs]
        result = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        self.assertIn('usage:', result)

    def testList(self):
        # -l: list projection identifiers
        cmd = [self.cs2cs, '-l']
        result = subprocess.check_output(cmd)
        self.assertIn('wintri : Winkel Tripel', result)

    def testListLowerP(self):
        # -lp: same projection list, alternate spelling
        cmd = [self.cs2cs, '-lp']
        result = subprocess.check_output(cmd)
        self.assertIn('wink2 : Winkel II', result)

    def testListP(self):
        # Detailed list
        cmd = [self.cs2cs, '-lP']
        result = subprocess.check_output(cmd)
        self.assertIn('PCyl', result)

    def testListEqual(self):
        # -l=<id>: detailed entry for a single projection only
        cmd = [self.cs2cs, '-l=ups']
        result = subprocess.check_output(cmd)
        self.assertIn('Universal Polar Stereographic', result)
        self.assertIn('Azi', result)
        self.assertNotIn('PCyl', result)
        self.assertNotIn('wintri', result)

    def testListEllipsoidIdentifiers(self):
        # -le: ellipsoid table (id, semi-major axis, inverse flattening)
        cmd = [self.cs2cs, '-le']
        result = subprocess.check_output(cmd)
        self.assertIn('evrst30', result)
        self.assertIn('a=6377276.345', result)
        self.assertIn('rf=300.8017', result)
        self.assertIn('Everest 1830', result)

    def testListUnits(self):
        # -lu: linear unit table (id, meters-per-unit, name)
        cmd = [self.cs2cs, '-lu']
        result = subprocess.check_output(cmd)
        self.assertIn('ch', result)
        self.assertIn('20.1168', result)
        self.assertIn('International Chain', result)

    def testListDatums(self):
        # -ld: datum table (name, ellipsoid, region)
        cmd = [self.cs2cs, '-ld']
        result = subprocess.check_output(cmd)
        self.assertIn('NAD27', result)
        self.assertIn('clrk66', result)
        self.assertIn('conus', result)

    def testTransform(self):
        # Convert a NAD83 lat/long (fed on stdin, -r swaps the input order)
        # to NAD27 UTM zone 10; output is "easting northing height".
        cmd = [self.cs2cs, '+proj=latlong', '+datum=NAD83',
               '+to', '+proj=utm', '+zone=10', '+datum=NAD27', '-r']
        proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        # Pass in latitude longitude to transform to UTM.
        stdout, _ = proc.communicate('45d15\'33.1" 111.5W\n')
        result = [float(val) for val in stdout.replace('\t', ' ').split(' ')]
        self.assertEqual(len(result), 3)
        self.assertAlmostEqual(result[0], 1402285.98, delta=0.001)
        self.assertAlmostEqual(result[1], 5076292.42)
        self.assertAlmostEqual(result[2], 0.0)
# TODO(schwehr): Add more tests
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
7c8650881879a2c60585b52d3154592621edbf52 | add rapdis | qm/grover3d/rapids/compute-grover3.py | qm/grover3d/rapids/compute-grover3.py | import cudf
iters=20
init=[1,0,1,0,-1,0,-1,0,1,0,1,0]
size=100
maxn = size*size*size
# vertices
n=range(maxn)
v=[]
for d in range(0,12):
v.append([0]*maxn)
center=int(maxn/2)
v[0][center]=init[0]
v[1][center]=init[1]
v[2][center]=init[2]
v[3][center]=init[3]
v[4][center]=init[4]
v[5][center]=init[5]
v[6][center]=init[6]
v[7][center]=init[7]
v[8][center]=init[8]
v[9][center]=init[9]
v[10][center]=init[10]
v[11][center]=init[11]
print("creating verts")
verts = cudf.DataFrame(
{
'id':n,
'v0r':v[0],
'v0i':v[1],
'v1r':v[2],
'v1i':v[3],
'v2r':v[4],
'v2i':v[5],
'v3r':v[6],
'v3i':v[7],
'v4r':v[8],
'v4i':v[9],
'v5r':v[10],
'v5i':v[11]
}
)
print("done creating verts")
#print(verts)
# edges
src=[]
dst=[]
m0=[]
m1=[]
m2=[]
m3=[]
m4=[]
m5=[]
for n in range(maxn):
n0=n-1
if n0>=0:
src.append(n)
dst.append(n0)
m0.append(1)
m1.append(0)
m2.append(0)
m3.append(0)
m4.append(0)
m5.append(0)
n1=n+1
if n1<maxn:
src.append(n)
dst.append(n1)
m0.append(0)
m1.append(1)
m2.append(0)
m3.append(0)
m4.append(0)
m5.append(0)
n2=n-size
if n2>=0:
src.append(n)
dst.append(n2)
m0.append(0)
m1.append(0)
m2.append(1)
m3.append(0)
m4.append(0)
m5.append(0)
n3=n+size
if n3<maxn:
src.append(n)
dst.append(n3)
m0.append(0)
m1.append(0)
m2.append(0)
m3.append(1)
m4.append(0)
m5.append(0)
n4=n-size*size
if n4>=0:
src.append(n)
dst.append(n4)
m0.append(0)
m1.append(0)
m2.append(0)
m3.append(0)
m4.append(1)
m5.append(0)
n5=n+size*size
if n5<maxn:
src.append(n)
dst.append(n5)
m0.append(0)
m1.append(0)
m2.append(0)
m3.append(0)
m4.append(0)
m5.append(1)
print("creating edges")
edges = cudf.DataFrame(
{
'id':src,
'dst':dst,
'm0':m0,
'm1':m1,
'm2':m2,
'm3':m3,
'm4':m4,
'm5':m5
}
)
print("done creating edges")
#print(edges)
# Start loop: each iteration is one step of the walk on the 3-D lattice.
for iter in range(iters):
    # Attach the six per-vertex amplitude components to every directed edge.
    m = verts.merge(edges, on=['id'], how='inner')
    #print(len(m))
    # Coin step: for direction d, p_dr = (sum of all six components with the
    # d-th one weighted -2)/3 = (2/6)*sum - own, i.e. the Grover diffusion
    # applied per vertex.  Only real components are propagated; the
    # imaginary columns are never updated in this loop (presumably kept
    # zero by the chosen initial state -- confirm).
    m['p0r']=(-2 * m.v0r*m.m0 + m.v1r*m.m1 + m.v2r*m.m2 + m.v3r*m.m3 + m.v4r*m.m4 + m.v5r*m.m5 )/3
    m['p1r']=(m.v0r*m.m0 + -2 * m.v1r*m.m1 + m.v2r*m.m2 + m.v3r*m.m3 + m.v4r*m.m4 + m.v5r*m.m5 )/3
    m['p2r']=(m.v0r*m.m0 + m.v1r*m.m1 + -2 * m.v2r*m.m2 + m.v3r*m.m3 + m.v4r*m.m4 + m.v5r*m.m5 )/3
    m['p3r']=(m.v0r*m.m0 + m.v1r*m.m1 + m.v2r*m.m2 + -2 * m.v3r*m.m3 + m.v4r*m.m4 + m.v5r*m.m5 )/3
    m['p4r']=(m.v0r*m.m0 + m.v1r*m.m1 + m.v2r*m.m2 + m.v3r*m.m3 + -2 * m.v4r*m.m4 + m.v5r*m.m5 )/3
    m['p5r']=(m.v0r*m.m0 + m.v1r*m.m1 + m.v2r*m.m2 + m.v3r*m.m3 + m.v4r*m.m4 + -2 * m.v5r*m.m5 )/3
    # Shift step: accumulate each edge's contribution at its destination
    # vertex.
    s = m.groupby('dst').sum().reset_index()
    #print(s)
    # The summed p columns become the new vertex amplitudes.
    s['id'] = s.dst
    s['v0r'] = s.p0r
    s['v1r'] = s.p1r
    s['v2r'] = s.p2r
    s['v3r'] = s.p3r
    s['v4r'] = s.p4r
    s['v5r'] = s.p5r
    # 'norm' is just the component sum per vertex (not an L2 norm),
    # used below to show which vertices carry positive amplitude.
    s['norm'] = s.p0r+s.p1r+s.p2r+s.p3r+s.p4r+s.p5r
    verts = s.loc[:,['id','v0r','v1r','v2r','v3r','v4r','v5r']]
    n = s[s.norm > 0].loc[:,'norm']
    print(n)
    print(iter,'-----------------------------------------------')
| Python | 0.99997 | |
a911d8720ad7dd8bfff2fa4230e1a4cef1a232f5 | add logistic | logRegres.py | logRegres.py | '''
Created on Oct 27, 2015
Logistic Regression Working Module
@author: Gu
'''
from numpy import *
def loadDataSet():
    """Read testSet.txt (rows of "x1 x2 label") into (features, labels).

    Each feature row is [1.0, x1, x2]; the constant 1.0 is the bias term.
    Returns (list of feature rows, list of int labels).
    """
    dataMat = []
    labelMat = []
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open('testSet.txt') as fr:
        for line in fr:
            lineArr = line.strip().split()
            dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
            labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
def sigmoid(inX):
    """Numerically stable logistic function 1 / (1 + e^-x).

    Accepts scalars or numpy arrays.  The argument is clipped to
    [-500, 500] so exp() cannot overflow for extreme inputs; within
    double precision the returned value is unchanged.
    """
    return 1.0 / (1 + exp(-clip(inX, -500, 500)))
def gradAscent(dataMatIn, classLabels):
    """Batch gradient ascent for logistic regression.

    dataMatIn: m x n samples (bias column included); classLabels: m labels.
    Returns the fitted n x 1 weight matrix after 500 fixed-rate steps.
    """
    data = mat(dataMatIn)                      # convert to NumPy matrix
    labels = mat(classLabels).transpose()      # column vector of labels
    n_features = shape(data)[1]
    alpha = 0.001
    max_cycles = 500
    weights = ones((n_features, 1))
    for _ in range(max_cycles):
        predictions = sigmoid(data * weights)  # matrix multiply
        residual = labels - predictions
        weights = weights + alpha * data.transpose() * residual
    return weights
def plotBestFit(weights):
    # Scatter the two classes from testSet.txt and overlay the decision
    # boundary w0 + w1*x + w2*y = 0 implied by the fitted weights.
    # Opens a GUI window; no return value.
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    # split points into the class-1 and class-0 series
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1]); ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1]); ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    # boundary: y = -(w0 + w1*x)/w2, where the sigmoid input is zero
    y = (-weights[0]-weights[1]*x)/weights[2]
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2');
    plt.show()
def stocGradAscent0(dataMatrix, classLabels):
    """One pass of stochastic gradient ascent with a fixed step size.

    Visits every sample once in order; returns the 1-D weight vector.
    """
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)  # start from all-ones weights
    for sample_idx in range(m):
        estimate = sigmoid(sum(dataMatrix[sample_idx] * weights))
        residual = classLabels[sample_idx] - estimate
        weights = weights + alpha * residual * dataMatrix[sample_idx]
    return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Stochastic gradient ascent with decaying step size and random order.

    alpha shrinks as 4/(1+j+i)+0.0001 -- later updates are smaller but the
    rate never reaches zero.  Each epoch visits every sample exactly once,
    in random order.  Returns the fitted 1-D weight vector.
    """
    m, n = shape(dataMatrix)
    weights = ones(n)  # initialize to all ones
    for j in range(numIter):
        # must be a list: range() is immutable on Python 3 and would break
        # the del below (original used a bare range)
        dataIndex = list(range(m))
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.0001
            randPos = int(random.uniform(0, len(dataIndex)))
            # Fix: take the sample index *from dataIndex*; the original
            # indexed dataMatrix[randPos] directly, so the book-kept
            # without-replacement list was never actually used.
            sampleIdx = dataIndex[randPos]
            h = sigmoid(sum(dataMatrix[sampleIdx] * weights))
            error = classLabels[sampleIdx] - h
            weights = weights + alpha * error * dataMatrix[sampleIdx]
            del dataIndex[randPos]  # without replacement within the epoch
    return weights
def classifyVector(inX, weights):
    """Threshold the logistic output at 0.5 into a hard 0.0/1.0 label."""
    return 1.0 if sigmoid(sum(inX * weights)) > 0.5 else 0.0
def colicTest():
    # Train on horseColicTraining.txt and evaluate on horseColicTest.txt.
    # Each row: 21 tab-separated features followed by the 0/1 label.
    # Returns the test-set error rate (also printed).
    frTrain = open('horseColicTraining.txt'); frTest = open('horseColicTest.txt')
    trainingSet = []; trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    # 1000 epochs of stochastic gradient ascent
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        # count a misclassification when the hard label disagrees
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = (float(errorCount)/numTestVec)
    print ("the error rate of this test is: %f" % errorRate)
    return errorRate
def multiTest():
    """Run colicTest ten times and report the mean error rate."""
    numTests = 10
    errorSum = 0.0
    for _ in range(numTests):
        errorSum += colicTest()
    print ("after %d iterations the average error rate is: %f" % (numTests, errorSum/float(numTests)))
weights=ones((3,1))
print(weights)
| Python | 0.999956 | |
cbb41df3d458b22121eb044f06763fd6d1ed8120 | Working, outputs multiple passwords | passwordy.py | passwordy.py | import random
import string
import sys
# Better way for lots of imports other than import * ?
from PyQt5 import QtCore, QtGui
from PyQt5.QtGui import QBrush, QIcon, QPalette, QPixmap
from PyQt5.QtWidgets import QApplication, QCheckBox, QComboBox, QDesktopWidget, QGridLayout, QInputDialog, QLabel, QLineEdit, QMessageBox, QPushButton, QSizePolicy, QSpinBox, QTextEdit, QWidget
class Passwordy(QWidget):
    """Main window: checkbox-driven random password generator.

    The left column holds the character-class checkboxes and spin boxes;
    the right column holds the output box and the generate button.
    """
    def __init__(self, parent = None):
        """Build all widgets, lay them out in a grid and style the window."""
        super(Passwordy, self).__init__(parent)
        # NOTE(review): QWidget is already initialized by the super() call
        # above; this second explicit __init__ looks redundant -- confirm
        # before removing.
        QWidget.__init__(self)
        # Create Labels
        # Choices
        self.numbers_label = QLabel('Numbers')
        self.lowercase_label_label = QLabel('Lowercase letters')
        self.uppercase_label = QLabel('Uppercase letters')
        self.special_characters_label = QLabel('Special Characters')
        # Input
        self.number_of_characters_label = QLabel('Number of Characters')
        self.number_of_passwords_label = QLabel('Number of Passwords')
        # Output
        self.generated_passwords_label = QLabel('Generated Passwords :')
        # Create checkboxes
        self.numbers_checkbox = QCheckBox()
        self.lowercase_checkbox = QCheckBox()
        self.uppercase_checkbox = QCheckBox()
        self.special_characters_checkbox = QCheckBox()
        # Create number boxes (both clamped to 1..64)
        self.number_of_passwords = QSpinBox()
        self.number_of_passwords.setMinimum(1)
        self.number_of_passwords.setMaximum(64)
        self.number_of_characters = QSpinBox()
        self.number_of_characters.setMinimum(1)
        self.number_of_characters.setMaximum(64)
        # Create output box
        self.password_output = QTextEdit()
        self.password_output.setReadOnly(True)
        self.password_output.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
        self.password_output.setStyleSheet('font: bold 12px;background: #FFFFFF; border: 1px solid #272727')
        # Create password generate button
        self.generate_password_button = QPushButton('Generate Password')
        self.generate_password_button.setStyleSheet('font: 12px; background-color:#FFFFFF; border: 1px solid #272727')
        # Create layout, add widgets
        self.grid = QGridLayout()
        # Add widget to row, label to same row, next column
        self.grid.addWidget(self.numbers_checkbox, 0, 0)
        self.grid.addWidget(self.numbers_label, 0, 1)
        self.grid.addWidget(self.lowercase_checkbox, 1, 0)
        self.grid.addWidget(self.lowercase_label_label, 1, 1)
        self.grid.addWidget(self.uppercase_checkbox, 2, 0)
        self.grid.addWidget(self.uppercase_label, 2, 1)
        self.grid.addWidget(self.special_characters_checkbox, 3, 0)
        self.grid.addWidget(self.special_characters_label, 3, 1)
        self.grid.addWidget(self.number_of_characters, 4, 0)
        self.grid.addWidget(self.number_of_characters_label, 4, 1)
        self.grid.addWidget(self.number_of_passwords, 5, 0)
        self.grid.addWidget(self.number_of_passwords_label, 5, 1)
        # Add these to next column
        self.grid.addWidget(self.generated_passwords_label, 0, 2)
        self.grid.addWidget(self.password_output, 1, 2, 4, 1)
        self.grid.addWidget(self.generate_password_button, 5, 2)
        # Set layout
        self.setLayout(self.grid)
        # Connect password generate button to password generate functions
        self.generate_password_button.clicked.connect(self.generate_passwords)
        # Set window
        self.setFixedSize(500, 250)
        self.setWindowTitle('Passwordy')
        self.setWindowIcon(QIcon('../assets/padlock.png'))
        self.setStyleSheet('background: #FFFFFF')
    def generate_passwords(self):
        """Generate the requested passwords and show them in the output box.

        Pools characters from every checked class; warns via a popup when
        no checkbox is selected.
        NOTE(review): random.choice is not cryptographically secure --
        consider the `secrets` module for real password generation.
        """
        # Clear the output box
        self.password_output.setText('')
        # Set strings to get characters from
        # Numbers
        numbers = string.digits
        # Letters
        lowercase = string.ascii_lowercase
        uppercase = string.ascii_uppercase
        # Special Characters
        special_characters = '!@#$%^&*()\{\}[]?,.'
        # Init output character string
        output_characters = ''
        # Init empty password list
        final_password_list = []
        # Check user has used a checkbox, add characters from strings relative to checkboxes, generate password
        if True in [self.numbers_checkbox.isChecked(),
                    self.lowercase_checkbox.isChecked(),
                    self.uppercase_checkbox.isChecked(),
                    self.special_characters_checkbox.isChecked()]:
            # string * bool yields the string when checked, '' otherwise
            output_characters = (numbers * self.numbers_checkbox.isChecked()
                                 + lowercase * self.lowercase_checkbox.isChecked()
                                 + uppercase * self.uppercase_checkbox.isChecked()
                                 + special_characters * self.special_characters_checkbox.isChecked())
            # Check how many passwords the user requires, generate for that amount
            for i in range(0, self.number_of_passwords.value()):
                password = ''.join(random.choice(output_characters) for i in range(self.number_of_characters.value()))
                final_password_list.append(password)
        # If user hasn't selected a checkbox, inform them in a popup
        else:
            informer = QMessageBox()
            #informer.setWindowTitle('Passwordy - Error')
            informer.setStandardButtons(QMessageBox.Ok)
            informer.setDefaultButton(QMessageBox.Ok)
            # Warning text
            informer.setText('Error: ' + '\n' + 'You must make a selection using one of the checkboxes, please try again...')
            informer.exec_()
        # Add each password in the password list to the output box
        for i in final_password_list:
            self.password_output.append(i)
# The usual
def main():
    """Create the Qt application, show the main window, run the event loop."""
    app = QApplication(sys.argv)
    window = Passwordy()
    window.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
main()
| Python | 0.999794 | |
7720fbc1d8a81430c38598fd96b95d8b4da4a74c | fix a bug about can not import ChineseAnalyzer with change tab to 4 wihte spaces under PEP8 | jieba/analyse/__init__.py | jieba/analyse/__init__.py | import jieba
import os
try:
from analyzer import ChineseAnalyzer
except ImportError:
pass
# Locate idf.txt next to this module regardless of the caller's cwd.
_curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
f_name = os.path.join(_curpath,"idf.txt")
content = open(f_name,'rb').read().decode('utf-8')
# Map each word to its precomputed inverse-document-frequency weight.
# NOTE(review): a trailing newline in idf.txt would yield an empty last
# line and a ValueError on split -- the shipped file presumably has none.
idf_freq = {}
lines = content.split('\n')
for line in lines:
    word,freq = line.split(' ')
    idf_freq[word] = float(freq)
# Fallback IDF for unseen words: the median of all known IDF values.
# (Python 2 integer division selects the middle index.)
median_idf = sorted(idf_freq.values())[len(idf_freq)/2]
# Common English function words excluded from keyword extraction.
stop_words= set([
"the","of","is","and","to","in","that","we","for","an","are","by","be","as","on","with","can","if","from","which","you","it","this","then","at","have","all","not","one","has","or","that"
])
def extract_tags(sentence,topK=20):
    # Rank the words of `sentence` by TF-IDF and return the topK keywords.
    # Python 2 code: relies on dict.iteritems() below.
    words = jieba.cut(sentence)
    freq = {}
    for w in words:
        if len(w.strip())<2: continue  # drop single chars / pure whitespace
        if w.lower() in stop_words: continue  # drop English stop words
        freq[w]=freq.get(w,0.0)+1.0
    total = sum(freq.values())
    freq = [(k,v/total) for k,v in freq.iteritems()]  # raw counts -> term frequency
    # TF * IDF, falling back to the median IDF for out-of-vocabulary words
    tf_idf_list = [(v * idf_freq.get(k,median_idf),k) for k,v in freq]
    st_list = sorted(tf_idf_list,reverse=True)
    top_tuples= st_list[:topK]
    tags = [a[1] for a in top_tuples]
    return tags
| import jieba
import os
try:
from analyzer import ChineseAnalyzer
except ImportError:
pass
# Locate idf.txt next to this module regardless of the caller's cwd.
_curpath=os.path.normpath( os.path.join( os.getcwd(), os.path.dirname(__file__) ) )
f_name = os.path.join(_curpath,"idf.txt")
content = open(f_name,'rb').read().decode('utf-8')
# Map each word to its precomputed inverse-document-frequency weight.
idf_freq = {}
lines = content.split('\n')
for line in lines:
    word,freq = line.split(' ')
    idf_freq[word] = float(freq)
# Fallback IDF for unseen words: the median of all known IDF values.
median_idf = sorted(idf_freq.values())[len(idf_freq)/2]
# Common English function words excluded from keyword extraction.
stop_words= set([
"the","of","is","and","to","in","that","we","for","an","are","by","be","as","on","with","can","if","from","which","you","it","this","then","at","have","all","not","one","has","or","that"
])
def extract_tags(sentence,topK=20):
    # Rank the words of `sentence` by TF-IDF and return the topK keywords.
    # Python 2 code: relies on dict.iteritems() below.
    words = jieba.cut(sentence)
    freq = {}
    for w in words:
        if len(w.strip())<2: continue  # drop single chars / pure whitespace
        if w.lower() in stop_words: continue  # drop English stop words
        freq[w]=freq.get(w,0.0)+1.0
    total = sum(freq.values())
    freq = [(k,v/total) for k,v in freq.iteritems()]  # raw counts -> term frequency
    # TF * IDF, falling back to the median IDF for out-of-vocabulary words
    tf_idf_list = [(v * idf_freq.get(k,median_idf),k) for k,v in freq]
    st_list = sorted(tf_idf_list,reverse=True)
    top_tuples= st_list[:topK]
    tags = [a[1] for a in top_tuples]
    return tags
| Python | 0 |
ec96ce58076ba5aa54abeb423937a629cbe1e3d5 | Work in progress | logparser.py | logparser.py | #!/usr/bin/python
""" Log parser. """
from HTMLParser import HTMLParser
import urllib
class DailyParser(HTMLParser):
    """
    HTML parser for the donations log of Wikimedia France

    Attributes:
        status (int): status variable of the parser.
        donations (list data.Donation): list of donations read.
    """
    # Parser states, advanced as the donation table is traversed.
    START_PARSER = 0
    FOUND_DONATION_TABLE = 1
    READ_HOURS = 2
    READ_DONATOR = 3
    READ_DONATION = 4
    END_OF_DONATION_TABLE = 5
    def __init__(self):
        # Bug fix: super() was previously called with the undefined name
        # 'DonationsParser' (NameError).  HTMLParser is also an old-style
        # class on Python 2, so call its __init__ directly.
        HTMLParser.__init__(self)
        self.status = DailyParser.START_PARSER
        self.donations = []
    # The three callbacks below are stubs: parsing is work in progress.
    def handle_starttag(self, tag, attrs):
        pass
    def handle_endtag(self, tag):
        pass
    def handle_data(self, data):
        pass
class LogParser:
    """Fetches daily donation pages and feeds them to a DailyParser."""
    def __init__(self):
        self.parser = DailyParser()
    @staticmethod
    def daypage(day):
        """ Returns the page content containing the donations from a specific
        day.

        Args:
            day (datetime.date): day to fetch donation.

        Returns:
            str: page content with the donation of the day specified as args.
        """
        # Bug fix: the parameter is named 'day'; the previous code called
        # strftime on the undefined name 'date' (NameError on every call).
        url_args = day.strftime("%Y-%m-%d")
        url = "https://dons.wikimedia.fr/journal/%s" % url_args
        return urllib.urlopen(url).read()
    def fetchday(self, day):
        """ Returns donations from a day. """
        day_content = self.daypage(day)
        self.parser.feed(day_content)
| Python | 0.000003 | |
162b82b64d319e0c854c08b3bd2e412ab5e67d97 | add pytables testing file | blaze/compute/tests/test_pytables_compute.py | blaze/compute/tests/test_pytables_compute.py | from __future__ import absolute_import, division, print_function
import pytest
tables = pytest.importorskip('tables')
import numpy as np
import tempfile
from contextlib import contextmanager
import os
from blaze.compute.core import compute
from blaze.compute.pytables import *
from blaze.compute.numpy import *
from blaze.expr.table import *
from blaze.compatibility import xfail
# Symbolic table expression and the concrete numpy data it is evaluated
# against; the structured dtype mirrors the TableSymbol schema.
t = TableSymbol('t', '{id: int, name: string, amount: int}')
x = np.array([(1, 'Alice', 100),
              (2, 'Bob', -200),
              (3, 'Charlie', 300),
              (4, 'Denis', 400),
              (5, 'Edith', -500)],
             dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
@contextmanager
def data():
    # Materialize x into a throwaway HDF5 file and yield the PyTables
    # table; the file is closed and removed on normal exit.
    # NOTE(review): cleanup is skipped if the test body raises, because
    # there is no try/finally around the yield.
    filename = tempfile.mktemp()
    f = tables.open_file(filename, 'w')
    d = f.createTable('/', 'title',  x)
    yield d
    d.close()
    f.close()
    os.remove(filename)
def eq(a, b):
    """True iff every element of a equals the matching element of b."""
    return np.all(a == b)
def test_table():
    # compute() on the bare symbol returns the backing table itself
    with data() as d:
        assert compute(t, d) == d
def test_projection():
    # single-column projection matches numpy field access
    with data() as d:
        assert eq(compute(t['name'], d), x['name'])
@xfail(reason="ColumnWise not yet supported")
def test_eq():
    with data() as d:
        assert eq(compute(t['amount'] == 100, d),
                  x['amount'] == 100)
def test_selection():
    # Bug fix: the expected array must apply the same predicate as the
    # computed expression; it previously filtered on == 0, which selects
    # nothing from x and can never equal the == 100 result.
    with data() as d:
        assert eq(compute(t[t['amount'] == 100], d), x[x['amount'] == 100])
        assert eq(compute(t[t['amount'] < 0], d), x[x['amount'] < 0])
@xfail(reason="ColumnWise not yet supported")
def test_arithmetic():
    # elementwise column arithmetic should match plain numpy
    with data() as d:
        assert eq(compute(t['amount'] + t['id'], d),
                  x['amount'] + x['id'])
        assert eq(compute(t['amount'] * t['id'], d),
                  x['amount'] * x['id'])
        assert eq(compute(t['amount'] % t['id'], d),
                  x['amount'] % x['id'])
def test_Reductions():
    # count is the only reduction exercised against the PyTables backend
    with data() as d:
        assert compute(t['amount'].count(), d) == len(x['amount'])
@xfail(reason="TODO: sorting could work if on indexed column")
def test_sort():
    with data() as d:
        assert eq(compute(t.sort('amount'), d),
                  np.sort(x, order='amount'))
        assert eq(compute(t.sort('amount', ascending=False), d),
                  np.sort(x, order='amount')[::-1])
        assert eq(compute(t.sort(['amount', 'id']), d),
                  np.sort(x, order=['amount', 'id']))
def test_head():
    # head(n) is the first n rows of the backing array
    with data() as d:
        assert eq(compute(t.head(2), d),
                  x[:2])
| Python | 0 | |
67df732067847af15e41b8eed05137b6ab2bb6d2 | add __version__ (forgot to commit) | libcutadapt/__init__.py | libcutadapt/__init__.py | __version__ = '0.9.2'
| Python | 0.000001 | |
188d583caea0e640f41e400839552fe593154eda | Set 2, challenge 9 completed. | set2/crypto9.py | set2/crypto9.py | #!/usr/local/bin/python
__author__ = 'Walshman23'
import sys
sys.path.insert(1, "../common") # Want to locate modules in our 'common' directory
# A block cipher transforms a fixed-sized block (usually 8 or 16 bytes) of plaintext into ciphertext.
# But we almost never want to transform a single block; we encrypt irregularly-sized messages.
#
# One way we account for irregularly-sized messages is by padding, creating a plaintext that is an even
# multiple of the blocksize. The most popular padding scheme is called PKCS#7.
#
# So: pad any block to a specific block length, by appending the number of bytes of padding to the end of the block.
# For instance,
#
# "YELLOW SUBMARINE"
#
# ... padded to 20 bytes would be:
#
# "YELLOW SUBMARINE\x04\x04\x04\x04"
# Get block from stdin
# Use 16 as block size
blocksize=16
buf = sys.stdin.read()
if len(buf) < blocksize:
padlen = blocksize - len(buf)
else:
padlen = len(buf) % blocksize
sys.stdout.write(buf)
if padlen != 0:
sys.stdout.write(chr(padlen)*padlen)
| Python | 0 | |
ed33a8dc90468f2873a4a581c22027f10d9393d4 | Add Wordpress_2_Instances testcase | heat/tests/functional/test_WordPress_2_Intances.py | heat/tests/functional/test_WordPress_2_Intances.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
import os
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
    'WordPress_2_Instances.template'])
class WordPress2Instances(unittest.TestCase):
    """Functional test: boot a 2-instance WordPress stack and verify it.

    setUp provisions real infrastructure (DB + web instances); the test
    then checks the installation and tears the stack down.
    """
    def setUp(self):
        # Template drives the stack topology; the DB password is taken
        # from the environment so it is never committed.
        template = 'WordPress_2_Instances.template'
        stack_paramstr = ';'.join(['InstanceType=m1.xlarge',
                                   'DBUsername=dbuser',
                                   'DBPassword=' + os.environ['OS_PASSWORD']])

        self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools',
            stack_paramstr)

        # Wait for both instances to finish cloud-init provisioning
        # before the test body runs.
        self.DatabaseServer = util.Instance('DatabaseServer')
        self.DatabaseServer.check_cfntools()
        self.DatabaseServer.wait_for_provisioning()

        self.WebServer = util.Instance('WebServer')
        self.WebServer.check_cfntools()
        self.WebServer.wait_for_provisioning()

    def test_instance(self):
        # ensure wordpress was installed
        self.assertTrue(self.WebServer.file_present
                        ('/etc/wordpress/wp-config.php'))
        print "Wordpress installation detected"

        # Verify the output URL parses as expected, ie check that
        # the wordpress installation is operational
        stack_url = self.stack.get_stack_output("WebsiteURL")
        print "Got stack output WebsiteURL=%s, verifying" % stack_url
        ver = verify.VerifyStack()
        self.assertTrue(ver.verify_wordpress(stack_url))

        self.stack.cleanup()
| Python | 0 | |
fb7bc8af34f3ed375d30b43655366e6368080e76 | Create Import_Libraries.py | home/INMOOV/Config/ExtraConfig/Import_Libraries.py | home/INMOOV/Config/ExtraConfig/Import_Libraries.py | from java.lang import String
from org.myrobotlab.net import BareBonesBrowserLaunch
from datetime import datetime
from subprocess import Popen, PIPE
#######################
import threading
import time
import random
import urllib, urllib2
import json
import io
import itertools
import textwrap
import codecs
import socket
import os
import shutil
import hashlib
import subprocess
import csv
| Python | 0 | |
4de971725601ed5f630ec103ad01cf5c624ad866 | Add the occupancy sensor_class (#3176) | homeassistant/components/binary_sensor/__init__.py | homeassistant/components/binary_sensor/__init__.py | """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SENSOR_CLASSES = [
None, # Generic on/off
'cold', # On means cold (or too cold)
'connectivity', # On means connection present, Off = no connection
'gas', # CO, CO2, etc.
'heat', # On means hot (or too hot)
'light', # Lightness threshold
'moisture', # Specifically a wetness sensor
'motion', # Motion sensor
'moving', # On means moving, Off means stopped
'occupancy', # On means occupied, Off means not occupied
'opening', # Door, window, etc.
'power', # Power, over-current, etc
'safety', # Generic on=unsafe, off=safe
'smoke', # Smoke detector
'sound', # On means sound detected, Off means no sound
'vibration', # On means vibration detected, Off means no vibration
]
SENSOR_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(SENSOR_CLASSES))
def setup(hass, config):
    """Track states and offer events for binary sensors."""
    logger = logging.getLogger(__name__)
    component = EntityComponent(logger, DOMAIN, hass, SCAN_INTERVAL)
    component.setup(config)
    return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
    """Represent a binary sensor."""

    @property
    def is_on(self):
        """Return True if the binary sensor is on."""
        # Base-class default; concrete platforms override this.
        return None

    @property
    def state(self):
        """Return the state of the binary sensor."""
        if self.is_on:
            return STATE_ON
        return STATE_OFF

    @property
    def sensor_class(self):
        """Return the class of this sensor, from SENSOR_CLASSES."""
        return None

    @property
    def state_attributes(self):
        """Return device specific state attributes."""
        klass = self.sensor_class
        if klass is None:
            return {}
        return {'sensor_class': klass}
| """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
SENSOR_CLASSES = [
None, # Generic on/off
'cold', # On means cold (or too cold)
'connectivity', # On means connection present, Off = no connection
'gas', # CO, CO2, etc.
'heat', # On means hot (or too hot)
'light', # Lightness threshold
'moisture', # Specifically a wetness sensor
'motion', # Motion sensor
'moving', # On means moving, Off means stopped
'opening', # Door, window, etc.
'power', # Power, over-current, etc
'safety', # Generic on=unsafe, off=safe
'smoke', # Smoke detector
'sound', # On means sound detected, Off means no sound
'vibration', # On means vibration detected, Off means no vibration
]
SENSOR_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(SENSOR_CLASSES))
def setup(hass, config):
    """Track states and offer events for binary sensors."""
    # EntityComponent wires platform discovery and polling (SCAN_INTERVAL).
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
    component.setup(config)
    return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
    """Represent a binary sensor."""
    @property
    def is_on(self):
        """Return True if the binary sensor is on."""
        # Base-class default; concrete platforms override this.
        return None
    @property
    def state(self):
        """Return the state of the binary sensor."""
        return STATE_ON if self.is_on else STATE_OFF
    @property
    def sensor_class(self):
        """Return the class of this sensor, from SENSOR_CLASSES."""
        return None
    @property
    def state_attributes(self):
        """Return device specific state attributes."""
        # Only expose sensor_class when a platform actually defines one.
        attr = {}
        if self.sensor_class is not None:
            attr['sensor_class'] = self.sensor_class
        return attr
| Python | 0.000009 |
6f0b5a0dc44269d9e72f3698317604d90d6cecf3 | add script for migrate user mailchimp | scripts/fix_user_mailchimp.py | scripts/fix_user_mailchimp.py | import logging
import sys
from datetime import datetime
from django.db import transaction
from django.utils import timezone
from website.app import setup_django
setup_django()
from osf.models import OSFUser
from scripts import utils as script_utils
from website.mailchimp_utils import subscribe_mailchimp
from website import settings
logger = logging.getLogger(__name__)
def main():
    """Subscribe users registered in a fixed window to the mailing list.

    Targets accounts created 2017-12-20 08:25:25 - 18:05:00 (server tz).
    With --dry the transaction is aborted so nothing is committed.
    """
    dry = '--dry' in sys.argv
    if not dry:
        # If we're not running in dry mode log everything to a file
        script_utils.add_file_logger(logger, __file__)
    with transaction.atomic():
        # Naive datetimes made aware using the server's current tzinfo.
        start_time = datetime.strptime('2017-12-20 08:25:25', '%Y-%m-%d %H:%M:%S')
        start_time = start_time.replace(tzinfo=timezone.now().tzinfo)
        end_time = datetime.strptime('2017-12-20 18:05:00', '%Y-%m-%d %H:%M:%S')
        end_time = end_time.replace(tzinfo=timezone.now().tzinfo)
        users = OSFUser.objects.filter(is_registered=True, date_disabled__isnull=True, date_registered__range=[start_time, end_time])
        if not dry:
            for user in users:
                subscribe_mailchimp(settings.MAILCHIMP_GENERAL_LIST, user._id)
                logger.info('User {} has been subscribed to OSF general mailing list'.format(user._id))
        logger.info('{} users have been subscribed to OSF general mailing list'.format(users.count()))
        if dry:
            # Rolling back via an exception is the script's dry-run idiom.
            raise Exception('Abort Transaction - Dry Run')
    print('Done')
if __name__ == '__main__':
    main()
| Python | 0 | |
9571acd941cb7ecac96676ead87c43fadda3e74f | Create TimeUpload.py | TimeUpload.py | TimeUpload.py | from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import time
import csv
# Google Drive file id of the running CSV log; updated after each upload.
timeID='0B9ffTjUEqeFEZ28zdTRhMlJlY0k'
# Ten iterations: download the CSV, append the current timestamp, re-upload.
# NOTE(review): the full OAuth dance is repeated every iteration -- the
# authorized `drive` object could be created once before the loop.
for i in range(10):
    #get the curret time
    date_time=time.asctime()
    date_time_split=date_time.split(' ') #gives a list with the date and time components
    time_only=date_time_split[3] # gives just the current time
    date_only = str(date_time_split[1] + ' ' + date_time_split[2]+' ' +date_time_split[4])
    #get the current csv from the GDrive and append the date and time and upload the new file to Gdrive
    gauth = GoogleAuth()
    # Try to load saved client credentials
    gauth.LoadCredentialsFile("mycreds.txt")
    if gauth.credentials is None:
        # Authenticate if they're not there
        gauth.LocalWebserverAuth()
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()
    # Save the current credentials to a file
    gauth.SaveCredentialsFile("mycreds.txt")
    drive = GoogleDrive(gauth)
    #Download the prior file that we will append the new data to
    current=drive.CreateFile({'id': timeID})
    current.GetContentFile('current.csv')
    #delete the prior data file to keep these files from accumulating on the GDrive
    #current.DeleteFile(timeID)
    with open('current.csv', 'a') as csvfile:
        fieldnames = ['Time', 'Date']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writerow({'Time': time_only, 'Date': date_only})
        # NOTE(review): redundant -- the `with` block already closes the file.
        csvfile.close()
    file1 = drive.CreateFile({'title':'time.csv', 'id': timeID}) #open a new file on the GDrive
    file1.SetContentFile('current.csv') #sets the file content to the CSV file created above from the working directory
    file1.Upload() #upload the file
    timeID=file1['id']
    time.sleep(30) #pause for 30seconds
| Python | 0 | |
7c6bbe3860e7cce0f464dc0d95683de3c5ca57a5 | Add test of `ResNet50FeatureProducer()` | testci/test_resnet50_feature.py | testci/test_resnet50_feature.py | from PIL import Image
import collections
import datetime
import numpy as np
import pytest
from pelops.features.resnet50 import ResNet50FeatureProducer
@pytest.fixture
def img_data():
    # 3x3 RGB checkerboard of pure black and white pixels.
    DATA = [[[ 0, 0, 0],
             [255, 255, 255],
             [ 0, 0, 0]],
            [[255, 255, 255],
             [ 0, 0, 0],
             [255, 255, 255]],
            [[ 0, 0, 0],
             [255, 255, 255],
             [ 0, 0, 0]]]
    return np.array(DATA, dtype=np.uint8)
@pytest.fixture
def chip_producer(img_data):
    # Minimal chip-producer stand-in: a dict mapping filepath -> Chip tuple.
    Chip = collections.namedtuple("Chip", ["filepath", "car_id", "cam_id", "time", "img_data", "misc"])
    CHIPS = (
        # filepath, car_id, cam_id, time, img_data, misc
        ("car1_cam1.png", 1, 1, datetime.datetime(2016, 10, 1, 0, 1, 2, microsecond=100), img_data, {}),
    )
    chip_producer = {"chips": {}}
    for filepath, car_id, cam_id, time, img_data, misc in CHIPS:
        chip = Chip(filepath, car_id, cam_id, time, img_data, misc)
        chip_producer["chips"][filepath] = chip
    return chip_producer
@pytest.fixture
def feature_producer(chip_producer):
    res = ResNet50FeatureProducer(chip_producer)
    return res
def test_features(feature_producer, chip_producer):
    # ResNet50 pooling output is a 2048-dim vector per chip, and it
    # should not be identically zero for a real image.
    for _, chip in chip_producer["chips"].items():
        features = feature_producer.produce_features(chip)
        assert features.shape == (1, 2048)
        assert np.sum(features) != 0
def test_preprocess_image(feature_producer, img_data):
    # Preprocessing resizes to the network's 224x224x3 input, batched.
    img = Image.fromarray(img_data)
    img_resized = feature_producer.preprocess_image(img, 224, 224)
    assert img_resized.shape == (1, 224, 224, 3)
| Python | 0 | |
d8a3f92a06971ba6fe24f71914a466ff91f00f5f | Create WikiBot3.5.py | WikiBot3.5.py | WikiBot3.5.py | import discord
import wikipedia
# SECURITY NOTE(review): a live-looking Discord bot token is hardcoded
# and committed here -- it should be revoked and loaded from an
# environment variable or config file instead.
token = "Mjg3NjU2MjM1MjU0NDE1MzYx.C-5xKQ.khJ9dPouM9783FMA0Ht-92XkS6A"
# Intended lookup language; currently unused by the wikipedia calls below.
language = "en"
client = discord.Client()
@client.event
async def on_ready():
    # Startup confirmation in the console.
    print("Bot is ready")
    print(client.user.name)
    print(client.user.id)
@client.event
async def on_server_join(server):
    # Greet the default channel when the bot is added to a server.
    await client.send_message(server.default_channel, "Oi, i'm the WikiBot! https://en.wikipedia.org/wiki/Main_Page")
@client.event
async def on_message(message):
    # DMs (not from the bot itself) are treated entirely as a query;
    # in channels the bot only reacts when mentioned.
    if message.channel.is_private and message.author.id != client.user.id:
        await printout(message, message.content)
    else:
        ping = "<@" + client.user.id + ">"
        if message.content.startswith(ping):
            print("I'm called!")
            # Strip the leading mention to recover the query text.
            toretract = len(ping)
            query = message.content[toretract:]
            # NOTE(review): query[0] raises IndexError when the message
            # is ONLY the mention (empty query) -- guard before indexing.
            if query[0] == " ":
                query = query[1:]
            print("Query = " + query)
            await printout(message, query)
async def printout(message, query):
    """Look up `query` on Wikipedia and reply with an embed (or an error)."""
    wikipage = None
    lookup = True
    print("printout")
    try:
        # Direct page lookup first.
        wikipage = wikipedia.page(query)
        print("I found directly")
    except wikipedia.exceptions.PageError:
        print("Can't access by default. Trying to search")
    except Exception:
        # Any other failure (network, disambiguation, ...) aborts lookup.
        lookup = False
    if wikipage is None and lookup:
        # NOTE(review): wikipedia.suggest() returns a suggestion STRING
        # (or None), not a page object -- the .images/.title access in
        # the else-branch below would fail for this path; confirm intent.
        wikipage = wikipedia.suggest(query)
    if wikipage is None and lookup:
        await client.send_message(message.channel, "Sorry, cannot find " + query + " :v")
    elif not lookup:
        await client.send_message(message.channel, "Something went wrong. Try to be more specific in search, or maybe I can't reach Wikipedia")
    else:
        # Build a rich embed; attach the first page image when available.
        imglist = wikipage.images
        if len(imglist) == 0:
            em = discord.Embed(title=wikipage.title, description=wikipedia.summary(query, sentences=2), colour=0x2DAAED, url=wikipage.url)
        else:
            em = discord.Embed(title=wikipage.title, description=wikipedia.summary(query, sentences=2), colour=0x2DAAED, url=wikipage.url, image=imglist[0])
        em.set_author(name=client.user.name, icon_url="https://wikibot.rondier.io")
        await client.send_message(message.channel, embed=em)
        await client.send_message(message.channel, "More at " + wikipage.url)
client.run(token)
| Python | 0 | |
3ef6866b39601dfafa10895a69c5d348a77ded3e | add test for eject and eject_all | mpf/tests/test_BallDevice_SmartVirtual.py | mpf/tests/test_BallDevice_SmartVirtual.py | from mpf.tests.MpfTestCase import MpfTestCase
class TestBallDeviceSmartVirtual(MpfTestCase):
    """Exercise trough eject/eject_all on the smart_virtual platform.

    smart_virtual simulates switch activity, so ejected balls really
    leave the device after time advances.
    """
    def getConfigFile(self):
        return 'test_ball_device.yaml'
    def getMachinePath(self):
        return 'tests/machine_files/ball_device/'
    def get_platform(self):
        # smart_virtual auto-plays switch events for ball movement.
        return 'smart_virtual'
    def test_eject(self):
        # add initial balls to trough
        self.hit_switch_and_run("s_ball_switch1", 1)
        self.hit_switch_and_run("s_ball_switch2", 1)
        self.assertEqual(2, self.machine.ball_devices.test_trough.balls)
        self.assertEqual(2, self.machine.ball_devices.test_trough.available_balls)
        # call eject
        self.machine.ball_devices.test_trough.eject()
        # available_balls drops immediately; physical count lags the eject
        self.assertEqual(2, self.machine.ball_devices.test_trough.balls)
        self.assertEqual(1, self.machine.ball_devices.test_trough.available_balls)
        # one ball should be gone
        self.advance_time_and_run(30)
        self.assertEqual(1, self.machine.ball_devices.test_trough.balls)
        self.assertEqual(1, self.machine.ball_devices.test_trough.available_balls)
    def test_eject_all(self):
        # add initial balls to trough
        self.hit_switch_and_run("s_ball_switch1", 1)
        self.hit_switch_and_run("s_ball_switch2", 1)
        self.assertEqual(2, self.machine.ball_devices.test_trough.balls)
        self.assertEqual(2, self.machine.ball_devices.test_trough.available_balls)
        # call eject_all
        self.machine.ball_devices.test_trough.eject_all()
        self.advance_time_and_run(30)
        # all balls should be gone
        self.assertEqual(0, self.machine.ball_devices.test_trough.balls)
        self.assertEqual(0, self.machine.ball_devices.test_trough.available_balls)
| Python | 0 | |
104fcfc4eed7f3233d329602283093c7f86484c3 | add development server | server.py | server.py | from http.server import HTTPServer, BaseHTTPRequestHandler
class StaticServer(BaseHTTPRequestHandler):
    """Minimal development file server rooted at ./html."""

    def do_GET(self):
        """Serve the requested path from the html/ directory.

        SECURITY NOTE(review): self.path is used verbatim, so a request
        containing '..' can escape the html/ root (path traversal).
        Acceptable only for local development; never expose publicly.
        """
        root = 'html'
        #print(self.path)
        if self.path == '/':
            filename = root + '/index.html'
        else:
            filename = root + self.path
        self.send_response(200)
        # Choose a Content-Type from the file extension.
        if filename[-4:] == '.css':
            self.send_header('Content-type', 'text/css')
        elif filename[-5:] == '.json':
            # Bug fix: JSON was previously served as application/javascript.
            self.send_header('Content-type', 'application/json')
        elif filename[-3:] == '.js':
            self.send_header('Content-type', 'application/javascript')
        elif filename[-4:] == '.ico':
            self.send_header('Content-type', 'image/x-icon')
        else:
            self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Binary read: no decode step, works for images and text alike.
        with open(filename, 'rb') as fh:
            self.wfile.write(fh.read())
def run(server_class=HTTPServer, handler_class=StaticServer, port=8000):
    """Start a blocking HTTP server on the given port (never returns)."""
    address = ('', port)
    httpd = server_class(address, handler_class)
    print('Starting httpd on port {}'.format(port))
    httpd.serve_forever()
run()
# vim: expandtab
| Python | 0 | |
e88ba0984f3e6045b407342fa7231887142380e2 | Add migration to create roles | corehq/apps/accounting/migrations/0031_create_report_builder_roles.py | corehq/apps/accounting/migrations/0031_create_report_builder_roles.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from corehq.apps.hqadmin.management.commands.cchq_prbac_bootstrap import cchq_prbac_bootstrap
from corehq.sql_db.operations import HqRunPython
class Migration(migrations.Migration):
    # Re-runs the prbac bootstrap so the new report-builder roles exist
    # in environments that migrated past the original bootstrap.
    dependencies = [
        ('accounting', '0030_remove_softwareplan_visibility_trial_internal'),
    ]

    operations = [
        HqRunPython(cchq_prbac_bootstrap),
    ]
| Python | 0.000001 | |
df34c1a07fa6029efbd4df41cbd2009ac5031aca | Create matrixAlg.py | matrixAlg.py | matrixAlg.py | #!/usr/bin/python
#####################################
# Written by Gavin Heverly-Coulson
# Email: gavin <at> quantumgeranium.com
#####################################
# A set of matrix algebra functions for performing
# basic matrix algebra operations.
#
# Tested with Python 2.6/2.7
#
# This work is licensed under a Simplified BSD License
# Copyright (c) 2014, Gavin Heverly-Coulson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
# Print a nicely formatted matrix
def printMat(mat):
    """Print the rows of mat space-separated, one row per line.

    Uses print() as a function so the module also runs under Python 3;
    for a single argument the output is identical under Python 2.
    """
    newStr = ""
    for i in range(len(mat)):
        for j in range(len(mat[0])):
            newStr = newStr + str(mat[i][j]) + " "
        newStr += "\n"
    print(newStr)
# Calculates the determinant of a 3x3 matrix, using the 2x2 sub-matrices method
def det3(mat):
    """Determinant of a 3x3 matrix via cofactor expansion along row 0."""
    # The 2x2 minors are expanded inline rather than delegated to det2.
    minor0 = mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1]
    minor1 = mat[1][0] * mat[2][2] - mat[1][2] * mat[2][0]
    minor2 = mat[1][0] * mat[2][1] - mat[1][1] * mat[2][0]
    return mat[0][0] * minor0 - mat[0][1] * minor1 + mat[0][2] * minor2
# Determinant of a 2x2 matrix.
def det2(mat):
    """Return the determinant of the 2x2 matrix *mat*: ad - bc."""
    return mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
# Transpose of an arbitrary NxM matrix.
def transpose(mat):
    """Return the transpose of *mat* as a new list of lists.

    The result has len(mat[0]) rows and len(mat) columns; *mat* itself is
    left untouched.
    """
    n_rows = len(mat)       # rows of the input
    n_cols = len(mat[0])    # columns of the input
    return [[mat[c][r] for c in range(n_rows)] for r in range(n_cols)]
# Dot product of two vectors, A and B.
def dotProduct(A, B):
    """Return the dot product of vectors A and B.

    B is indexed over A's length, so a shorter B raises IndexError just as
    the original index-based loop did.
    """
    return sum(A[i] * B[i] for i in range(len(A)))
# Euclidean length of a vector.
def vectLength(A):
    """Return the L2 (Euclidean) norm of vector A."""
    return math.sqrt(sum(component ** 2 for component in A))
# Multiplies two matrices (A and B) and returns the result
def matMult(A, B):
    """Return the matrix product A*B as a new list of lists of floats.

    If the inner dimensions disagree (columns of A != rows of B) an error
    message is printed and None is returned implicitly, preserving the
    original error contract.

    Fix: the original used the Python 2 ``print`` statement, which is a
    SyntaxError under Python 3; the error string itself is unchanged.
    """
    if len(A[0]) != len(B):
        print("Matrix dimensions don't match!\nA has {0} columns and B has {1} rows.".format(len(A[0]), len(B)))
    else:
        # Accumulate into 0.0 so the result entries are floats, as before.
        newMat = [[0.0 for _ in range(len(B[0]))] for _ in range(len(A))]
        for i in range(len(A)):
            for j in range(len(B[0])):
                for k in range(len(B)):
                    newMat[i][j] += A[i][k] * B[k][j]
        return newMat
# Converts a given matrix (not necessarily square) to
# reduced row echelon form, in place (the same object is also returned).
def toRedRowEchelon(mat):
    """Reduce *mat* to reduced row echelon form by Gauss-Jordan elimination.

    Mutates *mat* in place (row lists are swapped and replaced) and returns
    it.  Rows are assumed to contain floats; pivots are compared to 0.0
    exactly, with no numerical tolerance — presumably fine for the small
    hand-built matrices this library targets (TODO confirm for near-zero
    pivots).
    """
    colPos = 0               # current pivot column
    rows = len(mat)
    cols = len(mat[0])
    for r in range(rows):
        if colPos >= cols:
            # Ran out of columns before rows: already fully reduced.
            return mat
        i = r
        # Search downward for a nonzero pivot in this column; on reaching
        # the bottom, wrap back to row r and advance to the next column.
        while mat[i][colPos] == 0.0:
            i += 1
            if i == rows:
                i = r
                colPos += 1
                if colPos == cols:
                    # No pivot left anywhere: reduction is complete.
                    return mat
        mat[i], mat[r] = mat[r], mat[i] # swap rows i and r
        lv = mat[r][colPos]
        # Scale the pivot row so the pivot entry becomes 1.
        mat[r] = [mrx / lv for mrx in mat[r]]
        # Eliminate this column from every other row.
        for i in range(rows):
            if i != r:
                lv = mat[i][colPos]
                mat[i] = [iv - lv * rv for rv, iv in zip(mat[r], mat[i])]
        colPos += 1
    return mat
# Inverse of a square matrix via Gauss-Jordan elimination.
def invMat(mat):
    """Return the inverse of the square matrix *mat*.

    Each row is augmented with the matching identity row, the augmented
    matrix is reduced to reduced row echelon form, and the right half of
    the result is the inverse.  *mat* itself is not modified (the augmented
    rows are fresh lists).
    """
    n = len(mat)
    augmented = [
        list(row) + [1.0 if col == idx else 0.0 for col in range(n)]
        for idx, row in enumerate(mat)
    ]
    reduced = toRedRowEchelon(augmented)
    return [row[-n:] for row in reduced]
| Python | 0.000013 | |
7b7ec9cdd1f0ed213608a5c309702e49e44b36e2 | Add simple test. | tests/integration/test_smoke.py | tests/integration/test_smoke.py | from django.test import TestCase
URLS_PUBLIC = [
"/",
]
class SimpleTests(TestCase):
def test_urls(self):
for url in URLS_PUBLIC:
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| Python | 0.000001 | |
c44001ec697faf7552764f91e52fa927056b1538 | Add solution for porblem 31 | euler031.py | euler031.py | #!/usr/bin/python
LIMIT = 200
coins = [1, 2, 5, 10, 20, 50, 100, 200]
def rec_count(total, step):
if total == LIMIT:
return 1
if total > LIMIT:
return 0
c = 0
for x in coins:
if x < step:
continue
c += rec_count(total + x, x)
return c
count = 0
for x in coins:
count += rec_count(x, x)
print(count)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.