commit
stringlengths 40
40
| subject
stringlengths 1
1.49k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| new_contents
stringlengths 1
29.8k
| old_contents
stringlengths 0
9.9k
| lang
stringclasses 3
values | proba
float64 0
1
|
|---|---|---|---|---|---|---|---|
7a8250e6640c8ebf36cd159607da24b095cf708e
|
Create Fibonacci.py
|
Fibonacci.py
|
Fibonacci.py
|
#Author-Michael Aubry
#Description-This script outputs a spiraling fibonacci sequence onto a Fusion 360 sketch
import adsk.core, adsk.fusion

app = adsk.core.Application.get()
design = app.activeProduct
ui = app.userInterface

# **User Inputs**
Steps = 15   # How many steps of Fibonacci would you like to plot?
Length = 2   # How long is the first segment? (cm)

# Get root component
rootComp = design.rootComponent

# Create a new sketch on the XY plane
sketch = rootComp.sketches.add(rootComp.xYConstructionPlane)

# Object collection accumulating the spline fit points
points = adsk.core.ObjectCollection.create()

# Total steps run through the loop (the first two points are added manually)
R = Steps - 2

# Starting coordinates; first point sits at the origin
x = 0
y = 0
points.add(adsk.core.Point3D.create(x, y, 0))

# Starting values for the Fibonacci sequence
fib = 1
fib1 = 1

# Second point sits one segment along +X
x = 1 * Length
points.add(adsk.core.Point3D.create(x, y, 0))

# Each subsequent segment turns 90 degrees, cycling +Y, -X, -Y, +X.
# (i % 4 is equivalent to the original Bin1..Bin4 = range(k, R, 4) tests.)
for i in range(R):
    fib2 = fib + fib1
    fib = fib1
    fib1 = fib2
    fibLength = fib * Length  # scale the Fibonacci number to sketch units
    direction = i % 4
    if direction == 0:
        y = y + fibLength
    elif direction == 1:
        x = x - fibLength
    elif direction == 2:
        y = y - fibLength
    else:
        x = x + fibLength
    points.add(adsk.core.Point3D.create(x, y, 0))

# Create the spline through all accumulated points.
sketch.sketchCurves.sketchFittedSplines.add(points)
|
Python
| 0.999733
|
|
85c02da33f5e9ed4ef1e72bef3cec094ca8cf4d5
|
add DBMetric class that holds data that has to be recorded
|
django_db_meter/message.py
|
django_db_meter/message.py
|
import datetime
import cPickle as pickle
import pylzma
import json
from collections import namedtuple
from django.core.serializers import serialize, deserialize
from django.conf import settings
from core.log import sclient
from core.utils import run_async
from newsfeed.activity import Actor, Target
from newsfeed.constants import NEWSFEED_QUEUE_NAME
from newsfeed.config import FeedConfig
from realtime.kafka.producer import KafkaProducer
class DBMetric(object):
    """Value object holding the data recorded for one database query.

    Attributes are populated from keyword arguments; unknown keys are
    ignored and missing keys default to None (or [] for query_tables,
    now() for timestamp).
    """

    def __init__(self, **kwargs):
        # BUG FIX: the original signature was `def __init__(**kwargs)` --
        # it was missing `self`, so every instantiation raised TypeError.
        self.timestamp = kwargs.get('timestamp', datetime.datetime.now())
        self.query_start_time = kwargs.get('query_start_time')
        self.query_execution_time = kwargs.get('query_execution_time')
        self.query_sql = kwargs.get('query_sql')
        self.query_tables = kwargs.get('query_tables', [])
        self.db_name = kwargs.get('db_name')
        self.app_name = kwargs.get('app_name')
        self.rows_affected = kwargs.get('rows_affected')

    def as_dict(self):
        """Return the metric as a plain dict (values are not converted)."""
        return {
            'timestamp': self.timestamp,
            'query_start_time': self.query_start_time,
            'query_execution_time': self.query_execution_time,
            'query_sql': self.query_sql,
            'query_tables': self.query_tables,
            'db_name': self.db_name,
            'app_name': self.app_name,
            'rows_affected': self.rows_affected,
        }

    def as_json(self):
        """Return the metric serialized as a JSON string.

        BUG FIX: datetime values are stringified via default=str; without
        it json.dumps always raised TypeError on the timestamp fields.
        """
        return json.dumps(self.as_dict(), default=str)

    @classmethod
    def from_queryset(cls, queryset):
        """Build a DBMetric from an executed Django queryset."""
        kwargs = {
            'timestamp': datetime.datetime.now(),
            'query_start_time': queryset.query_start_time,
            'query_execution_time': queryset.query_execution_time,
            'query_sql': queryset.query.__str__(),
            # BUG FIX: these helpers were called on the undefined name
            # `self` inside a classmethod (and `_get_db_name` did not
            # exist); call the real classmethods on `cls` instead.
            'query_tables': cls._get_query_tables(queryset),
            'db_name': cls._get_db(queryset),
            'app_name': queryset.model._meta.app_label,
            'rows_affected': queryset.count(),
        }
        return cls(**kwargs)

    def send(self):
        """Serialize the metric for dispatch.

        NOTE(review): the original defined `send` twice and neither body
        dispatched the message anywhere; the JSON payload is returned so
        callers can forward it until a transport is wired in.
        """
        msg_json = self.as_json()
        return msg_json

    @classmethod
    def _get_db(cls, queryset):
        """Resolve the configured database NAME for the queryset's db alias."""
        return settings.DATABASES.get(queryset.db).get('NAME')

    @classmethod
    def _get_query_tables(cls, queryset):
        """List tables touched by the query, including select_related ones."""
        query_tables = queryset.tables
        query_tables.extend(queryset.select_related.keys())
        return query_tables

    def serialize(self):
        """Pickle and LZMA-compress this metric for transport."""
        serialized = pickle.dumps(self)
        return pylzma.compress(serialized)

    @staticmethod
    def deserialize(compressed_feed_message):
        """Inverse of serialize().

        NOTE(review): unpickling is unsafe on untrusted data; only feed
        this messages produced by serialize() inside the same trust
        boundary.
        """
        decompressed_msg = pylzma.decompress(compressed_feed_message)
        return pickle.loads(decompressed_msg)

    @classmethod
    def send_metric(cls, actor_ctype, actor_object_id, action, target_ctype,
                    target_object_id, properties=None,
                    activity_datetime=None,
                    activity_source=None):
        """Construct a metric and send it in one call.

        BUG FIX: `properties` used a shared mutable default ({}).
        NOTE(review): these keyword names look copied from the newsfeed
        message class and are silently ignored by __init__ -- confirm
        the intended constructor arguments.
        """
        msg = cls(actor_ctype=actor_ctype,
                  actor_object_id=actor_object_id,
                  action=action,
                  target_ctype=target_ctype,
                  target_object_id=target_object_id,
                  properties=properties if properties is not None else {},
                  activity_datetime=activity_datetime,
                  activity_source=activity_source)
        msg.send()
def send(self):
|
Python
| 0
|
|
b777872d1b06714f538dc8fb21b790de822b5a66
|
Update Example folder
|
examples/listing_instruments.py
|
examples/listing_instruments.py
|
import visa

# Open the default VISA resource manager and list which instruments
# (GPIB/USB/serial/ethernet) are currently visible on this machine.
rm = visa.ResourceManager()

# BUG FIX: the original discarded the return value, so running this
# example produced no output; print the tuple of resource names.
print(rm.list_resources())
|
Python
| 0
|
|
735c55d68d4831137255808042684733f93d5c18
|
add iconv clone
|
iconv.py
|
iconv.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Minimal iconv(1) clone: re-encode the given files (or stdin) to stdout."""
import sys
import locale
import argparse
import fileinput

# The locale charset is the default for both input and output encodings.
preferredenc = locale.getpreferredencoding()

parser = argparse.ArgumentParser(
    description="Convert encoding of given files from one encoding to another.")
parser.add_argument(
    "-f", "--from-code", metavar='NAME', default=preferredenc,
    help="encoding of original text (locale default: %s)" % preferredenc)
parser.add_argument(
    "-t", "--to-code", metavar='NAME', default=preferredenc,
    help="encoding for output (locale default: %s)" % preferredenc)
parser.add_argument(
    "-c", metavar='errors', nargs='?', default='strict', const='ignore',
    help="set error handling scheme (default: 'strict', omitted: 'ignore')")
parser.add_argument("-o", metavar='FILE', help="output file")
parser.add_argument("FILE", nargs='*', help="input file")
args = parser.parse_args()

# Write raw bytes: either to the requested file or to stdout's binary layer.
if args.o:
    wstream = open(args.o, 'wb')
else:
    wstream = sys.stdout.buffer

# Read in binary mode so decoding is controlled entirely by --from-code.
with fileinput.input(args.FILE, mode='rb') as f, wstream:
    for line in f:
        wstream.write(
            line.decode(args.from_code, args.c).encode(args.to_code, args.c))
|
Python
| 0
|
|
aa7f888605dee0a845a20e1c0869cc5061719151
|
Add rtree spatial index class
|
plotink/rtree.py
|
plotink/rtree.py
|
# -*- coding: utf-8 -*-
# rtree.py
# part of plotink: https://github.com/evil-mad/plotink
#
# See below for version information
#
# Written by Michal Migurski https://github.com/migurski @michalmigurski
# as a contribution to the AxiDraw project https://github.com/evil-mad/axidraw/
#
# Copyright (c) 2022 Windell H. Oskay, Evil Mad Scientist Laboratories
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
rtree.py
Minimal R-tree spatial index class for calculating intersecting regions
"""
import math
class Index:
    ''' One-shot R-Tree index (no rebalancing, insertions, etc.)

        Each node stores either leaf bboxes or four quadrant subtrees,
        never both.
    '''

    def __init__(self, bboxes):
        ''' bboxes: sequence of (id, (xmin, ymin, xmax, ymax)) tuples.
        '''
        # Instance attributes (the original declared these as shared
        # class-level attributes, a mutable-state footgun).
        self.bboxes = []
        self.subtrees = []
        self.xmin, self.ymin = math.inf, math.inf
        self.xmax, self.ymax = -math.inf, -math.inf

        # Accumulate the mean center point and grow the overall bounds.
        center_x, center_y = 0, 0
        for (_, (xmin, ymin, xmax, ymax)) in bboxes:
            center_x += (xmin/2 + xmax/2) / len(bboxes)
            center_y += (ymin/2 + ymax/2) / len(bboxes)
            self.xmin = min(self.xmin, xmin)
            self.ymin = min(self.ymin, ymin)
            self.xmax = max(self.xmax, xmax)
            self.ymax = max(self.ymax, ymax)

        # Make four lists of bboxes, one for each quadrant around the center
        # point. An original bbox may be present in more than one list.
        sub_bboxes = [
            [
                (i, (x_1, y_1, x_2, y_2)) for (i, (x_1, y_1, x_2, y_2)) in bboxes
                if x_1 < center_x and y_1 < center_y
            ],
            [
                (i, (x_1, y_1, x_2, y_2)) for (i, (x_1, y_1, x_2, y_2)) in bboxes
                if x_2 > center_x and y_1 < center_y
            ],
            [
                (i, (x_1, y_1, x_2, y_2)) for (i, (x_1, y_1, x_2, y_2)) in bboxes
                if x_1 < center_x and y_2 > center_y
            ],
            [
                (i, (x_1, y_1, x_2, y_2)) for (i, (x_1, y_1, x_2, y_2)) in bboxes
                if x_2 > center_x and y_2 > center_y
            ],
        ]

        # Store bboxes or subtrees but not both
        if max(map(len, sub_bboxes)) == len(bboxes):
            # One of the quadrants is identical to the whole tree, so
            # subdividing cannot make progress: keep all the bboxes here.
            self.bboxes = bboxes
        else:
            # Make four subtrees, one for each quadrant
            self.subtrees = [Index(sub) for sub in sub_bboxes]

    def intersection(self, bbox):
        ''' Get a set of IDs whose boxes intersect the given bounding box.
        '''
        ids, (x_1, y_1, x_2, y_2) = set(), bbox

        # Leaf boxes stored at this node
        for (i, (xmin, ymin, xmax, ymax)) in self.bboxes:
            is_disjoint = x_1 > xmax or y_1 > ymax or x_2 < xmin or y_2 < ymin
            if not is_disjoint:
                ids.add(i)

        # Recurse only into subtrees whose bounds overlap the query box
        for subt in self.subtrees:
            is_disjoint = x_1 > subt.xmax or y_1 > subt.ymax or x_2 < subt.xmin or y_2 < subt.ymin
            if not is_disjoint:
                ids |= subt.intersection(bbox)

        return ids
|
Python
| 0.000001
|
|
56ca21d312f34b6a229fe6cdb720ccc96ef712a5
|
add polysites2vcf
|
polysites2vcf.py
|
polysites2vcf.py
|
#This is for converting Shop Mallick's polysite format to vcf
#Probably only useful to you if you are working on the SGDP
#Very specific to this particular format.
from __future__ import division, print_function
import argparse, sys, pdb

# Remember, in eigenstrat, 2 means "2 ref copies".
# IUPAC ambiguity code -> diploid genotype; "-" and "N" mean missing.
CODES={
"A":"AA",
"C":"CC",
"G":"GG",
"T":"TT",
"R":"AG",
"Y":"CT",
"S":"GC",
"W":"AT",
"K":"GT",
"M":"AC",
"-":"..",
"N":"..",
}

################################################################################

def parse_options():
    """
    argparse; -i/--input defaults to stdin ("-").
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=argparse.FileType('r'), default="-")
    return parser.parse_args()

################################################################################

def main(options):
    """
    Convert the polysite stream on options.input to VCF on stdout.

    "##" lines declare samples, the first single-"#" line ends the header
    (emitting the VCF header), and remaining lines are per-site rows.
    """
    samples = []
    # NOTE(review): these flags are hard-coded off, so the include_*
    # branches below are currently dead code kept for parity.
    include_ancients = False
    include_refs = False
    reading_header = True
    for line in options.input:
        if len(line) == 1:
            # newline-only line
            continue
        elif line[:2] == "##" and reading_header:
            bits = line.split()
            if len(bits) < 4:
                continue
            elif bits[1] != "..":
                continue
            elif bits[2][0] == "4" and include_refs:
                samples.append(bits[7])
            elif bits[2][0] == "4":
                samples.append(bits[7])
            elif bits[2][0] == "7" and include_ancients:
                samples.append(bits[4].split(":")[0])
            elif bits[2][0] == "8":
                samples.append(bits[4].split(":")[0])
        elif line[0] == "#" and reading_header:
            # End of the "##" block: emit the VCF header once.
            reading_header = False
            print("##fileformat=VCFv4.2")
            print("##source=polysites2vcf.py")
            print("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">")
            print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t"+"\t".join(samples))
        elif not reading_header:
            bits = line.split()
            chrom = bits[0]
            poss = bits[1]
            idd = chrom + "_" + poss
            ref = bits[2][0]
            alleles = (bits[3] + bits[4] + bits[7]).upper()
            if include_refs and include_ancients:
                alleles = (bits[2] + bits[3] + bits[4] + bits[6] + bits[7]).upper()
            elif include_refs:
                alleles = (bits[2] + bits[3] + bits[4] + bits[7]).upper()
            elif include_ancients:
                alleles = (bits[3] + bits[4] + bits[6] + bits[7]).upper()
            gts = [CODES[x] for x in alleles]
            alt_alleles = list(set([x for x in "".join(gts) if (x != ref and x != ".")]))
            if not len(alt_alleles):
                # monomorphic site: nothing to emit
                continue
            alt = ",".join(alt_alleles)
            allele_map = {ref: "0", ".": "."}
            for i, a in enumerate(alt_alleles):
                allele_map[a] = str(i + 1)
            gt_strings = [allele_map[x[0]] + "/" + allele_map[x[1]] for x in gts]
            print("\t".join([chrom, poss, idd, ref, alt, "100", ".", ".", "GT"] + gt_strings))
        else:
            print(line, file=sys.stderr)
            raise Exception("Header line in unexpected place")

################################################################################

if __name__ == "__main__":
    options = parse_options()
    main(options)
|
Python
| 0.000001
|
|
6a13295ea0e3e763683ec2317502141e4913935b
|
Make prowl debug output actually go to debug log
|
flexget/plugins/output/prowl.py
|
flexget/plugins/output/prowl.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
from requests import RequestException
from flexget.plugin import register_plugin, priority
from flexget.utils.template import RenderError
# Plugin version, reported in the HTTP User-Agent header below.
__version__ = 0.1

log = logging.getLogger('prowl')

# Sent with every request to the Prowl HTTP API.
headers = {'User-Agent': 'FlexGet Prowl plugin/%s' % str(__version__)}
class OutputProwl(object):
    """
    Send prowl notifications

    Example::

      prowl:
        apikey: xxxxxxx
        [application: application name, default FlexGet]
        [event: event title, default New Release]
        [priority: -2 - 2 (2 = highest), default 0]
        [description: notification to send]

    Configuration parameters are also supported from entries (eg. through set).
    """

    def validator(self):
        """Return the config schema validator for this plugin."""
        from flexget import validator
        config = validator.factory('dict')
        config.accept('text', key='apikey', required=True)
        config.accept('text', key='application')
        config.accept('text', key='event')
        config.accept('integer', key='priority')
        config.accept('text', key='description')
        return config

    def prepare_config(self, config):
        """Normalize config to a dict and fill in defaults."""
        if isinstance(config, bool):
            config = {'enabled': config}
        config.setdefault('apikey', '')
        config.setdefault('application', 'FlexGet')
        config.setdefault('event', 'New release')
        config.setdefault('priority', 0)
        return config

    # Run last to make sure other outputs are successful before sending notification
    @priority(0)
    def on_task_output(self, task, config):
        """POST one Prowl notification per accepted entry."""
        config = self.prepare_config(config)
        for entry in task.accepted:

            # get the parameters; entry-level values override config values
            apikey = entry.get('apikey', config['apikey'])
            application = entry.get('application', config['application'])
            event = entry.get('event', config['event'])
            priority = entry.get('priority', config['priority'])
            description = config.get('description', entry['title'])

            # If event has jinja template, render it
            try:
                event = entry.render(event)
            except RenderError as e:
                log.error('Error rendering jinja event: %s' % e)

            # If description has jinja template, render it; fall back to title
            try:
                description = entry.render(description)
            except RenderError as e:
                description = entry['title']
                log.error('Error rendering jinja description: %s' % e)

            url = 'https://prowl.weks.net/publicapi/add'
            data = {'priority': priority, 'application': application, 'apikey': apikey,
                    'event': event, 'description': description}

            # Test mode: only log what would have been sent
            if task.manager.options.test:
                log.info('Would send prowl message about: %s', entry['title'])
                log.debug('options: %s' % data)
                continue

            try:
                response = task.requests.post(url, headers=headers, data=data, raise_status=False)
            except RequestException as e:
                log.error('Error with request: %s' % e)
                continue

            # Check if it succeeded
            request_status = response.status_code

            # error codes and messages from http://prowl.weks.net/api.php
            if request_status == 200:
                log.debug("Prowl message sent")
            elif request_status == 400:
                log.error("Bad request, the parameters you provided did not validate")
            elif request_status == 401:
                log.error("Not authorized, the API key given is not valid, and does not correspond to a user.")
            elif request_status == 406:
                log.error("Not acceptable, your IP address has exceeded the API limit.")
            elif request_status == 500:
                log.error("Internal server error, something failed to execute properly on the Prowl side.")
            else:
                log.error("Unknown error when sending Prowl message")

register_plugin(OutputProwl, 'prowl', api_ver=2)
|
from __future__ import unicode_literals, division, absolute_import
import logging
from requests import RequestException
from flexget.plugin import register_plugin, priority
from flexget.utils.template import RenderError
# Plugin version, reported in the HTTP User-Agent header below.
__version__ = 0.1

log = logging.getLogger('prowl')

# Sent with every request to the Prowl HTTP API.
headers = {'User-Agent': 'FlexGet Prowl plugin/%s' % str(__version__)}
class OutputProwl(object):
    """
    Send prowl notifications

    Example::

      prowl:
        apikey: xxxxxxx
        [application: application name, default FlexGet]
        [event: event title, default New Release]
        [priority: -2 - 2 (2 = highest), default 0]
        [description: notification to send]

    Configuration parameters are also supported from entries (eg. through set).
    """

    def validator(self):
        """Return the config schema validator for this plugin."""
        from flexget import validator
        config = validator.factory('dict')
        config.accept('text', key='apikey', required=True)
        config.accept('text', key='application')
        config.accept('text', key='event')
        config.accept('integer', key='priority')
        config.accept('text', key='description')
        return config

    def prepare_config(self, config):
        """Normalize config to a dict and fill in defaults."""
        if isinstance(config, bool):
            config = {'enabled': config}
        config.setdefault('apikey', '')
        config.setdefault('application', 'FlexGet')
        config.setdefault('event', 'New release')
        config.setdefault('priority', 0)
        return config

    # Run last to make sure other outputs are successful before sending notification
    @priority(0)
    def on_task_output(self, task, config):
        """POST one Prowl notification per accepted entry."""
        config = self.prepare_config(config)
        for entry in task.accepted:

            # get the parameters; entry-level values override config values
            apikey = entry.get('apikey', config['apikey'])
            application = entry.get('application', config['application'])
            event = entry.get('event', config['event'])
            priority = entry.get('priority', config['priority'])
            description = config.get('description', entry['title'])

            # If event has jinja template, render it
            try:
                event = entry.render(event)
            except RenderError as e:
                log.error('Error rendering jinja event: %s' % e)

            # If description has jinja template, render it; fall back to title
            try:
                description = entry.render(description)
            except RenderError as e:
                description = entry['title']
                log.error('Error rendering jinja description: %s' % e)

            url = 'https://prowl.weks.net/publicapi/add'
            data = {'priority': priority, 'application': application, 'apikey': apikey,
                    'event': event, 'description': description}

            # Test mode: only log what would have been sent.
            if task.manager.options.test:
                log.info('Would send prowl message about: %s', entry['title'])
                # was log.verbose: the payload (including the apikey) is
                # debug-only detail and belongs in the debug log
                log.debug('options: %s' % data)
                continue

            try:
                response = task.requests.post(url, headers=headers, data=data, raise_status=False)
            except RequestException as e:
                log.error('Error with request: %s' % e)
                continue

            # Check if it succeeded
            request_status = response.status_code

            # error codes and messages from http://prowl.weks.net/api.php
            if request_status == 200:
                log.debug("Prowl message sent")
            elif request_status == 400:
                log.error("Bad request, the parameters you provided did not validate")
            elif request_status == 401:
                log.error("Not authorized, the API key given is not valid, and does not correspond to a user.")
            elif request_status == 406:
                log.error("Not acceptable, your IP address has exceeded the API limit.")
            elif request_status == 500:
                log.error("Internal server error, something failed to execute properly on the Prowl side.")
            else:
                log.error("Unknown error when sending Prowl message")

register_plugin(OutputProwl, 'prowl', api_ver=2)
|
Python
| 0.000001
|
72dcd6857f5f895f0fb9325681302f5875bc50ec
|
Add a new user-defined file
|
profile_collection/startup/31-capillaries.py
|
profile_collection/startup/31-capillaries.py
|
# Capillary slot positions on the diffractometer (diff) stage.
# NOTE(review): original notes read "6.342 mm apart" and "6.074" --
# presumably nominal/measured slot spacing; confirm with beamline staff.

def capillary6_in():
    """Move the stage so capillary 6 is in the beam."""
    mov(diff.xh, 12.41)
    mov(diff.yh, -12.58)

def capillary7_in():
    """Move the stage so capillary 7 is in the beam."""
    mov(diff.xh, 6.075)
    mov(diff.yh, -12.58)

def capillary8_in():
    """Move the stage so capillary 8 is in the beam."""
    mov(diff.xh, -.26695)
    mov(diff.yh, -12.58)

def capillary9_in():
    """Move the stage so capillary 9 is in the beam."""
    mov(diff.xh, -6.609)
    mov(diff.yh, -12.58)

def capillary10_in():
    """Move the stage so capillary 10 is in the beam."""
    mov(diff.xh, -12.951)
    mov(diff.yh, -12.58)

'''
commands to move capillaries
capillary6_in()
sam = Sample("YT-11")
capillary7_in()
sam = Sample("YT-28")
capillary8_in()
sam = Sample("YT-47")
capillary9_in()
sam = Sample("YT-306")
capillary10_in()
sam = Sample("YT-51")
'''

''' Steps for capillary measurements:
1. Rename "Name Pattern" to sample name (YT-3 for example)
2. type command capillary6_in() (or the number)
3. move and measure
'''
|
Python
| 0
|
|
f1ee6ce108626342b42a2d2a7b5aa4779af87e6c
|
Add python code to plot the histogram
|
plot-histogram.py
|
plot-histogram.py
|
import matplotlib.pyplot as plt
import sys

if __name__ == "__main__":
    # Usage: plot-histogram.py <file with one float per line>
    with open(sys.argv[1]) as f:
        data = [float(line) for line in f]
    # 100 equal-width bins
    plt.hist(data, 100)
    plt.show()
|
Python
| 0.00008
|
|
0852aa9328cf3fe2b975581f4e67357fc2c68f06
|
add reprozip installation and trace cmd
|
neurodocker/interfaces/reprozip.py
|
neurodocker/interfaces/reprozip.py
|
"""Add Dockerfile instructions to minimize container with ReproZip.
Project repository: https://github.com/ViDA-NYU/reprozip/
See https://github.com/freesurfer/freesurfer/issues/70 for an example of using
ReproZip to minimize Freesurfer's recon-all command.
"""
# Author: Jakub Kaczmarzyk <jakubk@mit.edu>
from __future__ import absolute_import, division, print_function
import posixpath
from neurodocker.utils import indent, manage_pkgs
class Reprozip(object):
    """Add Dockerfile instructions to minimize a container based on a command
    or a list of commands.

    First, reprozip trace is run on a command or a list of commands, and then
    all files are deleted except those in the reprozip trace output.

    Parameters
    ----------
    cmds : str or list
        Command(s) to run to minimize the image.
        NOTE(review): the original docstring claimed double quotes within
        commands are escaped automatically, but no escaping is implemented
        here -- confirm before relying on it.
    pkg_manager : {'apt', 'yum'}
        Linux package manager.
    trace_dir : str
        Directory that receives the reprozip trace output.
    """

    def __init__(self, cmds, pkg_manager, trace_dir="/reprozip-trace"):
        self.cmds = cmds
        self.pkg_manager = pkg_manager
        self.trace_dir = trace_dir
        if isinstance(self.cmds, str):
            # Normalize a single command to a one-element list.
            self.cmds = [self.cmds]
        # Miniconda is installed privately for reprozip; never added to PATH.
        self._conda_root = "/opt/miniconda-reprozip"

    def _create_cmd(self):
        """Return full command to install and run ReproZip."""
        comment = ("#-----------------\n"
                   "# Install ReproZip\n"
                   "#-----------------\n")
        cmds = (self._install_miniconda(), self._install_reprozip(),
                self.trace())
        cmds = indent("RUN", ''.join(cmds))
        return comment + cmds

    def _install_miniconda(self):
        """Install Miniconda solely for reprozip. Do not add this installation
        to PATH.
        """
        url = ("https://repo.continuum.io/miniconda/"
               "Miniconda3-latest-Linux-x86_64.sh")
        return ("curl -ssL -o miniconda.sh {}"
                "\n&& bash miniconda.sh -b -p {}"
                "\n&& rm -f miniconda.sh".format(url, self._conda_root))

    def _install_reprozip(self):
        """Conda install reprozip from the vida-nyu channel."""
        conda = posixpath.join(self._conda_root, 'bin', 'conda')
        return ("\n&& {conda} install -y -q python=3.5 pyyaml"
                "\n&& {conda} install -y -q -c vida-nyu reprozip"
                "".format(conda=conda))

    def trace(self):
        """Run reprozip trace on the specified commands."""
        reprozip = posixpath.join(self._conda_root, 'bin', 'reprozip')
        trace_cmds = []
        base = ('\n&& {reprozip} trace -d {trace_dir} --dont-identify-packages'
                ' {continue_}\n\t{cmd}')
        for i, cmd in enumerate(self.cmds):
            if not cmd:
                raise ValueError("Command to trace is empty.")
            # Traces after the first append to the trace with --continue.
            continue_ = "--continue " if i else ""
            trace_cmds.append(base.format(cmd=cmd, reprozip=reprozip,
                                          trace_dir=self.trace_dir,
                                          continue_=continue_))
        return "".join(trace_cmds)

    def remove_untraced_files(self):
        # QUESTION: how do we deal with directories in config.yml?
        pass
|
Python
| 0
|
|
c9f64c0e61fb08c43b1c8cb93ec6f9c389b9c31c
|
delete finished pods from cluster
|
XcScripts/deletePods.py
|
XcScripts/deletePods.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import json
import shutil
import subprocess
import time

def ReadPodsToBeDeleted(fname):
    """Read pod names from fname, one per line, and return them as a list."""
    listPods = []
    with open(fname, 'r') as f:
        for line in f:
            listPods.append(line.rstrip('\n'))
    return listPods

def main(pods_fname):
    """Delete every pod listed in pods_fname, one by one.

    Each deletion is retried up to 12 times; the script exits with
    status 1 on the first pod that cannot be deleted.
    """
    pods = ReadPodsToBeDeleted(pods_fname)
    print("To remove PODs: {0}".format(len(pods)))
    for pod in pods:
        # NOTE(review): pod name is interpolated into a shell string; if
        # names are not trusted, prefer
        # subprocess.call(["kubectl", "delete", "pod", pod]).
        cmd = "kubectl delete pod " + pod
        rc = 0
        for k in range(0, 12):  # several attempts to delete the pod
            # NOTE(review): `time` is imported but there is no backoff
            # sleep between attempts -- possibly intended here.
            rc = subprocess.call(cmd, shell=True)
            if rc == 0:
                break
        if rc != 0:
            print("Cannot delete pod {0}".format(pod))
            sys.exit(1)

if __name__ == '__main__':
    nof_args = len(sys.argv)
    if nof_args == 1:
        print("Use:deletePods list_of_PODs")
        sys.exit(1)
    pods_fname = ""
    if nof_args >= 2:
        pods_fname = sys.argv[1]
    main(pods_fname)
    sys.exit(0)
|
Python
| 0
|
|
375134eba8a7fa1cbf2ab5c94ae0976eebc65de9
|
Solve Code Fights crazyball problem
|
CodeFights/crazyball.py
|
CodeFights/crazyball.py
|
#!/usr/local/bin/python
# Code Fights Crazyball Problem
from itertools import combinations

def crazyball(players, k):
    """Return all k-player teams, each sorted, in lexicographic order."""
    return sorted(sorted(team) for team in combinations(players, k))

def main():
    """Exercise crazyball() against the reference test vectors."""
    tests = [
        [["Ninja", "Warrior", "Trainee", "Newbie"], 3,
         [["Newbie", "Ninja", "Trainee"], ["Newbie", "Ninja", "Warrior"],
          ["Newbie", "Trainee", "Warrior"],
          ["Ninja", "Trainee", "Warrior"]]],
        [["Ninja", "Warrior", "Trainee", "Newbie"], 4,
         [["Newbie", "Ninja", "Trainee", "Warrior"]]],
        [["Pooh"], 1, [["Pooh"]]],
        [["Browny", "Whitey", "Blacky"], 1,
         [["Blacky"], ["Browny"], ["Whitey"]]],
        [["One", "Two", "Three", "Four", "Five", "Six", "Seven"], 5,
         [["Five", "Four", "One", "Seven", "Six"],
          ["Five", "Four", "One", "Seven", "Three"],
          ["Five", "Four", "One", "Seven", "Two"],
          ["Five", "Four", "One", "Six", "Three"],
          ["Five", "Four", "One", "Six", "Two"],
          ["Five", "Four", "One", "Three", "Two"],
          ["Five", "Four", "Seven", "Six", "Three"],
          ["Five", "Four", "Seven", "Six", "Two"],
          ["Five", "Four", "Seven", "Three", "Two"],
          ["Five", "Four", "Six", "Three", "Two"],
          ["Five", "One", "Seven", "Six", "Three"],
          ["Five", "One", "Seven", "Six", "Two"],
          ["Five", "One", "Seven", "Three", "Two"],
          ["Five", "One", "Six", "Three", "Two"],
          ["Five", "Seven", "Six", "Three", "Two"],
          ["Four", "One", "Seven", "Six", "Three"],
          ["Four", "One", "Seven", "Six", "Two"],
          ["Four", "One", "Seven", "Three", "Two"],
          ["Four", "One", "Six", "Three", "Two"],
          ["Four", "Seven", "Six", "Three", "Two"],
          ["One", "Seven", "Six", "Three", "Two"]]]
    ]
    for t in tests:
        res = crazyball(t[0], t[1])
        ans = t[2]
        if ans == res:
            print("PASSED: crazyball({}, {}) returned {}"
                  .format(t[0], t[1], res))
        else:
            print(("FAILED: crazyball({}, {}) returned {},"
                   "answer: {}").format(t[0], t[1], res, ans))

if __name__ == '__main__':
    main()
|
Python
| 0.000561
|
|
e7bac459119e32cb79708ae7764a149dc22a1ed8
|
add visitor.py from python svn (python 2.5 doesnt have it)
|
pyjs/src/pyjs/visitor.py
|
pyjs/src/pyjs/visitor.py
|
# XXX should probably rename ASTVisitor to ASTWalker
# XXX can it be made even more generic?
class ASTVisitor:
    """Performs a depth-first walk of the AST

    The ASTVisitor will walk the AST, performing either a preorder or
    postorder traversal depending on which method is called.

    methods:
        preorder(tree, visitor)
        postorder(tree, visitor)
            tree: an instance of ast.Node
            visitor: an instance with visitXXX methods

    The ASTVisitor is responsible for walking over the tree in the
    correct order.  For each node, it checks the visitor argument for
    a method named 'visitNodeType' where NodeType is the name of the
    node's class, e.g. Class.  If the method exists, it is called
    with the node as its sole argument.

    The visitor method for a particular node type can control how
    child nodes are visited during a preorder walk.  (It can't control
    the order during a postorder walk, because it is called _after_
    the walk has occurred.)  The ASTVisitor modifies the visitor
    argument by adding a visit method to the visitor; this method can
    be used to visit a child node of arbitrary type.
    """

    VERBOSE = 0

    def __init__(self):
        self.node = None
        # Maps node class -> handler so getattr runs once per class.
        self._cache = {}

    def default(self, node, *args):
        """Fallback handler: recurse into all child nodes."""
        for child in node.getChildNodes():
            self.dispatch(child, *args)

    def dispatch(self, node, *args):
        """Route node to the visitor's visit<ClassName> method (or default)."""
        self.node = node
        klass = node.__class__
        meth = self._cache.get(klass, None)
        if meth is None:
            className = klass.__name__
            meth = getattr(self.visitor, 'visit' + className, self.default)
            self._cache[klass] = meth
        return meth(node, *args)

    def preorder(self, tree, visitor, *args):
        """Do preorder walk of tree using visitor"""
        self.visitor = visitor
        visitor.visit = self.dispatch
        self.dispatch(tree, *args)  # XXX *args make sense?
class ExampleASTVisitor(ASTVisitor):
"""Prints examples of the nodes that aren't visited
This visitor-driver is only useful for development, when it's
helpful to develop a visitor incrementally, and get feedback on what
you still have to do.
"""
examples = {}
def dispatch(self, node, *args):
self.node = node
meth = self._cache.get(node.__class__, None)
className = node.__class__.__name__
if meth is None:
meth = getattr(self.visitor, 'visit' + className, 0)
self._cache[node.__class__] = meth
if self.VERBOSE > 1:
print "dispatch", className, (meth and meth.__name__ or '')
if meth:
meth(node, *args)
elif self.VERBOSE > 0:
klass = node.__class__
if klass not in self.examples:
self.examples[klass] = klass
print
print self.visitor
print klass
for attr in dir(node):
if attr[0] != '_':
print "\t", "%-12.12s" % attr, getattr(node, attr)
print
return self.default(node, *args)
# XXX this is an API change
# Default walker class instantiated by walk() when no walker is given.
_walker = ASTVisitor
def walk(tree, visitor, walker=None, verbose=None):
    """Walk tree with visitor using walker (default: a new _walker()).

    verbose, when given, is assigned to the walker's VERBOSE attribute.
    Returns the visitor (as augmented by the walker) for chaining.
    """
    if walker is None:
        walker = _walker()
    if verbose is not None:
        walker.VERBOSE = verbose
    walker.preorder(tree, visitor)
    return walker.visitor
def dumpNode(node):
print node.__class__
for attr in dir(node):
if attr[0] != '_':
print "\t", "%-10.10s" % attr, getattr(node, attr)
|
Python
| 0
|
|
93c828b7c94004321a3801c2b53ba692532d1c79
|
Update TwitterSearchSensor to retrieve and store last_id in the datastore.
|
packs/twitter/sensors/twitter_search_sensor.py
|
packs/twitter/sensors/twitter_search_sensor.py
|
from TwitterSearch import TwitterSearch
from TwitterSearch import TwitterSearchOrder
from st2reactor.sensor.base import PollingSensor
# Names exported by this module.
__all__ = [
'TwitterSearchSensor'
]

# Base URL used to construct links to individual tweets.
BASE_URL = 'https://twitter.com'
class TwitterSearchSensor(PollingSensor):
    """Polls Twitter search and dispatches a trigger for each matching tweet.

    The id of the newest processed tweet is persisted in the sensor
    datastore (when the service supports it) so restarts don't re-emit
    previously seen tweets.
    """

    def __init__(self, sensor_service, config=None, poll_interval=None):
        super(TwitterSearchSensor, self).__init__(sensor_service=sensor_service,
                                                  config=config,
                                                  poll_interval=poll_interval)
        self._trigger_ref = 'twitter.matched_tweet'
        self._logger = self._sensor_service.get_logger(__name__)

    def setup(self):
        # Build the API client once; poll() reuses it.
        self._client = TwitterSearch(
            consumer_key=self._config['consumer_key'],
            consumer_secret=self._config['consumer_secret'],
            access_token=self._config['access_token'],
            access_token_secret=self._config['access_token_secret']
        )
        self._last_id = None

    def poll(self):
        tso = TwitterSearchOrder()
        tso.set_keywords([self._config['query']])

        language = self._config.get('language', None)
        if language:
            tso.set_language(language)

        tso.set_result_type('recent')
        tso.set_count(self._config.get('count', 30))
        tso.set_include_entities(False)

        last_id = self._get_last_id()
        if last_id:
            # Datastore values may come back as strings, hence the cast.
            tso.set_since_id(int(last_id))

        try:
            tweets = self._client.search_tweets(tso)
            tweets = tweets['content']['statuses']
        except Exception as e:
            self._logger.exception('Polling Twitter failed: %s' % (str(e)))
            return

        # Process oldest-first so last_id ends at the newest tweet.
        tweets = list(reversed(tweets))

        if tweets:
            self._set_last_id(last_id=tweets[-1]['id'])

        for tweet in tweets:
            self._dispatch_trigger_for_tweet(tweet=tweet)

    def cleanup(self):
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _get_last_id(self):
        # Lazily hydrate from the datastore; hasattr guards against older
        # sensor services without get_value support.
        if not self._last_id and hasattr(self._sensor_service, 'get_value'):
            self._last_id = self._sensor_service.get_value(name='last_id')
        return self._last_id

    def _set_last_id(self, last_id):
        self._last_id = last_id
        if hasattr(self._sensor_service, 'set_value'):
            self._sensor_service.set_value(name='last_id', value=last_id)

    def _dispatch_trigger_for_tweet(self, tweet):
        trigger = self._trigger_ref
        url = '%s/%s/status/%s' % (BASE_URL, tweet['user']['screen_name'], tweet['id'])
        payload = {
            'id': tweet['id'],
            'created_at': tweet['created_at'],
            'lang': tweet['lang'],
            'place': tweet['place'],
            'retweet_count': tweet['retweet_count'],
            'favorite_count': tweet['favorite_count'],
            'user': {
                'screen_name': tweet['user']['screen_name'],
                'name': tweet['user']['name'],
                'location': tweet['user']['location'],
                'description': tweet['user']['description'],
            },
            'text': tweet['text'],
            'url': url
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
|
from TwitterSearch import TwitterSearch
from TwitterSearch import TwitterSearchOrder
from st2reactor.sensor.base import PollingSensor
BASE_URL = 'https://twitter.com'
class TwitterSearchSensor(PollingSensor):
    """Polling sensor that searches Twitter and dispatches a
    ``twitter.matched_tweet`` trigger per new match.

    NOTE: this version keeps ``_last_id`` only in memory, so a sensor
    restart will re-emit tweets that were already dispatched (see TODO
    in cleanup).
    """

    def __init__(self, sensor_service, config=None, poll_interval=None):
        super(TwitterSearchSensor, self).__init__(sensor_service=sensor_service,
                                                  config=config,
                                                  poll_interval=poll_interval)
        self._trigger_ref = 'twitter.matched_tweet'
        self._logger = self._sensor_service.get_logger(__name__)

    def setup(self):
        # Authenticated client built from the pack's OAuth credentials.
        self._client = TwitterSearch(
            consumer_key=self._config['consumer_key'],
            consumer_secret=self._config['consumer_secret'],
            access_token=self._config['access_token'],
            access_token_secret=self._config['access_token_secret']
        )
        # Newest seen tweet id, in memory only.
        self._last_id = None

    def poll(self):
        """Run one search and dispatch a trigger per tweet newer than _last_id."""
        tso = TwitterSearchOrder()
        tso.set_keywords([self._config['query']])
        language = self._config.get('language', None)
        if language:
            tso.set_language(language)
        tso.set_result_type('recent')
        tso.set_count(self._config.get('count', 30))
        tso.set_include_entities(False)
        if self._last_id:
            # Only fetch tweets newer than the last one we processed.
            tso.set_since_id(self._last_id)
        try:
            tweets = self._client.search_tweets(tso)
            tweets = tweets['content']['statuses']
        except Exception as e:
            self._logger.exception('Polling Twitter failed: %s' % (str(e)))
            return
        # API returns newest first; reverse so triggers fire chronologically.
        tweets = list(reversed(tweets))
        if tweets:
            self._last_id = tweets[-1]['id']
        for tweet in tweets:
            self._dispatch_trigger_for_tweet(tweet=tweet)

    def cleanup(self):
        # TODO: Persist state (id) so we avoid duplicate events
        pass

    def add_trigger(self, trigger):
        pass

    def update_trigger(self, trigger):
        pass

    def remove_trigger(self, trigger):
        pass

    def _dispatch_trigger_for_tweet(self, tweet):
        # Normalize the raw tweet object into the payload exposed to rules.
        trigger = self._trigger_ref
        url = '%s/%s/status/%s' % (BASE_URL, tweet['user']['screen_name'], tweet['id'])
        payload = {
            'id': tweet['id'],
            'created_at': tweet['created_at'],
            'lang': tweet['lang'],
            'place': tweet['place'],
            'retweet_count': tweet['retweet_count'],
            'favorite_count': tweet['favorite_count'],
            'user': {
                'screen_name': tweet['user']['screen_name'],
                'name': tweet['user']['name'],
                'location': tweet['user']['location'],
                'description': tweet['user']['description'],
            },
            'text': tweet['text'],
            'url': url
        }
        self._sensor_service.dispatch(trigger=trigger, payload=payload)
|
Python
| 0
|
7da94fd5576f4c052e79a8068164c101054d5ae7
|
Add Python / `requests` example
|
python/simple.py
|
python/simple.py
|
import requests  # http://python-requests.org/

# Premium user authentication process and API access example.
r = requests.post('https://api.masterleague.net/auth/token/',
                  data={'username': 'user', 'password': '12345'})
# Parse the response body once instead of calling r.json() twice
# (the original parsed it both for the membership test and the lookup).
payload = r.json()
if 'token' not in payload:
    print(r.text)
    raise ValueError("Unable to extract authentication token!")
token = payload['token']

# Reuse one session so the Authorization header applies to every request.
s = requests.Session()
s.headers.update({'Authorization': 'Token ' + token})
r = s.get('https://api.masterleague.net/heroes.json')
print(r.text)

# Anonymous user access example
r = requests.get('https://api.masterleague.net/heroes.json')
print(r.text)
|
Python
| 0.000024
|
|
84138d6df951df7a4a44516cec31eca3ad15d308
|
add pytorch support
|
pytorch_model.py
|
pytorch_model.py
|
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
# Training settings
# now the argments setting
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                    help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                    help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                    help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
                    help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
args = parser.parse_args()
# Use CUDA only when available and not explicitly disabled.
args.cuda = not args.no_cuda and torch.cuda.is_available()

# Seed both CPU and GPU RNGs for reproducible runs.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# pin_memory speeds up host->GPU transfers; only useful with CUDA.
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
# MNIST training data, normalized with the dataset's mean/std constants.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.batch_size, shuffle=True, **kwargs)
# MNIST test data. Bug fix: use the dedicated --test-batch-size flag,
# which was declared but never used (the original reused args.batch_size).
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel MNIST digits.

    Two conv+maxpool stages (with 2D dropout on the second conv) feed a
    320 -> 50 -> 10 fully connected head; output is log-probabilities.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        hidden = F.relu(F.max_pool2d(self.conv1(x), 2))
        hidden = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(hidden)), 2))
        flat = hidden.view(-1, 320)  # 20 channels * 4 * 4 spatial = 320
        dense = F.relu(self.fc1(flat))
        dense = F.dropout(dense, training=self.training)
        return F.log_softmax(self.fc2(dense))
model = Net()
if args.cuda:
    # Move parameters to the GPU before the optimizer captures them.
    model.cuda()

optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
def train(epoch):
    """Run one training epoch over train_loader, logging loss periodically."""
    model.train()  # enable dropout
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # NOTE: Variable wrapping is the pre-0.4 PyTorch autograd API.
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        # Negative log-likelihood pairs with the model's log_softmax output.
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            # loss.data[0] is the old-API scalar extraction (loss.item() today).
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.data[0]))
def test(epoch):
    """Evaluate on test_loader and print average loss and accuracy."""
    model.eval()  # disable dropout
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        # volatile=True is the pre-0.4 way to disable autograd during eval.
        data, target = Variable(data, volatile=True), Variable(target)
        output = model(data)
        test_loss += F.nll_loss(output, target).data[0]
        pred = output.data.max(1)[1] # get the index of the max log-probability
        correct += pred.eq(target.data).cpu().sum()
    test_loss = test_loss  # no-op kept as-is; candidate for removal
    test_loss /= len(test_loader) # loss function already averages over batch size
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
# Train for the configured number of epochs, evaluating after each one.
for epoch in range(1, args.epochs + 1):
    train(epoch)
    test(epoch)
|
Python
| 0
|
|
f6e49221370d8087403be1a4679e62ba217c8be9
|
Create embedhelp.py
|
embedhelp/embedhelp.py
|
embedhelp/embedhelp.py
|
import discord
import os
import collections
from .utils.dataIO import fileIO, dataIO
from .utils import checks
from discord.ext import commands
class Help:
    """Discord cog that replaces the built-in help command with embeds.

    Output destination (DM vs. channel) is a single 'toggle' flag stored
    in data/help/toggle.json.
    """

    def __init__(self, bot):
        self.bot = bot
        # JSON file holding one key, 'toggle': either "dm" or "no_dm".
        self.profile = "data/help/toggle.json"
        self.riceCog = dataIO.load_json(self.profile)

    @commands.command(pass_context=True)
    @checks.is_owner()
    async def sethelp(self, ctx):
        # Reload from disk so an external edit to the file is respected.
        self.profile = "data/help/toggle.json"
        self.riceCog = dataIO.load_json(self.profile)
        dm_msg = "The help message will now be send in DM."
        no_dm_msg = "The help message will now be send into the channel."
        # Missing key defaults to channel output; otherwise flip the flag.
        if 'toggle' not in self.riceCog:
            self.riceCog['toggle'] = "no_dm"
            dataIO.save_json(self.profile,
                             self.riceCog)
            msg = no_dm_msg
        elif self.riceCog['toggle'] == "dm":
            self.riceCog['toggle'] = "no_dm"
            dataIO.save_json(self.profile,
                             self.riceCog)
            msg = no_dm_msg
        elif self.riceCog['toggle'] == "no_dm":
            self.riceCog['toggle'] = "dm"
            dataIO.save_json(self.profile,
                             self.riceCog)
            msg = dm_msg
        if msg:
            await self.bot.say(msg)

    @commands.command(name='help', pass_context=True)
    async def _help(self, ctx, command = None):
        """Embedded help command"""
        author = ctx.message.author
        # First run: default to DM and tell the user how to change it.
        if 'toggle' not in self.riceCog:
            self.riceCog['toggle'] = "dm"
            dataIO.save_json(self.profile,
                             self.riceCog)
            await self.bot.say("Help message is set to DM by default. use "
                               "**{}sethelp** to change it!".format(ctx.prefix))
            toggle = self.riceCog['toggle']
        else:
            toggle = self.riceCog['toggle']
        if not command:
            # No argument: build a paginated embed listing commands per cog.
            msg = "**Command list:**"
            color = 0xD2B48C
            final_coms = {}
            com_groups = []
            # Collect the module name of every runnable command (one entry
            # per cog module).
            for com in self.bot.commands:
                try:
                    if not self.bot.commands[com].can_run(ctx):
                        continue
                    if self.bot.commands[com].module.__name__ not in com_groups:
                        com_groups.append(self.bot.commands[com].module.__name__)
                    else:
                        continue
                except Exception as e:
                    print(e)
                    continue
            com_groups.sort()
            alias = []
            #print(com_groups)
            for com_group in com_groups:
                # NOTE(review): this local list shadows the imported
                # `commands` module within the loop body.
                commands = []
                for com in self.bot.commands:
                    if not self.bot.commands[com].can_run(ctx):
                        continue
                    # Skip alias entries; only list canonical names.
                    if com in self.bot.commands[com].aliases:
                        continue
                    if com_group == self.bot.commands[com].module.__name__:
                        commands.append(com)
                final_coms[com_group] = commands
            to_send = []
            final_coms = collections.OrderedDict(sorted(final_coms.items()))
            field_count = 0
            page = 0
            counter = 0
            # Build embeds of at most 15 fields (one field per cog).
            for group in final_coms:
                counter += 1
                if field_count == 0:
                    page += 1
                    title = "**Command list,** page {}".format(page)
                    em=discord.Embed(description=title,
                                     color=color)
                field_count += 1
                is_last = counter == len(final_coms)
                msg = ""
                final_coms[group].sort()
                count = 0
                for com in final_coms[group]:
                    if count == 0:
                        msg += '`{}`'.format(com)
                    else:
                        msg += '~`{}`'.format(com)
                    count += 1
                cog_name = group.replace("cogs.", "").title()
                cog = "```\n"
                cog += cog_name
                cog += "\n```"
                em.add_field(name=cog,
                             value=msg,
                             inline=False)
                if field_count == 15 or is_last:
                    to_send.append(em)
                    field_count = 0
            # Deliver either via DM or in the invoking channel.
            if toggle == "dm":
                await self.bot.say("Hey there, {}! I sent you a list of "
                                   "commands through DM.".format(author.mention))
                for em in to_send:
                    await self.bot.send_message(ctx.message.author,
                                                embed=em)
                await self.bot.send_message(ctx.message.author,
                                            "Spoopy, the discord bot that does stuff because reasons. "
                                            "Made by OKE PlazmA, helped by it's community.")
            elif toggle == 'no_dm':
                for em in to_send:
                    await self.bot.say(embed=em)
                await self.bot.say("Spoopy, the discord bot that does stuff because reasons. "
                                   "Made by OKE PlazmA, helped by it's community.")
        else:
            # Argument given: show usage + docstring for that one command.
            msg = "**Command Help:**"
            color = 0xD2B48C
            em=discord.Embed(description=msg,
                             color=color)
            try:
                if not self.bot.commands[command].can_run(ctx):
                    await self.bot.say("Might be lacking perms for this "
                                       "command.")
                    return
                commie = "```\n"
                commie += command + " " + " ".join(["[" + com + "]" for com in \
                                                    self.bot.commands[command].\
                                                    clean_params])
                commie += "\n```"
                info = self.bot.commands[command].help
                em.add_field(name=commie,
                             value=info,
                             inline=False)
                await self.bot.say(embed=em)
            except Exception as e:
                print(e)
                await self.bot.say("Couldn't find command! Try again.")
def check_folder():
    # Ensure the cog's data directory exists before first use.
    if not os.path.exists("data/help"):
        print("Creating data/help folder")
        os.makedirs("data/help")

def check_file():
    # Create an empty toggle.json when it is missing or invalid JSON.
    data = {}
    f = "data/help/toggle.json"
    if not dataIO.is_valid_json(f):
        print("Creating data/help/toggle.json")
        dataIO.save_json(f,
                         data)

def setup(bot):
    # Cog entry point: prepare storage, drop the built-in help command,
    # then register this replacement.
    check_folder()
    check_file()
    bot.remove_command('help')
    bot.add_cog(Help(bot))
|
Python
| 0.000001
|
|
b4f2c7b8bde0d28f7d1b61718eb7cd0b9159f507
|
add __version__
|
epistasis/__version__.py
|
epistasis/__version__.py
|
__version__ = "0.6.4"  # single source of truth for the package version
|
Python
| 0.000984
|
|
9498ac9ec27bbef1725b92e84a3b0d4c9e967aa6
|
add ex14
|
lpthw/ex14.py
|
lpthw/ex14.py
|
#!/usr/bin/env python
# Exercise 14: Prompting and Passing
# NOTE: Python 2 script (print statements, raw_input).
from sys import argv

# Expects exactly one command-line argument: the user's name.
script, user_name = argv
prompt = '> '

print "Hi %s, I'm the %s script." % (user_name, script)
print "I'd like to ask you a few questions."
print "Do you like me %s?" % user_name
likes = raw_input(prompt)

print "Where do you live %s?" % user_name
lives = raw_input(prompt)

print "What kind of computer do you have?"
computer = raw_input(prompt)

# %r shows the repr of each answer (quotes included), per the exercise.
print """
Alright, so you said %r about liking me.
You live in %r. Not sure where that is.
And you have a %r computer. Nice.
""" % (likes, lives, computer)
|
Python
| 0.99846
|
|
1c8fd79c783ba6f21140b4c08bbf648bf5989dd4
|
Add main module
|
core/hybra.py
|
core/hybra.py
|
import data_loader
import descriptives
import network
import timeline
import wordclouds
def load_data( terms = [], data_folder = '' ):
    # Dispatch: no folder means run every loader; otherwise run only the
    # loader whose name matches the folder's first path component.
    # NOTE(review): mutable default `terms=[]` is never mutated here, but
    # is a risky idiom.
    if data_folder == '':
        return load_all_data( terms )
    else:
        if '/' not in data_folder:
            data_folder += '/'
        loader = data_folder.split( '/' )[0]
        return load_data_from_folder( terms, loader, data_folder )
def load_all_data( terms ):
    """Run every ``load_*`` function in data_loader and collect the results.

    Returns a dict mapping loader function name -> that loader's output.
    When *terms* is non-empty it is forwarded to each loader as positional
    arguments.
    """
    data = {}
    for function_name in dir( data_loader ):
        if 'load_' in function_name:
            f = getattr( data_loader, function_name )
            # Bug fix: the original called getattr(...)(*terms) and then
            # called the *result* again via f(), invoking the loader's
            # return value as if it were a function.
            if len( terms ) == 0:
                data[function_name] = f()
            else:
                data[function_name] = f( *terms )
    return data
def load_data_from_folder( terms, loader, data_folder ):
    # Run only the loaders whose name contains *loader* and concatenate
    # their results into one list.
    data = []
    for function_name in dir( data_loader ):
        if loader in function_name:
            if len( terms ) == 0:
                data += getattr( data_loader, function_name )( data_folder = data_folder )
            else:
                data += getattr( data_loader, function_name)( terms, data_folder )
    return data
def describe( data ):
    # A dict means one entry per loader (from load_all_data); describe each.
    # NOTE: Python 2 print statements.
    if isinstance( data, dict ):
        for loader in data:
            print loader
            descriptives.describe( data[loader] )
            print '\n'
    else:
        print descriptives.describe( data )

def create_timeline( data ):
    # Thin wrapper around the timeline module.
    timeline.create_timeline( data )

def create_network( data ):
    # Thin wrapper around the network module.
    network.create_network( data )

def create_wordcloud( data ):
    # Thin wrapper around the wordclouds module.
    wordclouds.create_wordcloud( data )
|
Python
| 0.000001
|
|
0ec0398f8e50ed0adca426f9c468fd5154603941
|
add mmd matrix example
|
open_spiel/python/examples/mmd_matrix_example.py
|
open_spiel/python/examples/mmd_matrix_example.py
|
""" Example of using MMD with dilated entropy
to solve for QRE in a Matrix Game """
from absl import app
from absl import flags
from open_spiel.python.algorithms import mmd_dilated
import pyspiel
FLAGS = flags.FLAGS

flags.DEFINE_integer("iterations", 1000, "Number of iterations")
flags.DEFINE_float("alpha", 0.1, "QRE parameter, larger value amounts to more regularization")
flags.DEFINE_integer("print_freq", 100, "How often to print the gap")

# create pyspiel perturbed RPS matrix game
game = pyspiel.create_matrix_game([[0, -1, 3],
                                   [1, 0, -3],
                                   [-3, 3, 0]],
                                  [[0, 1, -3],
                                   [-1, 0, 3],
                                   [3, -3, 0]])
# The MMD solver operates on sequential games, so convert the matrix game.
game = pyspiel.convert_to_turn_based(game)

def main(_):
    """Run MMD with dilated entropy and periodically print the gap."""
    mmd = mmd_dilated.MMDDilatedEnt(game, FLAGS.alpha)
    for i in range(FLAGS.iterations):
        mmd.update_sequences()
        if i % FLAGS.print_freq == 0:
            conv = mmd.get_gap()
            print("Iteration {} gap {}".format(i, conv))

    # Extract policies for both players
    print(mmd.get_policies().action_probability_array)
    # Note the sequence form and behavioural-form coincide
    # for a normal-form game (sequence form has extra root value of 1)
    print(mmd.current_sequences())

if __name__ == "__main__":
    app.run(main)
|
Python
| 0
|
|
eb2cbb45fd78c2e8accdaa6f8ba37ef1403159dd
|
Add brainfuck live shell
|
bf/bf_cmd.py
|
bf/bf_cmd.py
|
#!/usr/bin/env python3
class BracketError(Exception):
    """Raised when a brainfuck program's brackets are unbalanced."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
class Machine():
    """A brainfuck interpreter with a growable tape of cells (mod 128)."""

    def __init__(self):
        self.tape = [0]  # tape cells, grown on demand by '>'
        self.p = 0       # data pointer (index into self.tape)

    def run(self, code):
        """Execute a string of brainfuck instructions on this machine.

        Raises BracketError when the net bracket count of *code* is not
        zero.  Characters that are not brainfuck instructions are ignored.
        """
        pc = 0
        loop_stack = []
        brackets = 0
        printed = False
        # Quick sanity check: net bracket count must balance. (This does
        # not catch ordering errors such as "][".)
        for instr in code:
            if instr == '[':
                brackets += 1
            elif instr == ']':
                brackets -= 1
        if brackets != 0:
            raise BracketError('Error: failed bracket count')
        while pc < len(code):
            instr = code[pc]
            # increment/decrement
            if instr == '+':
                self.increment(1)
            elif instr == '-':
                self.increment(-1)
            # I/O
            elif instr == '.':
                print(chr(self.cell()), end='')
                printed = True
            elif instr == ',':
                self.input()
            # move tape
            elif instr == '<':
                if self.p > 0:
                    self.p -= 1
                else:
                    print("Error: Can't decrement pointer")
            elif instr == '>':
                if self.p > (len(self.tape)-2):
                    self.tape.append(0)
                self.p += 1
            # looping
            elif instr == ']':
                pc = loop_stack.pop() - 1
            elif instr == '[':
                if self.cell() == 0:
                    # Bug fix: skip to the *matching* ']' by tracking
                    # nesting depth.  The old code stopped at the first
                    # ']', which desynchronised the loop stack for any
                    # skipped loop containing nested brackets
                    # (e.g. "[[-]]" crashed with an empty-stack pop).
                    depth = 1
                    while depth:
                        pc += 1
                        if code[pc] == '[':
                            depth += 1
                        elif code[pc] == ']':
                            depth -= 1
                else:
                    loop_stack.append(pc)
            pc += 1
        if printed:
            print('')

    def set(self, val):
        # Cells wrap modulo 128 (7-bit ASCII range).
        self.tape[self.p] = val % 128

    def increment(self, amount):
        self.set(self.cell() + amount)

    def input(self):
        # Read one character from stdin; empty input zeroes the cell.
        character = input()
        if character == '':
            print("No value given, setting cell to 0 ...")
            self.set(0)
        else:
            self.set(ord(character[0]))

    def cell(self):
        """Return the value of the cell under the pointer."""
        return self.tape[self.p]

    def dump(self):
        # Debug helper: show pointer position and the whole tape.
        print("%d," % self.p, self.tape)
if __name__ == "__main__":
    # Interactive REPL: each input line is either a meta-command
    # (h/q/d/r) or a brainfuck program to run on the persistent tape.
    helptext = "h: Display this help text\nq: Quit\nd: Print tape, pointer\nr: Reset tape"
    tape = Machine()
    while True:
        try:
            # Prompt shows current pointer position and cell value.
            command = input("[%d]:%d$ " %(tape.p,tape.cell()))
        except EOFError:
            break
        if command == "":
            continue
        elif command == "q" or command == "quit":
            break
        elif command == "d" or command == "dump":
            tape.dump()
        elif command == "h" or command == "help":
            print(helptext)
        elif command == "r" or command == "reset":
            tape = Machine()
            print("Tape Reset.")
        else:
            # Anything else is treated as brainfuck code.
            tape.run(command)
    print("Goodbye!")
|
Python
| 0.000023
|
|
3425d265c32d33c189710bcffd1d0df62ce27b3a
|
update model
|
model.py
|
model.py
|
class User(dict):
    """ Every user must have keys for a username, name, passphrase (this
    is a bcrypt hash of the password), salt, groups, and an email address.
    They can be blank or None, but the keys must exist. """
    def __init__(self, dict=None):
        # Seed the required keys: string fields default to '' and
        # groups to an empty list, then overlay any caller-supplied data.
        for field in ('username', 'name', 'passphrase', 'salt', 'email'):
            self[field] = ''
        self['groups'] = []
        if dict:
            for key in dict:
                self[key] = dict[key]
    def __getattr__(self, attr):
        # Unknown attribute access yields None instead of AttributeError.
        return None
|
class User(dict):
    """ Every user must have keys for a username, name, passphrase (this
    is a md5 hash of the password), groups, and an email address. They can be
    blank or None, but the keys must exist. """
    def __init__(self, dict=None):
        # Seed the required keys: string fields default to '' and
        # groups to an empty list, then overlay any caller-supplied data.
        for field in ('username', 'name', 'passphrase', 'email'):
            self[field] = ''
        self['groups'] = []
        if dict:
            for key in dict:
                self[key] = dict[key]
    def __getattr__(self, attr):
        # Unknown attribute access yields None instead of AttributeError.
        return None
|
Python
| 0.000001
|
b4eb3a55be9e753496c5fd12a89ef85d6a904c09
|
Annotate zerver/management/commands/realm_emoji.py.
|
zerver/management/commands/realm_emoji.py
|
zerver/management/commands/realm_emoji.py
|
from __future__ import absolute_import
from __future__ import print_function
from argparse import RawTextHelpFormatter
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandParser
from zerver.models import Realm, get_realm
from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji
import sys
import six
class Command(BaseCommand):
    help = """Manage emoji for the specified realm
Example: python manage.py realm_emoji --realm=zulip.com --op=add robotheart \\
    https://humbug-user-avatars.s3.amazonaws.com/95ffa70fe0e7aea3c052ba91b38a28d8779f5705
Example: python manage.py realm_emoji --realm=zulip.com --op=remove robotheart
Example: python manage.py realm_emoji --realm=zulip.com --op=show
"""

    # Fix support for multi-line usage
    def create_parser(self, *args, **kwargs):
        # type: (*Any, **Any) -> CommandParser
        parser = super(Command, self).create_parser(*args, **kwargs)
        # Preserve the newlines in the help text above.
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('-r', '--realm',
                            dest='domain',
                            type=str,
                            required=True,
                            help='The name of the realm.')
        parser.add_argument('--op',
                            dest='op',
                            type=str,
                            default="show",
                            help='What operation to do (add, show, remove).')
        parser.add_argument('name', metavar='<name>', type=str, nargs='?', default=None,
                            help="name of the emoji")
        parser.add_argument('img_url', metavar='<image url>', type=str, nargs='?',
                            help="URL of image to display for the emoji")

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        realm = get_realm(options["domain"])
        # 'show' needs no positional arguments; handle it first.
        if options["op"] == "show":
            for name, url in six.iteritems(realm.get_emoji()):
                print(name, url)
            sys.exit(0)

        # All remaining operations require an emoji name.
        name = options['name']
        if name is None:
            self.print_help("python manage.py", "realm_emoji")
            sys.exit(1)

        if options["op"] == "add":
            img_url = options['img_url']
            if img_url is None:
                self.print_help("python manage.py", "realm_emoji")
                sys.exit(1)
            check_add_realm_emoji(realm, name, img_url)
            sys.exit(0)
        elif options["op"] == "remove":
            do_remove_realm_emoji(realm, name)
            sys.exit(0)
        else:
            # Unknown --op value: show usage and fail.
            self.print_help("python manage.py", "realm_emoji")
            sys.exit(1)
|
from __future__ import absolute_import
from __future__ import print_function
from argparse import RawTextHelpFormatter
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from zerver.models import Realm, get_realm
from zerver.lib.actions import check_add_realm_emoji, do_remove_realm_emoji
import sys
import six
class Command(BaseCommand):
    help = """Manage emoji for the specified realm
Example: python manage.py realm_emoji --realm=zulip.com --op=add robotheart \\
    https://humbug-user-avatars.s3.amazonaws.com/95ffa70fe0e7aea3c052ba91b38a28d8779f5705
Example: python manage.py realm_emoji --realm=zulip.com --op=remove robotheart
Example: python manage.py realm_emoji --realm=zulip.com --op=show
"""

    # Fix support for multi-line usage
    def create_parser(self, *args, **kwargs):
        parser = super(Command, self).create_parser(*args, **kwargs)
        # Preserve the newlines in the help text above.
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        parser.add_argument('-r', '--realm',
                            dest='domain',
                            type=str,
                            required=True,
                            help='The name of the realm.')
        parser.add_argument('--op',
                            dest='op',
                            type=str,
                            default="show",
                            help='What operation to do (add, show, remove).')
        parser.add_argument('name', metavar='<name>', type=str, nargs='?', default=None,
                            help="name of the emoji")
        parser.add_argument('img_url', metavar='<image url>', type=str, nargs='?',
                            help="URL of image to display for the emoji")

    def handle(self, *args, **options):
        # type: (*Any, **str) -> None
        realm = get_realm(options["domain"])
        # 'show' needs no positional arguments; handle it first.
        if options["op"] == "show":
            for name, url in six.iteritems(realm.get_emoji()):
                print(name, url)
            sys.exit(0)

        # All remaining operations require an emoji name.
        name = options['name']
        if name is None:
            self.print_help("python manage.py", "realm_emoji")
            sys.exit(1)

        if options["op"] == "add":
            img_url = options['img_url']
            if img_url is None:
                self.print_help("python manage.py", "realm_emoji")
                sys.exit(1)
            check_add_realm_emoji(realm, name, img_url)
            sys.exit(0)
        elif options["op"] == "remove":
            do_remove_realm_emoji(realm, name)
            sys.exit(0)
        else:
            # Unknown --op value: show usage and fail.
            self.print_help("python manage.py", "realm_emoji")
            sys.exit(1)
|
Python
| 0
|
f95d7011ff89badfadbd07da0226f67f6dbd27a5
|
Remove unused `organizations:new-tracebacks` flag. (#4083)
|
src/sentry/features/__init__.py
|
src/sentry/features/__init__.py
|
from __future__ import absolute_import
from .base import * # NOQA
from .handler import * # NOQA
from .manager import * # NOQA
# Global feature-flag registry for the application.
default_manager = FeatureManager()  # NOQA

# Register known flags.  Flags registered with a Feature class are scoped
# to that object type; flags without one are global.
default_manager.add('auth:register')
default_manager.add('organizations:api-keys', OrganizationFeature)  # NOQA
default_manager.add('organizations:create')
default_manager.add('organizations:sso', OrganizationFeature)  # NOQA
default_manager.add('organizations:onboarding', OrganizationFeature)  # NOQA
default_manager.add('organizations:callsigns', OrganizationFeature)  # NOQA
default_manager.add('organizations:reports:prepare', OrganizationFeature)  # NOQA
default_manager.add('organizations:reports:deliver', OrganizationFeature)  # NOQA
default_manager.add('projects:global-events', ProjectFeature)  # NOQA
default_manager.add('projects:quotas', ProjectFeature)  # NOQA
default_manager.add('projects:plugins', ProjectPluginFeature)  # NOQA

# expose public api
add = default_manager.add
get = default_manager.get
has = default_manager.has
|
from __future__ import absolute_import
from .base import * # NOQA
from .handler import * # NOQA
from .manager import * # NOQA
# Global feature-flag registry for the application.
default_manager = FeatureManager()  # NOQA

# Register known flags.  Flags registered with a Feature class are scoped
# to that object type; flags without one are global.
default_manager.add('auth:register')
default_manager.add('organizations:api-keys', OrganizationFeature)  # NOQA
default_manager.add('organizations:create')
default_manager.add('organizations:sso', OrganizationFeature)  # NOQA
default_manager.add('organizations:onboarding', OrganizationFeature)  # NOQA
default_manager.add('organizations:callsigns', OrganizationFeature)  # NOQA
default_manager.add('organizations:new-tracebacks', OrganizationFeature)  # NOQA
default_manager.add('organizations:reports:prepare', OrganizationFeature)  # NOQA
default_manager.add('organizations:reports:deliver', OrganizationFeature)  # NOQA
default_manager.add('projects:global-events', ProjectFeature)  # NOQA
default_manager.add('projects:quotas', ProjectFeature)  # NOQA
default_manager.add('projects:plugins', ProjectPluginFeature)  # NOQA

# expose public api
add = default_manager.add
get = default_manager.get
has = default_manager.has
|
Python
| 0
|
bbfcddbb21a6b6f40fafe8c88ca76ab4a0b4667b
|
add script to analysis the flow map
|
FlowNet/flowAnalysis.py
|
FlowNet/flowAnalysis.py
|
# When the movement of the objects in the video is not distinct to be
# captured by optical flow algorithm, training this "noisy" flow map
# against the ground truth labeling is risky. In this code, we would
# like to iterate through all the generated flow videos, and filter
# out the noisy flow map.
#
#
# Contact: Chih-Yao Ma at cyma@gatech.edu
# Last update: 05/17/2016
import time
import numpy as np
import cv2
import matplotlib.pyplot as plt

# cap = cv2.VideoCapture('v_HandStandPushups_g01_c04_flow.avi')
cap = cv2.VideoCapture('v_HandStandPushups_g12_c06_flow.avi')

# information of the video
# property identifier:
# 1: ?; 2: s/frame; 3: width; 4: height; 6: ?; 7: ?
# NOTE(review): these magic property indices are the legacy OpenCV
# CAP_PROP_* values (FPS/WIDTH/HEIGHT) — confirm against the cv2 version.
Fr = round(1 / cap.get(2))
Wd = int(cap.get(3))
Ht = int(cap.get(4))

# Define the codec and create VideoWriter object
# fourcc = cv2.cv.CV_FOURCC('X', 'V', 'I', 'D') # opencv 2.4
fourcc = cv2.VideoWriter_fourcc(*'XVID') # opencv 3.0
out = cv2.VideoWriter('out_flow.avi', fourcc, Fr, (Wd, Ht))

indFrame = 1

def close_event():
    plt.close() #timer calls this function after 3 seconds and closes the window

# Iterate frame-by-frame, showing each flow frame and a per-channel
# HSV histogram that stays on screen briefly before auto-closing.
while(cap.isOpened):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        print('--------------------------------------')
        print('Frame # ', indFrame)
        # convert back to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # extract the channels and flat them
        channel_0 = hsv[..., 0].flatten()
        channel_1 = hsv[..., 1].flatten()
        channel_2 = hsv[..., 2].flatten()
        # out.write(frame)
        # Display the resulting frame
        cv2.imshow('Processed frame', frame)
        # plot histogram for each channel
        # NOTE(review): hist(..., normed=...) was removed in newer
        # matplotlib; density= is the modern equivalent.
        fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(12, 4))
        ax0.hist(channel_0, 20, normed=1, histtype='bar', facecolor='r', alpha=0.75)
        ax0.set_title('Channel #0')
        ax1.hist(channel_1, 20, normed=1, histtype='bar', facecolor='g', alpha=0.75)
        ax1.set_title('Channel #1')
        ax2.hist(channel_2, 20, normed=1, histtype='bar', facecolor='b', alpha=0.75)
        ax2.set_title('Channel #2')
        # plot the figure for a short time
        plt.tight_layout()
        timer = fig.canvas.new_timer(interval = 4000) #creating a timer object and setting an interval of 3000 milliseconds
        timer.add_callback(close_event)
        timer.start()
        plt.show()
        # fname = 'histogramFrame_' + str(indFrame)
        # plt.savefig(fname)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
    indFrame = indFrame + 1

# When everything done, release the capture
cap.release()
out.release()
cv2.destroyAllWindows()
|
Python
| 0
|
|
629ccdc27d2eb3522def903cc42606e43c3f816b
|
Add script to write network related files
|
AdaptivePELE/analysis/writeNetworkFiles.py
|
AdaptivePELE/analysis/writeNetworkFiles.py
|
import os
import sys
import argparse
from AdaptivePELE.utilities import utilities
import matplotlib.pyplot as plt
def parseArguments():
    """Parse command-line arguments for the network-file writer.

    Returns a 4-tuple: (clustering object path, file-name suffix,
    metric column index, output path or None).
    """
    description = "Write the information related to the conformation network to file\n"
    arg_parser = argparse.ArgumentParser(description=description)
    arg_parser.add_argument("clusteringObject", type=str, help="Path to the clustering object")
    arg_parser.add_argument("suffix", type=str, help="Suffix to append to file names")
    arg_parser.add_argument("metricCol", type=int, help="Column of the metric of interest")
    arg_parser.add_argument("-o", type=str, default=None, help="Output path where to write the files")
    parsed = arg_parser.parse_args()
    return parsed.clusteringObject, parsed.suffix, parsed.metricCol, parsed.o
if __name__ == "__main__":
    clusteringObject, suffix, metricCol, outputPath = parseArguments()
    # Normalize the output path to end with a separator, or use cwd.
    if outputPath is not None:
        outputPath = os.path.join(outputPath, "")
    else:
        outputPath = ""
    if not os.path.exists(outputPath):
        os.makedirs(outputPath)
    sys.stderr.write("Reading clustering object...\n")
    cl = utilities.readClusteringObject(clusteringObject)
    # Pathway from the initial cluster to the metric-optimal cluster.
    optimalCluster = cl.getOptimalMetric()
    pathway = cl.createPathwayToCluster(optimalCluster)
    sys.stderr.write("Writing conformation network...\n")
    cl.writeConformationNetwork(outputPath+"conformationNetwork%s.edgelist" % suffix)
    sys.stderr.write("Writing FDT...\n")
    cl.writeFDT(outputPath+"FDT%s.edgelist" % suffix)
    sys.stderr.write("Writing pathway to optimal cluster...\n")
    # cl.writePathwayOptimalCluster(outputPath+"pathwayFDT%s.pdb" % suffix)
    cl.writePathwayTrajectory(pathway, outputPath+"pathwayFDT%s.pdb" % suffix)
    sys.stderr.write("Writing nodes population...\n")
    cl.writeConformationNodePopulation(outputPath+"nodesPopulation%s.txt" % suffix)
    sys.stderr.write("Writing nodes metrics...\n")
    cl.writeConformationNodeMetric(outputPath+"nodesMetric%s.txt" % suffix, metricCol)
    sys.stderr.write("Writing metastability indeces...\n")
    metInd = cl.calculateMetastabilityIndex()
    cl.writeMetastabilityIndex(outputPath+"nodesMetIndex%s.txt" % suffix)
    # Diagnostic plots along the pathway.
    # NOTE(review): metric columns 5 and 3 are assumed to be binding and
    # total energy respectively — confirm against the report format.
    plt.figure()
    plt.plot(pathway, [cl.clusters.clusters[i].getMetricFromColumn(5) for i in pathway])
    plt.xlabel("Cluster number")
    plt.ylabel("Binding energy(kcal/mol)")
    plt.savefig(outputPath+"bindingEnergy_%s.png" % suffix)
    plt.figure()
    plt.plot(pathway, [cl.clusters.clusters[i].getMetricFromColumn(3) for i in pathway])
    plt.xlabel("Cluster number")
    plt.ylabel("Energy(kcal/mol)")
    plt.savefig(outputPath+"totalEnergy_%s.png" % suffix)
    plt.figure()
    plt.plot(pathway, [metInd[i] for i in pathway])
    plt.xlabel("Cluster number")
    plt.ylabel("Metastability index")
    plt.savefig(outputPath+"metIndex_%s.png" % suffix)
    plt.show()
|
Python
| 0
|
|
daa4565abe4059e8588ddf374fde0f51d9ec784e
|
Create a skeleton for node propagation integration tests
|
test/integration/test_node_propagation.py
|
test/integration/test_node_propagation.py
|
class TestPropagation(object):
    """Placeholder integration-test class for node propagation."""

    def test_node_propagation(self):
        """
        Tests that check node propagation
        1) Spin up four servers.
        2) Make the first one send a sync request to all three others.
        3) Count the numbers of requests made.
        4) Check databases to see that they all know each other.
        """
        # Skeleton only: no assertions implemented yet.
        pass
|
Python
| 0.000001
|
|
5b9b27d98cad06f0bbd67026b6533dee7c218df7
|
update series server code shifted from custom script to py file
|
setup/doctype/update_series/update_series.py
|
setup/doctype/update_series/update_series.py
|
# Please edit this list and import only required elements
import webnotes
from webnotes.utils import add_days, add_months, add_years, cint, cstr, date_diff, default_fields, flt, fmt_money, formatdate, generate_hash, getTraceback, get_defaults, get_first_day, get_last_day, getdate, has_common, month_name, now, nowdate, replace_newlines, sendmail, set_default, str_esc_quote, user_format, validate_email_add
from webnotes.model import db_exists
from webnotes.model.doc import Document, addchild, removechild, getchildren, make_autoname, SuperDocType
from webnotes.model.doclist import getlist, copy_doclist
from webnotes.model.code import get_obj, get_server_obj, run_server_obj, updatedb, check_syntax
from webnotes import session, form, is_testing, msgprint, errprint
set = webnotes.conn.set
sql = webnotes.conn.sql
get_value = webnotes.conn.get_value
in_transaction = webnotes.conn.in_transaction
convert_to_lists = webnotes.conn.convert_to_lists
# -----------------------------------------------------------------------------------------
class DocType:
    """Controller for the Update Series doctype.

    Lets an administrator view and change the `current` counter of a
    naming-series prefix stored in `tabSeries`.
    """

    def __init__(self, doc, doclist=None):
        self.doc = doc
        # Fix: the original used `doclist=[]`, a shared mutable default.
        self.doclist = doclist if doclist is not None else []

    def update_series(self):
        """Set the `current` value of an existing series prefix."""
        series = sql("select name,current from `tabSeries` where name = %s", self.doc.prefix, as_dict=1)
        if series:
            msgprint("This is going to update Series with Prefix : " + series[0]['name'] + " from Current : " + cstr(series[0]['current']) + " to Current : " + cstr(self.doc.current))
            # Fix: parameterized query -- the original interpolated values
            # with `%`, which is vulnerable to SQL injection.
            sql("update `tabSeries` set current = %s where name = %s", (self.doc.current, series[0]['name']))
            msgprint("Series Updated Successfully")
        else:
            msgprint("Please Check Prefix as there is no such Prefix : " + self.doc.prefix + " Or Try Insert Button")

    def insert_series(self):
        """Create a new series prefix with the given `current` value."""
        series = sql("select name,current from `tabSeries` where name = %s", self.doc.prefix, as_dict=1)
        if series:
            msgprint("Series with Prefix : " + series[0]['name'] + "already in the system . Try Update Button")
        else:
            msgprint("This is going to Insert Series with Prefix : " + cstr(self.doc.prefix) + " Current: " + cstr(self.doc.current))
            # Fix: parameterized insert instead of `%` string interpolation.
            sql("insert into `tabSeries` (name,current) values (%s, %s)", (self.doc.prefix, self.doc.current))
            msgprint("Series Inserted Successfully")
|
Python
| 0
|
|
4eab434002c99daf9c302cb1007e7ec384453aae
|
Fix cherrypy example
|
examples/cherrypysample.py
|
examples/cherrypysample.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8

import bottle


# GET / responds with a dict, which bottle serializes to JSON.
@bottle.get('/')
def index():
    return {'key': 'value'}


# Listen on all interfaces, using the CherryPy server adapter.
bottle.run(port=8080, host="0.0.0.0", server="cherrypy")
|
Python
| 0.000045
|
|
93e07841d961fb7956612339f13dfd4e8ddd8bac
|
Create RPi_Final.py
|
RPi_Final.py
|
RPi_Final.py
|
from random import *
|
Python
| 0.000001
|
|
2eba3f5072b547829964eac9d2d5b03076a49faf
|
add firmwareupdate sample
|
examples/firmwareupdate.py
|
examples/firmwareupdate.py
|
from sakuraio.hardware.rpi import SakuraIOGPIO
# from sakuraio.hardware.rpi import SakuraIOSMBus

import time

# Talk to the sakura.io module over GPIO (SMBus variant left commented out).
sakuraio = SakuraIOGPIO()
# sakuraio = SakuraIOSMBus()

# The module must be unlocked before a firmware update can start; the short
# sleep gives it time to process the unlock command.
sakuraio.unlock()
time.sleep(1)
sakuraio.update_firmware()
# print(sakuraio.get_firmware_version())
|
Python
| 0
|
|
16c57e5f3bd63667c7ca0b828e1f0fcd85d64b76
|
Create SecureMSG.py
|
SecureMSG.py
|
SecureMSG.py
|
#!/usr/python
#
# I dedicate this application for my best friend, Robert Niemiec :)
#
# Copyright (c) 2015 Dawid Wiktor
# This app is writed for all whistleblowers, journalists and
# cryptoanarchists. Use it when you need. Be carefull! NSA watchin'
#
# This is the Open Source Software. You can freely use it, edit code, and
# ditribute. But you should respect Attribution.
def encryption():
    """Prompt for an integer key and a message, then print each character's
    shifted code point (a Caesar-style shift on ord values), one per line.

    Bug fix: the original indexed `rawlist[dummy]` after the final
    increment, so it always raised IndexError once the last character was
    processed.  Iterating over the string directly removes the manual
    index bookkeeping entirely.
    """
    key = input("Please, input a number here to be used as the key.\n")
    key = int(key)
    rawData = input("Enter string here.\n")
    for ch in rawData:
        # Shifted code point of this character.
        print(ord(ch) + key)
# Main loop: keep encrypting until the user answers anything other than "y".
run = "y"
while run == "y":
    encryption()
    run = input("Do you want to encrypt this? (y/n)\n")
    if run != "y":
        exit()
|
Python
| 0.000001
|
|
522fb2e4b9fdf46abed3b5ca8ba43758b22253a1
|
add missing file
|
addons/web/ir_module.py
|
addons/web/ir_module.py
|
from openerp.osv import osv
import openerp.wsgi.core as oewsgi
from common.http import Root
class ir_module(osv.Model):
    """Extend ir.module.module so that refreshing the module list also
    reloads the web addons in every mounted Root WSGI handler."""

    _inherit = 'ir.module.module'

    def update_list(self, cr, uid, context=None):
        """Refresh the module list; reload web addons if anything changed."""
        counts = super(ir_module, self).update_list(cr, uid, context=context)
        # counts is (updated, added); only reload when something changed.
        if tuple(counts) != (0, 0):
            root_handlers = [h for h in oewsgi.module_handlers
                             if isinstance(h, Root)]
            for handler in root_handlers:
                handler._load_addons()
        return counts
|
Python
| 0.000001
|
|
9c7d04b4eecb392b368e4b84a48197682ea63b8d
|
Add blackroom/python/auto-push.py
|
blackroom/python/auto-push.py
|
blackroom/python/auto-push.py
|
#!/usr/bin/python3
# -*-coding:utf-8 -*-
# ProjectName:Bilibili小黑屋爬虫v2
# Author:zhihaofans
# Github:https://github.com/zhihaofans/Bilibili/tree/master/blackroom
# PythonVersion:3.x
import requests
import os
import json
import time
from bilibili import blackRoom
from zhihaofans import file as f
savePath = f.getUpPath(f.getMyPyPath()) + '/data/'
savePath_forever = savePath + '/forever/'
savePath_noForever = savePath + '/user/'
savePath_backup = savePath + '/backup/'
savePath_history = savePath + '/history/'
blList = []
updateTime = 0
gitPath = 'git'
gitLocalBranch = 'origin'
gitRemoteBranch = 'master'
def saveData(data):
    """Persist the crawled blackroom records in several layouts: full dump,
    timestamped backup, per-ban history, and per-user split (permanent bans
    separated from timed ones), then record the crawl timestamp.

    Bug fix: declare ``updateTime`` global -- the original assignment only
    created a function-local variable, so getPush() never saw the new
    timestamp.
    """
    global updateTime
    thisTime = str(time.time()).split(".")[0]
    updateTime = thisTime
    f.write(savePath + "blackroom.json", json.dumps(data))
    # Timestamped backup of the full dump.
    print("备份数据")
    f.write(savePath_backup + thisTime + ".json", json.dumps(data))
    # One history file per ban record id.
    print("历史数据")
    for record in data:
        f.write(savePath_history + str(record['id']) + ".json", json.dumps(record), True)
    # Split permanent bans from timed bans, keyed by user id.
    print("永久封禁与限时封禁数据分开按用户储存")
    for record in data:
        if record["blockedForever"]:
            f.write(savePath_forever +
                    str(record['uid']) + ".json", json.dumps(record), True)
        else:
            filePath = savePath_noForever + str(record['uid']) + "/"
            f.mk(filePath)
            f.write(filePath + str(record['id']) + ".json", json.dumps(record), True)
    f.write(savePath + "update.txt", thisTime)
def mkdirs():
    """Ensure every output directory used by saveData() exists."""
    for directory in (savePath_forever, savePath_noForever,
                      savePath_backup, savePath_history):
        f.mk(directory)
def getPush():
    """Stage, commit and push the refreshed data from the repository root.

    Bug fix: the commit message's closing parenthesis must sit inside the
    quoted string -- the original built ``-m "Auto update blackroom(TIME")``
    with the paren after the closing quote, which breaks shell quoting.
    """
    cmdPath = os.getcwd()  # NOTE(review): saved but never restored -- confirm whether the caller expects the cwd back
    os.chdir(f.getUpPath(f.getMyPyPath()))
    print(getCmd("git add *"))
    print(getCmd("git status"))
    print(getCmd('git commit -m "Auto update blackroom(' + str(updateTime) + ')"'))
    print(getCmd("git push " + gitLocalBranch + " " + gitRemoteBranch))
def getCmd(cmdText):
    """Run a shell command and return its captured stdout as a string."""
    with os.popen(cmdText) as pipe:
        return pipe.read()
def main():
    # Entry point: prepare the output directories, crawl the blackroom
    # listing, persist it, then wait for a keypress before exiting.
    mkdirs()
    print("开始抓取小黑屋数据")
    brList = blackRoom.getData()
    print("保存数据")
    saveData(brList)
    print("抓取完成")
    input("按回车退出")
    exit()
# Run only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
Python
| 0.000013
|
|
380a87e71c347eab5d9c5d22a255753e62e1d739
|
Add the original game code to the files to show progress made during the week using classes and other skills
|
Original_python_game.py
|
Original_python_game.py
|
import random

# Higher-or-lower guessing game (only the first round is implemented).
GuessesTaken = 0

print ("Hello and welcome to my higher or lower number guessing game.")
print ("Whats your name?")
myName = input()

# The number shown to the player and the hidden follow-up, both in [1, 20].
# (The original also generated number2..number20, which were never used.)
number = random.randint(1, 20)
number1 = random.randint(1, 20)

print ("So, your names " + myName + " Hmmmmmm")
print ("Ok " + myName + " here is your first number")
print ("")
print (number)
print ("")
print ("Also keep in mind that the numbers range from 1 to 20")
print ("")
print ("So will the next number be higher or lower?")
print ("")
print ("")
print ("Use h to guess Higher and use l to guess Lower.")
guess = input('Enter either h or l: ')

# Bug fix: number1 is an int, so it must be wrapped in str() before being
# concatenated -- the original raised TypeError on every branch.
if number > number1 and guess == "l":
    print ("Well done the number was " + str(number1) + " Now onto stage 2")
elif number > number1 and guess == "h":
    print ("Incorrect the number was " + str(number1) + "GAME OVER")
elif number < number1 and guess == "h":
    print ("Well done the number was " + str(number1) + " Now onto stage 2")
elif number < number1 and guess == "l":
    print ("Incorrect the number was " + str(number1) + " GAME OVER")
else:
    # Edge case the original ignored: the two numbers are equal.
    print ("The numbers were equal: " + str(number1))
|
Python
| 0
|
|
958e6ca0ba5be68802e61a450aeb2bf39ea5d5ba
|
Create psf2pdb.py
|
psf2pdb.py
|
psf2pdb.py
|
import sys

# Usage: psf2pdb.py <input.pdb> <input.psf> <output.pdb>
# Appends CONECT records derived from the PSF bond list to a PDB file.
# NOTE: Python 2 script (uses the print statement).
pdbfile = open(sys.argv[1],'r')
psfile = open(sys.argv[2],'r')

# Copy every PDB line up to (but not including) the END record.
inline = pdbfile.readline()
output = ''
while inline != 'END\n':
    output = output + inline
    inline = pdbfile.readline()
    if inline == '': #sanity check: EOF reached without an END record
        print "Error"
        exit()

# Skip ahead in the PSF to the !NBOND: section header.
# NOTE(review): assumes the first PSF line has at least two tokens -- confirm
# against real PSF files.
inline = psfile.readline().split()
while inline[1] != '!NBOND:':
    inline = psfile.readline().split()
    while len(inline)<1:
        inline = psfile.readline().split()

# Emit one CONECT record per bond (pair of atom indices); inline[0] holds
# the bond count, and the token buffer is refilled line by line as the
# pairs are consumed.
bondlist = psfile.readline().split()
for i in range(int(inline[0])):
    new = bondlist.pop(0)
    output = output + 'CONECT '+new+' '+bondlist.pop(0)+'\n'
    if len(bondlist)==0:
        bondlist = psfile.readline().split()

outfile = open(sys.argv[3],'w')
outfile.write(output)
|
Python
| 0.000012
|
|
e73d16d4051c6bc66daf415d2da4e8d204a97004
|
Add rainbow function
|
rainbow.py
|
rainbow.py
|
import re
import colorsys
from pymol import cmd
def rainbow(range_string):
    """
    DESCRIPTION

    Colors rainbow spectrum for a selection given in range string.
    The difference between coloring in rainbow with built-in 'spectrum' is
    that this relies on the segment order in range string (not alphabetically
    sorted), so it can handle multiple chain domain as in insulin where
    usually chain B should be before chain A in many cases.

    USAGE

    rainbow range_string

    ARGUMENTS

    range_string = 'B:2-29,A:1-21'
    """
    seg_ptn = re.compile(r'([A-Za-z0-9]{1}):(-?[0-9]+[A-Z]?)-(-?[0-9]+[A-Z]?)')
    all_resi = []
    for seg in seg_ptn.finditer(range_string):
        chain = seg.group(1)
        local_space = {'resnums': [], 'chain': chain}
        groups = list(seg.groups())
        for i in [1, 2]:
            # Escape a leading minus so PyMOL does not parse it as a range
            # operator.
            if groups[i].startswith('-'):
                groups[i] = '\\' + groups[i]
        # Bug fix: format with the escaped `groups`, not seg.groups() --
        # the original computed the escapes and then never used them.
        cmd.iterate('c. %s and i. %s-%s and n. CA' % tuple(groups),
                    'resnums.append(resi)', space=local_space)
        all_resi.append(local_space)

    # Bug fix: sum() replaces functools.reduce, which was never imported
    # (NameError on Python 3).
    total = sum(len(seg['resnums']) for seg in all_resi)
    # Guard against ZeroDivisionError when only one residue matched.
    denom = max(total - 1, 1)
    cnt = 0
    for seg in all_resi:
        chain = seg['chain']
        for resi in seg['resnums']:
            # Hue sweeps from blue (2/3) down to red (0) across the residues.
            hue = colorsys.TWO_THIRD - colorsys.TWO_THIRD * cnt / denom
            red, green, blue = colorsys.hsv_to_rgb(hue, 1, 1)
            hexcolor = hex((int(red * 255) << 16) + (int(green * 255) << 8) +
                           int(blue * 255))
            cmd.color(hexcolor, 'c. %s and i. %s' % (chain, resi))
            cnt += 1
# Register `rainbow` as a PyMOL command when this file is loaded under a
# different module name (e.g. via PyMOL's `run`).
if __name__ != "rainbow":
    cmd.extend('rainbow', rainbow)
|
Python
| 0
|
|
cbaed7d194f4a91198fc097d4657ad327819af4b
|
Add new migration.
|
invite/migrations/0004_auto_20191126_1740.py
|
invite/migrations/0004_auto_20191126_1740.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-11-26 17:40
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated migration: tightens field attributes (help_text,
    # max_length, uuid default) on Invitation and PasswordResetInvitation.
    # No data migration involved; do not edit the operations by hand.

    dependencies = [
        ('invite', '0003_abstract_invitation_auto_now_add'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invitation',
            name='activation_code',
            field=models.CharField(default=uuid.uuid4, editable=False, help_text='unique id, generated on email submission', max_length=36, unique=True),
        ),
        migrations.AlterField(
            model_name='invitation',
            name='date_invited',
            field=models.DateField(auto_now_add=True, help_text='the day on which the superuser invited the potential member'),
        ),
        migrations.AlterField(
            model_name='invitation',
            name='email',
            field=models.EmailField(help_text="the potential member's email address", max_length=41),
        ),
        migrations.AlterField(
            model_name='passwordresetinvitation',
            name='activation_code',
            field=models.CharField(default=uuid.uuid4, editable=False, help_text='unique id, generated on email submission', max_length=36, unique=True),
        ),
        migrations.AlterField(
            model_name='passwordresetinvitation',
            name='date_invited',
            field=models.DateField(auto_now_add=True, help_text='the day on which the superuser invited the potential member'),
        ),
        migrations.AlterField(
            model_name='passwordresetinvitation',
            name='email',
            field=models.EmailField(help_text="the potential member's email address", max_length=41),
        ),
    ]
|
Python
| 0
|
|
47044317e4067fb38bf9e0fdb2e9c5f9ccb78053
|
add migration
|
pokemon_v2/migrations/0006_auto_20200725_2205.py
|
pokemon_v2/migrations/0006_auto_20200725_2205.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Pokemon height, weight and
    # base_experience nullable. Do not edit the operations by hand.

    dependencies = [
        ("pokemon_v2", "0005_auto_20200709_1930"),
    ]

    operations = [
        migrations.AlterField(
            model_name="pokemon",
            name="height",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name="pokemon",
            name="weight",
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name="pokemon",
            name="base_experience",
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
Python
| 0.000001
|
|
bad97abfe7fd93cefac10d46b5434b63cc7e3d2b
|
add line to end of file
|
keras_contrib/constraints.py
|
keras_contrib/constraints.py
|
from __future__ import absolute_import
from . import backend as K
from keras.utils.generic_utils import get_from_module
from keras.constraints import *
class Clip(Constraint):
    """Constrains each weight to the closed interval [-c, c].

    # Arguments
        c: Clipping parameter.
    """

    def __init__(self, c=0.01):
        self.c = c

    def __call__(self, p):
        clipped = K.clip(p, -self.c, self.c)
        return clipped

    def get_config(self):
        config = {'name': self.__class__.__name__,
                  'c': self.c}
        return config
# Aliases.
clip = Clip
|
from __future__ import absolute_import
from . import backend as K
from keras.utils.generic_utils import get_from_module
from keras.constraints import *
class Clip(Constraint):
"""Clips weights to [-c, c].
# Arguments
c: Clipping parameter.
"""
def __init__(self, c=0.01):
self.c = c
def __call__(self, p):
return K.clip(p, -self.c, self.c)
def get_config(self):
return {'name': self.__class__.__name__,
'c': self.c}
# Aliases.
clip = Clip
|
Python
| 0.000001
|
d558ed9875cf99ebdf6915e7acd877fc7fae69f3
|
Add missing migration
|
candidates/migrations/0028_auto_20160411_1055.py
|
candidates/migrations/0028_auto_20160411_1055.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('candidates', '0027_create_standard_complex_fields'),
]
operations = [
migrations.AlterField(
model_name='complexpopolofield',
name='info_type_key',
field=models.CharField(help_text="Name of the field in the array that stores the type ('note' for links, 'contact_type' for contacts, 'scheme' for identifiers)", max_length=100),
),
migrations.AlterField(
model_name='complexpopolofield',
name='info_value_key',
field=models.CharField(help_text="Name of the field in the array that stores the value, e.g 'url' for links, 'value' for contact_type, 'identifier' for identifiers", max_length=100),
),
migrations.AlterField(
model_name='complexpopolofield',
name='old_info_type',
field=models.CharField(help_text="Used for supporting info_types that have been renamed. As such it's rarely used.", max_length=100, blank=True),
),
]
|
Python
| 0.0002
|
|
90a5242a93beda053ad91adca0728995232e23d2
|
Create toggle_editor_text_console.py
|
cg/blender/scripts/toggle_editor_text_console.py
|
cg/blender/scripts/toggle_editor_text_console.py
|
import bpy

# Bind Shift+Esc in both the Python Console and the Text editor so it flips
# the hosting area to the other editor type.
keyconfig = bpy.context.window_manager.keyconfigs.user

args = ('wm.context_set_enum', 'ESC', 'PRESS')
kwargs = {'shift':True}

for source, destination in (('Console', 'TEXT_EDITOR'), ('Text', 'CONSOLE')):
    # New keymap item: set the area's type enum to the opposite editor.
    kmi = keyconfig.keymaps[source].keymap_items.new(*args, **kwargs)
    properties = kmi.properties
    properties.data_path = 'area.type'
    properties.value = destination
|
Python
| 0
|
|
cb505bd4c86c39bd7ce575a7d72e4a3d33875b93
|
Create polyDataMake.py
|
figureCode/polyDataMake.py
|
figureCode/polyDataMake.py
|
import numpy as np
from random import seed, getstate, setstate
def polyDataMake(n=21, deg=3, sampling='sparse'):
    """Generate a synthetic 1-D polynomial regression dataset.

    Args:
        n: number of training points ('thibaux' sampling only).
        deg: degree (2 or 3) of the true polynomial for non-'thibaux' modes.
        sampling: one of 'irregular', 'sparse', 'dense', 'thibaux'.

    Returns:
        Tuple (xtrain, ytrain, xtest, ytestNoisefree, ytestNoisy, sigma2);
        all array outputs are reshaped to column vectors.

    Raises:
        ValueError: on an unrecognized `sampling` or `deg`.

    Bug fix: the original seeded and saved/restored only the `random`
    module's state, but the noise is drawn from `np.random`, so the data
    were not actually reproducible.  Both RNG states are now handled.
    """
    old_state = getstate()
    old_np_state = np.random.get_state()
    seed(0)
    np.random.seed(0)

    if sampling == 'irregular':
        # NOTE(review): shape (12, 1) here while the noise below is (12,),
        # so the addition broadcasts to (12, 12) -- confirm intended.
        xtrain = np.array([np.linspace(-1, -.5, 6), np.linspace(3, 3.5, 6)]).reshape(-1, 1)
    elif sampling == 'sparse':
        xtrain = np.array([-3, -2, 0, 2, 3])
    elif sampling == 'dense':
        xtrain = np.array(np.arange(-5, 5, .6))
    elif sampling == 'thibaux':
        xtrain = np.linspace(0, 20, n)
    else:
        raise ValueError('Unrecognized sampling provided.')

    if sampling == 'thibaux':
        seed(654321)
        np.random.seed(654321)
        xtest = np.linspace(0, 20, 201)
        sigma2 = 4
        w = np.array([-1.5, 1.0 / 9.0]).T

        def fun(x):
            return w[0] * x + w[1] * (x ** 2)
    else:
        xtest = np.linspace(-7, 7, 141)
        if deg == 2:
            def fun(x):
                return 10 + x + x ** 2
        elif deg == 3:
            def fun(x):
                return 10 + x + x ** 3
        else:
            raise ValueError('Unrecognized degree.')
        sigma2 = 25

    ytrain = fun(xtrain) + np.random.normal(size=xtrain.shape[0]) * np.sqrt(sigma2)
    ytestNoisefree = fun(xtest)
    ytestNoisy = ytestNoisefree + np.random.normal(size=xtest.shape[0]) * np.sqrt(sigma2)

    def shp(x):
        # Force every returned array into a column-vector shape.
        return np.asarray(x).reshape(-1, 1)

    # Restore the caller's RNG states.
    setstate(old_state)
    np.random.set_state(old_np_state)
    return shp(xtrain), shp(ytrain), shp(xtest), shp(ytestNoisefree), shp(ytestNoisy), sigma2
|
Python
| 0
|
|
53dc0a5a1e8cc94dd23f6b6cfa1997f7b8b6f926
|
call FSL NIDM export from command line
|
nidm-results_fsl.py
|
nidm-results_fsl.py
|
#!/usr/bin/python
"""
Export neuroimaging results created with FSL feat following NIDM-Results
specification. The path to feat directory must be passed as first argument.
@author: Camille Maumet <c.m.j.maumet@warwick.ac.uk>
@copyright: University of Warwick 2013-2014
"""
import sys
import os
from fsl_exporter.fsl_exporter import FSLtoNIDMExporter
if __name__ == "__main__":
    # Remove first argument (script name)
    num_args = len(sys.argv)-1
    sys.argv.pop(0)
    args = sys.argv

    usage = "Usage: python nidm-results_fsl.py path/to/feat/dir"
    # Exactly one argument expected: the FEAT directory.
    if num_args != 1:
        raise Exception(usage)

    feat_dir = args[0]
    if not os.path.isdir(feat_dir):
        raise Exception("Unknown directory: "+str(feat_dir))

    # Parse the FEAT results and write the NIDM-Results export into
    # <feat_dir>/nidm.  (Python 2 script: note the print statement.)
    fslnidm = FSLtoNIDMExporter(feat_dir=feat_dir, version="0.2.0")
    fslnidm.parse()
    fslnidm.export()

    print 'NIDM export available at: '+str(os.path.join(feat_dir, "nidm"))
|
Python
| 0
|
|
88fe28ea1bca1f0f0784828592c2414e85e5ceb9
|
add update service
|
homeassistant/components/sensor/speedtest.py
|
homeassistant/components/sensor/speedtest.py
|
"""
homeassistant.components.sensor.speedtest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Speedtest.net sensor based on speedtest-cli.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import logging
import sys
import re
from datetime import timedelta
from subprocess import check_output
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_change
from homeassistant.components.sensor import DOMAIN
import homeassistant.util.dt as dt_util
REQUIREMENTS = ['speedtest-cli==0.3.4']
_LOGGER = logging.getLogger(__name__)
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms\nDownload:\s(\d+\.\d+)'
r'\sMbit/s\nUpload:\s(\d+\.\d+)\sMbit/s\n')
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MINUTE = 'minute'
CONF_HOUR = 'hour'
CONF_DAY = 'day'
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Setup the Speedtest sensor. """
    # One shared data object: a single speedtest run feeds every sensor.
    data = SpeedtestData(hass, config)
    dev = []
    for sensor in config[CONF_MONITORED_CONDITIONS]:
        if sensor not in SENSOR_TYPES:
            _LOGGER.error('Sensor type: "%s" does not exist', sensor)
        else:
            dev.append(SpeedtestSensor(data, sensor))
    add_devices(dev)

    def update(call=None):
        """ Update service for manual updates. """
        # Bypass the schedule: run the speedtest now and refresh every
        # sensor entity with the fresh values.
        data.update(dt_util.now())
        for sensor in dev:
            sensor.update()

    # Expose an `update_speedtest` service for on-demand runs.
    hass.services.register(DOMAIN, 'update_speedtest', update)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
    """ One speedtest.net metric (ping, download or upload) as a sensor. """

    def __init__(self, speedtest_data, sensor_type):
        self._name = SENSOR_TYPES[sensor_type][0]
        self.speedtest_client = speedtest_data
        self.type = sensor_type
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[self.type][1]

    @property
    def name(self):
        """ Display name, e.g. 'Speedtest Ping'. """
        return 'Speedtest ' + self._name

    @property
    def state(self):
        """ Returns the state of the device. """
        return self._state

    @property
    def unit_of_measurement(self):
        """ Unit of measurement of this entity, if any. """
        return self._unit_of_measurement

    def update(self):
        """ Pull the latest measurement for this sensor's metric. """
        data = self.speedtest_client.data
        if data is None:
            return
        if self.type in ('ping', 'download', 'upload'):
            self._state = data[self.type]
class SpeedtestData(object):
    """ Gets the latest data from speedtest.net. """

    def __init__(self, hass, config):
        # Parsed results of the last run:
        # {'ping': ms, 'download': Mbit/s, 'upload': Mbit/s}, or None.
        self.data = None
        self.hass = hass
        self.path = hass.config.path
        # Schedule runs from the platform config (default: minute 0 of
        # every hour).
        track_time_change(self.hass, self.update,
                          minute=config.get(CONF_MINUTE, 0),
                          hour=config.get(CONF_HOUR, None),
                          day=config.get(CONF_DAY, None))

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self, now):
        """ Gets the latest data from speedtest.net. """
        _LOGGER.info('Executing speedtest')
        # Run the bundled speedtest_cli in --simple mode and split its
        # "Ping/Download/Upload" output with _SPEEDTEST_REGEX; groups 1-3
        # are the three numeric values.
        re_output = _SPEEDTEST_REGEX.split(
            check_output([sys.executable, self.path(
                'lib', 'speedtest_cli.py'), '--simple']).decode("utf-8"))
        self.data = {'ping': round(float(re_output[1]), 2),
                     'download': round(float(re_output[2]), 2),
                     'upload': round(float(re_output[3]), 2)}
|
"""
homeassistant.components.sensor.speedtest
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Speedtest.net sensor based on speedtest-cli.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.speedtest/
"""
import logging
import sys
import re
from datetime import timedelta
from subprocess import check_output
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_time_change
REQUIREMENTS = ['speedtest-cli==0.3.4']
_LOGGER = logging.getLogger(__name__)
_SPEEDTEST_REGEX = re.compile(r'Ping:\s(\d+\.\d+)\sms\nDownload:\s(\d+\.\d+)'
r'\sMbit/s\nUpload:\s(\d+\.\d+)\sMbit/s\n')
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MINUTE = 'minute'
CONF_HOUR = 'hour'
CONF_DAY = 'day'
SENSOR_TYPES = {
'ping': ['Ping', 'ms'],
'download': ['Download', 'Mbit/s'],
'upload': ['Upload', 'Mbit/s'],
}
# Return cached results if last scan was less then this time ago
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Setup the Speedtest sensor. """
data = SpeedtestData(hass, config)
dev = []
for sensor in config[CONF_MONITORED_CONDITIONS]:
if sensor not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', sensor)
else:
dev.append(SpeedtestSensor(data, sensor))
add_devices(dev)
# pylint: disable=too-few-public-methods
class SpeedtestSensor(Entity):
""" Implements a speedtest.net sensor. """
def __init__(self, speedtest_data, sensor_type):
self._name = SENSOR_TYPES[sensor_type][0]
self.speedtest_client = speedtest_data
self.type = sensor_type
self._state = None
self._unit_of_measurement = SENSOR_TYPES[self.type][1]
@property
def name(self):
return '{} {}'.format('Speedtest', self._name)
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
def update(self):
""" Gets the latest data from Forecast.io and updates the states. """
data = self.speedtest_client.data
if data is not None:
if self.type == 'ping':
self._state = data['ping']
elif self.type == 'download':
self._state = data['download']
elif self.type == 'upload':
self._state = data['upload']
class SpeedtestData(object):
""" Gets the latest data from speedtest.net. """
def __init__(self, hass, config):
self.data = None
self.hass = hass
self.path = hass.config.path
track_time_change(self.hass, self.update,
minute=config.get(CONF_MINUTE, 0),
hour=config.get(CONF_HOUR, None),
day=config.get(CONF_DAY, None))
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, now):
""" Gets the latest data from speedtest.net. """
_LOGGER.info('Executing speedtest')
re_output = _SPEEDTEST_REGEX.split(
check_output([sys.executable, self.path(
'lib', 'speedtest_cli.py'), '--simple']).decode("utf-8"))
self.data = {'ping': round(float(re_output[1]), 2),
'download': round(float(re_output[2]), 2),
'upload': round(float(re_output[3]), 2)}
|
Python
| 0
|
cfb3384ee31945d0afef6c558b873d956247e791
|
Add link to docs
|
homeassistant/components/switch/tellstick.py
|
homeassistant/components/switch/tellstick.py
|
"""
homeassistant.components.switch.tellstick
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Tellstick switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tellstick.html
"""
import logging
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP,
ATTR_FRIENDLY_NAME)
from homeassistant.helpers.entity import ToggleEntity
import tellcore.constants as tellcore_constants
from tellcore.library import DirectCallbackDispatcher
SINGAL_REPETITIONS = 1
REQUIREMENTS = ['tellcore-py==1.1.2']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
    """ Find and return Tellstick switches. """
    try:
        import tellcore.telldus as telldus
    except ImportError:
        logging.getLogger(__name__).exception(
            "Failed to import tellcore")
        return

    # Direct dispatch: device-event callbacks run synchronously in the
    # calling thread.
    core = telldus.TelldusCore(callback_dispatcher=DirectCallbackDispatcher())
    signal_repetitions = config.get('signal_repetitions', SINGAL_REPETITIONS)

    # Telldus lists dimmers and switches together; keep only the devices
    # that do NOT support dimming as switches.
    switches_and_lights = core.devices()

    switches = []

    for switch in switches_and_lights:
        if not switch.methods(tellcore_constants.TELLSTICK_DIM):
            switches.append(
                TellstickSwitchDevice(switch, signal_repetitions))

    def _device_event_callback(id_, method, data, cid):
        """ Called from the TelldusCore library to update one device """
        for switch_device in switches:
            if switch_device.tellstick_device.id == id_:
                switch_device.update_ha_state()
                break

    callback_id = core.register_device_event(_device_event_callback)

    def unload_telldus_lib(event):
        """ Un-register the callback bindings """
        if callback_id is not None:
            core.unregister_callback(callback_id)

    # Release the native callback on Home Assistant shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, unload_telldus_lib)

    add_devices_callback(switches)
class TellstickSwitchDevice(ToggleEntity):
    """ Represents a Tellstick switch. """

    # Only consider on/off commands when reading back the last sent state.
    last_sent_command_mask = (tellcore_constants.TELLSTICK_TURNON |
                              tellcore_constants.TELLSTICK_TURNOFF)

    def __init__(self, tellstick_device, signal_repetitions):
        self.tellstick_device = tellstick_device
        self.state_attr = {ATTR_FRIENDLY_NAME: tellstick_device.name}
        self.signal_repetitions = signal_repetitions

    @property
    def should_poll(self):
        """ Tells Home Assistant not to poll this entity. """
        return False

    @property
    def name(self):
        """ Returns the name of the switch if any. """
        return self.tellstick_device.name

    @property
    def state_attributes(self):
        """ Returns optional state attributes. """
        return self.state_attr

    @property
    def is_on(self):
        """ True if switch is on. """
        last = self.tellstick_device.last_sent_command(
            self.last_sent_command_mask)
        return last == tellcore_constants.TELLSTICK_TURNON

    def _send_repeated(self, action):
        """Send a radio command signal_repetitions times, then refresh state."""
        for _ in range(self.signal_repetitions):
            action()
        self.update_ha_state()

    def turn_on(self, **kwargs):
        """ Turns the switch on. """
        self._send_repeated(self.tellstick_device.turn_on)

    def turn_off(self, **kwargs):
        """ Turns the switch off. """
        self._send_repeated(self.tellstick_device.turn_off)
|
"""
homeassistant.components.switch.tellstick
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for Tellstick switches.
Because the tellstick sends its actions via radio and from most
receivers it's impossible to know if the signal was received or not.
Therefore you can configure the switch to try to send each signal repeatedly
with the config parameter signal_repetitions (default is 1).
signal_repetitions: 3
"""
import logging
from homeassistant.const import (EVENT_HOMEASSISTANT_STOP,
ATTR_FRIENDLY_NAME)
from homeassistant.helpers.entity import ToggleEntity
import tellcore.constants as tellcore_constants
from tellcore.library import DirectCallbackDispatcher
SINGAL_REPETITIONS = 1
REQUIREMENTS = ['tellcore-py==1.1.2']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Find and return Tellstick switches. """
try:
import tellcore.telldus as telldus
except ImportError:
logging.getLogger(__name__).exception(
"Failed to import tellcore")
return
core = telldus.TelldusCore(callback_dispatcher=DirectCallbackDispatcher())
signal_repetitions = config.get('signal_repetitions', SINGAL_REPETITIONS)
switches_and_lights = core.devices()
switches = []
for switch in switches_and_lights:
if not switch.methods(tellcore_constants.TELLSTICK_DIM):
switches.append(
TellstickSwitchDevice(switch, signal_repetitions))
def _device_event_callback(id_, method, data, cid):
""" Called from the TelldusCore library to update one device """
for switch_device in switches:
if switch_device.tellstick_device.id == id_:
switch_device.update_ha_state()
break
callback_id = core.register_device_event(_device_event_callback)
def unload_telldus_lib(event):
""" Un-register the callback bindings """
if callback_id is not None:
core.unregister_callback(callback_id)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, unload_telldus_lib)
add_devices_callback(switches)
class TellstickSwitchDevice(ToggleEntity):
""" Represents a Tellstick switch. """
last_sent_command_mask = (tellcore_constants.TELLSTICK_TURNON |
tellcore_constants.TELLSTICK_TURNOFF)
def __init__(self, tellstick_device, signal_repetitions):
self.tellstick_device = tellstick_device
self.state_attr = {ATTR_FRIENDLY_NAME: tellstick_device.name}
self.signal_repetitions = signal_repetitions
@property
def should_poll(self):
""" Tells Home Assistant not to poll this entity. """
return False
@property
def name(self):
""" Returns the name of the switch if any. """
return self.tellstick_device.name
@property
def state_attributes(self):
""" Returns optional state attributes. """
return self.state_attr
@property
def is_on(self):
""" True if switch is on. """
last_command = self.tellstick_device.last_sent_command(
self.last_sent_command_mask)
return last_command == tellcore_constants.TELLSTICK_TURNON
def turn_on(self, **kwargs):
""" Turns the switch on. """
for _ in range(self.signal_repetitions):
self.tellstick_device.turn_on()
self.update_ha_state()
def turn_off(self, **kwargs):
""" Turns the switch off. """
for _ in range(self.signal_repetitions):
self.tellstick_device.turn_off()
self.update_ha_state()
|
Python
| 0
|
775a86179c321ac3cab73c9556edaa798f4273fd
|
add PassiveTotal OneShotAnalytics
|
plugins/analytics/passive_total.py
|
plugins/analytics/passive_total.py
|
import requests
import json
from datetime import datetime
from core.analytics import OneShotAnalytics
from core.observables import Observable, Hostname
class PassiveTotal(OneShotAnalytics):
    """One-shot analytics: passive DNS lookups via the PassiveTotal v1 API."""

    default_values = {
        "name": "PassiveTotal Passive DNS",
        "description": "Perform passive DNS (reverse) lookups on domain names or IP addresses."
    }

    settings = {
        "passivetotal_api_key": {
            "name": "PassiveTotal API Key",
            "description": "API Key provided by PassiveTotal."
        }
    }

    ACTS_ON = ["Hostname", "Ip"]
    API_URL = 'https://api.passivetotal.org/api/v1/passive'

    @staticmethod
    def analyze(observable, settings):
        """Query passive DNS for `observable` and link each resolution.

        Returns the set of created/updated links.  Raises
        requests.HTTPError on a non-2xx API response.
        """
        links = set()

        params = {
            'api_key': settings['passivetotal_api_key'],
            'query': observable.value
        }
        # Robustness fix: a timeout so a stalled API call cannot hang the
        # analytics worker indefinitely.
        r = requests.get(PassiveTotal.API_URL, params=params, timeout=30)
        r.raise_for_status()
        # Equivalent to json.loads(r.content) but with charset handling.
        result = r.json()

        for record in result['results']['records']:
            first_seen = datetime.strptime(record['firstSeen'], "%Y-%m-%d %H:%M:%S")
            last_seen = datetime.strptime(record['lastSeen'], "%Y-%m-%d %H:%M:%S")

            new = Observable.add_text(record['resolve'])
            # Link direction: hostname -> resolved IP for forward lookups;
            # resolved name -> observable for reverse (IP) lookups.
            if isinstance(observable, Hostname):
                links.update(observable.link_to(new, "A record", 'PassiveTotal', first_seen, last_seen))
            else:
                links.update(new.link_to(observable, "A record", 'PassiveTotal', first_seen, last_seen))

        return links
|
Python
| 0.000001
|
|
24aad104e2cdc8e37e66c4d87401b30619c8cd97
|
Fix code style
|
chainer/functions/softplus.py
|
chainer/functions/softplus.py
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Softplus(function.Function):

    """Softplus function."""

    def __init__(self, beta=1.0):
        # Pre-compute beta and 1/beta as float32 so both forward and
        # backward passes run entirely in single precision.
        self.beta = numpy.float32(beta)
        self.beta_inv = numpy.float32(1.0 / beta)

    def check_type_forward(self, in_types):
        # Exactly one float32 input is accepted.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
        )

    def check_type_backward(self, in_types, out_types):
        # The incoming gradient must match the input in dtype and shape.
        type_check.expect(
            in_types.size() == 1,
            out_types.size() == 1,
        )
        x_type, = in_types
        g_type, = out_types
        type_check.expect(
            g_type.dtype == numpy.float32,
            x_type.shape == g_type.shape,
        )

    def forward_cpu(self, inputs):
        x, = inputs
        # y = log(1 + exp(beta * x)) / beta
        # Computed as max(bx, 0) + log1p(exp(-|bx|)) for numerical
        # stability: exp() never sees a large positive argument.
        bx = self.beta * x
        y = (numpy.fmax(bx, numpy.float32(0.0)) +
             numpy.log1p(numpy.exp(-numpy.fabs(bx)))) * self.beta_inv
        return y,

    def forward_gpu(self, inputs):
        x, = inputs
        y = cuda.empty(x.shape)
        # Same numerically stable formulation as forward_cpu, as an
        # elementwise CUDA kernel.
        cuda.elementwise(
            'float* y, const float* x, float beta, float beta_inv',
            '''
            float bx = beta * x[i];
            y[i] = (max(bx, 0.f) + log1pf(__expf(-fabsf(bx)))) * beta_inv;
            ''',
            'softplus'
        )(y, x, self.beta, self.beta_inv)
        return y,

    def backward_cpu(self, inputs, grads):
        x, = inputs
        g, = grads
        # dy/dx = sigmoid(beta * x) = 1 - 1 / (1 + exp(beta * x))
        return (1 - 1 / (1 + numpy.exp(self.beta * x))) * g,

    def backward_gpu(self, inputs, grads):
        x, = inputs
        g, = grads
        gx = cuda.empty(x.shape, numpy.float32)
        cuda.elementwise(
            'float* gx, const float* x, const float* g, float beta',
            'gx[i] = (1.f - 1.f / (1.f + __expf(beta * x[i]))) * g[i];',
            'softplus_backward'
        )(gx, x, g, self.beta)
        return gx,
def softplus(x, beta=1.0):
    """Elementwise softplus function.

    This function is expressed as
    :math:`f(x) = \\frac{1}{\\beta}\\log(1 + \\exp(\\beta x))`,
    where :math:`\\beta` is a parameter.

    Args:
        x (~chainer.Variable): Input variable.
        beta (float): Parameter :math:`\\beta`.

    Returns:
        ~chainer.Variable: Output variable.

    """
    func = Softplus(beta=beta)
    return func(x)
|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Softplus(function.Function):

    """Softplus function."""

    def __init__(self, beta=1.0):
        # Pre-compute beta and 1/beta as float32 so both forward and
        # backward passes run entirely in single precision.
        self.beta = numpy.float32(beta)
        self.beta_inv = numpy.float32(1.0 / beta)

    def check_type_forward(self, in_types):
        # Exactly one float32 input is accepted.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(
            x_type.dtype == numpy.float32,
        )

    def check_type_backward(self, in_types, out_types):
        # The incoming gradient must match the input in dtype and shape.
        type_check.expect(
            in_types.size() == 1,
            out_types.size() == 1,
        )
        x_type, = in_types
        g_type, = out_types
        type_check.expect(
            g_type.dtype == numpy.float32,
            x_type.shape == g_type.shape,
        )

    def forward_cpu(self, inputs):
        x, = inputs
        # y = log(1 + exp(beta * x)) / beta
        # Computed as max(bx, 0) + log1p(exp(-|bx|)) for numerical
        # stability: exp() never sees a large positive argument.
        bx = self.beta * x
        y = (numpy.fmax(bx, numpy.float32(0.0)) +
             numpy.log1p(numpy.exp(-numpy.fabs(bx)))) * self.beta_inv
        return y,

    def forward_gpu(self, inputs):
        x, = inputs
        y = cuda.empty(x.shape)
        # Same numerically stable formulation as forward_cpu, as an
        # elementwise CUDA kernel.
        cuda.elementwise(
            'float* y, const float* x, float beta, float beta_inv',
            '''
            float bx = beta * x[i];
            y[i] = (max(bx, 0.f) + log1pf(__expf(-fabsf(bx)))) * beta_inv;
            ''',
            'softplus'
        )(y, x, self.beta, self.beta_inv)
        return y,

    def backward_cpu(self, inputs, grads):
        x, = inputs
        g, = grads
        # dy/dx = sigmoid(beta * x) = 1 - 1 / (1 + exp(beta * x))
        return (1 - 1 / (1 + numpy.exp(self.beta * x))) * g,

    def backward_gpu(self, inputs, grads):
        x, = inputs
        g, = grads
        gx = cuda.empty(x.shape, numpy.float32)
        cuda.elementwise(
            'float* gx, const float* x, const float* g, float beta',
            'gx[i] = (1.f - 1.f / (1.f + __expf(beta * x[i]))) * g[i];',
            'softplus_backward'
        )(gx, x, g, self.beta)
        return gx,
def softplus(x, beta=1.0):
    """Elementwise softplus function.

    This function is expressed as
    :math:`f(x) = \\frac{1}{\\beta}\\log(1 + \\exp(\\beta x))`, where
    :math:`\\beta` is a parameter.

    Args:
        x (~chainer.Variable): Input variable.
        beta (float): Parameter :math:`\\beta`.

    Returns:
        ~chainer.Variable: Output variable.

    """
    func = Softplus(beta=beta)
    return func(x)
|
Python
| 0.000169
|
2bd913c6cad94f3bc244d92a1ae1caffda82dcf8
|
Add humble plugin
|
plugins/humble.py
|
plugins/humble.py
|
import lxml.html
import requests
from smartbot import utils
class Plugin:
    """Reports the current Humble Bundle weekly sale and its time remaining."""

    def __call__(self, bot):
        bot.on_respond(r"humble( weekly)?( sale)?", self.on_respond)
        bot.on_help("humble", self.on_help)

    def on_respond(self, bot, msg, reply):
        """Scrape the weekly-sale page and reply with its title and countdown."""
        page = requests.get("https://www.humblebundle.com/weekly")
        tree = lxml.html.fromstring(page.text)

        try:
            title = tree.cssselect("title")[0].text_content().strip()
            clock = tree.cssselect("#heading-time-remaining .mini-digit-holder")[0]
            # The countdown is rendered as eight single digits (.c0 - .c7)
            # forming DD:HH:MM:SS; collect them in a loop instead of eight
            # copy-pasted selector lines.
            digits = [
                clock.cssselect(".c{0} .heading-num".format(i))[0].text_content()
                for i in range(8)
            ]
            pairs = ["".join(digits[i:i + 2]) for i in range(0, 8, 2)]
            reply("{0} - {1} left".format(title, ":".join(pairs)))
        except IndexError:
            # A missed selector means no sale is live (or the page changed).
            reply("No weekly sale.")

    def on_help(self, bot, msg, reply):
        reply("Syntax: humble [weekly] [deal]")
|
Python
| 0
|
|
23ffdaf1ed0e1739975de058b9f8c1adeef15531
|
Add "nice guy bot" strategy
|
nicebotbot.py
|
nicebotbot.py
|
#!/bin/python
def calculate_bid(player,pos,first_moves,second_moves):
    # Decide this round's bid for `player` (1 or 2) given the scotch
    # position and both players' full bid histories.
    remaining = remaining_amount(player, first_moves, second_moves)
    # NOTE(review): under Python 2 (this script's dialect) `/` here is
    # integer division; under Python 3 it would produce a float.
    amortized_bid = remaining / steps_remaining(player, pos)
    if(amortized_bid < 1):
        amortized_bid = 1
    default_bid = 14

    last_first_bid = 0
    last_second_bid = 0
    if( len(first_moves) > 0 ):
        last_first_bid = first_moves[-1]
        last_second_bid = second_moves[-1]

    if player == 1:
        # One step from winning: bid everything we have left.
        if pos == 1:
            return remaining
        else:
            #If the last move was greater than my last
            if last_second_bid > last_first_bid:
                #Take revenge
                return min(last_second_bid + 1, amortized_bid)
            else:
                return min(amortized_bid, default_bid)
    else:
        # Mirror logic for player 2, who wins at position 9.
        if pos == 9:
            return remaining
        else:
            #If the last move was greater than my last
            if last_first_bid > last_second_bid:
                #Take revenge
                return min(last_first_bid + 1, amortized_bid)
            else:
                return min(amortized_bid, default_bid)
def steps_remaining(player, pos):
    """Number of steps the player still needs to drag the scotch home."""
    # Player 1 wins at position 0, player 2 at position 10.
    return pos if player == 1 else 10 - pos
def get_draw_advantage_holder(first_moves, second_moves):
    """Return which player (1 or 2) currently holds the tie-break advantage.

    Player 1 starts with the advantage; each drawn round so far passes it
    to the other player.  (The original version also carried an unused
    local `holder` and a manual counting loop.)
    """
    draws = sum(1 for a, b in zip(first_moves, second_moves) if a == b)
    return 1 if draws % 2 == 0 else 2
# def get_opponent(player):
# if(player == 1):
# return 2
# else:
# return 1
# def get_opponents_remaining_amount(player, first_moves, second_moves):
# opponent = get_opponent(player)
# return remaining_amount(opponent, first_moves, second_moves)
#Calculate how much we've spent
def remaining_amount(player, first_moves, second_moves):
    # Return how much of the starting 100 the given player has left,
    # by replaying every round and charging the winner of each bid.
    starting_amount = 100
    first_spent = 0
    second_spent = 0
    for i in range(0, len(first_moves)):
        if first_moves[i] > second_moves[i]:
            first_spent += first_moves[i]
        elif first_moves[i] < second_moves[i]:
            second_spent += second_moves[i]
        else:
            # Tie: whoever held the draw advantage *before* this round
            # wins the bid and pays for it.
            trimmed_first = first_moves[:i]
            trimmed_second = second_moves[:i]
            # get current draw advantage
            holder = get_draw_advantage_holder(trimmed_first, trimmed_second)
            if(holder != 1):
                second_spent += second_moves[i]
            else:
                first_spent += first_moves[i]

    if player == 1:
        return starting_amount - first_spent
    else:
        return starting_amount - second_spent
#gets the id of the player
player = input()
scotch_pos = input() #current position of the scotch
# Bid histories arrive as one space-separated line of integers per player
# (Python 2 dialect: raw_input and the print statement below).
first_moves = [int(i) for i in raw_input().split()]
second_moves = [int(i) for i in raw_input().split()]

bid = calculate_bid(player,scotch_pos,first_moves,second_moves)
print bid
|
Python
| 0.000777
|
|
48951aa7c2c82ca03e801e1bfce09be5492ce27b
|
Add python_analytics package
|
python_analytics/__init__.py
|
python_analytics/__init__.py
|
import logging

try:  # pragma: no cover
    # _version is generated at build time; fall back when running from a
    # source checkout that has not been built.
    from ._version import full_version as __version__
except ImportError:  # pragma: no cover
    __version__ = "not-built"

logger = logging.getLogger(__name__)
# Library code must not configure logging itself; a NullHandler silences
# the "no handlers could be found" warning until the application adds one.
logger.addHandler(logging.NullHandler())
|
Python
| 0.000031
|
|
9fa9b339cb0da0ae6a4318288afd8c75e6890e4e
|
prepare for provider
|
flask_oauthlib/provider.py
|
flask_oauthlib/provider.py
|
# coding: utf-8
"""
Flask-OAuthlib
--------------
Implements OAuth2 provider support for Flask.
:copyright: (c) 2013 by Hsiaoming Yang.
"""
|
Python
| 0
|
|
f8e64d26c86e84ce9efe36db1155fdf5a4c6d5f8
|
Add example to show of icons.
|
flexx/ui/examples/icons.py
|
flexx/ui/examples/icons.py
|
# doc-export: Icons
"""
This example demonstrates the use of icons in Flexx.
"""
import os
import flexx
from flexx import app, ui
# todo: support icons in widgets like Button, TabWidget, etc.
# todo: support fontawesome icons
class Icons(ui.Widget):
def init(self):
ui.Button(text='Not much to see here yet')
if __name__ == '__main__':
fname = os.path.join(os.path.dirname(flexx.__file__), 'resources', 'flexx.ico')
black_png = ('iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAIUlEQVR42mNgY'
'GD4TyEeTAacOHGCKDxqwKgBtDVgaGYmAD/v6XAYiQl7AAAAAElFTkSuQmCC')
# Select application icon. Can be a url, a relative url to a shared asset,
# a base64 encoded image, or a local filename. Note that the local filename
# works for setting the aplication icon in a desktop-like app, but not for
# a web app. File types can be ico or png.
icon = None # use default
# icon = 'https://assets-cdn.github.com/favicon.ico'
# icon = app.assets.add_shared_asset('ico.icon', open(fname, 'rb'))
# icon = 'data:image/png;base64,' + black_png
# icon = fname
m = app.App(Icons, title='Icon demo', icon=icon).launch('firefox-browser')
app.start()
|
Python
| 0
|
|
5e574a24d95e686bc2592af439e148e68036c61d
|
Add unit test for nova connector
|
tests/unit/cloud/clouds/nova_test.py
|
tests/unit/cloud/clouds/nova_test.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase
from salt.cloud.clouds import nova
from salttesting.mock import MagicMock, patch
from tests.unit.cloud.clouds import _preferred_ip
class NovaTestCase(TestCase):
    '''
    Test case for openstack
    '''
    # Addresses the fake instance reports; only '0.0.0.0' is "preferred".
    PRIVATE_IPS = ['0.0.0.0', '1.1.1.1', '2.2.2.2']

    # Patch out every provider-specific lookup so _query_node_data sees an
    # ACTIVE instance whose only candidate addresses are PRIVATE_IPS.
    @patch('salt.cloud.clouds.nova.show_instance',
           MagicMock(return_value={'state': 'ACTIVE',
                                   'public_ips': [],
                                   'addresses': [],
                                   'private_ips': PRIVATE_IPS}))
    @patch('salt.cloud.clouds.nova.rackconnect', MagicMock(return_value=False))
    @patch('salt.cloud.clouds.nova.rackconnectv3', MagicMock(return_value={'mynet': ['1.1.1.1']}))
    @patch('salt.cloud.clouds.nova.cloudnetwork', MagicMock(return_value=False))
    @patch('salt.cloud.clouds.nova.managedcloud', MagicMock(return_value=False))
    @patch('salt.cloud.clouds.nova.preferred_ip', _preferred_ip(PRIVATE_IPS, ['0.0.0.0']))
    @patch('salt.cloud.clouds.nova.ssh_interface', MagicMock(return_value='public_ips'))
    def test_query_node_data_filter_preferred_ip_addresses(self):
        '''
        Test if query node data is filtering out unpreferred IP addresses.
        '''
        nova.__opts__ = {}
        vm = {'name': None}
        data = MagicMock()
        data.public_ips = []

        # Only the preferred private IP must survive into public_ips.
        assert nova._query_node_data(vm, data).public_ips == ['0.0.0.0']
|
Python
| 0
|
|
e6a7fa5412f5bde58345491011336d74723170a2
|
Add script to generate relabeled HDF5 volume
|
toolbox/json_result_to_labelimage.py
|
toolbox/json_result_to_labelimage.py
|
import argparse
import h5py
import vigra
from vigra import numpy as np
import sys
import json
sys.path.append('.')
from progressbar import ProgressBar
def get_uuid_to_traxel_map(traxelIdPerTimestepToUniqueIdMap):
    """Invert a {timestep: {traxelId: uuid}} mapping into
    {uuid: [(timestep, traxelId), ...]}, each list sorted by timestep."""
    uuidToTraxelMap = {}
    for timestep, traxelToUuid in traxelIdPerTimestepToUniqueIdMap.items():
        for traxelId, uuid in traxelToUuid.items():
            entries = uuidToTraxelMap.setdefault(uuid, [])
            entries.append((int(timestep), int(traxelId)))

    # Order each UUID's traxels chronologically.
    for entries in uuidToTraxelMap.values():
        entries.sort(key=lambda pair: pair[0])

    return uuidToTraxelMap
def getLabelImageForFrame(labelImageFilename, labelImagePath, timeframe, shape):
    """
    Get the label image(volume) of one time frame
    """
    with h5py.File(labelImageFilename, 'r') as h5file:
        # The dataset path is a template parameterized by the frame range
        # and spatial shape; index [0, ..., 0] drops the leading time axis
        # and trailing channel axis of the stored 5D block.
        labelImage = h5file[labelImagePath % (timeframe, timeframe+1, shape[0], shape[1], shape[2])][0, ..., 0].squeeze().astype(np.uint32)
        return labelImage
def getShape(labelImageFilename, labelImagePath):
    """
    extract the shape from the labelimage
    """
    with h5py.File(labelImageFilename, 'r') as h5file:
        # Take any dataset inside the label-image group; axes 1:4 hold the
        # spatial extent (axis 0 is presumably time, axis 4 channels --
        # consistent with the indexing in getLabelImageForFrame).
        shape = h5file['/'.join(labelImagePath.split('/')[:-1])].values()[0].shape[1:4]
        return shape
def relabelImage(volume, replace):
    """
    Apply a set of label replacements to the given volume.

    Parameters:
        volume - numpy array
        replace - dictionary{[(oldValueInVolume)->(newValue), ...]}

    Labels present in the volume but missing from `replace` map to 1;
    label 0 (background) is preserved.
    """
    # Build a lookup table indexed by old label value.
    mp = np.arange(0, np.amax(volume) + 1, dtype=volume.dtype)
    mp[1:] = 1
    labels = np.unique(volume)
    for label in labels:
        if label > 0:
            # dict.get replaces the original bare try/except, which also
            # swallowed unrelated errors (e.g. KeyboardInterrupt).
            mp[label] = replace.get(label, 1)
    # Fancy indexing applies the lookup table to the whole volume at once.
    return mp[volume]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Perform the segmentation as in ilastik for a new predicition map,'
+ 'using the same settings as stored in the given ilastik project',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', required=True, type=str, dest='modelFilename',
help='Filename of the JSON graph model')
parser.add_argument('--result', required=True, type=str, dest='resultFilename',
help='Filename of the JSON result file')
parser.add_argument('--label-image-file', required=True, type=str, dest='labelImageFilename',
help='Filename of the HDF5/ILP file containing the segmentation')
parser.add_argument('--label-image-path', type=str, dest='labelImagePath',
help='Path inside result file to the label image',
default='/TrackingFeatureExtraction/LabelImage/0000/[[%d, 0, 0, 0, 0], [%d, %d, %d, %d, 1]]')
parser.add_argument('--out', type=str, dest='out', required=True, help='Filename of the resulting HDF5 with relabeled objects')
args = parser.parse_args()
# get the dataset shape per frame:
shape = getShape(args.labelImageFilename, args.labelImagePath)
# load json model and results
with open(args.modelFilename, 'r') as f:
model = json.load(f)
with open(args.resultFilename, 'r') as f:
result = json.load(f)
# load forward mapping and create reverse mapping from json uuid to (timestep,ID)
traxelIdPerTimestepToUniqueIdMap = model['traxelToUniqueId']
uuidToTraxelMap = get_uuid_to_traxel_map(traxelIdPerTimestepToUniqueIdMap)
# load links and map indices
links = [(uuidToTraxelMap[int(entry['src'])][-1], uuidToTraxelMap[int(entry['dest'])][0]) for entry in result['linkingResults'] if entry['value'] > 0]
# add all internal links of tracklets
for v in uuidToTraxelMap.values():
prev = None
for timestepIdTuple in v:
if prev is not None:
links.append((prev, timestepIdTuple))
prev = timestepIdTuple
# group by timestep
timesteps = [t for t in traxelIdPerTimestepToUniqueIdMap.keys()]
linksPerTimestep = dict([(t, [(a[1], b[1]) for a, b in links if b[0] == int(t)]) for t in timesteps])
assert(len(linksPerTimestep['0']) == 0)
# create output array
resultVolume = np.zeros((len(timesteps),) + shape, dtype='uint32')
# iterate over timesteps and label tracks from front to back in a distinct color
nextUnusedColor = 1
lastFrameColorMap = {}
lastFrameLabelImage = getLabelImageForFrame(args.labelImageFilename, args.labelImagePath, 0, shape)
for t in range(len(timesteps)):
thisFrameColorMap = {}
thisFrameLabelImage = getLabelImageForFrame(args.labelImageFilename, args.labelImagePath, t, shape)
for a, b in linksPerTimestep[str(t)]:
# propagate color if possible, otherwise assign a new one
if a in lastFrameColorMap:
thisFrameColorMap[b] = lastFrameColorMap[a]
else:
thisFrameColorMap[b] = nextUnusedColor
lastFrameColorMap[a] = thisFrameColorMap[b] # also store in last frame's color map as it must have been present to participate in a link
nextUnusedColor += 1
# see which objects have been assigned a color in the last frame. set all others to 0 (1?)
unusedLabels = set(np.unique(lastFrameLabelImage)) - set([0]) - set(lastFrameColorMap.keys())
for l in unusedLabels:
lastFrameColorMap[l] = 0
# write relabeled image
resultVolume[t-1,...,0] = relabelImage(lastFrameLabelImage, lastFrameColorMap)
# swap the color maps so that in the next frame we use "this" as "last"
lastFrameColorMap, thisFrameColorMap = thisFrameColorMap, lastFrameColorMap
lastFrameLabelImage = thisFrameLabelImage
# handle last frame:
# see which objects have been assigned a color in the last frame. set all others to 0 (1?)
unusedLabels = set(np.unique(lastFrameLabelImage)) - set([0]) - set(lastFrameColorMap.keys())
for l in unusedLabels:
lastFrameColorMap[l] = 0
# write last frame relabeled image
resultVolume[t,...,0] = relabelImage(lastFrameLabelImage, lastFrameColorMap)
# save to disk
vigra.impex.writeHDF5(resultVolume, args.out, 'exported_data')
|
Python
| 0
|
|
2b8ff3b38e4f8bdc9da30c7978062174b0259f76
|
Add lc0068_text_justification.py
|
lc0068_text_justification.py
|
lc0068_text_justification.py
|
"""Leetcode 68. Text Justification
Hard
URL: https://leetcode.com/problems/text-justification/
Given an array of words and a width maxWidth, format the text such that each line has
exactly maxWidth characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you
can in each line. Pad extra spaces ' ' when necessary so that each line has exactly
maxWidth characters.
Extra spaces between words should be distributed as evenly as possible. If the number
of spaces on a line do not divide evenly between words, the empty slots on the left
will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted
between words.
Note:
- A word is defined as a character sequence consisting of non-space characters only.
- Each word's length is guaranteed to be greater than 0 and not exceed maxWidth.
- The input array words contains at least one word.
Example 1:
Input:
words = ["This", "is", "an", "example", "of", "text", "justification."]
maxWidth = 16
Output:
[
"This is an",
"example of text",
"justification. "
]
Example 2:
Input:
words = ["What","must","be","acknowledgment","shall","be"]
maxWidth = 16
Output:
[
"What must be",
"acknowledgment ",
"shall be "
]
Explanation: Note that the last line is "shall be " instead of "shall be",
because the last line must be left-justified instead of fully-justified.
Note that the second line is also left-justified because it contains only
one word.
Example 3:
Input:
words = ["Science","is","what","we","understand","well","enough","to","explain",
"to","a","computer.","Art","is","everything","else","we","do"]
maxWidth = 20
Output:
[
"Science is what we",
"understand well",
"enough to explain to",
"a computer. Art is",
"everything else we",
"do "
]
"""
class Solution(object):
    def fullJustify(self, words, maxWidth):
        """Greedily pack words into fully justified lines of width maxWidth.

        Each line takes as many words as fit; extra spaces are distributed
        left-to-right between words.  The last line (and any single-word
        line) is left-justified and right-padded.

        :type words: List[str]
        :type maxWidth: int
        :rtype: List[str]
        """
        lines = []
        cur = []      # words placed on the current line
        cur_len = 0   # total characters in cur, excluding spaces

        for word in words:
            # len(cur) accounts for the mandatory single space per gap.
            if cur and cur_len + len(cur) + len(word) > maxWidth:
                lines.append(self._justify(cur, cur_len, maxWidth))
                cur, cur_len = [], 0
            cur.append(word)
            cur_len += len(word)

        # Last line: single spaces, padded on the right.
        last = ' '.join(cur)
        lines.append(last + ' ' * (maxWidth - len(last)))
        return lines

    @staticmethod
    def _justify(cur, cur_len, maxWidth):
        """Fully justify one line; leftmost gaps get the extra spaces."""
        if len(cur) == 1:
            return cur[0] + ' ' * (maxWidth - cur_len)
        gaps = len(cur) - 1
        space, extra = divmod(maxWidth - cur_len, gaps)
        pieces = []
        for i, word in enumerate(cur[:-1]):
            pieces.append(word)
            pieces.append(' ' * (space + (1 if i < extra else 0)))
        pieces.append(cur[-1])
        return ''.join(pieces)
def main():
    # Placeholder entry point for ad-hoc testing.
    pass


if __name__ == '__main__':
    main()
|
Python
| 0.000002
|
|
63a8f4af91048d0847cb7628f2ea15bb2b5f0e0a
|
Add abstract base classes to fs.archive
|
fs/archive/base.py
|
fs/archive/base.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import io
import abc
import six
import shutil
import tempfile
from .. import errors
from ..base import FS
from ..proxy.writer import ProxyWriter
@six.add_metaclass(abc.ABCMeta)
class ArchiveSaver(object):
def __init__(self, output, overwrite=False, stream=True, **options):
self.output = output
self.overwrite = overwrite
self.stream = stream
if hasattr(output, 'tell'):
self._initial_position = output.tell()
def save(self, fs):
if self.stream:
self.to_stream(fs)
else:
self.to_file(fs)
def to_file(self, fs):
if self.overwrite: # If we need to overwrite, use temporary file
tmp = '.'.join([self.output, 'tmp'])
self._to(tmp, fs)
shutil.move(tmp, self.output)
else:
self._to(self.output, fs)
def to_stream(self, fs):
if self.overwrite: # If we need to overwrite, use temporary file
fd, temp = tempfile.mkstemp()
os.close(fd)
self._to(temp, fs)
self.output.seek(self._initial_position)
with open(temp, 'rb') as f:
shutil.copyfileobj(f, self.output)
else:
self._to(self.output, fs)
@abc.abstractmethod
def _to(self, handle, fs):
raise NotImplementedError()
@six.add_metaclass(abc.ABCMeta)
class ArchiveReadFS(FS):
def __init__(self, handle, **options):
super(ArchiveReadFS, self).__init__()
self._handle = handle
def __repr__(self):
return "{}({!r})".format(
self.__class__.__name__,
getattr(self._handle, 'name', self._handle),
)
def __str__(self):
return "<{} '{}'>".format(
self.__class__.__name__.lower(),
getattr(self._handle, 'name', self._handle),
)
def _on_modification_attempt(self, path):
raise errors.ResourceReadOnly(path)
def setinfo(self, path, info):
self.check()
self._on_modification_attempt(path)
def makedir(self, path, permissions=None, recreate=False):
self.check()
self._on_modification_attempt(path)
def remove(self, path):
self.check()
self._on_modification_attempt(path)
def removedir(self, path):
self.check()
self._on_modification_attempt(path)
@six.add_metaclass(abc.ABCMeta)
class ArchiveFS(ProxyWriter):
_read_fs_cls = ArchiveReadFS
_saver_cls = ArchiveSaver
def __init__(self, handle, proxy=None, **options):
if isinstance(handle, six.text_type):
stream = False
saver = True
if os.path.exists(handle):
read_only = self._read_fs_cls(handle, **options)
else:
read_only = None
elif isinstance(handle, io.IOBase):
stream = True
saver = handle.writable()
if handle.readable() and handle.seekable():
read_only = self._read_fs_cls(handle, **options)
else:
read_only = None
else:
raise errors.CreateFailed("cannot use {}".format(handle))
if saver:
self._saver = self._saver_cls(handle, read_only is not None, stream)
else:
self._saver = None
super(ArchiveFS, self).__init__(read_only, proxy)
def close(self):
if not self.isclosed():
if self._saver is not None:
self._saver.save(self)
super(ArchiveFS, self).close()
|
Python
| 0.000001
|
|
ed463c7ea52bea26d724ee372fbd7319bfee8e1f
|
add preprocessor
|
src/preprocessor.py
|
src/preprocessor.py
|
#! /usr/bin/python
import os
import re
import sys
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
def process(input_file):
invalidchar = ('\t')
blockcomment = ['#{','}#']
stack = [0]
output = StringIO()
newindent = False
commented = False
linejoin = False
debug = False
for i, line in enumerate(input_file):
lineout = remove_inline(line)
if lineout:
for x in invalidchar:
if x in lineout:
error("SyntaxError: Invalid character {} found on line {}".format(x,i))
# Check if first statement is a block comment
lstripline = lineout.lstrip()
if len(lstripline) > 1 and blockcomment[0] == lstripline[:2]:
commented = True
# Checks if line gets uncommented
if commented:
if len(lineout) > 1 and blockcomment[1] == lineout[-2:]:
commented = False
else:
if not linejoin:
wcount = len(lineout) - len(lineout.lstrip(' '))
# If the previous line began an indentation, add the new indentation level to the block (so long as the new indentation
# level is greater than the previous one)
if newindent == True:
if wcount > stack[-1]:
stack.append(wcount)
newindent = False
else:
error("IndentationError on line {}".format(i))
# If the indentation level is greater than expected, throw an error
if wcount > stack[-1]:
if debug:
print "=== ERROR 1 ==="
print "proc. line: '{}'".format(lineout)
print "wcount: {}".format(wcount)
print "stack[-1]: {}".format(stack[-1])
print "newindent: {}".format(wcount)
error("IndentationError on line {}".format(i))
else:
# If the indentation level is less than the current level, return to a previous indentation block. Throw an error if you return to an indentation level that doesn't exist
while(wcount < stack[-1]):
lineout = "}" + lineout
stack.pop()
if wcount != stack[-1]:
if debug:
print "=== ERROR 2 ==="
print "proc. line: '{}'".format(lineout)
print "wcount: {}".format(wcount)
print "stack[-1]: {}".format(stack[-1])
print "newindent: {}".format(wcount)
error("IndentationError on line {}".format(i))
# Given that the indentation level is correct, check for the start of a new code block (where a line ends with a ':') and insert a '{'. At the end of a line, add a semicolon ';' unless if there is a linejoin character '\'.
if lineout[-1] == ':':
lineout = lineout + '{\n'
newindent = True
elif lineout[-1] == '\\':
linejoin = True
lineout = lineout[:-1]
else:
lineout = lineout + '\n'
linejoin = False
output.write(lineout)
while 0 < stack[-1]:
output.write("}")
stack.pop()
if debug:
print output.getvalue()
return output
def error(msg):
    """Print *msg* to stderr and abort the program with exit status 2."""
    sys.stderr.write("{0}\n".format(msg))
    sys.exit(2)
def remove_inline(line):
    """Strip a '##' inline comment (everything from the first '#' on) and
    any trailing whitespace from *line*."""
    if "##" in line:
        # The first '#' is guaranteed to exist here, so partition is
        # equivalent to the lazy-regex capture the original used.
        kept = line.partition("#")[0]
    else:
        kept = line
    return kept.rstrip()
def usage():
    # Print command-line usage (Python 2 print-statement syntax).
    print"""
    python preprocessor.py [input.yo]
    """
if __name__ == "__main__":
    # Expect exactly one argument: the input .yo source file.
    if len(sys.argv) != 2:
        usage()
        sys.exit(2)
    try:
        f_in = open(sys.argv[1],"r")
    except IOError:
        error("IOError: Cannot read input file %s.\n" % sys.argv[1])
    name_ext = os.path.basename(f_in.name)
    dir_ext = os.path.dirname(f_in.name)+"/"
    # Require the .yo extension; output is written next to the input as
    # <name>.processed.yo.
    if name_ext.lower().endswith(".yo"):
        fname = os.path.splitext(name_ext)[0]
    else:
        error('NameError: Input must have yo file extension')
    out_str = process(f_in)
    f_out = open(dir_ext+fname+".processed.yo", 'w')
    f_out.write(out_str.getvalue())
|
Python
| 0.000059
|
|
8fe5e768f20abfdd790870075950b6537c5cad6a
|
Add class containing test state and report + print methods
|
ptest.py
|
ptest.py
|
#!/usr/bin/python3
from sys import exit
class Ptest(object):
    """Tiny test harness: records pass/fail outcomes and prints a summary."""

    # ANSI escape codes used to colorize result lines.
    _NORMAL = '\x1B[0m'
    _RED = '\x1B[31m'
    _GREEN = '\x1B[32m'

    def __init__(self, module_name):
        self.module_name = module_name
        self.passed = 0
        self.failed = 0
        print('\nRunning tests for module "', module_name, '"', sep='')

    def report(self, test_name, test_result):
        """Record one boolean test outcome; exit(1) on anything else."""
        if test_result not in (True, False):
            print('Invalid report argument for test "', test_name, '"', sep='')
            exit(1)

        if test_result:
            self.passed += 1
            print('[', self._GREEN, 'PASSED', self._NORMAL, '] ', test_name, sep='')
        else:
            self.failed += 1
            print('[', self._RED, 'FAILED', self._NORMAL, '] ', test_name, sep='')

    def print_statistics(self):
        """Print the pass rate over all tests reported so far."""
        total = self.passed + self.failed
        if total == 0:
            print('No tests yet...')
            return

        pass_rate = 0
        if self.passed != 0:
            pass_rate = round(float(self.passed) / float(total), 3) * 100
        print('Passed: ', self.passed, '/', total,
              ' (', pass_rate, '%)', sep='', end='\n\n')
|
Python
| 0
|
|
20375ca41cce0ee6a9a22bfe6faa766ab6db53fc
|
add tests for coordinate rounding and basic pen commands
|
Lib/fontTools/pens/t2CharStringPen_test.py
|
Lib/fontTools/pens/t2CharStringPen_test.py
|
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.pens.t2CharStringPen import T2CharStringPen
import unittest
class T2CharStringPenTest(unittest.TestCase):
def assertAlmostEqualProgram(self, expected, actual):
self.assertEqual(len(expected), len(actual))
for i1, i2 in zip(expected, actual):
if isinstance(i1, basestring):
self.assertIsInstance(i2, basestring)
self.assertEqual(i1, i2)
else:
self.assertAlmostEqual(i1, i2)
def test_draw_lines(self):
pen = T2CharStringPen(100, {})
pen.moveTo((0, 0))
pen.lineTo((10, 0))
pen.lineTo((10, 10))
pen.lineTo((0, 10))
pen.closePath() # no-op
charstring = pen.getCharString(None, None)
self.assertEqual(
[100,
0, 0, 'rmoveto',
10, 0, 'rlineto',
0, 10, 'rlineto',
-10, 0, 'rlineto',
'endchar'],
charstring.program)
def test_draw_curves(self):
pen = T2CharStringPen(100, {})
pen.moveTo((0, 0))
pen.curveTo((10, 0), (20, 10), (20, 20))
pen.curveTo((20, 30), (10, 40), (0, 40))
pen.endPath() # no-op
charstring = pen.getCharString(None, None)
self.assertEqual(
[100,
0, 0, 'rmoveto',
10, 0, 10, 10, 0, 10, 'rrcurveto',
0, 10, -10, 10, -10, 0, 'rrcurveto',
'endchar'],
charstring.program)
def test_default_width(self):
pen = T2CharStringPen(None, {})
charstring = pen.getCharString(None, None)
self.assertEqual(['endchar'], charstring.program)
def test_no_round(self):
# no rounding is the default
pen = T2CharStringPen(100.1, {}, roundTolerance=0.0)
pen.moveTo((0, 0))
pen.curveTo((10.1, 0.1), (19.9, 9.9), (20.49, 20.49))
pen.curveTo((20.49, 30.49), (9.9, 39.9), (0.1, 40.1))
pen.closePath()
charstring = pen.getCharString(None, None)
self.assertAlmostEqualProgram(
[100.1,
0, 0, 'rmoveto',
10.1, 0.1, 9.8, 9.8, 0.59, 10.59, 'rrcurveto',
0, 10, -10.59, 9.41, -9.8, 0.2, 'rrcurveto',
'endchar'],
charstring.program)
def test_round_all(self):
# 1.0 rounds everything
pen = T2CharStringPen(100.1, {}, roundTolerance=1.0)
pen.moveTo((0, 0))
pen.curveTo((10.1, 0.1), (19.9, 9.9), (20.49, 20.49))
pen.curveTo((20.49, 30.49), (9.9, 39.9), (0.1, 40.1))
pen.closePath()
charstring = pen.getCharString(None, None)
self.assertEqual(
[100,
0, 0, 'rmoveto',
10, 0, 10, 10, 0, 10, 'rrcurveto',
0, 10, -10, 10, -10, 0, 'rrcurveto',
'endchar'],
charstring.program)
def test_round_some(self):
pen = T2CharStringPen(100, {}, roundTolerance=0.2)
pen.moveTo((0, 0))
# the following two are rounded as within the tolerance
pen.lineTo((10.1, 0.1))
pen.lineTo((19.9, 9.9))
# this one is not rounded as it exceeds the tolerance
pen.lineTo((20.49, 20.49))
pen.closePath()
charstring = pen.getCharString(None, None)
self.assertAlmostEqualProgram(
[100,
0, 0, 'rmoveto',
10, 0, 'rlineto',
10, 10, 'rlineto',
0.49, 10.49, 'rlineto',
'endchar'],
charstring.program)
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
|
Python
| 0
|
|
1669f9a3a9fabc2ded8fa92542dca65036c201e5
|
Create sizes.py
|
plantcv/plantcv/visualize/sizes.py
|
plantcv/plantcv/visualize/sizes.py
|
# Visualize an annotated image with object sizes
import os
import cv2
import random
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import find_objects
from plantcv.plantcv import color_palette
def sizes(img, mask, num_objects=100):
""" Visualize an RGB image in all potential colorspaces
Inputs:
img = RGB or grayscale image data
mask = Binary mask made from selected contours
num_objects = Optional parameter to limit the number of objects that will get annotated.
Returns:
plotting_img = Plotting image containing the original image and L,A,B,H,S, and V colorspaces
:param img: numpy.ndarray
:param mask: numpy.ndarray
:param num_objects: int
:return plotting_img: numpy.ndarray
"""
plotting_img = np.copy(img)
# Store debug
debug = params.debug
params.debug = None
id_objects, obj_hierarchy = find_objects(img=img, mask=mask)
rand_color = color_palette(num=len(id_objects), saved=False)
random.shuffle(rand_color)
label_coord_x = []
label_coord_y = []
area_vals = []
for i, cnt in enumerate(id_objects):
# Calculate geodesic distance, divide by two since cv2 seems to be taking the perimeter of the contour
area_vals.append(cv2.contourArea(cnt))
cv2.drawContours(plotting_img, id_objects, i, rand_color[i], thickness=-1)
# Store coordinates for labels
label_coord_x.append(id_objects[i][0][0][0])
label_coord_y.append(id_objects[i][0][0][1])
segment_ids = []
# Put labels of length
for c, value in enumerate(area_vals):
text = "{:.0f}".format(value)
w = label_coord_x[c]
h = label_coord_y[c]
if c < int(num_objects):
cv2.putText(img=plotting_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(150, 150, 150), thickness=params.text_thickness)
else:
print("There were " + str(len(area_vals)-num_objects) + " objects not annotated.")
break
# Auto-increment device
params.device += 1
# Reset debug mode
params.debug = debug
if params.debug == 'print':
print_image(plotting_img, os.path.join(params.debug_outdir, str(params.device) + '_object_sizes.png'))
elif params.debug == 'plot':
plot_image(plotting_img)
return plotting_img
|
Python
| 0.000001
|
|
e03ecf68055e820106172413967713f98f7905ac
|
copy api_util to client to make it self-contained
|
net/client/api_util.py
|
net/client/api_util.py
|
import simplejson
def json2python(json):
    """Parse a JSON string and return the resulting Python object.

    Returns None when *json* cannot be decoded (or is not a string),
    mirroring the original best-effort behaviour.
    """
    try:
        return simplejson.loads(json)
    except (ValueError, TypeError):
        # BUG FIX: was a bare `except:` that also hid KeyboardInterrupt and
        # programming errors; only decode/type failures are swallowed now.
        return None
python2json = simplejson.dumps
|
Python
| 0.000001
|
|
addc7f33af75070333369a01c71e8acd231376ba
|
Add FilterNotifier for keyword based notification filtering
|
reconbot/notifiers/filter.py
|
reconbot/notifiers/filter.py
|
class FilterNotifier:
    """Filters notifications based on their type or keywords.

    A notification containing any *ignore* keyword is dropped (returns
    False).  Otherwise it is forwarded to the wrapped notifier when it
    contains any *keywords* entry, or unconditionally when no keywords
    were configured.
    """

    def __init__(self, notifier, keywords=None, ignore=None):
        self.notifier = notifier
        # BUG FIX: the defaults were mutable lists shared across instances;
        # use None sentinels and build fresh lists per instance.
        self.keywords = list(keywords) if keywords is not None else []
        self.ignore = list(ignore) if ignore is not None else []

    def notify(self, text, options=None):
        if options is None:
            options = {}
        # Ignore rules win over keyword matches.
        if self.ignore and any(keyword in text for keyword in self.ignore):
            return False
        if not self.keywords or any(keyword in text for keyword in self.keywords):
            self.notifier.notify(text, options)
|
Python
| 0
|
|
b09b11de1a025196cceb1c8fd71bda5515437a10
|
Add max31855 example driver
|
sw/examples/drivers/max31855.py
|
sw/examples/drivers/max31855.py
|
#!/usr/bin/env python
#
# SPI example (using the STM32F407 discovery board)
#
import sys
import time
import ctypes
from silta import stm32f407
def bytes_to_int(byte_list):
    """Combine a big-endian list of byte values into a single integer."""
    total = 0
    # Walk the list from the least-significant (last) byte upward.
    for position, value in enumerate(reversed(byte_list)):
        total += value << (8 * position)
    return total
class MAX31855(object):
    """Driver for the MAX31855 thermocouple-to-digital converter over SPI.

    The bridge object must provide gpiocfg/gpio/spicfg/spi methods (as the
    silta stm32f407 bridge does).
    """

    def __init__(self, bridge, cs_pin):
        self.bridge = bridge
        self.cs_pin = cs_pin
        # Holds the 3-bit fault code from the most recent failed read.
        self.last_fault = 0
        # Set the CS line as an output
        self.bridge.gpiocfg(self.cs_pin, 'output')
        # NOTE(review): comment in the original said ~1.05MHz but the value
        # is 10.5MHz -- confirm the intended SPI clock.
        self.bridge.spicfg(10500000, 0, 0)
        # CS is active low in this case
        self.bridge.gpio(self.cs_pin, 1)

    def read(self):
        """Read the thermocouple temperature in degrees Celsius.

        Returns None on an SPI error or a sensor fault; after a fault the
        code is available via get_last_fault().
        """
        # Clock out 32 bits; the device ignores MOSI.
        txbuff = [0x00, 0x00, 0x00, 0x00]
        rval = self.bridge.spi(self.cs_pin, txbuff)
        if not isinstance(rval, list):
            print('SPI Error: ' + str(rval))
            return None
        # Assemble the 32-bit register big-endian (bytes are 0-255).
        reg = int.from_bytes(bytes(rval), 'big')
        if (reg >> 16) & 1:
            # BUG FIX: was assigning a *local* `last_fault`, so the fault
            # code was silently discarded.
            self.last_fault = reg & 0x7
            return None
        # Upper 14 bits hold the signed temperature in 0.25 degC steps.
        temperature = ctypes.c_int16((reg >> 16) & 0xFFFC).value >> 2
        return temperature * 0.25

    def get_last_fault(self):
        """Return the fault code captured by the last failed read()."""
        # BUG FIX: was `return last_fault`, an undefined local (NameError).
        return self.last_fault
|
Python
| 0
|
|
a15e363718ab41c5e02b9eaa919fb689cd266af6
|
Add common module for our tests
|
nose2/tests/_common.py
|
nose2/tests/_common.py
|
"""Common functionality."""
import os.path
import tempfile
import shutil
import sys
class TestCase(unittest2.TestCase):
    """TestCase extension.

    When the class attribute ``_RUN_IN_TEMP`` is True (default: False), each
    test runs inside a fresh temporary directory that is deleted afterwards.
    """
    # NOTE(review): `unittest2` is referenced here but never imported in this
    # module -- confirm the import exists or add it at the top of the file.
    _RUN_IN_TEMP = False

    def setUp(self):
        super(TestCase, self).setUp()
        if not self._RUN_IN_TEMP:
            return
        self.__orig_dir = os.getcwd()
        self.__work_dir = tempfile.mkdtemp()
        os.chdir(self.__work_dir)
        # Make sure it's possible to import modules from current directory
        sys.path.insert(0, self.__work_dir)

    def tearDown(self):
        super(TestCase, self).tearDown()
        if not self._RUN_IN_TEMP:
            return
        os.chdir(self.__orig_dir)
        shutil.rmtree(self.__work_dir, ignore_errors=True)
class _FakeEventBase(object):
    """Base class for fake Event objects."""
|
Python
| 0
|
|
b802f1d5453840ea4b16113d5d03f6c27224ce0c
|
Add try/except example.
|
examples/try.py
|
examples/try.py
|
# Honeybadger for Python
# https://github.com/honeybadger-io/honeybadger-python
#
# This file is an example of how to catch an exception in Python and report it
# to Honeybadger without re-raising. To run this example:
# $ pip install honeybadger
# $ HONEYBADGER_API_KEY=your-api-key python try.py
from honeybadger import honeybadger
# Uncomment the following line or use the HONEYBADGER_API_KEY environment
# variable to configure the API key for your Honeybadger project:
# honeybadger.configure(api_key='your api key')
import logging
# Attach a StreamHandler so honeybadger's own log output is visible on stderr.
logging.getLogger('honeybadger').addHandler(logging.StreamHandler())
def method_two():
mydict = dict(a=1)
try:
print mydict['b']
except KeyError, exc:
honeybadger.notify(exc, context={'foo': 'bar'})
def method_one():
    # Indirection layer: simply delegates to method_two().
    method_two()
if __name__ == '__main__':
    # Set global context first so it is attached to the example report.
    honeybadger.set_context(user_email="user@example.com")
    method_one()
|
Python
| 0
|
|
f81d32b0ec19c2f370df31ec4d86b1c735d414cf
|
Create extract_all_features.py
|
preprocess/extract_all_features.py
|
preprocess/extract_all_features.py
|
import os
from scipy.io import loadmat
from skimage.color import rgb2gray
from numpy import array, vstack, reshape, delete
from skimage.feature import local_binary_pattern
from skimage.feature import hog
from skimage.filters import sobel_h,sobel_v
from scipy.io import savemat
from sklearn.metrics import precision_recall_fscore_support
from sklearn.ensemble import AdaBoostClassifier
from sklearn import preprocessing
from sklearn.feature_selection import VarianceThreshold
from sklearn.svm import SVC
def normalize_data(x):
    """Return a StandardScaler fitted to *x*.

    Note: only the fitted scaler is returned; the caller is responsible
    for applying .transform().
    """
    return preprocessing.StandardScaler().fit(x)
def make_feature_vector(D, num_of_files, input_fldr, file_heading):
    """Build a 17-feature-per-superpixel matrix for every image and save it.

    D            -- dict loaded from a .mat file with 'feature_vector' and
                    'target' entries (one cell per image).
    num_of_files -- number of images to process.
    input_fldr   -- output directory for the regenerated .mat file.
    file_heading -- prefix for the saved "<heading>_data.mat" file.

    NOTE(review): every image is assumed to yield at least 385 superpixels
    arranged roughly 11 tall by 35 wide -- fewer than 385 raises an error.
    """
    feature = D['feature_vector'] #first 13 features
    target = D['target']
    all_super_features = [] #to have all 18 features
    for n in range(num_of_files):
        #get r, g, b values for superpixels of the image being processed now
        super_image = vstack((feature[0])[n][:,k] for k in range(3))
        super_image = super_image.T
        #this if does the following:
        #1. limits number of superpixels to 385 in the hope that all images have at least 385 images
        #Note: Case where images have less than 385 superpixels - throws error
        #2. The superpixels on the extreme right edge of the image is removed. This is based on an approximation that there are approximately 11 superpixels along the y-axis of the image.
        #This assumptions seems to be consistent throughout the training data - um, umm and uu.
        #3. When these superpixels are removed, their corresponding features in the feature vector and the targets in the target vector too are removed.
        if(super_image.shape[0] > 385):
            diff = super_image.shape[0] - 385
            for i in range(diff):
                #remove from r, g, b of superpixels
                super_image = delete(super_image, (i+1)*11, 0) #super_image.shape[0]-1
                #remove from targets
                # NOTE(review): assigns through a sliced temporary; this only
                # sticks because basic slicing of an object array returns a
                # view -- confirm target's dtype/layout supports this.
                ((target[:,n])[0]) = delete(((target[:,n])[0]), (i+1)*11, 0) #delete extreme rightmost column of superpixels(hopefully)
                #remove from feature vector
                (feature[:,n])[0] = delete((feature[:,n])[0], (i+1)*11, 0)
        #reshape the superpixel to an approximated dimension of 11*35 (This can be later automated and read from the feature vector for better performance and accuracy. Leaving this for now.)
        super_image = reshape(super_image,(11, 35, 3))
        #convert to grayscale
        gray = rgb2gray(super_image)
        #these features are dependent on the shape of the image, i.e. image as a whole. image is reshaped for this.
        #60, 10 are values selected by cross-validation
        l = local_binary_pattern(gray, 60, 10)
        h_gradient = sobel_h(gray)
        v_gradient = sobel_v(gray)
        #combine all 17 features together into 1 feature_vector
        #The 9th(0 index) feature - "v" - does not have 385 entries (only about 100 or so; don't know why). So I am not including that.
        #, reshape((feature[0])[n][:9], (1, (feature[0])[n][:9].size))
        all_features = vstack((reshape((feature[0])[n][:,0], (1, 385)), reshape((feature[0])[n][:,1], (1, 385)), reshape((feature[0])[n][:,2], (1, 385)), reshape((feature[0])[n][:,3], (1, 385)), reshape((feature[0])[n][:,4], (1, 385)), reshape((feature[0])[n][:,5], (1, 385)), reshape((feature[0])[n][:,6], (1, 385)), reshape((feature[0])[n][:,7], (1, 385)), reshape((feature[0])[n][:,8], (1, 385)), reshape((feature[0])[n][:,10], (1, 385)), reshape((feature[0])[n][:,11], (1, 385)), reshape((feature[0])[n][:,12], (1, 385)), reshape((feature[0])[n][:,13], (1, 385)), reshape((feature[0])[n][:,14], (1, 385)), reshape(l,(1, 385)), reshape(h_gradient, (1, 385)) , reshape(v_gradient, (1, 385)) ))
        all_features = all_features.T
        # Stack this image's (385 x 17) block under the previous images'.
        if n!=0:
            all_super_features = vstack((all_super_features, all_features))
        else:
            all_super_features = all_features
    #save the new feature vector with 17 features. "v" is not included
    feature_explanation = ["r", "g", "b", "nr", "ng", "o1", "o2", "h", "s", "l", "a", "b", "x", "y", "texture_lbp", "h_gradient", "v_gradient"]
    params_dict = {}
    params_dict['feature_vector'] = all_super_features
    params_dict['feature_explanation'] = feature_explanation
    params_dict['target'] = target
    params_dict['feature_dim'] = [17]
    params_dict['total_pic'] = [num_of_files]
    save_path = os.path.join(input_fldr, "%s_data" % file_heading)
    savemat(save_path, params_dict)
if __name__ == '__main__':
    #change the path; i know this is sloppy
    #the um_data and the other files are the files with the 15 features: (including x, y)
    # Raw strings keep Windows backslashes from being read as escape
    # sequences (e.g. "\U..." is a SyntaxError under Python 3).
    UM = loadmat(r"C:\Users\Joms\Desktop\um\um_data.mat")
    make_feature_vector(UM, 95, r"C:\Users\Joms\Desktop\um", "um_all")
    UMM = loadmat(r"C:\Users\Joms\Desktop\umm\umm_data.mat")
    # BUG FIX: the UMM and UU runs previously re-processed UM by mistake.
    make_feature_vector(UMM, 95, r"C:\Users\Joms\Desktop\umm", "umm_all")
    # NOTE(review): uu_data.mat is loaded from the ...\um folder -- confirm
    # whether it should live under ...\uu instead.
    UU = loadmat(r"C:\Users\Joms\Desktop\um\uu_data.mat")
    make_feature_vector(UU, 95, r"C:\Users\Joms\Desktop\uu", "uu_all")
|
Python
| 0
|
|
6bbef11c982ddee4981318e6bca9fa85610f1cc8
|
Increase revision content lenght
|
src/ggrc/migrations/versions/20170112112254_177a979b230a_update_revision_content_field.py
|
src/ggrc/migrations/versions/20170112112254_177a979b230a_update_revision_content_field.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Update revision content field.
Create Date: 2017-01-12 11:22:54.998164
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import op
# revision identifiers, used by Alembic.
# (down_revision names the migration this one is applied on top of.)
revision = '177a979b230a'
down_revision = '275cd0dcaea'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Widen revisions.content from TEXT to LONGTEXT so larger revision
    # payloads can be stored.
    op.alter_column("revisions", "content",
                    existing_type=sa.Text(),
                    type_=mysql.LONGTEXT,
                    nullable=False)
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Mirror of upgrade(): shrink the column type back to TEXT.
    op.alter_column("revisions", "content",
                    existing_type=mysql.LONGTEXT,
                    type_=sa.Text(),
                    nullable=False)
|
Python
| 0
|
|
eb4fbb28ed06b223282b02bb31f5f91e1eeb3f9f
|
Add RenormalizeWeight callback
|
seya/callbacks.py
|
seya/callbacks.py
|
import numpy as np
from keras.callbacks import Callback
class RenormalizeWeight(Callback):
    """Keras callback that L2-normalizes each row of a weight matrix.

    4-D convolutional weights are flattened to (filters, -1) before
    normalization and restored to their original shape afterwards.
    """

    def __init__(self, W):
        Callback.__init__(self)
        self.W = W
        # Remember the original shape so we can restore it after flattening.
        self.W_shape = self.W.get_value().shape

    def on_batch_start(self, batch, logs={}):
        # NOTE(review): Keras invokes `on_batch_begin`, not `on_batch_start`;
        # confirm this hook name against the Keras version in use.
        W = self.W.get_value()
        # BUG FIX: the original compared the shape *tuple* to 4
        # (`self.W_shape == 4`), which is always False -- compare the rank.
        if len(self.W_shape) == 4:
            W = W.reshape((self.W_shape[0], -1))
        norm = np.sqrt((W**2).sum(axis=-1))
        W /= norm[:, None]
        W = W.reshape(self.W_shape)
        self.W.set_value(W)
|
Python
| 0
|
|
c575f030feb90d3c6383d11265fcf7f80414ce34
|
Add an example hook script for checking valid commits
|
contrib/check-valid-commit.py
|
contrib/check-valid-commit.py
|
#!/usr/bin/env python
import commands
import getopt
import sys
# Gerrit SSH endpoint used to post review scores via `gerrit approve`.
SSH_USER = 'bot'
SSH_HOST = 'localhost'
SSH_PORT = 29418
SSH_COMMAND = 'ssh %s@%s -p %d gerrit approve ' % (SSH_USER, SSH_HOST, SSH_PORT)
# Score and message posted when the commit message fails validation.
FAILURE_SCORE = '--code-review=-2'
FAILURE_MESSAGE = 'This commit message does not match the standard.' \
 + ' Please correct the commit message and upload a replacement patch.'
# Neutral score posted when validation passes.
PASS_SCORE = '--code-review=0'
PASS_MESSAGE = ''
def main():
    """Validate a commit message and post a Gerrit review score.

    Expects --change/--project/--branch/--commit/--patchset options as
    supplied by Gerrit's patchset-created hook. (Python 2 script.)
    """
    change = None
    project = None
    branch = None
    commit = None
    patchset = None
    try:
        opts, args = getopt.getopt(sys.argv[1:], '', \
            ['change=', 'project=', 'branch=', 'commit=', 'patchset='])
    except getopt.GetoptError, err:
        print 'Error: %s' % (err)
        usage()
        sys.exit(-1)
    for arg, value in opts:
        if arg == '--change':
            change = value
        elif arg == '--project':
            project = value
        elif arg == '--branch':
            branch = value
        elif arg == '--commit':
            commit = value
        elif arg == '--patchset':
            patchset = value
        else:
            print 'Error: option %s not recognized' % (arg)
            usage()
            sys.exit(-1)
    # All five options are mandatory.
    if change == None or project == None or branch == None \
            or commit == None or patchset == None:
        usage()
        sys.exit(-1)
    # Read the raw commit object so we can inspect its message.
    command = 'git cat-file commit %s' % (commit)
    status, output = commands.getstatusoutput(command)
    if status != 0:
        print 'Error running \'%s\'. status: %s, output:\n\n%s' % \
            (command, status, output)
        sys.exit(-1)
    # The message starts after the first blank line of the commit object.
    commitMessage = output[(output.find('\n\n')+2):]
    commitLines = commitMessage.split('\n')
    # Rule 1: one-line summary followed by a blank line.
    if len(commitLines) > 1 and len(commitLines[1]) != 0:
        fail(commit, 'Invalid commit summary. The summary must be ' \
            + 'one line followed by a blank line.')
    # Rule 2: no line longer than 80 characters.
    i = 0
    for line in commitLines:
        i = i + 1
        if len(line) > 80:
            fail(commit, 'Line %d is over 80 characters.' % i)
    passes(commit)
def usage():
    """Print command-line usage information."""
    print 'Usage:\n'
    print sys.argv[0] + ' --change <change id> --project <project name> ' \
        + '--branch <branch> --commit <sha1> --patchset <patchset id>'
def fail( commit, message ):
    """Post a failing (-2) review for *commit* and exit with status 1."""
    command = SSH_COMMAND + FAILURE_SCORE + ' -m \\\"' \
        + _shell_escape( FAILURE_MESSAGE + '\n\n' + message) \
        + '\\\" ' + commit
    # The ssh result is deliberately ignored; we exit regardless.
    commands.getstatusoutput(command)
    sys.exit(1)
def passes( commit ):
    """Post a neutral (0) review for *commit* indicating validation passed."""
    command = SSH_COMMAND + PASS_SCORE + ' -m \\\"' \
        + _shell_escape(PASS_MESSAGE) + ' \\\" ' + commit
    commands.getstatusoutput(command)
def _shell_escape(x):
s = ''
for c in x:
if c in '\n':
s = s + '\\\"$\'\\n\'\\\"'
else:
s = s + c
return s
# Script entry point (run as a Gerrit hook).
if __name__ == '__main__':
    main()
|
Python
| 0
|
|
1f1d2df36a16b80c770974a9ac2bf48ccbebc3ab
|
add callable list
|
jasily/collection/funcs.py
|
jasily/collection/funcs.py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from functools import partial
class CallableList(list):
    '''
    A list of zero-argument callables that can itself be called.

    Calling the list invokes every element in order and returns the value
    produced by the last one (None when the list is empty).
    '''
    def __call__(self):
        result = None
        for callback in self:
            result = callback()
        return result

    def append_func(self, func, *args, **kwargs):
        '''
        append func with given arguments and keywords.
        '''
        self.append(partial(func, *args, **kwargs))

    def insert_func(self, index, func, *args, **kwargs):
        '''
        insert func with given arguments and keywords.
        '''
        self.insert(index, partial(func, *args, **kwargs))
|
Python
| 0.000002
|
|
bb925f03cbbb3b4a6f9abfa70c3f6df7a4f0ae16
|
Add script to update perf_expectations.json.
|
tools/perf_expectations/make_expectations.py
|
tools/perf_expectations/make_expectations.py
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import optparse
import re
import simplejson
import subprocess
import sys
import time
import urllib2
__version__ = '1.0'
# Expectations file read and rewritten in place by this script.
DEFAULT_EXPECTATIONS_FILE = 'perf_expectations.json'
# Fractional slack applied around the observed trace extremes.
DEFAULT_VARIANCE = 0.05
USAGE = ''
def ReadFile(filename):
    """Return the entire contents of *filename*, re-raising IOError after
    logging it to stderr. (Python 2 script.)"""
    try:
        file = open(filename, 'r')
    except IOError, e:
        print >> sys.stderr, ('I/O Error reading file %s(%s): %s' %
            (filename, e.errno, e.strerror))
        raise e
    if not file:
        return None
    contents = file.read()
    file.close()
    return contents
def ConvertJsonIntoDict(string):
    """Read a JSON string and convert its contents into a Python datatype.

    Raises on empty input or a parse failure after logging to stderr.
    """
    if len(string) == 0:
        print >> sys.stderr, ('Error could not parse empty string')
        raise Exception('JSON data missing')
    try:
        json = simplejson.loads(string)
    except ValueError, e:
        print >> sys.stderr, ('Error parsing string: "%s"' % string)
        raise e
    return json
# Floating point representation of last time we fetched a URL.
last_fetched_at = None
def FetchUrlContents(url):
    """Fetch *url* and return its stripped body, or None on HTTP 404.

    Throttles to at most ~2 requests/second using the module-level
    last_fetched_at timestamp. Other HTTP errors are re-raised.
    """
    global last_fetched_at
    if last_fetched_at and ((time.time() - last_fetched_at) <= 0.5):
        # Sleep for half a second to avoid overloading the server.
        time.sleep(0.5)
    try:
        last_fetched_at = time.time()
        connection = urllib2.urlopen(url)
    except urllib2.HTTPError, e:
        if e.code == 404:
            return None
        raise e
    text = connection.read().strip()
    connection.close()
    return text
def WriteJson(filename, data, keys):
    """Write a list of |keys| in |data| to the file specified in |filename|.

    Emits hand-formatted JSON (one entry per line, trailing "load": true)
    and returns True on success, False when the file cannot be opened.
    """
    try:
        file = open(filename, 'w')
    except IOError, e:
        print >> sys.stderr, ('I/O Error writing file %s(%s): %s' %
            (filename, e.errno, e.strerror))
        return False
    jsondata = []
    for key in keys:
        rowdata = []
        # Only emit the subkeys that are actually present for this entry.
        for subkey in ['reva', 'revb', 'improve', 'regress']:
            if subkey in data[key]:
                rowdata.append('"%s": %s' % (subkey, data[key][subkey]))
        jsondata.append('"%s": {%s}' % (key, ', '.join(rowdata)))
    jsondata.append('"load": true')
    json = '{%s\n}' % ',\n '.join(jsondata)
    file.write(json + '\n')
    file.close()
    return True
def Main(args):
    """Recompute regress/improve bounds for every perf expectation entry.

    For each key in perf_expectations.json with a reva..revb range, fetches
    the matching summary data, finds the high/low trace values in the range,
    applies DEFAULT_VARIANCE slack, and rewrites the file if anything
    changed. (Python 2 script.)
    """
    parser = optparse.OptionParser(usage=USAGE, version=__version__)
    options, args = parser.parse_args(args)
    # Get the list of summaries for a test.
    base_url = 'http://build.chromium.org/f/chromium/perf'
    perf = ConvertJsonIntoDict(ReadFile(DEFAULT_EXPECTATIONS_FILE))
    # Fetch graphs.dat for this combination.
    perfkeys = perf.keys()
    # In perf_expectations.json, ignore the 'load' key.
    perfkeys.remove('load')
    perfkeys.sort()
    write_new_expectations = False
    for key in perfkeys:
        value = perf[key]
        variance = DEFAULT_VARIANCE
        # Skip expectations that are missing a reva or revb. We can't generate
        # expectations for those.
        if not(value.has_key('reva') and value.has_key('revb')):
            print '%s (skipping, missing revision range)' % key
            continue
        revb = int(value['revb'])
        reva = int(value['reva'])
        # Ensure that reva is less than revb.
        if reva > revb:
            temp = reva
            reva = revb
            revb = temp
        # Get the system/test/graph/tracename and reftracename for the current key.
        matchData = re.match(r'^([^/]+)\/([^/]+)\/([^/]+)\/([^/]+)$', key)
        if not matchData:
            print '%s (skipping, cannot parse key)' % key
            continue
        system = matchData.group(1)
        test = matchData.group(2)
        graph = matchData.group(3)
        tracename = matchData.group(4)
        reftracename = tracename + '_ref'
        # Create the summary_url and get the json data for that URL.
        # FetchUrlContents() may sleep to avoid overloading the server with
        # requests.
        summary_url = '%s/%s/%s/%s-summary.dat' % (base_url, system, test, graph)
        summaryjson = FetchUrlContents(summary_url)
        if not summaryjson:
            print '%s (skipping, missing json data)' % key
            continue
        summarylist = summaryjson.split('\n')
        trace_values = {}
        for trace in [tracename, reftracename]:
            trace_values.setdefault(trace, {})
        # Find the high and low values for each of the traces.
        scanning = False
        printed_error = False
        for line in summarylist:
            json = ConvertJsonIntoDict(line)
            if int(json['rev']) <= revb:
                scanning = True
            if int(json['rev']) < reva:
                break
            # We found the upper revision in the range. Scan for trace data until we
            # find the lower revision in the range.
            if scanning:
                for trace in [tracename, reftracename]:
                    if trace not in json['traces']:
                        if not printed_error:
                            print '%s (error)' % key
                            printed_error = True
                        print ' trace %s missing' % trace
                        continue
                    if type(json['traces'][trace]) != type([]):
                        if not printed_error:
                            print '%s (error)' % key
                            printed_error = True
                        print ' trace %s format not recognized' % trace
                        continue
                    try:
                        tracevalue = float(json['traces'][trace][0])
                    except ValueError:
                        if not printed_error:
                            print '%s (error)' % key
                            printed_error = True
                        print ' trace %s value error: %s' % (
                            trace, str(json['traces'][trace][0]))
                        continue
                    # Track the running high/low for this trace.
                    for bound in ['high', 'low']:
                        trace_values[trace].setdefault(bound, tracevalue)
                    trace_values[trace]['high'] = max(trace_values[trace]['high'],
                                                      tracevalue)
                    trace_values[trace]['low'] = min(trace_values[trace]['low'],
                                                     tracevalue)
        if 'high' not in trace_values[tracename]:
            print '%s (skipping, no suitable traces matched)' % key
            continue
        # Calculate assuming high deltas are regressions and low deltas are
        # improvements.
        regress = (float(trace_values[tracename]['high']) -
                   float(trace_values[reftracename]['low']))
        improve = (float(trace_values[tracename]['low']) -
                   float(trace_values[reftracename]['high']))
        # If the existing values assume regressions are low deltas relative to
        # improvements, swap our regress and improve. This value must be a
        # scores-like result.
        if perf[key]['regress'] < perf[key]['improve']:
            temp = regress
            regress = improve
            improve = temp
        # Widen the bounds outward by the variance fraction.
        if regress < improve:
            regress = int(math.floor(regress - abs(regress*variance)))
            improve = int(math.ceil(improve + abs(improve*variance)))
        else:
            improve = int(math.floor(improve - abs(improve*variance)))
            regress = int(math.ceil(regress + abs(regress*variance)))
        if (perf[key]['regress'] == regress and perf[key]['improve'] == improve):
            print '%s (no change)' % key
            continue
        write_new_expectations = True
        print key
        print ' before = %s' % perf[key]
        print ' traces = %s' % trace_values
        perf[key]['regress'] = regress
        perf[key]['improve'] = improve
        print ' after = %s' % perf[key]
    if write_new_expectations:
        print 'writing expectations... ',
        WriteJson(DEFAULT_EXPECTATIONS_FILE, perf, perfkeys)
        print 'done'
    else:
        print 'no updates made'
# Script entry point; propagate Main's return value as the exit status.
if __name__ == '__main__':
    sys.exit(Main(sys.argv))
|
Python
| 0.00012
|
|
467170482c97c3b586d58c4729d051c1b1b99f3d
|
Add sentence level classifier.
|
actionizer_sentences.py
|
actionizer_sentences.py
|
#! /usr/bin/python
import numpy as np
import os
import re
from sklearn import datasets, cross_validation
from sklearn.base import TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
# Directory of message files named via MESSAGE_FILENAME_FORMAT (msg-0.txt, ...).
MESSAGES_DIR = "data/messages/"
MESSAGE_FILENAME_FORMAT = "msg-%d.txt"
# Judgments file; fields beyond the first two on each line are consumed as
# (start offset, length) pairs of action spans.
JUDGMENTS_PATH = "data/judgments/judgments.txt"
def load_messages():
    """Load all message bodies from MESSAGES_DIR in message-number order."""
    # os.listdir() sorts lexicographically, so rebuild each filename from its
    # numeric index instead of trusting the directory order.
    total = len(os.listdir(MESSAGES_DIR))
    messages = []
    for index in range(total):
        path = MESSAGES_DIR + (MESSAGE_FILENAME_FORMAT % index)
        with open(path) as message_file:
            messages.append(message_file.read())
    return messages
def load_sentence_judgments():
    """Parse JUDGMENTS_PATH into one list of span integers per message."""
    judgments = []
    with open(JUDGMENTS_PATH) as judgments_file:
        for line in judgments_file:
            fields = line.split()
            # Everything past the first two columns holds the span numbers;
            # short lines yield an empty judgment list.
            if len(fields) > 2:
                judgments.append([int(token) for token in fields[2:]])
            else:
                judgments.append([])
    return judgments
def load_sentences():
    """Split every message into sentences and label them.

    Returns (sentences, target) where target[i] is 1 for an annotated
    action sentence and 0 otherwise; action sentences come first.
    """
    messages = load_messages()
    judgments = load_sentence_judgments()
    action_sentences = []
    no_action_sentences = []
    for i in range(len(messages)):
        message = messages[i]
        sentences = parse_sentences(message)
        action_indices = judgments[i]
        if len(action_indices) > 0:
            # NOTE(review): this inner loop shadows the outer `i`; harmless
            # because the outer loop reassigns it, but worth renaming.
            for i in range(0, len(action_indices), 2):
                # Judgments are (start, length) pairs into the raw message.
                start_index = action_indices[i]
                length = action_indices[i+1]
                stop_index = start_index + length
                action_sentence = message[start_index:stop_index].strip().replace('\n', ' ')
                # Only count spans that line up with a parsed sentence.
                if action_sentence in sentences:
                    action_sentences.append(action_sentence)
                    sentences.remove(action_sentence)
        # Whatever was not claimed as an action sentence is a negative example.
        no_action_sentences.extend(sentences)
    target = [1 for _ in action_sentences]
    target.extend([0 for _ in no_action_sentences])
    action_sentences.extend(no_action_sentences)
    return action_sentences, target
def parse_sentences(message):
    """Split *message* into cleaned sentences. (Python 2: uses builtin reduce.)"""
    # Split the sentence on periods, exclamation marks, and double newlines. Recombine punctuation
    # marks with their sentences.
    # re.split with a capture group keeps the delimiters, so the fold below
    # glues each punctuation token back onto the preceding fragment.
    sentences = reduce(lambda acc, elem: acc[:-1] + [acc[-1] + elem] \
        if elem == '.' or elem == '?' or elem == '!' \
        else acc + [elem], re.split(r'([\.\!\?]|\n\n)', message), [])
    # Strip sentences of extra white space.
    # Replace internal newlines with spaces so that newlines don't trip up sklearn tokenizers.
    # Remove all sentences that have length 0 or are completely comprised of whitespace.
    # Remove any sentence starting with the 'From:' header, which should remove the From:, To:,
    # and Subject:
    sentences = [s.strip().replace('\n', ' ') for s in sentences if len(s) > 0 and not s.isspace() and not s.startswith('From:')]
    return sentences
# Transformer to transform a sparse matrix into a dense matrix for use in an sklearn pipeline.
class DenseTransformer(TransformerMixin):
    """Pipeline step that converts a sparse matrix into a dense one."""

    def fit(self, X, y=None, **fit_params):
        # Stateless transformer: nothing to learn.
        return self

    def transform(self, X, y=None, **fit_params):
        return X.todense()

    def fit_transform(self, X, y=None, **fit_params):
        self.fit(X, y, **fit_params)
        return self.transform(X)
def main():
    """Train a bag-of-ngrams Naive Bayes sentence classifier and report F1.

    (Python 2 script: uses a print statement.)
    """
    sentences, target = load_sentences()
    # Densify between the vectorizer and GaussianNB, which needs dense input.
    pipeline = Pipeline([('vect', CountVectorizer(ngram_range=(1, 3))), ('to_dense', DenseTransformer()), ('clf', GaussianNB())])
    pipeline.fit(sentences, target)
    # 5-fold cross-validated F1 on the same data.
    scores = cross_validation.cross_val_score(pipeline, sentences, target, scoring='f1', cv=5)
    print "F1: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2)

if __name__ == "__main__":
    main()
|
Python
| 0.999983
|
|
5350af9b80e6317c6c5c65720216c8e539194e87
|
Add account test file
|
tests/jcl/model/test_account.py
|
tests/jcl/model/test_account.py
|
##
## test_account.py
## Login : David Rousselie <dax@happycoders.org>
## Started on Wed Nov 22 19:32:53 2006 David Rousselie
## $Id$
##
## Copyright (C) 2006 David Rousselie
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
import unittest
import os
from sqlobject import *
from sqlobject.dbconnection import TheURIOpener
from jcl.jabber.error import FieldError
from jcl.model import account
from jcl.model.account import Account
from tests.jcl.model.account import AccountExample
# Scratch SQLite database used (and deleted) by every test.
DB_PATH = "/tmp/test.db"
# Append "?debug=1&debugThreading=1" for SQLObject debug output.
DB_URL = DB_PATH# + "?debug=1&debugThreading=1"
class AccountModule_TestCase(unittest.TestCase):
    """Tests for the module-level post-processing helpers in jcl.model.account."""

    def test_default_post_func(self):
        self.assertEqual(account.default_post_func("test"), "test")

    def test_boolean_post_func_1(self):
        self.assertTrue(account.boolean_post_func("1"))

    def test_boolean_post_func_0(self):
        self.assertFalse(account.boolean_post_func("0"))

    def test_boolean_post_func_True(self):
        self.assertTrue(account.boolean_post_func("True"))

    def test_boolean_post_func_true(self):
        self.assertTrue(account.boolean_post_func("true"))

    def test_boolean_post_func_False(self):
        self.assertFalse(account.boolean_post_func("False"))

    def test_boolean_post_func_false(self):
        self.assertFalse(account.boolean_post_func("false"))

    def test_int_post_func(self):
        self.assertEqual(account.int_post_func("42"), 42)

    def test_string_not_null_post_func_not_null(self):
        self.assertEqual(account.string_not_null_post_func("ok"), "ok")

    def test_string_not_null_post_func_none(self):
        self.assertRaises(FieldError,
                          account.string_not_null_post_func,
                          None)

    def test_string_not_null_post_func_empty(self):
        self.assertRaises(FieldError,
                          account.string_not_null_post_func,
                          "")

    def test_mandatory_field(self):
        self.assertRaises(FieldError,
                          account.mandatory_field,
                          "")
class Account_TestCase(unittest.TestCase):
    """Tests for the Account model; each test opens its own SQLObject
    connection to the scratch SQLite database."""

    def setUp(self):
        # Start from a clean slate: remove any leftover database file.
        if os.path.exists(DB_PATH):
            os.unlink(DB_PATH)

    def tearDown(self):
        # Drop tables, purge the cached connection URI and remove the file
        # so the next test re-creates everything from scratch.
        account.hub.threadConnection = connectionForURI('sqlite://' + DB_URL)
        AccountExample.dropTable(ifExists = True)
        Account.dropTable(ifExists = True)
        del TheURIOpener.cachedURIs['sqlite://' + DB_URL]
        account.hub.threadConnection.close()
        del account.hub.threadConnection
        if os.path.exists(DB_PATH):
            os.unlink(DB_PATH)

    def test_set_status(self):
        account.hub.threadConnection = connectionForURI('sqlite://' + DB_URL)
        Account.createTable(ifNotExists = True)
        account11 = Account(user_jid = "test1@test.com", \
                                name = "account11", \
                                jid = "account11@jcl.test.com")
        account11.status = account.OFFLINE
        self.assertEquals(account11.status, account.OFFLINE)
        # TODO : test first_check attribute
        del account.hub.threadConnection

    def test_set_status_live_password(self):
        account.hub.threadConnection = connectionForURI('sqlite://' + DB_URL)
        AccountExample.createTable(ifNotExists = True)
        account11 = AccountExample(user_jid = "test1@test.com", \
                                       name = "account11", \
                                       jid = "account11@jcl.test.com", \
                                       login = "mylogin", \
                                       password = "mypassword", \
                                       store_password = False, \
                                       test_enum = "choice3", \
                                       test_int = 21)
        account11.waiting_password_reply = True
        account11.status = account.OFFLINE
        # Going offline must clear the pending-password state and, because
        # store_password is False, discard the live password.
        self.assertEquals(account11.status, account.OFFLINE)
        self.assertEquals(account11.waiting_password_reply, False)
        self.assertEquals(account11.password, None)
        del account.hub.threadConnection
|
Python
| 0.000001
|
|
44b6b0ff5efc6d9fcda4f886640663b68e7d6c14
|
Add initial code for getting batting stats over a specified timeframe
|
pybaseball/league_batting_stats.py
|
pybaseball/league_batting_stats.py
|
"""
TODO
pull batting stats over specified time period
allow option to get stats for full seasons instead of ranges
"""
import requests
import pandas as pd
from bs4 import BeautifulSoup
def get_soup(start_dt, end_dt):
    """Fetch the Baseball-Reference daily batting page for a date range
    and return it parsed with BeautifulSoup (or None if a date is missing)."""
    # get most recent standings if date not specified
    if (start_dt is None) or (end_dt is None):
        print('Error: a date range needs to be specified')
        return None
    url = "http://www.baseball-reference.com/leagues/daily.cgi?user_team=&bust_cache=&type=b&lastndays=7&dates=fromandto&fromandto={}.{}&level=mlb&franch=&stat=&stat_value=0".format(start_dt, end_dt)
    page = requests.get(url).content
    return BeautifulSoup(page, "html.parser")
def get_table(soup):
    """Extract the first HTML table from *soup* into a pandas DataFrame.

    The first header cell is dropped and the heading row becomes the
    DataFrame's column labels.
    """
    table = soup.find_all('table')[0]
    data = []
    # Skip the leading rank/index header cell.
    headings = [th.get_text() for th in table.find("tr").find_all("th")][1:]
    data.append(headings)
    table_body = table.find('tbody')
    rows = table_body.find_all('tr')
    for row in rows:
        cols = row.find_all('td')
        cols = [ele.text.strip() for ele in cols]
        # NOTE(review): dropping empty cells shifts the remaining values left
        # whenever a mid-row cell is blank -- confirm this is intended.
        data.append([ele for ele in cols if ele])
    data = pd.DataFrame(data)
    # Promote the first row (the headings) to column labels, then drop it.
    data = data.rename(columns=data.iloc[0])
    data = data.reindex(data.index.drop(0))
    return data
def batting_stats(start_dt=None, end_dt=None):
    """Return a DataFrame of league batting stats for the given date range."""
    # Fetch the Baseball-Reference page, then tabulate it.
    page_soup = get_soup(start_dt, end_dt)
    return get_table(page_soup)
|
Python
| 0
|
|
072423365ad1c03dd593f5b8528a7b60c0c9bee9
|
Add AuctionHouse table.
|
pydarkstar/tables/auction_house.py
|
pydarkstar/tables/auction_house.py
|
"""
.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
from sqlalchemy import Column, Integer, SmallInteger, String, text
from pydarkstar.tables.base import Base
class AuctionHouse(Base):
    """SQLAlchemy ORM mapping for the `auction_house` table."""
    __tablename__ = 'auction_house'
    id = Column(Integer, primary_key=True)
    # Item being listed; indexed for per-item history queries.
    itemid = Column(SmallInteger, nullable=False, index=True, server_default=text("'0'"))
    stack = Column(Integer, nullable=False, server_default=text("'0'"))
    seller = Column(Integer, nullable=False, server_default=text("'0'"))
    seller_name = Column(String(15))
    date = Column(Integer, nullable=False, server_default=text("'0'"))
    price = Column(Integer, nullable=False, server_default=text("'0'"))
    # Sale fields stay at their zero defaults until the listing is bought.
    buyer_name = Column(String(15))
    sale = Column(Integer, nullable=False, server_default=text("'0'"))
    sell_date = Column(Integer, nullable=False, server_default=text("'0'"))
if __name__ == '__main__':
    # Module only defines the model; nothing to do when run directly.
    pass
|
Python
| 0
|
|
2cf3694ac4f5a5920a65e9b4b914262e7e375500
|
1-4. MNIST-Classification with softmax function
|
mnist.py
|
mnist.py
|
# Import TensorFlow, which this script is built on.
import tensorflow as tf
# Fetch the MNIST data set (images of the handwritten digits 0-9).
# This script shows how to train on MNIST and classify new digit inputs.
from tensorflow.examples.tutorials.mnist import input_data
# Use one-hot encoding: e.g. the digit 1 becomes [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# and the digit 2 becomes [0, 1, 0, 0, 0, 0, 0, 0, 0, 0].
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
# Step 1 Neural network
# Each digit image is 28 x 28 pixels, i.e. 784 input values in total.
X = tf.placeholder(tf.float32)
# The output is one of the 10 classes 0-9.
Y = tf.placeholder(tf.float32)
# (1 x 784) x (784 x 300) x (300 x 512) x (512 x 1024) x (1024 x 10) = (1 x 10)
# Declare the weights for the 4 layers.
W1 = tf.Variable(tf.random_normal([784,300],stddev = 0.01))
W2 = tf.Variable(tf.random_normal([300,512],stddev = 0.01))
W3 = tf.Variable(tf.random_normal([512,1024],stddev = 0.01))
W4 = tf.Variable(tf.random_normal([1024,10],stddev = 0.01))
# The per-layer operations (sigmoid activations, linear output layer).
L1 = tf.nn.sigmoid(tf.matmul(X,W1))
L2 = tf.nn.sigmoid(tf.matmul(L1,W2))
L3 = tf.nn.sigmoid(tf.matmul(L2,W3))
hypothesis = tf.matmul(L3,W4)
# Use softmax cross-entropy as the cost function.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits= hypothesis,labels= Y))
# Use AdamOptimizer instead of the plain GradientDescent used earlier.
train = tf.train.AdamOptimizer(0.001).minimize(cost)
# Create the TensorFlow session.
with tf.Session() as sess:
# 변수를 초기화 해줍니다.
init = tf.global_variables_initializer()
sess.run(init)
batch_size = 100
total_batch = int(mnist.train.num_examples/batch_size)
# 총 20번의 학습을 진행합니다.
for step in range(20):
# cost 값을 저장해주는 변수를 추가합니다.
sum_cost = 0
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
sess.run(train, feed_dict={X: batch_xs, Y: batch_ys})
sum_cost += sess.run(cost, feed_dict={X: batch_xs, Y: batch_ys})
print("Step:",step,"Average cost:",sum_cost/total_batch)
print("Optimization Finished")
# 학습 이후의 모델이 얼마나 잘 예측을 하는지 확인합니다.
pred = tf.equal(tf.argmax(hypothesis,1),tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(pred,tf.float32))
# mnist.test.images를 입력으로 넣어서 판단한 것이 실제 숫자와 얼마나 같은지를 나타냅니다.
# 1에 해당하는 Image를 넣었을 때에 1로 판단을 하면 이 경우에는 Accuracy가 100이 됩니다.
# 이를 전체 mnist.test.images를 넣어서 실제 숫자와 같은지를 확인하고 Accuracy를 보여줍니다.
# 저는 97.8%가 나왔습니다.
print("Accuracy:",sess.run(accuracy,feed_dict={X:mnist.test.images, Y:mnist.test.labels}))
|
Python
| 0.999571
|
|
f85f6ba07c47a6ccbd38a9e7bc2e9a2c69ebd09a
|
read senor values from rpi
|
pythonLib/ArduinoMoistureSensor.py
|
pythonLib/ArduinoMoistureSensor.py
|
"""Read six 16-bit moisture readings from an Arduino over I2C."""
import smbus
import sys  # was missing: sys.argv below raised NameError at runtime
import time

bus = smbus.SMBus(1)

# I2C address of the Arduino, given as the first command-line argument.
address = int(sys.argv[1])

# Read one block of bytes (six big-endian 16-bit sensor values).
data = bus.read_i2c_block_data(address, 0)
for i in range(0, 6):
    # Combine the high and low bytes into a single 16-bit reading.
    print (data[2 * i] << 8) + data[2 * i + 1]
|
Python
| 0.000001
|
|
47d7cfcd9db1a54e52532819895060527e1988b9
|
update qlcoder
|
qlcoder/scheme_study/functional.py
|
qlcoder/scheme_study/functional.py
|
if __name__ == '__main__':
    # Build the list of 0..7654320 directly: list(range(n)) is equivalent
    # to the original allocate-then-fill loop but runs in C and reads clearly.
    my_arr = list(range(0, 7654321))
|
Python
| 0.000001
|
|
7618697cdb892388d7c5ddb731f5b9f138389ca4
|
add A4
|
A4/TestHashtable.py
|
A4/TestHashtable.py
|
#!/usr/bin/env python2
from hashtable import Hashtable, LinkedList, hashFunction
import unittest
import collections
class TestHashtable(unittest.TestCase):
    """Exercises the student Hashtable against a small fixture mapping."""

    def setUp(self):
        # Store the fixture on the instance: the original assigned a bare
        # local that was discarded when setUp returned, so every test
        # method raised NameError on `buildings`.
        self.buildings = {
            "CSCI": "McGlothlin-Street",
            "GSWS": "Tucker",
            "ENGL": "Tucker",
            "LING": "Tyler",
            "GERM": "Washington",
        }

    def testWithoutFunction(self):
        """Values survive a round trip using a caller-supplied hash."""
        testingFunction = lambda key, numBuckets: sum(map(ord, key)) % numBuckets
        q = Hashtable(testingFunction, 1000)
        # .items() must be *called*; iterating the bound method is a TypeError.
        for key, value in self.buildings.items():
            q[key] = value
        for key, expected in self.buildings.items():
            observed = q[key]
            self.assertEqual(observed, expected, "small hashtable without your hash function: value changed after being added!\nkey:{}\nexpected value:{}\nobserved value:{}".format(key, expected, observed))

    def testWithFunction(self):
        """Values survive a round trip using the student's hashFunction."""
        q = Hashtable(hashFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        for key, expected in self.buildings.items():
            observed = q[key]
            self.assertEqual(observed, expected, "small hashtable with your hash function: value changed after being added! check __getitem__/__setitem__\nkey:{}\nexpected value:{}\nobserved value:{}".format(key, expected, observed))

    def testContains(self):
        """Every inserted key is reported present via the `in` keyword."""
        q = Hashtable(hashFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        for key in self.buildings.keys():
            self.assertIn(key, q, "membership in small hashtable: `in` keyword didn't work! check __contains__.\nkey:{}".format(key,))

    def testLen(self):
        """len() matches the number of distinct keys inserted."""
        q = Hashtable(hashFunction, 1000)
        for key, value in self.buildings.items():
            q[key] = value
        self.assertLessEqual(len(q), len(self.buildings), "length: {} items is too many! check __len__.".format(len(q)))
        self.assertGreaterEqual(len(q), len(self.buildings), "length: {} items is not enough! check __len__.".format(len(q)))


if __name__ == "__main__":
    unittest.main()
|
Python
| 0.999996
|
|
c26b20a44c47474f88c8f155b36c8c6f0dcfd072
|
Move packet processing into its own class
|
innovate/packet.py
|
innovate/packet.py
|
"""One data packet in Innovate Serial Protocol version 2 (ISP2).
For data format specifications, see
http://www.innovatemotorsports.com/support/downloads/Seriallog-2.pdf
"""
import struct
class InnovatePacket(object):
    """A packet in the Innovate Serial Protocol version 2 (ISP2).

    ISP2 packets are composed of big-endian 16 bit words: one header
    word followed by ``packet_length`` data words.
    """
    # Bitmasks over the 16-bit header word.
    START_MARKER_MASK = 0b1000000000000000
    # In a header word, bits 13, 9, and 7 will be 1.
    HEADER_MASK = START_MARKER_MASK | 0b0010001010000000
    RECORDING_TO_FLASH_MASK = 0b0100000000000000  # In header. 1 is recording.
    SENSOR_DATA_MASK = 0b0001000000000000  # In header. 1 if data, 0 if reply to command.
    CAN_LOG_MASK = 0b0000100000000000  # In header. 1 if device can do internal logging.
    LC1_HIGH_MASK = 0b0100001000000000  # First of two words from an LC-1, bits always high
    LC1_LOW_MASK = 0b1010000010000000   # First of two words from an LC-1, bits always low

    def __init__(self, header=None, data=None):
        """Create a packet from raw byte strings; either may be ``None``."""
        self.header = header
        self.data = data

    def _to_words(self, bytestring):
        """Convert a byte string to a tuple of 16-bit words (ints).

        Returns ``None`` when *bytestring* is ``None``.
        """
        if bytestring is None:
            return None
        # Each word is two bytes; floor-divide so the count is an int on
        # Python 3 (true division produced a float).
        n_words = len(bytestring) // 2
        # ISP2 words are big endian unsigned shorts: ">" + "H".
        return struct.unpack(">%dH" % n_words, bytestring)

    @property
    def header(self):
        return self._header

    @header.setter
    def header(self, header):
        """Set the header from a two-byte string (``None`` clears it).

        Raises if the input is not exactly one word or fails the
        HEADER_MASK validity check.
        """
        if header is None:
            # The constructor default is None; previously this crashed on
            # len(None).  Leave the header unset instead.
            self._header = None
            return
        words = self._to_words(header)
        if len(words) != 1:
            raise Exception('Header must be exactly one word long.')
        word = words[0]
        if not word & self.HEADER_MASK == self.HEADER_MASK:
            raise Exception('Invalid header')
        self._header = word

    ## Data stored in the header ##

    @property
    def packet_length(self):
        """Number of data words after the header (each word is 2 bytes).

        Returns ``None`` when no header has been set.
        """
        if not self._header:
            return None
        # Packet length is encoded in bit 8 and bits 6-0.
        # The header is stored as a bare int; the original indexed it as
        # a tuple (self._header[0]) and also had a typo (_headeri), both
        # of which raised at runtime.
        packet_length = self._header & 0b0000000001111111
        # Bit 8 is the 7th (zero-indexed) bit in the length.
        if self._header & 0b0000000100000000:
            packet_length += 0b10000000  # 128
        return packet_length

    @property
    def is_recording_to_flash(self):
        """True when the data is being recorded to flash."""
        if not self._header:
            return None
        return self._header & self.RECORDING_TO_FLASH_MASK == self.RECORDING_TO_FLASH_MASK

    @property
    def is_sensor_data(self):
        """True if the packet carries sensor data, False for a command reply."""
        if not self._header:
            return None
        return self._header & self.SENSOR_DATA_MASK == self.SENSOR_DATA_MASK

    @property
    def can_log(self):
        """True if the originating device can do internal logging."""
        if not self._header:
            return None
        return self._header & self.CAN_LOG_MASK == self.CAN_LOG_MASK

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        """Set the data words from a byte string (``None`` clears them)."""
        self._data = self._to_words(data)
|
Python
| 0
|
|
d635a60140c11c64db4ac887bc79396484bb55e3
|
Add model_utils.print_graph_layer_shapes to handle Graph models. Also handle Merge layers
|
keras/utils/model_utils.py
|
keras/utils/model_utils.py
|
from __future__ import print_function
import numpy as np
import theano
def print_graph_layer_shapes(graph, input_shapes):
    """
    Utility function to print the shape of the output at each layer of a Graph

    Arguments:
        graph: An instance of models.Graph
        input_shapes: A dict that gives a shape for each input to the Graph
    """
    # Symbolic input variables, in the graph's declared input order.
    input_vars = [graph.inputs[name].input
                  for name in graph.input_order]
    # NOTE(review): output_vars is computed but never used below —
    # possibly vestigial; confirm before removing.
    output_vars = [graph.outputs[name].get_output()
                   for name in graph.output_order]
    # Zero-filled dummy arrays, one per input, matching the given shapes.
    input_dummy = [np.zeros(input_shapes[name], dtype=np.float32)
                   for name in graph.input_order]
    print("input shapes : ", input_shapes)
    for name, l in graph.nodes.items():
        # Compile a function that returns only the node's output *shape*;
        # nodes that don't depend on every input are tolerated.
        shape_f = theano.function(input_vars,
                                  l.get_output(train=False).shape,
                                  on_unused_input='ignore')
        out_shape = shape_f(*input_dummy)
        print('shape after', l.get_config()['name'], "(", name, ") :", out_shape)
def print_model_layer_shapes(model, input_shapes):
    """
    Utility function that prints the shape of the output at each layer.

    Arguments:
        model: An instance of models.Model
        input_shape: The shape of the input you will provide to the model.
                     Either a tuple (for a single input) or a list of tuple
    """
    # This is to handle the case where a model has been connected to a previous
    # layer (and therefore get_input would recurse into previous layer's
    # output).
    if hasattr(model.layers[0], 'previous'):
        # TODO: If the model is used as a part of another model, get_input will
        # return the input of the whole model and this won't work. So this is
        # not handled yet
        raise Exception("This function doesn't work on model used as subparts "
                        " for other models")

    # We allow the shortcut input_shapes=(1, 1, 28) instead of
    # input_shapes=[(1, 1, 28)].
    if not isinstance(input_shapes[0], tuple):
        input_shapes = [input_shapes]

    input_vars = model.get_input(train=False)
    # theano.function expects a list of variables.
    if not isinstance(input_vars, list):
        input_vars = [input_vars]
    # Zero-filled dummy inputs, one array per declared input shape.
    input_dummy = [np.zeros(shape, dtype=np.float32)
                   for shape in input_shapes]

    print("input shapes : ", input_shapes)
    for l in model.layers:
        # Compile a function returning only the symbolic output shape.
        shape_f = theano.function(input_vars,
                                  l.get_output(train=False).shape)
        out_shape = shape_f(*input_dummy)
        print('shape after', l.get_config()['name'], ":", out_shape)
|
from __future__ import print_function
import numpy as np
import theano
def print_layer_shapes(model, input_shape):
"""
Utility function that prints the shape of the output at each layer.
Arguments:
model: An instance of models.Model
input_shape: The shape of the input you will provide to the model.
"""
# This is to handle the case where a model has been connected to a previous
# layer (and therefore get_input would recurse into previous layer's
# output).
if hasattr(model.layers[0], 'previous'):
# TODO: If the model is used as a part of another model, get_input will
# return the input of the whole model and this won't work. So this is
# not handled yet
raise Exception("This function doesn't work on model used as subparts "
" for other models")
input_var = model.get_input(train=False)
input_tmp = np.zeros(input_shape, dtype=np.float32)
print("input shape : ", input_shape)
for l in model.layers:
shape_f = theano.function([input_var], l.get_output(train=False).shape)
out_shape = shape_f(input_tmp)
print('shape after', l.get_config()['name'], ":", out_shape)
|
Python
| 0
|
f379160e56a94359d9571ea1b1db1f7544677a57
|
Fix reference to `latestEvent` in tests.
|
tests/sentry/api/serializers/test_grouphash.py
|
tests/sentry/api/serializers/test_grouphash.py
|
from __future__ import absolute_import
from sentry.api.serializers import serialize
from sentry.models import Event, GroupHash
from sentry.testutils import TestCase
class GroupHashSerializerTest(TestCase):
    """Checks that the GroupHash serializer resolves 'latestEvent'
    correctly across the missing/mismatched/valid cases."""

    def _group_and_hash(self):
        """Create a fresh group plus a GroupHash attached to it."""
        group = self.create_group()
        group_hash = GroupHash.objects.create(
            project=group.project,
            group=group,
            hash='xyz',
        )
        return group, group_hash

    def test_no_latest_event(self):
        # No last-processed event recorded at all.
        user = self.create_user()
        _, group_hash = self._group_and_hash()
        assert serialize(group_hash, user=user)['latestEvent'] is None

    def test_missing_latest_event(self):
        # A last-processed id is recorded, but no such event exists.
        user = self.create_user()
        group, group_hash = self._group_and_hash()
        GroupHash.record_last_processed_event_id(
            group.project_id,
            [group_hash.id],
            ['invalid'],
        )
        assert serialize(group_hash, user=user)['latestEvent'] is None

    def test_mismatched_latest_event(self):
        # The recorded event belongs to a *different* group.
        user = self.create_user()
        group, group_hash = self._group_and_hash()
        event = self.create_event(group=self.create_group())
        GroupHash.record_last_processed_event_id(
            group.project_id,
            [group_hash.id],
            event.event_id,
        )
        assert serialize(group_hash, user=user)['latestEvent'] is None

    def test_valid_latest_event(self):
        # The recorded event matches the hash's own group and serializes.
        user = self.create_user()
        group, group_hash = self._group_and_hash()
        event = Event.objects.get(id=self.create_event(group=group).id)
        GroupHash.record_last_processed_event_id(
            group.project_id,
            [group_hash.id],
            event.event_id,
        )
        assert serialize(group_hash, user=user)['latestEvent'] == serialize(event, user=user)
|
from __future__ import absolute_import
from sentry.api.serializers import serialize
from sentry.models import Event, GroupHash
from sentry.testutils import TestCase
class GroupHashSerializerTest(TestCase):
def test_no_latest_event(self):
user = self.create_user()
group = self.create_group()
hash = GroupHash.objects.create(
project=group.project,
group=group,
hash='xyz',
)
result = serialize(hash, user=user)
assert result['latest_event'] is None
def test_missing_latest_event(self):
user = self.create_user()
group = self.create_group()
hash = GroupHash.objects.create(
project=group.project,
group=group,
hash='xyz',
)
GroupHash.record_last_processed_event_id(
group.project_id,
[hash.id],
['invalid'],
)
result = serialize(hash, user=user)
assert result['latest_event'] is None
def test_mismatched_latest_event(self):
user = self.create_user()
group = self.create_group()
hash = GroupHash.objects.create(
project=group.project,
group=group,
hash='xyz',
)
event = self.create_event(group=self.create_group())
GroupHash.record_last_processed_event_id(
group.project_id,
[hash.id],
event.event_id,
)
result = serialize(hash, user=user)
assert result['latest_event'] is None
def test_valid_latest_event(self):
user = self.create_user()
group = self.create_group()
hash = GroupHash.objects.create(
project=group.project,
group=group,
hash='xyz',
)
event = Event.objects.get(id=self.create_event(group=group).id)
GroupHash.record_last_processed_event_id(
group.project_id,
[hash.id],
event.event_id,
)
result = serialize(hash, user=user)
assert result['latest_event'] == serialize(event, user=user)
|
Python
| 0
|
91e04b558b95aa21d5f7c730fc8355e5413ab83c
|
Use values and values_list in API closes #433
|
judge/views/api.py
|
judge/views/api.py
|
from operator import attrgetter
from django.db.models import Prefetch
from django.http import JsonResponse, Http404
from django.shortcuts import get_object_or_404
from dmoj import settings
from judge.models import Contest, Problem, Profile, Submission, ContestTag
def sane_time_repr(delta):
    """Format a ``datetime.timedelta`` as zero-padded ``DD:HH:MM``.

    Only whole days/hours/minutes are shown; leftover seconds are dropped.
    """
    days = delta.days
    # Floor division keeps these ints on both Python 2 and 3; the original
    # true division produced floats under Python 3.
    hours = delta.seconds // 3600
    minutes = (delta.seconds % 3600) // 60
    return '%02d:%02d:%02d' % (days, hours, minutes)
def api_contest_list(request):
    """Return all public, non-private contests as JSON keyed by contest key."""
    contests = {}
    # Prefetch tag names into `tag_list` and skip the (large) description.
    for c in Contest.objects.filter(is_public=True, is_private=False).prefetch_related(
            Prefetch('tags', queryset=ContestTag.objects.only('name'), to_attr='tag_list')).defer('description'):
        contests[c.key] = {
            'name': c.name,
            'start_time': c.start_time.isoformat(),
            'end_time': c.end_time.isoformat(),
            # time_limit may be None; sane_time_repr runs only when it is set.
            'time_limit': c.time_limit and sane_time_repr(c.time_limit),
            # NOTE(review): map() is lazy (not JSON-serializable) on Python 3;
            # this appears to target Python 2 — confirm.
            'labels': map(attrgetter('name'), c.tag_list),
        }
    return JsonResponse(contests)
def api_problem_list(request):
    """Return public problems as JSON keyed by problem code.

    Honors optional full-text ``?search=`` terms when FTS is enabled.
    """
    qs = Problem.objects.filter(is_public=True)
    if settings.ENABLE_FTS and 'search' in request.GET:
        # Join repeated ?search= parameters into one query string.
        query = ' '.join(request.GET.getlist('search')).strip()
        if query:
            qs = qs.search(query)

    problems = {}
    # values_list avoids instantiating model objects for this bulk dump.
    for code, points, partial, name, group in qs.values_list('code', 'points', 'partial', 'name', 'group__full_name'):
        problems[code] = {
            'points': points,
            'partial': partial,
            'name': name,
            'group': group
        }
    return JsonResponse(problems)
def api_problem_info(request, problem):
    """Return one problem's metadata as JSON.

    Raises Http404 when the problem is missing or not accessible to the
    requesting user.
    """
    p = get_object_or_404(Problem, code=problem)
    if not p.is_accessible_by(request.user):
        raise Http404()

    return JsonResponse({
        'name': p.name,
        'authors': list(p.authors.values_list('user__username', flat=True)),
        'types': list(p.types.values_list('full_name', flat=True)),
        'group': p.group.full_name,
        'time_limit': p.time_limit,
        'memory_limit': p.memory_limit,
        'points': p.points,
        'partial': p.partial,
        'languages': list(p.allowed_languages.values_list('key', flat=True)),
    })
def api_user_list(request):
    """Return every user profile as JSON keyed by username."""
    users = {}
    # values_list keeps this a single flat query with no model instances.
    for username, name, points, rank in Profile.objects.values_list('user__username', 'name', 'points', 'display_rank'):
        users[username] = {
            'display_name': name,
            'points': points,
            'rank': rank
        }
    return JsonResponse(users)
def api_user_info(request, user):
    """Return one user's profile summary as JSON; 404 if unknown."""
    p = get_object_or_404(Profile, user__username=user)
    return JsonResponse({
        'display_name': p.name,
        'points': p.points,
        'rank': p.display_rank,
        'solved_problems': [],  # TODO
    })
def api_user_submissions(request, user):
    """Return a user's submissions to public problems, keyed by submission id."""
    p = get_object_or_404(Profile, user__username=user)
    subs = Submission.objects.filter(user=p, problem__is_public=True)

    data = {}
    # .values() pulls only the needed columns, including the joined
    # problem code and language key, without model instantiation.
    for s in subs.values('id', 'problem__code', 'time', 'memory', 'points', 'language__key', 'status', 'result'):
        data[s['id']] = {
            'problem': s['problem__code'],
            'time': s['time'],
            'memory': s['memory'],
            'points': s['points'],
            'language': s['language__key'],
            'status': s['status'],
            'result': s['result'],
        }
    return JsonResponse(data)
|
from operator import attrgetter
from django.db.models import Prefetch
from django.http import JsonResponse, Http404
from django.shortcuts import get_object_or_404
from dmoj import settings
from judge.models import Contest, Problem, Profile, Submission, ContestTag
def sane_time_repr(delta):
days = delta.days
hours = delta.seconds / 3600
minutes = (delta.seconds % 3600) / 60
return '%02d:%02d:%02d' % (days, hours, minutes)
def api_contest_list(request):
contests = {}
for c in Contest.objects.filter(is_public=True, is_private=False).prefetch_related(
Prefetch('tags', queryset=ContestTag.objects.only('name'), to_attr='tag_list')):
contests[c.key] = {
'name': c.name,
'start_time': c.start_time.isoformat(),
'end_time': c.end_time.isoformat(),
'time_limit': c.time_limit and sane_time_repr(c.time_limit),
'labels': map(attrgetter('name'), c.tag_list),
}
return JsonResponse(contests)
def api_problem_list(request):
qs = Problem.objects.filter(is_public=True)
if settings.ENABLE_FTS and 'search' in request.GET:
query = ' '.join(request.GET.getlist('search')).strip()
if query:
qs = qs.search(query)
problems = {}
for p in qs:
problems[p.code] = {
'points': p.points,
'partial': p.partial,
'name': p.name,
'group': p.group.full_name
}
return JsonResponse(problems)
def api_problem_info(request, problem):
p = get_object_or_404(Problem, code=problem)
if not p.is_accessible_by(request.user):
raise Http404()
return JsonResponse({
'name': p.name,
'authors': list(p.authors.values_list('user__username', flat=True)),
'types': list(p.types.values_list('full_name', flat=True)),
'group': p.group.full_name,
'time_limit': p.time_limit,
'memory_limit': p.memory_limit,
'points': p.points,
'partial': p.partial,
'languages': list(p.allowed_languages.values_list('key', flat=True)),
})
def api_user_list(request):
users = {}
for p in Profile.objects.select_related('user').only('user__username', 'name', 'points', 'display_rank'):
users[p.user.username] = {
'display_name': p.name,
'points': p.points,
'rank': p.display_rank
}
return JsonResponse(users)
def api_user_info(request, user):
p = get_object_or_404(Profile, user__username=user)
return JsonResponse({
'display_name': p.name,
'points': p.points,
'rank': p.display_rank,
'solved_problems': [], # TODO
})
def api_user_submissions(request, user):
p = get_object_or_404(Profile, user__username=user)
subs = Submission.objects.filter(user=p, problem__is_public=True).select_related('problem', 'language') \
.only('id', 'problem__code', 'time', 'memory', 'points', 'language__key', 'status', 'result')
data = {}
for s in subs:
data[s.id] = {
'problem': s.problem.code,
'time': s.time,
'memory': s.memory,
'points': s.points,
'language': s.language.key,
'status': s.status,
'result': s.result
}
return JsonResponse(data)
|
Python
| 0
|
db13f88055d5ea2357ecc4b996f80d3392655516
|
Create parse.py
|
parse.py
|
parse.py
|
"""Scan Cisco config dumps (*.txt) for key settings and report gaps."""
__version__ = "1.0"

import os
from ciscoconfparse import CiscoConfParse

# -----------------------------------------------
# Create the db dictionary to store all records
# -----------------------------------------------
# Maps hostname -> {search label -> list of matching config lines}.
db = {}

# ----------------------------------------------------------------
# Update the dictionary below to search for new search parameters
# ----------------------------------------------------------------
data_to_search = {"NTP": r"ntp server",
                  "SNMP": r"snmp server",
                  "USERNAME": r"username",
                  "AAA": r"aaa",
                  "VERSION": r"System image file"}

print ("--------------------------------------------------------------------")
print ("  Searching current directory and sub-directories for .txt files....")
print ("--------------------------------------------------------------------")
for path, dirs, files in os.walk("."):
    for f in files:
        if f.endswith('.txt'):
            # Hostname is the filename minus its .txt extension.
            hostname = f.replace(".txt", "")
            print ("Reading data from: {}".format(os.path.join(path, f)))
            # Create an entry for the devices based on the hostname
            db[hostname] = {}
            for search_parameter in data_to_search:
                db[hostname][search_parameter] = []
            # Read the configuration file
            parse = CiscoConfParse(os.path.join(path, f))
            #----------------------------------------------------------
            # Search for all relevant items and store findings in the
            # db dictionary so that we can use later on
            #----------------------------------------------------------
            for search_parameter in data_to_search:
                for obj in parse.find_objects(data_to_search[search_parameter]):
                    db[hostname][search_parameter].append(obj.text)

print ("-----------------------")
print (" Configuration snapshot")
print ("-----------------------")
# Cycle through all the devices in the database
for device in sorted(db):
    print ("[{}]".format(device))
    # Cycle through each item in data_to_search
    for search_parameter in data_to_search:
        # If there is a value then print it
        if db[device][search_parameter]:
            for line in db[device][search_parameter]:
                print ("  {}: {}".format(search_parameter.ljust(10), line))
        # Otherwise print that nothing was found
        else:
            print ("  {}: NOT FOUND".format(search_parameter.ljust(10)))
    print ("")

print ("-------------------------------")
print ("  Devices with missing entries ")
print ("-------------------------------")
# Summarize which devices lack any match for each search label.
for device in sorted(db):
    for entry in data_to_search:
        if not db[device][entry]:
            print ("[{}] has no entry defined for '{}'".format(device.ljust(25), entry))
|
Python
| 0.00002
|
|
bee35885bb845ea77aa4586bca33da3e54b92ed2
|
Add `albumtypes` plugin
|
beetsplug/albumtypes.py
|
beetsplug/albumtypes.py
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2021, Edgars Supe.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds an album template field for formatted album types."""
from __future__ import division, absolute_import, print_function
from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.library import Album
from beets.plugins import BeetsPlugin
class AlbumTypesPlugin(BeetsPlugin):
    """Adds an album template field for formatted album types."""

    def __init__(self):
        """Init AlbumTypesPlugin."""
        super(AlbumTypesPlugin, self).__init__()
        self.album_template_fields['atypes'] = self._atypes

    def _atypes(self, item: Album):
        """Render the album's types as bracketed abbreviations.

        Reads `types` (type -> abbreviation pairs), `ignore_va` (types to
        skip on Various Artists albums) and `bracket` from the plugin
        config.
        """
        self.config.add({
            'types': [],
            'ignore_va': [],
            # Fixed: the default was registered under 'brackets' while the
            # lookup below reads 'bracket', so the default never applied.
            'bracket': '[]',
        })
        types = self.config['types'].as_pairs()
        ignore_va = self.config['ignore_va'].as_str_seq()
        bracket = self.config['bracket'].as_str()

        # Assign a left and right bracket, or none if the setting is empty
        # or malformed (anything but exactly two characters).
        if len(bracket) == 2:
            bracket_l = bracket[0]
            bracket_r = bracket[1]
        else:
            bracket_l = u''
            bracket_r = u''

        res = ''
        albumtypes = item.albumtypes.split('; ')
        is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
        # `atype` avoids shadowing the builtin `type`.
        for atype, abbrev in types:
            if atype in albumtypes and abbrev:
                # Skip types configured as ignored for VA albums.
                if not is_va or (atype not in ignore_va and is_va):
                    res += bracket_l + abbrev + bracket_r
        return res
|
Python
| 0.000001
|
|
f859eb67fdc66b930c3664a3586c454f5c9afe87
|
Add files via upload
|
subunits/blink.py
|
subunits/blink.py
|
"""Blink the Arduino's on-board LED via nanpy, once per second."""
from nanpy import ArduinoApi
from nanpy import SerialManager
from time import sleep

link = SerialManager(device='/dev/ttyACM0')
A = ArduinoApi(connection=link)

# On-board LED is wired to digital pin 13.
led = 13

# SETUP:
A.pinMode(led, A.OUTPUT)

# LOOP:
while True:
    A.digitalWrite(led, A.HIGH)  # turn the LED on (HIGH is the voltage level)
    # Parenthesized print behaves identically on Python 2 and is valid
    # Python 3; the bare print statement was Python-2-only syntax.
    print("blink on")
    sleep(1)  # use Python sleep instead of arduino delay
    A.digitalWrite(led, A.LOW)  # turn the LED off by making the voltage LOW
    print("blink off")
    sleep(1)
|
Python
| 0
|
|
a11cee952e1abc7e7310b760c8a4845c4f46fbae
|
add date_range.py
|
date_range.py
|
date_range.py
|
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import deferred
from sqlalchemy import or_
from sqlalchemy import sql
from sqlalchemy import text
from sqlalchemy import orm
import requests
from time import sleep
from time import time
import datetime
import shortuuid
from urllib import quote
from app import logger
from app import db
from util import elapsed
from util import safe_commit
from util import clean_doi
class DateRange(db.Model):
    """One-day window used to page through the Crossref works API.

    The primary key *is* the window's start datetime; the window always
    spans exactly one day.
    """
    id = db.Column(db.DateTime, primary_key=True)
    # end_date = db.Column(db.DateTime)

    @property
    def first(self):
        """Start datetime of the window (alias for the primary key)."""
        return self.id

    @property
    def first_day(self):
        """Start date as a YYYY-MM-DD string."""
        return self.id.isoformat()[0:10]

    @property
    def last_day(self):
        """End date (exclusive bound) as a YYYY-MM-DD string."""
        return self.last.isoformat()[0:10]

    @property
    def last(self):
        """End datetime: exactly one day after the start."""
        return self.first + datetime.timedelta(days=1)

    def get_crossref_api_raw(self, rows=100):
        """Fetch all Crossref works created in this window and store them.

        Pages through the cursor-based API `rows` at a time, saving each
        record as a CrossrefApi row and committing every ~100 inserts.
        Returns the number of records saved, or None on an HTTP error.
        """
        headers = {"Accept": "application/json", "User-Agent": "impactstory.org"}
        base_url_with_last = "http://api.crossref.org/works?filter=from-created-date:{first},until-created-date:{last}&rows={rows}&cursor={next_cursor}"
        # but if want all changes, use "indexed" not "created" as per https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#notes-on-incremental-metadata-updates

        # "*" asks Crossref to start a fresh cursor.
        next_cursor = "*"
        has_more_responses = True
        num_so_far = 0
        num_between_commits = 0

        while has_more_responses:
            start_time = time()
            url = base_url_with_last.format(
                first=self.first_day,
                last=self.last_day,
                rows=rows,
                next_cursor=next_cursor)
            logger.info(u"calling url: {}".format(url))

            resp = requests.get(url, headers=headers)
            logger.info(u"getting crossref response took {} seconds".format(elapsed(start_time, 2)))
            if resp.status_code != 200:
                # Bail out entirely on any non-OK response.
                logger.info(u"error in crossref call, status_code = {}".format(resp.status_code))
                return

            resp_data = resp.json()["message"]
            next_cursor = resp_data.get("next-cursor", None)
            if next_cursor:
                # The cursor token must be URL-escaped before reuse.
                next_cursor = quote(next_cursor)

            if not resp_data["items"] or not next_cursor:
                has_more_responses = False

            for api_raw in resp_data["items"]:
                doi = clean_doi(api_raw["DOI"])
                crossref_api_obj = CrossrefApi(doi=doi, api_raw=api_raw)
                db.session.add(crossref_api_obj)
                num_between_commits += 1
                num_so_far += 1

                # Commit in batches so a failure doesn't lose everything.
                if num_between_commits > 100:
                    safe_commit(db)
                    num_between_commits = 0

            logger.info(u"at bottom of loop, got {} records".format(len(resp_data["items"])))

        # make sure to get the last ones
        logger.info(u"done everything, saving last ones")
        safe_commit(db)
        return num_so_far

    def __repr__(self):
        return u"<DateRange (starts: {})>".format(self.id)
class CrossrefApi(db.Model):
    """Stores one raw Crossref API record as JSONB, keyed by a short uuid."""
    id = db.Column(db.Text, primary_key=True)
    doi = db.Column(db.Text)
    updated = db.Column(db.DateTime)
    api_raw = db.Column(JSONB)

    def __init__(self, **kwargs):
        # Generate the short surrogate key and an insertion timestamp
        # before delegating the remaining kwargs to SQLAlchemy.
        self.id = shortuuid.uuid()[0:10]
        self.updated = datetime.datetime.utcnow()
        super(CrossrefApi, self).__init__(**kwargs)
|
Python
| 0.000905
|
|
7d258bdb68119ad54a69e92ac7c7c1c2fc51e087
|
Create scrap.py
|
scrap.py
|
scrap.py
|
#!usr/bin/env python
import requests
from bs4 import BeautifulSoup
uri = requests.get("http://video9.in/english/")
soup=BeautifulSoup(url.text)
for link in soup.find_all("div",{"class": "updates"}):
print link.text
|
Python
| 0.000001
|
|
2aae4701fd98f560e7e112084f47f66515f6f574
|
Add setup.py
|
setup.py
|
setup.py
|
"""Packaging metadata for the Go/NoGo experiment application."""
from setuptools import setup, find_packages
import go_nogo_rig

setup(
    name='Go-NoGo',
    # Single-source the version from the package itself.
    version=go_nogo_rig.__version__,
    packages=find_packages(),
    install_requires=['moa', 'pybarst', 'moadevs'],
    author='Matthew Einhorn',
    author_email='moiein2000@gmail.com',
    url='https://cpl.cornell.edu/',
    license='MIT',
    description='Go/NoGo experiment.',
    # Installs a `go_nogo` console command that launches the app.
    entry_points={'console_scripts': ['go_nogo=go_nogo_rig.main:run_app']},
)
|
Python
| 0.000001
|
|
7ae1d4b99e2354f76bed894493281d4885d97f34
|
Add newer template rendering code
|
cms/test_utils/project/placeholderapp/views.py
|
cms/test_utils/project/placeholderapp/views.py
|
from django.http import HttpResponse
from django.shortcuts import render
from django.template import RequestContext
from django.template.base import Template
from django.views.generic import DetailView
from cms.test_utils.project.placeholderapp.models import (
Example1, MultilingualExample1, CharPksExample)
from cms.utils import get_language_from_request
from cms.utils.compat import DJANGO_1_7
def example_view(request):
    """Render the example overview page with every Example1 row."""
    return render(request, 'placeholderapp.html',
                  {'examples': Example1.objects.all()})
def _base_detail(request, instance, template_name='detail.html', item_name="char_1",
                 template_string='',):
    """Shared detail renderer for the placeholder test views.

    Renders *instance* either through an inline template string (when
    given) or the named template file, and registers the instance with
    the CMS toolbar when one is attached to the request.
    """
    context = {}
    context['instance'] = instance
    # NOTE(review): this *instantiates* an empty model object — presumably
    # so templates can access class-level attributes; confirm intent.
    context['instance_class'] = instance.__class__()
    context['item_name'] = item_name
    if hasattr(request, 'toolbar'):
        request.toolbar.set_object(instance)
    if template_string:
        template = Template(template_string)
        if DJANGO_1_7:
            # Django 1.7 templates render from a RequestContext directly.
            return HttpResponse(template.render(RequestContext(request=request, dict_=context)))
        else:
            # Newer Django requires wrapping the dict via make_context.
            from django.template.context import make_context
            context = make_context(context, request)
            return HttpResponse(template.render(context))
    else:
        return render(request, template_name, context)
def list_view_multi(request):
    """List MultilingualExample1 rows translated to the request language."""
    language = get_language_from_request(request)
    context = {
        'examples': MultilingualExample1.objects.language(language).all(),
        'instance_class': MultilingualExample1,
    }
    return render(request, 'list.html', context)
def detail_view_multi(request, pk, template_name='detail_multi.html',
                      item_name="char_1", template_string='',):
    """Detail page for a MultilingualExample1 in the request's language."""
    language = get_language_from_request(request)
    instance = MultilingualExample1.objects.language(language).get(pk=pk)
    return _base_detail(request, instance, template_name, item_name,
                        template_string)
def detail_view_multi_unfiltered(request, pk, template_name='detail_multi.html',
                                 item_name="char_1", template_string='',):
    """Detail page for a MultilingualExample1 without language filtering."""
    return _base_detail(request, MultilingualExample1.objects.get(pk=pk),
                        template_name, item_name, template_string)
def list_view(request):
    """List every Example1 row using the shared list template."""
    context = {
        'examples': Example1.objects.all(),
        'instance_class': Example1,
    }
    return render(request, 'list.html', context)
def detail_view(request, pk, template_name='detail.html', item_name="char_1",
                template_string='',):
    """Detail page for an Example1 instance.

    Staff users with the toolbar may view unpublished instances;
    everyone else only sees published ones.
    """
    if request.user.is_staff and request.toolbar:
        instance = Example1.objects.get(pk=pk)
    else:
        instance = Example1.objects.get(pk=pk, publish=True)
    return _base_detail(request, instance, template_name, item_name, template_string)
def detail_view_char(request, pk, template_name='detail.html', item_name="char_1",
                     template_string='',):
    """Detail page for a CharPksExample (string primary key) instance."""
    return _base_detail(request, CharPksExample.objects.get(pk=pk),
                        template_name, item_name, template_string)
class ClassDetail(DetailView):
    """Class-based detail view for Example1.

    When ``template_string`` is non-empty, the response is rendered from
    that inline template instead of ``template_name``.
    """
    model = Example1
    template_name = "detail.html"
    # Optional inline template source; overrides template_name when set.
    template_string = ''
    def render_to_response(self, context, **response_kwargs):
        """Render from ``template_string`` if given, else defer to DetailView."""
        if self.template_string:
            template = Template(self.template_string)
            if DJANGO_1_7:
                # Pre-1.8 template engine renders against a RequestContext.
                return HttpResponse(template.render(
                    RequestContext(request=self.request, dict_=context)
                ))
            else:
                # 1.8+ expects a Context produced by make_context.
                from django.template.context import make_context
                context = make_context(context, self.request)
                return HttpResponse(template.render(context))
        else:
            return super(ClassDetail, self).render_to_response(context, **response_kwargs)
    def get_context_data(self, **kwargs):
        """Expose the model class to the template as ``instance_class``."""
        context = super(ClassDetail, self).get_context_data(**kwargs)
        context['instance_class'] = self.model
        return context
|
from django.http import HttpResponse
from django.shortcuts import render
from django.template import RequestContext
from django.template.base import Template
from django.views.generic import DetailView
from cms.test_utils.project.placeholderapp.models import (
Example1, MultilingualExample1, CharPksExample)
from cms.utils import get_language_from_request
from cms.utils.compat import DJANGO_1_7
def example_view(request):
    """Render the placeholder demo page listing every Example1 object."""
    context = {'examples': Example1.objects.all()}
    return render(request, 'placeholderapp.html', context)
def _base_detail(request, instance, template_name='detail.html',
                 item_name="char_1", template_string='',):
    """Render *instance*, either from an inline template string or a template file.

    When the request carries a toolbar, the instance is registered on it so
    the CMS edit controls point at the right object.
    """
    context = {
        'instance': instance,
        'instance_class': instance.__class__(),
        'item_name': item_name,
    }
    if hasattr(request, 'toolbar'):
        request.toolbar.set_object(instance)
    if not template_string:
        return render(request, template_name, context)
    template = Template(template_string)
    if DJANGO_1_7:
        # Pre-1.8 template engine renders against a RequestContext.
        return HttpResponse(template.render(RequestContext(request=request, dict_=context)))
    # 1.8+ expects a Context produced by make_context.
    from django.template.context import make_context
    context = make_context(context, request)
    return HttpResponse(template.render(context))
def list_view_multi(request):
    """List all MultilingualExample1 objects in the request's language."""
    lang = get_language_from_request(request)
    context = {
        'examples': MultilingualExample1.objects.language(lang).all(),
        'instance_class': MultilingualExample1,
    }
    return render(request, 'list.html', context)
def detail_view_multi(request, pk, template_name='detail_multi.html',
                      item_name="char_1", template_string='',):
    """Detail view for a MultilingualExample1 translated to the request language."""
    lang = get_language_from_request(request)
    instance = MultilingualExample1.objects.language(lang).get(pk=pk)
    return _base_detail(request, instance, template_name, item_name, template_string)
def detail_view_multi_unfiltered(request, pk, template_name='detail_multi.html',
                                 item_name="char_1", template_string='',):
    """Like detail_view_multi, but without filtering by the request language."""
    return _base_detail(request, MultilingualExample1.objects.get(pk=pk),
                        template_name, item_name, template_string)
def list_view(request):
    """List every Example1 instance."""
    context = {
        'examples': Example1.objects.all(),
        'instance_class': Example1,
    }
    return render(request, 'list.html', context)
def detail_view(request, pk, template_name='detail.html', item_name="char_1",
                template_string='',):
    """Detail view for Example1; staff with a toolbar may see unpublished objects."""
    if request.user.is_staff and request.toolbar:
        instance = Example1.objects.get(pk=pk)
    else:
        # Everyone else only sees published objects.
        instance = Example1.objects.get(pk=pk, publish=True)
    return _base_detail(request, instance, template_name, item_name, template_string)
def detail_view_char(request, pk, template_name='detail.html', item_name="char_1",
                     template_string='',):
    """Detail view for CharPksExample (model with a character primary key)."""
    return _base_detail(request, CharPksExample.objects.get(pk=pk),
                        template_name, item_name, template_string)
class ClassDetail(DetailView):
    """Class-based detail view for Example1.

    When ``template_string`` is non-empty, the response is rendered from
    that inline template instead of ``template_name``.
    """
    model = Example1
    template_name = "detail.html"
    # Optional inline template source; overrides template_name when set.
    template_string = ''
    def render_to_response(self, context, **response_kwargs):
        """Render from ``template_string`` if given, else defer to DetailView."""
        if self.template_string:
            template = Template(self.template_string)
            if DJANGO_1_7:
                # Pre-1.8 template engine renders against a RequestContext.
                return HttpResponse(template.render(
                    RequestContext(request=self.request, dict_=context)
                ))
            else:
                # 1.8+ expects a Context produced by make_context.
                from django.template.context import make_context
                context = make_context(context, self.request)
                return HttpResponse(template.render(context))
        else:
            return super(ClassDetail, self).render_to_response(context, **response_kwargs)
    def get_context_data(self, **kwargs):
        """Expose the model class to the template as ``instance_class``."""
        context = super(ClassDetail, self).get_context_data(**kwargs)
        context['instance_class'] = self.model
        return context
|
Python
| 0
|
aef67e19a3494880620fd87a68ff581edaa9ce81
|
Add unittest for madx.evaluate
|
test/test_madx.py
|
test/test_madx.py
|
import unittest
from cern.madx import madx
from math import pi
class TestMadX(unittest.TestCase):
    """Exercise the madx wrapper class."""

    def setUp(self):
        # Fresh interpreter instance per test.
        self.madx = madx()

    def tearDown(self):
        del self.madx

    def testEvaluate(self):
        # Define a deferred expression in MAD-X, then evaluate its reciprocal.
        self.madx.command("FOO = PI*3;")
        result = self.madx.evaluate("1/FOO")
        self.assertAlmostEqual(result, 1 / (3 * pi))
|
Python
| 0.000001
|
|
0a55f6f2bf49c679a422d44007df3f66c323e719
|
mask unit test
|
test/test_mask.py
|
test/test_mask.py
|
import numpy as np
from minimask.mask import Mask
from minimask.spherical_poly import spherical_polygon
def test_mask_sample():
    """Sample points from a 10x10 degree square mask and sanity-check them."""
    square = spherical_polygon([[0, 0], [10, 0], [10, 10], [0, 10]])
    mask = Mask(polys=[square], fullsky=False)
    x, y = mask.sample(100)
    # NOTE(review): sample(100) is asserted to yield 1000 points below —
    # confirm the argument's semantics against Mask.sample.
    assert len(x) == 1000
    assert len(y) == 1000
    # Samples should span the square, within a degree of each edge.
    assert np.abs(x.min()) < 1
    assert np.abs(y.min()) < 1
    assert np.abs(x.max() - 10) < 1
    assert np.abs(y.max() - 10) < 1
    # NOTE(review): asserts that no sampled point is reported as contained —
    # verify the sign convention of Mask.contains.
    r = mask.contains(x, y)
    assert np.sum(r) == 0
|
Python
| 0
|
|
25495d675c44a75d7dedfe123f30a858f9cd60be
|
Add minimal (no asserts) test for play plugin
|
test/test_play.py
|
test/test_play.py
|
# -*- coding: utf-8 -*-
"""Tests for the play plugin"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from mock import patch, Mock
from test._common import unittest
from test.helper import TestHelper
from beetsplug.play import PlayPlugin
class PlayPluginTest(unittest.TestCase, TestHelper):
    """Smoke test for the play plugin: run the command and expect no crash."""
    def setUp(self):
        # Fresh beets library with the play plugin loaded and one item in it.
        self.setup_beets()
        self.load_plugins('play')
        self.add_item(title='aNiceTitle')
    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()
    @patch('beetsplug.play.util.interactive_open', Mock())
    def test_basic(self):
        # interactive_open is patched out, so no real player is launched;
        # this only exercises the command path end to end.
        self.run_command('play', 'title:aNiceTitle')
def suite():
    """Return this module's tests (beets test-runner convention)."""
    return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
    # Use a plain str literal: __name__ is str on Python 3, so comparing it
    # against the bytes literal b'__main__' is always False there and the
    # suite would silently never run. On Python 2 (with unicode_literals)
    # the str comparison still matches.
    unittest.main(defaultTest='suite')
|
Python
| 0.000001
|
|
b3ad7a7735d55a91682ea6798e6ebcfcf94b1969
|
287. Find the Duplicate Number. Brent's
|
p287_brent.py
|
p287_brent.py
|
import unittest
class Solution(object):
    """LeetCode 287: find the duplicate in n+1 integers drawn from 1..n."""

    def findDuplicate(self, nums):
        """Locate the repeated value using Brent's cycle-detection algorithm.

        The list is treated as a function ``i -> nums[i]``; because some
        value repeats, the walk from index 0 enters a cycle whose entrance
        is the duplicate value. Runs in O(n) time and O(1) space and does
        not modify ``nums``.

        :type nums: List[int]
        :rtype: int
        """
        # Phase 1: Brent's doubling search to measure the cycle length.
        t = nums[0]
        h = nums[t]
        max_loop_length = 1
        loop_length = 1
        while t != h:
            if loop_length == max_loop_length:
                # Teleport the tortoise to the hare and double the window.
                t = h
                max_loop_length <<= 1
                loop_length = 0
            h = nums[h]
            loop_length += 1
        # Phase 2: advance one pointer by the cycle length from the start,
        # then walk both in lockstep; they meet at the cycle entrance.
        t = 0
        h = 0
        # range() instead of the Python-2-only xrange(): the original body
        # raises NameError on Python 3.
        for _ in range(loop_length):
            h = nums[h]
        while t != h:
            t = nums[t]
            h = nums[h]
        return t
class Test(unittest.TestCase):
    """Check findDuplicate against a handful of fixed arrays."""

    def test(self):
        cases = [
            ([1, 2, 3, 4, 4, 5], 4),
            ([5, 1, 3, 4, 2, 4], 4),
            ([1, 2, 3, 4, 5, 5, 6], 5),
            ([1, 3, 4, 5, 6, 6, 6], 6),
            ([1, 3, 4, 5, 6, 6, 6, 7], 6),
            ([1, 3, 4, 2, 1], 1),
        ]
        for nums, expected in cases:
            self.assertEqual(Solution().findDuplicate(nums), expected)
if __name__ == '__main__':
    # Run the tests when executed as a script.
    unittest.main()
|
Python
| 0.999999
|
|
83d00fea8adf611984c3b56a63f080f144612c69
|
Create data_tool.py
|
data_tool.py
|
data_tool.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import pickle
import random
def load_data():
    """Load and return the pickled dataset from ``dataset.pkl``.

    The stored object is expected to be a sequence of rows where each
    row's last element is its label (see ``feature_format``).
    """
    # Pickle files are binary: 'rb' is required on Python 3 (the original
    # text mode 'r' fails there) and is harmless on Python 2.
    with open('dataset.pkl', 'rb') as file:
        data_set = pickle.load(file)
    return data_set
def feature_format(data_set):
    """Split rows into parallel (features, labels) lists.

    Each row's last element is its label; everything before it is the
    feature vector.
    """
    features = [row[:-1] for row in data_set]
    labels = [row[-1] for row in data_set]
    return features, labels
def train_test_split(features, test_rate):
    """Shuffle the rows and split them into train/test feature-label sets.

    ``features`` is a list of rows whose last element is the label;
    ``test_rate`` is the fraction of rows assigned to the test split,
    rounded to the nearest whole row.  Returns
    ``(features_train, labels_train, features_test, labels_test)``.

    Note: the input list is shuffled in place as a side effect.
    """
    random.shuffle(features)
    total_number = len(features)
    test_number = int(round(total_number * test_rate))
    # Slice with a positive index: the original negative-index slicing
    # (features[0:-test_number] / features[-test_number:]) inverts the
    # split when test_number == 0, putting everything in the test set.
    split = total_number - test_number
    features_train, labels_train = feature_format(features[:split])
    features_test, labels_test = feature_format(features[split:])
    return features_train, labels_train, features_test, labels_test
|
Python
| 0.000001
|
|
c488e446aee3d28fa84bb24d446ca22af20e461c
|
Add setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
def lt27():
    """Return True when running on a Python older than 2.7."""
    import sys
    return sys.version_info[:2] < (2, 7)
# Test dependencies; Python < 2.7 additionally needs the unittest2 backport.
tests_require = [
    'nose>=1.0',
    'mock',
]
if lt27():
    tests_require.append('unittest2')
setup(
    name='dynsupdate',
    description='Dynamic DNS update like nsupdate',
    install_requires=[
        'dnspython',  # DNS toolkit used to build and send update messages
    ],
    tests_require=tests_require,
    packages=find_packages(),
    test_suite="nose.collector"  # discover and run tests via nose
)
|
Python
| 0.000001
|
|
6d3a9f41bec03405fa648ce169b9565f937e4598
|
add setup.py
|
setup.py
|
setup.py
|
from setuptools import setup
# Package metadata for PyPI distribution of the single-module package.
setup(
    name="timekeeper",
    version="0.1.0",
    description="Send runtime measurements of your code to InfluxDB",
    author="Torsten Rehn",
    author_email="torsten@rehn.email",
    license="ISC",
    url="https://github.com/trehn/timekeeper",
    keywords=["profiling", "profile", "metrics", "instrumentation", "measure", "influxdb"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: ISC License (ISCL)",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Operating System :: Unix",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Monitoring",
    ],
    install_requires=[
        "influxdb >= 2.0.0",  # client library the measurements are sent with
    ],
    py_modules=['timekeeper'],  # single-module distribution, no package dir
)
|
Python
| 0.000001
|
|
bc9401da60e8f10827f37772af937d4fb11ca248
|
Add PyPI setup.py file
|
setup.py
|
setup.py
|
# Prefer setuptools, but fall back to distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
setup(
    name='component',
    author='Daniel Chatfield',
    author_email='chatfielddaniel@gmail.com',
    version='0.0.1',
    url='http://github.com/import/component',
    py_modules=['component'],  # single-module distribution
    description='A python library that makes component(1) play nicely with python.',
    zip_safe=False,
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
|
Python
| 0.000001
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.