commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
8982f8d7e00ddf55782c2cd5a9c895d44afd1fca | Add wolfram plugin | plugins/wolfram.py | plugins/wolfram.py | import lxml.etree
import io
import re
import requests
import urllib.parse
import sys
class Plugin:
    """Wolfram|Alpha chat plugin.

    Sends free-form queries to the Wolfram|Alpha v2 API and formats the
    first couple of result "pods" for display in chat.
    """

    def __init__(self, appid):
        # Wolfram|Alpha application id used to authenticate API requests.
        self.appid = appid

    def format_pod(self, pod):
        """Format the first subpod of *pod* as display text."""
        subpod = pod.find("subpod")
        return self.format_subpod(subpod)

    def convert_unicode_chars(self, text):
        """Replace Wolfram's ``:xxxx`` hex escapes with the real characters.

        Escapes whose digits are not valid hexadecimal are left untouched.
        (The original implementation passed the whole match -- including
        the leading colon -- to int(), which always raised ValueError, so
        no escape was ever converted.)
        """
        def _unescape(match):
            try:
                # group(1) is just the hex digits, without the colon.
                return chr(int(match.group(1), 16))
            except ValueError:
                return match.group(0)
        return re.sub(r"\:([A-Za-z0-9]+)", _unescape, text)

    def format_subpod(self, subpod):
        """Return the plain text of *subpod*, or its image URL if empty.

        Long answers are truncated to 400 characters; pipe-separated
        output is re-rendered as an aligned ASCII table.
        """
        text = (subpod.find("plaintext").text or "").strip()
        if len(text) <= 0:
            # No plain text available: fall back to the rendered image.
            return subpod.find("img").get("src")
        s = text
        if len(s) >= 400:
            s = text[:400] + "…"
        # first convert unicode characters
        s = self.convert_unicode_chars(s)
        # then turn it into a table... if it is one
        if "|" in s:
            rows = s.splitlines()
            max_column_widths = [0] * 128
            def format_row(row):
                def format_col(arg):
                    i = arg[0]
                    col = arg[1].strip()
                    # Track the widest cell per column for alignment.
                    if len(col) > max_column_widths[i]:
                        max_column_widths[i] = len(col)
                    return col
                return list(map(format_col, enumerate(row.split("|"))))
            rows = list(map(format_row, rows))  # list to force max_column_widths evaluation
            result = ""
            for row in rows:
                result += "|"
                for i, col in enumerate(row):
                    result += " " + col
                    result += " " * (max_column_widths[i] + 2 - len(col))
                    result += "|"
                result += "\n"
            return result.strip()
        else:
            return s.strip()

    def on_message(self, bot, msg):
        """Handle chat messages of the form ``? <query>``.

        Temporarily redirects stdin/stdout so on_command() can be reused
        as-is; the captured output is sent back to the channel.
        """
        if msg["message"].startswith("? "):
            query = msg["message"][2:]
            old_stdout = sys.stdout
            old_stdin = sys.stdin
            try:
                sys.stdout = io.StringIO()
                sys.stdin = io.StringIO(query)
                self.on_command(bot, None)  # msg is unused
                output = sys.stdout.getvalue().strip()
                sys.stdout = old_stdout
                sys.stdin = old_stdin
                bot.send(msg["reply_to"], output)
            finally:
                # Always restore the real streams, even on error.
                sys.stdout = old_stdout
                sys.stdin = old_stdin

    def on_command(self, bot, msg):
        """CLI entry point: read the query from argv (or stdin) and print
        the formatted Wolfram|Alpha answer."""
        query = " ".join(sys.argv[1:])
        if not query:
            query = sys.stdin.read().strip()
        if query:
            url = "http://api.wolframalpha.com/v2/query?input={0}&appid={1}".format(
                urllib.parse.quote(query),
                urllib.parse.quote(self.appid)
            )
            headers = {"User-Agent": "SmartBot"}
            page = requests.get(url, headers=headers, timeout=15)
            if page.status_code == 200:
                tree = lxml.etree.fromstring(page.content)
                pods = []
                for pod in tree.xpath("//pod"):
                    pods.append(pod)
                if len(pods) >= 2:
                    # Prefer a compact "input -> result" one-liner.
                    small_result = self.format_pod(pods[0]) + " -> " + self.format_pod(pods[1])
                    if len(small_result) <= 100 and "\n" not in small_result:
                        print(small_result)
                    else:
                        # Otherwise print the first two pods in full.
                        for pod in pods[:2]:
                            print("# {0}".format(pod.get("title")))
                            for subpod in pod.findall("subpod"):
                                if subpod.get("title"):
                                    print("## {0}".format(subpod.get("title")))
                                print(self.format_subpod(subpod))
                else:
                    print("Nothing more to say.")
        else:
            print(self.on_help(bot))

    def on_help(self, bot):
        """Return the usage string shown for empty queries."""
        return "Syntax: ?|q|question|wfa|calc|calculate <query>"
| Python | 0 | |
9a237141c9635d2a1dad6349ad73d24e969d8460 | Add runner | hud-runner.py | hud-runner.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Convenience wrapper for running hud directly from source tree."""
from hud.hud import main

# Delegate straight to the package entry point when run as a script.
if __name__ == '__main__':
    main()
| Python | 0.000022 | |
bfb7d8d9356fe66f433556977a333e4256c6fb61 | Create series.py | trendpy/series.py | trendpy/series.py | # series.py
# MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import matplotlib.pyplot as plt
from trendpy.mcmc import MCMC
from trendpy.factory import StrategyFactory
from pandas import DataFrame, read_csv
class Series(object):
    """Container for a univariate time series loaded from CSV.

    Wraps a pandas DataFrame and exposes MCMC-based trend filtering.
    Several methods (returns, save, regression, export) are stubs.
    """

    def __init__(self):
        """Create an empty series; use :meth:`from_csv` to load data."""
        self.data = None
        self.is_log_price = False

    def __len__(self):
        """Number of observations in the underlying data."""
        return self.data.size

    def __str__(self):
        """Delegate to the underlying DataFrame's representation."""
        return self.data.__str__()

    @staticmethod
    def from_csv(filename, nomalise=True):
        """Build a Series from a CSV file whose first column is the index.

        NOTE: ``nomalise`` keeps the original (misspelled) keyword name
        for backward compatibility with existing callers.
        """
        series = Series()
        series.nomalise = nomalise
        series.data = read_csv(filename, index_col=0)
        return series

    def returns(self, period=1):
        """Not implemented yet."""
        pass

    def save(self, filename='export.csv', type='csv', separator=','):
        """Not implemented yet: export as ``csv`` or ``json``."""
        if type == 'csv':
            pass
        if type == 'json':
            pass

    def plot(self):
        """Render the series with matplotlib and block until closed."""
        self.data.plot()
        plt.show()

    def filter(self, method="L1Filter", number_simulations=100, burns=50, total_variation=2):
        """Run the MCMC trend filter and append the trend as a new column
        named after *method*."""
        sampler = MCMC(self, StrategyFactory.create(method, self.data.as_matrix()[:, 0], total_variation_order=total_variation))
        sampler.run(number_simulations)
        trend = sampler.output(burns, "trend")
        self.data = self.data.join(DataFrame(trend, index=self.data.index, columns=[method]))

    def regression(self, method="lasso", number_simulations=100, burns=50):
        """Not implemented yet."""
        pass

    def export(self, filename, as_txt=False):
        """Not implemented yet."""
        pass
| Python | 0 | |
def solution(nums):
    """Minimum number of deletions so every remaining element of *nums*
    equals the most frequent value (HackerRank "Equalize the Array")."""
    from collections import Counter

    if not nums:
        return 0
    # Keep the mode; everything else must be deleted.
    most_common_count = max(Counter(nums).values())
    return len(nums) - most_common_count
# Read problem input: first line is the array length, second the values.
n = int(input())
nums = tuple(map(int, input().split()))
cnt = solution(nums)
print(cnt)
| Python | 0.000018 | |
fca6421c53e286549d861c65c114991602f310ea | Add some adaptors. | pykmer/adaptors.py | pykmer/adaptors.py | """
This module provides some adaptors for converting between
different data formats:
`k2kf`
Convert a sequence of k-mers to k-mer frequency pairs
`kf2k`
Convert a sequence of k-mer frequency pairs to k-mers
`keyedKs`
Provide keyed access to a sequence of k-mers
`keyedKFs`
Provide keyed access to a sequence of k-mer frequency pairs
"""
def k2kf(xs, f=1):
    """Attach the constant frequency *f* to every k-mer in *xs*,
    yielding (k-mer, frequency) pairs."""
    for kmer in xs:
        yield kmer, f
def kf2k(xs):
    """Drop the frequency from each (k-mer, frequency) pair in *xs*,
    yielding bare k-mers."""
    for kmer, _freq in xs:
        yield kmer
class keyedKs:
    """Keyed (cursor-style) access over a sequence of k-mers.

    Wraps an iterator and exposes the current element via kmer()/item(),
    advancing with next().  valid() becomes False once the underlying
    iterator is exhausted.
    """

    def __init__(self, itr):
        self.itr = itr
        self.more = True
        # Prime the cursor with the first element (if any).
        self.next()

    def valid(self):
        """True while the cursor still points at an element."""
        return self.more

    def kmer(self):
        """The k-mer under the cursor."""
        assert self.valid()
        return self.curr

    def item(self):
        """The full item under the cursor (same as kmer() here)."""
        assert self.valid()
        return self.curr

    def next(self):
        """Advance the cursor; marks it invalid at end of input."""
        assert self.valid()
        try:
            # The builtin next() works on Python 2.6+ and Python 3,
            # unlike the original itr.next(), which is Python-2 only.
            self.curr = next(self.itr)
        except StopIteration:
            self.more = False
class keyedKfs:
    """Keyed (cursor-style) access over (k-mer, frequency) pairs.

    Like keyedKs, but items are pairs: kmer() returns the k-mer half,
    item() the whole pair.
    """

    def __init__(self, itr):
        self.itr = itr
        self.more = True
        # Prime the cursor with the first pair (if any).
        self.next()

    def valid(self):
        """True while the cursor still points at a pair."""
        return self.more

    def kmer(self):
        """The k-mer component of the pair under the cursor."""
        assert self.valid()
        return self.curr[0]

    def item(self):
        """The full (k-mer, frequency) pair under the cursor."""
        assert self.valid()
        return self.curr

    def next(self):
        """Advance the cursor; marks it invalid at end of input."""
        assert self.valid()
        try:
            # The builtin next() works on Python 2.6+ and Python 3,
            # unlike the original itr.next(), which is Python-2 only.
            self.curr = next(self.itr)
        except StopIteration:
            self.more = False
| Python | 0 | |
b7f3e32827bb9a0f122928d218f4d535febb0829 | add command | Command.py | Command.py | # -*- coding: utf-8 -*-
"""
Command pattern
"""
from os import listdir, curdir
class ListCommand(object):
    """Command-pattern wrapper around a directory listing.

    Encapsulates "list the contents of *path*" as an object with an
    ``execute()`` method.
    """

    def __init__(self, path=None):
        """Target *path*, defaulting to the current working directory."""
        self.path = path or curdir

    def execute(self):
        """Run the command: print the listing of ``self.path``."""
        self._list(self.path)

    @staticmethod
    def _list(path=None):
        """Print *path* followed by its directory entries.

        Parenthesised print calls keep this runnable under both
        Python 2 and Python 3 (the original bare print statements
        were Python-2-only syntax).
        """
        print('list path {} :'.format(path))
        print(listdir(path))
if __name__ == "__main__":
command = ListCommand()
command.execute()
| Python | 0.000292 | |
bd865a9fdc941b99be40a5ba3dcc02b819b2e9da | add cpm.utils.refstring | cpm/utils/refstring.py | cpm/utils/refstring.py | # Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import collections
import re
from . import semver
Ref = collections.namedtuple('Ref', 'package version module function')
spec = '[<package>[@<version>]][/<module>][:<function>]'
regex = re.compile('''^
(?:
(?P<package> [A-z0-9\.\-_]+)
(?: @(?P<version> [0-9\.]+[A-z0-9\.\-\+]*))?
)?
(?: /(?P<module> [A-z0-9\.\-_]+))?
(?: :(?P<function> [A-z0-9\.\-_]+))?
$''', re.X)
def parse(s):
    """
    Parse a reference string of the form

        [<package>[@<version>]][/<module>][:<function>]

    and return a #Ref namedtuple with members *package*, *version*,
    *module* and *function* (missing components are None).

    # Raises
    ValueError: If the string does not match the grammar, or the
      version part is not a valid semantic version.
    """
    match = regex.match(s)
    if match is None:
        raise ValueError('invalid refstring: "{}"'.format(s))
    package, version, module, function = match.groups()
    if version:
        try:
            version = semver.Version(version)
        except ValueError as exc:
            raise ValueError('invalid refstring: "{}" ({})'.format(s, exc))
    return Ref(package, version, module, function)
def join(package=None, version=None, module=None, function=None):
    """
    Concatenate reference components back into a reference string.
    With a #Ref object, use argument unpacking: `join(*ref)`.

    # Raises
    ValueError: If *version* is given without a *package*.
    """
    if not package and version:
        raise ValueError('version can not be specified without a package')
    parts = []
    if package:
        parts.append(package)
        if version:
            parts.append('@' + str(version))
    if module:
        parts.append('/' + module)
    if function:
        parts.append(':' + function)
    return ''.join(parts)
| Python | 0.000001 | |
edb904ca105abfb767f94f366e19ed05374a8014 | Create URL Shortner | URLShortner.py | URLShortner.py | import uuid
import json
import os
from glob import iglob
from pprint import pprint
# Mapping from each file path to its generated short URL.
mapping={}
mapping['URL']=[]
#Getting JSON file of initial Tika parsing containing list of file paths categorized by MIME types
file="C:/Users/rahul/Documents/GitHub/Scientific-Content-Enrichment-in-the-Text-Retrieval-Conference-TREC-Polar-Dynamic-Domain-Dataset/fulldump-path-all-json/"
outFile='output-from-url-shortner-all-types'+'.json'
output_file=open(outFile,'w')
for filepath in iglob(os.path.join(file, '*.json')):
    with open(filepath) as data_file:
        data = json.load(data_file)
        for i in data['files']:
            #Getting a unique md5 hash for the file path relative to the current directory
            # NOTE(review): despite the comment above, this is a random
            # uuid4 prefix, not an md5 of the path -- confirm intent.
            d={}
            d['filePath']=i
            s="polar.usc.edu/"+str(uuid.uuid4())[:8]
            d['shortURL']=s
            mapping['URL'].append(d)
            print "\'"+ i+ "\'" + " : " +"\'"+ s+ "\'"
            #print dispString
            #output_file.write(dispString)
    # Redundant: the with-statement above already closed the file.
    data_file.close()
#Dumping JSON object with mapped shortened URLs and file path
keys=json.dumps(mapping, sort_keys=True)
output_file.write(keys)
output_file.close()
| Python | 0 | |
62a13341610d476ba8ff9e3fd5a3476cbdb18225 | Create convert.py | convert.py | convert.py | import gensim
#word2vec embeddings start with a line with the number of lines (tokens?) and the number of dimensions of the file. This allows
#gensim to allocate memory accordingly for querying the model. Larger dimensions mean larger memory is held captive. Accordingly, this line
#has to be inserted into the GloVe embeddings file.

#GloVe Model File
#More models can be downloaded from http://nlp.stanford.edu/projects/glove/
fname="glove.6B.50d.txt"

#convert Glove vectors to word2vec format
word2vec_convert_file="C:/Users/Manas/Desktop/ML/Topics_Multiclass/Zomato_Reviews/Data/IMDB/word2vec_line.txt"

#to be a first line insert
num_lines = sum(1 for line in open(fname))
dims=50
print '%d lines with %d dimensions' %(num_lines,dims)

# Write the one-line word2vec header ("<count> <dims>") to its own file.
with open(word2vec_convert_file,'w') as f:
    f.write(str(num_lines)+ " " +str(dims) + '\n')
# Redundant: the with-statement above already closed the file.
f.close()

# Concatenate the header file and the GloVe vectors into the model file.
model_file='glove_model.txt'
filenames = [word2vec_convert_file,fname]
with open(model_file, 'w') as outfile:
    for fname in filenames:
        with open(fname) as infile:
            for line in infile:
                outfile.write(line)
# Redundant: the with-statement above already closed the file.
outfile.close()

#load converted model file
model=gensim.models.Word2Vec.load_word2vec_format(model_file,binary=False) #GloVe Model

print model.most_similar(positive=['australia'], topn=10)
print model.similarity('woman', 'man')
| Python | 0.000002 | |
5d5ccc84eaaec6b6d749a9054f744a5a44f9dac9 | add script for reading from PCF8574 | i2c/PCF8574.py | i2c/PCF8574.py | #!/usr/bin/python
import sys
import smbus
import time
# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber, address):
    """Read the 8-bit port state of a PCF8574 I/O expander over I2C.

    busnumber -- I2C bus number as passed on the command line.
    address -- chip address as a hex string (e.g. "0x20").
    Prints one "port N: value" line per port, MSB first.
    """
    address = int(address, 16)
    # Honour the bus argument: the original hard-coded int(1) here,
    # silently ignoring the bus given on the command line.
    busnumber = int(busnumber)
    bus = smbus.SMBus(busnumber)
    state = bus.read_byte(address)
    for i in range(0, 8):
        port = "port " + str(i)
        # Extract bit (7 - i): port 7 is printed first as "port 0".
        value = 1 & (state >> 7 - i)
        # Parenthesised print keeps this valid on Python 2 and 3.
        print(str(port) + ': ' + str(value))
# CLI entry point: expects exactly two arguments, bus and address.
if len(sys.argv) != 3:
    print "Usage: python PCF8574.py bus address"
    exit(1)
bus = sys.argv[1]
address = sys.argv[2]
readPCF8574(bus,address)
| Python | 0 | |
37b92cdd13bd9c86b91bac404a8c73c62ebafa53 | Add VPC_With_VPN_Connection example | examples/VPC_With_VPN_Connection.py | examples/VPC_With_VPN_Connection.py | # Converted from VPC_With_VPN_Connection.template located at:
# http://aws.amazon.com/cloudformation/aws-cloudformation-templates/
from troposphere import Base64, FindInMap, GetAtt, Join, Output
from troposphere import Parameter, Ref, Tags, Template
from troposphere.cloudfront import Distribution, DistributionConfig
from troposphere.cloudfront import Origin, DefaultCacheBehavior
from troposphere.ec2 import PortRange
from troposphere.ec2 import NetworkAcl
from troposphere.ec2 import Route
from troposphere.ec2 import VPCGatewayAttachment
from troposphere.ec2 import SubnetRouteTableAssociation
from troposphere.ec2 import Subnet
from troposphere.ec2 import CustomerGateway
from troposphere.ec2 import VPNConnectionRoute
from troposphere.ec2 import RouteTable
from troposphere.ec2 import VPC
from troposphere.ec2 import NetworkAclEntry
from troposphere.ec2 import VPNGateway
from troposphere.ec2 import SubnetNetworkAclAssociation
from troposphere.ec2 import VPNConnection
# Build the CloudFormation template object that all parameters, resources
# and outputs below are attached to.
t = Template()

t.add_version("2010-09-09")

# NOTE(review): "need yonk the tunnels" in this runtime string looks like a
# typo carried over from the upstream AWS sample (probably "need for the
# tunnels"); left untouched because it is program output.
t.add_description("""\
AWS CloudFormation Sample Template VPC_With_VPN_Connection.template: \
Sample template showing how to create a private subnet with a VPN connection \
using static routing to an existing VPN endpoint. NOTE: The VPNConnection \
created will define the configuration you need yonk the tunnels to your VPN \
endpoint - you can get the VPN Gateway configuration from the AWS Management \
console. You will be billed for the AWS resources used if you create a stack \
from this template.""")

# --- Input parameters -------------------------------------------------------

VPNAddress = t.add_parameter(Parameter(
    "VPNAddress",
    Type="String",
    Description="IP Address of your VPN device",
    MinLength="7",
    AllowedPattern="(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})",
    MaxLength="15",
    ConstraintDescription="must be a valid IP address of the form x.x.x.x",
))

OnPremiseCIDR = t.add_parameter(Parameter(
    "OnPremiseCIDR",
    ConstraintDescription=(
        "must be a valid IP CIDR range of the form x.x.x.x/x."),
    Description="IP Address range for your existing infrastructure",
    Default="10.0.0.0/16",
    MinLength="9",
    AllowedPattern="(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
    MaxLength="18",
    Type="String",
))

VPCCIDR = t.add_parameter(Parameter(
    "VPCCIDR",
    ConstraintDescription=(
        "must be a valid IP CIDR range of the form x.x.x.x/x."),
    Description="IP Address range for the VPN connected VPC",
    Default="10.1.0.0/16",
    MinLength="9",
    AllowedPattern="(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
    MaxLength="18",
    Type="String",
))

SubnetCIDR = t.add_parameter(Parameter(
    "SubnetCIDR",
    ConstraintDescription=(
        "must be a valid IP CIDR range of the form x.x.x.x/x."),
    Description="IP Address range for the VPN connected Subnet",
    Default="10.1.0.0/24",
    MinLength="9",
    AllowedPattern="(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
    MaxLength="18",
    Type="String",
))

# --- Resources --------------------------------------------------------------
# NOTE(review): several assignments below (CustomerGateway,
# VPNConnectionRoute, VPC, VPNGateway, VPNConnection, PrivateSubnet) rebind
# the names of imported troposphere classes or earlier resources after their
# last class use.  This happens to work because each class is only needed
# before its rebinding, but it is fragile -- confirm before reordering.

PrivateNetworkAcl = t.add_resource(NetworkAcl(
    "PrivateNetworkAcl",
    VpcId=Ref("VPC"),
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        Network="Private",
    )
))

PrivateRoute = t.add_resource(Route(
    "PrivateRoute",
    GatewayId=Ref("VPNGateway"),
    DestinationCidrBlock="0.0.0.0/0",
    RouteTableId=Ref("PrivateRouteTable"),
))

VPNGatewayAttachment = t.add_resource(VPCGatewayAttachment(
    "VPNGatewayAttachment",
    VpcId=Ref("VPC"),
    VpnGatewayId=Ref("VPNGateway"),
))

PrivateSubnetRouteTableAssociation = t.add_resource(
    SubnetRouteTableAssociation(
        "PrivateSubnetRouteTableAssociation",
        SubnetId=Ref("PrivateSubnet"),
        RouteTableId=Ref("PrivateRouteTable"),
    )
)

PrivateSubnet = t.add_resource(Subnet(
    "PrivateSubnet",
    VpcId=Ref("VPC"),
    CidrBlock=Ref(SubnetCIDR),
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        Network="VPN Connected Subnet",
    )
))

CustomerGateway = t.add_resource(CustomerGateway(
    "CustomerGateway",
    BgpAsn="65000",
    IpAddress=Ref(VPNAddress),
    Type="ipsec.1",
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        VPN=Join("", ["Gateway to ", Ref(VPNAddress)]),
    )
))

VPNConnectionRoute = t.add_resource(VPNConnectionRoute(
    "VPNConnectionRoute",
    VpnConnectionId=Ref("VPNConnection"),
    DestinationCidrBlock=Ref(OnPremiseCIDR),
))

PrivateRouteTable = t.add_resource(RouteTable(
    "PrivateRouteTable",
    VpcId=Ref("VPC"),
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        Network="VPN Connected Subnet",
    )
))

VPC = t.add_resource(VPC(
    "VPC",
    EnableDnsSupport="true",
    CidrBlock=Ref(VPCCIDR),
    EnableDnsHostnames="true",
    Tags=Tags(
        Application=Ref("AWS::StackName"),
        Network="VPN Connected VPC",
    )
))

# Network ACL entries: allow all TCP in both directions on the private ACL.
OutBoundPrivateNetworkAclEntry = t.add_resource(NetworkAclEntry(
    "OutBoundPrivateNetworkAclEntry",
    NetworkAclId=Ref(PrivateNetworkAcl),
    RuleNumber="100",
    Protocol="6",
    PortRange=PortRange(To="65535", From="0"),
    Egress="true",
    RuleAction="allow",
    CidrBlock="0.0.0.0/0",
))

VPNGateway = t.add_resource(VPNGateway(
    "VPNGateway",
    Type="ipsec.1",
    Tags=Tags(
        Application=Ref("AWS::StackName"),
    )
))

PrivateSubnetNetworkAclAssociation = t.add_resource(
    SubnetNetworkAclAssociation(
        "PrivateSubnetNetworkAclAssociation",
        SubnetId=Ref(PrivateSubnet),
        NetworkAclId=Ref(PrivateNetworkAcl),
    )
)

VPNConnection = t.add_resource(VPNConnection(
    "VPNConnection",
    CustomerGatewayId=Ref(CustomerGateway),
    StaticRoutesOnly="true",
    Type="ipsec.1",
    VpnGatewayId=Ref(VPNGateway),
))

InboundPrivateNetworkAclEntry = t.add_resource(NetworkAclEntry(
    "InboundPrivateNetworkAclEntry",
    NetworkAclId=Ref(PrivateNetworkAcl),
    RuleNumber="100",
    Protocol="6",
    PortRange=PortRange(To="65535", From="0"),
    Egress="false",
    RuleAction="allow",
    CidrBlock="0.0.0.0/0",
))

# --- Outputs ----------------------------------------------------------------

PrivateSubnet = t.add_output(Output(
    "PrivateSubnet",
    Description="SubnetId of the VPN connected subnet",
    Value=Ref(PrivateSubnet),
))

VPCId = t.add_output(Output(
    "VPCId",
    Description="VPCId of the newly created VPC",
    Value=Ref(VPC),
))

# Emit the assembled template as JSON on stdout.
print(t.to_json())
| Python | 0.000001 | |
b792a8cb3d61dbac1c48a16585c7bb6725bc06a0 | add barebones | barebones_ssg/ssg.py | barebones_ssg/ssg.py |
# hack to get unicode working with jinja2
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

import glob
import metadata as meta
from tag_ontology import *
import commands as c
import json
from jinja2 import Template, Environment, FileSystemLoader
import os

# Source pages and accumulators used to build the site-wide indexes.
pages_pat = "pages/*.md"
pages_lst = glob.glob(pages_pat)
all_tags = []
page_data = []

# Pass 1: render each markdown page to HTML via pandoc's JSON AST.
for page in pages_lst:
    output = c.run_command("pandoc -f markdown -t json {page}".format(page=page))
    json_lst = json.loads(output)
    # Normalise tags (apply synonyms and implications) in the metadata.
    file_dict = meta.organize_tags(json_lst, tag_synonyms, tag_implications)
    tags_lst = meta.get_tags(file_dict['json'])
    all_tags.extend(tags_lst)
    json_str = json.dumps(file_dict['json'], separators=(',',':'))
    body = c.run_command("pandoc -f json -t html", pipe_in=json_str)
    title = meta.get_metadata_field(json_lst, "title")
    math = meta.get_metadata_field(json_lst, "math")
    license = meta.get_metadata_field(json_lst, "license")
    env = Environment(loader=FileSystemLoader('.'))
    skeleton = env.get_template('templates/skeleton.html')
    tags = []
    for tag in tags_lst:
        tags.append({'name': tag, 'path': ("tags/" + tag)})
    final = skeleton.render(body=body, title=title, tags=tags, license=license, math=math)
    # Output path: _site/<page basename without extension>.
    inter = os.path.split(os.path.splitext(page)[0])[1]
    write_to = "_site/" + inter
    page_data.append((title, inter, tags_lst))
    with open(write_to, 'w') as f:
        f.write(final)

# Pass 2: one listing page per tag under _site/tags/.
all_tags = list(set(all_tags))
for tag in all_tags:
    pages = []
    for page_tuple in page_data:
        if tag in page_tuple[2]:
            pages.append({'title': page_tuple[0], 'url': "../" + page_tuple[1]})
    write_to = "_site/tags/" + tag
    env = Environment(loader=FileSystemLoader('.'))
    page_list = env.get_template('templates/page-list.html')
    body = page_list.render(pages=pages)
    skeleton = env.get_template('templates/skeleton.html')
    final = skeleton.render(body=body, title="Tag page for " + tag)
    with open(write_to, 'w') as f:
        f.write(final)
    print write_to

# Index page listing every tag.
env = Environment(loader=FileSystemLoader('.'))
page_list = env.get_template('templates/page-list.html')
pages = [{'title': tag, 'url': tag} for tag in all_tags]
body = page_list.render(pages=pages)
skeleton = env.get_template('templates/skeleton.html')
final = skeleton.render(title="All tags", body=body)
with open("_site/tags/index", 'w') as f:
    f.write(final)

# Index page listing every page on the site.
env = Environment(loader=FileSystemLoader('.'))
page_list = env.get_template('templates/page-list.html')
pages = [{'title': page_tup[0], 'url': page_tup[1]} for page_tup in page_data]
body = page_list.render(pages=pages)
skeleton = env.get_template('templates/skeleton.html')
final = skeleton.render(title="All pages on the site", body=body)
with open("_site/all", 'w') as f:
    f.write(final)
| Python | 0.999962 | |
ddbe9de5cfc5b412812096291db6a37d120e03ce | add plotting the distribution of fields and apgoee | py/plot_dustwapogee.py | py/plot_dustwapogee.py | ###############################################################################
# plot_dustwapogee: plot the dust-map at 5 kpc with the APOGEE fields in the
# sample overlayed
###############################################################################
import sys
import numpy
import healpy
from galpy.util import bovy_plot
import apogee.select.apogeeSelect
import dust
import define_rcsample
# nside to work at, 2048 is the max (HEALPix resolution parameter used
# when degrading the dust map below)
_NSIDE=2048
def plot_dustwapogee(plotname):
    """Plot the Green et al. (2015) dust map at 5 kpc on a Mollweide
    projection and overlay the APOGEE field centres of the RC sample.

    plotname -- output filename handed to bovy_plot.bovy_end_print.
    """
    # Load the dust map
    green15map = dust.load_green15(5., nest=True, nside_out=_NSIDE)
    green15map[green15map == healpy.UNSEEN] = -1.
    # plot it
    healpy.visufunc.mollview(green15map,
                             nest=True,
                             xsize=4000, min=0., max=.8,
                             format=r'$%g$',
                             title='',
                             cmap='gist_yarg',
                             unit='$A_H\,(\mathrm{mag})$')
    # Load the RC data to get the fields
    data = define_rcsample.get_rcsample()
    loc_ids = numpy.array(list(set(data['LOCATION_ID'])))
    # Load the selection function, just to get the field centers
    apo = apogee.select.apogeeSelect(_justprocessobslog=True)
    theta = numpy.empty(len(loc_ids))
    phi = numpy.empty(len(loc_ids))
    for ii, loc_id in enumerate(loc_ids):
        tl, tb = apo.glonGlat(loc_id)
        theta[ii] = (90. - tb) / 180. * numpy.pi
        phi[ii] = tl / 180. * numpy.pi
    # High-latitude fields (|b| > 8 deg) drawn with black markers.
    hib = numpy.fabs((numpy.pi / 2. - theta)) > (8. / 180. * numpy.pi)
    healpy.visufunc.projplot(theta[hib], phi[hib], 'o', ms=5., mfc='none',
                             mew=0.8, mec='k')
    # Low-latitude fields in white.  logical_not replaces the original
    # "True-hib", a boolean subtraction that modern numpy rejects.
    lowb = numpy.logical_not(hib)
    healpy.visufunc.projplot(theta[lowb], phi[lowb], 'o', ms=5., mfc='none',
                             mec='w', mew=0.8)
    bovy_plot.bovy_end_print(plotname)
# CLI: first argument is the output plot filename.
if __name__ == '__main__':
    plot_dustwapogee(sys.argv[1])
| Python | 0 | |
139a634515061674d3832320791d35ff512d8a5a | Add a snippet. | python/print_stderr.py | python/print_stderr.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys

# Write the message to stderr instead of stdout via print's file argument.
print("Error message", file=sys.stderr)
| Python | 0.000002 | |
2909b4a7e46fe4a466e0c99abf90222c43f34d93 | add tests for Every Election wrapper | polling_stations/apps/data_finder/tests/test_ee_wrapper.py | polling_stations/apps/data_finder/tests/test_ee_wrapper.py | import mock
from django.test import TestCase
from data_finder.helpers import EveryElectionWrapper
# mock get_data() functions
def get_data_exception(self, postcode):
    # Simulates the EveryElection API being unreachable / erroring.
    raise Exception()
def get_data_no_elections(self, postcode):
    # Simulates a successful API call with no elections for the postcode.
    return []
def get_data_with_elections(self, postcode):
    # Simulates three elections covering every explanation-key case.
    return [
        {}, # no explanation key
        {'explanation': None}, # null explanation key
        {'explanation': 'some text'}, # explanation key contains text
    ]
class EveryElectionWrapperTest(TestCase):
    """Unit tests for EveryElectionWrapper with get_data() mocked out."""

    @mock.patch("data_finder.helpers.EveryElectionWrapper.get_data", get_data_exception)
    def test_exception(self):
        # On API failure the wrapper fails open: assume there IS an election.
        ee = EveryElectionWrapper('AA11AA')
        self.assertFalse(ee.request_success)
        self.assertTrue(ee.has_election())
        self.assertEqual([], ee.get_explanations())

    @mock.patch("data_finder.helpers.EveryElectionWrapper.get_data", get_data_no_elections)
    def test_no_elections(self):
        # Successful call, empty result: no election, no explanations.
        ee = EveryElectionWrapper('AA11AA')
        self.assertTrue(ee.request_success)
        self.assertFalse(ee.has_election())
        self.assertEqual([], ee.get_explanations())

    @mock.patch("data_finder.helpers.EveryElectionWrapper.get_data", get_data_with_elections)
    def test_elections(self):
        # Only entries with a non-empty explanation value are surfaced.
        ee = EveryElectionWrapper('AA11AA')
        self.assertTrue(ee.request_success)
        self.assertTrue(ee.has_election())
        self.assertEqual(['some text'], ee.get_explanations())
| Python | 0 | |
8d31c4091edbd955dc292d8b9ebc75fb69477df9 | Add image processor for ros | Turtlebot/image.py | Turtlebot/image.py | #!/usr/bin/env python
import cv2
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import datetime
import numpy as np
import socket
import sys
import threading
import select
import signal
class ImageConvertor:
    """ROS node that subscribes to the Kinect RGB topic, JPEG-encodes
    incoming frames, and hands them to a small MJPEG-over-HTTP server
    (class Server) for streaming to browsers."""

    # NOTE(review): shadowed by the instance attribute set in __init__;
    # appears unused.
    c = 0

    def __init__(self):
        self.server = None
        # Timestamp of the last frame actually forwarded (None until then).
        self.old = None
        # Minimum spacing between forwarded frames, compared against
        # microseconds/10 below.
        self.refresh_rate = 40
        print("Starting saving process")
        rospy.init_node('image_converter', anonymous=True)
        self.bridge = CvBridge()
        # Subscribe to the raw RGB image topic.
        self.image_sub = rospy.Subscriber(
            '/camera/rgb/image_raw', Image, self._save_image)
        self.c = 0
        try:
            rospy.spin()
        except KeyboardInterrupt:
            print("Shutting down")

    def _save_image(self, data):
        """Topic callback: throttle, JPEG-encode and publish the frame to
        every connected streaming client."""
        now = datetime.datetime.now()
        if self.old:
            # Throttle only once a previous frame exists.  The original
            # code compared an initial milis of 0 against refresh_rate and
            # returned before ever setting self.old, so the first frame --
            # and therefore every frame -- was dropped.
            milis = (now - self.old).microseconds / 10
            if milis < self.refresh_rate:
                return
        if self.server is None:
            # Lazily start the HTTP server on its own daemon thread.
            t1 = threading.Thread(target=self.start_server)
            t1.setDaemon(True)
            t1.start()
        try:
            # Convert the ROS image message to an OpenCV image.
            cv_image = self.bridge.imgmsg_to_cv(data, "bgr8")
            # JPEG-encode at low quality to keep the stream light.
            _, data = cv2.imencode(
                '.jpg', np.asarray(cv_image), [cv2.cv.CV_IMWRITE_JPEG_QUALITY, 20])
            if not self.server:
                return
            for c in self.server.threads:
                # Publish the encoded frame to each connected client.
                c.q = data.tostring()
        except CvBridgeError as e:
            print("image conversion error:" + str(e))
        self.old = now

    def start_server(self):
        # Runs on a daemon thread; Server.run() blocks on select().
        self.server = Server()
        self.server.run()
class Server:
    """Minimal TCP server accepting MJPEG stream clients on port 8090."""

    def __init__(self):
        self.host = ''
        self.port = 8090
        self.backlog = 5
        self.size = 1024
        self.server = None
        # One Client thread per accepted connection.
        self.threads = []

    def open_socket(self):
        # Open a new socket
        try:
            self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server.bind((self.host, self.port)) # Tuple
            self.server.listen(5)
        except socket.error, (value, message):
            if self.server:
                self.server.close()
            print "Could not open socket :" + str(message)
            sys.exit(1)

    def run(self):
        """Accept loop: multiplex the listening socket and stdin with
        select(); any line on stdin shuts the server down."""
        self.open_socket()
        input = [self.server, sys.stdin]
        running = 1
        while running == 1:
            inputready, outputready, exceptready = select.select(input, [], [])
            for s in inputready:
                if s == self.server:
                    # New connection: spawn a streaming Client thread.
                    c = Client(self.server.accept())
                    c.start()
                    self.threads.append(c)
                elif s == sys.stdin:
                    sys.stdin.readline()
                    self.close()

    def close(self):
        # Close the listening socket and wait for all client threads.
        self.server.close()
        print "Exiting..."
        for c in self.threads:
            c.join()
class Client(threading.Thread):
    """Per-connection thread streaming multipart JPEG frames to a browser."""

    def __init__(self, (client, address)):
        threading.Thread.__init__(self)
        self.client = client
        self.address = address
        self.size = 1024
        self.send_header = False
        # Latest JPEG frame (byte string), set by ImageConvertor.
        self.q = None

    def generate_http_header(self):
        """ Multipart image header to allow jpg streaming to a <img> tag"""
        response = []
        response.append('HTTP/1.1 200 OK')
        response.append(
            'Content-Type: multipart/x-mixed-replace; boundary=--spionisto')
        response.append("\r\n")
        return '\r\n'.join(response)

    def generate_chunck_header(self, size):
        """ Frame header """
        response = []
        response.append("--spionisto")
        response.append("Content-Type:image/jpeg")
        response.append("Content-Length:" + str(size))
        response.append("\r\n")
        return '\r\n'.join(response)

    def run(self):
        # Consume the client's HTTP request, then stream frames until the
        # connection breaks.
        data = self.client.recv(self.size)
        # First send the stream header
        self.client.send(self.generate_http_header())
        while True:
            try:
                data = self.q
                if not data:
                    # No frame published yet; busy-wait for one.
                    continue
                # Send the frame header
                self.client.send(self.generate_chunck_header(len(data)))
                # Send the frame
                self.client.send(data)
                # Send the frame separator
                self.client.send("\r\n\r\n")
            except Exception, e:
                print "image processor error:" + str(e)
                self.client.close()
                return
# Ctrl-C handler: close the streaming server socket before exiting.
def signal_handler(signal, frame):
    if ic:
        ic.server.close()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Global node handle read by signal_handler above.
ic = None
if __name__ == '__main__':
    ic = ImageConvertor()
67d760f0a3ed081d43237e1b2106b86a4e6a56c6 | add log handler | Util/LogHandler.py | Util/LogHandler.py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name๏ผ LogHandler.py
Description :
Author : JHao
date๏ผ 2017/3/6
-------------------------------------------------
Change Activity:
2017/3/6: log handler
-------------------------------------------------
"""
__author__ = 'JHao'
import logging
from logging.handlers import TimedRotatingFileHandler
# Log levels (mirror the logging module's numeric constants).
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
class LogHandler(logging.Logger):
    """
    Logger that writes both to a daily-rotated file under ../log/ and to
    the console, using the same record format for each.
    """

    def __init__(self, name, level=DEBUG):
        self.name = name
        self.level = level
        logging.Logger.__init__(self, self.name, level=level)
        self.__setFileHandler__()
        self.__setStreamHandler__()

    def __setFileHandler__(self, level=None):
        """
        Attach a TimedRotatingFileHandler: one file per day, 15 backups
        kept, rotated files suffixed YYYYMMDD.log.
        :param level: override level; falls back to the logger's level.
        :return:
        """
        file_name = '../log/%s' % self.name
        handler = TimedRotatingFileHandler(filename=file_name, when='D', interval=1, backupCount=15)
        handler.suffix = '%Y%m%d.log'
        handler.setLevel(level if level else self.level)
        handler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'))
        self.addHandler(handler)

    def __setStreamHandler__(self, level=None):
        """
        Attach a console StreamHandler with the shared record format.
        :param level: override level; falls back to the logger's level.
        :return:
        """
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'))
        handler.setLevel(level if level else self.level)
        self.addHandler(handler)
# Placeholder manual-test entry point (intentionally a no-op).
if __name__ == '__main__':
    # log = get_logger("aa")
    # log.error("aa")
    pass
| Python | 0.000002 | |
7331e1d1061a7a1ac9abc583d45746facfde9180 | Create search-in-a-binary-search-tree.py | Python/search-in-a-binary-search-tree.py | Python/search-in-a-binary-search-tree.py | # Time: O(h)
# Space: O(1)
# Given the root node of a binary search tree (BST) and a value.
# You need to find the node in the BST that the node's value equals the given value.
# Return the subtree rooted with that node.
# If such node doesn't exist, you should return NULL.
#
# For example,
#
# Given the tree:
# 4
# / \
# 2 7
# / \
# 1 3
#
# And the value to search: 2
# You should return this subtree:
#
# 2
# / \
# 1 3
# In the example above,
# if we want to search the value 5,
# since there is no node with value 5, we should return NULL.
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        # Children start out absent; callers wire them up afterwards.
        self.val, self.left, self.right = x, None, None
class Solution(object):
    """LeetCode 700: locate a value in a binary search tree."""

    def searchBST(self, root, val):
        """Return the subtree rooted at the node whose value equals ``val``.

        Walks down the tree using BST ordering; O(h) time, O(1) space.

        :type root: TreeNode
        :type val: int
        :rtype: TreeNode (or None when the value is absent)
        """
        node = root
        while node is not None:
            if val == node.val:
                return node
            # Go left for smaller targets, right for larger ones.
            node = node.left if val < node.val else node.right
        return None
| Python | 0.000023 | |
7ccd01315c3be8f8742d6bde0351f10aa431057a | Add grid_2D.py | worlds/grid_2D.py | worlds/grid_2D.py | '''
Created on Jan 11, 2012
@author: brandon_rohrer
'''
import stub_world
import agent_stub as ag
import pickle
import numpy as np
#import matplotlib.pyplot as plt
class World(stub_world.StubWorld):
    ''' grid_2D.World
    Two-dimensional grid task
    In this task, the agent steps North, South, East, or West in a
    5 x 5 grid-world. Position (4,4) is rewarded (1/2) and (2,2) is
    punished (-1/2). There is also a penalty of -1/20 for each horizontal
    or vertical step taken.
    Horizonal and vertical positions are reported
    separately as basic features, rather than raw sensory inputs.
    This is intended to be a
    simple-as-possible-but-slightly-more-interesting-
    that-the-one-dimensional-task task for troubleshooting BECCA.
    Optimal performance is between 0.3 and 0.35 reward per time step.
    '''
    def __init__(self):
        ''' default constructor
        '''

    def initialize(self):
        ''' performs initialization, but gets around the fact that __init__()
        can't return objects
        '''
        self.filename_prefix = "grid_2D"
        self.agent_filename = self.filename_prefix + "_agent.pickle"
        self.world_filename = self.filename_prefix + "_world.pickle"
        # if there is a stored version of the world and agent, loads it
        try:
            # Rebinding the local name `self` to the unpickled world is
            # deliberate here: the restored object is what gets returned below.
            with open(self.world_filename, 'rb') as world_data:
                self = pickle.load(world_data)
            with open(self.agent_filename, 'rb') as agent_data:
                agent = pickle.load(agent_data)
            print('World restored at timestep ' + str(self.timestep))
        # otherwise initializes from scratch
        # NOTE(review): bare except swallows every failure mode (missing file,
        # corrupt pickle, KeyboardInterrupt) -- confirm this is intended.
        except:
            print('Initializing world and agent...')
            self.REPORTING_PERIOD = 10 ** 3
            self.BACKUP_PERIOD = 10 ** 3
            self.LIFESPAN = 10 ** 4
            self.ENERGY_PENALTY = 0.05
            self.timestep = 0
            self.num_sensors = 1
            self.num_actions = 9
            self.world_size = 5
            self.num_primitives = self.world_size ** 2
            # Agent starts at grid cell (1, 1).
            self.world_state = np.array([1, 1])
            self.simple_state = self.world_state.copy()
            self.target = (4,4)
            self.obstacle = (2,2)
            self.sensors = np.zeros(self.num_sensors)
            self.primitives = np.zeros(self.num_primitives)
            self.actions = np.zeros(self.num_actions)
            self.reward = 0
            self.cumulative_reward = 0
            self.reward_history = np.array([])
            self.motor_output_history = np.array([])
            self.display_features_flag = False
            # Plotting setup kept disabled (matplotlib import is commented out).
            """
            plt.figure(1)
            plt.clf
            plt.xlabel('block (' + str(self.REPORTING_PERIOD) + ' time steps per block)');
            plt.ylabel('reward per block');
            plt.ion()
            """
            # A fresh agent is only built on the from-scratch path; the
            # restore path above unpickles the saved agent instead.
            agent = ag.Agent(self.num_sensors, self.num_primitives, self.num_actions)
            self.set_agent_parameters(agent)
        return(self, agent)

    def set_agent_parameters(self, agent):
        ''' sets parameters in the BECCA agent that are specific to a particular world.
        Strictly speaking, this method violates the minimal interface between the
        agent and the world (observations, action, and reward). Ideally, it will
        eventually become obselete. As BECCA matures it will be able to handle
        more tasks without changing its parameters.
        '''
        pass

    def display(self):
        ''' provides an intuitive display of the current state of the World
        to the user
        '''
        if (self.display_features_flag):
            state_img = ['.'] * self.num_primitives
            # NOTE(review): world_state is a 2-element array used here as a
            # single index -- looks like it should be the flattened position;
            # confirm before enabling display_features_flag.
            state_img[self.world_state] = 'O'
            print('world timestep ' + str(self.timestep) + ' ' + ''.join(state_img))
        if (np.mod(self.timestep, self.REPORTING_PERIOD) == 0):
            # Close out a reporting block: archive and reset the running total.
            self.reward_history = np.append(self.reward_history, self.cumulative_reward)
            self.cumulative_reward = 0
            #plt.plot(self.reward_history)

    def log(self, agent):
        ''' logs the state of the world into a history that can be used to
        evaluate and understand BECCA's behavior
        '''
        self.cumulative_reward += self.reward
        if (np.mod(self.timestep, self.BACKUP_PERIOD) == 0):
            # stores the world and the agent
            try:
                with open(self.world_filename, 'wb') as world_data:
                    pickle.dump(self, world_data)
                with open(self.agent_filename, 'wb') as agent_data:
                    pickle.dump(agent, agent_data)
                print('agent data saved at ' + str(self.timestep) + ' time steps')
            except IOError as err:
                print('File error: ' + str(err) + ' encountered while saving data')
            except pickle.PickleError as perr:
                print('Pickling error: ' + str(perr) + ' encountered while saving data')

    def step(self, action, agent):
        ''' advances the World by one timestep.
        Accepts agent as an argument only so that it can occasionally backup
        the agent's state to disk.
        '''
        self.timestep += 1
        action = np.round(action)
        # Actions 0-1 step +1, 2-3 step +2, 4-5 step -1, 6-7 step -2 per axis.
        self.world_state += (action[0:2] + 2 * action[2:4] - action[4:6] - 2 * action[6:8]).transpose()
        # NOTE(review): the last term is subtracted, so -2 steps reduce the
        # energy cost instead of adding it -- confirm the intended sign.
        energy = np.sum(action[0:2]) + np.sum(2 * action[2:4]) + np.sum(action[4:6]) - np.sum(2 * action[6:8])
        #enforces lower and upper limits on the grid world by looping them around.
        #It actually has a toroidal topology.
        indices = self.world_state >= self.world_size - 0.5
        self.world_state[indices] -= self.world_size
        indices = self.world_state <= -0.5
        self.world_state[indices] += self.world_size
        self.simple_state = np.round(self.world_state)
        # One-hot encoding of the (row, col) cell over the 25 grid positions.
        self.basic_feature_input = np.zeros((self.world_size ** 2,))
        self.basic_feature_input[self.simple_state[1] + self.simple_state[0] * self.world_size] = 1
        self.reward = 0
        if tuple(self.simple_state.flatten()) == self.obstacle:
            self.reward = -0.5
        elif tuple(self.simple_state.flatten()) == self.target:
            self.reward = 0.5
        self.reward -= self.ENERGY_PENALTY * energy
        self.log(agent)
        self.display()

    def final_performance(self):
        ''' When the world terminates, this returns the average performance
        of the agent on the last 3 blocks.
        '''
        if (self.timestep > self.LIFESPAN):
            performance = np.mean(self.reward_history[-3:]) / self.REPORTING_PERIOD
            #plt.ioff()
            #plt.show()
            assert(performance >= -1.)
            return performance
        # Sentinel meaning "still running / lifespan not reached yet".
        return -2
| Python | 0.000005 | |
130d94001810a72e3647ec169b5a26d556bf0101 | Create pregunta2.py | pregunta2.py | pregunta2.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import random
from adivinaconf import *
class SetEncoder(json.JSONEncoder):
    """JSON encoder that serialises ``set`` instances as plain lists."""

    def default(self, obj):
        # Only sets get special treatment; defer everything else to the base
        # implementation (which raises TypeError for unknown types).
        if not isinstance(obj, set):
            return json.JSONEncoder.default(self, obj)
        return list(obj)
def trad_sino(s):
    """Translate a free-text yes/no answer into the SI/NO constants.

    An empty initial answer means "skip" and yields None. Otherwise the user
    is re-prompted on stdin until the (lowercased) reply is acceptable.
    """
    if not s:
        return None
    while s.lower() not in ACEPTABLE_SINO:
        s = raw_input('> ')
    if s in POS_SI:
        return SI
    if s in POS_NO:
        return NO
def si_es_no(s):
    """Return the opposite answer constant: SI -> NO, anything else -> SI."""
    return NO if s == SI else SI
def aprender(cosa):
    """Record ``cosa`` under every question answered in the current round.

    Mutates the global ``db``: each answered question's bucket is coerced to
    a set and the new thing is added to it.
    """
    for preg, resp in respuestas_actual.iteritems():
        bucket = set(db[preg][resp])
        bucket.add(cosa)
        db[preg][resp] = bucket
def obt_resp(preg):
    """Count how many still-candidate answers are filed under YES for ``preg``."""
    return sum(1 for resp in respuestas if resp in db[preg][SI])
def masgrande(st):
t = zip([len(db[x][SI])+len(db[x][NO]) for x in st],[x for x in st])
print t
return max(t)[1]
def masgrande2(st):
    """Pick the question whose YES bucket covers the most remaining answers."""
    scored = [(obt_resp(x), x) for x in st]
    return max(scored)[1]
def loaddb():
    """Load the question database for the configured COSA from its JSON file.

    :returns: dict mapping question -> {SI: [...], NO: [...]}
    """
    # Use a context manager so the handle is closed even on parse errors
    # (the original left the file open).
    with open('database_{}.json'.format(COSA), 'r') as fh:
        return json.loads(fh.read())
def savedb():
    """Persist the global ``db`` to the per-COSA JSON file.

    Sets inside the database are serialised as lists via SetEncoder.
    """
    global db
    # Context manager instead of manual close(): the file is flushed and
    # closed even if json serialisation raises.
    with open('database_{}.json'.format(COSA), 'w') as fw:
        fw.write(json.dumps(db, indent=0, cls=SetEncoder))
def generarsets():
    """Derive the working sets for a new game from the global ``db``.

    :returns: (questions, known answers, score table initialised to 1 each)
    """
    preguntas = set(db.iterkeys())
    # Every thing mentioned in any question's YES/NO bucket is a known answer.
    respuestas = set(
        cosa
        for buckets in db.values()
        for lst in buckets.values()
        for cosa in lst)
    puntos = dict((r, 1) for r in respuestas)
    return preguntas, respuestas, puntos
def maxpuntos():
    """Return the (score, answer) pair with the highest score, or (0, 0) when empty."""
    if not puntos:
        return (0, 0)
    return max(zip(puntos.values(), puntos.keys()))
def main():
    """Run the interactive 20-questions style guessing game on stdin/stdout."""
    print 'Vamos a jugar un juego, piensa en un {}, cuando quieras, aprieta enter.\n Cuando no sepa la respuesta, presione enter para saltar la pregunta'.format(COSA)
    raw_input('')
    global db
    global respuestas_actual
    global respuestas
    global puntos
    win = False
    db = loaddb()
    preguntas,respuestas,puntos = generarsets()
    respuestas_actual = dict()
    intentos = MAXINTENTOS
    while True:
        # While questions and candidate answers remain (and, when playing in
        # scored mode, no answer has reached the maximum score), ask a question
        # chosen by the configured strategy (RANDOM, NUEVA_AI or traditional).
        while len(respuestas)>0 and len(preguntas)>0 and (not PUNTOS or maxpuntos()[0]<PUNTAJEMAX):
            if RANDOM:
                p_actual = random.choice(list(preguntas))
            elif NUEVA_AI:
                p_actual = masgrande2(preguntas)
            else:
                p_actual = masgrande(preguntas)
            preguntas.remove(p_actual)
            print '¿Tu {} {}? (s/n) '.format(COSA,p_actual),
            ra=trad_sino(raw_input(''))
            if ra is None:
                # Empty answer: skip this question entirely.
                continue
            respuestas_actual[p_actual]=ra
            for e in respuestas.copy():
                # Drop a candidate when it does not match this reply AND
                # either fast mode is on OR it is known to match the
                # opposite reply; matching candidates gain a point.
                if e not in db[p_actual][ra] and (RAPIDO or e in db[p_actual][si_es_no(ra)]):
                    respuestas.remove(e)
                    puntos.pop(e,None)
                elif e in db[p_actual][ra]:
                    puntos[e] = puntos.get(e,0) + 1
        # If candidates remain and one has hit the max score (or only one or
        # two candidates are left), try to guess it outright.
        while (len(respuestas)>0 and (not PUNTOS or maxpuntos()[0] == PUNTAJEMAX)) or len(respuestas) in [1,2]:
            if PUNTOS:
                actual = maxpuntos()[1]
            else:
                actual= random.choice(list(respuestas))
            print '¿Tu {} es {}? (s/n) '.format(COSA,actual),
            ra= trad_sino(raw_input(''))
            if ra == SI:
                print 'Gané, jejeje'
                aprender(actual)
                win = True
                break
            if ra == NO:
                print 'No puede ser, nooooo!'
                intentos -=1
                puntos.pop(actual,None)
                if actual in respuestas: respuestas.remove(actual)
        # NOTE(review): winning exits immediately without calling savedb(), so
        # the association recorded by aprender() above is lost -- confirm.
        if win: break
        # If attempts and candidates remain, go back to asking questions.
        if intentos >0 and len(respuestas)>0 and len(preguntas)>0: continue
        # Out of options: learn the new thing and a distinguishing question.
        else:
            print 'Me rindo.¿Qué {} era?: '.format(COSA)
            nueva_cosa = raw_input(u'').lower()
            print "\nPerfecto.\nPodrías formular una pregunta de tipo 'sí o no' para diferenciar tu {}? (la respuesta debe ser sí)".format(COSA)
            print "Completa la siguiente oración: '¿Tu {} ...... ?'".format(COSA)
            while True:
                print "¿Tu {}...".format(COSA),
                pregunta_nueva = raw_input(u'').lower()
                if pregunta_nueva in preguntas or pregunta_nueva in respuestas_actual:
                    print 'Esa pregunta ya me la sé, intenta con otra.'
                    respuestas_actual[pregunta_nueva]=SI
                    continue
                elif len(pregunta_nueva)<1:
                    # Empty question: give up on collecting a new one.
                    break
                respuestas_actual[pregunta_nueva]=SI
                db[pregunta_nueva]=dict()
                db[pregunta_nueva][SI] = set()
                db[pregunta_nueva][NO] = set()
                break
            aprender(nueva_cosa)
        print 'Querrías jugar de nuevo? ',
        ra = trad_sino(raw_input(''))
        if ra == SI:
            # Persist what was learned and reset the game state for a new round.
            savedb()
            preguntas,respuestas,puntos = generarsets()
            respuestas_actual = dict()
            intentos = MAXINTENTOS
            continue
        if ra == NO or ra is None:
            savedb()
            break
# Script entry point: run the interactive guessing game.
if __name__ == '__main__':
    main()
| Python | 0.999989 | |
58311387849f8785fa964eb01e728c92bc0d8b61 | Create levenshtein.py | levenshtein.py | levenshtein.py |
# source: http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance
def levenshtein(source, target):
if len(source) < len(target):
return levenshtein(target, source)
# So now we have len(source) >= len(target).
if len(target) == 0:
return len(source)
# We call tuple() to force strings to be used as sequences
# ('c', 'a', 't', 's') - numpy uses them as values by default.
source = np.array(tuple(source))
target = np.array(tuple(target))
# We use a dynamic programming algorithm, but with the
# added optimization that we only need the last two rows
# of the matrix.
previous_row = np.arange(target.size + 1)
for s in source:
# Insertion (target grows longer than source):
current_row = previous_row + 1
# Substitution or matching:
# Target and source items are aligned, and either
# are different (cost of 1), or are the same (cost of 0).
current_row[1:] = np.minimum(
current_row[1:],
np.add(previous_row[:-1], target != s))
# Deletion (target grows shorter than source):
current_row[1:] = np.minimum(
current_row[1:],
current_row[0:-1] + 1)
previous_row = current_row
return previous_row[-1]
| Python | 0.000001 | |
d205c9a5a2d92190676a30156e039f8cdd400629 | Correct base API | pysis/sis.py | pysis/sis.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from datetime import datetime
import calendar
class SIS(object):
    # Default production endpoint; the commented pair below is kept for
    # pointing the client at a local development server.
    __BASE_URL__ = 'http://api.ndustrial.io/v1/'
    __API_DOMAIN__ = 'api.ndustrial.io'
    #__BASE_URL__ = 'http://localhost:3000/v1/'
    #__API_DOMAIN__ = 'localhost:3000'
    """Main SIS object
    You can configure all services globally using the config dict.
    See the attributes in `pysis.core.client`.
    Examples:
    s = SIS(token='xyz...')
    s = SIS(token='xyz...', base_url='http://api.sustainableis.com/v2/')
    s = SIS(token='xyz...', enableParamChecks=False)
    """
    # NOTE(review): the string literal above is not the class docstring (it is
    # not the first statement in the class body), so it is an inert expression.
    def __init__(self, **config):
        """Build the HTTP client and one service facade per API area.

        :param config: forwarded to pysis.core.client.Client; recognised keys
            include ``base_url``, ``api_domain`` and ``enableParamChecks``.
        """
        # Service imports are deferred into __init__; presumably to avoid
        # import cycles with the service modules -- TODO confirm.
        from pysis.core.client import Client
        from pysis.resources.base import Resource
        from pysis.services.organizations import Organizations
        from pysis.services.facilities import Facilities
        from pysis.services.utilities import Utilities
        from pysis.services.outputs import Outputs
        from pysis.services.buildings import Buildings
        from pysis.services.feeds import Feeds
        from pysis.services.users import Users
        from pysis.services.blastcells import Blastcells
        from pysis.services.weather import Weather
        from pysis.services.configurations import Configurations
        from pysis.services.oauth import Oauth
        from pysis.services.workers import Workers
        from pysis.services.alerts import Alerts
        from pysis.services.emails import Emails
        from pysis.services.reports import Reports
        from pysis.services.metrics import Metrics
        # Parameter checking is on unless the caller explicitly disables it.
        enableParamChecks = True
        if 'enableParamChecks' in config:
            enableParamChecks = config['enableParamChecks']
        Resource.setParamCheck(enableParamChecks)
        # Fill in endpoint defaults only when the caller did not override them.
        if 'api_domain' not in config:
            config['api_domain'] = self.__API_DOMAIN__
        if 'base_url' not in config:
            config['base_url'] = self.__BASE_URL__
        self._client = Client(**config)
        # One facade per API area, all sharing the same client instance.
        self._organizations = Organizations(self._client)
        self._facilities = Facilities(self._client)
        self._utilities = Utilities(self._client)
        self._outputs = Outputs(self._client)
        self._buildings = Buildings(self._client)
        self._feeds = Feeds(self._client)
        self._users = Users(self._client)
        self._blastcells = Blastcells(self._client)
        self._weather = Weather(self._client)
        self._configurations = Configurations(self._client)
        self._oauth = Oauth(self._client)
        self._workers = Workers(self._client)
        self._alerts = Alerts(self._client)
        self._emails = Emails(self._client)
        self._reports = Reports(self._client)
        self._metrics = Metrics(self._client)
    # Read-only accessors for the service facades built in __init__.
    @property
    def organizations(self):
        return self._organizations
    @property
    def facilities(self):
        return self._facilities
    @property
    def outputs(self):
        return self._outputs
    @property
    def buildings(self):
        return self._buildings
    @property
    def users(self):
        return self._users
    @property
    def feeds(self):
        return self._feeds
    @property
    def blastcells(self):
        return self._blastcells
    @property
    def weather(self):
        return self._weather
    @property
    def configurations(self):
        return self._configurations
    @property
    def oauth(self):
        return self._oauth
    @property
    def workers(self):
        return self._workers
    @property
    def alerts(self):
        return self._alerts
    @property
    def emails(self):
        return self._emails
    @property
    def reports(self):
        return self._reports
    @property
    def utilities(self):
        return self._utilities
    @property
    def metrics(self):
        return self._metrics
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from datetime import datetime
import calendar
class SIS(object):
    # Default API endpoint; the commented pair below targets a local dev server.
    __BASE_URL__ = 'http://api.sustainableis.com/v1/'
    __API_DOMAIN__ = 'api.sustainableis.com'
    #__BASE_URL__ = 'http://localhost:3000/v1/'
    #__API_DOMAIN__ = 'localhost:3000'
    """Main SIS object
    You can configure all services globally using the config dict.
    See the attributes in `pysis.core.client`.
    Examples:
    s = SIS(token='xyz...')
    s = SIS(token='xyz...', base_url='http://api.sustainableis.com/v2/')
    s = SIS(token='xyz...', enableParamChecks=False)
    """
    # NOTE(review): the string literal above is not the class docstring (it is
    # not the first statement in the class body), so it is an inert expression.
    def __init__(self, **config):
        """Build the HTTP client and one service facade per API area.

        :param config: forwarded to pysis.core.client.Client; recognised keys
            include ``base_url``, ``api_domain`` and ``enableParamChecks``.
        """
        # Service imports are deferred into __init__; presumably to avoid
        # import cycles with the service modules -- TODO confirm.
        from pysis.core.client import Client
        from pysis.resources.base import Resource
        from pysis.services.organizations import Organizations
        from pysis.services.facilities import Facilities
        from pysis.services.utilities import Utilities
        from pysis.services.outputs import Outputs
        from pysis.services.buildings import Buildings
        from pysis.services.feeds import Feeds
        from pysis.services.users import Users
        from pysis.services.blastcells import Blastcells
        from pysis.services.weather import Weather
        from pysis.services.configurations import Configurations
        from pysis.services.oauth import Oauth
        from pysis.services.workers import Workers
        from pysis.services.alerts import Alerts
        from pysis.services.emails import Emails
        from pysis.services.reports import Reports
        from pysis.services.metrics import Metrics
        # Parameter checking is on unless the caller explicitly disables it.
        enableParamChecks = True
        if 'enableParamChecks' in config:
            enableParamChecks = config['enableParamChecks']
        Resource.setParamCheck(enableParamChecks)
        # Fill in endpoint defaults only when the caller did not override them.
        if 'api_domain' not in config:
            config['api_domain'] = self.__API_DOMAIN__
        if 'base_url' not in config:
            config['base_url'] = self.__BASE_URL__
        self._client = Client(**config)
        # One facade per API area, all sharing the same client instance.
        self._organizations = Organizations(self._client)
        self._facilities = Facilities(self._client)
        self._utilities = Utilities(self._client)
        self._outputs = Outputs(self._client)
        self._buildings = Buildings(self._client)
        self._feeds = Feeds(self._client)
        self._users = Users(self._client)
        self._blastcells = Blastcells(self._client)
        self._weather = Weather(self._client)
        self._configurations = Configurations(self._client)
        self._oauth = Oauth(self._client)
        self._workers = Workers(self._client)
        self._alerts = Alerts(self._client)
        self._emails = Emails(self._client)
        self._reports = Reports(self._client)
        self._metrics = Metrics(self._client)
    # Read-only accessors for the service facades built in __init__.
    @property
    def organizations(self):
        return self._organizations
    @property
    def facilities(self):
        return self._facilities
    @property
    def outputs(self):
        return self._outputs
    @property
    def buildings(self):
        return self._buildings
    @property
    def users(self):
        return self._users
    @property
    def feeds(self):
        return self._feeds
    @property
    def blastcells(self):
        return self._blastcells
    @property
    def weather(self):
        return self._weather
    @property
    def configurations(self):
        return self._configurations
    @property
    def oauth(self):
        return self._oauth
    @property
    def workers(self):
        return self._workers
    @property
    def alerts(self):
        return self._alerts
    @property
    def emails(self):
        return self._emails
    @property
    def reports(self):
        return self._reports
    @property
    def utilities(self):
        return self._utilities
    @property
    def metrics(self):
        return self._metrics
| Python | 0.000003 |
ef7834df028b0d04d12298183dbc5602bd7ba92a | Add some rough unit tests for the API | spiff/api/tests.py | spiff/api/tests.py | from django.test import TestCase
from django.test.client import Client
from spiff import membership, inventory
from django.contrib.auth.models import User
import json
class APITestMixin(TestCase):
    """Shared helpers for exercising the JSON API through Django's test client."""

    def setUp(self):
        # Creates a known fixture user and a fresh test client.
        self.user = User.objects.create_user('test', 'test@example.com', 'test')
        self.user.first_name = 'Test'
        self.user.last_name = 'McTesterson'
        self.user.save()
        self.client = Client()

    def setupAPI(self):
        # Creates a known fixture user and a fresh test client; call from setUp().
        self.user = User.objects.create_user('test', 'test@example.com', 'test')
        self.user.first_name = 'Test'
        self.user.last_name = 'McTesterson'
        self.user.save()
        self.client = Client()

    def login(self):
        # Authenticate the shared client as the fixture user.
        self.client.login(username='test', password='test')

    def postAPIRaw(self, endpoint, struct=None):
        # POST ``struct`` as a JSON body; bare POST when no payload is given.
        if struct:
            return self.client.post(
                endpoint,
                json.dumps(struct),
                content_type="application/json"
            )
        else:
            return self.client.post(endpoint)

    def getAPIRaw(self, endpoint, args=None):
        # GET with optional query parameters.
        if args:
            return self.client.get(endpoint, args)
        return self.client.get(endpoint)

    def postAPI(self, endpoint, struct=None, status=200):
        """POST, assert the expected status, and decode the JSON body if any.

        Returns the decoded body (which must contain 'objects') or None for
        empty responses.
        """
        ret = self.postAPIRaw(endpoint, struct)
        self.assertEqual(ret.status_code, status)
        if len(ret.content):
            ret = json.loads(ret.content)
            self.assertIn('objects', ret)
        else:
            ret = None
        return ret

    def getAPI(self, endpoint, struct=None, status=200):
        """GET, assert the expected status, and return the decoded JSON body.

        Every body returned here is required to carry an 'objects' key.
        """
        ret = self.getAPIRaw(endpoint, struct)
        self.assertEqual(ret.status_code, status)
        ret = json.loads(ret.content)
        self.assertIn('objects', ret)
        return ret
class ResourceTestMixin(TestCase):
    """Mixin providing inventory Resource fixtures for API tests."""

    def setupResource(self, name='Resource'):
        """Create and return a trainable Resource named ``name``.

        The first resource created is remembered as ``self.resource``.
        Fix: the ``name`` argument was previously ignored and every resource
        was created with the literal name 'Resource'.
        """
        resource = inventory.models.Resource.objects.create(
            name=name,
            trainable=True,
        )
        if not hasattr(self, 'resource'):
            self.resource = resource
        return resource
class ResourceMetadataTest(APITestMixin, ResourceTestMixin):
    """Tests for listing and creating resource metadata over the API."""

    def setUp(self):
        self.setupAPI()
        self.setupResource()

    def addMeta(self, resource, name, value, type=inventory.models.META_TYPES[0][0]):
        # Create a metadata row directly in the database (bypasses the API).
        meta = inventory.models.Metadata.objects.create(
            resource=resource,
            name=name,
            value=value,
            type=type
        )
        return meta

    def getMeta(self, resource=None):
        # Fetch the metadata listing for a resource (defaults to the fixture one).
        if resource is None:
            resource = self.resource
        return self.getAPI('/v1/resource/%s/metadata/'%(resource.id))

    def testGetBlankMeta(self):
        # A fresh resource exposes an empty metadata list.
        meta = self.getMeta()
        self.assertTrue(len(meta['objects']) == 0)

    def testGetSingleMeta(self):
        # Metadata created in the DB is visible through the API listing.
        meta = self.addMeta(self.resource, 'meta-test', 'meta-test-value')
        meta = self.getMeta()
        self.assertEqual(len(meta['objects']), 1)
        self.assertEqual(meta['objects'][0]['name'], 'meta-test')
        self.assertEqual(meta['objects'][0]['value'], 'meta-test-value')

    def testUnauthedCreateMeta(self):
        # Creating metadata without logging in must be rejected with 401.
        self.postAPI('/v1/metadata/',{
            'resource': '/v1/resource/%s/'%(self.resource.id),
            'name': 'api-meta',
            'value': 'api-meta-test',
            'type': 0
        }, status=401)

    def testCreateMeta(self):
        # After login, metadata can be created (201) and then read back.
        meta = self.getMeta()
        self.assertEqual(len(meta['objects']), 0)
        self.login()
        self.postAPI('/v1/metadata/',{
            'resource': '/v1/resource/%s/'%(self.resource.id),
            'name': 'api-meta',
            'value': 'api-meta-test',
            'type': 0
        }, status=201)
        meta = self.getMeta()
        self.assertEqual(len(meta['objects']), 1)
        self.assertEqual(meta['objects'][0]['name'], 'api-meta')
        self.assertEqual(meta['objects'][0]['value'], 'api-meta-test')
class MemberTest(APITestMixin):
    """Tests for member login/logout and the member search endpoint."""

    def setUp(self):
        self.setupAPI()

    def search(self, status=200, **kwargs):
        # Issue a member search with the given query parameters.
        return self.getAPI('/v1/member/search/', kwargs, status=status)

    def testLogin(self):
        # Valid credentials yield a 200 and a session cookie.
        response = self.postAPIRaw('/v1/member/login/', {'username': 'test', 'password': 'test'})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.cookies.has_key('sessionid'))

    def testBadLogin(self):
        # Wrong password yields 401 and no session cookie.
        response = self.postAPIRaw('/v1/member/login/', {'username': 'test',
            'password': 'nottest'})
        self.assertEqual(response.status_code, 401)
        self.assertFalse(response.cookies.has_key('sessionid'))

    def testDisabledLogin(self):
        # A deactivated account is refused with 403 even with a valid password.
        self.user.is_active = False
        self.user.save()
        response = self.postAPIRaw('/v1/member/login/', {'username': 'test',
            'password': 'test'})
        self.assertEqual(response.status_code, 403)
        self.assertFalse(response.cookies.has_key('sessionid'))

    def testLogout(self):
        response = self.postAPIRaw('/v1/member/login/', {'username': 'test', 'password': 'test'})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.cookies.has_key('sessionid'))
        session = response.cookies.get('sessionid')
        response = self.getAPIRaw('/v1/member/logout/')
        self.assertTrue(response.cookies.has_key('sessionid'))
        # NOTE(review): this compares the old cookie to has_key()'s boolean,
        # not to the new cookie value -- looks like it should be
        # response.cookies.get('sessionid'); confirm intent.
        self.assertNotEqual(session, response.cookies.has_key('sessionid'))

    def testSearchFullname(self):
        ret = self.search(fullName='Test McTesterson')
        self.assertIn('objects', ret)
        self.assertEqual(len(ret['objects']), 1)

    def testSearchPartialFirst(self):
        ret = self.search(fullName='Test')
        self.assertIn('objects', ret)
        self.assertEqual(len(ret['objects']), 1)

    def testSearchPartialLast(self):
        ret = self.search(fullName='McTesterson')
        self.assertEqual(len(ret['objects']), 1)

    def testSearchPartialMultiple(self):
        # A substring shared by two users must match both of them.
        guesterson = User.objects.create_user('guest', 'guest@example.com', 'guest')
        guesterson.first_name = 'Guest'
        guesterson.last_name = 'McGuesterson'
        guesterson.save()
        ret = self.search(fullName='esterson')
        self.assertIn('objects', ret)
        self.assertEqual(len(ret['objects']), 2)
| Python | 0 | |
a5b012db4cb4cc8a988c0ed37411194639dd1bbd | add tester.py module to pytools | lib/tester.py | lib/tester.py | #!/usr/bin/env python
"""
Package: pytools
Author: Christopher Hanley
Purpose:
========
Provide driver function for package tests.
Dependencies:
=============
- nose 0.10.4 or greater.
Usage Example:
==============
All packages will need to import jwtools.tester and add the following
function to the __init__.py of their package:
def test(*args,**kwds):
thisdir = os.path.dirname(os.path.abspath(__file__))
pytools.tester.test(curdir=thisdir)
This assumes that all software packages are installed with the structure:
package/
__init__.py
modules.py
/tests
Where the /tests subdirectory containts the nose tests.
"""
from __future__ import division
import os,sys
def test(*args,**kwds):
    """
    Purpose:
    ========
    test: Run refcore nosetest suite of tests. The tests are located in the
    /test directory of the installed modules.
    """
    # NOTE: positional *args are ignored; the nose argv is rebuilt below.
    try:
        thisdir = kwds['curdir']
    except KeyError:
        # Default to the directory containing this module.
        thisdir = os.path.dirname(os.path.abspath(__file__))
    DIRS=['/tests']
    args=[]
    for dirname in DIRS:
        # '-w <dir>' tells nose which directory to collect tests from.
        args.append('-w')
        args.append(thisdir+dirname)
    result = False
    try:
        import nose, nose.core
        result = nose.run(argv=args)
    except ImportError:
        # nose is an optional dependency; report and return False.
        print "Nose 0.10.4 or greater is required for running tests."
    return result
| Python | 0.000006 | |
6efc045d34f432723b52aa094c1caec3bf102e96 | add sparse repeated updates benchmark | benchmarks/sparse_repeated_updates.py | benchmarks/sparse_repeated_updates.py | import numpy as np
import theano
import theano.tensor as T
fX = theano.config.floatX
s = theano.shared(np.ones((10, 1), dtype=fX))
idxs = [0, 1, 1]
fn = theano.function([], updates=[(s, T.inc_subtensor(s[idxs], s[idxs] ** 2))])
fn()
print s.get_value()
| Python | 0 | |
4b43906004f9bfb6164bb2c0b95efaf1dbb881c8 | add py | correction_image.py | correction_image.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Project Apricot
# Copyright (c) 2015 Daiki, Takanori.
| Python | 0.00008 | |
fc44d4463045e458796d13b3c97b34cf6ba47f61 | Add script to create the player pitch weights. | bluechip/player/createpitchweights.py | bluechip/player/createpitchweights.py | import random
from player.models import Player, Pitch, PlayerPitchWeight
#TODO: Need to centralize this function call.
random.seed(123456789)
pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()
for p in Player.objects.all():
weights = []
sum_weights = 0
for _ in xrange(pitches_count):
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
w = random.normalvariate(mu, sigma)
w = max(w, 0.0)
weights.append(w)
sum_weights += w
# Normalize weights before creating records
for i in xrange(len(weights)):
weights[i] /= sum_weights
j = 0
for pitch in pitch_records:
ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
ppw.save()
j += 1
| Python | 0 | |
d3f68c385da4d2fa864ba748f41785be01c26c34 | Add py solution for 551. Student Attendance Record I | py/student-attendance-record-i.py | py/student-attendance-record-i.py | class Solution(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
A = False
L = 0
for c in s:
if c == 'L':
L += 1
if L > 2:
return False
else:
L = 0
if c == 'A':
if A:
return False
else:
A = True
return True
| Python | 0.998417 | |
a1ee4d90e0cf159f27274423b989c98844fbeba1 | Create mytask1b.py | ml/mytask1b.py | ml/mytask1b.py | """ Features
The objective of this task is to explore the corpus, deals.txt.
The deals.txt file is a collection of deal descriptions, separated by a new line, from which
we want to glean the following insights:
1. What is the most popular term across all the deals?
2. What is the least popular term across all the deals?
3. How many types of guitars are mentioned across all the deals?
"""
####################################################
# Solution 2 of Q1:
# Use topia.termextract 1.1.0 for term extraction
#
####################################################
# load term extraction library
from topia.termextract import extract
extractor = extract.TermExtractor()
# define the trivial permissive filter
extractor.filter = extract.permissiveFilter
# load data
openfile = open('..\data\deals.txt', 'r')
d = {}
numberguitars = 0
for line in openfile:
terms = extractor(line)
# empty
if not terms:
continue
# take each term from terms
for term in terms:
# aggregate dictionary for each term
if not (term[0] in d):
d[term[0]] = 0
d[term[0]] += term[1]
# count guitar
if 'guitar' in term or 'guitars' in term:
numberguitars += 1
else:
if 'Guitar' in term or 'Guitars' in term:
numberguitars += 1
v = list(d.values())
maxvalue = max(v)
minvalue = min(v)
maxkeys = []
minkeys = []
for k, v in d.items():
if v == maxvalue:
maxkeys.append(k)
if v == minvalue:
minkeys.append(k)
# output results
print "1. the most popular terms\n", maxkeys
#print "2. the least popular terms\n", minkeys
print "3. the number of types of guitars", numberguitars
| Python | 0.999813 | |
714e2e2ae5e8412ef522dc64666e6548307eec07 | Add the init method to the topic model. | model/topic.py | model/topic.py | class TopicModel(Query):
    def __init__(self, db):
        """Bind a database handle and register the backing table name.

        :param db: database connection/wrapper kept on the instance
        """
        self.db = db
        self.table_name = "topic"
        # Base-class init runs last so it can see db/table_name.
        # NOTE(review): Query is defined elsewhere -- confirm it reads these
        # attributes during its __init__.
        super(TopicModel, self).__init__()
| Python | 0 | |
168c45fa913670c7f6d89ffc799fa9d13454d734 | add multi-layer convolutional net for mnist | multi-layer.py | multi-layer.py | """
solving mnist classification problem using tensorflow
multi-layer architecture
"""
# Config
BATCH_SIZE = 50
ITERATIONS = 20000
# Setup Logging: everything goes to logfile.log, mirrored to the console.
import logging
logging_format = '%(asctime)s - %(levelname)s - %(message)s'
log_level = logging.DEBUG
logging.basicConfig(filename='logfile.log',format=logging_format,level=log_level)
# create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(log_level)
# create formatter
formatter = logging.Formatter(logging_format)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.debug('STARTING MULTI-LAYER MNIST')
# Load MNIST dataset (downloads into MNIST_data/ on first run).
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
# Import TensorFlow and start interactive Session
import tensorflow as tf
session = tf.InteractiveSession()
# Create tf placeholders for input data and predictions
# x will be a 2d tensor with all images of the current batch * flattened pixel
# of the input image.
# y_ will be the probabilities for every image in the batch and every digit
# class
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
# functionality to create weight-variables and bias-variables
def weight_variable(shape):
    """Create a weight Variable initialised from a truncated normal (std 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable filled with the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
# convolution and pooling
def conv2d(x, W):
    """2-D convolution with stride 1 and zero ('SAME') padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 ('SAME' padding): halves each spatial dim."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# first convolutional layer: 5x5 kernels, 1 input channel -> 32 feature maps
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# reshape x from flat 784 back to 28x28x1 images
x_image = tf.reshape(x, [-1,28,28,1])
# convolve x_image; pooling halves 28x28 -> 14x14
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# second convolutional layer: 32 -> 64 feature maps; pooling 14x14 -> 7x7
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# densely conntected layer: flatten 7*7*64 and map to 1024 units
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# dropout: keep_prob is fed at run time (0.5 for training, 1.0 for eval)
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# readout layer: 1024 -> 10 digit-class logits
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# train & evaluate model
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# start time measurement
import time
start = time.time()
# initial logging
logger.debug('starting computation (batch-size: %d, iterations=%d)'%(BATCH_SIZE, ITERATIONS))
session.run(tf.global_variables_initializer())
for i in range(ITERATIONS):
    batch = mnist.train.next_batch(BATCH_SIZE)
    # Every 100 steps, log training accuracy (dropout disabled) and timing.
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:batch[0], y_: batch[1], keep_prob: 1.0})
        logging.debug("step %d, training accuracy %g"%(i, train_accuracy))
        time_elapsed = time.time() - start
        logger.debug('time elapsed: %.2fs'%(time_elapsed))
        logger.debug('mean seconds/batch: %fs'%(time_elapsed/(i+1)))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# stop time measurement
end = time.time()
computation_time = end - start
# print accuracy of test data & computation time
logger.debug("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
logger.debug('computation time: %.2fs'%(computation_time))
| Python | 0.000003 | |
c795f8e21d2b400134cb52ef7eae2cc7e26cfd99 | Create ada.py | ada.py | ada.py | Python | 0.00017 | ||
028831c53d27452168b7a430eb713e01c966acb0 | add privacy policy as first legal check | accelerator/migrations/0006_add_privacy_policy_legal_check.py | accelerator/migrations/0006_add_privacy_policy_legal_check.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-14 09:09
from __future__ import unicode_literals
from django.db import migrations
def add_privacy_policy_legal_check(apps, schema_editor):
    """Data migration (forward): create the privacy-policy LegalCheck row."""
    # Use the historical model via apps.get_model, not a direct import,
    # so the migration works regardless of later model changes.
    LegalCheck = apps.get_model('accelerator', 'LegalCheck')
    LegalCheck.objects.create(
        name='accepted_privacy_policy',
        title='The MassChallenge Privacy Policy',
        url='https://masschallenge.org/privacy-policy'
    )
def remove_privacy_policy_legal_check(apps, schema_editor):
    """Data migration (reverse): delete the privacy-policy LegalCheck row by name."""
    LegalCheck = apps.get_model('accelerator', 'LegalCheck')
    LegalCheck.objects.filter(name='accepted_privacy_policy').delete()
class Migration(migrations.Migration):
    dependencies = [
        ('accelerator', '0005_legalcheck_legalcheckacceptance'),
    ]
    operations = [
        # Reverse callable provided, so this data migration is reversible.
        migrations.RunPython(add_privacy_policy_legal_check,
                             remove_privacy_policy_legal_check),
    ]
| Python | 0 | |
a2975adeedcc4aa33ee8b63bd404675bb3453089 | Add broker app. | apps/broker.py | apps/broker.py | """
Alter item database.
"""
import logging
import sys
import os
# import hack to avoid PYTHONPATH
try:
import pydarkstar
except ImportError:
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
root, dirs, files = next(os.walk(root))
if 'pydarkstar' in dirs:
sys.path.insert(1, root)
import pydarkstar
else:
raise
import pydarkstar.logutils
import pydarkstar.options
import pydarkstar.common
class Options(pydarkstar.options.Options):
    """
    Reads options from config file, then from command line.
    """
    def __init__(self):
        # The module docstring (__doc__) becomes the CLI description;
        # defaults may be overridden by broker.yaml and then by argv.
        super(Options, self).__init__(config='broker.yaml', description=__doc__)
        # logging
        self.verbose = False  # error, info, and debug
        self.silent = False   # error only
        # input and output
        self.save = False     # save config
        # logging
        self.add_argument('--verbose', action='store_true',
            help='report debug, info, and error')
        self.add_argument('--silent', action='store_true',
            help='report error only')
        # output
        self.add_argument('--save', action='store_true',
            help='save config file (and exit)')
def main():
    """
    Main function: parse options, configure logging, and optionally
    dump the config file and exit (--save).
    """
    # get options
    opts = Options()
    opts.parse_args()
    pydarkstar.logutils.basicConfig(
        verbose=opts.verbose, silent=opts.silent, fname='broker.log')
    logging.debug('start')
    # log options
    opts.log_values(level=logging.INFO)
    # save options
    if opts.save:
        # reset the flag first so it is not persisted into the dumped config
        opts.save = False
        opts.dump()
        return
def cleanup():
    """Log a final 'exit' line (called after main, even on captured errors)."""
    logging.info('exit\n')
if __name__ == '__main__':
    # logutils.capture presumably logs uncaught exceptions -- defined in pydarkstar
    with pydarkstar.logutils.capture():
        main()
    cleanup()
997339beae952b0a3d63644546d02b257164a1a2 | add tests for marching cubes and mesh surface area | skimage/measure/tests/test_marching_cubes.py | skimage/measure/tests/test_marching_cubes.py | import numpy as np
from numpy.testing import assert_raises
from scipy.special import (ellipkinc as ellip_F, ellipeinc as ellip_E)
from skimage.measure import marching_cubes, mesh_surface_area
def _ellipsoid(a, b, c, sampling=(1., 1., 1.), info=False, tight=False,
               levelset=False):
    """
    Generates ellipsoid with semimajor axes aligned with grid dimensions,
    on grid with specified `sampling`.
    Parameters
    ----------
    a : float
        Length of semimajor axis aligned with x-axis
    b : float
        Length of semimajor axis aligned with y-axis
    c : float
        Length of semimajor axis aligned with z-axis
    sampling : tuple of floats, length 3
        Sampling in each spatial dimension
    info : bool
        If False, only `bool_arr` returned.
        If True, (`bool_arr`, `vol`, `surf`) returned; the additional
        values are analytical volume and surface area calculated for
        this ellipsoid.
    tight : bool
        Controls if the ellipsoid will precisely be contained within
        the returned volume (tight=True) or if each dimension will be
        2 longer than necessary (tight=False). For algorithms which
        need both sides of a contour, use False.
    levelset : bool
        If True, returns the level set for this ellipsoid (signed level
        set about zero; values are negative inside the ellipsoid and
        positive outside) as np.float64.
        False returns a binarized version of said level set.
    Returns
    -------
    bool_arr : (N, M, P) array
        Ellipsoid in an appropriately sized boolean (or, with
        levelset=True, float) array.
    vol : float
        Analytically calculated volume of ellipsoid. Only returned if
        `info` is True.
    surf : float
        Analytically calculated surface area of ellipsoid. Only returned
        if `info` is True.
    """
    if not tight:
        # pad by one sampling step on every side so the contour is interior
        offset = np.r_[1, 1, 1] * np.r_[sampling]
    else:
        offset = np.r_[0, 0, 0]
    # Calculate limits, and ensure output volume is odd & symmetric
    low = np.ceil((-np.r_[a, b, c] - offset))
    high = np.floor((np.r_[a, b, c] + offset + 1))
    for dim in range(3):
        if (high[dim] - low[dim]) % 2 == 0:
            low[dim] -= 1
        num = np.arange(low[dim], high[dim], sampling[dim])
        if 0 not in num:
            # shift the grid so that 0 lands exactly on a sample point
            # (np.max(num[num < 0]) is negative, so this moves `low` up)
            low[dim] -= np.max(num[num < 0])
    # Generate (anisotropic) spatial grid
    x, y, z = np.mgrid[low[0]:high[0]:sampling[0],
                       low[1]:high[1]:sampling[1],
                       low[2]:high[2]:sampling[2]]
    if not levelset:
        # boolean mask: True inside/on the ellipsoid
        arr = ((x / float(a)) ** 2 +
               (y / float(b)) ** 2 +
               (z / float(c)) ** 2) <= 1
    else:
        # signed level set: negative inside, zero on the surface
        arr = ((x / float(a)) ** 2 +
               (y / float(b)) ** 2 +
               (z / float(c)) ** 2) - 1
    if not info:
        return arr
    else:
        # Surface calculation requires a >= b >= c and a != c.
        # Note: this reorders the local semi-axes only; volume and area
        # are symmetric under permutation, the returned grid is not changed.
        abc = [a, b, c]
        abc.sort(reverse=True)
        a = abc[0]
        b = abc[1]
        c = abc[2]
        # Volume
        vol = 4 / 3. * np.pi * a * b * c
        # Analytical ellipsoid surface area
        # (Legendre form using incomplete elliptic integrals F and E)
        phi = np.arcsin((1. - (c ** 2 / (a ** 2.))) ** 0.5)
        d = float((a ** 2 - c ** 2) ** 0.5)
        m = (a ** 2 * (b ** 2 - c ** 2) /
             float(b ** 2 * (a ** 2 - c ** 2)))
        F = ellip_F(phi, m)
        E = ellip_E(phi, m)
        surf = 2 * np.pi * (c ** 2 +
                            b * c ** 2 / d * F +
                            b * d * E)
        return arr, vol, surf
def test_marching_cubes_isotropic():
    """Mesh surface area of an isotropic ellipsoid level set vs analytical area."""
    ellipsoid_isotropic, _, surf = _ellipsoid(6, 10, 16,
                                              levelset=True,
                                              info=True)
    verts, faces = marching_cubes(ellipsoid_isotropic, 0.)
    surf_calc = mesh_surface_area(verts, faces)
    # Test within 1% tolerance for isotropic. Will always underestimate.
    assert surf > surf_calc and surf_calc > surf * 0.99
def test_marching_cubes_anisotropic():
    """Same check on an anisotropic grid, passing `sampling` through to marching_cubes."""
    sampling = (1., 10 / 6., 16 / 6.)
    # NOTE(review): variable name says "isotropic" but this is the
    # anisotropically sampled volume.
    ellipsoid_isotropic, _, surf = _ellipsoid(6, 10, 16,
                                              sampling=sampling,
                                              levelset=True,
                                              info=True)
    verts, faces = marching_cubes(ellipsoid_isotropic, 0.,
                                  sampling=sampling)
    surf_calc = mesh_surface_area(verts, faces)
    # Test within 1.5% tolerance for anisotropic. Will always underestimate.
    assert surf > surf_calc and surf_calc > surf * 0.985
def test_invalid_input():
    """marching_cubes must reject degenerate volumes, bad levels, and bad sampling."""
    assert_raises(ValueError, marching_cubes, np.zeros((2, 2, 1)), 0)
    assert_raises(ValueError, marching_cubes, np.zeros((2, 2, 1)), 1)
    assert_raises(ValueError, marching_cubes, np.ones((3, 3, 3)), 1,
                  sampling=(1, 2))
    assert_raises(ValueError, marching_cubes, np.zeros((20, 20)), 0)
if __name__ == '__main__':
    np.testing.run_module_suite()
| Python | 0 | |
edb498113441acb68511a478f2ec18c1be4f1384 | Add tests for provision state commands | ironicclient/tests/functional/osc/v1/test_baremetal_node_provision_states.py | ironicclient/tests/functional/osc/v1/test_baremetal_node_provision_states.py | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient.tests.functional.osc.v1 import base
class ProvisionStateTests(base.TestCase):
    """Functional tests for baremetal node provision state commands."""
    # node_create/node_show/openstack are provided by the functional base
    # class (defined outside this file).
    def setUp(self):
        super(ProvisionStateTests, self).setUp()
        self.node = self.node_create()
    def test_deploy_rebuild_undeploy(self):
        """Deploy, rebuild and undeploy node.
        Test steps:
        1) Create baremetal node in setUp.
        2) Check initial "available" provision state.
        3) Set baremetal node "deploy" provision state.
        4) Check baremetal node provision_state field value is "active".
        5) Set baremetal node "rebuild" provision state.
        6) Check baremetal node provision_state field value is "active".
        7) Set baremetal node "undeploy" provision state.
        8) Check baremetal node provision_state field value is "available".
        """
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])
        # deploy
        self.openstack('baremetal node deploy {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("active", show_prop["provision_state"])
        # rebuild
        self.openstack('baremetal node rebuild {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("active", show_prop["provision_state"])
        # undeploy
        self.openstack('baremetal node undeploy {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])
    def test_manage_provide(self):
        """Manage and provide node back.
        Steps:
        1) Create baremetal node in setUp.
        2) Check initial "available" provision state.
        3) Set baremetal node "manage" provision state.
        4) Check baremetal node provision_state field value is "manageable".
        5) Set baremetal node "provide" provision state.
        6) Check baremetal node provision_state field value is "available".
        """
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])
        # manage
        self.openstack('baremetal node manage {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("manageable", show_prop["provision_state"])
        # provide back
        self.openstack('baremetal node provide {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])
| Python | 0.999685 | |
a8c08baeb2ee6268ac61613a23cc86cf885a9d09 | Handle NULL deleted_at in migration 112. | nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py | nova/db/sqlalchemy/migrate_repo/versions/112_update_deleted_instance_data.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import MetaData, Table
from sqlalchemy import and_, between
TABLES = ('instance_metadata',
'instance_system_metadata',
'block_device_mapping')
def upgrade(migrate_engine):
    """Un-delete metadata/BDM rows that were soft-deleted alongside their
    deleted instances: for each deleted instance, rows in TABLES whose
    deleted_at falls within +/-2s of the instance's deleted_at are restored.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instance_list = list(instances.select().\
                        where(instances.c.deleted == True).execute())
    for table_name in TABLES:
        table = Table(table_name, meta, autoload=True)
        for instance in instance_list:
            # Skip instances with a NULL deleted_at: the +/-2s window
            # below cannot be computed for them.
            if not instance['deleted_at']:
                continue
            table.update(
                (and_(table.c.deleted == True,
                      table.c.instance_uuid == instance['uuid'],
                      between(table.c.deleted_at,
                    instance['deleted_at'] - datetime.timedelta(seconds=2),
                    instance['deleted_at'] + datetime.timedelta(seconds=2)))
                ),
                {table.c.deleted: False,
                 table.c.deleted_at: None}
                ).execute()
def downgrade(migrate_engine):
    """Re-mark metadata/BDM rows of deleted instances as deleted.
    NOTE(review): this is lossy -- it flags *every* non-deleted row of a
    deleted instance, not just the ones upgrade() restored; confirm this
    matches the intended downgrade semantics.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    instances = Table('instances', meta, autoload=True)
    instance_list = list(instances.select().\
                        where(instances.c.deleted == True).execute())
    for table_name in TABLES:
        table = Table(table_name, meta, autoload=True)
        for instance in instance_list:
            table.update(
                (and_(table.c.deleted == False,
                      table.c.instance_uuid == instance['uuid'])
                ),
                {table.c.deleted: True,
                 table.c.deleted_at: instance['deleted_at']}
                ).execute()
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from sqlalchemy import MetaData, Table
from sqlalchemy import and_, between
TABLES = ('instance_metadata',
'instance_system_metadata',
'block_device_mapping')
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instance_list = list(instances.select().\
where(instances.c.deleted == True).execute())
for table_name in TABLES:
table = Table(table_name, meta, autoload=True)
for instance in instance_list:
table.update(
(and_(table.c.deleted == True,
table.c.instance_uuid == instance['uuid'],
between(table.c.deleted_at,
instance['deleted_at'] - datetime.timedelta(seconds=2),
instance['deleted_at'] + datetime.timedelta(seconds=2)))
),
{table.c.deleted: False,
table.c.deleted_at: None}
).execute()
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instance_list = list(instances.select().\
where(instances.c.deleted == True).execute())
for table_name in TABLES:
table = Table(table_name, meta, autoload=True)
for instance in instance_list:
table.update(
(and_(table.c.deleted == False,
table.c.instance_uuid == instance['uuid'])
),
{table.c.deleted: True,
table.c.deleted_at: instance['deleted_at']}
).execute()
| Python | 0.000002 |
7dbab1a6615a49513fe16c74550ddf2f52b0f698 | Create 4-keys-keyboard.py | Python/4-keys-keyboard.py | Python/4-keys-keyboard.py | # Time: O(n)
# Space: O(1)
class Solution(object):
    def maxA(self, N):
        """
        Return the maximum number of 'A's producible with N presses of a
        4-key keyboard (A, Ctrl-A, Ctrl-C, Ctrl-V).

        :type N: int
        :rtype: int
        """
        # For N <= 6 pressing 'A' every time is optimal.
        if N <= 6:
            return N
        # Rolling buffer of the last six answers, indexed by i % 6, seeded
        # with the answers for i = 1..6 (slot 0 holds the answer for i == 6).
        # The original allocated an O(N) list of which only six slots were
        # ever read or written, contradicting the advertised O(1) space;
        # it also used Python-2-only xrange.
        dp = [6, 1, 2, 3, 4, 5]
        for i in range(7, N + 1):
            # Best of: select+copy 4 presses ago then paste twice (x3),
            # or select+copy 5 presses ago then paste three times (x4).
            dp[i % 6] = max(dp[(i - 4) % 6] * 3, dp[(i - 5) % 6] * 4)
        return dp[N % 6]
| Python | 0.999774 | |
be9c88b630ea243afdef3d87ac0b316bd3300281 | Add 283-move-zeroes.py | 283-move-zeroes.py | 283-move-zeroes.py | """
Question:
Move Zeroes
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.
For example, given nums = [0, 1, 0, 3, 12], after calling your function, nums should be [1, 3, 12, 0, 0].
Note:
You must do this in-place without making a copy of the array.
Minimize the total number of operations.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
Performance:
1. Total Accepted: 15730 Total Submissions: 38045 Difficulty: Easy
2. Sorry. We do not have enough accepted submissions.
"""
class Solution(object):
    def moveZeroes(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        # Stable compaction: copy every non-zero value toward the front,
        # keeping relative order, then overwrite the leftover tail with 0s.
        write = 0
        for value in nums:
            if value != 0:
                nums[write] = value
                write += 1
        for tail in range(write, len(nums)):
            nums[tail] = 0
def test_func(nums, result):
    """Run moveZeroes on `nums` in place and assert it equals `result`."""
    Solution().moveZeroes(nums)
    # the list pair is attached to the assertion message for easier debugging
    assert nums == result, [nums, result]
# exercise edge cases: empty, single element, all zeros, no zeros, mixed
test_func([], [])
test_func([0], [0])
test_func([1], [1])
test_func([0, 0], [0, 0])
test_func([0, 1], [1, 0])
test_func([1, 1], [1, 1])
test_func([0, 1, 0, 3, 12], [1, 3, 12, 0, 0])
test_func([0, 1, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0])
test_func([0, 1, 0, 0, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0, 0, 0])
| Python | 0.011626 | |
5d7f2fdfb1b850aacaf29ba76c7e5ed441e6db63 | Create 32losmasgrandes.py | 32losmasgrandes.py | 32losmasgrandes.py | #Integrantes del equipo
#Chavez Pavon Jose Manuel
#Ramirez Ramirez Servando
#Saules Rojas David
#Lopez Adriana
import random
# Build the list used to simulate the heights of the 32 people,
# filled with uniformly random values.
def lista():
    """Return a list of 32 random integers in [1, 300]."""
    return [random.randint(1, 300) for _ in range(32)]
# Tournament search for the tallest person: repeatedly compare the list in
# pairs, keep the winners, and record, per winner, every value it beat.
# When one champion remains, the second tallest must be among the values
# the champion beat directly.
# param "lista" = the (even-length) list of heights for the current round
# param "dic"   = maps each winner to a space-separated string of values it beat
def primero(lista, dic):
    """Print the largest element, then (via segundo) the second largest."""
    if not lista:  # guard: empty input, nothing to compare
        return
    if len(lista) == 1:
        # lista[0] is the champion; the runner-up is the best value it beat.
        print(lista[0])
        segundo(dic[lista[0]])
        return
    ganadores = []  # winners of this round
    a = 0
    while a < len(lista):
        # ties go to the second element, matching the original comparison
        if lista[a] > lista[a + 1]:
            mayor, menor = lista[a], lista[a + 1]
        else:
            mayor, menor = lista[a + 1], lista[a]
        ganadores.append(mayor)
        # BUG FIX: accumulate every value this winner has beaten.  The
        # original assigned (dic[mayor] = ...), keeping only the *last*
        # opponent, so the runner-up could be reported incorrectly.
        dic[mayor] = dic.get(mayor, "") + str(menor) + " "
        a += 2
    primero(ganadores, dic)
# Print the second largest: the numeric maximum of the values the champion
# beat, received as a space-separated string.
# param "cadena" = the dictionary entry keyed by the largest element
def segundo(cadena):
    """Print the numerically largest value in the space-separated string."""
    valores = cadena.split()
    # BUG FIX: the original used plain max() over strings, which compares
    # lexicographically (e.g. "9" > "100") and gave wrong answers.
    print(max(valores, key=int))
    return
l = lista()  # build the list of heights to process
dicc={}  # dictionary of the elements that were compared but did not win
primero(l,dicc)  # run the tournament: prints the largest, then the second largest
| Python | 0.000132 | |
a7ccd7bc02476cfad85280ff1e742671453360de | Add Digital Outcomes and Specialists to frameworks | migrations/versions/420_dos_is_coming.py | migrations/versions/420_dos_is_coming.py | """DOS is coming
Revision ID: 420
Revises: 410_remove_empty_drafts
Create Date: 2015-11-16 14:10:35.814066
"""
# revision identifiers, used by Alembic.
revision = '420'
down_revision = '410_remove_empty_drafts'
from alembic import op
import sqlalchemy as sa
from app.models import Framework
def upgrade():
    """Add the 'dos' framework enum value and the DOS framework row (idempotent)."""
    # NOTE(review): the explicit COMMIT ends the migration transaction --
    # presumably because ALTER TYPE ... ADD VALUE cannot run inside a
    # transaction block on PostgreSQL; confirm for the target backend.
    op.execute("COMMIT")
    op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'")
    # Only insert the row if it does not already exist.
    framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first()
    if not framework:
        op.execute("""
            INSERT INTO frameworks (name, framework, status, slug)
            values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists')
        """)
def downgrade():
    """Remove the DOS framework row (the enum value itself is not removed)."""
    op.execute("""
        DELETE FROM frameworks where slug='digital-outcomes-and-specialists'
    """)
| Python | 0 | |
273f0bd289d62c6980f095b0a8bb41a973b0678f | add import script for Bradford | polling_stations/apps/data_collection/management/commands/import_bradford.py | polling_stations/apps/data_collection/management/commands/import_bradford.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Polling-station import command for Bradford (GSS code E08000032)."""
    council_id = 'E08000032'
    # Addresses and stations come from the same tab-separated export.
    # NOTE(review): the doubled ".tsvJune2017.tsv" suffix looks mangled,
    # but it must match the actual data file name -- confirm before changing.
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsvJune2017.tsv'
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsvJune2017.tsv'
    elections = ['parl.2017-06-08']
    csv_delimiter = '\t'
| Python | 0 | |
12ba7e0c6db91f5ee46a1be9acaece110f98b911 | add bigwig file reader | PyMaSC/bwreader.py | PyMaSC/bwreader.py | import os
import wWigIO
class BigWigFile(object):
    """Thin wrapper around wWigIO for reading (and converting to) BigWig files."""
    @staticmethod
    def wigToBigWig(wigfile, sizefile, bwfile):
        """Convert a wiggle file to BigWig using a chromosome-size file."""
        wWigIO.wigToBigWig(wigfile, sizefile, bwfile)
    @staticmethod
    def bigWigToWig(bwfile, wigfile):
        """Convert a BigWig file back to wiggle format."""
        wWigIO.bigWigToWig(bwfile, wigfile)
    def __init__(self, path, chrom_size=None):
        """
        Open `path` as a BigWig file.  A ".wig" path is first converted to
        BigWig (requires `chrom_size` unless the converted file already
        exists next to the input).
        """
        # Mark as closed up front so close()/__del__ are safe even if we
        # raise before the file is actually opened.
        self.closed = True
        if not os.path.exists(path) and path != '-':
            raise IOError("input file '{0}' does not exist.".format(path))
        elif path == '-':
            # NOTE(review): "stdin" is handed to wWigIO.open as a literal
            # name -- confirm wWigIO treats it specially.
            path = "stdin"
        # BUG FIX: os.path.splitext returns a (root, ext) tuple; the
        # original unpacked splitext(path)[0] (the root *string*) into two
        # names, which only worked for two-character roots.
        prefix, ext = os.path.splitext(path)
        # BUG FIX: splitext keeps the leading dot, so the original
        # comparison against "wig" never matched.
        if ext == ".wig":
            bwfile = prefix + ".bw"
            if os.path.exists(bwfile):
                self.path = bwfile
            else:
                if chrom_size is None:
                    raise IOError("Failed to convert wig to bigwig. 'chrom_size' file required.")
                BigWigFile.wigToBigWig(path, chrom_size, bwfile)
                self.path = bwfile
        else:
            self.path = path
        wWigIO.open(self.path)
        self.set_chromsizes()
        self.closed = False
    def set_chromsizes(self):
        """Cache the {chrom: size} mapping for the open file."""
        self.chromsizes = wWigIO.getChromSize(self.path)
    def _getIntervals(self, chrom, begin, end):
        """Return raw interval tuples; wWigIO signals errors with int codes."""
        wigs = wWigIO.getIntervals(self.path, chrom, begin, end)
        if wigs == 1:
            raise ValueError("wWigIO.getIntervals doesn't have correct parameters.")
        if wigs == 2:
            raise ValueError("Fail to open BigWig file.")
        return wigs
    def fetch(self, chrom=None, begin=None, end=None):
        """
        Yield (chrom, start, stop, value) tuples.  With chrom=None every
        chromosome is scanned; begin/end default to 0 (presumably meaning
        the whole chromosome in wWigIO -- TODO confirm).
        """
        if chrom is None:
            chroms = self.chromsizes.keys()
        else:
            chroms = [chrom]
        if begin is None or begin < 0:
            begin = 0
        if end is None:
            end = 0
        for chrom in chroms:
            for wig in self._getIntervals(chrom, begin, end):
                yield chrom, wig[0], wig[1], wig[2]
    def close(self):
        """Close the underlying wWigIO handle (idempotent)."""
        # getattr guard: __del__ may run even if __init__ raised early.
        if not getattr(self, 'closed', True):
            # BUG FIX: the original passed self.infile, an attribute never
            # set anywhere in this class; self.path is the handle name used
            # by every other wWigIO call here.
            wWigIO.close(self.path)
            self.closed = True
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def __del__(self):
        self.close()
| Python | 0 | |
c03cd3f85e9df113ef10833eaedfc846adde45f6 | Add an example for the job board feature | taskflow/examples/job_board_no_test.py | taskflow/examples/job_board_no_test.py | # -*- encoding: utf-8 -*-
#
# Copyright ยฉ 2013 eNovance <licensing@enovance.com>
#
# Authors: Dan Krause <dan@dankrause.net>
# Cyril Roelandt <cyril.roelandt@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This example shows how to use the job board feature.
#
# Let's start by creating some jobs:
# $ python job_board_no_test.py create my-board my-job '{}'
# $ python job_board_no_test.py create my-board my-job '{"foo": "bar"}'
# $ python job_board_no_test.py create my-board my-job '{"foo": "baz"}'
# $ python job_board_no_test.py create my-board my-job '{"foo": "barbaz"}'
#
# Make sure they were registered:
# $ python job_board_no_test.py list my-board
# 7277181a-1f83-473d-8233-f361615bae9e - {}
# 84a396e8-d02e-450d-8566-d93cb68550c0 - {u'foo': u'bar'}
# 4d355d6a-2c72-44a2-a558-19ae52e8ae2c - {u'foo': u'baz'}
# cd9aae2c-fd64-416d-8ba0-426fa8e3d59c - {u'foo': u'barbaz'}
#
# Perform one job:
# $ python job_board_no_test.py consume my-board \
# 84a396e8-d02e-450d-8566-d93cb68550c0
# Performing job 84a396e8-d02e-450d-8566-d93cb68550c0 with args \
# {u'foo': u'bar'}
# $ python job_board_no_test.py list my-board
# 7277181a-1f83-473d-8233-f361615bae9e - {}
# 4d355d6a-2c72-44a2-a558-19ae52e8ae2c - {u'foo': u'baz'}
# cd9aae2c-fd64-416d-8ba0-426fa8e3d59c - {u'foo': u'barbaz'}
#
# Delete a job:
# $ python job_board_no_test.py delete my-board \
# cd9aae2c-fd64-416d-8ba0-426fa8e3d59c
# $ python job_board_no_test.py list my-board
# 7277181a-1f83-473d-8233-f361615bae9e - {}
# 4d355d6a-2c72-44a2-a558-19ae52e8ae2c - {u'foo': u'baz'}
#
# Delete all the remaining jobs
# $ python job_board_no_test.py clear my-board
# $ python job_board_no_test.py list my-board
# $
import argparse
import contextlib
import json
import os
import sys
import tempfile
import taskflow.jobs.backends as job_backends
from taskflow.persistence import logbook
import example_utils # noqa
@contextlib.contextmanager
def jobboard(*args, **kwargs):
    """
    Context manager yielding a connected jobboard backend, guaranteeing the
    backend is closed again afterwards.
    """
    jb = job_backends.fetch(*args, **kwargs)
    jb.connect()
    # BUG FIX: the original called jb.close() after a bare yield, so the
    # backend leaked whenever the with-body raised.  try/finally closes it
    # on both the normal and the exception path.
    try:
        yield jb
    finally:
        jb.close()


# Backend configuration shared by every subcommand (ZooKeeper on localhost).
conf = {
    'board': 'zookeeper',
    'hosts': ['127.0.0.1:2181']
}
def consume_job(args):
    """Claim, perform, and consume the job matching args.job_uuid."""
    def perform_job(job):
        # placeholder "work": just report what would be executed
        print("Performing job %s with args %s" % (job.uuid, job.details))
    with jobboard(args.board_name, conf) as jb:
        # ensure_fresh=True forces a refresh from the board before iterating
        for job in jb.iterjobs(ensure_fresh=True):
            if job.uuid == args.job_uuid:
                jb.claim(job, "test-client")
                perform_job(job)
                jb.consume(job, "test-client")
def clear_jobs(args):
    """Claim and consume every job on the board without performing them."""
    with jobboard(args.board_name, conf) as jb:
        for job in jb.iterjobs(ensure_fresh=True):
            jb.claim(job, "test-client")
            jb.consume(job, "test-client")
def create_job(args):
    """Post a new job (name + JSON details) backed by a persisted logbook."""
    store = json.loads(args.details)
    book = logbook.LogBook(args.job_name)
    # Pick a persistence backend: sqlite when SQLAlchemy is available,
    # otherwise a plain file-based store in the temp directory.
    if example_utils.SQLALCHEMY_AVAILABLE:
        persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
        backend_uri = "sqlite:///%s" % (persist_path)
    else:
        persist_path = os.path.join(tempfile.gettempdir(), "persisting")
        backend_uri = "file:///%s" % (persist_path)
    with example_utils.get_backend(backend_uri) as backend:
        backend.get_connection().save_logbook(book)
        with jobboard(args.board_name, conf, persistence=backend) as jb:
            jb.post(args.job_name, book, details=store)
def list_jobs(args):
    """Print "<uuid> - <details>" for every job currently on the board."""
    with jobboard(args.board_name, conf) as jb:
        for job in jb.iterjobs(ensure_fresh=True):
            print("%s - %s" % (job.uuid, job.details))
def delete_job(args):
    """Remove the job matching args.job_uuid by claiming and consuming it."""
    with jobboard(args.board_name, conf) as jb:
        for job in jb.iterjobs(ensure_fresh=True):
            if job.uuid == args.job_uuid:
                jb.claim(job, "test-client")
                jb.consume(job, "test-client")
def main(argv):
    """Parse the subcommand from argv and dispatch to its handler."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(title='subcommands',
                                       description='valid subcommands',
                                       help='additional help')
    # Each subparser stores its handler in `func` (set_defaults) so the
    # dispatch at the bottom is uniform.
    # Consume command
    parser_consume = subparsers.add_parser('consume')
    parser_consume.add_argument('board_name')
    parser_consume.add_argument('job_uuid')
    parser_consume.set_defaults(func=consume_job)
    # Clear command
    parser_consume = subparsers.add_parser('clear')
    parser_consume.add_argument('board_name')
    parser_consume.set_defaults(func=clear_jobs)
    # Create command
    parser_create = subparsers.add_parser('create')
    parser_create.add_argument('board_name')
    parser_create.add_argument('job_name')
    parser_create.add_argument('details')
    parser_create.set_defaults(func=create_job)
    # Delete command
    parser_delete = subparsers.add_parser('delete')
    parser_delete.add_argument('board_name')
    parser_delete.add_argument('job_uuid')
    parser_delete.set_defaults(func=delete_job)
    # List command
    parser_list = subparsers.add_parser('list')
    parser_list.add_argument('board_name')
    parser_list.set_defaults(func=list_jobs)
    args = parser.parse_args(argv)
    args.func(args)
if __name__ == '__main__':
    # argv[0] (the script name) is stripped before parsing
    main(sys.argv[1:])
| Python | 0.000033 | |
c0f690fe1d43edc4fc5cc4b3aeb40594c1abd674 | Create pollard_rho_algorithm.py | daedalus/attacks/pollard_rho_algorithm.py | daedalus/attacks/pollard_rho_algorithm.py | #pollard rho algorithm of integer factorization
def gcd(a, b):
    """Greatest common divisor via the recursive Euclidean algorithm."""
    # BUG FIX: the original tested ``a is 0`` -- an identity comparison
    # with an int literal that only happens to work because of CPython's
    # small-integer cache.  Use value equality.
    if a == 0:
        return b
    return gcd(b % a, a)


def pollard_rho(number, x, y):
    """
    One run of Pollard's rho factorisation on `number` with seeds x, y.

    x advances one f(v) = v**2 + 1 (mod number) step per iteration and y
    two steps (Floyd cycle detection).  Returns the first
    gcd(|x - y|, number) != 1: either a non-trivial factor, or `number`
    itself on failure (caller should retry with different seeds).
    """
    d = 1
    # BUG FIX: ``while d is 1`` relied on small-int identity; use ==.
    while d == 1:
        x = (x ** 2 + 1) % number
        for _ in range(2):  # y moves at twice x's speed
            y = (y ** 2 + 1) % number
        d = gcd(abs(x - y), number)
    return d
# Driver: factor `number`, retrying with fresh seeds whenever rho fails
# (i.e. returns the trivial factor `number`).  Note: this still never
# terminates if `number` is prime.
x = 2
y = 2
number = 84923983
factor = pollard_rho(number, x, y)
# BUG FIX: the original loop condition ``factor is number or 1`` was always
# truthy (``or 1``), and its body discarded pollard_rho's return value, so
# the script hung forever.  Retry only on failure and keep the new result.
while factor == number:
    x = x + 1
    y = y + 1
    factor = pollard_rho(number, x, y)
# BUG FIX: exact integer division; int(number/factor) goes through a float
# and can lose precision for large integers.
factor2 = number // factor
print(factor, factor2)
| Python | 0.000772 | |
c5dfcffdf743e2c26b8dba6e3be8aee7d7aaa608 | Test `write_*` and `join_*` on bytes | test/test_join_bytes.py | test/test_join_bytes.py | import re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
    """pytest hook: parametrize every test from the module-level `scenarios`.

    Each scenario is a (id, params_dict) pair; the values are pulled out in
    the order of the test function's fixture names.
    """
    idlist = []
    argvalues = []
    for scenario in metafunc.module.scenarios:
        idlist.append(scenario[0])
        argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
    # scope="module" shares each parametrized setup across the module's tests
    metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
# join_* tests: each checks one linesep joining mode against the scenario's
# expected bytes (arguments are injected by pytest_generate_tests above).
def test_join_preceded(entries, sep, preceded):
    assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
    assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
    assert linesep.join_separated(entries, sep) == separated
# write_* tests: same expectations, but written through a file object.
def test_write_preceded(entries, sep, preceded):
    fp = BytesIO()
    linesep.write_preceded(fp, entries, sep)
    assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
    fp = BytesIO()
    linesep.write_terminated(fp, entries, sep)
    assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
    fp = BytesIO()
    linesep.write_separated(fp, entries, sep)
    assert fp.getvalue() == separated
| Python | 0.000004 | |
a30cd68e77242df4efadc75c4390dd8a3ce68612 | Add data migration for Audit's empty status | src/ggrc/migrations/versions/20170103101308_42b22b9ca859__fix_audit_empty_status.py | src/ggrc/migrations/versions/20170103101308_42b22b9ca859__fix_audit_empty_status.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # NOTE(review): status=0 presumably matches MySQL's sentinel for
    # empty/invalid enum values -- confirm the backend is MySQL.
    op.execute("UPDATE audits SET status='Planned' WHERE status=0")
    # Relax the column from the enum to a nullable free-form string.
    op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
                    existing_type=sa.Enum(*VALID_STATES))
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # NOTE(review): existing_type is the sa.String *class* here (no length),
    # unlike the instance used in upgrade() -- verify this is intentional.
    op.alter_column('audits', 'status', nullable=False,
                    type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
| Python | 0 | |
56b44a5a510390913e2b9c9909218428842dcde8 | Match old_user_id to user_id and old_team_id to team_id | migrations/versions/542fd8471e84_match_old_to_new_user_and_team_columns.py | migrations/versions/542fd8471e84_match_old_to_new_user_and_team_columns.py | # -*- coding: utf-8 -*-
"""Match old to new user and team columns
Revision ID: 542fd8471e84
Revises: 382cde270594
Create Date: 2020-04-07 03:52:04.415019
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '542fd8471e84'
down_revision = '382cde270594'
branch_labels = None
depends_on = None
# (table, old, new)
migrate_user_columns = [
    ('comment', 'old_user_id', 'user_id'),
    ('contact_exchange', 'old_user_id', 'user_id'),
    ('participant', 'old_user_id', 'user_id'),
    ('project', 'old_user_id', 'user_id'),
    ('proposal', 'old_speaker_id', 'speaker_id'),
    ('proposal', 'old_user_id', 'user_id'),
    ('rsvp', 'old_user_id', 'user_id'),
    ('saved_project', 'old_user_id', 'user_id'),
    ('saved_session', 'old_user_id', 'user_id'),
    ('vote', 'old_user_id', 'user_id'),
]
# (table, old, new)
migrate_team_columns = [
    ('profile', 'old_admin_team_id', 'admin_team_id'),
    ('project', 'old_admin_team_id', 'admin_team_id'),
    ('project', 'old_checkin_team_id', 'checkin_team_id'),
    ('project', 'old_review_team_id', 'review_team_id'),
]
def upgrade():
    """Populate each new user/team FK column by matching the old row's uuid.

    Table and column names are interpolated into the SQL via f-strings, which
    is safe here only because they come from the hard-coded lists above.
    """
    for table, old, new in migrate_user_columns:
        print(f"Upgrading {table}.{new}")  # NOQA: T001
        op.execute(
            sa.DDL(
                f'''
            UPDATE "{table}" SET "{new}" = "user"."id"
            FROM "user", "old_user"
            WHERE "{table}"."{old}" = "old_user"."id"
            AND "old_user"."uuid" = "user"."uuid";
            '''
            )
        )
    for table, old, new in migrate_team_columns:
        print(f"Upgrading {table}.{new}")  # NOQA: T001
        op.execute(
            sa.DDL(
                f'''
            UPDATE "{table}" SET "{new}" = "team"."id"
            FROM "team", "old_team"
            WHERE "{table}"."{old}" = "old_team"."id"
            AND "old_team"."uuid" = "team"."uuid";
            '''
            )
        )
def downgrade():
    """Data-only migration: nothing to undo, the copied columns are left as-is."""
    pass
| Python | 0.999503 | |
52eb461f1679f134aed25c221cfcc63abd8d3768 | add test | test/test_importers/test_youtube_importer.py | test/test_importers/test_youtube_importer.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2016 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from mock import patch, Mock
from pybossa.importers.youtubeapi import BulkTaskYoutubeImport
def create_importer_with_form_data(**form_data):
    """Build a BulkTaskYoutubeImport with the YouTube client patched out.

    The googleapiclient `build` factory is mocked so no network/API key is
    needed; the importer's client API is replaced with a Mock for inspection.
    """
    with patch('pybossa.importers.youtubeapi.build'):
        form_data['youtube_api_server_key'] = 'apikey'
        importer = BulkTaskYoutubeImport(**form_data)
    # Replace the real API handle so tests can stub/inspect calls.
    importer.client.api = Mock()
    return importer
class TestBulkYoutubeImport(object):
form_data = {
'playlist_url': 'https://www.youtube.com/playlist?list=playlistid'
'youtube_api_server_key': 'apikey'
}
def test_count_tasks_returns_0_if_no_files_to_import(self):
form_data = {
'playlist_url': '',
'youtube_api_server_key': 'apikey'
}
number_of_tasks = BulkTaskYoutubeImport(**form_data).count_tasks()
assert number_of_tasks == 0, number_of_tasks
| Python | 0.000002 | |
1e9a64fe6324d8b4ac96daafa7427e9f55e6dd38 | add Geom.decompose tests | tests/gobj/test_geom.py | tests/gobj/test_geom.py | from panda3d import core
empty_format = core.GeomVertexFormat.get_empty()
def test_geom_decompose_in_place():
    """Decomposing in place rewrites a 4-vertex tristrip as two triangles."""
    vdata = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)

    strip = core.GeomTristrips(core.GeomEnums.UH_static)
    for vertex_index in (0, 1, 2, 3):
        strip.add_vertex(vertex_index)
    strip.close_primitive()

    geom = core.Geom(vdata)
    geom.add_primitive(strip)
    geom.decompose_in_place()

    triangles = geom.get_primitive(0)
    assert tuple(triangles.get_vertex_list()) == (0, 1, 2, 2, 1, 3)
def test_geom_decompose():
    """decompose() returns triangles while leaving the source geom untouched."""
    vdata = core.GeomVertexData("", empty_format, core.GeomEnums.UH_static)

    strip = core.GeomTristrips(core.GeomEnums.UH_static)
    for vertex_index in (0, 1, 2, 3):
        strip.add_vertex(vertex_index)
    strip.close_primitive()

    geom = core.Geom(vdata)
    geom.add_primitive(strip)

    decomposed = geom.decompose()
    triangles = decomposed.get_primitive(0)
    assert tuple(triangles.get_vertex_list()) == (0, 1, 2, 2, 1, 3)

    # The original geometry must still hold the unchanged tristrip.
    assert strip == geom.get_primitive(0)
| Python | 0.000001 | |
66b5a1089ed0ce2e615f889f35b5e39db91950ae | Fix serving uploaded files during development. | mezzanine/core/management/commands/runserver.py | mezzanine/core/management/commands/runserver.py |
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
    """Static-files handler that, on a 404 under STATIC_URL or MEDIA_URL,
    falls back to serving the file straight from ``STATIC_ROOT`` so uploaded
    media works during development."""

    def get_response(self, request):
        response = super(MezzStaticFilesHandler, self).get_response(request)
        handled = (settings.STATIC_URL, settings.MEDIA_URL)
        if response.status_code == 404 and request.path.startswith(handled):
            path = self.file_path(request.path).replace(os.sep, "/")
            return serve(request, path, document_root=settings.STATIC_ROOT)
        # Bug fix: the original fell off the end here and returned None for
        # every request that was not a 404 under a handled prefix.
        return response
class Command(runserver.Command):
    """
    Overrides runserver so that we can serve uploaded files
    during development, and not require every single developer on
    every single one of their projects to have to set up multiple
    web server aliases for serving static content.
    See https://code.djangoproject.com/ticket/15199
    For ease, we also serve any static files that have been stored
    under the project's ``STATIC_ROOT``.
    """
    def get_handler(self, *args, **options):
        """Wrap the default WSGI handler with the media-serving fallback."""
        handler = super(Command, self).get_handler(*args, **options)
        # Only wrap when static serving is active: DEBUG on, or the user
        # explicitly asked for it via --insecure.
        if settings.DEBUG or options["insecure_serving"]:
            handler = MezzStaticFilesHandler(handler)
        return handler
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
    """Static-files handler that catches Http404 for paths under STATIC_URL
    or MEDIA_URL and retries them against ``STATIC_ROOT``; anything else
    re-raises the 404."""
    def get_response(self, request):
        try:
            return super(MezzStaticFilesHandler, self).get_response(request)
        except Http404:
            handled = (settings.STATIC_URL, settings.MEDIA_URL)
            if request.path.startswith(handled):
                path = self.file_path(request.path).replace(os.sep, "/")
                return serve(request, path, document_root=settings.STATIC_ROOT)
            # Not a static/media path: let the 404 propagate unchanged.
            raise
class Command(runserver.Command):
    """
    Overrides runserver so that we can serve uploaded files
    during development, and not require every single developer on
    every single one of their projects to have to set up multiple
    web server aliases for serving static content.
    See https://code.djangoproject.com/ticket/15199
    For ease, we also serve any static files that have been stored
    under the project's ``STATIC_ROOT``.
    """
    def get_handler(self, *args, **options):
        """Wrap the default WSGI handler with the media-serving fallback."""
        handler = super(Command, self).get_handler(*args, **options)
        # Only wrap when static serving is active: DEBUG on, or the user
        # explicitly asked for it via --insecure.
        if settings.DEBUG or options["insecure_serving"]:
            handler = MezzStaticFilesHandler(handler)
        return handler
| Python | 0 |
93a7f4cb914de537e477a6c6bd45e0aa28ce2e4f | update model fields | modelview/migrations/0053_auto_20200408_1442.py | modelview/migrations/0053_auto_20200408_1442.py | # Generated by Django 3.0 on 2020-04-08 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds `data_postprocessing` to EnergyFramework and
    tweaks help/verbose text on two existing boolean fields."""
    dependencies = [
        ('modelview', '0052_auto_20200408_1308'),
    ]
    operations = [
        # New boolean flag (defaults False for existing rows).
        migrations.AddField(
            model_name='energyframework',
            name='data_postprocessing',
            field=models.BooleanField(default=False, help_text='Which output format(s) can the framework apply? Please list!', verbose_name='data postprocessing'),
        ),
        # Text-only changes below: column type/default are unchanged.
        migrations.AlterField(
            model_name='energyframework',
            name='agricultural_demand',
            field=models.BooleanField(default=False, help_text='Which agricultural demands are already modelled with the framework?', verbose_name='Agricultural demand'),
        ),
        migrations.AlterField(
            model_name='energyframework',
            name='gm_singleNode',
            field=models.BooleanField(default=False, verbose_name='Single-node model'),
        ),
    ]
| Python | 0.000001 | |
bc52778a5ed9ee44f40400cc2693f86318434527 | Add missing file | metashare/repository/editor/lang.py | metashare/repository/editor/lang.py |
from xml.etree.ElementTree import XML
import os
import logging
from metashare.settings import LOG_LEVEL, LOG_HANDLER
import pycountry
# Setup logging support.
logging.basicConfig(level=LOG_LEVEL)
LOGGER = logging.getLogger('metashare.xml_utils')
LOGGER.addHandler(LOG_HANDLER)
def read_langs(filename):
    """Parse the language XML file at `filename` and return its root element.

    Returns None (after logging an error) when the file does not exist.
    """
    if not os.path.isfile(filename):
        LOGGER.error('read_langs: {0} not found'.format(filename))
        return None
    # Bug fixes: the original leaked a raw os.open() file descriptor, read at
    # most 10000 bytes (silently truncating larger files), and dumped the
    # whole payload to stdout via a leftover debug print.
    with open(filename) as xml_file:
        data = xml_file.read()
    return XML(data)
def read_languages():
    """Return (index, alpha2, name) tuples for every pycountry language that
    has a two-letter ISO 639-1 (alpha2) code."""
    langs = pycountry.languages
    lang_list = []
    # enumerate() replaces the manual range(len(...)) indexing; languages
    # without an alpha2 code are simply skipped (the old dead `else: pass`
    # branch with commented-out code is dropped).
    for index, lang in enumerate(langs.objects):
        if hasattr(lang, 'alpha2'):
            lang_list.append((index, lang.alpha2, lang.name))
    return lang_list
def read_lang_alpha2():
    """Return the two-letter (alpha2) codes of all pycountry languages that
    define one, in pycountry's own ordering."""
    return [lang.alpha2
            for lang in pycountry.languages.objects
            if hasattr(lang, 'alpha2')]
def get_lang_list(xml_tree):
    """Extract (id, name) text pairs from the direct <lang> children of
    `xml_tree`, preserving document order."""
    return [(lang_el.find('id').text, lang_el.find('name').text)
            for lang_el in xml_tree.findall('lang')]
| Python | 0.000006 | |
e580995de78c3658951b119577a0f7c335352e13 | Create feature_class_info_to_csv.py | feature_class_info_to_csv.py | feature_class_info_to_csv.py | import arcpy
import os
import time
import csv
# Walk every feature dataset (plus the gdb root, '') in the workspace and
# write one CSV row per feature class. Python 2 / arcpy script.
begin_time = time.clock()
arcpy.env.workspace = ws = r"\\192-86\DFSRoot\Data\allenj\Desktop\gdb\test.gdb"
mrcsv = r"\\192-86\DFSRoot\Data\allenj\Desktop\gdb\write.csv"
ls = [1,2,3]
# NOTE(review): the CSV file handle is never closed and 'ls' above is unused.
writer = csv.writer(open(mrcsv, 'a'))
writer.writerow(["Feature","Feature_Count","Extents"])
c = 0
for fds in arcpy.ListDatasets('','feature') + ['']:
    for fc in arcpy.ListFeatureClasses('','',fds):
        print fc
        x = fc
        # NOTE(review): GetCount_management returns a Result object, not an
        # int — presumably str()-able when written to CSV; confirm output.
        y = arcpy.GetCount_management(fc)
        # NOTE(review): "meow" is a placeholder for the extents column.
        z = "meow"
        row = [(x),(y),(z)]
        writer.writerow(row)
        c = c + 1
print "Feature Class Count:"
print c
print "--------------"
end_time = time.clock()
print "Elapsed Time:"
print (end_time - begin_time)
print "Seconds"
print "--------------"
print "Goodbye"
| Python | 0.000003 | |
ae477223f296de9ee6b81a15d56d7140a5bf26ac | Create __init__.py | requests/packages/urllib3/contrib/packages/ssl_match_hostname/__init__.py | requests/packages/urllib3/contrib/packages/ssl_match_hostname/__init__.py | Python | 0.000429 | ||
2ef9fce02be94f8c4e9b5c52ca04a05cce1b5ede | Allow to start server as a module | LiSE/LiSE/server/__main__.py | LiSE/LiSE/server/__main__.py | import cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
# Bug fix: positional arguments are always required, and passing
# required=True to a positional makes argparse raise
# "TypeError: 'required' is an unnecessary argument for positionals".
parser.add_argument('world', action='store')
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()

# CherryPy config: method dispatch, sessions, JSON/UTF-8 responses.
conf = {
    '/': {
        'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
        'tools.sessions.on': True,
        'tools.response_headers.on': True,
        'tools.response_headers.headers': [('Content-Type', 'application/json')],
        'tools.encode.on': True,
        'tools.encode.encoding': 'utf-8'
    }
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
| Python | 0.000001 | |
9e2d025384dd58c87bf8d292008711c317cb45df | extract human face | otherFaces.py | otherFaces.py | import cv2
print(cv2.__file__)
import os
import sys
# Extract faces from every .jpg under IMAGE_DIR, resize them to 64x64 and
# save them into OUTPUT_DIR. Press ESC in the preview window to abort.
IMAGE_DIR = 'D:\DATA\girl2\girl2'
OUTPUT_DIR = './other_people'

if not os.path.exists(OUTPUT_DIR):
    os.makedirs(OUTPUT_DIR)

# http://blog.topspeedsnail.com/archives/10511
# wget https://raw.githubusercontent.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_default.xml
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_haar.load('D:/Program Files (x86)/Miniconda3/Library/etc/haarcascades/haarcascade_frontalface_default.xml')

for (dirpath, dirnames, filenames) in os.walk(IMAGE_DIR):
    for filename in filenames:
        if filename.endswith('.jpg'):
            image_path = os.path.join(dirpath, filename)
            print('process: ', image_path)
            img = cv2.imread(image_path)
            gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_haar.detectMultiScale(gray_image, 1.3, 5)
            for face_x, face_y, face_w, face_h in faces:
                # Crop the detected face region and normalise its size.
                face = img[face_y:face_y + face_h, face_x:face_x + face_w]
                face = cv2.resize(face, (64, 64))
                cv2.imshow("img", face)
                # NOTE: faces from the same image overwrite each other since
                # the output name is just the source filename.
                cv2.imwrite(os.path.join(OUTPUT_DIR, filename), face)
                key = cv2.waitKey(30) & 0xff
                if key == 27:
                    # Fix: the original last line was garbled by extraction
                    # residue appended after sys.exit(0).
                    sys.exit(0)
a0124a990b4afe0cd5fd3971bae1e43f417bc1b2 | Add management command to find domains impacted by 502 bug | corehq/apps/domain/management/commands/find_secure_submission_image_domains.py | corehq/apps/domain/management/commands/find_secure_submission_image_domains.py | from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
    """Scan all domains with secure submissions for apps containing Image
    questions and dump (domain, app, creator) rows to domain_results.csv."""
    help = 'Find domains with secure submissions and image questions'

    def handle(self, *args, **options):
        with open('domain_results.csv', 'wb+') as csvfile:
            writer = csv.writer(
                csvfile,
                delimiter=',',
                quotechar='|',
                quoting=csv.QUOTE_MINIMAL
            )
            writer.writerow(['domain', 'app', 'domain_creator'])
            for domain in Domain.get_all(include_docs=True):
                if not domain.secure_submissions:
                    continue
                # One output row per Image question found anywhere in the app.
                for app in domain.full_applications(include_builds=False):
                    for module in app.modules:
                        for form in module.forms:
                            for question in form.get_questions(app.langs):
                                if question['type'] != 'Image':
                                    continue
                                writer.writerow([
                                    domain.name,
                                    app.name,
                                    domain.creating_user
                                ])
| Python | 0 | |
71f1bc5d981952f275500a2b62a67488b33e205b | Longest increasing subsequence algo | LongestIncreasingSubsequence.py | LongestIncreasingSubsequence.py | #Finds a largest increasing subsequence in O(n^2) time
#algorithm at http://www.algorithmist.com/index.php/Longest_Increasing_Subsequence
def LongestSubsequence(array):
    """Return one longest strictly increasing subsequence of `array`.

    Classic O(n^2) dynamic programme: q[i] holds the length of the longest
    increasing subsequence ending at index i, and p[i] the index of its
    predecessor (-1 when the subsequence starts at i). Ties keep the
    earliest ending index, matching the original behaviour.
    """
    n = len(array)
    if n == 0:
        return []  # bug fix: max(q)/q.index raised ValueError on empty input
    q = [0] * n
    p = [-1] * n  # predecessor links used to reconstruct the sequence
    for i in range(n):
        maxLen = 0
        for j in range(i):
            if array[i] > array[j] and q[j] > maxLen:
                maxLen = q[j]
                p[i] = j
        q[i] = maxLen + 1
    # Walk the predecessor chain back from the best ending index.
    idx = q.index(max(q))
    seq = []
    while idx != -1:
        seq.append(array[idx])
        idx = p[idx]
    seq.reverse()  # O(n) reverse instead of repeated O(n) list prepends
    return seq
def main():
    """Demo: print the longest increasing subsequence of a sample list."""
    print(LongestSubsequence([4,2,6,1,9,0,11,7,12]))

# Fix: the original guard's body line was garbled by trailing extraction
# residue after the main() call.
if __name__=='__main__':
    main()
361a075efed0ca4a9877f7268b2e91725ef8be65 | Add encoder.py | encoder.py | encoder.py | """
Source: https://trac.ffmpeg.org/wiki/Encode/H.264
"""
import os
import sys
import subprocess
FFMPEG_PATH = '/usr/local/bin/ffmpeg'
VIDEO_CODEC = 'h264'
VIDEO_ENCODER = 'h264_omx'
AUDIO_CODEC = 'aac'
AUDIO_ENCODER = 'aac'
BITRATE = '2500k'
SRC_DIR = os.path.expanduser('~/Desktop')
DEST_DIR = os.path.expanduser('~/Desktop/Media')
INPUT_EXTS = ['.mkv']
OUTPUT_EXT = '.mp4'
def stream_codec(stream, filename):
    """Return the codec name (e.g. 'h264') for `stream` of `filename`."""
    output = subprocess.check_output([
        'ffprobe',
        '-v',
        'error',
        '-select_streams',
        stream,
        '-show_entries',
        'stream=codec_name',
        '-of',
        'default=nokey=1:noprint_wrappers=1',
        filename
    ])
    # Bug fix: check_output returns bytes with a trailing newline, so the
    # callers' comparisons against str constants (VIDEO_CODEC/AUDIO_CODEC)
    # were always False and streams were always re-encoded. Decode and strip
    # so the result compares equal to plain codec-name strings.
    return output.decode('utf-8').strip()
def walk_src_media(callback):
    """Invoke `callback(root, filename)` for each file under SRC_DIR whose
    extension is one of INPUT_EXTS (os.walk order; not sorted)."""
    for root, _dirs, files in os.walk(os.path.expanduser(SRC_DIR)):
        for name in files:
            _, ext = os.path.splitext(name)
            if ext in INPUT_EXTS:
                callback(root, name)
def encode(root, filename, opts):
    """encode file using ffmpeg

    Mirrors the source tree layout under DEST_DIR, skips files whose output
    already exists, and copies streams that are already in the target codec.
    `opts` is the raw CLI option list ('--debug' trims to 15s, '--dry' only
    prints the command).
    """
    input_filename = os.path.join(root, filename)
    # Recreate the input's relative directory structure under DEST_DIR.
    path_to_create = os.path.dirname(os.path.relpath(input_filename, SRC_DIR))
    path_to_create = os.path.join(DEST_DIR, path_to_create)
    output_filename = os.path.join(path_to_create, os.path.splitext(filename)[0] + OUTPUT_EXT)
    if os.path.isfile(output_filename):
        return
    command = [FFMPEG_PATH, '-i', os.path.expanduser(input_filename)]
    # Copy streams already in the target codec instead of re-encoding.
    # NOTE(review): this assumes stream_codec returns a str equal to e.g.
    # 'h264' — confirm it decodes/strips ffprobe's bytes output.
    v_encoder = 'copy' if stream_codec('v:0', input_filename) == VIDEO_CODEC else VIDEO_ENCODER
    command += ['-c:v', v_encoder]
    a_encoder = 'copy' if stream_codec('a:0', input_filename) == AUDIO_CODEC else AUDIO_ENCODER
    command += ['-c:a', a_encoder]
    command += ['-b:v', BITRATE]
    if '--debug' in opts:
        command += ['-to', '15']
    command += [os.path.expanduser(output_filename)]
    if '--dry' in opts:
        print(' '.join(command), '\n')
    else:
        os.makedirs(path_to_create, exist_ok=True)
        subprocess.run(command)
def process(args):
    """encode media from the source directory into the destination directory"""
    def _encode_one(root, filename):
        encode(root, filename, args)
    walk_src_media(_encode_one)


if __name__ == "__main__":
    process(sys.argv[1:])
| Python | 0.000011 | |
00c7e9a020b60b9bfbc2c8c8e1b3e40869f9a73e | Add unit tests for agent membership | midonet/neutron/tests/unit/test_extension_agent_membership.py | midonet/neutron/tests/unit/test_extension_agent_membership.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2015 Midokura SARL.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import uuid
import webob.exc
from midonet.neutron.extensions import agent_membership as ext_am
from midonet.neutron.tests.unit import test_midonet_plugin as test_mn
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_extensions as test_ex
FAKE_AGENT_ID = uuidutils.generate_uuid()
FAKE_IP = '10.0.0.3'
class AgentMembershipExtensionManager(object):
    """Minimal Neutron extension manager exposing only the agent-membership
    resources; no extra actions or request extensions."""
    def get_resources(self):
        return ext_am.Agent_membership.get_resources()
    def get_actions(self):
        return []
    def get_request_extensions(self):
        return []
class AgentMembershipTestCase(test_mn.MidonetPluginV2TestCase):
    """CRUD tests for the agent-membership extension API."""
    def setUp(self, plugin=None, ext_mgr=None):
        # NOTE(review): the ext_mgr parameter is immediately shadowed and the
        # plugin argument is unused — presumably intentional; confirm.
        ext_mgr = AgentMembershipExtensionManager()
        super(AgentMembershipTestCase, self).setUp()
        self.ext_api = test_ex.setup_extensions_middleware(ext_mgr)
    def _create_agent_membership(self, agent_id, ip_address):
        """POST an agent_membership and return the raw webob response."""
        data = {'agent_membership': {'id': agent_id,
                                     'tenant_id': str(uuid.uuid4()),
                                     'ip_address': ip_address}}
        am_req = self.new_create_request('agent_memberships', data, self.fmt)
        return am_req.get_response(self.ext_api)
    def _make_agent_membership(self, agent_id, ip_address):
        """Create a membership and return it deserialized; raise on 4xx/5xx."""
        res = self._create_agent_membership(agent_id, ip_address)
        if res.status_int >= webob.exc.HTTPBadRequest.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(self.fmt, res)
    @contextlib.contextmanager
    def agent_membership(self, agent_id=FAKE_AGENT_ID, ip_address=FAKE_IP):
        """Context manager yielding a freshly created agent membership."""
        am = self._make_agent_membership(agent_id, ip_address)
        yield am
    def test_create_agent_membership(self):
        expected = {'id': FAKE_AGENT_ID, 'ip_address': FAKE_IP}
        with self.agent_membership() as am:
            for k, v in expected.iteritems():
                self.assertEqual(am['agent_membership'][k], v)
    def test_delete_agent_membership(self):
        with self.agent_membership() as am:
            req = self.new_delete_request('agent_memberships',
                                          am['agent_membership']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
    def test_show_agent_membership(self):
        expected = {'id': FAKE_AGENT_ID, 'ip_address': FAKE_IP}
        with self.agent_membership() as am:
            req = self.new_show_request('agent_memberships',
                                        am['agent_membership']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.ext_api))
            for k, v in expected.iteritems():
                self.assertEqual(res['agent_membership'][k], v)
    def test_list_agent_memberships(self):
        with self.agent_membership():
            with self.agent_membership(uuidutils.generate_uuid(), '10.0.0.4'):
                req = self.new_list_request('agent_memberships')
                res = self.deserialize(
                    self.fmt, req.get_response(self.ext_api))
                self.assertEqual(len(res['agent_memberships']), 2)
| Python | 0 | |
3344c49bf36a4bd74fb9db079297b98a2e0ee46f | Implement cht.sh release script | bin/release.py | bin/release.py | #!/usr/bin/env python
from __future__ import print_function
from datetime import datetime
import os
from os import path
import re
import shutil
import subprocess
from subprocess import Popen
import sys
SHARE_DIR = path.join(path.dirname(__file__), "../share/")
def run(args):
    """Run `args` as a subprocess sharing this process's stdout/stderr and
    return its exit code."""
    proc = Popen(args, stdout=sys.stdout, stderr=sys.stderr)
    return proc.wait()
# Refuse to release from a dirty working tree.
status = subprocess.check_output(["git", "status", "--porcelain"])
if len(status) > 0:
    print("Unclean working tree. Commit or stash changes first.", file=sys.stderr)
    sys.exit(1)
timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S +0000")
cht_curr = path.join(SHARE_DIR, "cht.sh.txt")
cht_new = path.join(SHARE_DIR, "cht.sh.txt.new")
# Lines in cht.sh.txt that carry the release version and build timestamp.
re_version = re.compile(r"^__CHTSH_VERSION=(.*)$")
re_timestamp = re.compile(r"^__CHTSH_DATETIME=.*$")
# Rewrite the script into a temp file, bumping the version and refreshing
# the timestamp; all other lines pass through unchanged.
with open(cht_curr, "rt") as fin:
    with open(cht_new, "wt") as fout:
        for line in fin:
            match = re_version.match(line)
            if match:
                version = int(match.group(1)) + 1
                fout.write("__CHTSH_VERSION=%s\n" % version)
                continue
            match = re_timestamp.match(line)
            if match:
                fout.write('__CHTSH_DATETIME="%s"\n' % timestamp)
                continue
            fout.write(line)
# Preserve the executable bit, then atomically swap the files into place.
shutil.copymode(cht_curr, cht_new)
os.remove(cht_curr)
os.rename(cht_new, cht_curr)
# Commit and tag the release. NOTE(review): if no __CHTSH_VERSION line was
# found, `version` is undefined here and this raises NameError.
message = "cht: v%s" % version
run(["git", "add", cht_curr])
run(["git", "commit", "-m", message])
run(["git", "tag", "cht@%s" % version, "-m", message])
| Python | 0 | |
278cd37ada508701896c2669a215365785f5a261 | Add eval dispatch (copied from compyle) | evalExp.py | evalExp.py | from keywords import *
from reg import *
from parse import parse
def evalExp():
    """Fetch the current expression from the EXPR register, parse it and pick
    its evaluator. Dispatch/next-step wiring is still stubbed out below."""
    expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
    # expr = transformMacros(expr)
    evalFunc = getEvalFunc(expr)
    # evalFunc()
    # reassign next step
def getEvalFunc(expr):
    """Select the evaluator for `expr`: variables and numbers first, then
    each special form by its keyword tag, with application as the fallback."""
    if isVar(expr):
        return compVar
    if isNum(expr):
        return compNum
    # Compound expression: dispatch on the leading keyword tag.
    tag = expr[0]
    keyword_groups = {
        define_keys: evalDef,
        ass_keys: evalAss,
        lambda_keys: evalLambda,
        if_keys: evalIf,
        begin_keys: evalBegin,
        quote_keys: evalQuote
    }
    for keys, evaluator in keyword_groups.items():
        if tag in keys:
            return evaluator
    # No keyword matched: treat it as a procedure application.
    return evalApp
def isNum(exp):
    """Return True if `exp` represents an integer (an int, or any value that
    int() accepts, e.g. the string "42"), False otherwise."""
    try:
        int(exp)
    except (TypeError, ValueError, OverflowError):
        # int() failed: not numeric. Narrowed from a bare `except:` which
        # also swallowed KeyboardInterrupt and SystemExit.
        return False
    return True
def isVar(exp):
    """Variables are represented as bare strings in the parsed expression."""
    return type(exp) is str
| Python | 0 | |
0ffd8c1b52b95ef61bcb2ecf7183d1abab55a3ce | Rename Documents to Attachments | smile_attachment/models/models.py | smile_attachment/models/models.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp import api, fields, models
native__init__ = models.Model.__init__
native_fields_view_get = models.Model.fields_view_get
@api.one
@api.depends()
def _get_attachments(self):
    # Compute placeholder only: the field's real content comes from the
    # one2many relation; a compute is required so `search` can be declared.
    self.attachment_ids = False
def _search_attachments(self, operator, value):
    """Domain builder: match records of this model having an attachment whose
    description, indexed content or filename satisfies (operator, value)."""
    recs = self.env['ir.attachment'].search([('res_model', '=', self._name),
                                             '|', '|',
                                             ('description', operator, value),
                                             ('index_content', operator, value),
                                             ('datas_fname', operator, value)])
    return [('id', 'in', [rec.res_id for rec in recs])]
def new__init__(self, pool, cr):
    """Patched models.Model.__init__: inject an `attachment_ids` one2many on
    every model that does not already define one."""
    native__init__(self, pool, cr)
    name = 'attachment_ids'
    if name not in self._columns and name not in self._fields:
        field = fields.One2many('ir.attachment', 'res_id', 'Attachments', automatic=True,
                                compute='_get_attachments', search='_search_attachments')
        self._add_field(name, field)
def new_fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """Patched fields_view_get: append an `attachment_ids` field to every
    search view so records can be filtered by attachment content."""
    res = native_fields_view_get(self, cr, uid, view_id, view_type, context, toolbar, submenu)
    if view_type == 'search':
        View = self.pool['ir.ui.view']
        arch_etree = etree.fromstring(res['arch'])
        element = etree.Element('field', name='attachment_ids')
        arch_etree.insert(-1, element)
        # Re-run view post-processing so the injected field is registered.
        res['arch'], res['fields'] = View.postprocess_and_fields(cr, uid, self._name, arch_etree, view_id, context=context)
    return res
# Monkey-patch every ORM model with the attachment field and search-view hook.
models.Model.__init__ = new__init__
models.Model._get_attachments = _get_attachments
models.Model._search_attachments = _search_attachments
models.Model.fields_view_get = new_fields_view_get
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from lxml import etree
from openerp import api, fields, models
native__init__ = models.Model.__init__
native_fields_view_get = models.Model.fields_view_get
@api.one
@api.depends()
def _get_attachments(self):
    # Compute placeholder only: the field's real content comes from the
    # one2many relation; a compute is required so `search` can be declared.
    self.attachment_ids = False
def _search_attachments(self, operator, value):
    """Domain builder: match records of this model having an attachment whose
    description, indexed content or filename satisfies (operator, value)."""
    recs = self.env['ir.attachment'].search([('res_model', '=', self._name),
                                             '|', '|',
                                             ('description', operator, value),
                                             ('index_content', operator, value),
                                             ('datas_fname', operator, value)])
    return [('id', 'in', [rec.res_id for rec in recs])]
def new__init__(self, pool, cr):
    """Patched models.Model.__init__: inject an `attachment_ids` one2many on
    every model that does not already define one."""
    native__init__(self, pool, cr)
    name = 'attachment_ids'
    if name not in self._columns and name not in self._fields:
        field = fields.One2many('ir.attachment', 'res_id', 'Documents', automatic=True,
                                compute='_get_attachments', search='_search_attachments')
        self._add_field(name, field)
def new_fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
    """Patched fields_view_get: append an `attachment_ids` field to every
    search view so records can be filtered by attachment content."""
    res = native_fields_view_get(self, cr, uid, view_id, view_type, context, toolbar, submenu)
    if view_type == 'search':
        View = self.pool['ir.ui.view']
        arch_etree = etree.fromstring(res['arch'])
        element = etree.Element('field', name='attachment_ids')
        arch_etree.insert(-1, element)
        # Re-run view post-processing so the injected field is registered.
        res['arch'], res['fields'] = View.postprocess_and_fields(cr, uid, self._name, arch_etree, view_id, context=context)
    return res
# Monkey-patch every ORM model with the attachment field and search-view hook.
models.Model.__init__ = new__init__
models.Model._get_attachments = _get_attachments
models.Model._search_attachments = _search_attachments
models.Model.fields_view_get = new_fields_view_get
| Python | 0 |
5de57ff00037d6f9a04307e60685f47f368cb29f | add example script to test calling the ffi | example.py | example.py | import scipcffi.ffi as s
# Smoke test: create a SCIP instance through the CFFI bindings.
scip_ptr = s.ffi.new('SCIP**')  # SCIP** out-parameter for SCIPcreate
rc = s.lib.SCIPcreate(scip_ptr)
assert rc == s.lib.SCIP_OKAY
scip = scip_ptr[0]  # dereference to the newly created SCIP*
| Python | 0 | |
4ce96ed8ab49555d3cd29ac6ab420cc438b60af8 | set is_example flag | msgvis/apps/importer/management/commands/import_coded_data.py | msgvis/apps/importer/management/commands/import_coded_data.py | import traceback
import sys
import path
import csv
from time import time
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.conf import settings
from django.contrib.auth.models import User
from msgvis.apps.importer.models import create_an_instance_from_json
from msgvis.apps.corpus.models import Dataset, Code
from msgvis.apps.coding.models import CodeAssignment
class Command(BaseCommand):
    """
    Import coded message data into the database.
    .. code-block :: bash
        $ python manage.py import_coded_data <dataset_id> <coded_data_file> [...]
    """
    args = '<dataset_id> <coded_data_file>[...]'
    help = "Import coded data"
    option_list = BaseCommand.option_list + (
        make_option('-u', '--user',
                    action='store',
                    dest='user',
                    default='master',
                    help='Set source user of the coded data'
        ),
    )
    def handle(self, dataset_id, *filenames, **options):
        """Validate arguments, then feed each CSV file to an Importer."""
        if not dataset_id:
            raise CommandError("Dataset id is required.")
        try:
            dataset_id = int(dataset_id)
        except ValueError:
            raise CommandError("Dataset id must be a number.")
        if len(filenames) == 0:
            raise CommandError('At least one filename must be provided.')
        # Check every input file up front so a bad path fails fast.
        for f in filenames:
            if not path.path(f).exists():
                raise CommandError("Filename %s does not exist" % f)
        user = options.get('user')
        start = time()
        dataset_obj = Dataset.objects.get(id=dataset_id)
        user_obj = User.objects.get(username=user)
        for i, corpus_filename in enumerate(filenames):
            with open(corpus_filename, 'rb') as fp:
                if len(filenames) > 1:
                    print "Reading file %d of %d %s" % (i + 1, len(filenames), corpus_filename)
                else:
                    print "Reading file %s" % corpus_filename
                csvreader = csv.reader(fp, delimiter=',')
                importer = Importer(csvreader, dataset_obj, user_obj)
                importer.run()
        print "Time: %.2fs" % (time() - start)
class Importer(object):
    """Reads coded-data CSV rows and turns them into CodeAssignment records,
    committing in batches and reporting progress/error counts."""
    # rows per database transaction
    commit_every = 100
    # progress report interval (in CSV lines)
    print_every = 1000
    def __init__(self, csvreader, dataset, user):
        self.csvreader = csvreader
        self.dataset = dataset
        self.source = user
        self.line = 0
        self.imported = 0
        self.not_valid = 0
        self.errors = 0
        self.codes = []
        self.col_map = {}
        self._get_all_codes()
        self._build_col_map()
    def _get_all_codes(self):
        """Cache every Code so row processing avoids per-row queries."""
        self.codes = list(Code.objects.all())
    def _import_group(self, rows):
        """Import a batch of CSV rows inside a single transaction."""
        with transaction.atomic(savepoint=False):
            for cols in rows:
                if len(cols) > 0:
                    try:
                        message = self.create_an_instance_from_csv_cols(cols)
                        if message:
                            self.imported += 1
                        else:
                            self.not_valid += 1
                    # NOTE(review): bare except — counts and logs any failure
                    # but also swallows KeyboardInterrupt; consider narrowing.
                    except:
                        self.errors += 1
                        print >> sys.stderr, "Import error on line %d" % self.line
                        traceback.print_exc()
                #if settings.DEBUG:
                    # prevent memory leaks
                    # from django.db import connection
                    # connection.queries = []
    def _build_col_map(self):
        """Consume the header row and map both name->index and index->name
        (safe in one dict since names are str and indices are int)."""
        header = self.csvreader.next()
        for i in range(len(header)):
            self.col_map[header[i]] = i
            self.col_map[i] = header[i]
    def _col_is_checked(self, cols, col_name):
        """True when the named column of this row is marked with an "x"."""
        col_idx = self.col_map.get(col_name)
        return cols[col_idx] == "x"
    def create_an_instance_from_csv_cols(self, cols):
        """Create example CodeAssignments for each checked code of the row's
        message; returns True if any assignment was created, False on
        no-assignment or on any error (errors are swallowed)."""
        try:
            message_ori_id = cols[self.col_map["id"]]
            is_ambiguous = self._col_is_checked(cols, "is_ambiguous")
            flag = False
            message = self.dataset.message_set.get(original_id=message_ori_id)
            for code in self.codes:
                if self._col_is_checked(cols, code.text):
                    # Invalidate previous assignments before adding the new
                    # example assignment for this (message, source, code).
                    code_assignments = CodeAssignment.objects.filter(message=message,
                                                                     source=self.source,
                                                                     code=code)
                    code_assignments.update(valid=False)
                    CodeAssignment.objects.create(message=message, source=self.source,
                                                  code=code, is_ambiguous=is_ambiguous,
                                                  is_example=True)
                    flag = True
            return flag
        # NOTE(review): bare except hides the failure reason; rows that blow
        # up are silently counted as "not valid" by the caller.
        except:
            return False
    def run(self):
        """Stream the CSV, batching rows into transactions and printing
        periodic progress plus a final summary."""
        transaction_group = []
        start = time()
        for cols in self.csvreader:
            self.line += 1
            transaction_group.append(cols)
            if len(transaction_group) >= self.commit_every:
                self._import_group(transaction_group)
                transaction_group = []
            if self.line > 0 and self.line % self.print_every == 0:
                print "%6.2fs | Reached line %d. Imported: %d; Not-valid: %d; Errors: %d" % (
                    time() - start, self.line, self.imported, self.not_valid, self.errors)
        if len(transaction_group) >= 0:
            self._import_group(transaction_group)
        print "%6.2fs | Finished %d lines. Imported: %d; Not-valid: %d; Errors: %d" % (
            time() - start, self.line, self.imported, self.not_valid, self.errors)
| Python | 0.998652 | |
e0871cd8c106a5f66bffd7a93759747b2f282c46 | make CommCareBuild.create_from_zip tolorate having directory entries like 'dist/' (by ignoring them) | corehq/apps/builds/models.py | corehq/apps/builds/models.py | from datetime import datetime
from zipfile import ZipFile
from couchdbkit.exceptions import ResourceNotFound
from couchdbkit.ext.django.schema import *
from corehq.apps.builds.jadjar import JadJar
class CommCareBuild(Document):
    """
    A single CommCare build (one version/build-number pair) whose binary
    files are stored as couch document attachments.

    #python manage.py shell
    #>>> from corehq.apps.builds.models import CommCareBuild
    #>>> build = CommCareBuild.create_from_zip('/Users/droberts/Desktop/zip/7106.zip', '1.2.dev', 7106)
    """
    build_number = IntegerProperty()
    version = StringProperty()
    time = DateTimeProperty()

    def put_file(self, payload, path, filename=None):
        """
        Add an attachment to the build (useful for constructing the build)
        payload should be a file-like object
        filename should be something like "Nokia/S40-generic/CommCare.jar"
        """
        if filename:
            path = '/'.join([path, filename])
        # Infer the MIME type from the extension; unknown extensions map
        # to None and let the attachment layer decide.
        content_type = {
            'jad': 'text/vnd.sun.j2me.app-descriptor',
            'jar': 'application/java-archive',
        }.get(path.split('.')[-1])
        self.put_attachment(payload, path, content_type)

    def fetch_file(self, path, filename=None):
        """Return the raw contents of the attachment at path[/filename]."""
        if filename:
            path = '/'.join([path, filename])
        return self.fetch_attachment(path)

    def get_jadjar(self, path):
        """
        build.get_jadjar("Nokia/S40-generic")
        """
        # The jad descriptor is optional; the jar is required.
        try:
            jad = self.fetch_file(path, "CommCare.jad")
        except ResourceNotFound:
            jad = None

        return JadJar(
            jad=jad,
            jar=self.fetch_file(path, "CommCare.jar"),
            version=self.version,
            build_number=self.build_number
        )

    @classmethod
    def create_from_zip(cls, f, version, build_number):
        """f should be a file-like object or a path to a zipfile"""
        self = cls(build_number=build_number, version=version, time=datetime.utcnow())
        self.save()
        try:
            z = ZipFile(f)
            try:
                for name in z.namelist():
                    path = name.split('/')
                    # Only store files under dist/; names ending in '/'
                    # are directory entries and carry no payload.
                    if path[0] == "dist" and path[-1] != "":
                        path = '/'.join(path[1:])
                        self.put_file(z.read(name), path)
            finally:
                z.close()
        except:
            # Roll back the partially created document. (Previously, if
            # ZipFile(f) itself raised, this handler crashed on an
            # unbound `z` and the document was never deleted.)
            self.delete()
            raise
        return self

    @classmethod
    def get_build(cls, version, build_number):
        """Return the build for (version, build_number) or raise KeyError."""
        build_number = int(build_number)
        self = cls.view('builds/all',
            startkey=[version, build_number],
            endkey=[version, build_number, {}],
            limit=1,
            include_docs=True,
        ).one()
        if not self:
            raise KeyError()
        return self
| from datetime import datetime
from zipfile import ZipFile
from couchdbkit.exceptions import ResourceNotFound
from couchdbkit.ext.django.schema import *
from corehq.apps.builds.jadjar import JadJar
class CommCareBuild(Document):
    """
    A single CommCare build (one version/build-number pair) whose binary
    files are stored as couch document attachments.

    #python manage.py shell
    #>>> from corehq.apps.builds.models import CommCareBuild
    #>>> build = CommCareBuild.create_from_zip('/Users/droberts/Desktop/zip/7106.zip', '1.2.dev', 7106)
    """
    build_number = IntegerProperty()
    version = StringProperty()
    time = DateTimeProperty()

    def put_file(self, payload, path, filename=None):
        """
        Add an attachment to the build (useful for constructing the build)
        payload should be a file-like object
        filename should be something like "Nokia/S40-generic/CommCare.jar"
        """
        if filename:
            path = '/'.join([path, filename])
        # Infer the MIME type from the extension; unknown extensions map
        # to None and let the attachment layer decide.
        content_type = {
            'jad': 'text/vnd.sun.j2me.app-descriptor',
            'jar': 'application/java-archive',
        }.get(path.split('.')[-1])
        self.put_attachment(payload, path, content_type)

    def fetch_file(self, path, filename=None):
        """Return the raw contents of the attachment at path[/filename]."""
        if filename:
            path = '/'.join([path, filename])
        return self.fetch_attachment(path)

    def get_jadjar(self, path):
        """
        build.get_jadjar("Nokia/S40-generic")
        """
        # The jad descriptor is optional; the jar is required.
        try:
            jad = self.fetch_file(path, "CommCare.jad")
        except ResourceNotFound:
            jad = None

        return JadJar(
            jad=jad,
            jar=self.fetch_file(path, "CommCare.jar"),
            version=self.version,
            build_number=self.build_number
        )

    @classmethod
    def create_from_zip(cls, f, version, build_number):
        """f should be a file-like object or a path to a zipfile"""
        self = cls(build_number=build_number, version=version, time=datetime.utcnow())
        self.save()
        try:
            z = ZipFile(f)
            try:
                for name in z.namelist():
                    path = name.split('/')
                    # Only store files under dist/. Skip names ending in
                    # '/': those are directory entries with no payload,
                    # and the bare "dist/" entry would otherwise be
                    # stored under an empty path.
                    if path[0] == "dist" and path[-1] != "":
                        path = '/'.join(path[1:])
                        self.put_file(z.read(name), path)
            finally:
                z.close()
        except:
            # Roll back the partially created document. (Previously, if
            # ZipFile(f) itself raised, this handler crashed on an
            # unbound `z` and the document was never deleted.)
            self.delete()
            raise
        return self

    @classmethod
    def get_build(cls, version, build_number):
        """Return the build for (version, build_number) or raise KeyError."""
        build_number = int(build_number)
        self = cls.view('builds/all',
            startkey=[version, build_number],
            endkey=[version, build_number, {}],
            limit=1,
            include_docs=True,
        ).one()
        if not self:
            raise KeyError()
        return self
| Python | 0 |
3a2a311c3c3f8a6bc2f027bfa247d912122e512e | Add test for gaussian | tests/functions_tests/test_gaussian.py | tests/functions_tests/test_gaussian.py | import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import condition
if cuda.available:
cuda.init()
class TestGaussian(unittest.TestCase):
    """Gradient checks for the Gaussian sampling function."""

    def setUp(self):
        # Random float32 mean, log-variance and upstream-gradient arrays.
        shape = (3, 2)
        self.m = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        self.v = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
        self.gy = numpy.random.uniform(-1, 1, shape).astype(numpy.float32)

    def check_backward(self, m_data, v_data, y_grad):
        mean = chainer.Variable(m_data)
        ln_var = chainer.Variable(v_data)
        out = functions.gaussian(mean, ln_var)
        self.assertEqual(out.data.dtype, numpy.float32)
        out.grad = y_grad
        out.backward()

        # Compare the analytic gradients against numerical differentiation
        # of the creator's forward pass.
        creator = out.creator
        forward = lambda: creator.forward((mean.data, ln_var.data))
        gm, gv = gradient_check.numerical_grad(
            forward, (mean.data, ln_var.data), (out.grad,))

        gradient_check.assert_allclose(gm, mean.grad)
        gradient_check.assert_allclose(gv, ln_var.grad)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.m, self.v, self.gy)

    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.m),
                            cuda.to_gpu(self.v),
                            cuda.to_gpu(self.gy))
| Python | 0.000004 | |
945c2c620634c2c816aa446d91773adb75cb87e3 | Add airmass tool | airmass.py | airmass.py | #!/usr/env/python
import argparse
import numpy as np
from astropy import units as u
##-------------------------------------------------------------------------
## Parse Command Line Arguments
##-------------------------------------------------------------------------
## create a parser object for understanding command-line arguments
p = argparse.ArgumentParser(description=
'''Convert an elevation above the horizon to an airmass using the Pickering
(2002) formula:
1 / sin(h + 244/(165 + 47*h^1.1))
and estimate the extinction.
''')
## add arguments
p.add_argument('elevation', type=float,
help="Elevation (in degrees) above the horizon")
## add options
p.add_argument("--extinction", dest="extinction", type=float,
default=0.13, help="Extinction in magnitudes per airmass.")
args = p.parse_args()
##-------------------------------------------------------------------------
## Main Program
##-------------------------------------------------------------------------
def main():
    """Print the Pickering (2002) airmass and extinction for the CLI args."""
    elevation = args.elevation * u.degree
    ext_per_airmass = args.extinction * u.mag

    # Pickering (2002): X = 1 / sin(h + 244/(165 + 47*h**1.1)), h in degrees.
    corrected_deg = elevation.value + 244/(165.0 + 47.0*elevation.value**1.1)
    airmass = 1.0 / np.sin(corrected_deg*u.degree)
    print(f'for EL = {elevation:.1f}')
    print(f'airmass = {airmass:.2f}')
    extinction = airmass * ext_per_airmass
    print(f'extinction = {extinction:.2f}')
if __name__ == '__main__':
main()
| Python | 0 | |
12f2198a53d474bb69a6b9118fca0638dcce8aac | add data migration | accelerator/migrations/0088_remove_community_participation_read_more_prompts.py | accelerator/migrations/0088_remove_community_participation_read_more_prompts.py | # Generated by Django 2.2.24 on 2022-03-07 12:10
import re
from django.db import migrations
def remove_community_participation_read_more_prompts(apps, schema_editor):
    """Strip trailing "read more" prompts from participation descriptions.

    Removes sentences such as:
      - "For more information, read about Judging at Mass Challenge."
      - "Read more about Mentoring at Mass Challenge."
    and replaces the non-ASCII right single quote with an ASCII apostrophe.
    """
    CommunityParticipation = apps.get_model(
        'accelerator', 'CommunityParticipation')
    for record in CommunityParticipation.objects.all():
        text = record.description
        # Drop a trailing "Read more about ..." sentence.
        text = re.sub(r' Read more about[a-zA-Z ]*.$', '', text)
        # Drop a trailing "For more information ..." sentence.
        text = re.sub(r' For more information[a-zA-Z, ]*.$', '', text)
        # Replace the right single quotation mark with "'".
        record.description = text.replace('\u2019', "'")
        record.save()
class Migration(migrations.Migration):
    # Data migration: the forward pass strips the prompts; the reverse is
    # a no-op because the removed text cannot be reconstructed.

    dependencies = [
        ('accelerator', '0087_update_startup_profile'),
    ]

    operations = [
        migrations.RunPython(remove_community_participation_read_more_prompts,
                             migrations.RunPython.noop)
    ]
| Python | 0 | |
b166cd8cc95ceb56f8d03cacb8903b0936e69210 | Create solution.py | data_structures/linked_list/problems/find_pattern_in_linked_list/py/solution.py | data_structures/linked_list/problems/find_pattern_in_linked_list/py/solution.py | import LinkedList
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def FindPatternInLinkedList(head: LinkedList.Node, pattern: LinkedList.Node) -> int:
    """Return the 0-based index where *pattern* first occurs in the list.

    Naive substring-style search: at each candidate start node, walk both
    lists in lockstep until the pattern is exhausted (match) or a value
    differs. Returns -1 when either list is None or no match is found.
    """
    if head is None or pattern is None:
        return -1

    position = 0
    start = head
    while start is not None:
        # Try to match the whole pattern beginning at `start`.
        probe, expected = start, pattern
        while probe is not None and expected is not None and probe.val == expected.val:
            probe = probe.nxt
            expected = expected.nxt
        if expected is None:
            return position
        start = start.nxt
        position += 1

    return -1
| Python | 0.000018 | |
869480c1b813d167f30de71040d891af484e1cce | add eight-puzzle example | eight_puzzle_example.py | eight_puzzle_example.py | #!/usr/bin/env python
"""
Example of using PyDDL to solve an eight-puzzle. Each number is a tile that
can slide vertically or horizontally to fill in the blank space. This "hard"
instance (requiring the maximum of 31 steps) is taken from the following paper:
Reinefeld, Alexander. "Complete Solution of the Eight-Puzzle and the Benefit of
Node Ordering in IDA*." International Joint Conference on Artificial
Intelligence. 1993.
Initial State:
+---+---+---+
| 8 7 6 |
| 4 1 |
| 2 5 3 |
+---+---+---+
Goal State:
+---+---+---+
| 1 2 |
| 3 4 5 |
| 6 7 8 |
+---+---+---+
"""
from pyddl import Domain, Problem, Action, neg, planner
def problem(verbose):
    """Build and solve the eight-puzzle planning problem with PyDDL.

    The domain models the blank as the thing tiles slide into: each move
    action swaps a tile with the adjacent blank in one direction. The
    (inc, a, b) / (dec, a, b) static predicates encode b = a + 1 and
    b = a - 1 over the 3x3 grid coordinates.
    """
    domain = Domain((
        Action(
            'move-up',
            parameters=(
                ('tile', 't'),
                ('position', 'px'),
                ('position', 'py'),
                ('position', 'by'),
            ),
            preconditions=(
                ('dec', 'by', 'py'),
                ('blank', 'px', 'by'),
                ('at', 't', 'px', 'py'),
            ),
            effects=(
                neg(('blank', 'px', 'by')),
                neg(('at', 't', 'px', 'py')),
                ('blank', 'px', 'py'),
                ('at', 't', 'px', 'by'),
            ),
        ),
        Action(
            'move-down',
            parameters=(
                ('tile', 't'),
                ('position', 'px'),
                ('position', 'py'),
                ('position', 'by'),
            ),
            preconditions=(
                ('inc', 'by', 'py'),
                ('blank', 'px', 'by'),
                ('at', 't', 'px', 'py'),
            ),
            effects=(
                neg(('blank', 'px', 'by')),
                neg(('at', 't', 'px', 'py')),
                ('blank', 'px', 'py'),
                ('at', 't', 'px', 'by'),
            ),
        ),
        Action(
            'move-left',
            parameters=(
                ('tile', 't'),
                ('position', 'px'),
                ('position', 'py'),
                ('position', 'bx'),
            ),
            preconditions=(
                ('dec', 'bx', 'px'),
                ('blank', 'bx', 'py'),
                ('at', 't', 'px', 'py'),
            ),
            effects=(
                neg(('blank', 'bx', 'py')),
                neg(('at', 't', 'px', 'py')),
                ('blank', 'px', 'py'),
                ('at', 't', 'bx', 'py'),
            ),
        ),
        Action(
            'move-right',
            parameters=(
                ('tile', 't'),
                ('position', 'px'),
                ('position', 'py'),
                ('position', 'bx'),
            ),
            preconditions=(
                ('inc', 'bx', 'px'),
                ('blank', 'bx', 'py'),
                ('at', 't', 'px', 'py'),
            ),
            effects=(
                neg(('blank', 'bx', 'py')),
                neg(('at', 't', 'px', 'py')),
                ('blank', 'px', 'py'),
                ('at', 't', 'bx', 'py'),
            ),
        ),
    ))
    # Initial state is the "hard" 31-move instance from Reinefeld (1993);
    # the goal is the standard ordered configuration.
    problem = Problem(
        domain,
        {
            'tile': (1, 2, 3, 4, 5, 6, 7, 8),
            'position': (1, 2, 3),
        },
        init=(
            ('inc', 1, 2),
            ('inc', 2, 3),
            ('dec', 3, 2),
            ('dec', 2, 1),
            ('at', 8, 1, 1),
            ('at', 7, 2, 1),
            ('at', 6, 3, 1),
            ('blank', 1, 2),
            ('at', 4, 2, 2),
            ('at', 1, 3, 2),
            ('at', 2, 1, 3),
            ('at', 5, 2, 3),
            ('at', 3, 3, 3),
        ),
        goal=(
            ('blank', 1, 1),
            ('at', 1, 2, 1),
            ('at', 2, 3, 1),
            ('at', 3, 1, 2),
            ('at', 4, 2, 2),
            ('at', 5, 3, 2),
            ('at', 6, 1, 3),
            ('at', 7, 2, 3),
            ('at', 8, 3, 3),
        )
    )

    # Map each tile to its (column, row) from a state's 'at' predicates.
    def to_coordinates(state):
        grid = {}
        for p in state:
            if p[0] == 'at':
                grid[p[1]] = (p[2], p[3])
        return grid

    goal_coords = to_coordinates(problem.goals)

    # Admissible heuristic: sum of Manhattan distances of all tiles from
    # their goal squares.
    def manhattan_distance_heuristic(state):
        state_coords = to_coordinates(state.predicates)
        dist = 0
        for k in goal_coords.keys():
            c1, r1 = goal_coords[k]
            c2, r2 = state_coords[k]
            dist += (abs(c1 - c2) + abs(r1 - r2))
        return dist

    plan = planner(problem, heuristic=manhattan_distance_heuristic, verbose=verbose)
    if plan is None:
        print 'No Plan!'
    else:
        for action in plan:
            print action
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser(usage="Usage: %prog [options]")
parser.add_option('-q', '--quiet',
action='store_false', dest='verbose', default=True,
help="don't print statistics to stdout")
# Parse arguments
opts, args = parser.parse_args()
problem(opts.verbose)
| Python | 0.998914 | |
9bffe981c018213b87d015a20603c092567bbdf4 | Initialize multiple class setup; add remaining APIs | cobaltuoft/cobalt.py | cobaltuoft/cobalt.py | from .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
    """Thin client for the Cobalt UofT open-data API."""

    def __init__(self, api_key=None):
        """Validate *api_key* against the API and set up request state."""
        self.host = 'http://cobalt.qas.im/api/1.0'
        self.headers = {'Referer': 'Cobalt-UofT-Python'}

        if not api_key or not self._is_valid_key(api_key):
            raise ValueError('Expected valid API key.')

        self.headers['Authorization'] = api_key
        self.filter_map = scrape_filters()

    def _get(self, url, params=None):
        # All HTTP traffic funnels through here so headers apply uniformly.
        return get(url=url, params=params, headers=self.headers)

    def _is_valid_key(self, key):
        # A valid key probed against the API root yields a 404 "Not Found"
        # response; anything else means the key was rejected.
        probe = self._get(self.host, params={'key': key})
        return probe.reason == 'Not Found' and probe.status_code == 404

    def _run(self, api, endpoint=None, params=None):
        response = Endpoints.run(api=api,
                                 endpoint=endpoint,
                                 params=params,
                                 map=self.filter_map[api],
                                 get=self._get)
        return response.json()

    def athletics(self, endpoint=None, params=None):
        return self._run(api='athletics', endpoint=endpoint, params=params)

    def buildings(self, endpoint=None, params=None):
        return self._run(api='buildings', endpoint=endpoint, params=params)

    def courses(self, endpoint=None, params=None):
        return self._run(api='courses', endpoint=endpoint, params=params)

    def food(self, endpoint=None, params=None):
        return self._run(api='food', endpoint=endpoint, params=params)

    def textbooks(self, endpoint=None, params=None):
        return self._run(api='textbooks', endpoint=endpoint, params=params)
| Python | 0 | |
54864841267c4d2cb53ce581c05d8ba9c15eef0c | Add lexer | balloon.py | balloon.py | from pygments.lexer import *
from pygments.token import *
class CustomLexer(RegexLexer):
    """Pygments lexer for the Balloon language."""
    name = 'Balloon'
    aliases = ['balloon']
    # The Lexer API expects a list of fnmatch patterns here; a bare string
    # was previously iterated character by character.
    filenames = ['*.bl']

    tokens = {
        'root': [
            include('keywords'),
            (r'[]{}(),:;[]', Punctuation),
            (r'#.*?$', Comment),
            # Numbers: floats before integers so the '.' is consumed.
            (r'[+-]?[0-9]+\.[0-9]+', Number.Float),
            (r'[+-]?[0-9]+', Number.Integer),
            (r'<=|>=|==|[+*<>=%\-\/]', Operator),
            (r'(and|or|not)\b', Operator.Word),
            (r'".*"', String),
            (r'(var|fn)\b', Keyword.Declaration),
            # Identifiers may end in '!' or '?'.
            (r'[a-zA-Z_][a-zA-Z0-9_]*[!?]?', Name),
            (r'\s+', Text)
        ],
        'keywords': [
            (words((
                'if', 'else', 'loop', 'break', 'continue', 'return',
                'Number', 'Bool', 'String', 'Function', 'Tuple',
                'any', 'void', 'true', 'false'), suffix=r'\b'),
                Keyword),
        ],
    }
| Python | 0.000001 | |
d0306518dcc395a051460115d7ef9488f26426cc | Add paper shortening tool: input text, output shorter text | shorten-pdf/shorten.py | shorten-pdf/shorten.py | #!/usr/bin/python
import sys
LONG_PARAGRAPH_THRESH = 400
LONG_START_LEN = 197
LONG_END_LEN = 197
if len(sys.argv) < 2:
print 'Give me a text file as an argument.'
sys.exit(0)
f = open(sys.argv[1]) # open file
t = f.read() # read text
ps = t.split('\n\n') # get paragraphs
ps_ = [] # shortened paragraphs go here
for p in ps:
if len(p) < LONG_PARAGRAPH_THRESH:
ps_.append(p)
continue
ss = p.split('. ') # get sentences
ss_ = [] # short paragraph sentences go here
totlen = 0 # total length of accepted sentences
for s in ss:
if totlen + len(s) > LONG_START_LEN:
ss_.append(s[:LONG_START_LEN - totlen] + "..")
break;
ss_.append(s)
totlen += len(s)
index = len(ss_) # index to insert end sentences
totlen = 0
ss.reverse()
for s in ss:
if totlen + len(s) > LONG_END_LEN:
ss_.insert(index, "..." + s[len(s) - (LONG_END_LEN - totlen):])
break;
ss_.insert(index, s)
totlen += len(s)
p_ = '. '.join(ss_)
ps_.append(p_)
t_ = '\n\n'.join(ps_)
print t_
| Python | 0.999999 | |
0267ada9eed8c9759c4fe5ec5b4cd184bc2d5de1 | Create ode.py | ode.py | ode.py | import sys
def rk4(func, x, y, step, xmax):
    """
    Generator yielding [x, y] points of y' = f(x, y) integrated with the
    classical 4th-order Runge-Kutta scheme.

    @param func: right-hand side f(x, y) of the differential equation
    @type func: function
    @param x: initial x value, which is usually the starting time
    @type x: float
    @param y: initial y value
    @type y: float
    @param step: integration step size along the x-axis
    @type step: float
    @param xmax: maximum x value, which is usually the ending time
    @type xmax: float
    """
    yield [x, y]
    while x < xmax:
        # Four slope samples: start, two midpoints, end of the interval.
        k1 = func(x, y)
        k2 = func(x+0.5*step, y+0.5*step*k1)
        k3 = func(x+0.5*step, y+0.5*step*k2)
        k4 = func(x+step, y+step*k3)
        x += step
        y += step*(k1+2.0*k2+2.0*k3+k4)/6.0
        yield [x, y]
def boundary_checker(y, boundary, type):
for k in boundary.keys():
if y[int(k)] < boundary[k][0] and type == 'lower':
y[int(k)] = boundary[k][1]
if y[int(k)] > boundary[k][0] and type == 'higher':
y[int(k)] = boundary[k][1]
return y
def multirk4(funcs, x0, y0, step, xmax,
             lower_bound=None, upper_bound=None):
    """
    Integrates a system of ODEs, y' = f(x, y), using fourth
    order Runge-Kutta method.

    @param funcs: system of differential equations
    @type funcs: list
    @param x0: initial value of x-axis, which is usually starting time
    @type x0: float
    @param y0: initial values for variables
    @type y0: list
    @param step: step size on the x-axis (also known as step in calculus)
    @type step: float
    @param xmax: maximum value of x-axis, which is usually ending time
    @type xmax: float
    @param lower_bound: optional map of variable index (as key) to a
        (threshold, reset) pair; values below the threshold are reset
    @param upper_bound: optional map of variable index (as key) to a
        (threshold, reset) pair; values above the threshold are reset
    """
    n = len(funcs)
    yield [x0] + y0
    # Slope buffers for the four RK4 stages, one entry per equation.
    f1, f2, f3, f4 = [0]*n, [0]*n, [0]*n, [0]*n
    # Large sentinel substituted when a stage overflows or divides by zero.
    max = 1e100
    while x0 < xmax:
        y1 = [0]*n
        # Stage 1: slopes at (x0, y0).
        # NOTE(review): a TypeError leaves the previous iteration's slope
        # in place instead of recomputing it -- confirm this is intended.
        for i in range(n):
            try: f1[i] = funcs[i](x0, y0)
            except TypeError: pass
            except ZeroDivisionError: f1[i] = max
            except OverflowError: f1[i] = max
        for j in range(n):
            y1[j] = y0[j] + (0.5*step*f1[j])
        # Stage 2: slopes at the midpoint using stage-1 estimates.
        for i in range(n):
            try: f2[i] = funcs[i]((x0+(0.5*step)), y1)
            except TypeError: pass
            except ZeroDivisionError: f2[i] = max
            except OverflowError: f2[i] = max
        for j in range(n):
            y1[j] = y0[j] + (0.5*step*f2[j])
        # Stage 3: slopes at the midpoint using stage-2 estimates.
        for i in range(n):
            try: f3[i] = funcs[i]((x0+(0.5*step)), y1)
            except TypeError: pass
            except ZeroDivisionError: f3[i] = max
            except OverflowError: f3[i] = max
        for j in range(n):
            y1[j] = y0[j] + (step*f3[j])
        # Stage 4: slopes at the end of the interval.
        for i in range(n):
            try: f4[i] = funcs[i]((x0+step), y1)
            except TypeError: pass
            except ZeroDivisionError: f4[i] = max
            except OverflowError: f4[i] = max
        x0 = x0 + step
        # Weighted RK4 combination of the four stages.
        for i in range(n):
            y1[i] = y0[i] + (step * \
                (f1[i] + (2.0*f2[i]) + (2.0*f3[i]) + f4[i]) / 6.0)
        # Clamp variables that fell outside the optional bounds.
        if lower_bound:
            y1 = boundary_checker(y1, lower_bound, 'lower')
        if upper_bound:
            y1 = boundary_checker(y1, upper_bound, 'upper')
        y0 = y1
        yield [x0] + y1
| Python | 0.000002 | |
316a82c5465a13770404b6a302348f192618cd27 | Add an interface for eagerly evaluating command graph elements | libqtile/command_interface.py | libqtile/command_interface.py | # Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
    """
    Defines an interface which can be used to eagerly evaluate a given call on
    a command graph.  The implementations of this may use an IPC call to access
    the running qtile instance remotely, or directly access the qtile instance
    from within the same process.
    """

    @abstractmethod
    def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
        """Execute the given call, returning the result of the execution

        Perform the given command graph call, calling the function with the
        given arguments and keyword arguments.

        Parameters
        ----------
        call: CommandGraphCall
            The call on the command graph that is to be performed.
        args:
            The arguments to pass into the command graph call.
        kwargs:
            The keyword arguments to pass into the command graph call.

        Returns
        -------
        Any
            The value returned by the underlying command.
        """
        pass  # pragma: no cover
| Python | 0 | |
dff1f9176d7ce77a242263bfc9a0760cd31f0585 | Add a prototype for cached regex.compile() | regex_proxy.py | regex_proxy.py | from regex import *
from regex import compile as raw_compile
# Pattern-string -> compiled-pattern cache for the wrapped regex.compile.
_cache = {}

# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
    """Compile *s* with regex.compile, memoizing results by pattern string.

    Note: the cache key is the pattern alone, so calls that differ only in
    flags or extra arguments share the first compiled result.

    Fix: the signature previously read ``*args, **args`` (a duplicate
    parameter name, which is a SyntaxError) while the body used ``kwargs``.
    """
    try:
        return _cache[s]
    except KeyError:
        r = raw_compile(s, *args, **kwargs)
        _cache[s] = r
        return r
| Python | 0.000001 | |
231e19ed29314bc0d9aad3cd1d69b757364fce7d | Create pms.py | pms.py | pms.py | import serial
# we stop terminal with raspi-config,
# we stop bluethooth from /boot/config.txt first,
# and currently UART device is /dev/ttyAMAO,
# but we still cannot read data from device
# failure devices
#dev = "ttyS0"
# work devices
#dev = "ttyAMA0"
#dev = "serial0"
dev = "ttyUSB0"
ser = serial.Serial(port="/dev/"+dev,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS, timeout=2)
while True:
data = ser.read()
print str(data), len(data)
ser.close()
| Python | 0 | |
dd1e3a665298a616d9b78f0c019288a9d6d883b8 | Add unit tests for the OfficeAdminExtraGeoLocation model | labonneboite/tests/app/test_models.py | labonneboite/tests/app/test_models.py | # coding: utf8
import unittest
from labonneboite.common.models import OfficeAdminExtraGeoLocation
class OfficeAdminExtraGeoLocationTest(unittest.TestCase):
    """
    Tests for the OfficeAdminExtraGeoLocation model.
    """

    def test_codes_as_list(self):
        # Codes separated by arbitrary whitespace/newlines are split and
        # returned (order-insensitive comparison).
        codes = u" 57070\n\n\n\n\n\n  75010 \n   54  "
        codes_as_list = OfficeAdminExtraGeoLocation.codes_as_list(codes)
        self.assertItemsEqual(codes_as_list, [u'54', u'57070', u'75010'])
        codes = u"75\r57\n13"
        codes_as_list = OfficeAdminExtraGeoLocation.codes_as_list(codes)
        self.assertItemsEqual(codes_as_list, [u'13', u'57', u'75'])

    def test_codes_as_geolocations(self):
        # A departement code (75) expands to many coordinate pairs, a full
        # zipcode (57070) to a single one.
        codes = u"75\n57070"
        codes_as_geolocations = OfficeAdminExtraGeoLocation.codes_as_geolocations(codes)
        expected = [
            # Found for the departement 75.
            ('48.8264581543', '2.32690527897'),
            ('48.8280603003', '2.3544809727'),
            ('48.8365381105', '2.42075934432'),
            ('48.8421891171', '2.29652252417'),
            ('48.8449537128', '2.37608588424'),
            ('48.846262612', '2.34839040879'),
            ('48.8501003498', '2.33402139523'),
            ('48.8543439464', '2.31294138206'),
            ('48.8553815318', '2.35541102422'),
            ('48.8566390262', '2.25972331102'),
            ('48.8566390262', '2.25972331102'),
            ('48.8590284068', '2.37705679761'),
            ('48.8622892805', '2.36158587519'),
            ('48.8628435865', '2.33807010768'),
            ('48.8643142257', '2.39961435812'),
            ('48.8684296759', '2.34149433888'),
            ('48.8729556556', '2.31369616661'),
            ('48.8758285242', '2.33869789273'),
            ('48.8761941084', '2.36107097577'),
            ('48.8878020912', '2.30862255671'),
            ('48.8928608126', '2.3479701879'),
            ('49.157869706', '6.2212499254'),
            # Found for 57070.
            ('48.8840228115', '2.38234715656'),
        ]
        self.assertItemsEqual(expected, codes_as_geolocations)

    def test_codes_as_json_geolocations(self):
        codes = u"75010"
        codes_as_json_geolocations = OfficeAdminExtraGeoLocation.codes_as_json_geolocations(codes)
        expected = '[["48.8761941084", "2.36107097577"]]'
        self.assertEqual(expected, codes_as_json_geolocations)
| Python | 0 | |
7b1b343c552ee6f124ccceee05f1a6732657c9e1 | Add initial startup program (pox.py) | pox.py | pox.py | #!/usr/bin/python
from pox.core import core
import pox.openflow.openflow
import pox.topology.topology
import pox.openflow.of_01
import pox.dumb_l3_switch.dumb_l3_switch
# Set default log level
import logging
logging.basicConfig(level=logging.DEBUG)
# Turn on extra info for event exceptions
import pox.lib.revent.revent as revent
revent.showEventExceptions = True
def startup ():
    """Register the core POX components and start the OpenFlow 0x01 layer."""
    core.register("topology", pox.topology.topology.Topology())
    core.register("openflow", pox.openflow.openflow.OpenFlowHub())
    core.register("switch", pox.dumb_l3_switch.dumb_l3_switch.dumb_l3_switch())
    pox.openflow.of_01.start()
if __name__ == '__main__':
try:
startup()
core.goUp()
except:
import traceback
traceback.print_exc()
import code
code.interact('Ready.')
pox.core.core.quit()
| Python | 0.000002 | |
35887b39b0151432423cca7832f1c9bc4ab7d836 | Create OutputNeuronGroup.py | examples/OutputNeuronGroup.py | examples/OutputNeuronGroup.py | '''
Example of a spike receptor (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    """Build the user network: 45 relay neurons driven 1:1 by the UDP input.

    Returns ([neuron groups], [synapses], [monitors]) as required by
    BrianConnectUDP; the first neuron group is where output spikes are
    taken from.
    """
    print "main_NeuronGroup!" #DEBUG!
    simclock = simulation_clock

    # Simple integrate-to-threshold relay neurons.
    Nr=NeuronGroup(45, model='v:1', reset=0, threshold=0.5, clock=simclock)
    Nr.v=0

    # SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
    # One-to-one connectivity ('i==j') so each input neuron drives exactly
    # one relay neuron.
    Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)

    Syn_iNG_Nr[:,:]='i==j'

    print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!

    # Weight 1 makes every input spike push the target over threshold 0.5.
    Syn_iNG_Nr.w=1

    MExt=SpikeMonitor(Nr) # Spikes sent by UDP
    Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP

    return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
    """
    input_NG: the neuron group that receives the input spikes
    simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
    simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
    simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)

    Draws a raster plot of the spikes received over UDP (the second
    monitor returned by main_NeuronGroup) once the simulation finishes.
    """
    figure()
    raster_plot(simulation_MN[1])
    title("Spikes Received by UDP")
    show(block=True)
if __name__=="__main__":
# my_simulation = BrainConnectUDP(main_NeuronGroup, NumOfNeuronsInput=45, post_simulation_function=post_simulation_function,
# UDP_IPI="192.168.1.123", UDP_PORTI=20202, simclock_dt=5, inputclock_dt=5, TotalSimulationTime=5000, sim_repetitions=0)
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=45, post_simulation_function=post_simulation_function,
UDP_IPI="127.0.0.1", UDP_PORTI=10101, simclock_dt=5, inputclock_dt=5, TotalSimulationTime=5000, sim_repetitions=0)
| Python | 0 | |
3834af9b3a6381ac7a2334c7bd2ae6d562e0f20b | Create HR_pythonIsLeap.py | HR_pythonIsLeap.py | HR_pythonIsLeap.py | def is_leap(year):
leap = False
# Write your logic here
# thought process
#if year%4==0:
# return True
#elif year%100==0:
# return False
#elif year%400==0:
# return True
# Optimized, Python 3
return ((year%4==0)and(year%100!=0)or(year%400==0))
| Python | 0.000764 | |
b5cc6ead2e17ef54612b3072c7991166955bee77 | Add user commands | dropbox-cli.py | dropbox-cli.py | #!/usr/bin/env python
import os
import logging
import dropbox
import argparse
APP_NAME = "dropbox-static-cli"
DEFAULT_KEY_PATH = "{}/.dropbox-static-cli-key".format(os.environ["HOME"])
L = None
def parse_arguments():
    """Build the CLI parser, parse sys.argv and dispatch to the handler.

    Each subparser stores its handler via set_defaults(func=...), and the
    chosen handler is ultimately invoked by global_init as
    ``args.func(args, dbx)``.
    """
    parser = argparse.ArgumentParser(
        prog="dropbox-static-cli",
        description="A command line tool for interfacing with Dropbox without the need for local sync storage",
        epilog="Note: Put your API key in {} to avoid having to pass in --api-key with every command!".format(DEFAULT_KEY_PATH)
    )
    parser.add_argument("-v", "--verbose", action="count", default=0, help="Verbose output")
    parser.add_argument("-k", "--api-key", default=DEFAULT_KEY_PATH, help="Dropbox API key")
    parser.set_defaults(func=exec_default)

    subparsers = parser.add_subparsers(title="Available subcommands")

    parser_list = subparsers.add_parser("list", help="List items in Dropbox")
    parser_list.add_argument("DROPBOX_PATH")
    parser_list.add_argument("-m", "--more", action="count", help="Display more pages (if available)")
    parser_list.set_defaults(func=exec_list)

    parser_get = subparsers.add_parser("get", help="Download items from Dropbox")
    parser_get.add_argument("-o", "--output", required=True, help="Save path for the downloaded file")
    parser_get.add_argument("DROPBOX_PATH", help="Path inside your Dropbox")
    parser_get.set_defaults(func=exec_get)

    parser_put = subparsers.add_parser("put", help="Upload items to Dropbox")
    parser_put.add_argument("-f", "--file", required=True, help="File to upload")
    parser_put.add_argument("DROPBOX_PATH", help="Path inside your Dropbox")
    parser_put.set_defaults(func=exec_put)

    parser_info = subparsers.add_parser("info", help="Dropbox account information")
    parser_info_sub = parser_info.add_subparsers(title="Available subcommands")
    parser_info_sub.add_parser("user", help="User information").set_defaults(func=exec_info_user)
    parser_info_sub.add_parser("quota", help="Quota information").set_defaults(func=exec_info_quota)

    args = parser.parse_args()
    return global_init(args)
def global_init(args):
    """Configure logging, build the Dropbox client, and dispatch.

    Verbosity: 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG. Every subcommand
    handler is invoked as ``args.func(args, dbx)``.
    """
    global L
    log_level = logging.WARNING
    if (args.verbose == 1): log_level = logging.INFO
    if (args.verbose > 1) : log_level = logging.DEBUG
    init_logger(log_level)
    dbx = init_dropbox(parse_key(args.api_key))
    return args.func(args, dbx)
def init_logger(log_level):
    """Create the module-wide logger *L* with a stream handler at *log_level*."""
    global L
    L = logging.getLogger(APP_NAME)
    L.setLevel(log_level)
    handler = logging.StreamHandler()
    handler.setLevel(log_level)
    handler.setFormatter(
        logging.Formatter("%(asctime)s [%(levelname)-5s]: %(message)s"))
    L.addHandler(handler)
    L.debug("Logger initialized")
def parse_key(key):
    """Resolve *key* to an API key string.

    If *key* names an existing file, its stripped contents are returned;
    otherwise *key* itself is treated as the raw API key.
    """
    global L
    if os.path.isfile(key):
        L.info("Using supplied key as a file - '{}'".format(key))
        with open(key) as key_file:
            return key_file.read().strip()
    L.info("Supplied key is not a valid file, using as a raw Dropbox API key - '{}'".format(key))
    return key
def init_dropbox(key):
    """Return a Dropbox client authenticated with *key*."""
    global L
    L.info("Initializing Dropbox instance with key '{}'".format(key))
    return dropbox.Dropbox(key)
def exec_default(args):
print "Executing no command"
print args
def exec_list(args, dbx):
    """Handler for the LIST subcommand (listing not yet implemented)."""
    print "Executing LIST command"
    print args
def exec_get(args):
print "Executing GET command"
print args
def exec_put(args):
print "Executing PUT command"
print args
def exec_info_user(args, dbx):
    """Print details of the authenticated Dropbox account.

    :param args: parsed CLI arguments (unused here).
    :param dbx: authenticated dropbox.Dropbox client.
    """
    global L
    L.info("Executing INFO-USER command")
    user = dbx.users_get_current_account()
    # print(<single expression>) is valid under both Python 2 and 3.
    print("""\
User ID : {}
Account type : {}
Display Name : {}
Familiar Name: {}
First Name : {}
Last Name : {}
E-Mail : {}
Verified : {}
Disabled : {}
Referral link: {}\
""".format(
        user.account_id,
        user.account_type._tag,
        user.name.display_name,
        user.name.familiar_name,
        user.name.given_name,
        user.name.surname,
        user.email,
        user.email_verified,
        user.disabled,
        user.referral_link
    ))
def exec_info_quota(args, dbx):
    """Print space-usage information for the authenticated account.

    Only individual accounts are handled; team allocations are rejected.
    """
    L.info("Executing INFO-QUOTA command")
    usage = dbx.users_get_space_usage()
    if usage.allocation.is_individual():
        # print(<single expression>) is valid under both Python 2 and 3.
        print("Allocated: {:.2f}MB".format(usage.allocation.get_individual().allocated / 1024.0 / 1024.0))
        print("Used : {:.2f}MB".format(usage.used / 1024.0 / 1024.0))
    else:
        # Team space accounting has a different shape; not supported here.
        L.error("Team accounts are not supported")
def main():
    """Entry point: run the selected command, mapping known Dropbox API
    failures onto log messages instead of tracebacks."""
    try:
        parse_arguments()
    except dropbox.exceptions.AuthError:
        L.error("Authentication error")
    except dropbox.exceptions.BadInputError as e:
        L.error("Invalid input: {}".format(e.message))
if (__name__ == "__main__"):
main()
| Python | 0.000008 | |
d160d73740c73e2cab8325179e7f0a9ee4ae8c50 | add disk_usage.py example script | examples/disk_usage.py | examples/disk_usage.py | #!/usr/bin/env python
"""
List all mounted disk partitions a-la "df" command.
"""
import sys
import psutil
def convert_bytes(n):
    """Format a byte count with a binary-prefix suffix.

    Returns e.g. '0B', '512B', '1.0k', '2.3M'.

    :param n: non-negative number of bytes.
    """
    if n == 0:
        return "0B"
    symbols = ('k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    prefix = {}
    for i, s in enumerate(symbols):
        prefix[s] = 1 << (i+1)*10
    for s in reversed(symbols):
        if n >= prefix[s]:
            value = float(n) / prefix[s]
            return '%.1f%s' % (value, s)
    # Bug fix: the original fell off the end (returning None) for
    # 0 < n < 1024; report such values as plain bytes instead.
    return '%dB' % n
def main():
    """Print a df(1)-style usage table for every mounted partition."""
    # Parenthesized single-expression print() is valid under both
    # Python 2 and Python 3, unlike the old print statement.
    print("Device Total Used Free Use % Type Mount")
    for part in psutil.disk_partitions(0):
        usage = psutil.disk_usage(part.mountpoint)
        print("%-9s %8s %8s %8s %5s%% %8s %s" % (part.device,
                                                 convert_bytes(usage.total),
                                                 convert_bytes(usage.used),
                                                 convert_bytes(usage.free),
                                                 int(usage.percent),
                                                 part.fstype,
                                                 part.mountpoint))
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000001 | |
7475b73072f0037fc53bcae59e331c4d5a997e86 | Add auto-fill test cases | depot/tests/test_checkout.py | depot/tests/test_checkout.py | from django.contrib.auth.models import User
from depot.models import Depot, Organization
from verleihtool.test import ClientTestCase
class AutoFillTestCase(ClientTestCase):
"""
Test cases asserting the auto-fill functionality for checkout-form
:author: Stefan Su
"""
def setUp(self):
super(AutoFillTestCase, self).setUp()
organization = Organization.objects.create()
self.depot = Depot.objects.create(
name='My Depot',
organization=organization
)
def test_logged_in_autofill_username(self):
response = self.as_user.get('/depots/%d/' % self.depot.id)
self.assertInHTML(
'<input type="text" class="form-control" id="id_username" name="name" value="user">',
response.content.decode()
)
def test_not_logged_in_no_autofill(self):
response = self.as_guest.get('/depots/%d/' % self.depot.id)
self.assertInHTML(
str('<input type="text" class ="form-control" id="id_username" name="name" value="">'),
response.content.decode()
)
| Python | 0.000001 | |
0caa9035e06e6596a295ed2ed0a6238a2b09f353 | add PCA and TSNE representation | utils/postprocessing/representation.py | utils/postprocessing/representation.py | import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
def PCA_representation(data, n_components):
    """Project `data` onto its first `n_components` principal components."""
    return PCA(n_components=n_components).fit_transform(data)
def TSNE_representation(data, n_components):
    """Embed `data` into `n_components` dimensions with t-SNE (fixed seed 0
    so repeated runs give the same layout)."""
    return TSNE(n_components=n_components, random_state=0).fit_transform(data)
def plot_PCA(data, n_components, name='PCA Representation'):
    """Scatter-plot the first two PCA components of `data`.

    Fixes an unfinished stub: the reduction was computed and discarded and
    `name` was never used.

    :param data: array-like of shape (n_samples, n_features).
    :param n_components: number of components to compute (>= 2 to plot).
    :param name: figure title.
    """
    reduced = PCA_representation(data, n_components)
    plt.figure()
    plt.scatter(reduced[:, 0], reduced[:, 1])
    plt.title(name)
    plt.show()
def plot_TSNE(data, n_components, name='TSNE Representation'):
    """Scatter-plot the first two t-SNE dimensions of `data`.

    Fixes an unfinished stub: the embedding was computed and discarded and
    `name` was never used.

    :param data: array-like of shape (n_samples, n_features).
    :param n_components: embedding dimensionality (>= 2 to plot).
    :param name: figure title.
    """
    embedded = TSNE_representation(data, n_components)
    plt.figure()
    plt.scatter(embedded[:, 0], embedded[:, 1])
    plt.title(name)
    plt.show()
| Python | 0 | |
3ee47b0adbc379d77f01df51927399ecf3fb24e6 | Add docstring and comment. | examples/mnist-autoencoder.py | examples/mnist-autoencoder.py | #!/usr/bin/env python
'''Single-layer autoencoder example using MNIST digit data.
This example shows one way to train a single-layer autoencoder model using the
handwritten MNIST digits.
This example also shows the use of climate command-line arguments.
'''
import climate
import matplotlib.pyplot as plt
import theanets
from utils import load_mnist, plot_layers, plot_images
g = climate.add_group('MNIST Example')
g.add_argument('--features', type=int, default=8, metavar='N',
help='train a model using N^2 hidden-layer features')
def main(args):
    """Train a single-layer autoencoder on MNIST and visualize the result.

    :param args: climate-parsed CLI namespace; `args.features` sets the
        hidden layer to features**2 units.
    """
    # load up the MNIST digit dataset.
    train, valid, _ = load_mnist()
    # 784-in / 784-out tied to the 28x28 pixel images; hidden layer is N^2.
    e = theanets.Experiment(
        theanets.Autoencoder,
        layers=(784, args.features ** 2, 784))
    # Denoising setup: corrupt inputs slightly and L2-penalize the weights.
    e.train(train, valid,
            input_noise=0.1,
            weight_l2=0.0001,
            algorithm='rmsprop',
            momentum=0.9,
            min_improvement=0.1)
    # Show the learned encoder/decoder weight matrices as image tiles.
    plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
    plt.tight_layout()
    plt.show()
    # Compare 100 validation digits against their reconstructions.
    v = valid[:100]
    plot_images(v, 121, 'Sample data')
    plot_images(e.network.predict(v), 122, 'Reconstructed data')
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
climate.call(main)
| #!/usr/bin/env python
import climate
import matplotlib.pyplot as plt
import theanets
from utils import load_mnist, plot_layers, plot_images
g = climate.add_group('MNIST Example')
g.add_argument('--features', type=int, default=8, metavar='N',
help='train a model using N^2 hidden-layer features')
def main(args):
    """Train a single-layer MNIST autoencoder and plot weights/reconstructions.

    :param args: climate-parsed CLI namespace; `args.features` sets the
        hidden layer to features**2 units.
    """
    train, valid, _ = load_mnist()
    # 784-in / 784-out matches the 28x28 pixel images; hidden layer is N^2.
    e = theanets.Experiment(
        theanets.Autoencoder,
        layers=(784, args.features ** 2, 784))
    e.train(train, valid,
            input_noise=0.1,
            weight_l2=0.0001,
            algorithm='rmsprop',
            momentum=0.9,
            min_improvement=0.1)
    # Visualize learned weights, then sample reconstructions.
    plot_layers([e.network.find('hid1', 'w'), e.network.find('out', 'w')])
    plt.tight_layout()
    plt.show()
    v = valid[:100]
    plot_images(v, 121, 'Sample data')
    plot_images(e.network.predict(v), 122, 'Reconstructed data')
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
climate.call(main)
| Python | 0 |
239e8062329c0303776326ccc2c272cccb25a9d0 | add table iterator for cursor like functionality over C* | cassandra/cqlengine/table_iterator.py.py | cassandra/cqlengine/table_iterator.py.py | class TableIterator(object):
"""
Iterates over a Cassandra table defined by a cqlengine model class using query paging in order to pull back chunks
of data that have `blocksize` number of records.
Can optionally provide kwargs which are used as where clause filters. These kwargs must be columns on the model
which are indexed.
:param model_class: The cqlengine model object that defines the table you want to iterate over.
:type model_class: An instance of cqlengine.Model.
:param blocksize: The number of results you want to pull back with each paging query. Can be used to tune the
performance of the iteration depending on the characteristics of the table.
:type blocksize: integer
:param where_filters: Keyword arguments can be passed to the iterator and these will be used as filter parameters
when querying the database for pages of data.
:type where_filters: **kwargs
:return: an iterator over the a collection of objects of type model_class.
"""
    def __init__(self, model_class, blocksize=10000, **where_filters):
        # The cqlengine model whose table is iterated.
        self.model_class = model_class
        # Pull the keys of the model class for convenient reference.
        self.partition_keys = model_class._partition_keys
        self.clustering_keys = model_class._clustering_keys
        # Number of rows fetched per paging query.
        self.blocksize = blocksize
        # Extra filters applied to every page query (must hit indexed columns).
        self.where_filters = where_filters
@classmethod
def generate_where_clause_key(cls, column_name, clause_condition):
"""
Takes the name of a primary key column and a condition ('gt', 'lt', 'eq') and creates a where clause key that
can be used with a cqlengine .objects() descriptor to filter based on that condition.
:param key_name: The name of the model column (primary key) for which you want to generate a where clause.
:type key_name: The string representation of the column name.
:param clause_condition: The conditional operator you want to filter by.
:type clause_condition: A string that is a valid cqlengine conditional operator ('gt', 'lt', 'eq')
:return: A string of the form "{my_column_name}__{my_clause_condition}".
"""
return "{}__{}".format(column_name, clause_condition)
@classmethod
def get_paging_where_clause_key(cls, primary_key_column):
"""
Get a where clause key value that can be used to page through the primary key column.
:param primary_key_column: A primary key column class you want a key to page over.
:type primary_key_column: An class that inherits from cqlengine.Column.
:return: A string of the format "{column_name}__{where_condition}" which will page data from that column in the
direction defined by the clustering order of that column.
For example, if I have a clustering key named `my_cluster_key` which has a descending clustering order,
this function will return the key 'my_cluster_key__lt' which can be used to page over the value of
my_cluster_key in it's default order.
"""
condition_lookup = {"asc": "gt", "desc": "lt"}
clustering_order = getattr(primary_key_column, "clustering_order") or "asc"
clause_condition = condition_lookup[clustering_order.lower()]
return cls.generate_where_clause_key(primary_key_column.column_name, clause_condition)
    def get_next_query_set(self, previous_object):
        """
        Takes a cqlengine model object instance and treats that object as the current cursor into a Cassandra table
        generating a cqlengine query object which will page in the set of results immediately following
        `previous_object` according to Cassandra partition tokens and clustering order.

        :param previous_object: The last object fetched by the previous paging query. Can also be viewed as the cursor
        location for this table iteration.
        :type previous_object: A cqlengine model object instance.
        :return: A cqlengine QuerySet object that will return the objects immediately following `previous_object` in the
        Cassandra table.
        """
        prev_partition_key_vals = {}
        prev_clustering_key_vals = {}
        # Pull all of the key values off of previous_object
        for p_key_name, _ in self.partition_keys.items():
            prev_partition_key_vals[p_key_name] = getattr(previous_object, p_key_name)
        for c_key_name, _ in self.clustering_keys.items():
            prev_clustering_key_vals[c_key_name] = getattr(previous_object, c_key_name)
        # Copy the clustering keys dict since we want to use the values it contains as **kwargs to a QuerySet. We need
        # to alter the values without clobbering the original values.
        cluster_where_clause = dict(prev_clustering_key_vals.items())
        # Iterator over the ordered clustering keys in reverse order.
        # NOTE(review): relies on self.clustering_keys preserving declaration
        # order and items() being reversible — Python 2 semantics; confirm.
        for c_key_name, c_key_col in reversed(self.clustering_keys.items()):
            # Drop the equals clause for the current clustering key because we want a paging conditional ('gt' or 'lt').
            del cluster_where_clause[c_key_name]
            # Generate a paging clause for this clustering key that we will use as a where clause filter.
            new_where_key = self.get_paging_where_clause_key(c_key_col)
            cluster_where_clause[new_where_key] = prev_clustering_key_vals[c_key_name]
            # Generate our new where clause consisting of the current partition, our paging clustering conditions and
            # any where_filters that were originally handed to TableIterator.
            # NOTE(review): dict(a.items() + b.items()) concatenates lists and
            # is Python 2 only; items() views do not support + on Python 3.
            where_clause = dict(prev_partition_key_vals.items() + cluster_where_clause.items() + self.where_filters.items())
            current_query = self.model_class.objects(**where_clause).limit(self.blocksize)
            # TODO: Can we optimize to return results from this function rather than doing garbage query round trip?
            if current_query.first():
                # This query returns objects, so it's a valid page and we want to use it.
                return current_query
            else:
                # This query returned nothing so we have exhausted the clustering key we are currently looking at.
                # Drop the clause for this clustering key and continue to the next one.
                del cluster_where_clause[new_where_key]
        # We made it through testing all of the clustering key values and got no results so we have exhausted the
        # current partition we are looking at.
        # Generate the partition key token for the last seen object.
        # NOTE(review): `cqlengine` is not imported anywhere visible in this
        # module — confirm the import exists or this line raises NameError.
        token = cqlengine.Token(previous_object.pk)
        # Create a where clause for the partition key token.
        pk_token_where = self.generate_where_clause_key('pk__token', 'gt')
        partition_key_clause = {pk_token_where: token}
        where_clause = dict(partition_key_clause.items() + self.where_filters.items())
        query = self.model_class.objects(**where_clause).limit(self.blocksize)
        return query
def __iter__(self):
"""
Returns an iterator over the objects that exist in the table passed into __init__.
"""
done_iterating = False
query = self.model_class.objects(**self.where_filters).limit(self.blocksize)
while not done_iterating:
previous_object = None
for obj in query:
previous_object = obj
yield obj
if not previous_object is None:
query = self.get_next_query_set(previous_object)
else:
done_iterating = True
| Python | 0 | |
09112412a4814e3727def2547765546bf44c1e7d | Test joint refinement of 300 cspad images using Brewster 2018 methods. | test/algorithms/refinement/test_cspad_refinement.py | test/algorithms/refinement/test_cspad_refinement.py | # Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
    """
    Refinement test of 300 CSPAD images, testing auto_reduction, parameter
    fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
    README in the regression folder for more details.
    """
    from scitbx import matrix
    data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
    # Run dials.refine as a subprocess against the regression inputs.
    result = procrunner.run_process([
        "dials.refine",
        os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
        os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
        os.path.join(data_dir, "refine.phil"),
    ])
    assert result['exitcode'] == 0
    assert result['stderr'] == ''
    # load results
    reg_exp = ExperimentListFactory.from_json_file(
        os.path.join(data_dir, "regression_experiments.json"),
        check_format=False)
    ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
        check_format=False)
    # compare results
    tol = 1e-5
    for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
        assert b1.is_similar_to(b2, wavelength_tolerance=tol,
                                direction_tolerance=tol,
                                polarization_normal_tolerance=tol,
                                polarization_fraction_tolerance=tol)
        s0_1 = matrix.col(b1.get_unit_s0())
        s0_2 = matrix.col(b2.get_unit_s0())
        # "accute_angle" is the (misspelled) scitbx API name, not a typo here.
        assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
    for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
        assert c1.is_similar_to(c2)
    for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
        assert d1.is_similar_to(d2,
            fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
| Python | 0 | |
6bac7268df94d73555c0b594c89b4d5ed0bf53ed | Create NN.py | DeepLearning/DeepLearning/04_Deep_LeeWJ/NN.py | DeepLearning/DeepLearning/04_Deep_LeeWJ/NN.py | """
MNIST dataset notes.

Because the dataset file is large, it is downloaded only on the first run;
afterwards the objects are restored with pickle, which keeps start-up fast.
"""
import sys, os
import numpy as np
from mnist import load_mnist
import pickle
sys.path.append(os.pardir) #๋ถ๋ชจ ๋๋ ํฐ๋ฆฌ์ ํ์ผ์ ๊ฐ์ ธ์ฌ ์ ์๋๋ก ์ค์ ํ๋ค.
# load_mnist takes three keyword parameters:
# 1. flatten   --> False keeps each input image as a 3-D array,
#                  True flattens it into a 1-D array
#    (a flattened image can be restored for display with .reshape)
# 2. normalize --> whether to scale pixel values into the range 0..1
# 3. one_hot_label --> one-hot encode labels: 1 at the answer index, 0 elsewhere
# with open('sample_weight.pkl', 'rb') as f:
# network= pickle.load(f)
# print(network)
#
# (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
# Softmax function
def softmax(a):
    """Numerically stable softmax: shift by max(a) before exponentiating so
    exp() cannot overflow, then normalize the result to sum to 1."""
    shifted = a - np.max(a)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
# Sigmoid function
def sigmoid(x):
    """Elementwise logistic sigmoid, mapping the reals into (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
def get_data():
    """Return (x_test, t_test): the normalized, flattened MNIST test split.

    The training split is loaded but deliberately discarded — this script
    only evaluates a pretrained network.
    """
    (_x_train, _t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True,
                                                        one_hot_label=False)
    return x_test, t_test
# Initialize (instantiate) the pretrained weights and biases
def init_network():
    """Load the pretrained weight/bias dictionary from sample_weight.pkl."""
    with open("sample_weight.pkl", 'rb') as f:
        return pickle.load(f)
# Fully assembled network: sigmoid activations in the hidden layers,
# softmax activation at the output layer
def predict(network, x):
    """Forward pass: two sigmoid hidden layers followed by a softmax output.

    :param network: dict mapping 'W1'..'W3' / 'b1'..'b3' to layer parameters.
    :param x: input vector (a flattened image).
    :return: class-probability vector.
    """
    w1, w2, w3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    hidden1 = sigmoid(np.dot(x, w1) + b1)
    hidden2 = sigmoid(np.dot(hidden1, w2) + b2)
    return softmax(np.dot(hidden2, w3) + b3)
# Evaluate classification accuracy of the pretrained network over the whole
# MNIST test set, one image at a time.
x, t = get_data()
network = init_network()
accuracy_cnt = 0
for i in range(len(x)):
    y = predict(network, x[i])
    p= np.argmax(y) # index of the class with the highest probability
    if p == t[i]:
        accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
| Python | 0.000002 | |
99b0aeb3257b8125a30340c06cc1bf834e914461 | add bar_contact.py that was missing | examples/Mechanics/ContactDetection/BulletIO/bar_contact.py | examples/Mechanics/ContactDetection/BulletIO/bar_contact.py | import os,sys
import numpy
import math
import pickle
import random
from siconos.mechanics.contact_detection.tools import Contactor
from siconos.io.mechanics_io import Hdf5
#sys.path.append('../..')
#from mechanics_io import Hdf5
import siconos.numerics as Numerics
import siconos.kernel as Kernel
# WARNING : in 3D by default z-axis is upward
# this is very important to direct PLANx objects
dim = 3
# Slender bar geometry: length : width : height = 100 : 1 : 1 in
# "unscaled" (SI-like) units; everything is later rescaled by `scale`.
unscaled_bar_length=1.5
aspect_ratio=100.0
unscaled_bar_height=unscaled_bar_length/aspect_ratio
unscaled_bar_width=unscaled_bar_length/aspect_ratio
unscaled_volume = unscaled_bar_length*unscaled_bar_height*unscaled_bar_width
unscaled_density=1000
unscaled_mass=unscaled_volume*unscaled_density
print('unscaled_mass',unscaled_mass)
# Normalize lengths so the bar length becomes 1; density is rescaled to
# keep the physical mass consistent under the length rescaling.
scale=1.0/unscaled_bar_length*1.0
density = unscaled_density/(scale**3)
bar_height = unscaled_bar_height*scale
bar_length = unscaled_bar_length*scale
bar_width = unscaled_bar_width*scale
body_collection={}
body_collection['plan_id']= {}
id_plan=0
# scale =1
# mass :3.375000e-01
# Inertia :
# 3.600000e-04, 0.000000e+00, 0.000000e+00,
# 0.000000e+00, 1.195312e-01, 0.000000e+00,
# 0.000000e+00, 0.000000e+00, 1.195312e-01,
#create some bodies
# Creation of the hdf5 file for input/output
with Hdf5() as io:
    volume = bar_height * bar_length * bar_width
    mass = volume*density
    print('mass', mass)
    print('scale', scale)
    # raw_input()
    # Definition of a cube as a convex shape
    io.addConvexShape('Bar', [ (-bar_length, bar_width, -bar_height),
                               (-bar_length, -bar_width, -bar_height),
                               (-bar_length, -bar_width, bar_height),
                               (-bar_length, bar_width, bar_height),
                               ( bar_length , bar_width, bar_height),
                               ( bar_length, bar_width, -bar_height),
                               ( bar_length, -bar_width, -bar_height),
                               ( bar_length ,-bar_width, bar_height)])
    # Drop the bar from 4*scale above the ground, tilted by pi/8 about y
    # (quaternion [cos(a/2), 0, sin(a/2), 0]).
    angle= math.pi/8.0
    trans=[0,0,4.0*scale]
    ori = [math.cos(angle/2.0),0.0,math.sin(angle/2.0),0]
    axis = numpy.zeros(3)
    angle_test = Kernel.getAxisAngle(trans+ori, axis)
    # NOTE(review): Python 2 print statement mixed with print() calls above —
    # this script only runs as-is under Python 2.
    print angle_test,axis
    print('ori initial', ori)
    io.addObject('bar', [Contactor('Bar')],
                 translation=trans,
                 orientation = ori,
                 velocity=[0, 0, 0, 0, 0.0, 0],
                 mass=mass)
    # Definition of the ground shape
    io.addPrimitiveShape('Ground', 'Box', (5*scale, 5*scale, 0.1*scale))
    # Ground tilted by -pi/4 about the x axis (quaternion form).
    angleground= -math.pi/4.0
    axis = [1.0, 0.0, 0.0]
    origround = [math.cos(angleground/2.0),
                 axis[0] * math.sin(angleground/2.0),
                 axis[1] * math.sin(angleground/2.0),
                 axis[2] * math.sin(angleground/2.0)]
    io.addObject('ground', [Contactor('Ground')],
                 translation=[0, 0, 0.0],
                 orientation = origround)
    # Definition of a non smooth law. As no group ids are specified it
    # is between contactors of group id 0.
    io.addNewtonImpactFrictionNSL('contact', mu=0.3)
print body_collection
# Persist the (currently near-empty) body bookkeeping for later tooling.
f = open('body_collection.dict', 'w')
pickle.dump(body_collection,f)
f.close()
# Simulation parameters: 2000 steps of 1 ms; gravity rescaled to match the
# length rescaling above.
step=2000
hstep=0.001
gravity_scale=1.0/scale
import scipy.constants as constants
def apply_forces(body):
    """Apply gravity (rescaled by the module-level `gravity_scale`) as the
    only external force on `body`."""
    gravity = constants.g / gravity_scale
    body.setFExtPtr([0, 0, -body.scalarMass() * gravity])
# Run the simulation from the inputs previously defined and add
# results to the hdf5 file. The visualisation of the output may be done
# with the vview command.
with Hdf5(mode='r+', collision_margin=0.01) as io:
    # By default earth gravity is applied and the units are those
    # of the International System of Units.
    # Because of fixed collision margins used in the collision detection,
    # sizes of small objects may need to be expressed in cm or mm.
    # gravity_scale/apply_forces replace the default gravity with the
    # rescaled version defined above; NSGS is the frictional contact solver.
    io.run(with_timer=False,
           time_stepping=None,
           space_filter=None,
           body_class=None,
           shape_class=None,
           face_class=None,
           edge_class=None,
           gravity_scale=gravity_scale,
           t0=0,
           T=step*hstep,
           h=hstep,
           multipoints_iterations=True,
           theta=1.0,
           Newton_max_iter=10,
           set_external_forces=apply_forces,
           solver=Numerics.SICONOS_FRICTION_3D_NSGS,
           itermax=1000,
           tolerance=1e-12,
           numerics_verbose=False,
           violation_verbose=True,
           output_frequency=10)
| Python | 0 | |
fa521b4358a06d1667864a09cd7195d3a6db764d | Add lc206_reverse_linked_list.py | lc206_reverse_linked_list.py | lc206_reverse_linked_list.py | """206. Reverse Linked List
Easy
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
"""
# Definition for singly-linked list.
class ListNode(object):
    """A node of a singly linked list."""
    def __init__(self, x):
        self.next = None
        self.val = x
class Solution(object):
    def reverseList(self, head):
        """Reverse a singly linked list iteratively.

        Walk the list once, re-pointing each node's `next` at its
        predecessor; `prev` ends up as the new head.  Implements the
        previously empty stub.

        :type head: ListNode
        :rtype: ListNode
        """
        prev = None
        curr = head
        while curr is not None:
            nxt = curr.next
            curr.next = prev
            prev = curr
            curr = nxt
        return prev
def main():
    # Placeholder driver; enable once a ListNode chain `head` is built.
    # print Solution().reverseList(head)
    pass
if __name__ == '__main__':
main()
| Python | 0.000001 | |
2dd6049c1fa9340d14f4b73f843f7ed4408e84f5 | Prepare release script init | utils/create_release.py | utils/create_release.py | #!/usr/bin/env python3
import os
import datetime
import subprocess
from distutils.version import StrictVersion
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def main():
    """Interactively bump the project version.

    Reads VERSION, prompts for a strictly greater version string, stamps
    CHANGELOG.md with the new version and today's date, rewrites VERSION,
    and commits the result with git.  Tagging/pushing (and the dirty-tree
    guard) are present but commented out.
    """
    # git_clean = subprocess.check_output(
    #     "git status --porcelain", shell=True, universal_newlines=True,
    # ).strip()
    # if git_clean:
    #     raise RuntimeError("Error, git workspace is not clean: \n{0}".format(git_clean))
    with open(os.path.join(PROJECT_ROOT, "VERSION")) as file_h:
        current_version = file_h.read().strip()
    print("Current version is: {0}".format(current_version))
    print("Please insert new version:")
    new_version = str(input())
    # NOTE(review): StrictVersion lives in distutils, which is removed in
    # Python 3.12 — consider migrating to packaging.version.
    if StrictVersion(new_version) <= StrictVersion(current_version):
        raise RuntimeError(
            "Error new version is below current version: {0} < {1}".format(
                new_version, current_version
            )
        )
    try:
        with open(os.path.join(PROJECT_ROOT, "CHANGELOG.md")) as file_h:
            changelog = file_h.read()
        today = datetime.datetime.today()
        # Insert a dated section for the new release right under the
        # "master - CURRENT" heading.
        changelog = changelog.replace(
            "## master - CURRENT\n",
            """\
## master - CURRENT
## {0} - {1}
""".format(
                new_version, today.strftime("%d/%m/%Y")
            ),
        )
        with open(os.path.join(PROJECT_ROOT, "CHANGELOG.md"), "w") as file_h:
            file_h.write(changelog)
        with open(os.path.join(PROJECT_ROOT, "VERSION"), "w") as file_h:
            file_h.write(new_version)
        subprocess.check_call(
            'git commit -a -m "Version {0}"'.format(new_version), shell=True
        )
        # subprocess.check_call("git tag v{0}".format(new_version), shell=True)
        # subprocess.check_call("git push --tags", shell=True)
        # subprocess.check_call("git push", shell=True)
    except subprocess.CalledProcessError as e:
        # Best-effort cleanup hooks are still commented out; just re-raise.
        print("Error detected, cleaning state.")
        # subprocess.call("git tag -d v{0}".format(new_version), shell=True)
        # subprocess.check_call("git reset --hard", shell=True)
        raise e
if __name__ == "__main__":
main()
| Python | 0 | |
2850713d0add5cb1ae084898bdd6929c0f5bfb3e | add simulated annealing stat script | master/scripts/planner/solvers/hyperparameter_optimization/test_stat_sa.py | master/scripts/planner/solvers/hyperparameter_optimization/test_stat_sa.py | import GPy
import GPyOpt
import numpy as np
from sys import path
import pickle
import time
from tqdm import tqdm
path.append("..")
path.append("../..")
path.append("../../..")
from solver import SimulatedAnnealingSolver, RandomSolver
import map_converter as m
# Load the pre-serialized map used by the planners.
fs = open("../../../webserver/data/serialization/mapper.pickle", "rb")
mapper = pickle.load(fs)
fs.close()
nb_drone = 1
# Fixed set of waypoint coordinates used as the planning state.
state = [(1059, 842), (505, 1214), (400, 1122), (502, 339), (866, 512), (1073, 82), (669, 1202), (32, 1122), (45, 52), (209, 993), (118, 653), (487, 896), (748, 638), (271, 1067), (1576, 567), (683, 316), (1483, 1156), (1448, 634), (303, 1220), (759, 823), (1614, 991), (1387, 174), (1618, 227), (367, 39), (35, 902), (967, 690), (944, 327), (912, 1029), (184, 1205), (779, 1026), (694, 123), (1502, 395)]
rplan = RandomSolver(state, mapper, nb_drone)
saplan = SimulatedAnnealingSolver(rplan.state, mapper, nb_drone)
hist = []
# Run 100 annealing trials from fresh random starts and record the final
# energy of each, to estimate the solver's variance at these temperatures.
for i in tqdm(range(100)):
    rplan.solve()
    saplan.state = list(rplan.state)
    saplan.copy_strategy = "slice"
    saplan.steps = 10000000
    # Temperature schedule endpoints (Tmax from prior hyperparameter tuning).
    tmax = 987.57443341
    tmin = 1
    saplan.Tmax = tmax
    saplan.Tmin = tmin
    saplan.updates = 0
    itinerary, energy = saplan.solve()
    hist.append(energy)
hist = np.array(hist)
print("Mean:", np.mean(hist), "Var:", np.var(hist), "Std:", np.std(hist))
print(hist)
| Python | 0 | |
edf2fd4c3c73a82f590ec3065cfdf6de4eb58e01 | Fix include_clients in PostfixCollector | src/collectors/postfix/postfix.py | src/collectors/postfix/postfix.py | # coding=utf-8
"""
Collect stats from postfix-stats. postfix-stats is a simple threaded stats
aggregator for Postfix. When running as a syslog destination, it can be used to
get realtime cumulative stats.
#### Dependencies
* socket
* json (or simeplejson)
* [postfix-stats](https://github.com/disqus/postfix-stats)
"""
import socket
import sys
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import diamond.collector
if sys.version_info < (2, 6):
from string import maketrans
DOTS_TO_UNDERS = maketrans('.', '_')
else:
DOTS_TO_UNDERS = {ord(u'.'): u'_'}
class PostfixCollector(diamond.collector.Collector):
    """Diamond collector for the postfix-stats aggregator.

    Connects to a running postfix-stats instance over TCP, requests its
    cumulative JSON counters, and publishes them as rates via
    self.derivative().
    """

    def get_default_config_help(self):
        config_help = super(PostfixCollector,
                            self).get_default_config_help()
        config_help.update({
            'host': 'Hostname to connect to',
            'port': 'Port to connect to',
            'include_clients': 'Include client connection stats',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(PostfixCollector, self).get_default_config()
        config.update({
            'path': 'postfix',
            'host': 'localhost',
            'port': 7777,
            'include_clients': 'true',
            'method': 'Threaded',
        })
        return config

    def get_json(self):
        """Fetch the raw JSON stats payload; returns '' on socket errors."""
        json_string = ''
        address = (self.config['host'], int(self.config['port']))
        # Bug fix: bind `s` before the try block — if create_connection()
        # itself raised, the finally clause below hit a NameError on `s`.
        s = None
        try:
            try:
                s = socket.create_connection(address, timeout=1)
                s.sendall('stats\n')
                while 1:
                    data = s.recv(4096)
                    if not data:
                        break
                    json_string += data
            except socket.error:
                self.log.exception("Error talking to postfix-stats")
                return ''
        finally:
            if s:
                s.close()
        return json_string

    def get_data(self):
        """Parse the stats payload; returns None when parsing fails."""
        json_string = self.get_json()
        try:
            data = json.loads(json_string)
        except (ValueError, TypeError):
            self.log.exception("Error parsing json from postfix-stats")
            return None
        return data

    def collect(self):
        """Publish every counter as a derivative (per-interval rate)."""
        data = self.get_data()
        if not data:
            return
        # Config values are read back as strings, hence the 'true' compare.
        if self.config['include_clients'] == 'true' and u'clients' in data:
            for client, value in data['clients'].iteritems():
                # translate dots to underscores in client names
                metric = u'.'.join(['clients',
                                    client.translate(DOTS_TO_UNDERS)])
                dvalue = self.derivative(metric, value)
                self.publish(metric, dvalue)
        for action in (u'in', u'recv', u'send'):
            if action not in data:
                continue
            for sect, stats in data[action].iteritems():
                for status, value in stats.iteritems():
                    metric = '.'.join([action,
                                       sect,
                                       status.translate(DOTS_TO_UNDERS)])
                    dvalue = self.derivative(metric, value)
                    self.publish(metric, dvalue)
        if u'local' in data:
            for key, value in data[u'local'].iteritems():
                metric = '.'.join(['local', key])
                dvalue = self.derivative(metric, value)
                self.publish(metric, dvalue)
| # coding=utf-8
"""
Collect stats from postfix-stats. postfix-stats is a simple threaded stats
aggregator for Postfix. When running as a syslog destination, it can be used to
get realtime cumulative stats.
#### Dependencies
* socket
* json (or simeplejson)
* [postfix-stats](https://github.com/disqus/postfix-stats)
"""
import socket
import sys
try:
import json
json # workaround for pyflakes issue #13
except ImportError:
import simplejson as json
import diamond.collector
if sys.version_info < (2, 6):
from string import maketrans
DOTS_TO_UNDERS = maketrans('.', '_')
else:
DOTS_TO_UNDERS = {ord(u'.'): u'_'}
class PostfixCollector(diamond.collector.Collector):
    """Diamond collector for the postfix-stats aggregator.

    Connects to a running postfix-stats instance over TCP, requests its
    cumulative JSON counters, and publishes them as rates via
    self.derivative().
    """

    def get_default_config_help(self):
        config_help = super(PostfixCollector,
                            self).get_default_config_help()
        config_help.update({
            'host': 'Hostname to connect to',
            'port': 'Port to connect to',
            'include_clients': 'Include client connection stats',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(PostfixCollector, self).get_default_config()
        config.update({
            'path': 'postfix',
            'host': 'localhost',
            'port': 7777,
            'include_clients': True,
            'method': 'Threaded',
        })
        return config

    def get_json(self):
        """Fetch the raw JSON stats payload; returns '' on socket errors."""
        json_string = ''
        address = (self.config['host'], int(self.config['port']))
        # Bug fix: bind `s` before the try block — if create_connection()
        # itself raised, the finally clause below hit a NameError on `s`.
        s = None
        try:
            try:
                s = socket.create_connection(address, timeout=1)
                s.sendall('stats\n')
                while 1:
                    data = s.recv(4096)
                    if not data:
                        break
                    json_string += data
            except socket.error:
                self.log.exception("Error talking to postfix-stats")
                return ''
        finally:
            if s:
                s.close()
        return json_string

    def get_data(self):
        """Parse the stats payload; returns None when parsing fails."""
        json_string = self.get_json()
        try:
            data = json.loads(json_string)
        except (ValueError, TypeError):
            self.log.exception("Error parsing json from postfix-stats")
            return None
        return data

    def collect(self):
        """Publish every counter as a derivative (per-interval rate)."""
        data = self.get_data()
        if not data:
            return
        if self.config['include_clients'] and u'clients' in data:
            for client, value in data['clients'].iteritems():
                # translate dots to underscores in client names
                metric = u'.'.join(['clients',
                                    client.translate(DOTS_TO_UNDERS)])
                dvalue = self.derivative(metric, value)
                self.publish(metric, dvalue)
        for action in (u'in', u'recv', u'send'):
            if action not in data:
                continue
            for sect, stats in data[action].iteritems():
                for status, value in stats.iteritems():
                    metric = '.'.join([action,
                                       sect,
                                       status.translate(DOTS_TO_UNDERS)])
                    dvalue = self.derivative(metric, value)
                    self.publish(metric, dvalue)
        if u'local' in data:
            for key, value in data[u'local'].iteritems():
                metric = '.'.join(['local', key])
                dvalue = self.derivative(metric, value)
                self.publish(metric, dvalue)
| Python | 0 |
1d4693b6f5b6f8b3912aae1216665272a36b1411 | Add missing group.py | group.py | group.py | from pygame.sprite import Group as pygame_Group
class Group(pygame_Group):
    """Sprite group whose members render themselves.

    Each sprite's own draw(*args, **kw) hook runs first; the base pygame
    group draw onto `onto` runs afterwards.
    """
    def draw(self, onto, *args, **kw):
        for member in self:
            member.draw(*args, **kw)
        super(Group, self).draw(onto)
| Python | 0.000387 | |
5350fa9bb5d67b79d652a57b766ed1b1167a92eb | Introduce TC-DA-1.7 Python test (#21775) | TC_DA_1_7.py | TC_DA_1_7.py | #
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body
from matter_testing_support import hex_from_bytes, bytes_from_hex
from chip.interaction_model import Status
import chip.clusters as Clusters
import logging
from mobly import asserts
from pathlib import Path
from glob import glob
from cryptography.x509 import load_der_x509_certificate, SubjectKeyIdentifier, AuthorityKeyIdentifier, Certificate
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import PublicFormat, Encoding
from typing import Optional
FORBIDDEN_AKID = [
bytes_from_hex("78:5C:E7:05:B8:6B:8F:4E:6F:C7:93:AA:60:CB:43:EA:69:68:82:D5"),
bytes_from_hex("6A:FD:22:77:1F:51:1F:EC:BF:16:41:97:67:10:DC:DC:31:A1:71:7E")
]
def load_all_paa(paa_path: Path) -> dict:
    """Map each PAA's Subject Key Identifier to (filename, certificate).

    Scans *paa_path* (non-recursively) for DER-encoded PAA certificates.
    PAAs that lack a Subject Key Identifier extension are skipped.
    """
    logging.info("Loading all PAAs in %s" % paa_path)
    paa_by_skid = {}
    for der_file in paa_path.glob("*.der"):
        cert = load_der_x509_certificate(der_file.read_bytes())
        # Record the cert under its SKID (when the extension is present).
        for ext in cert.extensions:
            if ext.oid == SubjectKeyIdentifier.oid:
                paa_by_skid[ext.value.key_identifier] = (der_file.name, cert)
    return paa_by_skid
def extract_akid(cert: Certificate) -> Optional[bytes]:
    """Return the certificate's Authority Key Identifier, or None if absent."""
    akid_values = (ext.value.key_identifier
                   for ext in cert.extensions
                   if ext.oid == AuthorityKeyIdentifier.oid)
    return next(akid_values, None)
class TC_DA_1_7(MatterBaseTest):
    """TC-DA-1.7: Device Attestation certificate-chain checks against one DUT.

    Fetches the DUT's PAI and DAC via CertificateChainRequest, checks their
    encoded sizes, validates the PAI signature against a PAA from the local
    trust store, rejects denylisted SDK PAIs (unless explicitly allowed), and
    records the DAC's subject public key.
    """

    @async_test_body
    async def test_TC_DA_1_7(self):
        # Option to allow SDK roots (skip step 4 check 2)
        allow_sdk_dac = self.user_params.get("allow_sdk_dac", False)
        logging.info("Pre-condition: load all PAAs SKIDs")
        conf = self.matter_test_config
        paa_by_skid = load_all_paa(conf.paa_trust_store_path)
        logging.info("Found %d PAAs" % len(paa_by_skid))
        logging.info("Step 1: Commissioning, already done")
        dev_ctrl = self.default_controller
        logging.info("Step 2: Get PAI of DUT1 with certificate chain request")
        # Certificate type argument 2 requests the PAI, per the log message above.
        result = await dev_ctrl.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.CertificateChainRequest(2))
        pai_1 = result.certificate
        asserts.assert_less_equal(len(pai_1), 600, "PAI cert must be at most 600 bytes")
        self.record_data({"pai_1": hex_from_bytes(pai_1)})
        logging.info("Step 3: Get DAC of DUT1 with certificate chain request")
        # Certificate type argument 1 requests the DAC.
        result = await dev_ctrl.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.CertificateChainRequest(1))
        dac_1 = result.certificate
        asserts.assert_less_equal(len(dac_1), 600, "DAC cert must be at most 600 bytes")
        self.record_data({"dac_1": hex_from_bytes(dac_1)})
        logging.info("Step 4 check 1: Ensure PAI's AKID matches a PAA and signature is valid")
        pai1_cert = load_der_x509_certificate(pai_1)
        pai1_akid = extract_akid(pai1_cert)
        # The PAI's Authority Key Identifier must match the SKID of a trusted PAA.
        if pai1_akid not in paa_by_skid:
            asserts.fail("DUT1's PAI (%s) not matched in PAA trust store" % hex_from_bytes(pai1_akid))
        filename, paa_cert = paa_by_skid[pai1_akid]
        logging.info("Matched PAA file %s, subject: %s" % (filename, paa_cert.subject))
        public_key = paa_cert.public_key()
        # Verify the PAI's ECDSA-with-SHA256 signature using the matched PAA key.
        try:
            public_key.verify(signature=pai1_cert.signature, data=pai1_cert.tbs_certificate_bytes,
                              signature_algorithm=ec.ECDSA(hashes.SHA256()))
        except InvalidSignature as e:
            asserts.fail("Failed to verify PAI signature against PAA public key: %s" % str(e))
        logging.info("Validated PAI signature against PAA")
        logging.info("Step 4 check 2: Verify PAI AKID not in denylist of SDK PAIs")
        if allow_sdk_dac:
            logging.warn("===> TEST STEP SKIPPED: Allowing SDK DACs!")
        else:
            for candidate in FORBIDDEN_AKID:
                asserts.assert_not_equal(hex_from_bytes(pai1_akid), hex_from_bytes(candidate), "PAI AKID must not be in denylist")
        logging.info("Step 5: Extract subject public key of DAC and save")
        dac1_cert = load_der_x509_certificate(dac_1)
        # Serialize as an uncompressed X9.62 EC point and record it for the report.
        pk_1 = dac1_cert.public_key().public_bytes(encoding=Encoding.X962, format=PublicFormat.UncompressedPoint)
        logging.info("Subject public key pk_1: %s" % hex_from_bytes(pk_1))
        self.record_data({"pk_1": hex_from_bytes(pk_1)})
# Allow running this test case standalone through the Matter Python test harness.
if __name__ == "__main__":
    default_matter_test_main()
| Python | 0.002474 | |
b680141b9ec5468a5a0890edf25045a6af8b46c2 | Add run.py | run.py | run.py | #!/usr/bin/python
# -*- coding:utf8 -*-
# Powered By KK Studio
from app.DNStack import DNStack
# Entry point: build the DNStack application and start it.
if __name__ == "__main__":
    DNStack().run()
| Python | 0.000009 | |
d3e786b554bfafeb4f0c16635b80f9911acc4bba | add stacked auto encoder file. | sae.py | sae.py | #coding: utf-8
import requests
import random, numpy
from aa import AutoEncoder
class StackedAutoEncoder:
    """Stack of AutoEncoders trained greedily, one layer at a time.

    Layers below ``training_layer`` are frozen: their encodings feed the
    layer currently being trained.  Call ``fix_traning_layer`` once a layer
    has converged to advance training to the next layer.
    """

    def __init__(self, visible, hiddens):
        """Build one AutoEncoder per adjacent size pair in [visible] + hiddens.

        visible -- number of input (visible) units
        hiddens -- list of hidden-layer sizes, bottom to top
        """
        # TODO: fine-tuning layer
        num_of_nodes = [visible] + hiddens
        self.auto_encoders = []
        for i in xrange(len(num_of_nodes) - 1):
            self.auto_encoders.append(AutoEncoder(num_of_nodes[i], num_of_nodes[i + 1]))
        # Index of the layer currently being trained; layers below it are frozen.
        self.training_layer = 0

    def _encode_through_frozen_layers(self, samples):
        """Propagate samples through every frozen layer below the training layer."""
        for i in xrange(self.training_layer):
            samples = map(self.auto_encoders[i].encode, samples)
        return samples

    def train(self, samples, alpha=0.05):
        """Train the current layer on samples with learning rate alpha."""
        samples = self._encode_through_frozen_layers(samples)
        self.auto_encoders[self.training_layer].train(samples, alpha)

    def error(self, samples, alpha=0.05):
        """Return the current layer's reconstruction error on samples.

        alpha is unused; it is kept only for backward compatibility.
        """
        samples = self._encode_through_frozen_layers(samples)
        return self.auto_encoders[self.training_layer].error(samples)

    def output(self, sample):
        """Encode sample up to the training layer, then decode back one step."""
        for i in xrange(self.training_layer):
            sample = self.auto_encoders[i].encode(sample)
        top = self.auto_encoders[self.training_layer]
        return top.decode(top.encode(sample))

    def fix_traning_layer(self):
        """Freeze the current layer and advance training to the next one.

        (Name kept as-is, 'traning' typo included, for caller compatibility.)
        """
        self.training_layer += 1
if __name__=='__main__':
    # Demo: greedy layer-wise training on the UCI SPECT heart training set.
    resp = requests.get('https://archive.ics.uci.edu/ml/machine-learning-databases/spect/SPECT.train')
    # Split the CSV payload into rows of string fields; treat the first row as titles.
    samples = map(lambda row: row.split(','), resp.text.split('\n'))
    titles = samples[0]
    samples = samples[1:]
    # Drop blank/short trailing rows.
    samples = filter(lambda arr: len(arr) > 1, samples)
    # Each sample becomes a V x 1 numpy column matrix of floats.
    samples = map(lambda arr: numpy.matrix([map(float, arr)]), samples)
    samples = map(lambda mat: mat.transpose(), samples)
    V = samples[0].shape[0]
    H = 2*V  # NOTE(review): H appears unused below -- confirm before removing
    sae = StackedAutoEncoder(V, [V+2,V])
    # Train the bottom layer on random slices of up to 10 samples.
    for i in xrange(1000):
        j = int(random.random()*len(samples))
        #print samples[j:j+10]
        sae.train(samples[j:j+10])
        if i<100 or i%1000 == 0:
            print sae.error(samples)
    # Freeze layer 0 and train the next layer the same way.
    sae.fix_traning_layer()
    for i in xrange(1000):
        j = int(random.random()*len(samples))
        #print samples[j:j+10]
        sae.train(samples[j:j+10])
        if i<100 or i%1000 == 0:
            print sae.error(samples)
    # Print the reconstruction of every sample.
    for sample in samples:
        print sae.output(sample)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.