commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
9266e24e616174cc37b5e6f7926dfda81471abb5 | Initialize PracticeQuestions | books/CrackingCodesWithPython/Chapter13/PracticeQuestions.py | books/CrackingCodesWithPython/Chapter13/PracticeQuestions.py | # Chapter 13 Practice Questions
# Chapter 13 practice questions, answered by running small snippets.

# Q1: what do these modulus expressions evaluate to?
print(17 % 1000)
print(5 % 5)

# Q2: what is the GCD of 10 and 15?
# NOTE: mid-file import kept deliberately to mirror the book's flow;
# imports normally belong at the top of the file.
from books.CrackingCodesWithPython.Chapter13.cryptomath import gcd
print(gcd(10, 15))

# Q3: what does spam contain after the tuple assignment below?
spam, eggs = 'hello', 'world'
print(spam)

# Q4: gcd(17, 31) == 1, so 17 and 31 are relatively prime.
if gcd(17, 31) == 1:
    print("Yes")
else:
    print("No")

# Q5: 6 and 8 share the factor 2, so they are not relatively prime.
print(gcd(6, 8))

# Q6: what is the formula for the modular inverse of A mod C?
# Hint: check page 183
| Python | 0 | |
c9e90ef5413bd560422e915d213df73ad88dffd7 | Add apigateway integration test for PutIntegration | tests/integration/test_apigateway.py | tests/integration/test_apigateway.py | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
class TestApigateway(unittest.TestCase):
    """Integration tests for the API Gateway client.

    NOTE(review): these tests talk to the live AWS service in us-east-1;
    they require valid credentials and create/delete real resources.
    """

    def setUp(self):
        self.session = botocore.session.get_session()
        self.client = self.session.create_client('apigateway', 'us-east-1')

        # Create a resource (a REST API) to use with this client.
        self.api_name = 'mytestapi'
        self.api_id = self.client.create_rest_api(name=self.api_name)['id']

    def tearDown(self):
        # Remove the API created in setUp so test runs do not leak resources.
        self.client.delete_rest_api(restApiId=self.api_id)

    def test_put_integration(self):
        # The only resource on a brand new api is the path. So use that ID.
        path_resource_id = self.client.get_resources(
            restApiId=self.api_id)['items'][0]['id']

        # Create a method for the resource.
        self.client.put_method(
            restApiId=self.api_id,
            resourceId=path_resource_id,
            httpMethod='GET',
            authorizationType='None'
        )

        # Put an integration on the method.
        response = self.client.put_integration(
            restApiId=self.api_id,
            resourceId=path_resource_id,
            httpMethod='GET',
            type='HTTP',
            integrationHttpMethod='GET',
            uri='https://api.endpoint.com'
        )
        # Assert the response was successful by checking the integration type
        self.assertEqual(response['type'], 'HTTP')
| Python | 0 | |
1b7b8ff8b6e33bc25323bc700e3e244758ee1a2d | add initial index code | maras/ind/hash_index.py | maras/ind/hash_index.py | '''
A hash based index
'''
# Import python libs
import struct
import os
# Import maras libs
import maras.utils
# Import third party libs
import msgpack
class HashIndex(object):
    '''
    A fixed-size, on-disk hash index.

    The index file starts with a msgpack-encoded header (within the first
    ``header_len`` bytes), followed by a hash-bucket region and appended
    data/collision entries.  Each bucket is a packed struct of
    ``(key, data_location, next)``.

    NOTE(review): file contents are written as Python 2 ``str``; under
    Python 3 the binary file handling would need bytes throughout.
    '''
    def __init__(
            self,
            name,
            dbpath,
            hash_limit=0xfffff,
            key_hash='sha1',
            header_len=1000):
        # Directory holding the database files, and this index's own path.
        self.dbpath = dbpath
        self.path = os.path.join(dbpath, '{0}.index'.format(name))
        # Mask applied to the key hash; bounds the size of the bucket table.
        self.hash_limit = hash_limit
        # Hash callable plus the byte length of its digests.
        self.key_hash, self.key_size = maras.utils.get_hash_data(key_hash)
        # Persisted header: hash limit, hash name, key size, and the byte
        # width of file offsets ('nsz': 8 -> 64-bit offsets).
        self.header = {'hlim': hash_limit,
                       'keyh': key_hash,
                       'ksz': self.key_size,
                       'nsz': 8}
        self.header_len = header_len
        self.h_delim = '_||_||_'
        self.fp = self.__open_index()
        # BUGFIX: the original called self.__gen_bucket_fmt(), which does
        # not exist -- the method is named __gen_hbucket_fmt.
        self.h_bucket_fmt, self.h_bucket_size = self.__gen_hbucket_fmt()

    def __gen_hbucket_fmt(self):
        '''
        Generate the hash bucket struct format based on the sizes in the
        header, returning ``(fmt, packed_size)``.
        '''
        # Big endian to match msgpack.
        # hbucket format is: key, data_location, next.  key is the digest
        # length of the hash function; data_location and next are 8-byte
        # unsigned offsets (matching 'nsz': 8 in the header).
        # BUGFIX: the original built '>s{0}LL', which struct reads as one
        # 1-byte string followed by key_size 4-byte longs -- the repeat
        # count must precede 's', and 64-bit offsets need 'Q'.
        fmt = '>{0}sQQ'.format(self.key_size)
        # calcsize avoids packing a throwaway test struct (the original
        # test-pack also referenced the non-existent maras.util module).
        return fmt, struct.calcsize(fmt)

    def __open_index(self):
        '''
        Open the index at ``self.path``, creating it first if needed.
        '''
        if not os.path.exists(self.path):
            return self.create()
        return self.open_index()

    def create(self):
        '''
        Create a new index file and write the delimited msgpack header.
        '''
        if os.path.exists(self.path):
            raise ValueError('Index exists')
        fp_ = open(self.path, 'w+b')
        header = '{0}{1}'.format(msgpack.dumps(self.header), self.h_delim)
        fp_.write(header)
        return fp_

    def open_index(self):
        '''
        Open an existing index and restore its header settings.
        '''
        if not os.path.isfile(self.path):
            raise ValueError('No Index Exists')
        # BUGFIX: was opened 'rb' (read-only); insert() writes through
        # self.fp, which would raise on a read-only handle.
        fp_ = open(self.path, 'r+b')
        raw_head = fp_.read(self.header_len)
        self.header = msgpack.loads(raw_head[:raw_head.index(self.h_delim)])
        self.hash_limit = self.header['hlim']
        self.key_hash, self.key_size = maras.utils.get_hash_data(self.header['keyh'])
        fp_.seek(0)
        return fp_

    def _hash_position(self, key, first):
        '''
        Calculate the bucket offset for ``key`` relative to ``first``
        (the byte offset where the bucket table begins).
        '''
        return abs(hash(key) & self.hash_limit) * self.h_bucket_size + first

    def _get_h_entry(self, pos):
        '''
        Return the unpacked ``(key, data_location, next)`` tuple stored at
        ``pos``, or None for an empty/short slot.
        '''
        self.fp.seek(pos)
        raw = self.fp.read(self.h_bucket_size)
        try:
            return struct.unpack(self.h_bucket_fmt, raw)
        except struct.error:
            # Short read: the slot has never been written.
            return None

    def _find_h_tail(self, rec_top, entry):
        '''
        Follow the ``next`` pointers from ``entry`` and return the position
        and contents of the final entry in the collision chain.
        '''
        while entry[2]:
            rec_top = entry[2]
            # BUGFIX: the original called the non-existent self._get_entry.
            entry = self._get_h_entry(entry[2])
        return rec_top, entry

    def _write_h_entry(self, h_pos, key, id_, start, size, next_):
        '''
        Write a data-ref entry, then the hash bucket pointing at it.
        '''
        top = self._write_d_entry(id_, start, size)
        h_entry = struct.pack(self.h_bucket_fmt, key, top, next_)
        self.fp.seek(h_pos)
        self.fp.write(h_entry)

    def _write_collision(self, entry, h_pos, key, id_, start, size):
        '''
        Append a new entry for a colliding key: link the existing chain's
        tail to a fresh entry written at the end of the file.
        '''
        top = self._write_d_entry(id_, start, size)
        # find the tail of the collision chain and point it at the new entry
        tail_pos, tail = self._find_h_tail(h_pos, entry)
        tail_entry = struct.pack(self.h_bucket_fmt, tail[0], tail[1], top)
        self.fp.seek(tail_pos)
        self.fp.write(tail_entry)
        self.fp.seek(0, 2)
        h_entry = struct.pack(self.h_bucket_fmt, key, top, 0)
        self.fp.write(h_entry)

    def _write_d_entry(self, id_, start, size):
        '''
        Append the data ref entry at the end of the file and return the
        offset it was written at.
        '''
        self.fp.seek(0, 2)
        # Never write into the reserved header region.
        if self.fp.tell() < self.header_len:
            self.fp.seek(self.header_len)
        top = self.fp.tell()
        self.fp.write(struct.pack(self.h_bucket_fmt, id_, start, size))
        return top

    def insert(self, key, id_, start, size):
        '''
        Insert a reference for ``key``: record that ``id_``'s data lives at
        offset ``start`` with length ``size``.
        '''
        if not id_:
            id_ = maras.utils.rand_hex_str(self.key_size)
        h_pos = self._hash_position(key, self.header_len)
        # BUGFIX: the original called the non-existent self._get_entry.
        entry = self._get_h_entry(h_pos)
        if entry is None:
            self._write_h_entry(h_pos, key, id_, start, size, 0)
        elif key != entry[0]:
            # hash collision: append to this bucket's chain
            self._write_collision(entry, h_pos, key, id_, start, size)
        return True
| Python | 0.000002 | |
4ce7a1932d9cde635263a4fe5a80af57589e1cfa | add NASM 2.13.02 Conan package recipe | build_env/Conan/packages/NASM/2.13.02/conanfile.py | build_env/Conan/packages/NASM/2.13.02/conanfile.py | import os
from conans import ConanFile, AutoToolsBuildEnvironment, tools
class NASM(ConanFile):
name = "NASM"
version = "2.13.02"
url = "http://www.nasm.us"
settings = {"os": ["Linux"]}
def getSubdirectories(self, d):
return [ f for f in os.listdir(d) if os.path.isdir(f) ]
    def source(self):
        # Download the NASM release tarball, unpack it, and normalise the
        # extracted directory name to ``self.name``.
        self.output.info("")
        self.output.info("---------- source ----------")
        self.output.info("")

        filename = "nasm-" + self.version + ".tar.bz2"
        url = "http://www.nasm.us/pub/nasm/releasebuilds/" + self.version + "/" + filename

        self.output.info("downloading " + url)
        tools.download(url, filename, retry=3, retry_wait=10)
        tools.unzip(filename, self.source_folder)

        # NOTE(review): getSubdirectories returns bare names, so the
        # os.rename below assumes the working directory is the source
        # folder (true under Conan's source() invocation) -- confirm.
        dirnames = self.getSubdirectories(self.source_folder)
        if len(dirnames) < 1:
            raise Exception("archive does not contain any subdirectories")
        os.rename(dirnames[0], self.name)
        os.remove(filename)
    def build(self):
        # Configure, build and install NASM with autotools, installing
        # straight into the package folder via --prefix (so package()
        # has nothing left to copy).
        self.output.info("")
        self.output.info("---------- build ----------")
        self.output.info("")

        with tools.chdir(self.name):
            env = AutoToolsBuildEnvironment(self)
            env.configure(args=["--prefix=" + self.package_folder])
            env.make()
            env.make(args=["install"])
    def package(self):
        # Intentionally empty: build() already installed into the package
        # folder; this only logs the packaging phase.
        self.output.info("")
        self.output.info("---------- package ----------")
        self.output.info("")
def package_info(self):
self.output.info("")
self.output.info("---------- package_info ----------")
self.output.info("")
self.env_info.PATH.append(os.path.join(self.package_folder, "bin")) | Python | 0 | |
9fb564d8f02d92432a62be02c906e3b227f48c10 | Create add_results_new.py | run_tests/shaker_run/add_results_new.py | run_tests/shaker_run/add_results_new.py | custom_res1 = [{'status_id': 5, 'content': 'Check [Operations per second Median; iops]', 'expected': '88888', 'actual': '7777'},{'status_id': 5, 'content': 'Check [deviation; %]', 'expected': '5555', 'actual': '9999'}]
# Build TestRail "add_results" payloads for two test cases and submit them
# in a single bulk call.
# NOTE(review): test_4kib_read, test_4kib_write, client and run_id are not
# defined in this snippet -- presumably supplied by the surrounding script.
res1 = {'test_id': test_4kib_read, 'status_id': 5, 'custom_test_case_steps_results': custom_res1}
res2 = {'test_id': test_4kib_write, 'status_id': 5, 'custom_test_case_steps_results': [{'status_id': 5, 'content': 'Check [Operations per second Median; iops]', 'expected': '20202', 'actual': '30303'},{'status_id': 5, 'content': 'Check [deviation; %]', 'expected': '90909', 'actual': '80808'}]}
results_list = [res1, res2]
res_all = {'results': results_list}
# Python 2 print statement: echo the TestRail API response.
print client.send_post('add_results/{}'.format(run_id), res_all)
| Python | 0.000004 | |
729f1c5147e4d4ce242d73731c8e455b2a50fca3 | add 188 | vol4/188.py | vol4/188.py | def tetration(a, b, m):
    # Iterate the map t -> a**t (mod m) up to b times; Project Euler 188
    # asks for the hyper-exponentiation (power tower) 1777^^1855 mod 10**8.
    t0 = 1
    for i in range(b):
        t1 = pow(a, t0, m)
        # Once a fixed point is reached the tower has stabilised mod m.
        if t0 == t1:
            break
        t0 = t1
    return t0


if __name__ == "__main__":
    # Python 2 print statement.
    print tetration(1777, 1855, 10 ** 8)
| Python | 0.999986 | |
247bb7b5beb58eaa70bbd54488214d19ccb380b1 | read dicom files and write out metadata to a CSV/json | dcm/export_metadata.py | dcm/export_metadata.py | # This script sxtracts meta-data from DICOMs and place it into two files:
# (1) for sequence data, output it as a json
# (2) for tabular data, output it as a CSV (readable by pandas)
# Note that the function does *not* output values if they are longer than 100 characters.
# This avoids outputting look up tables.
import os
import argparse
import sys
import gzip
from pathlib import Path
import json
import pandas as pd
from tqdm import tqdm
import pydicom
# Command-line interface: input directory, tabular/JSON output names, and an
# optional cap on how many DICOM files to process.
parser = argparse.ArgumentParser(description='Extract meta-data from DICOMs')
parser.add_argument('--data', '-d',
                    default='./files',
                    help='path to DICOM format images')
parser.add_argument('--out', '-o', default='dicom-metadata.csv.gz',
                    help=('name out dataframe output, '
                          '(default: dicom-metadata.csv.gz), '
                          'note: this is a compressed format.'))
parser.add_argument('--json', '-j', default=None,
                    help=('name of the output json file, '
                          '(default: <output-stem>.json)'))
parser.add_argument('--number', '-n', type=int, default=None,
                    help=('limit the number of DICOMs to process '
                          ' (default: None).'))
def recurse(ds):
    """
    Recursively convert a pydicom dataset's elements into a dict keyed by
    the combined ``group << 16 | element`` tag value.

    Values longer than 100 elements are recorded as None -- their existence
    is noted without storing the data, which avoids dumping look-up tables
    and bulk data.  Look-up-table (LUT) sequences are skipped entirely.
    """
    tmp_dict = dict()
    for elem in ds:
        e = elem.tag.group << 16 | elem.tag.element
        if elem.VR == 'SQ':
            # do not include look up tables
            if 'LUT' not in elem.name:
                # BUGFIX: the original built the recursed dicts and threw
                # them away; keep nested sequences under their tag so
                # deeply nested sequence data is not silently lost.
                tmp_dict[e] = [recurse(item) for item in elem]
        else:
            # Note huge values (usually bulk data) without storing them.
            if hasattr(elem.value, '__len__') and elem.value.__len__() > 100:
                tmp_dict[e] = None
            elif type(elem.value) is pydicom.multival.MultiValue:
                # MultiValue is not JSON-serialisable; store a plain list.
                tmp_dict[e] = list(elem.value)
            else:
                tmp_dict[e] = elem.value
    return tmp_dict
if __name__ == "__main__":
    args = parser.parse_args()

    base_path = Path(args.data)
    out_filename = args.out

    # Derive the JSON filename from --out when --json is not given,
    # stripping any .gz / .csv suffixes before appending .json.
    if args.json is not None:
        json_filename = args.json
    else:
        json_filename = out_filename
        if json_filename.endswith('.gz'):
            json_filename = json_filename[0:-3]
        if json_filename.endswith('.csv'):
            json_filename = json_filename[0:-4]
        json_filename += '.json'

    # get list of all dicoms under the given path
    # NOTE(review): assumes a fixed three-level directory hierarchy
    # (e.g. hospital/patient/study) below --data -- confirm with the data.
    files = list()
    for h in os.listdir(base_path):
        for pt in os.listdir(base_path / h):
            for st in os.listdir(base_path / f'{h}{os.sep}{pt}'):
                dcm_path = f'{base_path}{os.sep}{h}{os.sep}{pt}{os.sep}{st}'
                dcms = os.listdir(dcm_path)
                files.extend([f'{dcm_path}{os.sep}{d}' for d in dcms])
    files.sort()

    N = len(files)
    print(f'Found {N} files.')

    if args.number is not None:
        if args.number < N:
            # limit number of dicoms
            print(f'Limiting parsing to {args.number} of {N} DICOMs.')
            N = args.number

    if N == 0:
        print('No files to process. Exiting.')
        sys.exit()

    dicom_tabular_data = list()

    with open(json_filename, 'w') as fp:
        # initialize the array in the json file (written incrementally:
        # one object per DICOM, comma-separated)
        fp.write('[\n')
        for i in tqdm(range(N)):
            if i > 0:
                fp.write(',\n')
            dicom_full_path = files[i]
            # dicom filename is the last name in filepath
            fn = dicom_full_path.split('/')[-1].split('.')[0]
            # prepare the json output as a dictionary with this dicom fn as key
            fp.write('{')
            fp.write(f'"{fn}": ')
            # load info from dicom (headers only -- pixel data is skipped)
            with open(dicom_full_path, 'rb') as dcm_fp:
                plan = pydicom.dcmread(dcm_fp, stop_before_pixels=True)
            field_dict = dict()
            dicom_json = dict()
            # go through each element
            for elem in plan:
                # index the dictionary using a long value of group, element
                e = (elem.tag.group << 16) | elem.tag.element
                # sequence data goes into JSON
                if elem.VR == 'SQ':
                    # store number of items in the structured/flat data
                    field_dict[e] = elem.value.__len__()
                    # make a dict for the sequence, which will go into json
                    # don't store look up tables because
                    # they're huge and not human readable
                    if 'LUT' not in elem.name:
                        dicom_json[e] = [recurse(item) for item in elem]
                else:
                    # three "real" data-types: number, string, or list of things
                    field_dict[e] = elem.value
            field_dict['dicom'] = fn
            dicom_tabular_data.append(field_dict)
            # convert dictionary to json
            js = json.dumps(dicom_json)
            # write to json file
            fp.write(js)
            # finish the dicom dictionary
            fp.write('}')
        # end of array in json file
        fp.write('\n]')

    # combine list of dictionary into a dataframe
    df = pd.DataFrame(dicom_tabular_data)
    # make the dicom filename the index
    df.set_index('dicom', inplace=True)
    # write to file
    if out_filename.endswith('.gz'):
        df.to_csv(out_filename, sep=',', compression='gzip')
    else:
        df.to_csv(out_filename, sep=',')
| Python | 0.000001 | |
98c1ff71d57749168f0ca35d97dbe77a8a67e082 | Add module for utilities related to xgboost | mltils/xgboost/utils.py | mltils/xgboost/utils.py |
# Mapping from native XGBoost parameter names to the names used by the
# scikit-learn wrapper API.
xgb_to_sklearn = {
    'eta': 'learning_rate',
    'num_boost_round': 'n_estimators',
    'alpha': 'reg_alpha',
    'lambda': 'reg_lambda',
    'seed': 'random_state',
}


def to_sklearn_api(params):
    """Return *params* with native XGBoost names translated to their
    scikit-learn equivalents; unknown keys pass through unchanged."""
    translated = {}
    for name, value in params.items():
        translated[xgb_to_sklearn.get(name, name)] = value
    return translated
| Python | 0 | |
bbb10ba41db6f70512fe6bcb5207377606a22455 | Create Mordecai_Output.py | Geoparser_Comparison/English/Mordecai_Output.py | Geoparser_Comparison/English/Mordecai_Output.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Download and run Mordecai from following link:
"https://github.com/openeventdata/mordecai"
To change the corpus, just change the name in main function.
"""
import xml.etree.ElementTree as et
import re
import json, sys
import requests
#reload(sys)
#sys.setdefaultencoding("utf-8")
def Mordecai(text):
    """Geoparse ``text`` via a locally running Mordecai service (port 5000).

    Returns a list of ',,'-delimited records, one per occurrence of each
    resolved place name:
        searchterm,,placename,,lat,,lon,,start_offset,,end_offset
    """
    headers = {'Content-Type': 'application/json'}
    place = list()
    data = {'text': text}
    data = json.dumps(data)
    out = requests.post('http://localhost:5000/places', data=data, headers=headers)
    parsed_json = json.loads(out.text)
    try:
        for e in parsed_json:
            # Record every occurrence of the resolved place name in the text.
            index = [m.start() for m in re.finditer(e['placename'].strip(), text)]
            for ind in index:
                place.append(e['searchterm'] + ",," + e['placename'] + ",," + str(e['lat']) + ",," + str(e['lon']) + ",,"+ str(ind) +',,'+ str(ind +len(e['placename'].strip()) ))
    except (KeyError, TypeError, re.error):
        # BUGFIX: the original bare ``except:`` swallowed every exception,
        # including KeyboardInterrupt/SystemExit.  Malformed entries (or a
        # placename that is not a valid regex) still abort the remaining
        # entries, as before -- partial results are returned.
        pass
    return place
if __name__ == '__main__':
    # Geoparse every article in the WikToR corpus and dump Mordecai's
    # resolved toponyms; change the paths below to run against lgl.xml.
    f = open('./data/wiki_mordecai_Original.txt' , 'w') #change it if your data is lgl.xml
    tree = et.parse('./WikToR(SciPaper).xml') #change it if your data is lgl.xml
    root = tree.getroot()
    c = 0
    for child in root:
        c +=1
        # Python 2 print statement: progress counter.
        print c
        text = child.find('text').text
        place = Mordecai(text)
        if (place):
            # '||' separates records; one output line per article.
            for t in place:
                f.write(t + "||")
            f.write("\n")
            f.flush()
| Python | 0.000198 | |
9d98c3280d4e9dc6dda172d11e02922fc9958471 | add homwork01_v0.2.py | 01/homwork01_v0.2.py | 01/homwork01_v0.2.py | #!/usr/bin/env python
#coding=utf-8
# Find the two largest values in num_list with a single pass.
num_list = [1,2,3,2,12,3,1,3,21,2,2,3,4111,22,3333,444,111,4,5,777,65555,45,33,45]

# BUGFIX: the original seeded *both* maxima with num_list[0], so whenever
# the first element was the largest (e.g. [10, 1, 2]) the runner-up was
# reported as the maximum itself.  Seed from the first two elements instead.
max1 = max(num_list[0], num_list[1])
max2 = min(num_list[0], num_list[1])
for n in num_list[2:]:
    if n > max1:
        # New maximum: the old maximum becomes the runner-up.
        max2 = max1
        max1 = n
    elif n > max2:
        # Between the two: replace only the runner-up.
        max2 = n
# Parenthesised so it prints identically under Python 2 and Python 3.
print("Two large numbers are: %d, %d" % (max1, max2))
| Python | 0.000065 | |
104cefdb55a89ac89984363cb1930bdc3ef054e9 | Add 01_GPy_regression.py to the repository. This is a kind of sandbox file | 01_GPy_regression.py | 01_GPy_regression.py | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <headingcell level=1>

# Using GPy package to perform Gaussian Processes regression on SN lightcurves

# <rawcell>

# https://gpy.readthedocs.org/en/latest/tuto_GP_regression.html

# <codecell>

import numpy as np
import pylab as pb
pb.ion()
import GPy
import pickle
from matplotlib import pyplot as plt
import snphotcc_infile_reformat as snphot
import time
# %matplotlib inline

# <markdowncell>

# The dataset is a simulated one coming from the SuperNova Classification Challange (Kessler+ 2010).
# The ASCII files are read using code from Newling+ 2011. A catalog of LCs is produced.

# <rawcell>

# import utility
#
# snCatalog = utility.create_sn_catalog('../DES_BLIND+HOSTZ')
# utility.pickle_sn_catalog(snCatalog, 'snCatalog.pkl')

# <markdowncell>

# Opening pickle file containing the catalog

# <codecell>

# Wall-clock start time, used to report the total run time at the end.
t1 = time.mktime(time.gmtime())

f = open('snCatalog.pkl', 'rb')
snCatalog = pickle.load(f)
f.close()

# <codecell>

len(snCatalog.SNID)

# <codecell>

# Limiting magnitudes per DES band; fluxes fainter than these are
# effectively non-detections (shaded grey in the plots below).
gLimMag = 25.2
rLimMag = 25.4
iLimMag = 25.1
zLimMag = 24.9

# <markdowncell>

# Looking for SN with ID 142 (whose LC as already been fitted with Faraway's R code)

# <codecell>

#snIdx = np.where(snCatalog.SNID == 142)[0][0]
snIdx = np.random.random_integers(low=0, high=len(snCatalog.SNID))
# Python 2 print statement.
print snCatalog.SNID[snIdx]

# <markdowncell>

# Band Selection

# <codecell>

# Page layout: `offset` selects which block of 48 (nLines x nCols)
# supernovae to plot on each page.
offset = 3
nLines = 6
nCols = 8
for band in ('g', 'r', 'i', 'z'):

    # <codecell>

    # Plot some of the light curves on the same page (48 per page)
    fig, ax = plt.subplots(nrows=nLines, ncols=nCols)
    fig.suptitle('band %s' %band)
    plt.subplots_adjust(wspace=0.05, hspace=0.05, top=0.95, bottom=0.05, left=0.05, right=0.95)
    j = 0 # row index in tuple ax
    k = 0 # col index in tuple ax
    if band == 'g': LimMag = gLimMag
    if band == 'r': LimMag = rLimMag
    if band == 'i': LimMag = iLimMag
    if band == 'z': LimMag = zLimMag
    rangeLim = nLines * nCols
    for i in (range(rangeLim)):
        snIdx = i + offset * (nLines * nCols)
        numObs = len(snCatalog.sne[snIdx].lightCurvesDict[band].mjd)

        # <codecell>

        #numObs = len(snCatalog.sne[snIdx].g.mjd)
        X = np.reshape(snCatalog.sne[snIdx].lightCurvesDict[band].mjd, (numObs, 1))
        X = X - np.min(X) # to avoid problems when producing the model
        Y = np.reshape(snCatalog.sne[snIdx].lightCurvesDict[band].flux, (numObs, 1))
        errY = np.reshape(snCatalog.sne[snIdx].lightCurvesDict[band].fluxErr, (numObs, 1))
        Ymag = snphot.flux_to_mag(np.squeeze(Y), LimMag, False)
        errYmag = snphot.error_to_mag(np.squeeze(Y), np.squeeze(errY))
        Ymag = np.reshape(Ymag, (numObs, 1))
        errYmag = np.reshape(errYmag, (numObs, 1))
        errYmag

        # <markdowncell>

        # Setting the kernel function from which depends the cross-validation between different inputs

        # <codecell>

        kern = GPy.kern.Bias(1) + GPy.kern.RatQuad(1)#GPy.kern.RBF(1)

        # <markdowncell>

        # Creating the GP model

        # <codecell>

        #Model 1
        m = GPy.models.GPHeteroscedasticRegression(X, Ymag, kern)
        m['.*Gaussian_noise'] = errYmag.flatten() #Set the noise parameters to the error in Y
        [m['.*Gaussian_noise_%s' %i].constrain_fixed() for i in range(numObs)] #Fix the noise parameters, we know its value so we don't need to learn it
        m.checkgrad(verbose=0) # 1
        m.optimize_restarts(num_restarts=10)

        #ax[j][k].set_title('sn_id%(snid)i_band%(photband)s' % {"snid": snCatalog.SNID[snIdx], "photband": band})
        m.plot_f(fignum=1, ax=ax[j][k])
        ax[j][k].errorbar(X.flatten(), Ymag.flatten(), yerr=errYmag.flatten(), fmt=None, ecolor='r', zorder=1)
        # ax[j][k].ylim(20, 28)
        ax[j][k].set_ylim(28, 20)
        ax[j][k].set_xticklabels([])
        ax[j][k].set_yticklabels([])
        # Shade the region fainter than the band's limiting magnitude.
        fillX = np.linspace(ax[j][k].get_xlim()[0], ax[j][k].get_xlim()[1])
        fillY = fillX * 0 + LimMag
        fillY2 = fillX * 0 + ax[j][k].get_ylim()[0]
        ax[j][k].fill_between(fillX, fillY, fillY2, facecolor='gray', alpha=0.3)
        ax[j][k].text(ax[j][k].get_xlim()[0]+0.3*ax[j][k].get_xlim()[1], ax[j][k].get_ylim()[1]+0.05*ax[j][k].get_ylim()[0], 'idx %d' %snIdx, size='medium')
        # ax[j][k].set_title(snCatalog.sne[snIdx])
        # pb.title('Model 1')
        # ax[j][k].gca().invert_yaxis()
        # Advance to the next subplot slot (row-major order).
        if (k < nCols - 1):
            k += 1
        else:
            k = 0
            j += 1
    figFile = 'img/samples/sn_sample%(block)03d_%(band)s.pdf' % {"block": offset+1, "band": band}
    pb.savefig(figFile, format='pdf', dpi=300)

t2 = time.mktime(time.gmtime())
deltaT = time.localtime(t2 - t1)
# Python 2 print statement.
print 'It took %(mins)d mins and %(secs)d secs.' %{"mins": deltaT[4], "secs": deltaT[5]}
| Python | 0 | |
fbbe551b1347f158bf44350574ca7001a887d824 | Add sfp_gravatar | modules/sfp_gravatar.py | modules/sfp_gravatar.py | #-------------------------------------------------------------------------------
# Name: sfp_gravatar
# Purpose: SpiderFoot plug-in to search Gravatar API for an email address
# and retrieve user information, including username, name, phone
# numbers, additional email addresses, and social media usernames.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-05-26
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import hashlib
import re
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_gravatar(SpiderFootPlugin):
    """Gravatar:Footprint,Investigate,Passive:Social Media::Retrieve user information from Gravatar API."""

    # Default options
    opts = {
    }

    # Option descriptions
    optdescs = {
    }

    results = dict()

    def setup(self, sfc, userOpts=dict()):
        # Reset per-scan state (self.results de-duplicates email lookups).
        self.sf = sfc
        self.__dataSource__ = 'Gravatar'
        self.results = dict()

        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ['EMAILADDR']

    # What events this module produces
    def producedEvents(self):
        return ['RAW_RIR_DATA', 'HUMAN_NAME', 'USERNAME', 'EMAILADDR', 'PHONE_NUMBER', 'GEOINFO']

    # Query Gravatar API for the specified email address
    # https://secure.gravatar.com/site/implement/
    # https://secure.gravatar.com/site/implement/profiles/
    def query(self, qry):
        """Return the first Gravatar profile entry for the email, or None."""
        # Gravatar profiles are addressed by the MD5 of the lowercased email.
        email_hash = hashlib.md5(qry.encode('utf-8').lower()).hexdigest()
        output = 'json'
        res = self.sf.fetchUrl("https://secure.gravatar.com/" + email_hash + '.' + output,
                               timeout=self.opts['_fetchtimeout'],
                               useragent=self.opts['_useragent'])
        # Be polite to the API between requests.
        time.sleep(1)

        if res['content'] is None:
            self.sf.debug('No response from gravatar.com')
            return None

        if res['code'] != '200':
            return None

        try:
            data = json.loads(res['content'])
        except ValueError as e:
            # BUGFIX: was ``except BaseException``, which also swallowed
            # KeyboardInterrupt/SystemExit; json.loads signals malformed
            # JSON with ValueError (JSONDecodeError subclasses it).
            self.sf.debug('Error processing JSON response: ' + str(e))
            return None

        if data.get('entry') is None or len(data.get('entry')) == 0:
            return None

        return data.get('entry')[0]

    # Handle events sent to this module
    def handleEvent(self, event):
        """Look up the email in Gravatar and emit events for profile fields."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        # Don't look up the same email address twice.
        if eventData in self.results:
            return None

        self.results[eventData] = True

        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)

        data = self.query(eventData)

        if data is None:
            self.sf.debug("No user information found for " + eventData)
            return None

        # Always emit the raw profile for downstream correlation.
        evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
        self.notifyListeners(evt)

        if data.get('preferredUsername') is not None:
            evt = SpiderFootEvent("USERNAME", data.get('preferredUsername'), self.__name__, event)
            self.notifyListeners(evt)

        if data.get('name') is not None and data.get('name').get('formatted') is not None:
            evt = SpiderFootEvent("HUMAN_NAME", data.get('name').get('formatted'), self.__name__, event)
            self.notifyListeners(evt)

        if data.get('currentLocation') is not None:
            location = data.get('currentLocation')
            # Sanity-bound the free-text location before treating it as geo info.
            if len(location) < 3 or len(location) > 100:
                self.sf.debug("Skipping likely invalid location.")
            else:
                evt = SpiderFootEvent("GEOINFO", location, self.__name__, event)
                self.notifyListeners(evt)

        if data.get('phoneNumbers') is not None:
            for number in data.get('phoneNumbers'):
                if number.get('value') is not None:
                    evt = SpiderFootEvent("PHONE_NUMBER", number.get('value'), self.__name__, event)
                    self.notifyListeners(evt)

        if data.get('emails') is not None:
            for email in data.get('emails'):
                if email.get('value') is not None:
                    evt = SpiderFootEvent("EMAILADDR", email.get('value'), self.__name__, event)
                    self.notifyListeners(evt)

        if data.get('ims') is not None:
            for im in data.get('ims'):
                if im.get('value') is not None:
                    evt = SpiderFootEvent("USERNAME", im.get('value'), self.__name__, event)
                    self.notifyListeners(evt)

        if data.get('accounts') is not None:
            for account in data.get('accounts'):
                if account.get('username') is not None:
                    evt = SpiderFootEvent("USERNAME", account.get('username'), self.__name__, event)
                    self.notifyListeners(evt)
# End of sfp_gravatar class
| Python | 0.000054 | |
7b27f4cdb8135e7d5fd18ff11e2eae9325e6f17a | Move METROPOLIS_FORK_BLKNUM | ethereum/config.py | ethereum/config.py | from rlp.utils import decode_hex
from ethereum import utils
from ethereum.db import BaseDB
default_config = dict(
# Genesis block difficulty
GENESIS_DIFFICULTY=131072,
# Genesis block gas limit
GENESIS_GAS_LIMIT=3141592,
# Genesis block prevhash, coinbase, nonce
GENESIS_PREVHASH=b'\x00' * 32,
GENESIS_COINBASE=b'\x00' * 20,
GENESIS_NONCE=utils.zpad(utils.encode_int(42), 8),
GENESIS_MIXHASH=b'\x00' * 32,
GENESIS_TIMESTAMP=0,
GENESIS_EXTRA_DATA=b'',
GENESIS_INITIAL_ALLOC={},
# Minimum gas limit
MIN_GAS_LIMIT=5000,
# Gas limit adjustment algo:
# block.gas_limit=block.parent.gas_limit * 1023/1024 +
# (block.gas_used * 6 / 5) / 1024
GASLIMIT_EMA_FACTOR=1024,
GASLIMIT_ADJMAX_FACTOR=1024,
BLKLIM_FACTOR_NOM=3,
BLKLIM_FACTOR_DEN=2,
# Block reward
BLOCK_REWARD=5000 * utils.denoms.finney,
NEPHEW_REWARD=5000 * utils.denoms.finney // 32, # BLOCK_REWARD / 32
# GHOST constants
UNCLE_DEPTH_PENALTY_FACTOR=8,
MAX_UNCLE_DEPTH=6, # max (block.number - uncle.number)
MAX_UNCLES=2,
# Difficulty adjustment constants
DIFF_ADJUSTMENT_CUTOFF=13,
BLOCK_DIFF_FACTOR=2048,
MIN_DIFF=131072,
# PoW info
POW_EPOCH_LENGTH=30000,
# Maximum extra data length
MAX_EXTRADATA_LENGTH=32,
# Exponential difficulty timebomb period
EXPDIFF_PERIOD=100000,
EXPDIFF_FREE_PERIODS=2,
# Blank account initial nonce
ACCOUNT_INITIAL_NONCE=0,
# Homestead fork
HOMESTEAD_FORK_BLKNUM=1150000,
HOMESTEAD_DIFF_ADJUSTMENT_CUTOFF=10,
# Metropolis fork
METROPOLIS_FORK_BLKNUM=2 ** 100,
METROPOLIS_ENTRY_POINT=2 ** 160 - 1,
METROPOLIS_STATEROOT_STORE=0x10,
METROPOLIS_BLOCKHASH_STORE=0x20,
METROPOLIS_WRAPAROUND=65536,
METROPOLIS_GETTER_CODE=decode_hex('6000355460205260206020f3'),
METROPOLIS_DIFF_ADJUSTMENT_CUTOFF=9,
# Metropolis fork
)
assert default_config['NEPHEW_REWARD'] == \
default_config['BLOCK_REWARD'] // 32
class Env(object):
    """Bundles a database handle with chain and global configuration."""

    def __init__(self, db, config=None, global_config=None):
        assert isinstance(db, BaseDB)
        self.db = db
        # Fall back to a fresh copy of default_config so callers can
        # mutate self.config without touching the shared defaults.
        self.config = config or dict(default_config)
        self.global_config = global_config or dict()
| from rlp.utils import decode_hex
from ethereum import utils
from ethereum.db import BaseDB
default_config = dict(
# Genesis block difficulty
GENESIS_DIFFICULTY=131072,
# Genesis block gas limit
GENESIS_GAS_LIMIT=3141592,
# Genesis block prevhash, coinbase, nonce
GENESIS_PREVHASH=b'\x00' * 32,
GENESIS_COINBASE=b'\x00' * 20,
GENESIS_NONCE=utils.zpad(utils.encode_int(42), 8),
GENESIS_MIXHASH=b'\x00' * 32,
GENESIS_TIMESTAMP=0,
GENESIS_EXTRA_DATA=b'',
GENESIS_INITIAL_ALLOC={},
# Minimum gas limit
MIN_GAS_LIMIT=5000,
# Gas limit adjustment algo:
# block.gas_limit=block.parent.gas_limit * 1023/1024 +
# (block.gas_used * 6 / 5) / 1024
GASLIMIT_EMA_FACTOR=1024,
GASLIMIT_ADJMAX_FACTOR=1024,
BLKLIM_FACTOR_NOM=3,
BLKLIM_FACTOR_DEN=2,
# Block reward
BLOCK_REWARD=5000 * utils.denoms.finney,
NEPHEW_REWARD=5000 * utils.denoms.finney // 32, # BLOCK_REWARD / 32
# GHOST constants
UNCLE_DEPTH_PENALTY_FACTOR=8,
MAX_UNCLE_DEPTH=6, # max (block.number - uncle.number)
MAX_UNCLES=2,
# Difficulty adjustment constants
DIFF_ADJUSTMENT_CUTOFF=13,
BLOCK_DIFF_FACTOR=2048,
MIN_DIFF=131072,
# PoW info
POW_EPOCH_LENGTH=30000,
# Maximum extra data length
MAX_EXTRADATA_LENGTH=32,
# Exponential difficulty timebomb period
EXPDIFF_PERIOD=100000,
EXPDIFF_FREE_PERIODS=2,
# Blank account initial nonce
ACCOUNT_INITIAL_NONCE=0,
# Homestead fork
HOMESTEAD_FORK_BLKNUM=1150000,
HOMESTEAD_DIFF_ADJUSTMENT_CUTOFF=10,
# Metropolis fork
METROPOLIS_FORK_BLKNUM=99999999,
METROPOLIS_ENTRY_POINT=2 ** 160 - 1,
METROPOLIS_STATEROOT_STORE=0x10,
METROPOLIS_BLOCKHASH_STORE=0x20,
METROPOLIS_WRAPAROUND=65536,
METROPOLIS_GETTER_CODE=decode_hex('6000355460205260206020f3'),
METROPOLIS_DIFF_ADJUSTMENT_CUTOFF=9,
# Metropolis fork
)
assert default_config['NEPHEW_REWARD'] == \
default_config['BLOCK_REWARD'] // 32
class Env(object):
def __init__(self, db, config=None, global_config=None):
assert isinstance(db, BaseDB)
self.db = db
self.config = config or dict(default_config)
self.global_config = global_config or dict()
| Python | 0.000005 |
73bc2dbfe40db224a38725f4412e33b1b5accac6 | Add script example. | examples/script.py | examples/script.py | # Copyright (c) 2013 Jordan Halterman <jordan.halterman@gmail.com>
# See LICENSE for details.
import sys, os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
# The Active Redis API provides native support for Redis server-side
# Lua scripting.
from active_redis import Script
class PushMany(Script):
    """
    Push several items on to a queue.
    """
    # Define keyword argument names for keys used by the script.
    keys = ['key']

    # Define keyword argument names for all other arguments to the script.
    args = []

    # In this case, we're using a variable number of arguments. Note that
    # when variable arguments are used, only the last defined argument
    # may have a variable number.
    variable_args = True

    # Finally, define the Lua script. This is just a simple example.
    # Server-side: RPUSH every ARGV value onto the list at KEYS[1].
    script = """
    local key = KEYS[1]
    local vals = ARGV
    redis.call('RPUSH', key, unpack(vals))
    """
# Building upon the datatype example, we can extend the Queue class
# and make use of our script.
from datatype import Queue
from active_redis import registry
@registry.datatype
class BetterQueue(Queue):
    """A better version of our queue."""
    type = 'better_queue'

    # Maps script names to Script subclasses; looked up by _execute_script.
    _scripts = {
        'pushmany': PushMany,
    }

    def push_many(self, *args):
        """Pushes many items on to the queue."""
        # Delegates to the registered PushMany Lua script: all items are
        # pushed server-side in a single round trip.
        return self._execute_script('pushmany', self.key, *args)
| Python | 0 | |
68ba389a4b6cefe70864577bcc195f14012e224d | Add UK flag example | examples/ukflag.py | examples/ukflag.py | import math
import omnicanvas
def create_union_flag(height):
    """Draw the Union Flag on a new canvas of the given height.

    The flag is twice as wide as it is high; returns the omnicanvas Canvas.
    """
    canvas = omnicanvas.Canvas(height * 2, height, background_color="#000066")

    # Length of the flag's diagonal, by Pythagoras.
    diagonal_length = math.sqrt((height ** 2) + ((height * 2) ** 2))
    # Angle of the diagonal strips from the horizontal:
    # tan(θ) = opposite / adjacent, so θ = atan(opposite / adjacent).
    diagonal_angle = math.degrees(math.atan((height / 2) / height))

    # All rotations pivot around the flag's centre point.
    centre_x, centre_y = height, height / 2

    # White diagonal strips (drawn first, one per rotation angle).
    for angle in (270 + diagonal_angle, 90 - diagonal_angle):
        canvas.add_rectangle(
            height - (height * 0.1),
            (height / 2) - (diagonal_length / 2),
            height * 0.2,
            diagonal_length,
            line_width=0,
            rotation=(centre_x, centre_y, angle)
        )

    # Red diagonal half-strips — these'll be partly covered by the white cross.
    for angle in (90 - diagonal_angle, 270 - diagonal_angle,
                  270 + diagonal_angle, 90 + diagonal_angle):
        canvas.add_rectangle(
            height - (height / 15),
            (height / 2) - (diagonal_length / 2),
            height / 15,
            diagonal_length / 2,
            line_width=0,
            fill_color="#CC0000",
            rotation=(centre_x, centre_y, angle)
        )

    # White cross: vertical bar, then horizontal bar.
    canvas.add_rectangle(
        height - (height / 6), 0, height / 3, height,
        line_width=0
    )
    canvas.add_rectangle(
        0, (height / 2) - (height / 6), height * 2, height / 3,
        line_width=0
    )

    # Red cross on top of the white one.
    canvas.add_rectangle(
        height - (height / 10), 0, height / 5, height,
        line_width=0,
        fill_color="#CC0000",
    )
    canvas.add_rectangle(
        0, (height / 2) - (height / 10), height * 2, height / 5,
        line_width=0,
        fill_color="#CC0000",
    )
    return canvas
# Create a flag of height 360px (and so width 720px) and save it as SVG.
create_union_flag(360).save("ukflag.svg")
| Python | 0.000001 | |
3ab0e590479fabb024937e52eab02e2311033448 | Implement a function to map chord segments to STFT blocks. | time_intervals.py | time_intervals.py | import pandas as pd
import numpy as np
import collections
def block_labels(df_blocks, df_labels):
    '''
    Given fixed-size overlapping blocks and variable-sized non-overlapping
    labels select most suitable label for each block.
    This can be useful eg. to assign chord labels to audio blocks.
    All times are measured in samples and represented by integers.

    Inputs:
    - df_blocks: pandas DataFrame with columns start, end (in samples)
    - df_labels: pandas DataFrame with columns start, label
    Outputs:
    - df_blocks: pandas DataFrame with columns start, end, label

    In case multiple labels span a single block the label with most coverage is
    selected.

    Updated for modern pandas: ``DataFrame.sort`` (removed in 0.20) is now
    ``sort_values``, ``.ix`` (removed in 1.0) is now ``.loc``, and
    ``fillna(method='pad')`` (deprecated) is now ``.ffill()``.
    '''
    def merge_events(df_blocks, df_labels):
        # Merge every block boundary (starts and ends) with the label start
        # times into a single, time-ordered event table.
        boundaries = pd.concat([
            df_blocks[['start']],
            df_blocks[['end']].rename(columns={'end': 'start'})
        ]).drop_duplicates()
        df_events = pd.merge(boundaries, df_labels, how='outer')
        df_events.sort_values('start', inplace=True)
        # Propagate the last-seen label forward over boundary-only events.
        df_events = df_events.ffill()
        # Duration of each event = distance to the following event; the
        # final event gets NaN and is discarded by dropna() below.
        df_events['duration'] = abs(df_events['start'].diff(-1))
        df_events.set_index('start', inplace=True)
        return df_events.dropna()

    df_events = merge_events(df_blocks, df_labels)

    def label_for_block(start, end):
        # .loc label slicing is inclusive at both ends, matching the old
        # .ix behaviour on this integer index.
        labels = df_events['label'].loc[start:end]
        unique_labels = set(labels)
        if len(unique_labels) > 1:
            # Several labels overlap this block: pick the one that covers
            # the most samples within it.
            durations = df_events['duration'].loc[start:end]
            cnt = collections.Counter()
            for l, d in zip(labels, durations):
                cnt[l] += d
            return cnt.most_common(1)[0][0]
        else:
            return labels.iloc[0]

    def add_labels(df_blocks):
        # Iterate the columns directly rather than itertuples(), so the row
        # arity does not matter; result order follows the row order.
        labels = [label_for_block(start, end)
                  for start, end in zip(df_blocks['start'], df_blocks['end'])]
        return df_blocks.join(pd.DataFrame({'label': labels}))

    return add_labels(df_blocks)
def test():
    """Self-check: labels from block_labels() must match a hand-derived list."""
    block_size = 10
    hop_size = 5
    sample_count = 90
    block_count = (sample_count - block_size) / hop_size
    block_starts = hop_size * np.arange(block_count + 1).astype(np.int32)
    block_ends = block_starts + block_size
    df_blocks = pd.DataFrame(list(zip(block_starts, block_ends)),
                             columns=['start', 'end'])

    # Label segment start times (the last element is the end of the last
    # segment).
    label_times = [0, 25, 38, 50, 60, 64, 68, 80, 81, 84, 89]
    labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'H', 'N']
    df_labels = pd.DataFrame({'start': label_times, 'label': labels},
                             columns=['start', 'label'])

    df_labelled_blocks = block_labels(df_blocks, df_labels)

    expected_labels = list('AAAABBBCCDDDGGGHH')
    actual_labels = list(df_labelled_blocks['label'])
    for start, expected, actual in zip(block_starts, expected_labels,
                                       actual_labels):
        print(start, expected, actual, '*' if expected != actual else '')
    assert actual_labels == expected_labels
| Python | 0 | |
fbaca2f2a0ceaa77606d9c24846a1a1b045dc460 | remove deleted files from manifest | addons/l10n_lu/__openerp__.py | addons/l10n_lu/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011 Thamini S.à.R.L (<http://www.thamini.com>)
# Copyright (C) 2011 ADN Consultants S.à.R.L (<http://www.adn-luxembourg.com>)
# Copyright (C) 2012-today OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Luxembourg - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Luxembourg.
======================================================================
* the Luxembourg Official Chart of Accounts (law of June 2009 + 2011 chart and Taxes),
* the Tax Code Chart for Luxembourg
* the main taxes used in Luxembourg
* default fiscal position for local, intracom, extracom """,
'author': 'OpenERP SA & ADN',
'website': 'http://www.openerp.com http://www.adn-luxembourg.com',
'depends': ['account', 'base_vat', 'base_iban'],
'init_xml': [],
'update_xml': [
# basic accounting data
'account.account.type-2011.csv',
'account.account.template-2011.csv',
'account.tax.code.template-2011.csv',
'account.chart.template-2011.csv',
'account.tax.template-2011.csv',
# Change BRE: adds fiscal position
'account.fiscal.position.template-2011.csv',
'account.fiscal.position.tax.template-2011.csv',
# configuration wizard, views, reports...
'l10n_lu_wizard.xml',
'l10n_lu_view.xml',
'wizard/print_vat_view.xml'
],
'test': ['test/l10n_lu_report.yml'],
'demo_xml': [],
'installable': True,
'auto_install': False,
'certificate': '0078164766621',
'images': ['images/config_chart_l10n_lu.jpeg','images/l10n_lu_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011 Thamini S.à.R.L (<http://www.thamini.com>)
# Copyright (C) 2011 ADN Consultants S.à.R.L (<http://www.adn-luxembourg.com>)
# Copyright (C) 2012-today OpenERP SA (<http://openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Luxembourg - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Luxembourg.
======================================================================
* the Luxembourg Official Chart of Accounts (law of June 2009 + 2011 chart and Taxes),
* the Tax Code Chart for Luxembourg
* the main taxes used in Luxembourg
* default fiscal position for local, intracom, extracom """,
'author': 'OpenERP SA & ADN',
'website': 'http://www.openerp.com http://www.adn-luxembourg.com',
'depends': ['account', 'base_vat', 'base_iban'],
'init_xml': [],
'update_xml': [
# basic accounting data
'account.account.type-2011.csv',
'account.account.template-2011.csv',
'account.tax.code.template-2011.csv',
'account.chart.template-2011.csv',
'account.tax.template-2011.csv',
# Change BRE: adds fiscal position
'account.fiscal.position.template-2011.csv',
'account.fiscal.position.tax.template-2011.csv',
# configuration wizard, views, reports...
'l10n_lu_wizard.xml',
'account.tax.template.csv',
'l10n_lu_view.xml',
'wizard/print_vat_view.xml'
],
'test': ['test/l10n_lu_report.yml'],
'demo_xml': [],
'installable': True,
'auto_install': False,
'certificate': '0078164766621',
'images': ['images/config_chart_l10n_lu.jpeg','images/l10n_lu_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 |
c0ee3bb87a26a57bc7dc1bd4e1aaf6136f94bc17 | Add missing filters.py file in organizations | ain7/organizations/filters.py | ain7/organizations/filters.py | # -*- coding: utf-8
"""
ain7/organizations/filters.py
"""
#
# Copyright © 2007-2015 AIn7 Devel Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import django_filters
from ain7.organizations.models import Organization
class OrganizationFilter(django_filters.FilterSet):
    """FilterSet exposing case-insensitive substring filters for organizations."""

    class Meta:
        model = Organization
        # Both filters use a case-insensitive "contains" lookup.
        fields = {name: ['icontains'] for name in ('name', 'activity_field')}
| Python | 0.000001 | |
f56181aaf6df758abb988d10c757c6eba72d5025 | write beginning of method for storing probabilities in a hash | parser.py | parser.py | import re
# Maps a (word1, word2) bigram to the number of times it occurs.
# The previous literal ``{[], ""}`` was a *set* containing a list, which
# raises TypeError (lists are unhashable) the moment the module is imported.
probabilityHash = {}
def parseIntoProbabilityHash(text):
    """Count consecutive word pairs (bigrams) in *text*.

    Punctuation (anything that is not a word character, apostrophe or
    whitespace) is replaced by spaces before splitting into words.

    Returns a dict mapping each ``(word1, word2)`` pair to its occurrence
    count, per the module-level "[word1, word2], count" comment.

    Fixes in this revision:
    - ``ur"..."`` is Python-2-only syntax; a plain raw string works everywhere,
    - the original loop never advanced its index and always stored ``1`` for
      the first word instead of tallying anything,
    - results are built in a local dict and returned, instead of mutating the
      (previously broken) module-level ``probabilityHash``.
    """
    stripPunctuation = re.sub(r"[^\w\d'\s]+", ' ', text)
    wordsInText = stripPunctuation.split()
    counts = {}
    # Pair each word with its successor and tally the pair.
    for first, second in zip(wordsInText, wordsInText[1:]):
        counts[(first, second)] = counts.get((first, second), 0) + 1
    return counts
| Python | 0.000011 | |
5e54e5ebf9add6d8bd879d963803ee57fd591f4b | Write new Preparation tests | whats_fresh/whats_fresh_api/tests/views/entry/test_new_preparation.py | whats_fresh/whats_fresh_api/tests/views/entry/test_new_preparation.py | from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh_api.models import *
from django.contrib.gis.db import models
import json
class NewPreparationTestCase(TestCase):
    """
    Test that the New Preparation page works as expected.

    Things tested:
        URLs reverse correctly
        The outputted page has the correct form fields
        POSTing "correct" data will result in the creation of a new
            object with the specified details
        POSTing data with all fields missing (hitting "save" without entering
            data) returns the same field with notations of missing fields
    """
    def test_url_endpoint(self):
        """The named URL reverses to the expected fixed path."""
        url = reverse('new-preparation')
        self.assertEqual(url, '/entry/preparations/new')

    def test_form_fields(self):
        """
        Tests to see if the form contains all of the right fields
        """
        response = self.client.get(reverse('new-preparation'))

        # Maps each expected form field to the HTML widget its rendering
        # should contain.
        fields = {'name': 'input', 'description': 'input',
                  'additional_info': 'select'}
        form = response.context['preparation_form']

        for field in fields:
            # for the Edit tests, you should be able to access
            # form[field].value
            self.assertIn(fields[field], str(form[field]))

    def test_successful_preparation_creation_minimal(self):
        """
        POST a proper "new preparation" command to the server, and see if the
        new preparation appears in the database. All optional fields are null.
        """
        # Start from an empty table so objects.all()[0] below is our object.
        Preparation.objects.all().delete()

        # Data that we'll post to the server to get the new preparation created
        new_preparation = {
            'name': 'Fried', 'description': '', 'additional_info': ''}

        response = self.client.post(reverse('new-preparation'),
                                    new_preparation)

        # Raises IndexError if creation failed, which fails the test.
        preparation = Preparation.objects.all()[0]
        for field in new_preparation:
            self.assertEqual(
                getattr(preparation, field), new_preparation[field])

    def test_successful_preparation_creation_maximal(self):
        """
        POST a proper "new preparation" command to the server, and see if the
        new preparation appears in the database. All optional fields are used.
        """
        # Start from an empty table so objects.all()[0] below is our object.
        Preparation.objects.all().delete()

        # Data that we'll post to the server to get the new preparation created
        new_preparation = {
            'name': 'Fried',
            'description': 'Test Description',
            'additional_info': 'Fried food is good'}

        response = self.client.post(reverse('new-preparation'),
                                    new_preparation)

        preparation = Preparation.objects.all()[0]
        for field in new_preparation:
            self.assertEqual(
                getattr(preparation, field), new_preparation[field])

    def test_no_data_error(self):
        """
        POST a "new preparation" command to the server missing all of the
        required fields, and test to see what the error comes back as.
        """
        # Create a list of all objects before sending bad POST data
        all_preparations = Preparation.objects.all()

        response = self.client.post(reverse('new-preparation'))
        required_fields = ['name']
        for field_name in required_fields:
            self.assertIn(field_name,
                          response.context['preparation_form'].errors)

        # Test that we didn't add any new objects
        self.assertEqual(
            list(Preparation.objects.all()), list(all_preparations))
| Python | 0.000001 | |
4d92b111eecd3ce938676edee36b288c42484905 | test scraper for UKÄ | statscraper/scrapers/uka_scraper.py | statscraper/scrapers/uka_scraper.py | # encoding: utf-8
u""" A scraper to fetch Swedish university application statistics from
the Swedish Higher Education Authority (Universitetskanslerämbetet, UKÄ),
at http://statistik.uka.se
"""
from statscraper import BaseScraper, Dataset, Dimension, Result, Collection
import requests
from bs4 import BeautifulSoup
class UKA(BaseScraper):
def _fetch_itemslist(self, item):
""" We only offer regional application stats.
Other collections are differently structured.
"""
if item.is_root:
yield Collection("regional",
label="New students by area and school.")
else:
yield Dataset("county",
label="New students by county, school and semester.")
def _fetch_dimensions(self, dataset):
""" Declaring available dimensions like this is not mandatory,
but nice, especially if they differ from dataset to dataset.
If you are using a built in datatype, you can specify the dialect
you are expecting, to have values normalized. This scraper will
look for Swedish month names (e.g. 'Januari'), but return them
according to the Statscraper standard ('january').
"""
yield Dimension(u"school")
yield Dimension(u"semester")
yield Dimension(u"year", datatype="year")
yield Dimension(u"semester",
datatype="academic_term",
dialect="swedish")
def _fetch_data(self, dataset, query=None):
url = "http://statistik.uka.se/4.5d85793915901d205f935d0f.12.5d85793915901d205f965eab.portlet?action=resultat&view=resultTable&frageTyp=3&frageNr=240&tid=%s&grupp1=%s&grupp2=%s"
terms = [6]
counties = [{
'id': "10",
'municipalities': ["80"]
}, ]
for t in terms:
for c in counties:
for m in c["municipalities"]:
html = requests.get(url % (t, c, m["id"])).text
soup = BeautifulSoup(html, 'html.parser')
table = soup.find("table")
row = table.find_all("tr")[5:]
cells = row.find_all("td")
print cells[0].text,
print cells[2].text
"""
yield Result(value.text.encode("utf-8"), {
"date": date,
"month": month,
"year": years[i],
})
"""
| Python | 0 | |
d2762f81a9f8ed405ca5fc9d567004af182d137b | add importer for delimited data | python/delim_import.py | python/delim_import.py | from json_generator import JsonGenerator, writeTrackEntry
def delimImport(file, skipLines, colNames, dataDir, trackLabel, key = None,
                delim = "\t", chunkBytes = 200000, compress = True,
                config = None ):
    """Import delimited (e.g. tab-separated) feature data into JSON track files.

    Arguments:
        file: path of the delimited input file.
        skipLines: number of leading (header) lines to ignore. (Previously
            this parameter was accepted but silently ignored.)
        colNames: column names; must contain "Start", "End" and "Chrom".
        dataDir: output directory for the generated track data.
        trackLabel: label of the generated track.
        key: human-readable track name; defaults to trackLabel.
        delim: field delimiter.
        chunkBytes: approximate chunk size handed to JsonGenerator.
        compress: whether the generated JSON is compressed.
        config: extra track configuration. A fresh default is now created per
            call: the old mutable default dict was shared between calls and
            mutated below ('urlTemplate' leaked across imports).
    """
    if config is None:
        config = {'style': {'className': 'feature2'}}

    with open(file, 'r') as fh:
        data = [line.split(delim) for line in fh.readlines()[skipLines:]]

    startIndex = colNames.index("Start")
    endIndex = colNames.index("End")
    chromIndex = colNames.index("Chrom")
    for item in data:
        item[startIndex] = int(item[startIndex])
        item[endIndex] = int(item[endIndex])

    # Nested-containment-list order: ascending start, then descending end,
    # so containing features precede the features they contain.
    # (list.sort(cmpfunc) was Python-2-only; a key tuple is equivalent.)
    data.sort(key=lambda item: (item[startIndex], -item[endIndex]))

    curRef = None
    jsongen = None
    for item in data:
        if item[chromIndex] != curRef:
            # New reference sequence: flush the previous track first.
            if jsongen is not None:
                jsongen.generateTrack()
            curRef = item[chromIndex]
            classMeta = [{'attributes': colNames,
                          'proto': {'Chrom': item[chromIndex]} } ]
            jsongen = JsonGenerator(dataDir, trackLabel, item[chromIndex],
                                    chunkBytes, compress, classMeta, key)
        jsongen.addSorted([0] + item)
    if (jsongen is not None) and (jsongen.hasFeatures):
        jsongen.generateTrack()

    if jsongen is None:
        # Empty input: nothing to register (previously crashed with
        # AttributeError on jsongen.urlTemplate).
        return

    #attrs = ArrayRepr
    config['urlTemplate'] = jsongen.urlTemplate
    writeTrackEntry(dataDir, 'FeatureTrack', trackLabel,
                    key if key is not None else trackLabel,
                    config)
| Python | 0 | |
319af4e5cfad516f0c68bdbb8adabed19b0b82b6 | Add backend "multi_tcp". | src/backend/multi_tcp.py | src/backend/multi_tcp.py | # coding: UTF-8
import errno
import socket
from collections import defaultdict
# Default TCP port the backend listens/connects on.
DEFAULT_PORT = 4194
# Default number of payload bytes carried per block on each connection.
DEFAULT_BLOCKSIZE = 8192
# Default number of parallel TCP connections per logical stream.
DEFAULT_NUMBER = 5
class MultiTCPBackend(object):
    """Stripes one logical byte stream over several TCP connections.

    Outgoing data is cut into ``blocksize`` blocks that are assigned to the
    connections round-robin (``cur_filling``/``filled_bytes`` track the block
    currently being filled). The receiver reads blocks back in the same
    round-robin order (``cur_recving``/``remaining_bytes``), so both ends must
    agree on ``number`` and ``blocksize``.

    NOTE(review): ``self.conns`` is expected to be set by a subclass
    (ClientBackend/ServerInstance) before send/recv is used — confirm.
    """

    def __init__(self, number, blocksize):
        self.number = number
        self.blocksize = blocksize
        # One pending-output buffer per connection.
        self.send_bufs = [b"" for i in range(number)]
        # Index of the connection whose block is currently being filled, and
        # how many bytes of that block are filled so far.
        self.cur_filling = 0
        self.filled_bytes = 0
        # Index of the connection we are currently reading from, and how many
        # bytes of its current block are still expected.
        self.cur_recving = 0
        self.remaining_bytes = blocksize

    def send(self, data=None, urgent=True):
        """Queue *data* across the per-connection buffers; if *urgent*, also
        try to flush. Returns True once all buffers are drained."""
        while data:
            left_bytes = self.blocksize - self.filled_bytes
            if len(data) >= left_bytes:
                # Current block is completed: top it up and move to the next
                # connection in round-robin order.
                self.send_bufs[self.cur_filling] += data[:left_bytes]
                self.cur_filling = (self.cur_filling + 1) % self.number
                self.filled_bytes = 0
                data = data[left_bytes:]
            else:
                # Partial block: stash the remainder and stop slicing.
                self.send_bufs[self.cur_filling] += data
                self.filled_bytes += len(data)
                break
        if not urgent:
            return True
        return self._continue()

    def _continue(self):
        """Flush as much buffered data as the non-blocking sockets accept.
        Returns True only if every buffer is now empty."""
        complete = True
        for i, conn in zip(range(self.number), self.conns):
            if not self.send_bufs[i]:
                continue
            try:
                sent = conn.send(self.send_bufs[i])
            except socket.error as e:
                # EWOULDBLOCK just means "try again later"; anything else is
                # a real error and propagates.
                if e.errno == errno.EWOULDBLOCK:
                    sent = 0
                else:
                    raise
            if sent:
                self.send_bufs[i] = self.send_bufs[i][sent:]
            if self.send_bufs[i]:
                complete = False
        return complete

    def recv(self):
        """Read from the connection whose block is due next.

        Returns the bytes received (possibly a partial block), or None on
        orderly peer shutdown (recv returned b""). Socket errors — including
        EWOULDBLOCK on these non-blocking sockets — propagate; callers are
        expected to wait on get_rlist() first.
        """
        conn = self.conns[self.cur_recving]
        data = conn.recv(self.remaining_bytes)
        if data == b"":
            return None
        self.remaining_bytes -= len(data)
        if self.remaining_bytes == 0:
            # Block complete: advance to the next connection round-robin.
            self.cur_recving = (self.cur_recving + 1) % self.number
            self.remaining_bytes = self.blocksize
        return data

    def close(self):
        """Close all connections (switched back to blocking mode first)."""
        for conn in self.conns:
            conn.setblocking(1)
            # TODO make close non-blocking
            conn.close()

    def get_rlist(self):
        """File descriptors to select() on for readability: only the
        connection whose block is due next."""
        return [self.conns[self.cur_recving].fileno()]

    def get_wlist(self):
        """File descriptors to select() on for writability: every connection
        that still has buffered outgoing data."""
        return [self.conns[i].fileno()
                for i in range(self.number) if self.send_bufs[i]]
class ClientBackend(MultiTCPBackend):
    """Client side: opens `number` TCP connections to a single server."""

    # Class-level defaults, overridable per instance via keyword options.
    server = "127.0.0.1"
    port = DEFAULT_PORT
    blocksize = DEFAULT_BLOCKSIZE
    number = DEFAULT_NUMBER

    def __init__(self, **opts):
        for name in ('server', 'port', 'blocksize', 'number'):
            if name in opts:
                setattr(self, name, opts[name])
        super(ClientBackend, self).__init__(self.number, self.blocksize)

        # Open every connection up front, then switch them to non-blocking
        # mode for the send/recv machinery.
        self.conns = [socket.socket() for _ in range(self.number)]
        for conn in self.conns:
            # TODO make connect non-blocking
            conn.connect((self.server, self.port))
            conn.setblocking(0)
class ServerInstance(MultiTCPBackend):
    """Server side of one logical stream, built from accepted connections."""

    def __init__(self, conns, address, blocksize):
        super(ServerInstance, self).__init__(len(conns), blocksize)
        self.address = address
        self.conns = conns
        # The accept loop hands over blocking sockets; make them non-blocking.
        for sock in conns:
            sock.setblocking(0)
class ServerBackend(object):
    """Listens for clients and groups their connections into instances."""

    # Class-level defaults, overridable per instance via keyword options.
    address = ""
    port = DEFAULT_PORT
    blocksize = DEFAULT_BLOCKSIZE
    number = DEFAULT_NUMBER

    def __init__(self, **opts):
        for name in ('address', 'port', 'blocksize', 'number'):
            if name in opts:
                setattr(self, name, opts[name])

        # Connections still waiting for their peers, keyed by client host.
        self.connections = defaultdict(list)

        # Listening socket.
        self.conn = socket.socket()
        self.conn.bind((self.address, self.port))
        self.conn.listen(10)

    def accept(self):
        """Accept one connection. Once a client host has opened all `number`
        of its connections, hand them over as a ServerInstance; until then
        return None."""
        sock, address = self.conn.accept()
        host = address[0]
        # collect connections
        # TODO should expire after a while
        pending = self.connections[host]
        pending.append(sock)
        if len(pending) < self.number:
            return None

        # All connections for this client have arrived.
        del self.connections[host]
        return ServerInstance(pending, host, self.blocksize)

    def close(self):
        """Close the listening socket (handed-out instances are untouched)."""
        self.conn.close()

    def get_rlist(self):
        """File descriptors to select() on for incoming clients."""
        return [self.conn.fileno()]
| Python | 0 | |
de456b7e6397d775bd244b7e20eb1d675ca1bde0 | Add logging to attrib plugin | nose2/plugins/attrib.py | nose2/plugins/attrib.py | import logging
from unittest import TestSuite
from nose2.events import Plugin
# Module-level logger for the attribute-selection plugin.
log = logging.getLogger(__name__)
# Sentinel meaning "attribute not present" (distinct from a stored None).
undefined = object()

# TODO: eval attribs
class AttributeSelector(Plugin):
    """Filter the test suite by attributes on test functions/methods.

    Attribute specs come in via the -A/--attr option. Each option value is a
    comma-separated group whose entries must ALL match a test; separate
    options (groups) are OR-ed together. Entry forms: ``name`` (truthy),
    ``!name`` (falsy/absent) and ``name=value`` (string comparison).
    """
    def __init__(self):
        self.attribs = []
        self.addOption(self.attribs, "A", "attr", "Attribulate")

    def startTestRun(self, event):
        """Parse the raw -A option strings and replace event.suite with a
        filtered copy. No-op when no attributes were requested."""
        if not self.attribs:
            return
        log.debug('Attribute selector attribs %s', self.attribs)
        attribs = []
        for attr in self.attribs:
            # all attributes within an attribute group must match
            attr_group = []
            for attrib in attr.strip().split(","):
                # don't die on trailing comma
                if not attrib:
                    continue
                items = attrib.split("=", 1)
                if len(items) > 1:
                    # "name=value"
                    # -> 'str(obj.name) == value' must be True
                    key, value = items
                else:
                    key = items[0]
                    if key[0] == "!":
                        # "!name"
                        # 'bool(obj.name)' must be False
                        key = key[1:]
                        value = False
                    else:
                        # "name"
                        # -> 'bool(obj.name)' must be True
                        value = True
                attr_group.append((key, value))
            attribs.append(attr_group)
        if not attribs:
            return
        event.suite = self.filterSuite(event.suite, attribs)

    def filterSuite(self, suite, attribs):
        """Recursively rebuild *suite*, keeping only tests whose attributes
        satisfy at least one group in *attribs*."""
        new_suite = suite.__class__()
        for test in suite:
            if isinstance(test, TestSuite):
                new_suite.addTest(self.filterSuite(test, attribs))
            elif self.validateAttrib(test, attribs):
                new_suite.addTest(test)
        return new_suite

    def validateAttrib(self, test, attribs):
        """Return True if *test* matches every entry of at least one group."""
        any_ = False
        for group in attribs:
            match = True
            for key, value in group:
                obj_value = self.getAttr(test, key)
                if callable(value):
                    # Programmatic matcher: value(key, test) decides.
                    if not value(key, test):
                        match = False
                        break
                elif value is True:
                    # value must exist and be True
                    if not bool(obj_value):
                        match = False
                        break
                elif value is False:
                    # value must not exist or be False
                    if bool(obj_value):
                        match = False
                        break
                elif type(obj_value) in (list, tuple):
                    # value must be found in the list attribute
                    if not str(value).lower() in [str(x).lower()
                                                  for x in obj_value]:
                        match = False
                        break
                else:
                    # value must match, convert to string and compare
                    if (value != obj_value
                        and str(value).lower() != str(obj_value).lower()):
                        match = False
                        break
            any_ = any_ or match
        return any_

    def getAttr(self, test, key):
        """Look up *key* on the test case, then on its wrapped function
        (_testFunc) or bound test method (_testMethodName). Implicitly
        returns None when the attribute is found nowhere."""
        val = getattr(test, key, undefined)
        if val is not undefined:
            return val
        if hasattr(test, '_testFunc'):
            val = getattr(test._testFunc, key, undefined)
            if val is not undefined:
                return val
        elif hasattr(test, '_testMethodName'):
            meth = getattr(test, test._testMethodName, undefined)
            if meth is not undefined:
                val = getattr(meth, key, undefined)
                if val is not undefined:
                    return val
| from unittest import TestSuite
from nose2.events import Plugin
# Sentinel meaning "attribute not present" (distinct from a stored None).
undefined = object()

# TODO: eval attribs
class AttributeSelector(Plugin):
    """Filter the test suite by attributes on test functions/methods.

    Attribute specs come in via the -A/--attr option. Each option value is a
    comma-separated group whose entries must ALL match a test; separate
    options (groups) are OR-ed together. Entry forms: ``name`` (truthy),
    ``!name`` (falsy/absent) and ``name=value`` (string comparison).
    """
    def __init__(self):
        self.attribs = []
        self.addOption(self.attribs, "A", "attr", "Attribulate")

    def startTestRun(self, event):
        """Parse the raw -A option strings and replace event.suite with a
        filtered copy. No-op when no attributes were requested."""
        if not self.attribs:
            return
        attribs = []
        for attr in self.attribs:
            # all attributes within an attribute group must match
            attr_group = []
            for attrib in attr.strip().split(","):
                # don't die on trailing comma
                if not attrib:
                    continue
                items = attrib.split("=", 1)
                if len(items) > 1:
                    # "name=value"
                    # -> 'str(obj.name) == value' must be True
                    key, value = items
                else:
                    key = items[0]
                    if key[0] == "!":
                        # "!name"
                        # 'bool(obj.name)' must be False
                        key = key[1:]
                        value = False
                    else:
                        # "name"
                        # -> 'bool(obj.name)' must be True
                        value = True
                attr_group.append((key, value))
            attribs.append(attr_group)
        if not attribs:
            return
        event.suite = self.filterSuite(event.suite, attribs)

    def filterSuite(self, suite, attribs):
        """Recursively rebuild *suite*, keeping only tests whose attributes
        satisfy at least one group in *attribs*."""
        new_suite = suite.__class__()
        for test in suite:
            if isinstance(test, TestSuite):
                new_suite.addTest(self.filterSuite(test, attribs))
            elif self.validateAttrib(test, attribs):
                new_suite.addTest(test)
        return new_suite

    def validateAttrib(self, test, attribs):
        """Return True if *test* matches every entry of at least one group."""
        any_ = False
        for group in attribs:
            match = True
            for key, value in group:
                obj_value = self.getAttr(test, key)
                if callable(value):
                    # Programmatic matcher: value(key, test) decides.
                    if not value(key, test):
                        match = False
                        break
                elif value is True:
                    # value must exist and be True
                    if not bool(obj_value):
                        match = False
                        break
                elif value is False:
                    # value must not exist or be False
                    if bool(obj_value):
                        match = False
                        break
                elif type(obj_value) in (list, tuple):
                    # value must be found in the list attribute
                    if not str(value).lower() in [str(x).lower()
                                                  for x in obj_value]:
                        match = False
                        break
                else:
                    # value must match, convert to string and compare
                    if (value != obj_value
                        and str(value).lower() != str(obj_value).lower()):
                        match = False
                        break
            any_ = any_ or match
        return any_

    def getAttr(self, test, key):
        """Look up *key* on the test case, then on its wrapped function
        (_testFunc) or bound test method (_testMethodName). Implicitly
        returns None when the attribute is found nowhere."""
        val = getattr(test, key, undefined)
        if val is not undefined:
            return val
        if hasattr(test, '_testFunc'):
            val = getattr(test._testFunc, key, undefined)
            if val is not undefined:
                return val
        elif hasattr(test, '_testMethodName'):
            meth = getattr(test, test._testMethodName, undefined)
            if meth is not undefined:
                val = getattr(meth, key, undefined)
                if val is not undefined:
                    return val
| Python | 0 |
5bcdb4c7a0184c76bedc0843bac11981234bad77 | add some tests for dashboard views | tests/plans/test_dashboard_views.py | tests/plans/test_dashboard_views.py | import pytest
from django.conf import settings
from django.urls import reverse
from adhocracy4.test.helpers import redirect_target
from meinberlin.apps.plans.models import Plan
from meinberlin.test.helpers import assert_template_response
@pytest.mark.django_db
def test_initiator_can_edit(client, plan_factory):
    """An organisation initiator can open the dashboard edit form for a plan
    and persist changes via POST."""
    plan = plan_factory()
    initiator = plan.organisation.initiators.first()
    url = reverse('a4dashboard:plan-update',
                  kwargs={'organisation_slug': plan.organisation.slug,
                          'pk': plan.pk})
    client.login(username=initiator.email, password='password')
    response = client.get(url)
    assert_template_response(
        response, 'meinberlin_plans/plan_update_form.html')

    choices = settings.A4_PROJECT_TOPICS
    # Full form payload; empty strings clear the optional fields.
    data = {
        'title': 'my plan title',
        'description_image': '',
        'description_image_copyright': '',
        'contact': 'me@example.com',
        'point': '',
        'point_label': '',
        'district': '',
        'cost': '1.000',
        'description': 'this is a description',
        'topics': choices[0][0],
        'status': plan.status,
        'participation': plan.participation
    }
    response = client.post(url, data)
    assert redirect_target(response) == 'plan-list'
    # Verify the submitted values were written to the database.
    plan.refresh_from_db()
    assert plan.topics == [data.get('topics')]
    assert plan.title == data.get('title')
    assert plan.description == data.get('description')
@pytest.mark.django_db
def test_group_member_can_edit(client, plan_factory, user_factory,
                               group_factory, organisation):
    """A member of a group attached to the organisation can edit a plan that
    belongs to that group; the plan keeps its group after saving."""
    group1 = group_factory()
    group2 = group_factory()
    group_member = user_factory.create(groups=(group1, group2))
    # Only group2 is attached to the organisation and owns the plan.
    organisation.groups.add(group2)
    plan = plan_factory(group=group2, organisation=organisation)
    url = reverse('a4dashboard:plan-update',
                  kwargs={'organisation_slug': organisation.slug,
                          'pk': plan.pk})
    client.login(username=group_member.email, password='password')
    response = client.get(url)
    assert_template_response(
        response, 'meinberlin_plans/plan_update_form.html')

    choices = settings.A4_PROJECT_TOPICS
    # Full form payload; empty strings clear the optional fields.
    data = {
        'title': 'my plan title',
        'description_image': '',
        'description_image_copyright': '',
        'contact': 'me@example.com',
        'point': '',
        'point_label': '',
        'district': '',
        'cost': '1.000',
        'description': 'this is a description',
        'topics': choices[0][0],
        'status': plan.status,
        'participation': plan.participation
    }
    response = client.post(url, data)
    assert redirect_target(response) == 'plan-list'
    # Verify the submitted values were written and the group is preserved.
    plan.refresh_from_db()
    assert plan.topics == [data.get('topics')]
    assert plan.title == data.get('title')
    assert plan.description == data.get('description')
    assert plan.group == group2
@pytest.mark.django_db
def test_initiator_can_create(client, organisation):
    """An organisation initiator can create a plan through the dashboard;
    the created plan has no group."""
    initiator = organisation.initiators.first()
    url = reverse('a4dashboard:plan-create',
                  kwargs={'organisation_slug': organisation.slug})
    client.login(username=initiator.email, password='password')
    response = client.get(url)
    assert_template_response(
        response, 'meinberlin_plans/plan_create_dashboard.html')

    choices = settings.A4_PROJECT_TOPICS
    # Full form payload; empty strings leave the optional fields blank.
    data = {
        'title': 'my plan title',
        'description_image': '',
        'description_image_copyright': '',
        'contact': 'me@example.com',
        'point': '',
        'point_label': '',
        'district': '',
        'cost': '1.000',
        'description': 'this is a description',
        'topics': choices[0][0],
        'status': 0,
        'participation': 2
    }
    response = client.post(url, data)
    assert redirect_target(response) == 'plan-list'
    # The new plan is the only one in the (per-test) database.
    plan = Plan.objects.all().first()
    assert plan.topics == [data.get('topics')]
    assert plan.title == data.get('title')
    assert plan.description == data.get('description')
    assert not plan.group
@pytest.mark.django_db
def test_group_member_can_create(client, organisation, user_factory,
                                 group_factory):
    """A member of a group attached to the organisation can create a plan;
    the plan is assigned to that organisation group."""
    group1 = group_factory()
    group2 = group_factory()
    group_member = user_factory.create(groups=(group1, group2))
    # Only group2 is attached to the organisation.
    organisation.groups.add(group2)
    url = reverse('a4dashboard:plan-create',
                  kwargs={'organisation_slug': organisation.slug})
    client.login(username=group_member.email, password='password')
    response = client.get(url)
    assert_template_response(
        response, 'meinberlin_plans/plan_create_dashboard.html')

    choices = settings.A4_PROJECT_TOPICS
    # Full form payload; empty strings leave the optional fields blank.
    data = {
        'title': 'my plan title',
        'description_image': '',
        'description_image_copyright': '',
        'contact': 'me@example.com',
        'point': '',
        'point_label': '',
        'district': '',
        'cost': '1.000',
        'description': 'this is a description',
        'topics': choices[0][0],
        'status': 0,
        'participation': 2
    }
    response = client.post(url, data)
    assert redirect_target(response) == 'plan-list'
    # The new plan is the only one and must carry the organisation's group.
    plan = Plan.objects.all().first()
    assert plan.topics == [data.get('topics')]
    assert plan.title == data.get('title')
    assert plan.description == data.get('description')
    assert plan.group == group2
| Python | 0 | |
51a5c7626b634687be57c3e6ed05ea07f6468ad0 | add analyzer test | timeside/tests/api/test_analyzer.py | timeside/tests/api/test_analyzer.py | # -*- coding: utf-8 -*-
import timeside
from sys import stdout
import os.path
import numpy
class TestAnalyzer:
graphers = timeside.core.processors(timeside.api.IGrapher)
decoders = timeside.core.processors(timeside.api.IDecoder)
encoders= timeside.core.processors(timeside.api.IEncoder)
analyzers = timeside.core.processors(timeside.api.IAnalyzer)
def __init__(self, path):
self.source = os.path.join(os.path.dirname(__file__), path)
print "Processing %s" % self.source
self.decoder = timeside.decoder.FileDecoder(self.source)
print 'format: ', self.decoder.format()
self.pipe = self.decoder
self.analyzers_sub_pipe = []
def process(self):
for analyzer in self.analyzers:
sub_pipe = analyzer()
self.analyzers_sub_pipe.append(sub_pipe)
self.pipe = self.pipe | sub_pipe
self.pipe.run()
def results(self):
analyzers = []
for analyzer in self.analyzers_sub_pipe:
value = analyzer.result()
analyzers.append({'name':analyzer.name(),
'id':analyzer.id(),
'unit':analyzer.unit(),
'value':str(value)})
print analyzers
# Smoke test: run every registered analyzer over the bundled guitar sample
# and print the collected results.
test = TestAnalyzer('../samples/guitar.wav')
#test = TestAnalyzer('/mnt/data4/Music1/Cellar_playlist_tmp/JanoB/VirulentAcidMix.wav')
test.process()
test.results()
| Python | 0.000001 | |
class Node:
    """A tree node: a payload value plus an optional list of child nodes."""

    def __init__(self, data, child_nodes=None):
        # ``child_nodes`` is a list of Node instances, or None for a leaf.
        self.data, self.child_nodes = data, child_nodes
def tree_to_list(tr):
    """Return the values of tree *tr* flattened in level (breadth-first) order."""
    return to_list(tr, 0, [])


def to_list(tr, depth, res):
    """Record ``[value, depth]`` pairs for *tr*'s subtree into *res* (pre-order),
    then return all recorded values ordered by depth.  The sort is stable, so
    nodes on the same level keep their left-to-right order."""
    res.append([tr.data, depth])
    for child in (tr.child_nodes or []):
        to_list(child, depth + 1, res)
    return [value for value, _ in sorted(res, key=lambda pair: pair[1])]
| Python | 0.000002 | |
def alphabetized(s):
    """Return only the alphabetic characters of *s*, sorted case-insensitively.

    The sort is stable, so characters that compare equal (e.g. 'a' and 'A')
    keep their original relative order.
    """
    letters = [ch for ch in s if ch.isalpha()]
    letters.sort(key=str.lower)
    return ''.join(letters)
| Python | 0.000033 | |
0f55bd7e100dca1ef94dfe2f47b0f46774197e3f | Create cbus.py | cbus.py | cbus.py | #!/usr/bin/python3
#console command for lighting control of c-bus network
#add command line switches for changing the default ip and port
#add option for immediate return i.e. dont wait for return codes
#cbus on 6, cbus off 7, cbus ramp 7m 100
#parse command line, convert time to closest value
# Copyright 2014 Darren McInnes codemonkey[at}archer.com(dot]au
#
# Permission to use, copy, modify, distribute this
# software and its documentation for any purpose is hereby granted
# without fee, provided that the above copyright notice appear in
# all copies and that both that the copyright notice and this
# permission notice and warranty disclaimer appear in supporting
# documentation, and that the name of the author not be used in
# advertising or publicity pertaining to distribution of the
# software without specific, written prior permission.
# The author disclaims all warranties with regard to this
# software, including all implied warranties of merchantability
# and fitness due to it being crap. In no event shall the author
# be liable for any special, indirect or consequential damages or
# any damages whatsoever resulting from loss of use, data or profits,
# whether in an action of contract, negligence, arising out of or in
# connection with the use or performance of this software.
import os
import sys #handles command line arguments
import socket
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("command", choices=["off", "on", "ramp"], help="off/on/ramp")
parser.add_argument("group", type=int, choices=range(0,254), help="group between 0 and 254")
parser.add_argument("-a", "--address", default="192.168.0.105", help="network address of c-gate server")
parser.add_argument("-p", "--port",type=int, default="20023", help="command port number")
parser.add_argument("-n", "--net", type=int, default="254", help="c-bus network number")
parser.add_argument("-l", "--lighting", type=int, default="56", help="c-bus application number")
parser.add_argument("-r", "--ramp", type=int, default="0", help="ramp speed 0s to 17m")
#parser.add_argument("-p", "--level", type=int, default="100", help="level")
args = parser.parse_args()

# Bug fix: the original ``if args.command=="ramp": ... else: ...`` ran two
# byte-identical code paths, so the branch has been collapsed into one.
# NOTE(review): the --ramp speed option is parsed but never transmitted --
# confirm the intended C-Gate ramp command syntax before wiring it in.
# The socket is managed by a context manager so it is closed even on error.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
    sock.connect((args.address, args.port))
    data = sock.recv(4096)  # discard the server greeting/banner
    sock.sendall(bytes(args.command+' '+str(args.net)+'/'+str(args.lighting)+'/'+str(args.group)+'\n','UTF-8'))
    data = sock.recv(4096)  # command acknowledgement / return code
print(data)
| Python | 0 | |
2eddc73e2d7b78fbfac521eb1e6014ca26421510 | Add forgotten migration | osmdata/migrations/0012_auto_20170829_1539.py | osmdata/migrations/0012_auto_20170829_1539.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-29 15:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make ``OsmElement.bounds`` optional: the one-to-one link to Bounds
    now accepts blank/null values."""
    dependencies = [
        ('osmdata', '0011_auto_20170824_1521'),
    ]
    operations = [
        migrations.AlterField(
            model_name='osmelement',
            name='bounds',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='osmdata.Bounds'),
        ),
    ]
| Python | 0.000004 | |
d00243d9500118400f7e08409d9564b15b2b4148 | Add trivial CLI example | examples/cliExample.py | examples/cliExample.py | # Very Simple CLI example
from OTXv2 import OTXv2
import IndicatorTypes
import argparse
# Your API key
API_KEY = ''
OTX_SERVER = 'https://otx.alienvault.com/'
otx = OTXv2(API_KEY, server=OTX_SERVER)

parser = argparse.ArgumentParser(description='Description of your program')
parser.add_argument('-i', '--ip', help='IP eg; 4.4.4.4', required=False)
parser.add_argument(
    '-d', '--domain', help='Domain eg; alienvault.com', required=False)
parser.add_argument('-ho', '--hostname',
                    help='Hostname eg; www.alienvault.com', required=False)
parser.add_argument(
    '-u', '--url', help='URL eg; http://www.alienvault.com', required=False)
parser.add_argument(
    '-m', '--md5', help='MD5 Hash of a file eg; 7b42b35832855ab4ff37ae9b8fa9e571', required=False)
parser.add_argument(
    '-p', '--pulse', help='Search pulses for a string eg; Dridex', required=False)
parser.add_argument('-s', '--subscribed', help='Get pulses you are subscribed to',
                    required=False, action='store_true')
args = vars(parser.parse_args())

# The five simple indicator options all performed the same lookup with a
# different IndicatorTypes constant; drive them from one table instead of
# five copy-pasted if-blocks.  Tuple order preserves the original output order.
_INDICATOR_OPTIONS = (
    ('ip', IndicatorTypes.IPv4),
    ('domain', IndicatorTypes.DOMAIN),
    ('hostname', IndicatorTypes.HOSTNAME),
    ('url', IndicatorTypes.URL),
    ('md5', IndicatorTypes.FILE_HASH_MD5),
)
for option, indicator_type in _INDICATOR_OPTIONS:
    if args[option]:
        print(str(otx.get_indicator_details_full(indicator_type, args[option])))

if args["pulse"]:
    result = otx.search_pulses(args["pulse"])
    print(str(result.get('results')))

if args["subscribed"]:
    print(str(otx.getall(max_page=3, limit=5)))
ecc8a93ddda784102311ebfd4c3c93624f356778 | Add migration to add strip_html sql function | cnxarchive/sql/migrations/20160723123620_add_sql_function_strip_html.py | cnxarchive/sql/migrations/20160723123620_add_sql_function_strip_html.py | # -*- coding: utf-8 -*-
def up(cursor):
    """Create the ``strip_html(text)`` SQL helper function (plpythonu).

    Bug fix: the original embedded Python passed ``re.MULTILINE`` as the
    *fourth positional* argument of ``re.sub``, which is ``count`` -- since
    ``re.MULTILINE == 8`` this silently stripped at most 8 tags per call.
    The flag is dropped entirely: it only affects ``^``/``$``, which this
    pattern does not use, so a plain global substitution is the intended
    behaviour.
    """
    cursor.execute("""\
CREATE OR REPLACE FUNCTION strip_html(html_text TEXT)
RETURNS text
AS $$
import re
return re.sub('<[^>]*?>', '', html_text)
$$ LANGUAGE plpythonu IMMUTABLE;
""")
def down(cursor):
    """Drop the ``strip_html`` SQL helper created by :func:`up`."""
    drop_stmt = "DROP FUNCTION IF EXISTS strip_html(TEXT)"
    cursor.execute(drop_stmt)
| Python | 0 | |
0f5b15a1f909c79b40a3f2655d00bc7852d41847 | add missing migration | conversion_service/conversion_job/migrations/0003_auto_20151120_1528.py | conversion_service/conversion_job/migrations/0003_auto_20151120_1528.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Re-declare the valid ``choices`` for ``ConversionJob.status`` and
    ``GISFormat.progress`` (both keep ``max_length=20`` and default 'new')."""
    dependencies = [
        ('conversion_job', '0002_auto_20151119_1332'),
    ]
    operations = [
        migrations.AlterField(
            model_name='conversionjob',
            name='status',
            field=models.CharField(max_length=20, verbose_name='job status', default='new', choices=[('error', 'error'), ('new', 'new'), ('queued', 'queued'), ('started', 'started'), ('done', 'done')]),
        ),
        migrations.AlterField(
            model_name='gisformat',
            name='progress',
            field=models.CharField(max_length=20, verbose_name='progress', default='new', choices=[('error', 'error'), ('new', 'new'), ('received', 'received'), ('started', 'started'), ('successful', 'successful')]),
        ),
    ]
| Python | 0.000258 | |
ed45aa20bc54714c6eb355417520c3d90a6b47fc | Add init.py | init.py | init.py | #!/usr/bin/env python
import os
import sys
import django
# Point Django at the dev settings and make the current directory importable
# *before* django.setup(), so app loading can resolve local modules.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'readthedocs.settings.dev')
sys.path.append(os.getcwd())
django.setup()
# Imported after setup() on purpose: Django models cannot be imported before
# the app registry is ready.
from django.contrib.auth.models import User
# Bootstrap an admin/admin superuser account for local development.
admin = User.objects.create_user('admin', '', 'admin')
admin.is_superuser = True
admin.is_staff = True
admin.save()
# ...and a test/test staff-only account.
test = User.objects.create_user('test', '', 'test')
test.is_staff = True
test.save()
| Python | 0.000063 | |
67d86229279e979d8ef5ac54e5ed8ca85c32ff2e | add another sample script (multiple.py). | demos/multiple.py | demos/multiple.py | #!/usr/bin/env python
from Exscript import Host
from Exscript.util.interact import read_login
from Exscript.util.template import eval_file
from Exscript.util.start import start
def one(conn):
    """Open and authenticate *conn*, run Exscript's autoinit, then execute
    a brief IP interface listing."""
    conn.open()
    conn.authenticate()
    conn.autoinit()
    conn.execute('show ip int brie')
def two(conn):
    """Evaluate the 'mytemplate.exscript' template on *conn*, binding the
    ``interface`` template variable."""
    eval_file(conn, 'mytemplate.exscript', interface = 'POS1/0')
# Prompt the user for credentials once and reuse them for all connections.
account = read_login()

# Start on one host.
host1 = Host('localhost')
host1.set('myvariable', 'foobar')  # host variable available to templates
start(account, host1, one)

# Start on another.
# NOTE(review): ``host3`` is created but never used, and the second start()
# call reuses host1 -- confirm whether [host2, host3] was intended here.
host2 = Host('otherhost1')
host3 = Host('otherhost2')
start(account, [host1, host2], two)
| Python | 0 | |
3704654e704c0595e933f4ab2832e945816afde8 | Add setup.py file | TimeSeries/PublicApis/Python/setup.py | TimeSeries/PublicApis/Python/setup.py | from setuptools import setup
# Read the long description up front with a context manager: the original
# inline ``open("README.md").read()`` never closed the file handle.
# Encoding is pinned to UTF-8 so the build does not depend on the locale.
with open("README.md", encoding="utf-8") as readme:
    _long_description = readme.read()

setup(
    name="aquarius-timeseries-client",
    py_modules=["timeseries_client"],
    version="0.1",
    description="Python client for Aquarius TimeSeries API",
    long_description=_long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/AquaticInformatics/Examples",
    install_requires=(
        "requests",
        "pyrfc3339"
    )
)
| Python | 0.000001 | |
42e1447db973cce539353912eada05b26870bae6 | Add serial test connection. | experiment_control/test_serial_connection.py | experiment_control/test_serial_connection.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Niklas Hauser
# All rights reserved.
#
# The file is part of my bachelor thesis and is released under the 3-clause BSD
# license. See the file `LICENSE` for the full license governing this code.
# -----------------------------------------------------------------------------
import os, sys, time
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'tinyos', 'support', 'sdk', 'python'))
from tinyos.message import *
from tinyos.message.Message import *
from tinyos.message.SerialPacket import *
from tinyos.packet.Serial import Serial
from messages import *
class Connection(object):
    """Listen on a serial-attached TinyOS mote and decode incoming messages.

    Registers this object as a listener for Serial/Radio/Sensor AM messages
    and logs whatever arrives; :meth:`transmit` sends a message out through
    the same serial source.
    """

    def __init__(self, device=None):
        super(Connection, self).__init__()
        self.logger = logging.getLogger('Connection.({})'.format(device))
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # console logging
        # NOTE(review): a new StreamHandler is attached on every construction
        # for the same device string -- duplicate log lines if two Connection
        # objects ever share a name; confirm whether that can happen.
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

        self.mif = MoteIF.MoteIF()
        self.device = device
        self.tos_source = self.mif.addSource("serial@" + device)
        self.mif.addListener(self, SerialMessage.SerialMessage)
        self.mif.addListener(self, RadioMessage.RadioMessage)
        self.mif.addListener(self, SensorMessage.SensorMessage)
        self.logger.info("listening")
        self.temperature = 0
        self.humidity = 0

    def receive(self, src, msg):
        """Dispatch on AM type and log the decoded message.

        Sensor readings are converted with the Sensirion SHT1x datasheet
        formulas: temperature d1=-40.1, d2=0.01 (14-bit at 5V); humidity
        c1=-2.0468, c2=0.0367, c3=-1.5955e-6 (12-bit) with temperature
        compensation t1=0.01, t2=0.00008.
        """
        if msg.get_amType() == SensorMessage.AM_TYPE:
            m = SensorMessage.SensorMessage(msg.dataGet())
            self.temperature = m.get_temperature()*0.01 - 40.1
            # Bug fix: RH_linear = c1 + c2*SO + c3*SO**2.  The original code
            # computed (c3*SO)**2, squaring the whole product instead of the
            # raw reading, which made the quadratic term positive and tiny.
            linear_humidity = -2.0468 + 0.0367 * m.get_humidity() - 1.5955e-6 * m.get_humidity() ** 2
            self.humidity = (self.temperature - 25) * (0.01 + 0.00008 * m.get_humidity()) + linear_humidity
            self.logger.debug("SensorMessage: NodeId={}, Temp={:.1f}C, Hum={:.1f}%" \
                              .format(m.get_nodeid(), self.temperature, self.humidity))
        elif msg.get_amType() == SerialMessage.AM_TYPE:
            m = SerialMessage.SerialMessage(msg.dataGet())
            self.logger.info("SerialMessage: {}".format(str(m)))
        elif msg.get_amType() == RadioMessage.AM_TYPE:
            m = RadioMessage.RadioMessage(msg.dataGet())
            self.logger.info("RadioMessage: {}".format(str(m)))
        else:
            self.logger.warn("Unknown Message: {}".format(str(msg)))

    def transmit(self, addr, msg):
        """Send *msg* to node *addr* through the serial source."""
        self.logger.info("Transmitting: addr={} {}".format(addr, msg))
        self.mif.sendMsg(self.tos_source, addr, msg.get_amType(), 0, msg)
if __name__ == "__main__":
    # One mote acts as sender, the other as receiver; both are telos motes
    # attached over USB serial.
    sender = Connection("/dev/ttyUSB3:telos")
    receiver = Connection("/dev/ttyUSB1:telos")

    # 90-byte test payload: the digits 0-9 repeated.
    data = [0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,
            0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,
            0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9]
    # data = [0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,
    #         0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,
    #         0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88,0x88]
    # NOTE(review): rawData is built but never used -- confirm it can be removed.
    rawData = chr(0)*20 + "".join(map(chr, data))

    tx = SerialMessage.SerialMessage()
    tx.set_header_channel(26)
    tx.set_header_type(SerialMessage.SerialMessage.get_amType())
    tx.set_header_power(3)
    tx.set_header_len(len(data))
    tx.set_header_nodeid(0)
    tx.set_data(data)
    print tx.data

    time.sleep(1)
    sender.transmit(1, tx)

    # Busy-wait forever so the listener threads keep receiving messages.
    while(1):
        pass
| Python | 0 | |
da22d8dffadbb4713e715aca7918942f445090c9 | embed video form and model fields | embed_video/fields.py | embed_video/fields.py | from django.db import models
from django import forms
from django.utils.translation import ugettext_lazy as _
from .base import detect_backend
__all__ = ('EmbedVideoField', 'EmbedVideoFormField')
class EmbedVideoField(models.URLField):
    """Model URL field whose form field validates embeddable video URLs."""

    def formfield(self, **kwargs):
        # Use EmbedVideoFormField unless the caller supplied a form_class.
        kwargs.setdefault('form_class', EmbedVideoFormField)
        return super(EmbedVideoField, self).formfield(**kwargs)
class EmbedVideoFormField(forms.URLField):
    """Form URL field that additionally checks the URL belongs to a known
    video backend."""

    def validate(self, url):
        """Validate *url* as a URL, then verify a backend recognizes it.

        Raises ``forms.ValidationError`` if no backend matches.
        """
        super(EmbedVideoFormField, self).validate(url)
        try:
            detect_backend(url)
        except Exception:
            # Bug fix: the original bare ``except:`` also swallowed
            # SystemExit/KeyboardInterrupt; narrowed to Exception.
            raise forms.ValidationError(_(u'URL could not be recognized.'))
        return url
| Python | 0 | |
b81028067cf65b2ee3a155d081e7983a1de70d5f | Add mistakenly omitted migrations | opentreemap/treemap/migrations/0005_auto_20150729_1046.py | opentreemap/treemap/migrations/0005_auto_20150729_1046.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Re-declare the permission-level ``choices`` for
    ``FieldPermission.permission_level`` and ``Role.default_permission``
    (default stays 0 / 'Invisible')."""
    dependencies = [
        ('treemap', '0004_auto_20150720_1523'),
    ]
    operations = [
        migrations.AlterField(
            model_name='fieldpermission',
            name='permission_level',
            field=models.IntegerField(default=0, choices=[(0, 'Invisible'), (1, 'Read Only'), (2, 'Pending Write Access'), (3, 'Full Write Access')]),
        ),
        migrations.AlterField(
            model_name='role',
            name='default_permission',
            field=models.IntegerField(default=0, choices=[(0, 'Invisible'), (1, 'Read Only'), (2, 'Pending Write Access'), (3, 'Full Write Access')]),
        ),
    ]
| Python | 0 | |
1fa74f6a6a5faeb9579c889df32e4bfe8d6908df | Add migration | fat/migrations/0059_event_extra_sponsored.py | fat/migrations/0059_event_extra_sponsored.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-08 10:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text ``extra_sponsored`` field to Event."""
    dependencies = [
        ('fat', '0058_auto_20160808_1007'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='extra_sponsored',
            field=models.TextField(blank=True),
        ),
    ]
| Python | 0.000002 | |
62c70b301ffc1e178c3bd54bd81291876b3883ea | Add simple linear interpolation filling. | analysis/03-fill-dropouts-linear.py | analysis/03-fill-dropouts-linear.py | #!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import lmj.cubes.fill
import numpy as np
import pandas as pd
logging = climate.get_logger('fill')
def fill(dfs, window):
    '''Complete missing marker data using linear interpolation.

    This method alters the given `dfs` in-place and returns nothing: the
    frames are stacked into a single large frame, interpolated over windows
    of consecutive frames, and then written back into the input frames.

    Parameters
    ----------
    dfs : list of pd.DataFrame
        Frames of source data; modified in-place.
    window : int
        Model windows of this many consecutive frames.
    '''
    df = lmj.cubes.fill.stack(dfs, window)
    centers = lmj.cubes.fill.center(df)  # remove centers before windowing
    pos, _, _ = lmj.cubes.fill.window(df, window, interpolate=True)
    lmj.cubes.fill.update(df, pos, window)
    lmj.cubes.fill.restore(df, centers)  # put the centers back
    lmj.cubes.fill.unstack(df, dfs)      # write results back into the inputs
def main(args):
    """CLI entry point: interpolate dropouts in every trial's frames."""
    def fill_trials(trials):
        fill([trial.df for trial in trials], args.window)
    lmj.cubes.fill.main(args, fill_trials)
if __name__ == '__main__':
    # climate builds the command-line interface around ``main`` and calls it.
    climate.call(main)
| Python | 0 | |
7942254131bcf005d5a5f1bb33ca7d1ffff1b311 | Create keyAllCtrls.py | af_scripts/blendshapes/keyAllCtrls.py | af_scripts/blendshapes/keyAllCtrls.py | import maya.cmds as cmds
import maya.mel as mel
# Select every object carrying a custom ``faceCtrl`` attribute, searching
# all namespaces ('*:*.faceCtrl'); o=1 selects the objects, not the attrs.
cmds.select(cmds.ls('*:*.faceCtrl', o=1))
# Set a keyframe on the current selection via Maya's doSetKeyframeArgList
# MEL helper.  NOTE(review): the argument list is Maya-version-specific --
# confirm it matches the Maya version in use.
mel.eval('doSetKeyframeArgList 6 { "4","0","0","0","1","0","0","animationList","0","1","0" };')
| Python | 0.000002 | |
f51c4abc95fda5504e7c7a5ad87355698798ddd1 | create temporary streaming solution | temp_vidstream.py | temp_vidstream.py | import picamera
with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    # Bug fix: picamera infers the recording format from the filename
    # extension and does not support 'mp4' containers, so recording to
    # 'vidstream.mp4' raised an error.  Record the raw H.264 elementary
    # stream instead (wrap it with e.g. MP4Box/ffmpeg for playback).
    camera.start_recording('vidstream.h264')
    camera.wait_recording(60)  # record for 60 seconds, raising on errors
    camera.stop_recording()
| Python | 0 | |
89d27dd0a28f84c99930c0f1dad496e525f62272 | migrate to namespace table | migrations/versions/28c0d6c2f887_add_namespaces.py | migrations/versions/28c0d6c2f887_add_namespaces.py | """Add namespaces
Revision ID: 28c0d6c2f887
Revises: 4323056c0b78
Create Date: 2013-10-14 22:18:29.705865
"""
# revision identifiers, used by Alembic.
revision = '28c0d6c2f887'
down_revision = '4323056c0b78'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Rename each table's ``user_id`` column to ``namespace_id``, tighten
    foldermeta nullability, and add ``users.root_namespace``."""
    ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): creation of the namespaces table is commented out here,
    # yet downgrade() drops a 'namespaces' table -- confirm the table is
    # created elsewhere, otherwise downgrade will fail.
    # op.create_table('namespaces',
    # sa.Column('id', sa.Integer(), nullable=False),
    # sa.Column('user_id', sa.Integer(), nullable=False),
    # sa.PrimaryKeyConstraint('id')
    # )
    op.alter_column(u'foldermeta', u'user_id', new_column_name='namespace_id',
                    existing_type=mysql.INTEGER(display_width=11))
    # folder_name and msg_uid become mandatory going forward.
    op.alter_column(u'foldermeta', 'folder_name',
                    existing_type=mysql.VARCHAR(length=255),
                    nullable=False)
    op.alter_column(u'foldermeta', 'msg_uid',
                    existing_type=mysql.INTEGER(display_width=11),
                    nullable=False)
    op.alter_column(u'messagemeta', u'user_id', new_column_name='namespace_id',
                    existing_type=mysql.INTEGER(display_width=11))
    op.alter_column(u'rawmessage', u'user_id', new_column_name='namespace_id',
                    existing_type=mysql.INTEGER(display_width=11))
    op.alter_column(u'uidvalidity', u'user_id', new_column_name='namespace_id',
                    existing_type=mysql.INTEGER(display_width=11))
    op.add_column(u'users', sa.Column('root_namespace', sa.Integer(), nullable=False))
    ### end Alembic commands ###
def downgrade():
    """Reverse :func:`upgrade`: restore the per-table ``user_id`` columns,
    relax foldermeta nullability, drop ``users.root_namespace`` and the
    namespaces table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'users', 'root_namespace')
    op.add_column(u'uidvalidity', sa.Column(u'user_id', mysql.INTEGER(display_width=11), nullable=False))
    op.drop_column(u'uidvalidity', 'namespace_id')
    op.add_column(u'rawmessage', sa.Column(u'user_id', mysql.INTEGER(display_width=11), nullable=False))
    op.drop_column(u'rawmessage', 'namespace_id')
    op.add_column(u'messagemeta', sa.Column(u'user_id', mysql.INTEGER(display_width=11), nullable=False))
    op.drop_column(u'messagemeta', 'namespace_id')
    op.alter_column(u'foldermeta', 'msg_uid',
                    existing_type=mysql.INTEGER(display_width=11),
                    nullable=True)
    op.alter_column(u'foldermeta', 'folder_name',
                    existing_type=mysql.VARCHAR(length=255),
                    nullable=True)
    op.add_column(u'foldermeta', sa.Column(u'user_id', mysql.INTEGER(display_width=11), nullable=False))
    op.drop_column(u'foldermeta', 'namespace_id')
    op.drop_table('namespaces')
    ### end Alembic commands ###
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0.000002 | |
55f2325354724cfe8b90324038daf2c1acaa916a | Add unit tests for OpenStack config defaults | teuthology/openstack/test/test_config.py | teuthology/openstack/test/test_config.py | from teuthology.config import config
class TestOpenStack(object):
    """Sanity checks for the default ``openstack`` section of the
    teuthology configuration."""

    def setup(self):
        self.openstack_config = config['openstack']

    def test_config_clone(self):
        assert 'clone' in self.openstack_config

    def test_config_user_data(self):
        os_type, os_version = 'rhel', '7.0'
        # The user-data entry is a template path parameterised by OS.
        template_path = self.openstack_config['user-data'].format(
            os_type=os_type, os_version=os_version)
        assert os_type in template_path
        assert os_version in template_path

    def test_config_ip(self):
        assert 'ip' in self.openstack_config

    def test_config_machine(self):
        assert 'machine' in self.openstack_config
        machine_config = self.openstack_config['machine']
        for key in ('disk', 'ram', 'cpus'):
            assert key in machine_config

    def test_config_volumes(self):
        assert 'volumes' in self.openstack_config
        volumes_config = self.openstack_config['volumes']
        for key in ('count', 'size'):
            assert key in volumes_config
| Python | 0 | |
526d58fb917a4e098018f733b4c0b254417140b4 | Add @log_route decorator | keeper/logutils.py | keeper/logutils.py | """Logging helpers and utilities.
"""
__all__ = ['log_route']
from functools import wraps
from timeit import default_timer as timer
import uuid
from flask import request, make_response
import structlog
def log_route():
    """Route decorator that gives each request its own structlog context.

    A fresh thread-local logger is bound with a unique ``request_id`` plus
    the request path and method; once the wrapped view returns, the response
    status and wall-clock response time are logged.
    """
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            # Capture the response time (convenience, in addition to
            # route monitoring).
            started = timer()

            # New thread-local logger with a per-request context.
            # http://www.structlog.org/en/stable/examples.html
            log = structlog.get_logger().new(
                request_id=str(uuid.uuid4()),
                path=request.path,
                method=request.method,
            )

            # Pass through to the wrapped route.
            response = make_response(view(*args, **kwargs))

            # Close out the logger with the outcome.
            finished = timer()
            log.info(
                status=response.status_code,
                response_time=finished - started)
            return response
        return wrapper
    return decorator
| Python | 0.00001 | |
3f3115a0a9c7407820b3b10c06dcfa4f92ac6e57 | Add owned book scaffold | goodreads_api_client/resources/owned_book.py | goodreads_api_client/resources/owned_book.py | # -*- coding: utf-8 -*-
"""Module containing owned book resource class."""
from goodreads_api_client.exceptions import OauthEndpointNotImplemented
from goodreads_api_client.resources.base import Resource
class OwnedBook(Resource):
    """Owned-book resource: every endpoint requires OAuth and is not yet
    implemented, so each method raises OauthEndpointNotImplemented with
    its own endpoint name."""

    def create(self):
        # Bug fix: previously raised with the wrong endpoint name
        # 'owned_book.compare' (copy-paste error).
        raise OauthEndpointNotImplemented('owned_book.create')

    def destroy(self):
        raise OauthEndpointNotImplemented('owned_book.destroy')

    def list(self):
        raise OauthEndpointNotImplemented('owned_book.list')

    def show(self):
        raise OauthEndpointNotImplemented('owned_book.show')

    def update(self):
        raise OauthEndpointNotImplemented('owned_book.update')
| Python | 0 | |
5d99b7c2dfbfbb776716f2258d560bab2602531f | Create main.py | main.py | main.py | # -*- coding: utf-8 -*-
#Backlog Manager
#programmed by Ian Hitterdal (otend)
#licensed under MIT license
import work
import random
def addWork(medium):
    """Prompt for a title and register a new work under *medium*.

    Prints an error (and adds nothing) if *medium* is not a known medium.
    """
    if medium not in mediumList:
        print("Invalid medium, otend did something wrong")
        return
    title = input("What is the name of the work? ")
    workDict[medium].append(work.Work(title))
def pickMedium():
    """Show the numbered medium menu and return the chosen medium string."""
    print("Which medium would you like to use?")
    for n, med in enumerate(mediumList, start=1):
        print(n,". ",med)
    choice = int(input("Enter a number. "))
    return mediumList[choice-1]
def chooseWork(medium):
    """Suggest a random *unviewed* work of the given medium.

    Prints "No works." when every work has been viewed (or none exist);
    otherwise offers a random unviewed work and, on confirmation, marks
    it as viewed.

    Bug fix: the random pick now draws from ``valList`` (the unviewed
    works computed just above) instead of the full ``workDict[medium]``
    list, which could suggest already-viewed works.
    """
    valList = [item for item in workDict[medium] if item.wasViewed == False]
    if len(valList) == 0:
        print("No works.")
    else:
        a = random.choice(valList)
        print("You should watch/play/whatever...")
        print(a.name,"\n")
        b = input("Did you watch it? y/n")
        if(b == "y"):
            a.wasViewed = True
def listWork(medium):
    """Print every work registered under *medium*, one per line.

    Bug fix: the header used ``print("...{}.", medium)``, which printed the
    literal braces and the medium as a separate argument instead of
    formatting it into the message.
    """
    print("Here are the works registered for {}.".format(medium))
    for i in workDict[medium]:
        print(i)
def watDo():
    """Interactive main menu: add, suggest, or list works until the user quits.

    Robustness fix: the original re-invoked watDo() recursively after every
    action and every invalid entry, so a long session would eventually hit
    Python's recursion limit.  Rewritten as a loop with identical prompts
    and behaviour.
    """
    while True:
        print("What do you want to do?")
        print("1. Add a work.")
        print("2. Have a work chosen.")
        print("3. List works.")
        print("4. Quit.")
        choice = input("Enter a number.")
        if choice not in ["1","2","3","4"]:
            print("You have entered an invalid choice. Please try again.")
            continue
        if choice == "4":
            print("Goodbye.")
            return
        a = pickMedium()
        if choice == "1":
            addWork(a)
        elif choice == "2":
            chooseWork(a)
        else:
            listWork(a)
# Supported media categories; workDict maps each one to its list of works.
mediumList = ["film", "game", "show", "comic", "book", "album"]
workDict = dict()
for n in mediumList:
    workDict[n] = list()
print("Welcome to Backlog Manager 0.1 Pre-Alpha!")
# Enter the interactive menu loop.
watDo()
| Python | 0.000001 | |
f75d321b200217514cde901cc15cc2b798e3dcfe | Add new hipchat module | bumblebee/modules/hipchat.py | bumblebee/modules/hipchat.py | """Displays the unread messages count for an HipChat user
Requires the following library:
* requests
Parameters:
* hipchat.token: HipChat user access token, the token needs to have the 'View Messages' scope.
* hipchat.interval: Refresh interval in minutes (defaults to 5)
"""
import time
import functools
import bumblebee.input
import bumblebee.output
import bumblebee.engine
try:
import requests
except ImportError:
pass
HIPCHAT_API_URL = "https://www.hipchat.com/v2/readstate?expand=items.unreadCount"
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(engine, config,
bumblebee.output.Widget(full_text=self.output)
)
self._count = 0
self._interval = int(self.parameter("interval", "5"))
self._nextcheck = 0
self._requests = requests.Session()
self._requests.headers.update({"Authorization":"Bearer {}".format(self.parameter("token", ""))})
immediate_update = functools.partial(self.update, immediate=True)
engine.input.register_callback(self, button=bumblebee.input.RIGHT_MOUSE, cmd=immediate_update)
def output(self, _):
return str(self._count)
def update(self, _, immediate=False):
if immediate or self._nextcheck < int(time.time()):
self._nextcheck = int(time.time()) + self._interval * 60
try:
self._count = 0
items = self._requests.get(HIPCHAT_API_URL).json().get('items')
self._count = sum([item.get('unreadCount').get('count') for item in items])
except Exception:
self._count = "n/a"
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0 | |
786ed1d37ae5285bce1178d401d487233d4bd5b1 | Add greater/less than tests | test/osa_tests.py | test/osa_tests.py | #!/usr/bin/env python
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extra tests for jinja2 templates in Ansible."""
def greater_than(value, reference_value):
"""Return true if value > reference_value."""
return value > reference_value
def less_than(value, reference_value):
"""Return true if value < reference_value."""
return value < reference_value
class TestModule:
"""Main test class from Ansible."""
def tests(self):
"""Add these tests to the list of tests available to Ansible."""
return {
'greater_than': greater_than,
'less_than': less_than,
}
| Python | 0.000001 | |
0a3488915938de418ab0675f4cc051769b470927 | Fix tab switching test on reference builds. | tools/perf/measurements/tab_switching.py | tools/perf/measurements/tab_switching.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The tab switching measurement.
This measurement opens pages in different tabs. After all the tabs have opened,
it cycles through each tab in sequence, and records a histogram of the time
between when a tab was first requested to be shown, and when it was painted.
"""
from metrics import histogram_util
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_runner
# TODO: Revisit this test once multitab support is finalized.
class TabSwitching(page_measurement.PageMeasurement):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArg('--enable-stats-collection-bindings')
options.AppendExtraBrowserArg('--dom-automation')
options.AppendExtraBrowserArg('--reduce-security-for-dom-automation-tests')
def CanRunForPage(self, page):
return not page.page_set.pages.index(page)
def DidNavigateToPage(self, page, tab):
for i in xrange(1, len(page.page_set.pages)):
t = tab.browser.tabs.New()
page_state = page_runner.PageState()
page_state.PreparePage(page.page_set.pages[i], t)
page_state.ImplicitPageNavigation(page.page_set.pages[i], t)
def MeasurePage(self, _, tab, results):
"""Although this is called MeasurePage, we're actually using this function
to cycle through each tab that was opened via DidNavigateToPage and
thenrecord a single histogram for the tab switching metric.
"""
histogram_name = 'MPArch.RWH_TabSwitchPaintDuration'
histogram_type = histogram_util.BROWSER_HISTOGRAM
first_histogram = histogram_util.GetHistogramFromDomAutomation(
histogram_type, histogram_name, tab)
prev_histogram = first_histogram
for i in xrange(len(tab.browser.tabs)):
t = tab.browser.tabs[i]
t.Activate()
def _IsDone():
cur_histogram = histogram_util.GetHistogramFromDomAutomation(
histogram_type, histogram_name, tab)
diff_histogram = histogram_util.SubtractHistogram(
cur_histogram, prev_histogram)
return diff_histogram
util.WaitFor(_IsDone, 30)
prev_histogram = histogram_util.GetHistogramFromDomAutomation(
histogram_type, histogram_name, tab)
last_histogram = histogram_util.GetHistogramFromDomAutomation(
histogram_type, histogram_name, tab)
diff_histogram = histogram_util.SubtractHistogram(last_histogram,
first_histogram)
results.AddSummary(histogram_name, '', diff_histogram,
data_type='unimportant-histogram')
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The tab switching measurement.
This measurement opens pages in different tabs. After all the tabs have opened,
it cycles through each tab in sequence, and records a histogram of the time
between when a tab was first requested to be shown, and when it was painted.
"""
from metrics import histogram_util
from telemetry.core import util
from telemetry.page import page_measurement
from telemetry.page import page_runner
# TODO: Revisit this test once multitab support is finalized.
class TabSwitching(page_measurement.PageMeasurement):
  """Measures tab-switch paint latency across a page set.

  All pages of the set are opened in separate tabs; each tab is then
  activated in turn and the delta of the
  MPArch.RWH_TabSwitchPaintDuration browser histogram is reported.
  """

  def CustomizeBrowserOptions(self, options):
    # Histogram access from the test requires stats collection bindings
    # and DOM automation.
    options.AppendExtraBrowserArg('--enable-stats-collection-bindings')
    options.AppendExtraBrowserArg('--dom-automation')

  def CanRunForPage(self, page):
    # Only the first page of the set triggers the measurement; the rest
    # are opened as extra tabs in DidNavigateToPage.
    return not page.page_set.pages.index(page)

  def DidNavigateToPage(self, page, tab):
    # Open every remaining page of the set in its own new tab.
    for i in xrange(1, len(page.page_set.pages)):
      t = tab.browser.tabs.New()
      page_state = page_runner.PageState()
      page_state.PreparePage(page.page_set.pages[i], t)
      page_state.ImplicitPageNavigation(page.page_set.pages[i], t)

  def MeasurePage(self, _, tab, results):
    """Although this is called MeasurePage, we're actually using this function
    to cycle through each tab that was opened via DidNavigateToPage and
    then record a single histogram for the tab switching metric.
    """
    histogram_name = 'MPArch.RWH_TabSwitchPaintDuration'
    histogram_type = histogram_util.BROWSER_HISTOGRAM
    # Baseline snapshot so only samples from this run are reported.
    first_histogram = histogram_util.GetHistogramFromDomAutomation(
        histogram_type, histogram_name, tab)
    prev_histogram = first_histogram
    for i in xrange(len(tab.browser.tabs)):
      t = tab.browser.tabs[i]
      t.Activate()
      def _IsDone():
        # Done once the histogram gained a sample since the last switch.
        cur_histogram = histogram_util.GetHistogramFromDomAutomation(
            histogram_type, histogram_name, tab)
        diff_histogram = histogram_util.SubtractHistogram(
            cur_histogram, prev_histogram)
        return diff_histogram
      util.WaitFor(_IsDone, 30)
      prev_histogram = histogram_util.GetHistogramFromDomAutomation(
          histogram_type, histogram_name, tab)
    last_histogram = histogram_util.GetHistogramFromDomAutomation(
        histogram_type, histogram_name, tab)
    diff_histogram = histogram_util.SubtractHistogram(last_histogram,
                                                      first_histogram)
    results.AddSummary(histogram_name, '', diff_histogram,
                       data_type='unimportant-histogram')
| Python | 0.998504 |
01d9134067852a1f9dfecf75f730f9fba14434e0 | Add test_gradient_checker.py | python/paddle/v2/framework/tests/test_gradient_checker.py | python/paddle/v2/framework/tests/test_gradient_checker.py | import unittest
import numpy
from paddle.v2.framework.op import Operator
from gradient_checker import GradientChecker
from gradient_checker import get_numeric_gradient
class GetNumericGradientTest(unittest.TestCase):
    """Checks get_numeric_gradient against analytically known gradients."""

    def test_add_op(self):
        # d(X + Y)/dX is the identity, so every entry of the numeric
        # gradient should be ~1 and so should the mean.
        add_op = Operator('add_two', X="X", Y="Y", Out="Z")
        x = numpy.random.random((10, 1)).astype("float32")
        y = numpy.random.random((10, 1)).astype("float32")
        arr = get_numeric_gradient(add_op, {'X': x, "Y": y}, 'Z', 'X')
        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)

    def test_softmax_op(self):
        def stable_softmax(x):
            """Compute the softmax of vector x in a numerically stable way."""
            shiftx = x - numpy.max(x)
            exps = numpy.exp(shiftx)
            return exps / numpy.sum(exps)

        def label_softmax_grad(Y, dY):
            # Analytic softmax gradient: dX_i = Y_i * (dY_i - sum_j Y_j dY_j).
            dX = Y * 0.0
            for i in range(Y.shape[0]):
                d = numpy.dot(Y[i, :], dY[i, :])
                dX[i, :] = Y[i, :] * (dY[i, :] - d)
            return dX

        softmax_op = Operator("softmax", X="X", Y="Y")
        X = numpy.random.random((2, 2)).astype("float32")
        Y = numpy.apply_along_axis(stable_softmax, 1, X)
        dY = numpy.ones(Y.shape)
        dX = label_softmax_grad(Y, dY)
        arr = get_numeric_gradient(softmax_op, {"X": X}, 'Y', 'X')
        # BUG FIX: `decimal` is the number of decimal places (an int).
        # The original passed the tolerance 1e-2 here, which made the check
        # nearly vacuous (abs diff < 1.5 * 10**-0.01 ~= 1.46).
        numpy.testing.assert_almost_equal(arr, dX, decimal=2)


if __name__ == '__main__':
    unittest.main()
| Python | 0.000005 | |
9779fc585d8d8d87580a47139742eb25bc52facd | Add new decorators module, move deprecated from utils over here | kiwi/decorators.py | kiwi/decorators.py | #
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2005 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
#
import gobject
from kiwi import _warn
class deprecated(object):
    """Decorator that emits a deprecation warning on every call.

    :param new: name of the replacement API users should switch to.
    """

    def __init__(self, new):
        self._new = new

    def __call__(self, func):
        def wrapper(*args, **kwargs):
            _warn("%s is deprecated, use %s instead" % (func.__name__,
                  self._new))
            return func(*args, **kwargs)
        # Preserve the wrapped callable's metadata so introspection and
        # pydoc still show the original name/doc (the original wrapper
        # reported itself as "wrapper").
        wrapper.__name__ = func.__name__
        wrapper.__doc__ = func.__doc__
        return wrapper
class delayed(object):
    """Decorator that defers calls to the wrapped function through the
    gobject main loop: a call runs only after `delay` has elapsed, and
    further calls arriving while one is pending are dropped.

    NOTE(review): the pending-call state lives on the decorator instance,
    so every decorated function needs its own ``delayed(...)`` instance
    (which is what ``@delayed(ms)`` produces).
    """

    def __init__(self, delay):
        # Delay handed to gobject.timeout_add (GLib timeouts take
        # milliseconds — confirm callers pass ms).
        self._delay = delay
        # gobject timeout source id; -1 means no call is pending.
        self._timeout_id = -1

    def __call__(self, func):
        def real_call(args, kwargs):
            func(*args, **kwargs)
            self._timeout_id = -1
            # Returning False removes the timeout source (one-shot).
            return False

        def wrapper(*args, **kwargs):
            # Only one call at a time
            if self._timeout_id != -1:
                return
            self._timeout_id = gobject.timeout_add(self._delay,
                                                   real_call, args, kwargs)
        return wrapper
| Python | 0 | |
9258451157de31f3ece7e18fcb8ae43c433239f4 | add example to post files to Portals File System | portals_api/upload_files_to_portals_file_system.py | portals_api/upload_files_to_portals_file_system.py | # Example that uploads a file to the Portals File System using Portals API
# Access Level- Portals Domain Administrator
# Note: Uses Python 'Requests' module for calling API
# APIs:
# - http://docs.exosite.com/portals/#update-file-content
import requests
import getpass
# Interactive configuration: anything left blank below is prompted for.
directory = "images" #default directory name
domain = "" #example: example.exosite.com
user_email = "" #example: myname@example.com - assume administrator access to Portals Domain Solution

if domain == "":
    domain = raw_input('Enter Portals Domain (e.g. "example.exosite.com": ')
if user_email == "":
    user_email = raw_input('Enter Your Email Address: ')
user_password = getpass.getpass() #ask for password each time at prompt

# Files to upload (handles stay open until the POST below completes).
files = {"MyLogo.png":open("./MyLogo.png", "rb"),
         "MyOtherLogo.jpg":open("./MyOtherLogo.jpg", "rb")
         }

url = "https://"+domain+"/api/portals/v1/fs/"+directory

# Upload with HTTP basic auth.  NOTE(review): this is a Python 2 script
# (raw_input / print statements), so print("Status: ", ...) prints a tuple.
print 'Uploading files to ' + domain
r = requests.post(url, files=files, auth=(user_email, user_password))
print("Status: ", r.status_code)

# Read the directory listing back and print the resulting file URLs.
r = requests.get(url)
if r.status_code == 200:
    folder = r.json()
    for directory, filepath in folder.iteritems():
        for filename, filetype in filepath.iteritems():
            print("/".join([url,directory,filename]))
425d8ef0f439e9580c85e0dc04e5fe0c93cffddf | add 16 | p016.py | p016.py | # 2**15 = 32768 and the sum of its digits is 3+2+7+6+8=26
# what is the sum of the digits of the number 2**1000?
def f(n):
    """Return the sum of the decimal digits of 2**n."""
    digits = map(int, str(2 ** n))
    return sum(digits)
print f(1000)
| Python | 0.999998 | |
2b73467ccfbf6e29047223f1c1e3250916b6ffdb | add 23 | p023.py | p023.py | from itertools import combinations_with_replacement
def divisors(n):
    """Return the set of proper divisors of n (every divisor except n).

    BUG FIX: the original scanned ``range(1, n / 2)``, which (a) breaks on
    Python 3 because n / 2 is a float, and (b) missed divisors for small
    n — e.g. divisors(4) lacked 2 and divisors(2) was empty.  Scanning up
    to sqrt(n) and adding both members of each divisor pair is correct
    and O(sqrt(n)).
    """
    r = set()
    i = 1
    while i * i <= n:
        if n % i == 0:
            r.add(i)
            r.add(n // i)
        i += 1
    r.discard(n)
    return r
# Abundant numbers: proper-divisor sum exceeds the number itself.  28123
# is the proven bound above which every integer is a sum of two abundant
# numbers; 29000 is used here as a safe margin.  (Python 2 script.)
abundant = filter(lambda n: sum(divisors(n)) > n, range(2, 29000))
# Start from all candidates and knock out every number expressible as a
# sum of two (not necessarily distinct) abundant numbers.
u = set(range(1, 29000))
for i in combinations_with_replacement(abundant, 2):
    u.discard(sum(i))
print sum(u)
| Python | 0.999986 | |
351f2779549add63963d4103fbe1b058dde59d85 | Add stupid test to make Jenkins happy. | zipline/test/test_sanity.py | zipline/test/test_sanity.py | from unittest2 import TestCase
class TestEnviroment(TestCase):

    def test_universe(self):
        """Sanity check that the test runner itself is wired up."""
        # First-order logic still holds today. Yay!
        self.assertNotEqual(True, False)
| Python | 0.000006 | |
67f5e754a5f90903e09a6a876d858d002c513f8a | Add initial draft of posterior models | abcpy/posteriors.py | abcpy/posteriors.py | import scipy as sp
import numpy as np
import scipy.stats

from .utils import stochastic_optimization
class BolfiPosterior():
    """Approximate posterior for BOLFI-style likelihood-free inference.

    The (unnormalized) likelihood of a parameter vector x is the Gaussian
    probability, under the surrogate model's predictive distribution at x,
    that the discrepancy falls below ``threshold``.

    Parameters
    ----------
    model : surrogate exposing ``evaluate(x) -> (mean, var, std)``,
        ``bounds`` and ``n_var``
    threshold : float
        Discrepancy threshold.
    priors : list or None
        Optional per-dimension priors exposing ``getLogProbDensity(value)``;
        ``None`` entries mean an improper flat prior.
    """

    def __init__(self, model, threshold, priors=None):
        self.threshold = threshold
        self.model = model
        # BUG FIX: the original ignored the `priors` argument entirely and
        # always used flat priors.
        if priors is None:
            priors = [None] * model.n_var
        self.priors = priors
        self.ML, ML_val = stochastic_optimization(
            self._neg_unnormalized_loglikelihood_density, self.model.bounds, 10000)
        print("ML parameters: %s" % (self.ML))
        self.MAP, MAP_val = stochastic_optimization(
            self._neg_unnormalized_logposterior_density, self.model.bounds, 10000)
        print("MAP parameters: %s" % (self.MAP))

    def _unnormalized_loglikelihood_density(self, x):
        # P(discrepancy < threshold) under N(mean, std**2) at x.
        mean, var, std = self.model.evaluate(x)
        return sp.stats.norm.logcdf(self.threshold, mean, std)

    def _unnormalized_likelihood_density(self, x):
        return np.exp(self._unnormalized_loglikelihood_density(x))

    def _neg_unnormalized_loglikelihood_density(self, x):
        return -1 * self._unnormalized_loglikelihood_density(x)

    def _unnormalized_logposterior_density(self, x):
        return self._unnormalized_loglikelihood_density(x) + self._logprior_density(x)

    def _unnormalized_posterior_density(self, x):
        return np.exp(self._unnormalized_logposterior_density(x))

    def _neg_unnormalized_logposterior_density(self, x):
        return -1 * self._unnormalized_logposterior_density(x)

    def _logprior_density(self, x):
        # Sum the per-dimension log densities; None means flat (adds 0).
        logprior_density = 0.0
        for xv, prior in zip(x, self.priors):
            if prior is not None:
                logprior_density += prior.getLogProbDensity(xv)
        return logprior_density

    def _prior_density(self, x):
        return np.exp(self._logprior_density(x))

    def _neg_logprior_density(self, x):
        return -1 * self._logprior_density(x)

    def sample(self):
        # Placeholder sampler: always returns the MAP point, one singleton
        # list per dimension.
        return tuple([[v] for v in self.MAP])
| Python | 0 | |
8131bb276a467d7df00f7452616869d20d312eb7 | add api_view test | apps/api/tests/tests_view.py | apps/api/tests/tests_view.py | import datetime
from django.test import TestCase
from django.test.client import Client
from apps.pages.models import Page, Page_translation
class MySmileApiTestCase(TestCase):
    """Smoke tests for the public JSON API endpoints.

    setUp seeds one published API page ('index') with an English
    translation so the content endpoints have something to serve; each
    test only checks for an HTTP 200 response.
    """

    def setUp(self):
        # Minimal published page exposed through the API.
        some_page = Page.objects.create(id=1,
                                        slug='index',
                                        color='#FDA132',
                                        photo='images/photo.png',
                                        sortorder=1,
                                        status=Page.STATUS_PUBLISHED,
                                        ptype=Page.PTYPE_API,
                                        updated_at=datetime.datetime.now(),
                                        created_at=datetime.datetime.now())
        # English translation carrying the page content and metadata.
        Page_translation.objects.create(id=1,
                                        page=some_page,
                                        lang='en',
                                        menu='Main',
                                        col_central='lorem ipsum',
                                        col_bottom_1='lorem ipsum',
                                        col_bottom_2='lorem ipsum',
                                        col_bottom_3='lorem ipsum',
                                        meta_title='Welcome!',
                                        meta_description='This is mane page!',
                                        meta_keywords='Python3, Django',
                                        photo_alt='',
                                        photo_description = '',
                                        updated_at=datetime.datetime.now(),
                                        created_at=datetime.datetime.now())
        self._client = Client()

    def test_content_short(self):
        # Content listing without filters.
        response = self._client.get('/api/content')
        self.assertEqual(response.status_code, 200)

    def test_content_slug(self):
        response = self._client.get('/api/content?slug=index')
        self.assertEqual(response.status_code, 200)

    def test_content_slug_lang(self):
        response = self._client.get('/api/content?slug=index&lang=en')
        self.assertEqual(response.status_code, 200)

    def test_language(self):
        response = self._client.get('/api/language')
        self.assertEqual(response.status_code, 200)

    def test_contact(self):
        response = self._client.get('/api/contact')
        self.assertEqual(response.status_code, 200)
| Python | 0 | |
6104fdc57931151f6cf3c8cd517f5efee17fe826 | Update repost_stock_for_deleted_bins_for_merging_items.py | erpnext/patches/v7_1/repost_stock_for_deleted_bins_for_merging_items.py | erpnext/patches/v7_1/repost_stock_for_deleted_bins_for_merging_items.py | from __future__ import unicode_literals
import frappe
from erpnext.stock.stock_balance import repost_stock
def execute():
    """Repost stock for (item, warehouse) pairs that have submitted stock
    transactions but no Bin record.

    Only stock items modified since 2016-10-31 are considered — per the
    patch name, the window in which bins could have been deleted while
    merging items.
    """
    frappe.reload_doc('manufacturing', 'doctype', 'production_order_item')
    frappe.reload_doc('manufacturing', 'doctype', 'production_order')

    modified_items = frappe.db.sql_list("""
        select name from `tabItem`
        where is_stock_item=1 and modified >= '2016-10-31'
    """)

    if not modified_items:
        return

    # Gather every (item, warehouse) pair referenced by submitted rows of
    # the transactional doctypes below.
    item_warehouses_with_transactions = []
    transactions = ("Sales Order Item", "Material Request Item", "Purchase Order Item",
        "Stock Ledger Entry", "Packed Item")

    for doctype in transactions:
        item_warehouses_with_transactions += list(frappe.db.sql("""
            select distinct item_code, warehouse
            from `tab{0}` where docstatus=1 and item_code in ({1})"""
            .format(doctype, ', '.join(['%s']*len(modified_items))), tuple(modified_items)))

    # Production orders reference items via production_item/fg_warehouse...
    item_warehouses_with_transactions += list(frappe.db.sql("""
        select distinct production_item, fg_warehouse
        from `tabProduction Order` where docstatus=1 and production_item in ({0})"""
        .format(', '.join(['%s']*len(modified_items))), tuple(modified_items)))

    # ...and via their child items together with the order's source warehouse.
    item_warehouses_with_transactions += list(frappe.db.sql("""
        select distinct pr_item.item_code, pr.source_warehouse
        from `tabProduction Order` pr, `tabProduction Order Item` pr_item
        where pr_item.parent and pr.name and pr.docstatus=1 and pr_item.item_code in ({0})"""
        .format(', '.join(['%s']*len(modified_items))), tuple(modified_items)))

    item_warehouses_with_bin = list(frappe.db.sql("select distinct item_code, warehouse from `tabBin`"))

    # Pairs with transactions but no Bin row are the ones to repair.
    item_warehouses_with_missing_bin = list(
        set(item_warehouses_with_transactions) - set(item_warehouses_with_bin))

    for item_code, warehouse in item_warehouses_with_missing_bin:
        repost_stock(item_code, warehouse)
| from __future__ import unicode_literals
import frappe
from erpnext.stock.stock_balance import repost_stock
def execute():
frappe.reload_doc('manufacturing', 'doctype', 'production_order_item')
modified_items = frappe.db.sql_list("""
select name from `tabItem`
where is_stock_item=1 and modified >= '2016-10-31'
""")
if not modified_items:
return
item_warehouses_with_transactions = []
transactions = ("Sales Order Item", "Material Request Item", "Purchase Order Item",
"Stock Ledger Entry", "Packed Item")
for doctype in transactions:
item_warehouses_with_transactions += list(frappe.db.sql("""
select distinct item_code, warehouse
from `tab{0}` where docstatus=1 and item_code in ({1})"""
.format(doctype, ', '.join(['%s']*len(modified_items))), tuple(modified_items)))
item_warehouses_with_transactions += list(frappe.db.sql("""
select distinct production_item, fg_warehouse
from `tabProduction Order` where docstatus=1 and production_item in ({0})"""
.format(', '.join(['%s']*len(modified_items))), tuple(modified_items)))
item_warehouses_with_transactions += list(frappe.db.sql("""
select distinct pr_item.item_code, pr.source_warehouse
from `tabProduction Order` pr, `tabProduction Order Item` pr_item
where pr_item.parent and pr.name and pr.docstatus=1 and pr_item.item_code in ({0})"""
.format(', '.join(['%s']*len(modified_items))), tuple(modified_items)))
item_warehouses_with_bin = list(frappe.db.sql("select distinct item_code, warehouse from `tabBin`"))
item_warehouses_with_missing_bin = list(
set(item_warehouses_with_transactions) - set(item_warehouses_with_bin))
for item_code, warehouse in item_warehouses_with_missing_bin:
repost_stock(item_code, warehouse)
| Python | 0 |
3acf451435e1978fcfdd5c5d8f0386e87460039e | Add zerg(ling) rush example | examples/zerg_rush.py | examples/zerg_rush.py | import random
import sc2
from sc2 import Race, Difficulty, ActionResult
from sc2.player import Bot, Computer
class ZergRushBot(sc2.BotAI):
    """Example bot performing an early zergling rush.

    The boolean attributes below are one-shot latches used to sequence
    the build order across on_step iterations.
    """

    def __init__(self):
        self.drone_counter = 0
        self.overlord_counter = 0
        self.extractor_started = False
        self.spawning_pool_started = False
        self.moved_workers_to_gas = False
        self.moved_workers_from_gas = False
        # NOTE(review): 'queeen' is a typo, kept to match the uses below.
        self.queeen_started = False

    async def on_step(self, state, iteration):
        # If our hatchery is gone, all-in: throw every unit at the enemy.
        if not self.units("Hatchery").exists:
            for drone in self.units("Drone") | self.units("Zergling"):
                await self.do(drone("Attack", self.enemy_start_locations[0]))
            return

        hatchery = self.units("Hatchery").first
        larvae = self.units("Larva")

        # Send every idle zergling at the enemy start location.
        for zl in self.units("Zergling").idle:
            await self.do(zl("Attack", self.enemy_start_locations[0]))

        # Keep larva production up with queen injects.
        for q in self.units("Queen").idle:
            await self.do(q("Effect Inject Larva", hatchery))

        if self.vespene >= 100:
            # 100 gas is all this build needs: research zergling speed and
            # then pull the workers off gas back to minerals.
            sp = self.units("Spawning Pool").ready
            if sp.exists and self.minerals >= 100:
                await self.do(sp.first("Research Zergling Metabolic Boost"))
                self.minerals -= 100
            if not self.moved_workers_from_gas:
                self.moved_workers_from_gas = True
                for drone in self.units("Drone"):
                    m = state.units("MineralField", name_exact=False).closer_than(drone.position, 10)
                    await self.do(drone("Gather", m.random, queue=True))

        # Train an overlord when close to the supply cap.
        if state.common.food_used > 20 and state.common.food_used + 2 > state.common.food_cap:
            if larvae.exists:
                if self.minerals >= self.units("Overlord").cost.minerals:
                    self.overlord_counter += 1
                    await self.do(larvae.random("Train Overlord"))
                    return

        # Dump available minerals into zerglings once the pool is done.
        if self.units("Spawning Pool").ready.exists:
            if larvae.exists and self.minerals > self.units("Zergling").cost.minerals:
                for _ in range(min(larvae.amount, self.minerals // self.units("Zergling").cost.minerals)):
                    await self.do(larvae.random("Train Zergling"))
                    self.minerals -= self.units("Zergling").cost.minerals
                return

        # Put three drones on gas once the extractor finishes.
        if self.units("Extractor").ready.exists and not self.moved_workers_to_gas:
            self.moved_workers_to_gas = True
            extractor = self.units("Extractor").first
            for drone in self.units("Drone").random_group_of(3):
                await self.do(drone("Gather", extractor))

        # Expand with an extra hatchery when floating minerals.
        if self.minerals > 500:
            for d in range(4, 15):
                pos = hatchery.position.to2.towards(self.game_info.map_center, d)
                if await self.can_place("Hatchery", pos):
                    # NOTE(review): this sets spawning_pool_started rather
                    # than a hatchery flag — looks like a copy/paste slip;
                    # confirm intent.
                    self.spawning_pool_started = True
                    await self.do(self.units("Drone").random("Build Hatchery", pos))
                    break

        # Opening sequence: a few drones and the first overlord.
        if larvae.exists:
            if self.drone_counter < 3:
                if self.minerals >= self.units("Drone").cost.minerals:
                    self.drone_counter += 1
                    await self.do(larvae.random("Train Drone"))
                    return
            elif self.overlord_counter == 0:
                if self.minerals >= self.units("Overlord").cost.minerals:
                    self.overlord_counter += 1
                    await self.do(larvae.random("Train Overlord"))
                    return
            elif self.drone_counter < 2:
                # NOTE(review): unreachable — the first branch already
                # covers drone_counter < 3; confirm intended threshold.
                if self.minerals >= self.units("Drone").cost.minerals:
                    self.drone_counter += 1
                    await self.do(larvae.random("Train Drone"))
                    return

        # Tech-up sequence once the opening drones exist:
        # extractor -> spawning pool -> queen.
        if self.drone_counter > 1:
            if not self.extractor_started:
                if self.minerals >= self.units("Extractor").cost.minerals:
                    self.extractor_started = True
                    drone = self.units("Drone").random
                    target = state.units("VespeneGeyser").closest_to(drone.position)
                    await self.do(drone("Build Extractor", target))
            elif not self.spawning_pool_started:
                if self.minerals >= self.units("Spawning Pool").cost.minerals:
                    for d in range(4, 15):
                        pos = hatchery.position.to2.towards(self.game_info.map_center, d)
                        if await self.can_place("Spawning Pool", pos):
                            self.spawning_pool_started = True
                            drone = self.units("Drone").closest_to(pos)
                            await self.do(drone("Build Spawning Pool", pos))
                            break
            elif not self.queeen_started:
                if self.minerals >= self.units("Queen").cost.minerals:
                    r = await self.do(hatchery("Train Queen"))
                    # do() returning a falsy result indicates the order was
                    # accepted here — only then latch the flag.
                    if not r:
                        self.queeen_started = True
sc2.run_game(sc2.maps.get("Abyssal Reef LE"), [
Bot(Race.Zerg, ZergRushBot()),
Computer(Race.Protoss, Difficulty.Easy)
], realtime=True)
| Python | 0.000002 | |
142ec5bdca99d11236f2d479cf4dafbc7e8962a3 | test of the nis module | Lib/test/test_nis.py | Lib/test/test_nis.py | import nis
# Verbosity flag: print every map and entry when run directly as a script.
verbose = 0
if __name__ == '__main__':
    verbose = 1

# Walk every NIS map and verify that nis.match() agrees with nis.cat()
# for each key.  (Python 2 code: print statements and the `<>` operator.)
maps = nis.maps()
for nismap in maps:
    if verbose:
        print nismap
    mapping = nis.cat(nismap)
    for k, v in mapping.items():
        if verbose:
            print ' ', k, v
        # Empty keys cannot be looked up via match(); skip them.
        if not k:
            continue
        if nis.match(k, nismap) <> v:
            print "NIS match failed for key `%s' in map `%s'" % (k, nismap)
| Python | 0 | |
a35a6b715670e985c0bd711a4cb55df2a267e018 | Create downloader.py | 3.下载缓存/downloader.py | 3.下载缓存/downloader.py | import urlparse
import urllib2
import random
import time
from datetime import datetime, timedelta
import socket
DEFAULT_AGENT = 'wswp'
DEFAULT_DELAY = 5
DEFAULT_RETRIES = 1
DEFAULT_TIMEOUT = 60
class Downloader:
def __init__(self, delay=DEFAULT_DELAY, user_agent=DEFAULT_AGENT, proxies=None, num_retries=DEFAULT_RETRIES, timeout=DEFAULT_TIMEOUT, opener=None, cache=None):
socket.setdefaulttimeout(timeout)
self.throttle = Throttle(delay)
self.user_agent = user_agent
self.proxies = proxies
self.num_retries = num_retries
self.opener = opener
self.cache = cache
def __call__(self, url):
result = None
if self.cache:
try:
result = self.cache[url]
except KeyError:
# url is not available in cache
pass
else:
if self.num_retries > 0 and 500 <= result['code'] < 600:
# server error so ignore result from cache and re-download
result = None
if result is None:
# result was not loaded from cache so still need to download
self.throttle.wait(url)
proxy = random.choice(self.proxies) if self.proxies else None
headers = {'User-agent': self.user_agent}
result = self.download(url, headers, proxy=proxy, num_retries=self.num_retries)
if self.cache:
# save result to cache
self.cache[url] = result
return result['html']
def download(self, url, headers, proxy, num_retries, data=None):
print 'Downloading:', url
request = urllib2.Request(url, data, headers or {})
opener = self.opener or urllib2.build_opener()
if proxy:
proxy_params = {urlparse.urlparse(url).scheme: proxy}
opener.add_handler(urllib2.ProxyHandler(proxy_params))
try:
response = opener.open(request)
html = response.read()
code = response.code
except Exception as e:
print 'Download error:', str(e)
html = ''
if hasattr(e, 'code'):
code = e.code
if num_retries > 0 and 500 <= code < 600:
# retry 5XX HTTP errors
return self._get(url, headers, proxy, num_retries-1, data)
else:
code = None
return {'html': html, 'code': code}
class Throttle:
    """Throttle downloads by sleeping between requests to the same domain."""

    def __init__(self, delay):
        # Minimum number of seconds between two hits on one domain.
        self.delay = delay
        # Per-domain timestamp of the most recent access.
        self.domains = {}

    def wait(self, url):
        """Sleep if this URL's domain was accessed less than `delay` seconds ago."""
        host = urlparse.urlsplit(url).netloc
        previous = self.domains.get(host)
        if self.delay > 0 and previous is not None:
            remaining = self.delay - (datetime.now() - previous).seconds
            if remaining > 0:
                time.sleep(remaining)
        self.domains[host] = datetime.now()
| Python | 0.000001 | |
6bf4f7491bdfe8a5afd5eb8cdb4a8fcb2af78b36 | Add commands/findCognateClassesCrossingMeanings.py | ielex/lexicon/management/commands/findCognateClassesCrossingMeanings.py | ielex/lexicon/management/commands/findCognateClassesCrossingMeanings.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from collections import defaultdict
from django.core.management import BaseCommand
from ielex.lexicon.models import CognateJudgement, Lexeme
class Command(BaseCommand):
    help = "Compiles a list of cognate classes,"\
        "\nwhere each cognate class belongs to more than one meaning."

    def handle(self, *args, **options):
        # Map each lexeme to its meaning up front so the judgement scan
        # below resolves meanings without per-row queries.
        lexemeMeaningMap = dict(Lexeme.objects.values_list('id', 'meaning_id'))
        cogLexTuples = CognateJudgement.objects.values_list(
            'cognate_class_id', 'lexeme_id')
        # Collect the set of distinct meanings per cognate class.
        cogMeaningMap = defaultdict(set)
        for cogId, lexId in cogLexTuples:
            cogMeaningMap[cogId].add(lexemeMeaningMap[lexId])
        # Report every cognate class spanning more than one meaning.
        # NOTE(review): .iteritems() is Python 2 only.
        for cogId, mIdSet in cogMeaningMap.iteritems():
            if len(mIdSet) > 1:
                print("Cognate class %s has multiple meanings: %s." %
                      (cogId, mIdSet))
| Python | 0 | |
b7dd7f75f655f4fbcb34d8f9ec260a6f18e8f617 | Add utility to create administrative users. | backend/scripts/adminuser.py | backend/scripts/adminuser.py | #!/usr/bin/env python
import rethinkdb as r
from optparse import OptionParser
import sys
def create_group(conn):
    """Create the Materials Commons 'admin' usergroup document and return
    it, re-read from the database with raw time format."""
    group = {}
    group['name'] = "Admin Group"
    group['description'] = "Administration Group for Materials Commons"
    group['id'] = 'admin'
    group['owner'] = 'admin@materialscommons.org'
    group['users'] = []
    # Server-side creation/modification timestamps.
    group['birthtime'] = r.now()
    group['mtime'] = r.now()
    r.table('usergroups').insert(group).run(conn)
    admin_group = r.table('usergroups').get('admin')\
        .run(conn, time_format='raw')
    return admin_group
def add_user(user, group, conn):
    """Add `user` to the admin group unless already present, persisting
    the change to the usergroups table."""
    if user in group['users']:
        return
    group['users'].append(user)
    r.table('usergroups').get('admin').update(group).run(conn)
if __name__ == "__main__":
    # CLI: requires the rethinkdb port and the user to add to the admin
    # group; the group itself is created on first use.
    parser = OptionParser()
    parser.add_option("-P", "--port", type="int", dest="port",
                      help="rethinkdb port")
    parser.add_option("-u", "--user", type="string", dest="user",
                      help="user to add to admin group")
    (options, args) = parser.parse_args()
    if options.port is None:
        print "You must specify the rethinkdb port"
        sys.exit(1)
    if options.user is None:
        print "You must specify a user to add"
        sys.exit(1)
    conn = r.connect('localhost', options.port, db='materialscommons')
    admin_group = r.table('usergroups').get('admin')\
        .run(conn, time_format='raw')
    if admin_group is None:
        admin_group = create_group(conn)
    add_user(options.user, admin_group, conn)
| Python | 0 | |
a1c4eb2183e3d3920e992b0753392d987b518bcf | add unit-test for tablegenerator.util.split_string_at_suffix | benchexec/tablegenerator/test_util.py | benchexec/tablegenerator/test_util.py | # BenchExec is a framework for reliable benchmarking.
# This file is part of BenchExec.
#
# Copyright (C) 2007-2016 Dirk Beyer
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import unittest
sys.dont_write_bytecode = True # prevent creation of .pyc files
from benchexec.tablegenerator import util
class TestUnit(unittest.TestCase):
    """Unit tests for the number/unit splitting helpers in tablegenerator.util."""

    @classmethod
    def setUpClass(cls):
        cls.longMessage = True
        cls.maxDiff = None

    def assertEqualNumberAndUnit(self, value, number, unit):
        """Assert both splitting helpers yield (number, unit) for value."""
        self.assertEqual(util.split_number_and_unit(value), (number, unit))
        self.assertEqual(util.split_string_at_suffix(value, False), (number, unit))

    def assertEqualTextAndNumber(self, value, text, number):
        """Assert suffix splitting yields (text, number) for value."""
        self.assertEqual(util.split_string_at_suffix(value, True), (text, number))

    def test_split_number_and_unit(self):
        for value, number, unit in (
                ("", "", ""),
                ("1", "1", ""),
                ("1s", "1", "s"),
                ("111s", "111", "s"),
                ("s1", "s1", ""),
                ("s111", "s111", ""),
                ("-1s", "-1", "s"),
                ("1abc", "1", "abc"),
                ("abc", "", "abc"),
                ("abc1abc", "abc1", "abc"),
                ("abc1abc1abc", "abc1abc1", "abc"),
                ):
            self.assertEqualNumberAndUnit(value, number, unit)

    def test_split_string_at_suffix(self):
        for value, text, number in (
                ("", "", ""),
                ("1", "", "1"),
                ("1s", "1s", ""),
                ("111s", "111s", ""),
                ("s1", "s", "1"),
                ("s111", "s", "111"),
                ("-1s", "-1s", ""),
                ("abc1", "abc", "1"),
                ("abc", "abc", ""),
                ("abc1abc", "abc1abc", ""),
                ("abc1abc1", "abc1abc", "1"),
                ):
            self.assertEqualTextAndNumber(value, text, number)
| Python | 0.000001 | |
8d32947304d72a13ed8e27d41d35028a904072e9 | Add libpq package | libpq/conanfile.py | libpq/conanfile.py | from conans import ConanFile, AutoToolsBuildEnvironment, tools
import os
class LibpqConn(ConanFile):
    """Conan recipe that builds only libpq (the PostgreSQL client library)
    out of the full PostgreSQL source tree."""

    name = "libpq"
    version = "9.6.3"
    license = "PostgreSQL license https://www.postgresql.org/about/licence/"
    url = "https://github.com/trigger-happy/conan-packages"
    description = "C library for interfacing with postgresql"
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"

    def source(self):
        # Download and unpack the upstream tarball for this version.
        pkgLink = 'https://ftp.postgresql.org/pub/source/v{pkgver}/postgresql-{pkgver}.tar.bz2'.format(pkgver=self.version)
        self.run("curl -JOL " + pkgLink)
        self.run("tar xf postgresql-{pkgver}.tar.bz2".format(pkgver=self.version))
        self.run("mkdir deploy")

    def build(self):
        env_build = AutoToolsBuildEnvironment(self)
        # Configure with the current directory as install prefix so that
        # `make install` drops files into the build folder.
        install_prefix=os.getcwd()
        with tools.chdir("postgresql-{pkgver}".format(pkgver=self.version)):
            with tools.environment_append(env_build.vars):
                self.run("./configure --with-openssl --without-readline --prefix={0}".format(install_prefix))
                # Build/install only the libpq client library, not the server.
                with tools.chdir("src/interfaces/libpq"):
                    self.run("make install")

    def package(self):
        # NOTE(review): files are installed under the build prefix, yet this
        # copies from the 'deploy' subfolder created in source() — confirm
        # the install prefix actually resolves there at build time.
        with tools.chdir("deploy"):
            self.copy("lib/*", dst="lib", keep_path=False)
            self.copy("include/*", dst=".", keep_path=True)

    def package_info(self):
        # Consumers link against libpq.
        self.cpp_info.libs = ["pq"]
| Python | 0.000001 | |
e59c03f0bad78c9cb1db86f2fb0ac29009c8474e | add rll | reverse-linked-list.py | reverse-linked-list.py | # https://leetcode.com/problems/reverse-linked-list/
# Definition for singly-linked list.
class ListNode:
    # Singly-linked list node: a value and a reference to the next node
    # (None terminates the list).
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    # @param {ListNode} head
    # @return {ListNode}
    def reverseList(self, head):
        """Reverse the linked list in place and return the new head."""
        prev = None
        node = head
        while node:
            # Simultaneous assignment: relink the current node backwards
            # and advance both cursors in one step.
            node.next, prev, node = prev, node, node.next
        return prev
| Python | 0.000001 | |
0c17398f68597eae175ad6a37945cf37e95e1809 | Reset invalid default quotas for CloudServiceProjectLink [WAL-814] | nodeconductor/structure/migrations/0050_reset_cloud_spl_quota_limits.py | nodeconductor/structure/migrations/0050_reset_cloud_spl_quota_limits.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes import models as ct_models
from django.db import migrations, models
from nodeconductor.quotas.models import Quota
from nodeconductor.structure.models import CloudServiceProjectLink
def reset_cloud_spl_quota_limits(apps, schema_editor):
    """Reset invalid default quota limits on CloudServiceProjectLink quotas.

    Only quotas still set to the exact old default values are updated to
    -1, so user-customized limits are left untouched.  (-1 is assumed to
    mean "unlimited" here, per the patch intent — confirm convention.)
    """
    old_limits = {
        'vcpu': 100,
        'ram': 256000,
        'storage': 5120000,
    }
    for model in CloudServiceProjectLink.get_all_models():
        content_type = ct_models.ContentType.objects.get_for_model(model)
        for quota, limit in old_limits.items():
            Quota.objects.filter(content_type=content_type, name=quota, limit=limit).update(limit=-1)
class Migration(migrations.Migration):
    # Data-only migration: runs the quota-limit reset after the previous
    # structure migration.

    dependencies = [
        ('structure', '0049_extend_abbreviation'),
    ]

    operations = [
        migrations.RunPython(reset_cloud_spl_quota_limits),
    ]
| Python | 0 | |
63ae0b619ea50b1e234abc139becaeb84c703302 | add player class | MellPlayer/player.py | MellPlayer/player.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Netease Music Player
Created on 2017-02-20
@author: Mellcap
'''
class Player(object):
    """Skeleton of the music playback controller.

    Every operation is still an unimplemented stub (pass); this class
    only fixes the intended public interface.
    """

    def __init__(self):
        pass

    def start(self):
        # TODO: begin playback.
        pass

    def pause(self):
        # TODO: pause playback.
        pass

    def start_or_pause(self):
        # TODO: toggle between playing and paused.
        pass

    def switch_song(self, action='next'):
        '''
        action: next/prev
        '''
        pass

    def switch_playlist(self, action='next'):
        '''
        action: next/prev
        '''
        pass
| Python | 0 | |
602db58ff01ef7ea2718d713a5b2026377023b8d | Create context_processors.py | commons/context_processors.py | commons/context_processors.py | from os import environ
from {{ project_name }} import __version__
import uuid
def metainfo(request):
    """Context processor exposing build/deploy metadata to templates.

    Returns a per-request dict with a fresh UUID, the project version, a
    cache-busting query string for static assets, and the deploy branch.
    """
    token = unicode(uuid.uuid4())
    metainfo = {
        'uuid': token,
        'version': __version__,
        # BUG FIX: the original formatted the `uuid` *module* here,
        # producing "?v=<module 'uuid' ...>" — use the generated token.
        'static_version': "?v={}".format(token),
        # Raises KeyError if BRANCH is unset — assumed intentional
        # fail-fast; confirm before softening to environ.get().
        'branch': environ['BRANCH']
    }
    return metainfo
| Python | 0.000577 | |
6ac6f936a12fcc1578db3fed629ec3a8bc471dcb | remove print | src/you_get/extractor/acfun.py | src/you_get/extractor/acfun.py | #!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import *
from .qq import qq_download_by_id
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
    """Fetch the danmaku comment JSON for the given comment id."""
    return get_html('http://comment.acfun.com/%s.json' % id)
def get_srt_lock_json(id):
    """Fetch the locked danmaku comment JSON for the given comment id."""
    return get_html('http://comment.acfun.com/%s_lock.json' % id)
def acfun_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
info = json.loads(get_html('http://www.acfun.com/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
sourceId = info['sourceId']
danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_id(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
raise NotImplementedError(sourceType)
if not info_only:
title = get_filename(title)
try:
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(danmakuId)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w') as x:
x.write(cmt)
print('Downloading %s ...\n' % (title + '.cmt_lock.json'))
cmt = get_srt_lock_json(danmakuId)
with open(os.path.join(output_dir, title + '.cmt_lock.json'), 'w') as x:
x.write(cmt)
except:
pass
def acfun_download(url, output_dir = '.', merge = True, info_only = False):
assert re.match(r'http://[^\.]+.acfun.[^\.]+/v/ac(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\" href=\"[^\"]+\" title=\"([^\"]+)\"", html)
if videos is not None:
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1]
acfun_download_by_vid(p_vid, p_title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
# Useless - to be removed?
id = r1(r"src=\"/newflvplayer/player.*id=(\d+)", html)
sina_download_by_vid(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "AcFun.com"
download = acfun_download
download_playlist = playlist_not_supported('acfun')
| #!/usr/bin/env python
__all__ = ['acfun_download']
from ..common import *
from .qq import qq_download_by_id
from .sina import sina_download_by_vid
from .tudou import tudou_download_by_iid
from .youku import youku_download_by_vid
import json, re
def get_srt_json(id):
url = 'http://comment.acfun.com/%s.json' % id
return get_html(url)
def get_srt_lock_json(id):
url = 'http://comment.acfun.com/%s_lock.json' % id
return get_html(url)
def acfun_download_by_vid(vid, title=None, output_dir='.', merge=True, info_only=False):
info = json.loads(get_html('http://www.acfun.com/video/getVideo.aspx?id=' + vid))
sourceType = info['sourceType']
sourceId = info['sourceId']
danmakuId = info['danmakuId']
if sourceType == 'sina':
sina_download_by_vid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'youku':
print(sourceId, danmakuId)#
youku_download_by_vid(sourceId, title=title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'tudou':
tudou_download_by_iid(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
elif sourceType == 'qq':
qq_download_by_id(sourceId, title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
raise NotImplementedError(sourceType)
if not info_only:
title = get_filename(title)
try:
print('Downloading %s ...\n' % (title + '.cmt.json'))
cmt = get_srt_json(danmakuId)
with open(os.path.join(output_dir, title + '.cmt.json'), 'w') as x:
x.write(cmt)
print('Downloading %s ...\n' % (title + '.cmt_lock.json'))
cmt = get_srt_lock_json(danmakuId)
with open(os.path.join(output_dir, title + '.cmt_lock.json'), 'w') as x:
x.write(cmt)
except:
pass
def acfun_download(url, output_dir = '.', merge = True, info_only = False):
assert re.match(r'http://[^\.]+.acfun.[^\.]+/v/ac(\d+)', url)
html = get_html(url)
title = r1(r'<h1 id="txt-title-view">([^<>]+)<', html)
title = unescape_html(title)
title = escape_file_path(title)
assert title
videos = re.findall("data-vid=\"(\d+)\" href=\"[^\"]+\" title=\"([^\"]+)\"", html)
if videos is not None:
for video in videos:
p_vid = video[0]
p_title = title + " - " + video[1]
acfun_download_by_vid(p_vid, p_title, output_dir=output_dir, merge=merge, info_only=info_only)
else:
# Useless - to be removed?
id = r1(r"src=\"/newflvplayer/player.*id=(\d+)", html)
sina_download_by_vid(id, title, output_dir=output_dir, merge=merge, info_only=info_only)
site_info = "AcFun.com"
download = acfun_download
download_playlist = playlist_not_supported('acfun')
| Python | 0.000001 |
4152b6a10610aa364e901f062a8611b94f65b3de | Create e.py | at/abc126/e.py | at/abc126/e.py | # 并查集
read = input
n, m = map(int, read().split())
f = [-1 for i in range(n + 1)] # 1 ~ n
def find(x):
if f[x]<0:
return x
else :
f[x] = find(f[x])
return f[x]
for i in range(m):
x,y,z = map(int, read().split())
if abs(x) < abs(y): #合并到x上,保证x是大集合
x,y = y,x
fx = find(x)
fy = find(y)
if fx == fy:continue # 已经在一个集合,不操作
f[fx] = f[fx] - 1
f[fy] = fx
# print(x,y,fx,fy,f)
ans = 0
for i in range(1, n+1): # 1~n
if f[i] < 0:
ans += 1
print(ans)
| Python | 0.000001 | |
2057ebd9bae44b232b133ca0c0f76e11d4ca3b5f | Add missing file | conary/server/wsgi_adapter.py | conary/server/wsgi_adapter.py | #
# Copyright (c) rPath, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import webob
import sys
def modpython_to_webob(mpreq, handler):
# This could be written as a mod_python -> WSGI gateway, but this is much
# more compact.
from mod_python import apache
mpreq.add_common_vars()
environ = dict(mpreq.subprocess_env.items())
environ['wsgi.version'] = (1, 0)
if environ.get('HTTPS', '').lower() == 'on':
environ['wsgi.url_scheme'] = 'https'
else:
environ['wsgi.url_scheme'] = 'http'
environ['wsgi.input'] = mpreq
environ['wsgi.errors'] = sys.stderr
environ['wsgi.multithread'] = False
environ['wsgi.multiprocess'] = True
environ['wsgi.run_once'] = False
request = webob.Request(environ)
response = handler(request)
mpreq.status = response.status_int
for key, value in response.headerlist:
if key.lower() == 'content-length':
mpreq.set_content_length(int(value))
elif key.lower() == 'content-type':
mpreq.content_type = value
else:
mpreq.headers_out.add(key, value)
for chunk in response.app_iter:
mpreq.write(chunk)
return apache.OK
| Python | 0.000006 | |
38cec6e7806e55d957e9810d1bb861054ae4842b | add useful methods | useful_methods.py | useful_methods.py | # encoding utf-8
def bisect_right(data, target, lo, hi):
"""
Given a sorted array, returns the insertion position of target
If the value is already present, the insertion post is to the right of all of them
>>> bisect_right([1,1,2,3,4,5], 1, 0, 6)
2
>>> bisect_right([1,1,2,3,4,5], 0, 0, 6)
0
>>> bisect_right([1,1,2,3,4,5], 6, 0, 6)
6
"""
while lo < hi:
mid = (lo+hi)/2
if data[mid] > target:
hi = mid
else:
lo = mid+1
return lo
def bisect_left(data, target, lo, hi):
"""
Given a sorted array, returns the insertion position of target
If the value is already present, the insertion post is to the left of all of them
>>> bisect_left([1,1,2,3,4,5], 1, 0, 6)
0
>>> bisect_left([1,1,2,3,4,5], 6, 0, 6)
6
>>> bisect_left([1,1,2,3,4,5], 0, 0, 6)
0
"""
while lo < hi:
mid = (lo+hi)/2
if data[mid] < target:
lo = mid+1
else:
hi = mid
return lo
def permutations_generator(head, tail=[]):
"""
>>> [p for p in permutations_generator([1, 2, 3])]
[[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
"""
if not head:
yield tail
else:
for i in xrange(len(head)):
for p in permutations_generator(head[:i] + head[i+1:], tail+[head[i]]):
yield p
def permutations(data):
"""
>>> [p for p in permutations([1, 2, 3])]
[[3, 2, 1], [2, 3, 1], [3, 1, 2], [1, 3, 2], [2, 1, 3], [1, 2, 3]]
"""
stack = [(data, [])]
rv = []
while stack:
head, tail = stack.pop()
if not head:
rv.append(tail)
else:
for i in xrange(len(head)-1, -1, -1):
stack.append((head[:i] + head[i+1:], [head[i]]+tail))
return rv
class BinaryIndexedTree:
def __init__(self, length):
self._data = [0 for i in xrange(length+1)]
def value(self, pos):
rv = self._data[pos]
if (pos > 0):
z = pos - (pos & -pos)
pos -= 1
while (pos != z):
rv -= self._data[pos]
pos -= (pos & -pos)
return rv
def add(self, pos, count):
while pos <= len(self._data):
self._data[pos] += count
pos += (pos & -pos)
def accum(self, pos):
rv = 0
while (pos > 0):
rv += self._data[pos]
pos -= (pos & -pos)
return rv
def powerset(s):
"""Computes all of the sublists of s"""
rv = [[]]
for num in s:
rv += [x+[num] for x in rv]
return rv
def lis(arr):
"""
Return the Longest Increasing Subsequence of arr, in O(N^2)
>>> lis([2, 1, 3, 4, -5, 3, 2, 4, 5])
[-5, 2, 4, 5]
"""
elements = [(0, 1)]
global_max = (0, 1)
for i in xrange(1, len(arr)):
max_before = (i, 1)
for j in xrange(i-1, -1, -1):
if arr[i] > arr[j] and elements[j][1]+1 > max_before[1]:
max_before = (j, elements[j][1]+1)
elements.append(max_before)
if max_before[1] > global_max[1]:
global_max = (i, max_before[1])
last = len(arr)
current = global_max
sequence = []
while last != current[0]:
last = current[0]
sequence.append(arr[current[0]])
current = elements[current[0]]
return sequence[::-1]
| Python | 0.000036 | |
aef33a2c8f34d164bba18741a3cf6e5b71a60a99 | Add stub file for extract_csv.py | extract_csv.py | extract_csv.py | def extract_csv(filename):
# TODO: connect to sqlite database and extract a csv of the rows.
pass
if __name__ == '__main__':
extract_csv('data.csv') | Python | 0.000001 | |
f99eb9a2397f571f045f6a5f663a42878e94b3ea | Create Euler_003.py | Euler_003.py | Euler_003.py | #
x, num = 2, 600851475143
while num != x:
if num % x == 0: num = num / x; x = 2
else: x += 1
print x
| Python | 0.000169 | |
411ef30db7431e9df1af02cd68a6ae0b9d874af0 | add a first draft for the test of canal metrics | dipy/reconst/tests/test_canal_metrics.py | dipy/reconst/tests/test_canal_metrics.py | import numpy as np
from dipy.reconst.dsi import DiffusionSpectrumModel
from dipy.data import get_data
from dipy.core.gradients import gradient_table
from numpy.testing import (assert_almost_equal,
run_module_suite)
from dipy.reconst.canal import ShoreModel, SHOREmatrix
from dipy.sims.voxel import MultiTensor, all_tensor_evecs, multi_tensor_odf, single_tensor_odf, multi_tensor_rtop, multi_tensor_msd, multi_tensor_pdf
from dipy.data import fetch_isbi2013_2shell, read_isbi2013_2shell
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi
from dipy.data import get_sphere
def test_canal_metrics():
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
# fetch_isbi2013_2shell()
# img, gtab = read_isbi2013_2shell()
mevals = np.array(([0.0015, 0.0003, 0.0003],
[0.0015, 0.0003, 0.0003]))
angl = [(0, 0), (60, 0)]
S, sticks = MultiTensor(gtab, mevals, S0=100, angles=angl,
fractions=[50, 50], snr=None)
S = S / S[0, None].astype(np.float)
asm = ShoreModel(gtab)
asmfit = asm.fit(S)
radialOrder = 8
zeta = 800
lambdaN = 1e-12
lambdaL = 1e-12
Cshore = asmfit.l2estimation(radialOrder=radialOrder, zeta=zeta,
lambdaN=lambdaN, lambdaL=lambdaL)
Cmat = SHOREmatrix(radialOrder, zeta, gtab)
S_reconst = np.dot(Cmat, Cshore)
nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
assert_almost_equal(nmse_signal, 0.0, 4)
mevecs2 = np.zeros((2, 3, 3))
angl = np.array(angl)
for i in range(2):
mevecs2[i] = all_tensor_evecs(sticks[i]).T
sphere = get_sphere('symmetric724')
v = sphere.vertices
radius = 10e-3
pdf_shore = asmfit.pdf_iso(v * radius)
pdf_mt = multi_tensor_pdf(
v * radius, [.5, .5], mevals=mevals, mevecs=mevecs2)
nmse_pdf = np.sqrt(np.sum((pdf_mt - pdf_shore) ** 2)) / (pdf_mt.sum())
assert_almost_equal(nmse_pdf, 0.0, 2)
rtop_shore_signal = asmfit.rtop_signal()
rtop_shore_pdf = asmfit.rtop_pdf()
assert_almost_equal(rtop_shore_signal, rtop_shore_pdf, 9)
#rtop_mt = multi_tensor_rtop([.5, .5], mevals=mevals)
#err_rtop = np.abs(rtop_mt - rtop_shore_pdf) / rtop_mt
#assert_almost_equal(err_rtop, 0.0, 1)
msd_mt = multi_tensor_msd([.5, .5], mevals=mevals)
msd_shore = asmfit.msd()
err_msd = np.abs(msd_mt - msd_shore) / msd_mt
assert_almost_equal(err_msd, 0, 1)
if __name__ == '__main__':
run_module_suite()
| Python | 0 | |
1072b8e28e75cf41a35302c9febd1ec22473e966 | Add code/analyse_chain_growth.py | code/analyse_chain_growth.py | code/analyse_chain_growth.py | #!/usr/bin/env python
import sys
import os
import os.path
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('dirs', type=str, nargs='+',
help='directories containing simulation files')
parser.add_argument('--rate', type=float, default=0.1)
parser.add_argument('--sites', type=int, default=1)
parser.add_argument('-N', type=int, default=10000)
args = parser.parse_args()
import numpy as np
from scipy.optimize import leastsq
from io import StringIO
import matplotlib.pyplot as plt
NNEIGH=3.5
# Open lammps log file to extract thermodynamic observables
def from_log(logfile,i0,i1):
return np.loadtxt(StringIO(u''.join(logfile[i0+1:i1])), unpack=True)
fitfunc = lambda p, t: 1*(1.-np.exp(-t*p[0]-p[1]))
errfunc = lambda p, t, y: fitfunc(p, t) - y
p_data = []
for d in args.dirs:
logfile = open(os.path.join(os.getcwd(), d, 'log.lammps')).readlines()
start_indices = [(i,l) for (i,l) in enumerate(logfile) if l.startswith('Time ')]
stop_indices = [(i,l) for (i,l) in enumerate(logfile) if l.startswith('Loop time')]
time, e_tot, temp, e_kin, e_vdw, e_bond, e_pot, press, rho, n_bonds, n_bonds_max, bonds = from_log(logfile, start_indices[-1][0], stop_indices[-1][0])
time -= time[0]
plt.plot(time, n_bonds)
nmax = min(int(1./(args.rate*args.fraction)), len(time))
nmax = len(time)
p, success = leastsq(errfunc, [args.rate*NNEIGH*args.fraction, 0./args.rate], args=(time[:nmax], n_bonds[:nmax]))
p_data.append(p)
print p
plt.plot(time, 1*(1.-np.exp(-time*args.rate*NNEIGH*args.fraction)))
p_data = np.array(p_data)
print p_data.mean(axis=0)
plt.plot(time, fitfunc(p_data.mean(axis=0), time), 'k--')
plt.show()
| Python | 0.000111 | |
bd15388aa877f32ebc613511ad909b311ed3bcf0 | Add tests | sympy/concrete/tests/test_dispersion.py | sympy/concrete/tests/test_dispersion.py | from sympy.core import Symbol, S, oo
from sympy.concrete.dispersion import *
def test_dispersion():
x = Symbol("x")
fp = S(0).as_poly(x)
assert sorted(dispersionset(fp)) == [0]
fp = S(2).as_poly(x)
assert sorted(dispersionset(fp)) == [0]
fp = (x + 1).as_poly(x)
assert sorted(dispersionset(fp)) == [0]
assert dispersion(fp) == 0
fp = (x*(x + 3)).as_poly(x)
assert sorted(dispersionset(fp)) == [0, 3]
assert dispersion(fp) == 3
fp = ((x - 3)*(x + 3)).as_poly(x)
assert sorted(dispersionset(fp)) == [0, 6]
assert dispersion(fp) == 6
fp = ((x + 1)*(x + 2)).as_poly(x)
assert sorted(dispersionset(fp)) == [0, 1]
assert dispersion(fp) == 1
fp = (x**4 - 3*x**2 + 1).as_poly(x)
gp = fp.shift(-3)
assert sorted(dispersionset(fp, gp)) == [2, 3, 4]
assert dispersion(fp, gp) == 4
assert sorted(dispersionset(gp, fp)) == []
assert dispersion(gp, fp) == -oo
a = Symbol("a")
fp = (x*(3*x**2+a)*(x-2536)*(x**3+a)).as_poly(x)
gp = fp.as_expr().subs(x, x-345).as_poly(x)
assert sorted(dispersionset(fp, gp)) == [345, 2881]
| Python | 0.000001 | |
6ed3b62efe24aa8aeaedd314bb4e472628713bac | Create deft_opportunist.py | tpdatasrc/tpgamefiles/scr/tpModifiers/deft_opportunist.py | tpdatasrc/tpgamefiles/scr/tpModifiers/deft_opportunist.py | #Deft Opportunist: Complete Adventurer, p. 106
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
print "Registering Deft Opportunist"
def DOAOO(attachee, args, evt_obj):
if attachee.has_feat("Deft Opportunist") != 0:
#Check if it's an AOO, if so add 4 to the Attack Roll
if evt_obj.attack_packet.get_flags() & D20CAF_ATTACK_OF_OPPORTUNITY:
evt_obj.bonus_list.add(4, 0, "Target Deft Opportunist bonus")
return 0
eDO = PythonModifier("Deft Opportunist Feat", 2)
eDO.MapToFeat("Deft Opportunist")
eDO.AddHook(ET_OnToHitBonus2, EK_NONE, DOAOO, ())
| Python | 0.001698 | |
52f8daf63644fde1efd1c132d6b02ac6670ef0a4 | Add migrations merge | temba/channels/migrations/0038_merge.py | temba/channels/migrations/0038_merge.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('channels', '0037_auto_20160905_1537'),
('channels', '0033_auto_20160623_1438'),
]
operations = [
]
| Python | 0.000001 | |
1b538aba890c8a81fc7bf66f2c35519608fbd6be | Create drivers.py | chips/analog/mock/drivers.py | chips/analog/mock/drivers.py | # This code has to be added to the corresponding __init__.py
DRIVERS["analogmock"] = ["ANALOG", "PUUM"]
| Python | 0.000001 | |
528de5a29d7beb743e5e80775a349f931e71262f | add test that triggers previous error | test/workflows/test_base.py | test/workflows/test_base.py | import json
import fmriprep.workflows.base as base
import re
import unittest
import mock
class TestBase(unittest.TestCase):
def test_fmri_preprocess_single(self):
''' Tests that it runs without errors '''
# NOT a test for correctness
# SET UP INPUTS
test_settings = {
'output_dir': '.',
'work_dir': '.'
}
# SET UP EXPECTATIONS
# RUN
base.fmri_preprocess_single(settings=test_settings)
# ASSERT
| Python | 0 | |
0e02a9de3599e726b5a4dffd17f92a0cd0d2aaee | add import script for Wyre | polling_stations/apps/data_collection/management/commands/import_wyre.py | polling_stations/apps/data_collection/management/commands/import_wyre.py | from data_collection.management.commands import BaseXpressWebLookupCsvImporter
class Command(BaseXpressWebLookupCsvImporter):
council_id = 'E07000128'
addresses_name = 'WyrePropertyPostCodePollingStationWebLookup-2017-03-08 2.CSV'
stations_name = 'WyrePropertyPostCodePollingStationWebLookup-2017-03-08 2.CSV'
elections = ['local.lancashire.2017-05-04']
| Python | 0 | |
4e9ecd13cedc069e53e6acc941f643ad0f8cf6b0 | fix cleanup command | corehq/apps/callcenter/management/commands/remove_callcenter_form_data.py | corehq/apps/callcenter/management/commands/remove_callcenter_form_data.py | from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.session import sessionmaker
from corehq.apps.callcenter.utils import get_call_center_domains, get_or_create_mapping
from ctable.models import SqlExtractMapping
from ctable.util import get_extractor
from django.conf import settings
mapping_name = 'cc_form_submissions'
class Command(BaseCommand):
help = 'Remove legacy call center data'
option_list = BaseCommand.option_list + (
make_option('--all-tables', action='store_true', default=False,
help="Delete all tables regardless of domain setting"),
make_option('--all-mappings', action='store_true', default=False,
help="Delete all mappings and mappings regardless of domain setting"),
make_option('--dry-run', action='store_true', default=False,
help="Don't actually do anything"),
)
def handle(self, *args, **options):
drop_all_tables = options.get('all_tables', False)
delete_all_mappings = options.get('all_mappings', False)
dry_run = options.get('dry_run', False)
if dry_run:
print("\n-------- DRY RUN --------\n")
all_tables = get_db_tables(settings.SQL_REPORTING_DATABASE_URL)
extractor = get_extractor('SQL')
domains = get_call_center_domains()
for domain in domains:
print("Processing domain", domain)
mapping = get_or_create_mapping(domain, mapping_name)
if mapping.table_name in all_tables:
print("\tDropping SQL table", mapping.table_name)
if not dry_run:
extractor.clear_all_data(mapping)
if not mapping.new_document:
print("\tDeleting ctable mapping", mapping.name)
if not dry_run:
mapping.delete()
missed_tables = [t for t in all_tables if t.endswith(mapping_name)]
if missed_tables:
print('\nSome tables are still hanging around:')
with extractor.backend as backend:
for table in missed_tables:
if not drop_all_tables:
print('\t*', table)
else:
print("\tDeleting table", table)
backend.op.drop_table(table)
if not drop_all_tables:
print("\n To delete these tables run with '--all-tables'")
all_mappings = SqlExtractMapping.all()
missed_mappings = [m for m in all_mappings if m.name == mapping_name]
if missed_mappings:
print('\nSome mappings are still hanging around:')
for mapping in missed_mappings:
if not delete_all_mappings:
print('\t*', mapping.name, 'for domains', ', '.join(mapping.domains))
else:
print('\tDeleting mapping', mapping.name, 'for domains', ', '.join(mapping.domains))
mapping.delete()
if not delete_all_mappings:
print("\n To delete these mappings run with '--all-mappings'")
def get_session(url):
engine = create_engine(url)
session = sessionmaker(bind=engine)
return session()
def get_db_tables(database_url):
session = get_session(database_url)
results = session.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public';
""")
return [r[0] for r in results]
| from __future__ import print_function
from optparse import make_option
from django.core.management.base import BaseCommand
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.session import sessionmaker
from corehq.apps.callcenter.utils import get_call_center_domains, get_or_create_mapping
from ctable.models import SqlExtractMapping
from ctable.util import get_extractor
from django.conf import settings
mapping_name = 'cc_form_submissions'
class Command(BaseCommand):
help = 'Remove legacy call center data'
option_list = BaseCommand.option_list + (
make_option('--all-tables', action='store_true', default=False,
help="Delete all tables regardless of domain setting"),
make_option('--all-mappings', action='store_true', default=False,
help="Delete all mappings and mappings regardless of domain setting"),
make_option('--dry-run', action='store_true', default=False,
help="Don't actually do anything"),
)
def handle(self, *args, **options):
drop_all_tables = options.get('all-tables', False)
delete_all_mappings = options.get('all-mappings', False)
dry_run = options.get('dry_run', False)
if dry_run:
print("\n-------- DRY RUN --------\n")
all_tables = get_db_tables(settings.SQL_REPORTING_DATABASE_URL)
domains = get_call_center_domains()
for domain in domains:
print("Processing domain", domain)
mapping = get_or_create_mapping(domain, mapping_name)
if mapping.table_name in all_tables:
print("\tDropping SQL table", mapping.table_name)
if not dry_run:
extractor = get_extractor(mapping.backend)
extractor.clear_all_data(mapping)
if not mapping.new_document:
print("\tDeleting ctable mapping", mapping.name)
if not dry_run:
mapping.delete()
missed_tables = [t for t in all_tables if t.endswith(mapping_name)]
if missed_tables:
print('\nSome tables are still hanging around:')
with extractor.backend as backend:
for table in missed_tables:
if not drop_all_tables:
print('\t*', table)
else:
print("\tDeleting table", table)
backend.op.drop_table(table)
if not drop_all_tables:
print("\n To delete these tables run with '--all-tables'")
all_mappings = SqlExtractMapping.all()
missed_mappings = [m for m in all_mappings if m.name == mapping_name]
if missed_mappings:
print('\nSome mappings are still hanging around:')
for mapping in missed_mappings:
if not delete_all_mappings:
print('\t*', mapping.name, 'for domains', ', '.join(mapping.domains))
else:
print('\tDeleting mapping', mapping.name, 'for domains', ', '.join(mapping.domains))
mapping.delete()
if not delete_all_mappings:
print("\n To delete these mappings run with '--all-mappings'")
def get_session(url):
engine = create_engine(url)
session = sessionmaker(bind=engine)
return session()
def get_db_tables(database_url):
session = get_session(database_url)
results = session.execute("""
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public';
""")
return [r[0] for r in results]
| Python | 0.000006 |
b14fb988321076f4cf17cebec7635fd209e08465 | Create video.py | client/video.py | client/video.py | # Capture video with OpenCV
import numpy as np
import cv2
import time
cap = cv2.VideoCapture('serenity.mp4')
while(cap.isOpened()):
ret, frame = cap.read()
# time.sleep(.25)
cv2.rectangle(frame,(384,0),(510,128),(0,255,0),3)
cv2.imshow('frame',frame)
if cv2.waitKey(5) & 0xFF == ord('q'):
break
cap.release()
| Python | 0.000001 | |
18a356c9fa49f32627481f312b03aa34ff711456 | Revert "Define the tests as grpc_cc_test to automatically test against all po…" | test/core/bad_client/generate_tests.bzl | test/core/bad_client/generate_tests.bzl | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the bad_client tests."""
def test_options():
return struct()
# maps test names to options
BAD_CLIENT_TESTS = {
'badreq': test_options(),
'connection_prefix': test_options(),
'headers': test_options(),
'initial_settings_frame': test_options(),
'head_of_line_blocking': test_options(),
'large_metadata': test_options(),
'server_registered_method': test_options(),
'simple_request': test_options(),
'window_overflow': test_options(),
'unknown_frame': test_options(),
}
def grpc_bad_client_tests():
native.cc_library(
name = 'bad_client_test',
srcs = ['bad_client.cc'],
hdrs = ['bad_client.h'],
deps = ['//test/core/util:grpc_test_util', '//:grpc', '//:gpr', '//test/core/end2end:cq_verifier']
)
for t, topt in BAD_CLIENT_TESTS.items():
native.cc_test(
name = '%s_bad_client_test' % t,
srcs = ['tests/%s.cc' % t],
deps = [':bad_client_test'],
)
| #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate build.json data for all the bad_client tests."""
load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_cc_library")
def test_options():
return struct()
# maps test names to options
BAD_CLIENT_TESTS = {
'badreq': test_options(),
'connection_prefix': test_options(),
'headers': test_options(),
'initial_settings_frame': test_options(),
'head_of_line_blocking': test_options(),
'large_metadata': test_options(),
'server_registered_method': test_options(),
'simple_request': test_options(),
'window_overflow': test_options(),
'unknown_frame': test_options(),
}
def grpc_bad_client_tests():
grpc_cc_library(
name = 'bad_client_test',
srcs = ['bad_client.cc'],
hdrs = ['bad_client.h'],
deps = ['//test/core/util:grpc_test_util', '//:grpc', '//:gpr', '//test/core/end2end:cq_verifier']
)
for t, topt in BAD_CLIENT_TESTS.items():
grpc_cc_test(
name = '%s_bad_client_test' % t,
srcs = ['tests/%s.cc' % t],
deps = [':bad_client_test'],
)
| Python | 0 |
59ac83e45116a97cfbdd7522f967337e73d51766 | add cargo deny test | tests/integration_tests/build/test_dependencies.py | tests/integration_tests/build/test_dependencies.py | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Enforces controls over dependencies."""
import os
import framework.utils as utils
def test_licenses():
"""Ensure license compatibility for Firecracker.
For a list of currently allowed licenses checkout deny.toml in
the root directory.
@type: build
"""
toml_file = os.path.normpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../Cargo.toml')
)
utils.run_cmd('cargo deny --manifest-path {} check licenses'.
format(toml_file))
| Python | 0 | |
b03078362d171854a7438c335821363e4010a7db | add Expect_Geometry_Not_to_Overlap (#4642) | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_geometry_not_to_overlap.py | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_geometry_not_to_overlap.py | import json
from typing import Optional
import geopandas
import numpy as np
import rtree
from shapely.geometry import LineString, Point, Polygon
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.exceptions import InvalidExpectationConfigurationError
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import ColumnExpectation
from great_expectations.expectations.metrics import (
ColumnAggregateMetricProvider,
column_aggregate_value,
)
# This class defines a Metric to support your Expectation.
# For most ColumnMapExpectations, the main business logic for calculation will live in this class.
class ColumnValuesToCheckOverlap(ColumnAggregateMetricProvider):
# This is the id string that will be used to reference your metric.
metric_name = "column_values.geometry_not_overlap"
# This method implements the core logic for the PandasExecutionEngine
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
geo_ser = geopandas.GeoSeries(column)
input_indices, result_indices = geo_ser.sindex.query_bulk(
geo_ser.geometry, predicate="overlaps"
)
overlapping = np.unique(result_indices) # integer indeces of overlapping
if np.any(overlapping):
return {"success": False, "indices": overlapping}
else:
return {"success": True}
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
class ExpectColumnValuesGeometryNotToOverlap(ColumnExpectation):
    """Expect geometries in this column Not to overlap with each other. If any two geometries do overlap, expectation will return False.
    For more information look here
    https://stackoverflow.com/questions/64042379/shapely-is-valid-returns-true-to-invalid-overlap-polygons"""

    # These examples will be shown in the public gallery.
    # They will also be executed as unit tests for your Expectation.
    examples = [
        {
            "data": {
                # Pairwise-disjoint shapes (the point touches nothing) -> success.
                "geometry_not_overlaps": [
                    Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
                    Polygon([(2, 2), (4, 2), (4, 4), (2, 4)]),
                    Point(5, 6),
                ],
                # These triangles share interior area -> failure.
                "geometry_overlaps": [
                    Polygon([(0, 0), (1, 1), (0, 1)]),
                    Polygon([(10, 0), (10, 5), (0, 0)]),
                    Polygon([(0, 0), (2, 2), (2, 0)]),
                ],
            },
            "tests": [
                {
                    "title": "basic_positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "geometry_not_overlaps"},
                    "out": {"success": True},
                },
                {
                    "title": "basic_negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {"column": "geometry_overlaps"},
                    "out": {"success": False},
                },
            ],
        }
    ]

    # This is the id string of the Metric used by this Expectation.
    # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
    metric_dependencies = ("column_values.geometry_not_overlap",)

    # This is a list of parameter names that can affect whether the Expectation evaluates to True or False
    success_keys = ("mostly",)

    # This dictionary contains default values for any parameters that should have default values
    default_kwarg_values = {}

    def validate_configuration(
        self, configuration: Optional[ExpectationConfiguration]
    ) -> None:
        """
        Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
        necessary configuration arguments have been provided for the validation of the expectation.
        Args:
            configuration (OPTIONAL[ExpectationConfiguration]): \
                An optional Expectation Configuration entry that will be used to configure the expectation
        Returns:
            None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
        """
        super().validate_configuration(configuration)
        if configuration is None:
            configuration = self.configuration
        # # Check other things in configuration.kwargs and raise Exceptions if needed
        # try:
        #     assert (
        #         ...
        #     ), "message"
        #     assert (
        #         ...
        #     ), "message"
        # except AssertionError as e:
        #     raise InvalidExpectationConfigurationError(str(e))

    def _validate(
        self,
        configuration: ExpectationConfiguration,
        metrics,
        runtime_configuration: dict = None,
        execution_engine=None,
    ):
        # Relay the metric's verdict and, for diagnosis, the row indices the
        # metric reported as overlapping.
        success = metrics.get("column_values.geometry_not_overlap").get("success")
        indices = metrics.get("column_values.geometry_not_overlap").get("indices")
        return {"success": success, "result": {"overlapping_indices": indices}}

    # This object contains metadata for display in the public Gallery
    library_metadata = {
        "maturity": "experimental",  # "experimental", "beta", or "production"
        "tags": [
            "hackathon-22",
            "geospatial",
        ],  # Tags for this Expectation in the Gallery
        "contributors": [  # Github handles for all contributors to this Expectation.
            "@luismdiaz01",
            "@derekma73",  # Don't forget to add your github handle here!
        ],
        "requirements": ["rtree", "geopandas", "shapely", "numpy"],
    }


if __name__ == "__main__":
    ExpectColumnValuesGeometryNotToOverlap().print_diagnostic_checklist()
| Python | 0 | |
c3f01d8b365e6d367b1a565e5ce59cf04eb1bac3 | fix build | get_version.py | get_version.py | """Return the short version string."""
from mpfmonitor._version import __short_version__

# Emit the short version with an ".x" suffix, e.g. "0.50.x".
print(__short_version__ + ".x")
| Python | 0.000001 | |
15d3692aee84432b6b7f8306505b3f59649fd6f9 | Remove mimetype from the module_files table | cnxarchive/sql/migrations/20160128111115_mimetype_removal_from_module_files.py | cnxarchive/sql/migrations/20160128111115_mimetype_removal_from_module_files.py | # -*- coding: utf-8 -*-
"""\
- Move the mimetype value from ``module_files`` to ``files``.
- Remove the ``mimetype`` column from the ``module_files`` table.
"""
from __future__ import print_function
import sys
def up(cursor):
    """Apply the migration: move mimetype onto ``files`` and drop the column.

    Steps, in order:
    1. Copy each ``module_files.mimetype`` onto the matching ``files`` row.
    2. Warn (to stderr) about ``files`` rows with no ``module_files`` entry,
       since their mimetype cannot be recovered after the column is dropped.
    3. Drop the now-redundant ``module_files.mimetype`` column.
    """
    cursor.execute(
        "UPDATE files AS f SET media_type = mf.mimetype "
        "FROM module_files AS mf "
        "WHERE mf.fileid = f.fileid")
    cursor.execute(
        "SELECT fileid, sha1 "
        "FROM files AS f "
        "WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
    orphans = cursor.fetchall()
    listing = '\n'.join('{}, {}'.format(fid, sha1) for fid, sha1 in orphans)
    print("These files (fileid, sha1) do not have a corresponding "
          "module_files entry:\n{}\n".format(listing),
          file=sys.stderr)
    cursor.execute("ALTER TABLE module_files DROP COLUMN mimetype")
def down(cursor):
    """Roll the migration back: re-create ``module_files.mimetype``.

    The rollback is lossy by design -- values for files that had no
    ``module_files`` entry were never preserved, hence the stderr warning.
    """
    # Add a ``mimetype`` column to the ``module_files`` table.
    cursor.execute("ALTER TABLE module_files ADD COLUMN mimetype TEXT")
    # Move the mimetype value from ``files`` to ``module_files``.
    print("Rollback cannot accurately replace mimetype values that "
          "were in the ``modules_files`` table.",
          file=sys.stderr)
    cursor.execute("UPDATE module_files AS mf SET mimetype = f.media_type "
                   "FROM files AS f "
                   "WHERE f.fileid = mf.fileid")
| Python | 0.000001 | |
67b5cd3f00ca57c4251dab65c5a6e15ab2be8a42 | Create result.py | aiorucaptcha/result.py | aiorucaptcha/result.py | class ResultObject:
def __init__(self, code, task_id):
self.code = code
self.task_id = task_id
    def __str__(self):
        # str(result) returns the stored ``code`` value directly.
        # NOTE(review): assumes ``code`` is already a str -- confirm upstream.
        return self.code
| Python | 0.000002 | |
4a7a15359763cbd6956bd30bde7cd68b05b2b4a2 | test _compare_and_pop_smallest | tests/test_huffman_codes.py | tests/test_huffman_codes.py | import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '../..'))
import unittest
from huffman_codes import huffman_codes, Node, Queue, _compare_and_pop_smallest, \
_traverse_children_and_assign_codes
class TestHuffmanCodes(unittest.TestCase):
    """Tests for the huffman_codes helper functions."""

    @staticmethod
    def _queue_with(*priorities):
        """Build a Queue containing one (node, priority) pair per argument."""
        queue = Queue()
        for priority in priorities:
            queue.enqueue((None, priority))
        return queue

    def test_compare_and_pop_smallest__first_q_smaller(self):
        q_1 = self._queue_with(1)
        q_2 = self._queue_with(2)
        output = _compare_and_pop_smallest(q_1, q_2)
        self.assertEqual(output[1], 1)

    def test_compare_and_pop_smallest__second_q_smaller(self):
        q_1 = self._queue_with(1)
        q_2 = self._queue_with(2)
        output = _compare_and_pop_smallest(q_2, q_1)
        self.assertEqual(output[1], 1)

    def test_compare_and_pop_smallest__first_q_empty(self):
        # Fixed: the empty queue is now actually passed as the *first*
        # argument, matching the test name (it was previously passed second).
        q_1 = self._queue_with()
        q_2 = self._queue_with(2)
        output = _compare_and_pop_smallest(q_1, q_2)
        self.assertEqual(output[1], 2)

    def test_compare_and_pop_smallest__second_q_empty(self):
        # Fixed: the empty queue is now actually passed as the *second*
        # argument, matching the test name (it was previously passed first).
        q_1 = self._queue_with(1)
        q_2 = self._queue_with()
        output = _compare_and_pop_smallest(q_1, q_2)
        self.assertEqual(output[1], 1)

    def test_traverse_children_and_assign_codes(self):
        # TODO: exercise _traverse_children_and_assign_codes.
        pass

    def test_huffman_codes(self):
        # TODO: exercise huffman_codes end to end.
        pass
| Python | 0.00006 | |
43d3158e536b7cae3f427f655b08aa8b4c24fe96 | Add an iter_entry_points style test | tests/test_spicedham_api.py | tests/test_spicedham_api.py | from unittest import TestCase
from spicedham import Spicedham
from mock import Mock, patch
class TestSpicedHamAPI(TestCase):
    """Tests for the public Spicedham API.

    NOTE(review): this module appears to target Python 2 -- see the bare
    ``print`` statement in test_load_backend below, which is a syntax error
    under Python 3.
    """

    @patch('spicedham.Spicedham._classifier_plugins')
    def test_classify(self, mock_plugins):
        # classify() is expected to average the scores of plugins that return
        # a number and to ignore plugins that return None.
        sh = Spicedham()
        plugin0 = Mock()
        plugin0.classify.return_value = .5
        plugin1 = Mock()
        plugin1.classify.return_value = .75
        plugin2 = Mock()
        plugin2.classify.return_value = None
        mock_plugins.__iter__.return_value = [plugin0, plugin1, plugin2]
        # Test when some plugins return numbers and some return None
        value = sh.classify(['classifying', 'data'])
        self.assertEqual(value, 0.625)
        # Test when all plugins return None
        plugin0.classify.return_value = None
        plugin1.classify.return_value = None
        value = sh.classify(['classifying', 'data'])
        self.assertEqual(value, 0)

    @patch('spicedham.iter_entry_points')
    @patch('spicedham.Spicedham.backend')
    def test_load_backend(self, mock_backend, mock_iter_entry_points):
        # NOTE(review): work in progress -- the assertions below reference
        # names (mock_plugin_object, mock_plugin_class, mock_sqlalchemy_orm)
        # that are only defined in the commented-out setup, so this test
        # raises NameError as written.
        sh = Spicedham()
        # NOTE(review): rebinding the injected mocks below discards the
        # objects created by the @patch decorators.
        mock_backend = None
        mock_django_orm = Mock()
        mock_iter_entry_points = Mock()
        # mock_plugin_class = Mock()
        # mock_plugin_object = Mock()
        # mock_plugin_class.return_value = mock_plugin_object
        # mock_django_orm.load.return_value = mock_plugin_class()
        #h= mock_django_orm.name = 'djangoorm'
        # mock_sqlalchemy_orm = Mock()
        #mock_sqlalchemy_orm.name = 'sqlalchemy'
        #mock_plugin_class.return_value = mock_plugin_object
        #mock_sqlalchemy_orm.load.return_value = mock_plugin_class()
        #mock_iter_entry_points.__iter__.return_value = [mock_django_orm,
        #    mock_sqlalchemy_orm]
        # Test the first run with the django_orm plugin
        ret = sh._load_backend()
        print 'mm', mock_iter_entry_points.mock_calls
        self.assertEqual(ret, mock_plugin_object)
        # Test the second run with the django_orm plugin
        ret = sh._load_backend()
        self.assertEqual(ret, mock_plugin_class.return_value)
        # reset the backend for the next test
        mock_backend = None
        mock_iter_entry_points.return_value = [mock_django_orm,
                                               mock_sqlalchemy_orm]
        # Test the first run with the sqlalchemy plugin
        ret = sh._load_backend()
        self.assertEqual(ret, mock_plugin_class.return_value)
        # Test the second run with the sqlalchemy plugin
        ret = sh._load_backend()
        self.assertEqual(ret, mock_plugin_class.return_value)

    @patch('spicedham.Spicedham._load_backend')
    @patch('spicedham.iter_entry_points')
    def test_load_plugins(self, mock_iter_entry_points, mock_load_backend):
        # NOTE(review): references module-level ``spicedham`` and
        # ``load_plugins`` which are never imported here -- NameError as
        # written.
        #plugin0 = Mock()
        plugin0Object = Mock()
        #plugin0Class = Mock(return_value=plugin0Object)
        #plugin0.load = Mock(return_value=plugin0Class)
        #plugin1 = Mock()
        plugin1Object = Mock()
        #plugin1Class = Mock(return_value=plugin1Object)
        #plugin1.load = Mock(return_value=plugin1Class)
        #plugin2 = Mock()
        plugin2Object = Mock()
        #plugin2Class = Mock(return_value=plugin2Object)
        #plugin2.load = Mock(return_value=plugin2Class)
        input_plugins = [plugin0Object, plugin1Object, plugin2Object]
        expected_plugins = [plugin0Object.load, plugin1Object.load, plugin2Object.load]
        mock_iter_entry_points.return_value = input_plugins
        self.assertEqual(spicedham._plugins, None)
        # now load the plugins
        load_plugins()
        mock_iter_entry_points.assert_called_with(group='spicedham.classifiers', name=None)
        self.assertEqual(spicedham._plugins, expected_plugins)
        # now load the plugins again, they should not change
        mock_iter_entry_points.called = False
        load_plugins()
        self.assertEqual(mock_iter_entry_points.called, False)
        self.assertEqual(spicedham._plugins, input_plugins)
| Python | 0 | |
ba49a66b401bc32e57abede6adc5a0f933e8834a | Add tests for view helpers | tests/test_views_helpers.py | tests/test_views_helpers.py | from django.test import RequestFactory
from django_cas_ng.views import (
_service_url,
_redirect_url,
_login_url,
_logout_url,
)
#
# _service_url tests
#
def test_service_url_helper():
    """_service_url reproduces the request's absolute URL."""
    request = RequestFactory().get('/login/')
    assert _service_url(request) == 'http://testserver/login/'
def test_service_url_helper_as_https():
    """A secure request yields an https service URL."""
    request = RequestFactory().get('/login/', secure=True)
    assert _service_url(request) == 'https://testserver/login/'
def test_service_url_helper_with_redirect():
    """The redirect target is appended as a URL-encoded ``next`` parameter."""
    request = RequestFactory().get('/login/', secure=True)
    result = _service_url(request, redirect_to='https://testserver/landing-page/')
    assert result == ('https://testserver/login/'
                      '?next=https%3A%2F%2Ftestserver%2Flanding-page%2F')
#
# _redirect_url tests
#
def test_redirect_url_with_url_as_get_parameter():
    """An explicit ``next`` GET parameter is used as the redirect target."""
    request = RequestFactory().get('/login/', data={'next': '/landing-page/'},
                                   secure=True)
    assert _redirect_url(request) == '/landing-page/'
def test_redirect_url_falls_back_to_cas_redirect_url_setting(settings):
    """Without a ``next`` param and with the referer ignored, CAS_REDIRECT_URL wins."""
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = '/landing-page/'
    request = RequestFactory().get('/login/', secure=True)
    assert _redirect_url(request) == '/landing-page/'
def test_params_redirect_url_preceeds_settings_redirect_url(settings):
    """The ``next`` GET parameter overrides CAS_REDIRECT_URL.

    (The "preceeds" in the test name is a historic typo for "precedes",
    kept so the collected test id stays stable.)
    """
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = '/landing-page/'
    request = RequestFactory().get('/login/', data={'next': '/override/'},
                                   secure=True)
    assert _redirect_url(request) == '/override/'
def test_redirect_url_falls_back_to_http_referrer(settings):
    """When the referer is honoured it takes priority over CAS_REDIRECT_URL."""
    settings.CAS_IGNORE_REFERER = False
    settings.CAS_REDIRECT_URL = '/wrong-landing-page/'
    request = RequestFactory().get('/login/', secure=True,
                                   HTTP_REFERER='/landing-page/')
    assert _redirect_url(request) == '/landing-page/'
def test_redirect_url_strips_domain_prefix(settings):
    """An absolute CAS_REDIRECT_URL is reduced to its path component."""
    settings.CAS_IGNORE_REFERER = True
    settings.CAS_REDIRECT_URL = 'https://testserver/landing-page/'
    request = RequestFactory().get('/login/', secure=True)
    assert _redirect_url(request) == '/landing-page/'
#
# _login_url tests
#
def test_login_url_helper(settings):
    """The CAS login URL carries the URL-encoded ``service`` parameter."""
    settings.CAS_RENEW = False
    settings.CAS_EXTRA_LOGIN_PARAMS = False
    settings.CAS_SERVER_URL = 'http://www.example.com/cas/'
    assert _login_url('http://testserver/') == (
        'http://www.example.com/cas/login?service=http%3A%2F%2Ftestserver%2F')
def test_login_url_helper_with_extra_params(settings):
    """Extra login params are merged into the query string."""
    settings.CAS_RENEW = False
    settings.CAS_EXTRA_LOGIN_PARAMS = {'test': '1234'}
    settings.CAS_SERVER_URL = 'http://www.example.com/cas/'
    result = _login_url('http://testserver/')
    # The parameter order is dict-dependent, so check each fragment on its own.
    assert 'service=http%3A%2F%2Ftestserver%2F' in result
    assert 'test=1234' in result
def test_login_url_helper_with_renew(settings):
    """CAS_RENEW adds ``renew=true`` alongside the ``service`` parameter."""
    settings.CAS_RENEW = True
    settings.CAS_EXTRA_LOGIN_PARAMS = None
    settings.CAS_SERVER_URL = 'http://www.example.com/cas/'
    result = _login_url('http://testserver/')
    # The parameter order is unspecified, so check each fragment on its own.
    assert 'service=http%3A%2F%2Ftestserver%2F' in result
    assert 'renew=true' in result
#
# _logout_url tests
#
def test_logout_url_helper(settings):
    """The logout URL is the CAS server's bare logout endpoint."""
    settings.CAS_SERVER_URL = 'https://www.example.com/cas/'
    request = RequestFactory().get('/logout/')
    assert _logout_url(request) == 'https://www.example.com/cas/logout'
def test_logout_url_helper_with_redirect(settings):
    """next_page is expanded to an absolute URL and passed as ``url``."""
    settings.CAS_SERVER_URL = 'https://www.example.com/cas/'
    request = RequestFactory().get('/logout/')
    assert _logout_url(request, next_page='/landing-page/') == (
        'https://www.example.com/cas/logout'
        '?url=http%3A%2F%2Ftestserver%2Flanding-page%2F')
| Python | 0 | |
3f84a3cb50e18ce9df96a9173d0be180633aad0d | Add polynomial learning example | Examples/polynomial_approximation.py | Examples/polynomial_approximation.py | """
Example of neural network learning a polynomial equation. Test polynomial is f(x) = (6x^2 + 3x) ÷ (3x)
Training is run on x values from 1.0 to 100.0
"""
from mazex import MazeX
import numpy as np
import random
import math
import matplotlib.pyplot as plt
# Accumulates the network's error at x = 12 after every training step, for the plot.
graph_data = []

# Build the network: 1 input -> 20 -> 4 -> 1 output with relu/relu/linear
# activations and a small learning rate.
# NOTE(review): MazeX's constructor signature is assumed from this usage --
# confirm against the mazex package.
net = MazeX([1, 20, 4, 1], ["relu", "relu", 'lin'], learning_constant=0.00001)


# Evaluate the network at x = 12, where f(12) = (6*144 + 36) / 36 = 25;
# print how far off the prediction is and log the error for the graph.
def check(run):
    guess = net.forward(np.array([[12.0]]))
    print(f"run {run} OFF BY: {25 - guess[0][0]}")
    graph_data.append(25 - guess[0][0])


# Run 100 training steps on random x values in [1, 100] so the network can
# learn f(x) = (6x^2 + 3x) / (3x), which simplifies to 2x + 1.
for i in range(100):
    t = random.uniform(1.0, 100.0)
    ans = ((6 * math.pow(t, 2)) + (3 * t)) / (3 * t)
    Y = np.full((1, 1), ans)
    X = np.full((1, 1), t)
    net.train(X, Y)
    check(i)

# Plot the per-step error for visual feedback of learning progress and save
# the figure to the same directory as this script.
plt.plot(graph_data)
plt.ylabel('Error')
plt.xlabel("training run")
plt.title('Error over time')
plt.savefig(f'Polynomial_approximation.png')
| Python | 0.01065 | |
abe40e3c82ef1f351275a59b2e537f43530caa0c | Clean up db script (remove articles older than two days). | app/cleanup_stories.py | app/cleanup_stories.py | from pymongo import MongoClient
from fetch_stories import get_mongo_client, close_mongo_client
from bson import ObjectId
from datetime import datetime, timedelta
def remove_old_stories():
    """Delete articles created more than two days ago.

    Relies on MongoDB ObjectIds embedding their creation timestamp: any
    document whose ``_id`` sorts below an ObjectId built from the
    two-days-ago timestamp was created before that moment.
    """
    client = get_mongo_client()
    try:
        db = client.get_default_database()
        article_collection = db['articles']
        # utcnow() yields a naive UTC datetime; ObjectId.from_datetime
        # treats naive datetimes as UTC.
        two_days_ago = datetime.utcnow() - timedelta(days=2)
        cutoff_id = ObjectId.from_datetime(two_days_ago)
        query = {
            '_id': {'$lt': cutoff_id}
        }
        # Collection.remove() is deprecated in PyMongo 3; delete_many() is
        # the supported bulk-delete API with identical filter semantics.
        article_collection.delete_many(query)
    finally:
        # Always release the connection, even if the delete fails.
        close_mongo_client(client)
def main():
    """Entry point: purge stories older than two days."""
    remove_old_stories()

if __name__ == '__main__':
    main()
| Python | 0 | |
4bc3d5fb8197502c6eaddc055babd9ce679909bd | Move make_topicspace.py to outer folder. | make_topicspace.py | make_topicspace.py | import os, sys, logging, scipy, joblib
import math
import argparse
from toolset.corpus import Corpus
from gensim import corpora, models, matutils
from sklearn.cluster import MiniBatchKMeans as mbk
from toolset import mogreltk
def make_topicspace(data_file_path, stopwords_file_path=None,
n_topics=300, method='lda', n_clusters=8):
# Allow gensim to print additional info while executing.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if not os.path.exists(data_file_path+'/formatted'):
print('No corpus file found.')
collection = Corpus(data_file_path+'/formatted',
filepath_dict_path=data_file_path+'/filepath_dict.txt')
# First pass of the collection to create the dictionary.
if not os.path.exists(data_file_path + '/dictionary.txt'):
print('Generating dictionary...')
dictionary = corpora.Dictionary()
batch_size = 0
max_batch_size = 2000
batch = []
for i, text in enumerate(collection.document_generator()):
if stopwords_file_path is not None:
batch.append(mogreltk.stem(text, stopwords_file_path))
else:
batch.append(mogreltk.stem(text))
batch_size += 1
if batch_size >= max_batch_size:
dictionary.add_documents(batch, prune_at=5000)
batch_size = 0
batch = []
dictionary.add_documents(batch, prune_at=5000)
dictionary.filter_extremes(no_below=50, no_above=0.3)
joblib.dump(dictionary, data_file_path + '/dictionary.txt')
# Second pass of the collection to generate the bag of words representation.
if not os.path.exists(data_file_path + '/corpus.txt'):
print('Generating corpus...')
if not 'dictionary' in locals():
dictionary = joblib.load(data_file_path + '/dictionary.txt')
if stopwords_file_path is not None:
corpus = [dictionary.doc2bow(mogreltk.stem(text, stopwords_file_path))
for text in collection.document_generator()]
else:
corpus = [dictionary.doc2bow(mogreltk.stem(text))
for text in collection.document_generator()]
joblib.dump(corpus, data_file_path + '/corpus.txt')
# Transform from BoW representation to tf-idf.
if not os.path.exists(data_file_path + '/tfidf_model.txt'):
print('Generating tf-idf matrix...')
if not 'corpus' in locals():
corpus = joblib.load(data_file_path + '/corpus.txt')
tfidf = models.TfidfModel(corpus)
joblib.dump(tfidf, data_file_path + '/tfidf_model.txt')
corpus_tfidf = tfidf[corpus]
tfidf_sparse = matutils.corpus2csc(corpus_tfidf)
tfidf_sparse = scipy.sparse.csc_matrix.transpose(tfidf_sparse).tocsr()
joblib.dump(tfidf_sparse, data_file_path + '/tfidf_sparse.txt')
# Apply Latent Dirichlet Allocation.
if not os.path.exists(data_file_path + '/topic_model.txt'):
if not 'dictionary' in locals():
dictionary = joblib.load(data_file_path + '/dictionary.txt')
if not 'corpus' in locals():
corpus = joblib.load(data_file_path + '/corpus.txt')
if not 'tfidf' in locals():
tfidf = joblib.load(data_file_path + '/tfidf_model.txt')
corpus_tfidf = tfidf[corpus]
if method == 'lsa':
print('Applying Latent Semantic Analysis for {} topics.'.format(n_topics))
lsa = models.lsimodel.LsiModel(corpus=corpus_tfidf, id2word=dictionary,
num_topics=n_topics)
joblib.dump(lsa, data_file_path + '/topic_model.txt')
transformed_corpus = lsa[corpus]
else:
print('Applying Latent Dirichlet Allocation for {} topics.'.format(n_topics))
lda = models.ldamodel.LdaModel(corpus=corpus_tfidf, id2word=dictionary,
num_topics=n_topics, passes=2)
joblib.dump(lda, data_file_path + '/topic_model.txt')
transformed_corpus = lda[corpus]
# Convert topic space matrix to sparse in the Compressed Sparse Row format.
topic_space = matutils.corpus2csc(transformed_corpus)
# Transpose the topic space matrix because it will be used with sklearn and
# it needs the documents in the rows.
topic_space = scipy.sparse.csc_matrix.transpose(topic_space).tocsr()
joblib.dump(topic_space, data_file_path + '/topic_space.txt')
# Apply clustering using KMeans
if not os.path.exists(data_file_path + '/kmodel.txt'):
if not 'topic_space' in locals():
topic_space = joblib.load(data_file_path + '/topic_space.txt')
kmodel = mbk(n_clusters=n_clusters, n_init=10, reassignment_ratio=0.03, verbose=True)
kmodel.fit(topic_space)
dist_space = kmodel.transform(topic_space)
joblib.dump(kmodel, data_file_path + '/kmodel.txt')
joblib.dump(dist_space, data_file_path + '/dist_space.txt')
if not os.path.exists('{}/lemmatizer.txt'.format(data_file_path)):
lemmatizer = mogreltk.Lemmatizer()
lemmatizer.fit(collection.document_generator(), stopwords_file_path)
joblib.dump(lemmatizer, '{}/lemmatizer.txt'.format(data_file_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process input filepath.')
parser.add_argument('data_file_path', type=str,
help='The path to the data directory.')
parser.add_argument('-s', '--stop', type=str,
help='The path to the stopwords file.')
parser.add_argument('-t', '--n_topics', type=int,
help='The number of topics that will be extracted.')
parser.add_argument('-m', '--method', type=str,
help='The topic modeling method to be used.')
parser.add_argument('-k', '--n_clusters', type=int ,
help='The number of clusters to be created.')
args = parser.parse_args()
make_topicspace(data_file_path=args.data_file_path, stopwords_file_path=args.stop,
n_topics=args.n_topics, method=args.method, n_clusters=args.n_clusters)
| Python | 0 | |
ba590d28810409fa57783e6d29a651790f865e5c | create base api exceptions module | apps/api/exceptions.py | apps/api/exceptions.py | import json
from tastypie.exceptions import TastypieError
from tastypie.http import HttpResponse
class CustomBadRequest(TastypieError):
"""
This exception is used to interrupt the flow of processing to immediately
return a custom HttpResponse.
"""
def __init__(self, success=False, code="", message=""):
self._response = {
"error": {
"success": success or False,
"code": code or "not_provided",
"message": message or "No error message was provided."}}
@property
def response(self):
return HttpResponse(
json.dumps(self._response),
content_type='application/json')
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.