commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
88d2ad776518d62a66fa3b8f7dd7520cff3debfc | Create bulk_parse.py | scripts/bulk_parse.py | scripts/bulk_parse.py | Python | 0.000008 | ||
10d71b1208175eac4af0a20d7ee0a8176c7829ef | add new rename script to prepend to *.c files | rename/prepend.py | rename/prepend.py | import os
import sys
if __name__ == '__main__':
    # Prepend a prefix to the name of every *.c file under a directory tree.
    # usage: prepend.py <path> <prepend>
    # Fixed: the original checked `len(sys.argv) < 2` but reads argv[2], so a
    # missing prefix crashed with IndexError instead of printing usage; also
    # converted Python 2 print statements to Python 3 calls.
    if len(sys.argv) < 3:
        print('usage: <path> <prepend>')
        sys.exit()
    exts = ['.c']
    change_count = 0
    for root, dirs, files in os.walk(sys.argv[1]):
        for filename in files:
            if any(filename.lower().endswith(ext) for ext in exts):
                # Skip files that already contain the prefix anywhere in the name.
                if sys.argv[2] not in filename:
                    os.rename(os.path.join(root, filename), os.path.join(root, sys.argv[2] + filename))
                    print(os.path.join(root, sys.argv[2] + filename))
                    change_count += 1
    print('files changed: ', change_count)
| Python | 0 | |
b20b8bc06b6141fad1fbab9befa184644821351f | add joblib02.py | trypython/extlib/joblib02.py | trypython/extlib/joblib02.py | # coding: utf-8
"""
joblibモジュールについてのサンプルです。
joblib.Parallel の利用にて joblib側のログを出力する方法について。
"""
import datetime
import os
import random
import time
import joblib as job
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr, hr
NOW = datetime.datetime.now
RND = random.Random()
# -1 tells joblib.Parallel to use every available CPU core.
CPU_COUNT = -1
# Log output (brief)
LOG_VERBOSE = 1
# Log output (verbose: joblib reports every iteration)
LOG_VERBOSE_ALL_ITERATION_REPORT = 10
class Sample(SampleBase):
    """Demonstrates joblib.Parallel's ``verbose`` option.

    Besides the CPU count, joblib's Parallel() takes a ``verbose`` int that
    controls its internal progress logging:
      * non-zero : brief progress log
      * >= 10    : progress log for every single iteration
    """

    def exec(self):
        def run_parallel(verbosity):
            # Launch the same four fake-heavy tasks and report results/elapsed.
            started = NOW()
            outcome = job.Parallel(n_jobs=CPU_COUNT, verbose=verbosity)(
                [
                    job.delayed(heavy_proc)(f'value-{i}', RND.randrange(1, 3), True)
                    for i in range(1, 5)
                ]
            )
            finished = NOW()
            pr('job-results', outcome)
            pr('total elapsed', (finished - started).seconds)

        run_parallel(LOG_VERBOSE)
        hr('log-verbose-all-iteration-report')
        run_parallel(LOG_VERBOSE_ALL_ITERATION_REPORT)
def heavy_proc(value: str, sleep_seconds: int, silent: bool) -> dict:
    """Pretend to do heavy work by sleeping for *sleep_seconds* seconds.

    Returns a dict holding the worker's process id and the whole-second
    elapsed time. When *silent* is False, start/end lines are printed via pr().
    """
    begin = datetime.datetime.now()
    pid = os.getpid()
    if not silent:
        pr('start', f'pid: {pid} [{value}] sleep: {sleep_seconds}')
    time.sleep(sleep_seconds)
    if not silent:
        pr('end', f'pid: {pid} [{value}]')
    return {
        'pid': pid,
        'elapsed': (datetime.datetime.now() - begin).seconds,
    }
def go():
    """Create the Sample and run its demo."""
    obj = Sample()
    obj.exec()
if __name__ == '__main__':
    go()
| Python | 0 | |
02ad029840b2e770bc802fd7f8504498cb0f756d | Add `issubset` and `issuperset` tests | lib/ansible/plugins/test/mathstuff.py | lib/ansible/plugins/test/mathstuff.py | # (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
    """Return True when every element of *a* also occurs in *b*."""
    return set(a).issubset(b)
def issuperset(a, b):
    """Return True when *a* contains every element of *b*."""
    return set(a).issuperset(b)
class TestModule(object):
    ''' Ansible math jinja2 tests '''

    def tests(self):
        # Expose the set-theory tests to Jinja2 under their public names.
        registered = {}
        registered['issubset'] = issubset
        registered['issuperset'] = issuperset
        return registered
| Python | 0.000001 | |
adede4415e36830485429f49b8476f655f3d4929 | Add environment.py | tests/environment.py | tests/environment.py | # -*- coding: UTF-8 -*-
import shutil
from steps.common_steps.common_environment import docker_setup
def before_all(context):
    # Prepare the Docker test environment once for the whole behave run.
    # Both pull and build are skipped, so a pre-existing image is assumed.
    docker_setup(context)
    context.build_or_pull_image(skip_pull=True, skip_build=True)
def after_scenario(context, scenario):
    """Remove the scenario's container unless the user asked to keep it."""
    keep = 'KEEP_CONTAINER_AFTER_TEST' in context.config.userdata
    if not keep:
        context.remove_container()
def after_all(context):
    """Delete the temporary directory created during the run, if any."""
    try:
        target = context.temp_dir
    except AttributeError:
        return
    shutil.rmtree(target)  # FIXME catch exception
| Python | 0.000003 | |
bf86584829f56f91b363f251d77f3157f952db0f | Add tests for masking of data based on being within a range of values | tests/test_cyprep.py | tests/test_cyprep.py | import unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| Python | 0 | |
9c249d3f9d202632b7fd2241d39dfc2e180fd358 | Add ledger tests | tests/test_ledger.py | tests/test_ledger.py | # -*- coding: utf-8 -*-
import pytest
from accounts.ledger import Ledger
# Database migrations run for each test in this module.
# See `conftest.pytest_runtest*`.
# These create the balances and movements tables the Ledger under test uses.
DB_MIGRATIONS = ['0003-create-balances', '0004-create-movements']
# Fixtures ###
@pytest.fixture
def ledger(db):
    # Build a Ledger over the test database connection supplied by the
    # `db` fixture.
    return Ledger(db.connection)
# Tests ###
TOKEN = 'test'  # account token shared by all tests below
AMOUNT = 100  # base amount shared by all tests below
def _get_balance(db, token):
db.execute("SELECT amount FROM balances WHERE token = %s", [token])
res = db.fetchone()
return res and res[0]
def test_balance(db, ledger):
    """A missing account reads as 0; a seeded row reads back its amount."""
    assert ledger.balance(TOKEN) == 0
    db.execute("INSERT INTO balances (token, amount) VALUES (%s, %s)", [TOKEN, AMOUNT])
    db.connection.commit()
    assert ledger.balance(TOKEN) == AMOUNT
def test_deposit(db, ledger):
    """Depositing creates the balance row and records a positive movement."""
    # Account doesn't exist yet
    assert _get_balance(db, TOKEN) is None
    assert ledger.deposit(TOKEN, AMOUNT) is True
    assert _get_balance(db, TOKEN) == AMOUNT
    # The deposit must also be journaled in the movements table.
    db.execute("SELECT amount FROM movements WHERE token = %s", [TOKEN])
    assert db.fetchone()[0] == AMOUNT
def test_withdraw(db, ledger):
    """Withdrawal fails without funds, then succeeds and journals -AMOUNT."""
    assert _get_balance(db, TOKEN) is None
    # Insufficient funds
    assert ledger.withdraw(TOKEN, AMOUNT) is False
    assert _get_balance(db, TOKEN) is None
    # Seed slightly more than the withdrawal so a remainder is observable.
    db.execute("INSERT INTO balances (token, amount) VALUES (%s, %s)", [TOKEN, AMOUNT+10])
    db.connection.commit()
    assert ledger.withdraw(TOKEN, AMOUNT) is True
    assert _get_balance(db, TOKEN) == 10
    # Withdrawals are journaled as negative movements.
    db.execute("SELECT amount FROM movements WHERE token = %s", [TOKEN])
    assert db.fetchone()[0] == -AMOUNT
| Python | 0.000001 | |
65f6b1101aba2086654f2ff0ff3e942f69d584b2 | Add an application that returns spaCy similarity query | app/app.py | app/app.py | from flask import Flask, jsonify
import spacy.en
from numpy import dot
from numpy.linalg import norm
app = Flask(__name__)
# Load the spaCy English pipeline once at import time; loading is expensive.
nlp = spacy.en.English()
def cossim(a, b):
    """Cosine similarity between vectors *a* and *b*."""
    denominator = norm(a) * norm(b)
    return dot(a, b) / denominator
@app.route('/')
def index():
    # Simple liveness/landing endpoint.
    return "Hello, World!"
@app.route('/spaCy/api/similarity/<word1>/<word2>', methods=['GET'])
def get_spacy_sim(word1, word2):
    """Return the cosine similarity between two words' spaCy vectors as JSON.

    NOTE(review): ``.repvec`` is the vector attribute of old spaCy releases —
    confirm the installed version (modern spaCy renamed it to ``.vector``).
    """
    tok1 = nlp(word1)[0]
    tok2 = nlp(word2)[0]
    sim = cossim(tok1.repvec, tok2.repvec)
    # Fixed: removed debug leftover `print type(sim)` (Python 2 print
    # statement, a syntax error on Python 3). The float() cast remains because
    # jsonify cannot serialize numpy scalars.
    return jsonify({'word1': word1, 'word2': word2, 'similarity': float(sim)})
if __name__ == '__main__':
    # Development server only — debug mode must not be enabled in production.
    app.run(debug=True)
| Python | 0.000278 | |
ddd4473f8edc4e7cfc503fc6cdbb570f33f224a4 | Add Preprocessor module Edges to generate possible edges between two entities given the relation type | nala/preprocessing/edges.py | nala/preprocessing/edges.py | import abc
from nala.structures.data import Edge
class EdgeGenerator:
    """
    Abstract class for generating edges between two entities. Each edge represents
    a possible relationship between the two entities
    Subclasses that inherit this class should:
    * Be named [Name]EdgeGenerator
    * Implement the abstract method generate
    * Append new items to the list field "edges" of each Part in the dataset
    """
    @abc.abstractmethod
    def generate(self, dataset):
        """
        Generate candidate edges for every part of the dataset (in place).
        :type dataset: nala.structures.data.Dataset
        """
        return
class SimpleEdgeGenerator(EdgeGenerator):
    """
    Simple implementation of generating edges between the two entities
    if they are contained in the same sentence.
    Implements the abstract class EdgeGenerator.
    :type entity1_class: str
    :type entity2_class: str
    :type relation_type: str
    """
    def __init__(self, entity1_class, entity2_class, relation_type):
        self.entity1_class = entity1_class
        self.entity2_class = entity2_class
        self.relation_type = relation_type
    def generate(self, dataset):
        """
        Append an Edge to part.edges for every pair of annotations of the two
        entity classes that fall within the same sentence.
        :type dataset: nala.structures.data.Dataset
        """
        from itertools import product
        for part in dataset.parts():
            for ann_1, ann_2 in product(
                    (ann for ann in part.annotations if ann.class_id == self.entity1_class),
                    (ann for ann in part.annotations if ann.class_id == self.entity2_class)):
                index_1 = part.get_sentence_index_for_annotation(ann_1)
                index_2 = part.get_sentence_index_for_annotation(ann_2)
                # Fixed: removed leftover interactive debug code that printed
                # the annotation and blocked on input() whenever a sentence
                # index was missing; also use `is not None` instead of `!= None`.
                if index_1 is not None and index_1 == index_2:
                    part.edges.append(
                        Edge(ann_1, ann_2, self.relation_type,
                             part.get_sentence_string_array()[index_1]))
| Python | 0.000003 | |
fffef837502a2af438f1646aa29b0b307038fca1 | Test VP working | comdet/test/test_vp.py | comdet/test/test_vp.py | from __future__ import absolute_import, print_function
import sys
import matplotlib.pyplot as plt
import seaborn.apionly as sns
import PIL
import numpy as np
import timeit
import os
import collections
import comdet.biclustering as bc
import comdet.test.utils as test_utils
import comdet.pme.preference as pref
import comdet.pme.sampling as sampling
import comdet.pme.lsd as lsd
import comdet.pme.vanishing as vp
import comdet.pme.line as line
import comdet.pme.acontrario as ac
def base_plot(segments=[]):
    # Draw the module-level global `gray_image` as a faded backdrop with the
    # given segments on top in black.
    # NOTE(review): the mutable default argument is shared across calls but is
    # only iterated, never mutated, so there is no aliasing bug in practice.
    plt.figure()
    plt.axis('off')
    plt.imshow(gray_image, cmap='gray', alpha=.5)
    for seg in segments:
        seg.plot(c='k', linewidth=1)
def plot_models(models, palette=None, **kwargs):
    """Plot every model, optionally coloring each from *palette* by position.

    Raises RuntimeError when *palette* and an explicit ``color`` kwarg are
    both supplied, since the palette would silently win.
    """
    use_palette = palette is not None
    if use_palette and 'color' in kwargs:
        raise RuntimeError('Cannot specify palette and color simultaneously.')
    for idx, model in enumerate(models):
        options = dict(kwargs)
        if use_palette:
            options['color'] = palette[idx]
        model.plot(**options)
def plot_final_models(x, mod_inliers, palette):
    """Plot each model's inlier segments in its own color; leftovers in black.

    NOTE(review): the branch on mod.point[2] suggests homogeneous coordinates,
    with point[2] == 0 treated as a point at infinity (a line is drawn through
    each segment instead of a ray toward the point) — confirm against the
    vanishing-point model class.
    """
    base_plot()
    sz_ratio = 1.5
    plt.xlim((1 - sz_ratio) * gray_image.size[0], sz_ratio * gray_image.size[0])
    plt.ylim(sz_ratio * gray_image.size[1],(1 - sz_ratio) * gray_image.size[1])
    all_inliers = []
    for ((mod, inliers), color) in zip(mod_inliers, palette):
        all_inliers.append(inliers)
        segments = x[inliers]
        if mod.point[2] != 0:
            mod.plot(color=color, linewidth=1)
        for seg in segments:
            if mod.point[2] != 0:
                midpoint = (seg.p_a + seg.p_b) / 2
                new_seg = lsd.Segment(midpoint, mod.point)
                new_seg.plot(c=color, linewidth=.2, alpha=.3)
            else:
                seg_line = line.Line(np.vstack((seg.p_a[:2], seg.p_b[:2])))
                seg_line.plot(c=color, linewidth=.2, alpha=.3)
            seg.plot(c=color, linewidth=1)
    # Fixed: `reduce` is not a builtin on Python 3 (this module opts into py3
    # semantics via __future__ imports); numpy's ufunc reduction is equivalent.
    remaining_segs = np.logical_not(np.logical_or.reduce(all_inliers))
    for seg in x[remaining_segs]:
        seg.plot(c='k', linewidth=1)
def ground_truth(n_elements, n_groups=5, group_size=50):
    """Build boolean membership masks for consecutive element groups.

    Group *g* covers elements [g*group_size, (g+1)*group_size); returns a
    list of n_groups boolean arrays of length n_elements.
    """
    positions = np.arange(n_elements)
    return [(positions >= g * group_size) & (positions < (g + 1) * group_size)
            for g in range(n_groups)]
# Per-run timing record. Fixed: the namedtuple typename previously read
# "VPStats", mismatching the binding name (breaks pickling and repr clarity).
TestStats = collections.namedtuple("TestStats", ['time'])
def run_biclustering(x, original_models, pref_matrix, deflator, ac_tester,
                     gt_groups, output_prefix, palette='Set1'):
    """Run bi-clustering on the preference matrix and save diagnostic plots.

    Returns a TestStats record with the bi-clustering wall-clock time.
    NOTE(review): original_models and gt_groups are currently unused — the
    ground-truth measures call below is commented out.
    """
    t = timeit.default_timer()
    bic_list = bc.bicluster(deflator, n=5)
    t1 = timeit.default_timer() - t
    print('Time:', t1)
    # Filter out degenerate models/bi-clusters before plotting.
    models, bic_list = test_utils.clean(vp.VanishingPoint, x, ac_tester,
                                        bic_list)
    palette = sns.color_palette(palette, len(bic_list))
    plt.figure()
    pref.plot(pref_matrix, bic_list=bic_list, palette=palette)
    plt.savefig(output_prefix + '_pref_mat.pdf', dpi=600)
    mod_inliers_list = [(mod, ac_tester.inliers(mod)) for mod in models]
    plot_final_models(x, mod_inliers_list, palette=palette)
    plt.savefig(output_prefix + '_final_models.pdf', dpi=600)
    # test_utils.compute_measures(gt_groups, [bic[0] for bic in bic_list])
    return TestStats(time=t1)
def test(x, name, ransac_gen, ac_tester, gt_groups):
    """Run the full vanishing-point pipeline on one example and save plots.

    Builds the preference matrix, then runs both regular and L1-compressed
    bi-clustering, writing all figures under ../results/vp/<name>/.
    Returns the (regular, compressed) TestStats pair.
    NOTE(review): the function name shadows nothing here but reads like a
    pytest test; it is invoked explicitly from __main__ below.
    """
    print(name, len(x))
    output_prefix = '../results/vp/'
    if not os.path.exists(output_prefix):
        os.mkdir(output_prefix)
    output_prefix += name
    if not os.path.exists(output_prefix):
        os.mkdir(output_prefix)
    output_prefix += '/' + name
    base_plot(x)
    plt.savefig(output_prefix + '_data.pdf', dpi=600)
    pref_matrix, orig_models = pref.build_preference_matrix(len(x), ransac_gen,
                                                            ac_tester)
    print('Preference matrix size:', pref_matrix.shape)
    base_plot(x)
    plot_models(orig_models, alpha=0.2)
    plt.savefig(output_prefix + '_original_models.pdf', dpi=600)
    plt.figure()
    pref.plot(pref_matrix)
    plt.savefig(output_prefix + '_pref_mat.pdf', dpi=600)
    print('Running regular bi-clustering')
    deflator = bc.deflation.Deflator(pref_matrix)
    stats_reg = run_biclustering(x, orig_models, pref_matrix, deflator,
                                 ac_tester, gt_groups,
                                 output_prefix + '_bic_reg')
    print('Running compressed bi-clustering')
    compression_level = 128
    deflator = bc.deflation.L1CompressedDeflator(pref_matrix, compression_level)
    stats_comp = run_biclustering(x, orig_models, pref_matrix, deflator,
                                  ac_tester, gt_groups,
                                  output_prefix + '_bic_comp')
    return stats_reg, stats_comp
def print_stats(stats):
    """Print mean/std/median of the ``time`` field across *stats* records."""
    times = [record.time for record in stats]
    summary = 'Time. mean: {0}, std: {1}, median: {2}'.format(
        np.mean(times), np.std(times), np.median(times))
    print(summary)
if __name__ == '__main__':
    # Tee stdout into a log file for the whole run.
    sys.stdout = test_utils.Logger("test_vp.txt")
    sampling_factor = 5
    inliers_threshold = 2 * np.pi * 0.01
    epsilon = 0
    # NOTE(review): hard-coded local dataset path — the script only runs on
    # the author's machine as-is.
    dir_name = '/Users/mariano/Documents/datasets/YorkUrbanDB/'
    stats_list = []
    for i, example in enumerate(os.listdir(dir_name)):
        # if example != 'P1020171':
        #     continue
        # if example != 'P1020829':
        #     continue
        # if example != 'P1020826':
        #     continue
        if not os.path.isdir(dir_name + example):
            continue
        img_name = dir_name + '{0}/{0}.jpg'.format(example)
        gray_image = PIL.Image.open(img_name).convert('L')
        # Detect line segments and wrap them for the VP pipeline.
        segments = lsd.compute(gray_image)
        segments = np.array(segments)
        ac_tester = ac.LocalNFA(segments, epsilon, inliers_threshold)
        sampler = sampling.AdaptiveSampler(int(len(segments) * sampling_factor))
        ransac_gen = sampling.ModelGenerator(vp.VanishingPoint, segments,
                                             sampler)
        # gt_groups = ground_truth(data.shape[0], n_groups=n_groups,
        #                          group_size=50)
        gt_groups = None
        print('-'*40)
        # Fixed seed keeps runs reproducible; swap for the commented line to
        # randomize.
        seed = 0
        # seed = np.random.randint(0, np.iinfo(np.uint32).max)
        print('seed:', seed)
        np.random.seed(seed)
        res = test(segments, example, ransac_gen, ac_tester, gt_groups)
        stats_list.append(res)
        plt.close('all')
    reg_list, comp_list = zip(*stats_list)
    print('Statistics of regular bi-clustering')
    print_stats(reg_list)
    print('Statistics of compressed bi-clustering')
    print_stats(comp_list)
    plt.show()
| Python | 0 | |
0377cf9cc3c2460c2936ec9153edbdb196cff5bf | Add zdt agent | zephyrus/examples/zdt/agent.py | zephyrus/examples/zdt/agent.py | import sys
from itertools import islice
from math import sqrt
from zephyrus.agent import Agent
from zephyrus.message import Message
class ZDTAgent(Agent):
    """Agent that evaluates a ZDT1-style objective on the perceived values."""
    def mainloop(self):
        # NOTE(review): `self.socket_send(str(action))` calls the socket object
        # itself (compare socket_receive.recv() above), and `self.perceive` is
        # not defined in this class (only `act` is) — presumably both behaviors
        # come from the zephyrus Agent base class; confirm.
        msg = self.socket_receive.recv()
        action = self.perceive(msg.content)
        self.socket_send(str(action))
    def act(self, perceived):
        # f1 is the first decision variable; g aggregates the remaining ones.
        f1 = perceived[0]
        g = 1 + 9 * sum(islice(perceived, 1, None)) / (len(perceived) - 1)
        zdt = 1 - sqrt(f1 / g)
        return Message("agent", "RESULT", zdt)
if __name__ == '__main__':
    # Remaining argv entries are forwarded to the Agent constructor.
    ZDTAgent(1, *sys.argv[1:]).start()
| Python | 0.000017 | |
bc812daf7c99b34a3952d933666f240597eb835d | add a spider for Xin Shi Dai board, Ya Zhou catagory. | t66ySpider/t66ySpider/spiders/t66yXinshidaiYazhouSpider.py | t66ySpider/t66ySpider/spiders/t66yXinshidaiYazhouSpider.py | # -*- coding: utf-8 -*-
import scrapy
from t66ySpider.items import T66YspiderXinshidaiItem
class t66yDagaierSpider(scrapy.Spider):
    # NOTE(review): class name says "Dagaier" while the spider crawls the
    # Xin Shi Dai / Ya Zhou board (see `name` below) — presumably copied from
    # another spider; renaming would touch any external imports, so left as is.
    name = 'XinShiDaiYaZhou'
    allowed_domains = ['t66y.com']
    start_urls = ["http://t66y.com/thread0806.php?fid=8&type=1"]
    # The escaped text renders as the board's "next page" link label.
    unicode_next_page = u'\u4e0b\u4e00\u9801'
    def parse(self, response):
        """Queue every thread on the listing page, then follow the next page."""
        thread_hrefs = response.selector.xpath('//h3/a/@href')
        for thread_href in thread_hrefs:
            thread_url = response.urljoin(thread_href.extract())
            yield scrapy.Request(thread_url, callback=self.parse_thread)
        next_page_href = response.selector.xpath(
            "//a[text()='%s']/@href" % self.unicode_next_page)[0]
        next_page_url = response.urljoin(next_page_href.extract())
        yield scrapy.Request(next_page_url, callback=self.parse)
    def parse_thread(self, response):
        """Extract the thread title and all image sources into one item."""
        item = T66YspiderXinshidaiItem()
        item['t_title'] = response.selector.xpath(
            'string(//title)')[0].extract()
        item['t_image_list'] = response.selector.xpath(
            '//input/@src').extract()
        yield item
| Python | 0 | |
25d8cbfd4b59166ba748d5cd42fbcd7ffe925f0e | Allow using exogenous data in hierachical models #124 | tests/hierarchical/test_hierarchy_AU_AllMethods_Exogenous_all_nodes.py | tests/hierarchical/test_hierarchy_AU_AllMethods_Exogenous_all_nodes.py | import pandas as pd
import numpy as np
import pyaf.HierarchicalForecastEngine as hautof
import pyaf.Bench.TS_datasets as tsds
import datetime
#get_ipython().magic('matplotlib inline')
def create_exog_data(b1):
    """Build fake exogenous data derived from the Date column of *b1*.

    Combines past and future dates (future rows are included so forecasts
    have exogenous values available) and expands them into calendar features.
    Returns the (dataframe, variable_names) pair expected by pyaf's
    iExogenousData argument; the variable list excludes the raw 'Date' column.
    """
    lDate1 = b1.mPastData['Date']
    lDate2 = b1.mFutureData['Date']  # not needed. exogenous data are missing when not available.
    # Fixed: Series.append was removed in pandas 2.0; pd.concat is the
    # supported replacement.
    lDate = pd.concat([lDate1, lDate2])
    lExogenousDataFrame = pd.DataFrame()
    lExogenousDataFrame['Date'] = lDate
    lExogenousDataFrame['Date_second'] = lDate.dt.second
    lExogenousDataFrame['Date_minute'] = lDate.dt.minute
    lExogenousDataFrame['Date_hour'] = lDate.dt.hour
    lExogenousDataFrame['Date_dayofweek'] = lDate.dt.dayofweek
    lExogenousDataFrame['Date_day'] = lDate.dt.day
    lExogenousDataFrame['Date_dayofyear'] = lDate.dt.dayofyear
    lExogenousDataFrame['Date_month'] = lDate.dt.month
    # Fixed: Series.dt.week was removed in pandas 2.0; isocalendar().week
    # yields the same ISO week numbers.
    lExogenousDataFrame['Date_week'] = lDate.dt.isocalendar().week
    # a column in the exog data can be of any type
    lExogenousDataFrame['Date_day_name'] = lDate.dt.day_name()
    lExogenousDataFrame['Date_month_name'] = lDate.dt.month_name()
    lExogenousVariables = [col for col in lExogenousDataFrame.columns if col.startswith('Date_')]
    lExogenousData = (lExogenousDataFrame , lExogenousVariables)
    return lExogenousData
# Load the Australian hierarchical benchmark dataset.
b1 = tsds.load_AU_hierarchical_dataset();
df = b1.mPastData;
lEngine = hautof.cHierarchicalForecastEngine()
# Compare all combination methods: bottom-up, top-down, middle-out, optimal.
lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC'];
lEngine.mOptions.mNbCores = 16
# NOTE(review): bare expression — a notebook leftover with no effect here.
lEngine
H = b1.mHorizon;
# lEngine.mOptions.enable_slow_mode();
# lEngine.mOptions.mDebugPerformance = True;
# Train with the fake exogenous calendar features applied to all nodes.
lExogenousData = create_exog_data(b1)
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H, b1.mHierarchy, iExogenousData = lExogenousData);
lEngine.getModelInfo();
#lEngine.standardPlots("outputs/AU");
dfapp_in = df.copy();
# NOTE(review): .tail() result is discarded — another notebook leftover.
dfapp_in.tail()
dfapp_out = lEngine.forecast(dfapp_in, H);
| Python | 0 | |
b135e8e473837909c6847f8a52711527409b5224 | Add windows build tools | tools/build_mwpfh.py | tools/build_mwpfh.py | from __future__ import print_function
import subprocess
import sys
import os
path = os.path.split(__file__)[0]
if path:
os.chdir(path)
environments = ['26', '27', '32', '33', '34']
target = "pypi" if "--push" in sys.argv else "test"
returnvalues = {}
def run(pyver, cmds, target=None):
    """Run ``setup.py`` with *cmds* under the given Windows Python version.

    Output is captured into ``<first-cmd><ver>.log`` in the current directory;
    prints a short OK/FAILED status and returns the process exit code.
    """
    cmd = [r"C:\Python%s\Python.exe" % pyver, "setup.py"] + cmds
    if target:
        cmd += ["-r", target]
    print(" ".join(cmd), end=" ")
    # Fixed: open the log via a context manager so the handle is closed
    # promptly (the original leaked it until garbage collection).
    with open("%s%s.log" % (cmds[0], pyver), 'w') as log:
        retval = subprocess.call(cmd, stdout=log, stderr=subprocess.STDOUT, cwd="..")
    if not retval:
        print("[OK]")
    else:
        print("[FAILED (%i)]" % retval)
    return retval
# Register the package first and abort when the registration log shows failure.
run("27", ["register"], target)
# NOTE(review): this file handle is never closed explicitly; CPython closes it
# at garbage collection, but a with-block would be cleaner.
if 'failed' in open('register27.log').read():
    raise Exception
for pyver in environments:
    print()
    # Remove the stale compiled tokenizer so each Python version rebuilds it.
    try:
        os.unlink('mwparserfromhell/parser/_tokenizer.pyd')
    except WindowsError:
        # File absent. NOTE(review): WindowsError only exists on Windows.
        pass
    # Only build/upload a wheel when the test suite passes for this version.
    if run(pyver, ["test"]) == 0:
run(pyver, ["bdist_wheel", "upload"], target) | Python | 0 | |
002842c4d7db431a4dedc067ef54dab8747d70f4 | add debug statement | library/pyjamas/media/Video.mshtml.py | library/pyjamas/media/Video.mshtml.py |
class Video(Media):
    """MSHTML (Internet Explorer) video widget backed by a Windows Media
    Player ActiveX control, configured through <PARAM> child elements.
    NOTE(review): Python 2 code (print statements); the prints look like
    debug/tracing output."""
    def __init__(self, src=None, **kwargs):
        print "create object"
        obj = DOM.createElement("OBJECT")
        DOM.setAttribute(obj, "TYPE", "application/x-mplayer2")
        #DOM.setAttribute(obj, "type", "application/x-oleobject")
        # The active classid selects a newer WMP control; the commented-out
        # one appears to be an older player CLSID — confirm against MSDN.
        DOM.setAttribute(obj, "classid",
            #"CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95")
             "CLSID:6BF52A52-394A-11d3-B153-00C04F79FAA6")
        print "set element"
        self.setElement(obj)
        print "widget init"
        Media.__init__(self, **kwargs)
        print "setSrc"
        if src:
            self.setSrc(src)
        #self.setID("MediaPlayer")
        # Hide the player's visualization/display area by default.
        self.dispparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.dispparam, "name", "ShowDisplay")
        DOM.setBooleanAttribute(self.dispparam, "VALUE", "false")
        self.getElement().appendChild(self.dispparam)
    def setSrc(self, src):
        # Point the control at a media URL via the URL attribute.
        print "setSrc", src
        #self.srcparam = DOM.createElement("PARAM")
        #DOM.setAttribute(self.srcparam, "name", "FileName")
        #DOM.setAttribute(self.srcparam, "VALUE", src)
        #self.getElement().appendChild(self.srcparam)
        obj = self.getElement()
        print dir(obj)
        DOM.setAttribute(obj, "URL", src)
        #obj.URL = src
    def setControls(self, controls):
        # Toggle the transport controls (play/pause/seek).
        print "setControls", controls
        self.ctrlparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.ctrlparam, "name", "ShowControls")
        DOM.setBooleanAttribute(self.ctrlparam, "VALUE",
                                controls and "true" or "false")
        self.getElement().appendChild(self.ctrlparam)
    def setStatusbar(self, statusbar):
        # Toggle the status bar beneath the video.
        print "setstatus", statusbar
        self.statparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.statparam, "name", "ShowStatusBar")
        DOM.setBooleanAttribute(self.statparam, "VALUE",
                                statusbar and "true" or "false")
        self.getElement().appendChild(self.statparam)
    def setLoop(self, autorewind):
        # "autorewind" restarts playback from the beginning when it ends.
        print "autorewind", autorewind
        self.loopparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.loopparam, "name", "autorewind")
        DOM.setBooleanAttribute(self.loopparam, "VALUE",
                                autorewind and "true" or "false")
        self.getElement().appendChild(self.loopparam)
    def setAutoplay(self, autostart):
        # Start playback automatically once the media loads.
        print "autoplay", autostart
        self.playparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.playparam, "name", "autostart")
        DOM.setBooleanAttribute(self.playparam, "VALUE",
                                autostart and "true" or "false")
        self.getElement().appendChild(self.playparam)
|
class Video(Media):
    """MSHTML (Internet Explorer) video widget backed by a Windows Media
    Player ActiveX control, configured through <PARAM> child elements.
    NOTE(review): Python 2 code (print statements); the prints look like
    debug/tracing output."""
    def __init__(self, src=None, **kwargs):
        print "create object"
        obj = DOM.createElement("OBJECT")
        DOM.setAttribute(obj, "TYPE", "application/x-mplayer2")
        #DOM.setAttribute(obj, "type", "application/x-oleobject")
        # The active classid selects a newer WMP control; the commented-out
        # one appears to be an older player CLSID — confirm against MSDN.
        DOM.setAttribute(obj, "classid",
            #"CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95")
             "CLSID:6BF52A52-394A-11d3-B153-00C04F79FAA6")
        print "set element"
        self.setElement(obj)
        print "widget init"
        Media.__init__(self, **kwargs)
        print "setSrc"
        if src:
            self.setSrc(src)
        #self.setID("MediaPlayer")
        # Hide the player's visualization/display area by default.
        self.dispparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.dispparam, "name", "ShowDisplay")
        DOM.setBooleanAttribute(self.dispparam, "VALUE", "false")
        self.getElement().appendChild(self.dispparam)
    def setSrc(self, src):
        # Point the control at a media URL via the URL attribute.
        print "setSrc", src
        #self.srcparam = DOM.createElement("PARAM")
        #DOM.setAttribute(self.srcparam, "name", "FileName")
        #DOM.setAttribute(self.srcparam, "VALUE", src)
        #self.getElement().appendChild(self.srcparam)
        obj = self.getElement()
        DOM.setAttribute(obj, "URL", src)
        #obj.URL = src
    def setControls(self, controls):
        # Toggle the transport controls (play/pause/seek).
        print "setControls", controls
        self.ctrlparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.ctrlparam, "name", "ShowControls")
        DOM.setBooleanAttribute(self.ctrlparam, "VALUE",
                                controls and "true" or "false")
        self.getElement().appendChild(self.ctrlparam)
    def setStatusbar(self, statusbar):
        # Toggle the status bar beneath the video.
        print "setstatus", statusbar
        self.statparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.statparam, "name", "ShowStatusBar")
        DOM.setBooleanAttribute(self.statparam, "VALUE",
                                statusbar and "true" or "false")
        self.getElement().appendChild(self.statparam)
    def setLoop(self, autorewind):
        # "autorewind" restarts playback from the beginning when it ends.
        print "autorewind", autorewind
        self.loopparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.loopparam, "name", "autorewind")
        DOM.setBooleanAttribute(self.loopparam, "VALUE",
                                autorewind and "true" or "false")
        self.getElement().appendChild(self.loopparam)
    def setAutoplay(self, autostart):
        # Start playback automatically once the media loads.
        print "autoplay", autostart
        self.playparam = DOM.createElement("PARAM")
        DOM.setAttribute(self.playparam, "name", "autostart")
        DOM.setBooleanAttribute(self.playparam, "VALUE",
                                autostart and "true" or "false")
        self.getElement().appendChild(self.playparam)
| Python | 0.000018 |
012acdc7a280b307bbb110449dcfee5d05a77e38 | Create new package (#6379) | var/spack/repos/builtin/packages/r-chemometrics/package.py | var/spack/repos/builtin/packages/r-chemometrics/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RChemometrics(RPackage):
    """R companion to the book "Introduction to Multivariate Statistical Analysis
    in Chemometrics" written by K. Varmuza and P. Filzmoser (2009)."""
    # CRAN package locations (current release plus archived versions).
    homepage = "https://cran.r-project.org/web/packages/chemometrics/index.html"
    url = "https://cran.r-project.org/src/contrib/chemometrics_1.4.2.tar.gz"
    list_url = "https://cran.rstudio.com/src/contrib/Archive/chemometrics"
    # Known versions with their md5 checksums.
    version('1.4.2', '8137b0ca4004add9cc2ea81d2c54427f')
    version('1.4.1', '1e5a89442bb4a61db0da884eedd74fc2')
    version('1.3.9', '2b619791896db1513ca3d714acb68af3')
    version('1.3.8', '7fad828bd094b5485fbf20bdf7d3d0d1')
    version('1.3.7', 'a9e2f32efb1545421dd96185fd849184')
    # R interpreter constraint and CRAN dependencies (build and run time).
    depends_on('r@3.4.0:3.4.9')
    depends_on('r-rpart', type=('build', 'run'))
    depends_on('r-mclust', type=('build', 'run'))
    depends_on('r-lars', type=('build', 'run'))
    depends_on('r-robustbase', type=('build', 'run'))
    depends_on('r-e1071', type=('build', 'run'))
    depends_on('r-pls', type=('build', 'run'))
    depends_on('r-som', type=('build', 'run'))
    depends_on('r-pcapp', type=('build', 'run'))
| Python | 0 | |
aa78a2670766b0a5e093a1876cb402ed513573bd | Add script to explore parameters units | openfisca_france/scripts/parameters/explore_parameters_unit.py | openfisca_france/scripts/parameters/explore_parameters_unit.py | # -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
    """Recursively bucket every leaf parameter of *parameter* by its unit.

    Walks the parameter tree depth-first and appends each leaf to one of
    five buckets: 'scale' (bracket scales), 'none' (no unit), 'rate' ("/1"),
    'currency' or 'year'.

    :param parameter: the ParameterNode to explore.
    :param parameters_by_unit: accumulator dict, created on the first call
        and threaded through the recursion.
    :returns: the accumulator dict mapping bucket name to list of parameters.
    :raises ValueError: when a leaf parameter carries an unknown unit.
    """
    if parameters_by_unit is None:
        parameters_by_unit = dict(
            scale = list(),
            none = list(),
            currency = list(),
            rate = list(),
            year = list(),
            )
    for name, sub_parameter in parameter.children.items():
        if isinstance(sub_parameter, ParameterNode):
            # Internal node: recurse into its children.
            get_parameters_by_unit(sub_parameter, parameters_by_unit)
        elif isinstance(sub_parameter, Scale):
            parameters_by_unit['scale'].append(sub_parameter)
        elif sub_parameter.unit is None:
            parameters_by_unit['none'].append(sub_parameter)
        elif sub_parameter.unit == "/1":
            parameters_by_unit['rate'].append(sub_parameter)
        elif sub_parameter.unit == "currency":
            parameters_by_unit['currency'].append(sub_parameter)
        elif sub_parameter.unit == "year":
            parameters_by_unit['year'].append(sub_parameter)
        else:
            # Fixed typo in the error message ("stange" -> "strange").
            raise ValueError("Parameter {} has a strange unit {}".format(
                sub_parameter.name, sub_parameter.unit))
    return parameters_by_unit
if __name__ == '__main__':
    parameters_by_unit = get_parameters_by_unit(parameters)
    # Summary: how many parameters fall into each unit bucket.
    print('Distribution of parameters types:')
    for type_, sub_parameters in parameters_by_unit.items():
        print(type_, len(parameters_by_unit[type_]))
    print('\n')
    # Then list every parameter that carries no unit at all.
    print('List of parameters with no units')
    for param in parameters_by_unit['none']:
        print (param.name)
| Python | 0 | |
e095b6a76ac36255983d8c69d4899d64178e0ef3 | Add segment_euclidean_length tests module | tests/plantcv/morphology/test_segment_euclidean_length.py | tests/plantcv/morphology/test_segment_euclidean_length.py | import pytest
import cv2
import numpy as np
from plantcv.plantcv import outputs
from plantcv.plantcv.morphology import segment_euclidean_length
def test_segment_euclidean_length(morphology_test_data):
    """Euclidean lengths are recorded for each of the fixture's leaf segments."""
    # Clear previous outputs
    outputs.clear()
    skeleton = cv2.imread(morphology_test_data.skel_img, -1)
    _ = segment_euclidean_length(segmented_img=skeleton,
                                 objects=morphology_test_data.load_segments(morphology_test_data.segments_file, "leaves"))
    # The fixture skeleton has four leaf segments, one length value per segment.
    assert len(outputs.observations['default']['segment_eu_length']['value']) == 4
def test_segment_euclidean_length_bad_input():
    """A closed contour (identical endpoints) must raise RuntimeError."""
    skel = np.zeros((10, 10), dtype=np.uint8)
    # An 8-point closed diamond: its "segment" starts and ends at the same
    # pixel, so no euclidean length can be computed.
    edges = [np.array([[[5, 3]], [[4, 4]], [[3, 5]], [[4, 6]], [[5, 7]], [[6, 6]], [[7, 5]], [[6, 4]]], dtype=np.int32)]
    with pytest.raises(RuntimeError):
        _ = segment_euclidean_length(segmented_img=skel, objects=edges)
| Python | 0.000001 | |
26ab37868e67b5b815cf8df67cc04876ff44c148 | Add file for Nongrammar entities tests | tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py | tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
    # Placeholder suite: isValid tests for non-grammar entities are not
    # implemented yet.
    pass
if __name__ == '__main__':
main() | Python | 0 | |
e80ec7adc6fe71310e1c2adba720be9640a49d0f | test code for midiGenerator | src/test4.py | src/test4.py | import midiGenerator
# Build a short test melody on a single channel and save it to disk.
generator = midiGenerator.MidiGenerator(200,1)
channel = midiGenerator.Channel()
# NOTE(review): Note(43,100,200) presumably takes (pitch, start, end) —
# confirm against midiGenerator.Note.
note = midiGenerator.Note(43,100,200)
channel.addNote(note)
channel.addNote(midiGenerator.Note(45,200,300))
channel.addNote(midiGenerator.Note(57,300,400))
channel.addNote(midiGenerator.Note(38,400,500))
channel.addNote(midiGenerator.Note(33,500,600))
channel.addNote(midiGenerator.Note(45,600,700))
channel.endTrack()
generator.addChannel(channel)
generator.save("t.midi")
| Python | 0 | |
63065390fca52045db0665bbb8f2b4df7a7b57d4 | Implement pivoted Cholesky decomposition and code to do Woodbury solves with them. | gpytorch/utils/pivoted_cholesky.py | gpytorch/utils/pivoted_cholesky.py | import torch
def pivoted_cholesky(matrix, max_iter, error_tol=1e-5):
    """Partial pivoted Cholesky decomposition of a square matrix.

    Greedily eliminates the largest remaining diagonal entry each round and
    stops after ``max_iter`` rounds or once the residual trace falls below
    ``error_tol``.  Returns an (m x n) tensor of computed rows
    (presumably giving the low-rank approximation matrix ~= L' L —
    confirm against callers).
    """
    matrix_size = matrix.size(-1)
    matrix_diag = matrix.diag()
    # TODO: This check won't be necessary in PyTorch 0.4
    if isinstance(matrix_diag, torch.autograd.Variable):
        matrix_diag = matrix_diag.data
    # Residual error is tracked via the remaining (not yet eliminated)
    # diagonal entries; initialised with the full diagonal's 1-norm.
    error = torch.norm(matrix_diag, 1)
    permutation = matrix_diag.new(matrix_size).long()
    torch.arange(0, matrix_size, out=permutation)
    m = 0
    # TODO: pivoted_cholesky should take tensor_cls and use that here instead
    L = matrix_diag.new(max_iter, matrix_size).zero_()
    while m < max_iter and error > error_tol:
        # Pick the largest remaining diagonal entry as the next pivot...
        max_diag_value, max_diag_index = torch.max(matrix_diag[permutation][m:], 0)
        max_diag_index = max_diag_index + m
        # ...and swap it into position m of the permutation.
        pi_m = permutation[m]
        permutation[m] = permutation[max_diag_index][0]
        permutation[max_diag_index] = pi_m
        pi_m = permutation[m]
        L_m = L[m]  # Will be all zeros -- should we use torch.zeros?
        L_m[pi_m] = torch.sqrt(max_diag_value)[0]
        row = matrix[pi_m]
        if isinstance(row, torch.autograd.Variable):
            row = row.data
        pi_i = permutation[m + 1:]
        L_m[pi_i] = row[pi_i]
        if m > 0:
            # Subtract the contribution of previously computed rows.
            L_prev = L[:m].index_select(1, pi_i)
            L_m[pi_i] -= torch.sum(L[:m, pi_m].unsqueeze(1) * L_prev, dim=0)
        L_m[pi_i] /= L_m[pi_m]
        # Downdate the residual diagonal for the eliminated pivot.
        matrix_diag[pi_i] = matrix_diag[pi_i] - (L_m[pi_i] ** 2)
        L[m] = L_m
        error = torch.sum(matrix_diag[permutation[m + 1:]])
        m = m + 1
    return L[:m, :]
def woodbury_factor(low_rank_mat, shift):
    """
    Given a low rank (k x n) matrix V and a shift, returns the
    matrix R so that
        R = (I_k + 1/shift VV')^{-1}V
    to be used in solves with (V'V + shift I) via the Woodbury formula
    """
    n = low_rank_mat.size(-1)  # NOTE(review): n is currently unused.
    k = low_rank_mat.size(-2)
    # Form the k x k "capacitance" matrix  I_k + (1/shift) V V'.
    shifted_mat = (1 / shift) * low_rank_mat.matmul(low_rank_mat.t())
    shifted_mat = shifted_mat + shifted_mat.new(k).fill_(1).diag()
    # Solve shifted_mat * R = V using its Cholesky factor (legacy
    # potrf/potrs API from pre-1.0 PyTorch).
    R = torch.potrs(low_rank_mat, shifted_mat.potrf())
    return R
def woodbury_solve(vector, low_rank_mat, woodbury_factor, shift):
    """
    Solves (sigma*I + VV') x = b via the Woodbury matrix identity.

    Input:
        - vector (size n): right-hand side b.
        - low_rank_mat (k x n): the low-rank factor V.
        - woodbury_factor (k x n): result of woodbury_factor(V, shift).
        - shift (scalar): the shift sigma.
    """
    inv_shift = 1 / shift
    # Low-rank correction term: (1/sigma) V' R b.
    correction = inv_shift * low_rank_mat.t().matmul(woodbury_factor.matmul(vector))
    return inv_shift * (vector - correction)
| Python | 0 | |
21446e16fdc829024450fe2dfe1e7b25006151b4 | Add unittest for yaml database config validation. | test/test_config_dbyaml.py | test/test_config_dbyaml.py | #!/usr/bin/env python2
import unittest
import subprocess
import threading
import tempfile
import os
import shutil
from testdc import *
DAEMON_PATH = './astrond'
TERMINATED = -15
EXITED = 1
class ConfigTest(object):
    """Runs the astrond daemon with a given config file and reports its exit code."""

    def __init__(self, config):
        # Path of the config file handed to the daemon on each run.
        self.config = config
        self.process = None

    def run(self, timeout):
        """Launch the daemon; terminate it after *timeout* seconds if still alive.

        Returns the daemon's return code (TERMINATED when we had to kill it,
        i.e. the config was accepted and the daemon kept running).
        """
        def target():
            self.process = subprocess.Popen([DAEMON_PATH, self.config])
            self.process.communicate()

        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # Daemon outlived the timeout -- stop it so the test can finish.
            self.process.terminate()
            thread.join()
        return self.process.returncode
class TestConfigDBYaml(unittest.TestCase):
    """Validates astrond's yaml-database config handling.

    A valid config must keep the daemon running (TERMINATED by us), while
    reserved control channels or invalid/reserved doid generate ranges must
    make the daemon exit on its own (EXITED).
    """

    @classmethod
    def setUpClass(cls):
        cfg, cls.config_file = tempfile.mkstemp()
        os.close(cfg)
        cls.test_command = ConfigTest(cls.config_file)
        cls.yaml_dir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if cls.config_file is not None:
            os.remove(cls.config_file)
        if cls.yaml_dir is not None:
            shutil.rmtree(cls.yaml_dir)

    @classmethod
    def write_config(cls, config):
        # Overwrite the shared temp config file with the new contents.
        f = open(cls.config_file, "w")
        f.write(config)
        f.close()

    @classmethod
    def run_test(cls, config, timeout = 2):
        cls.write_config(config)
        return cls.test_command.run(timeout)

    @classmethod
    def database_config(cls, control, generate_min, generate_max):
        """Build a yaml-database daemon config.

        Only the control channel and the doid generate range vary between
        the test cases, so they are the only parameters; this replaces six
        near-identical inline config strings.
        """
        return """\
messagedirector:
    bind: 127.0.0.1:57123

general:
    dc_files:
        - %r

roles:
    - type: database
      control: %d
      generate:
        min: %d
        max: %d
      backend:
        type: yaml
        foldername: %r
""" % (test_dc, control, generate_min, generate_max, cls.yaml_dir)

    def test_dbyaml_good(self):
        # Valid config: daemon keeps running until we terminate it.
        config = self.database_config(75757, 1000000, 1000010)
        self.assertEquals(self.run_test(config), TERMINATED)

    def test_dbyaml_reserved_control(self):
        # Control channel 777 is reserved; the daemon must refuse to start.
        config = self.database_config(777, 1000000, 1000010)
        self.assertEquals(self.run_test(config), EXITED)

    def test_yamldb_invalid_generate(self):
        # A zero doid on either end of the generate range is invalid.
        self.assertEquals(self.run_test(self.database_config(75757, 0, 1000010)), EXITED)
        self.assertEquals(self.run_test(self.database_config(75757, 1000000, 0)), EXITED)

    def test_yamldb_reserved_generate(self):
        # 444 and 555 fall into the reserved doid space (original test values).
        self.assertEquals(self.run_test(self.database_config(75757, 444, 1000010)), EXITED)
        self.assertEquals(self.run_test(self.database_config(75757, 1000000, 555)), EXITED)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
e195aef0fa870bf0f471be99a0144a59fdcc5b97 | Create norm_distri_of_proj_valu.py | norm_distri_of_proj_valu.py | norm_distri_of_proj_valu.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import math
x_train = pd.read_csv("Train.csv")
x_test = pd.read_csv("Test.csv")
def log_method(x):
    """Base-2 logarithm of x, with log_method(0) defined as 0."""
    return 0 if x == 0 else math.log(x, 2)
# NOTE(review): Series.order() only exists in old pandas; newer versions
# spell this sort_values() -- confirm the pinned pandas version.
test = x_train["Project_Valuation"].order()
test = test.apply(lambda x: log_method(x))

mean = sum(test) / len(test)
# Fix: the original referenced undefined names ('average' here and
# 'variance' below, which was spelled 'varience' at definition time);
# use the computed mean/variance consistently.
variance = sum((mean - value) ** 2 for value in test) / len(test)
sigma = math.sqrt(variance)

# Fix: matplotlib.mlab was never imported (and mlab.normpdf no longer
# exists), so evaluate the normal pdf explicitly with numpy instead.
values = np.asarray(test)
pdf = np.exp(-((values - mean) ** 2) / (2.0 * sigma ** 2)) / (sigma * np.sqrt(2.0 * np.pi))
plt.plot(test, pdf)
| Python | 0.000005 | |
3724e828ea7c0aa2a910db16c1392390f7c9f7a8 | add a simple schema building tool | spyne/test/interface/build_schema.py | spyne/test/interface/build_schema.py | #!/usr/bin/env python
# This can be used to debug invalid Xml Schema documents.
import sys
from lxml import etree
# Require exactly one argument: the path to the schema document.
if len(sys.argv) != 2:
    print "Usage: %s <path_to_xsd_file>" % sys.argv[0]
    sys.exit(1)

# Building the XMLSchema object is the whole point: per the module
# docstring, lxml raises a detailed parse error for an invalid XSD.
f = open(sys.argv[1])
etree.XMLSchema(etree.parse(f))
| Python | 0 | |
0a5167807d615f59808195aed6114cfa9b293eda | Update migrations to work with Django 1.9. | pybb/migrations/0005_auto_20151108_1528.py | pybb/migrations/0005_auto_20151108_1528.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9b1 on 2015-11-08 23:28
from __future__ import unicode_literals
from django.db import migrations, models
import pybb.util
class Migration(migrations.Migration):
dependencies = [
('pybb', '0004_slugs_required'),
]
operations = [
migrations.AlterField(
model_name='post',
name='user_ip',
field=models.GenericIPAddressField(blank=True, default='0.0.0.0', null=True, verbose_name='User IP'),
),
migrations.AlterField(
model_name='profile',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to=pybb.util.FilePathGenerator(to='pybb/avatar'), verbose_name='Avatar'),
),
migrations.AlterField(
model_name='profile',
name='language',
field=models.CharField(blank=True, choices=[('af', 'Afrikaans'), ('ar', 'Arabic'), ('ast', 'Asturian'), ('az', 'Azerbaijani'), ('bg', 'Bulgarian'), ('be', 'Belarusian'), ('bn', 'Bengali'), ('br', 'Breton'), ('bs', 'Bosnian'), ('ca', 'Catalan'), ('cs', 'Czech'), ('cy', 'Welsh'), ('da', 'Danish'), ('de', 'German'), ('el', 'Greek'), ('en', 'English'), ('en-au', 'Australian English'), ('en-gb', 'British English'), ('eo', 'Esperanto'), ('es', 'Spanish'), ('es-ar', 'Argentinian Spanish'), ('es-mx', 'Mexican Spanish'), ('es-ni', 'Nicaraguan Spanish'), ('es-ve', 'Venezuelan Spanish'), ('et', 'Estonian'), ('eu', 'Basque'), ('fa', 'Persian'), ('fi', 'Finnish'), ('fr', 'French'), ('fy', 'Frisian'), ('ga', 'Irish'), ('gl', 'Galician'), ('he', 'Hebrew'), ('hi', 'Hindi'), ('hr', 'Croatian'), ('hu', 'Hungarian'), ('ia', 'Interlingua'), ('id', 'Indonesian'), ('io', 'Ido'), ('is', 'Icelandic'), ('it', 'Italian'), ('ja', 'Japanese'), ('ka', 'Georgian'), ('kk', 'Kazakh'), ('km', 'Khmer'), ('kn', 'Kannada'), ('ko', 'Korean'), ('lb', 'Luxembourgish'), ('lt', 'Lithuanian'), ('lv', 'Latvian'), ('mk', 'Macedonian'), ('ml', 'Malayalam'), ('mn', 'Mongolian'), ('mr', 'Marathi'), ('my', 'Burmese'), ('nb', 'Norwegian Bokmal'), ('ne', 'Nepali'), ('nl', 'Dutch'), ('nn', 'Norwegian Nynorsk'), ('os', 'Ossetic'), ('pa', 'Punjabi'), ('pl', 'Polish'), ('pt', 'Portuguese'), ('pt-br', 'Brazilian Portuguese'), ('ro', 'Romanian'), ('ru', 'Russian'), ('sk', 'Slovak'), ('sl', 'Slovenian'), ('sq', 'Albanian'), ('sr', 'Serbian'), ('sr-latn', 'Serbian Latin'), ('sv', 'Swedish'), ('sw', 'Swahili'), ('ta', 'Tamil'), ('te', 'Telugu'), ('th', 'Thai'), ('tr', 'Turkish'), ('tt', 'Tatar'), ('udm', 'Udmurt'), ('uk', 'Ukrainian'), ('ur', 'Urdu'), ('vi', 'Vietnamese'), ('zh-hans', 'Simplified Chinese'), ('zh-hant', 'Traditional Chinese')], default='en', max_length=10, verbose_name='Language'),
),
migrations.AlterField(
model_name='profile',
name='time_zone',
field=models.FloatField(choices=[(-12.0, '-12'), (-11.0, '-11'), (-10.0, '-10'), (-9.5, '-09.5'), (-9.0, '-09'), (-8.5, '-08.5'), (-8.0, '-08 PST'), (-7.0, '-07 MST'), (-6.0, '-06 CST'), (-5.0, '-05 EST'), (-4.0, '-04 AST'), (-3.5, '-03.5'), (-3.0, '-03 ADT'), (-2.0, '-02'), (-1.0, '-01'), (0.0, '00 GMT'), (1.0, '+01 CET'), (2.0, '+02'), (3.0, '+03'), (3.5, '+03.5'), (4.0, '+04'), (4.5, '+04.5'), (5.0, '+05'), (5.5, '+05.5'), (6.0, '+06'), (6.5, '+06.5'), (7.0, '+07'), (8.0, '+08'), (9.0, '+09'), (9.5, '+09.5'), (10.0, '+10'), (10.5, '+10.5'), (11.0, '+11'), (11.5, '+11.5'), (12.0, '+12'), (13.0, '+13'), (14.0, '+14')], default=3.0, verbose_name='Time zone'),
),
]
| Python | 0 | |
56a8250baa197285a5727dfbca12adaab81238ab | Add a snippet. | python/tkinter/python3/menu_checkbutton.py | python/tkinter/python3/menu_checkbutton.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# See also: http://effbot.org/tkinterbook/checkbutton.htm
import tkinter as tk
root = tk.Tk()
test_var = tk.IntVar()
#test_var.set(1) # Initialize
def callback():
print("var = ", test_var.get())
# CREATE A TOPLEVEL MENU ######################################################
menubar = tk.Menu(root)
# CREATE A PULLDOWN MENU ######################################################
#
# tearoff:
# "tearoff=1" permet à l'utilisateur de détacher le sous menu dans une
# fenêtre à part.
file_menu = tk.Menu(menubar, tearoff=0)
file_menu.add_checkbutton(label="Checkbutton test", variable=test_var, command=callback)
menubar.add_cascade(label="Test", menu=file_menu)
# DISPLAY THE MENU ############################################################
#
# The config method is used to attach the menu to the root window. The
# contents of that menu is used to create a menubar at the top of the root
# window. There is no need to pack the menu, since it is automatically
# displayed by Tkinter.
root.config(menu=menubar)
root.mainloop()
| Python | 0.000002 | |
fa3a02e6660ce556defc2f2c6008c6eb24eb71c1 | Add a simple sampler for playing wav files triggered by note on messages | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py | import time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
    """Plays a wav file as fixed-size float sample buffers.

    ".../On" messages restart playback from the beginning; ".../Off" stops
    it.  On every scheduled "Send" event one buffer of ``bufferSize``
    samples is emitted on "outbox" (silence while off or past end-of-file).
    """

    # Number of samples emitted per buffer.
    bufferSize = 1024

    def __init__(self, fileName, **argd):
        super(WavVoice, self).__init__(**argd)
        self.on = False
        self.wavFile = wave.open(fileName)
        self.sampleRate = self.wavFile.getframerate()
        # Seconds of audio covered by one buffer; paces real-time sends.
        self.period = float(self.bufferSize)/self.sampleRate
        self.frame = 0
        self.lastSendTime = time.time()
        self.scheduleAbs("Send", self.lastSendTime + self.period)

    def main(self):
        while 1:
            if self.dataReady("inbox"):
                address, arguments = self.recv("inbox")
                # Only the last path segment of the OSC-style address matters.
                address = address.split("/")[-1]
                if address == "On":
                    # Restart playback from the start of the file.
                    self.on = True
                    self.wavFile.rewind()
                    self.frame = 0
                if address == "Off":
                    self.on = False
            if self.dataReady("event"):
                self.recv("event")
                if self.on:
                    if self.frame < self.wavFile.getnframes():
                        # NOTE(review): frombuffer as int16 assumes 16-bit
                        # mono wav data -- confirm the sample files.
                        sample = self.wavFile.readframes(self.bufferSize)
                        sample = numpy.frombuffer(sample, dtype="int16")
                        self.frame += len(sample)
                        if len(sample) < self.bufferSize:
                            # Pad with zeroes
                            padSize = self.bufferSize - len(sample)
                            sample = numpy.append(sample, numpy.zeros(padSize))
                        # Convert to float
                        sample = sample.astype("float64")
                        # Scale to -1 - 1
                        sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
                    else:
                        sample = numpy.zeros(self.bufferSize)
                else:
                    sample = numpy.zeros(self.bufferSize)
                self.send(sample, "outbox")
                # Schedule relative to the previous send time, not "now",
                # to avoid cumulative drift.
                self.lastSendTime += self.period
                self.scheduleAbs("Send", self.lastSendTime + self.period)
            if not self.anyReady():
                self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
| Python | 0 | |
bff1e954213fb7592505c94294eb3800a8b199c3 | Update patternMatch.py | TechInterviews/Python/patternMatch.py | TechInterviews/Python/patternMatch.py | import sys
import re
# Strip only the beginning and ending slashes
def stripSlashes(path):
    """Remove a single leading and a single trailing slash from *path*."""
    if path.startswith('/'):
        path = path[1:]
    return path[:-1] if path.endswith('/') else path
def findBestWildCardMatch(patterns):
    """Pick the best pattern among several matches (unimplemented)."""
    #The best match is wildcards that are rightmost
    #Get the positions of the * and add them to get the largest number to figure out which is rightmost
    # NOTE(review): still a stub -- currently returns None.
    pass
def getRePattern(pattern):
    """Translate a comma-delimited pattern into a regex: ',' -> '/', '*' -> ident chars."""
    with_slashes = pattern.replace(',', '/')
    return with_slashes.replace('*', '[a-zA-Z0-9_]*')
def findBestMatch(patterns, paths):
    """For each path, report the best-matching pattern.

    Appends 'NO MATCH FOUND' for a path no pattern matches; with multiple
    matches the tie-break is delegated to findBestWildCardMatch.
    """
    result = []
    temp = []
    for path in paths:
        # Collect every pattern matching this path.
        temp.clear()
        for pattern in patterns:
            rePattern = getRePattern(pattern)
            if re.search(rePattern, stripSlashes(path)):
                temp.append(pattern)
        if len(temp) > 1:
            # NOTE(review): findBestWildCardMatch is a stub returning None,
            # so ties currently append None.
            result.append(findBestWildCardMatch(temp))
        elif len(temp) == 0:
            result.append("NO MATCH FOUND")
        # NOTE(review): a single unambiguous match is never appended --
        # looks unintended; confirm expected behavior.
    return result
#['foot', 'fell', 'fastest']
# Example to call this program: python34 patternMatch.py <input_file> output_file
def main(args):
input_file = open(args[1], 'r')
output_file = open(args[2], 'w')
pattern_list = []
path_list = []
# Expects correct format in file: int N followed by pattern lines then int M followed by path lines.
N = int(input_file.readline())
for j in range(N):
pattern_list.append(input_file.readline())
M = int(input_file.readline())
for i in range(M):
path_list.append(input_file.readline())
print(findBestMatch(pattern_list, path_list))
input_file.close()
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| import sys
import re
def stripSlashes(path):
if path.startswith('/'):
path = path[1:]
if path.endswith('/'):
path = path[:-1]
return path
def findBestWildCardMatch(patterns):
pass
def getRePattern(pattern):
return pattern.replace(',', '/').replace('*', '[a-zA-Z0-9_]*')
def findBestMatch(patterns, paths):
result = []
temp = []
for path in paths:
temp.clear()
for pattern in patterns:
rePattern = getRePattern(pattern)
if re.search(rePattern, stripSlashes(path)):
temp.append(pattern)
if len(temp) > 1:
result.append(findBestWildCardMatch(temp))
elif len(temp) == 0:
result.append("NO MATCH FOUND")
return result
#['foot', 'fell', 'fastest']
# Example to call this program: python34 patternMatch.py <input_file> output_file
def main(args):
input_file = open(args[1], 'r')
output_file = open(args[2], 'w')
pattern_list = []
path_list = []
# Expects correct format in file: int N followed by pattern lines then int M followed by path lines.
N = int(input_file.readline())
for j in range(N):
pattern_list.append(input_file.readline())
M = int(input_file.readline())
for i in range(M):
path_list.append(input_file.readline())
print(findBestMatch(pattern_list, path_list))
input_file.close()
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| Python | 0 |
bbe0cf1666b4706973bfba73ed77126581026057 | add new test case to test add image from local file system. | integrationtest/vm/virt_plus/other/test_add_local_image.py | integrationtest/vm/virt_plus/other/test_add_local_image.py | '''
New Integration Test for add image from MN local URI.
The file should be placed in MN.
@author: Youyk
'''
import os
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
test_image = '/tmp/zstack_wp_test_local_uri.img'
def test():
os.system('dd if=/dev/zero of=%s bs=1M count=1 seek=300' % test_image)
time.sleep(10)
image_name = 'test-image-%s' % time.time()
image_option = test_util.ImageOption()
image_option.set_name(image_name)
image_option.set_description('test image which is upload from local filesystem.')
image_option.set_url('file://%s' % test_image)
bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)[0]
image_option.set_backup_storage_uuid_list([bs.uuid])
image_option.set_format('raw')
image_option.set_mediaType('RootVolumeTemplate')
image_inv = img_ops.add_root_volume_template(image_option)
time.sleep(10)
image = zstack_image_header.ZstackTestImage()
image.set_creation_option(image_option)
image.set_image(image_inv)
test_obj_dict.add_image(image)
image.check()
vm = test_stub.create_vm(image_name = image_name)
vm.destroy()
image.delete()
os.system('rm -f %s' % test_image)
test_util.test_pass('Test adding image from local stroage pass.')
#Will be called only if exception happens in test().
def error_cleanup():
test_lib.lib_error_cleanup(test_obj_dict)
os.system('rm -f %s' % test_image)
| Python | 0 | |
bfdcebfb287b6c3495e74888ace0409f47b530c9 | add testGroup script | ros_ws/src/crazyswarm/scripts/testGroup.py | ros_ws/src/crazyswarm/scripts/testGroup.py | #!/usr/bin/env python
import numpy as np
from pycrazyswarm import *
Z = 1.5
if __name__ == "__main__":
    swarm = Crazyswarm()
    timeHelper = swarm.timeHelper
    allcfs = swarm.allcfs

    # Put the two crazyflies into different groups so group-addressed
    # commands can target them independently.
    allcfs.crazyfliesById[9].setGroup(1)
    allcfs.crazyfliesById[10].setGroup(2)

    # Only group 1 is told to take off; the land call passes no group
    # (presumably addressing all crazyflies -- confirm against the API).
    allcfs.takeoff(targetHeight=Z, duration=1.0 + Z, group = 1)
    timeHelper.sleep(1.5 + Z)
    allcfs.land(targetHeight=0.06, duration=1.0 + Z)
    timeHelper.sleep(1.5 + Z)
| Python | 0.000001 | |
ae3374305bad49c358a173e26490c5c90b219208 | test for multiple open-read-close cycle | tests/multiple_readings.py | tests/multiple_readings.py | import serial
import struct
import time
import pandas as pd
import numpy as np
def measure():
    """Time one open-read-close cycle against the serial port.

    Returns (open_seconds, read_seconds, close_seconds, bytes_read).
    """
    start_time = time.time()
    with serial.Serial('/dev/cu.usbmodem14121', 1000000, timeout=1) as inport:
        open_time = time.time()
        # Read up to 100 bytes (may be fewer because of the 1 s timeout).
        data = inport.read(100)
        read_time = time.time()
    # Leaving the 'with' block closes the port, so close duration is
    # close_time - read_time.
    close_time = time.time()
    return (open_time - start_time, read_time - open_time, close_time - read_time, len(data))
df = pd.DataFrame.from_records(
(measure() for i in range(100)),
columns=["open", "read", "close", "datalength"])
print(df)
print(df.describe())
| Python | 0 | |
4e3644234fab9cb14a3d511b24bce3ed8a1446e0 | Add in a minor testcase. | tests/scales/test_minor.py | tests/scales/test_minor.py | # Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
    """Yield the first *count* items drawn from iterator *it*."""
    remaining = count
    while remaining > 0:
        yield next(it)
        remaining -= 1
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_acending_iteratation():
    """Ascending natural-minor scale from Bb4 should yield SCALE in order."""
    cs = NaturalMinorScale(Tone(100)) # Bb4
    series = list(take(cs.acending(), 8))
    assert [x._tone_name() for x in series] == SCALE
def test_scale_decending_iteratation():
    """Descending natural-minor scale from Bb5 should yield SCALE reversed.

    Renamed: this def reused the ascending test's name, so Python silently
    discarded the ascending test; the body clearly exercises decending().
    """
    cs = NaturalMinorScale(Tone(1300)) # Bb5
    series = list(take(cs.decending(), 8))
    assert [x._tone_name() for x in series] == list(reversed(SCALE))
| Python | 0.000001 | |
f00fc0d5a7f9e7a1ad325126c5286cf9defd8a5f | Include full tadbit tools test | tests/test_tadbit_tools.py | tests/test_tadbit_tools.py | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from os import path, remove
import pytest # pylint: disable=unused-import
from basic_modules.metadata import Metadata
from tadbit_map_parse_filter_wrapper import tadbit_map_parse_filter
from tadbit_normalize_wrapper import tadbit_normalize
from tadbit_segment_wrapper import tadbit_segment
from tadbit_bin_wrapper import tadbit_bin
@pytest.mark.hic
@pytest.mark.pipeline
def test_tadbit_tools():
"""
Test case to ensure that the tadbit tools code works.
"""
resource_path = path.join(path.dirname(__file__), "data/")
files = {
"parsing:refGenome": resource_path + 'tb.Human.GCA_000001405.22_gem.fasta',
"mapping:refGenome": resource_path + 'tb.Human.GCA_000001405.22_gem.fasta.gem',
"read1": resource_path + 'tb.Human.SRR1658573_1.fastq',
"read2": resource_path + 'tb.Human.SRR1658573_2.fastq'
}
metadata_1 = {}
metadata_1["read1"] = Metadata(
data_type = "hic_reads",
file_type = "fastq",
file_path=files["read1"],
sources=[""],
meta_data={
"visible": True,
"assembly": "hg38"
},
taxon_id=9606)
metadata_1["read2"] = Metadata(
data_type = "hic_reads",
file_type = "fastq",
file_path=files["read1"],
sources=[""],
meta_data={
"visible": True,
"assembly": "hg38"
},
taxon_id=9606)
metadata_1["parsing:refGenome"] = Metadata(
data_type = "sequence_genomic",
file_type = "fasta",
file_path=files["parsing:refGenome"],
sources=[""],
meta_data={
"visible": True,
"assembly": "hg38"
},
taxon_id=9606)
metadata_1["mapping:refGenome"] = Metadata(
data_type = "sequence_mapping_index",
file_type = "gem",
file_path=files["mapping:refGenome"],
sources=[""],
meta_data={
"visible": True,
"assembly": "hg38"
},
taxon_id=9606)
config = {
"project": resource_path,
"execution": resource_path,
"description": "TADbit tools test",
"mapping:rest_enzyme": "MboI",
"mapping:iterative_mapping": False,
"filtering:filters": [
"1",
"2",
"3",
"4",
"9",
"10"
],
"chromosomes": "",
"mapping:windows": "1:20 1:40",
"filtering:min_dist_RE": "500",
"filtering:min_fragment_size": "50",
"filtering:max_fragment_size": "100000"
}
if path.isfile(resource_path + "/data/paired_reads.bam"):
remove(resource_path + "/data/paired_reads.bam")
if path.isfile(resource_path + "/data/paired_reads.bam.bai"):
remove(resource_path + "/data/paired_reads.bam.bai")
if path.isfile(resource_path + "/data/map_parse_filter_stats.tar.gz"):
remove(resource_path + "/data/map_parse_filter_stats.tar.gz")
tb_handle = tadbit_map_parse_filter(configuration=config)
tb_1_files, tb_1_meta = tb_handle.run(files, metadata_1, [])
print(tb_1_files)
for tb_file in tb_1_files:
assert path.isfile(tb_1_files[tb_file]) is True
assert path.getsize(tb_1_files[tb_file]) > 0
files = {
"bamin": tb_1_files["paired_reads"]
}
metadata_2 = {}
metadata_2["bamin"] = tb_1_meta["paired_reads"]
config = {
"project": resource_path,
"execution": resource_path,
"description": "TADbit tools test",
"resolution": "100000",
"segmentation:chromosome_names": "",
"segmentation:callers": [
"1",
"2"
]
}
tb_handle = tadbit_segment(configuration=config)
tb_2_files, tb_2_meta = tb_handle.run(files, metadata_2, [])
print(tb_2_files)
for tb_file in tb_2_files:
assert path.isfile(tb_2_files[tb_file]) is True
assert path.getsize(tb_2_files[tb_file]) > 0
files = {
"bamin": tb_1_files["paired_reads"],
"refGenomes_folder": resource_path + 'tb.Human.GCA_000001405.22_gem.fasta'
}
metadata_3 = {}
metadata_3["bamin"] = tb_1_meta["paired_reads"]
config = {
"project": resource_path,
"execution": resource_path,
"description": "TADbit tools test",
"resolution": "100000",
"min_perc": "2",
"max_perc": "99.8",
"normalization": "Vanilla"
}
tb_handle = tadbit_normalize(configuration=config)
tb_3_files, tb_3_meta = tb_handle.run(files, metadata_3, [])
print(tb_3_files)
for tb_file in tb_3_files:
assert path.isfile(tb_3_files[tb_file]) is True
assert path.getsize(tb_3_files[tb_file]) > 0
files = {
"bamin": tb_1_files["paired_reads"],
"hic_biases": tb_3_files["hic_biases"]
}
metadata_4 = {}
metadata_4["bamin"] = tb_1_meta["paired_reads"]
metadata_4["hic_biases"] = tb_3_meta["hic_biases"]
config = {
"project": resource_path,
"execution": resource_path,
"description": "TADbit tools test",
"resolution": "100000",
"coord1": ""
}
tb_handle = tadbit_bin(configuration=config)
tb_4_files, tb_4_meta = tb_handle.run(files, metadata_4, [])
print(tb_4_files)
for tb_file in tb_4_files:
assert path.isfile(tb_4_files[tb_file]) is True
assert path.getsize(tb_4_files[tb_file]) > 0 | Python | 0 | |
e98065e04cfd52bb369d3b07d29f37fb458baa91 | add solution for Merge Intervals | src/mergeIntervals.py | src/mergeIntervals.py | # Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    # @param intervals, a list of Interval
    # @return a list of Interval
    def merge(self, intervals):
        """Merge overlapping intervals, returning sorted [start, end] pairs."""
        spans = sorted([iv.start, iv.end] for iv in intervals)
        merged = []
        for span in spans:
            if merged and span[0] <= merged[-1][1]:
                # Overlaps (or touches) the previous span: extend it.
                merged[-1][1] = max(merged[-1][1], span[1])
            else:
                merged.append(span)
        return merged
| Python | 0 | |
6f8c64ed6f99493811cab54137a1eed44d851260 | Add python script to get group and module given a class name | scripts/GetGroupAndModuleFromClassName.py | scripts/GetGroupAndModuleFromClassName.py | #!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
| Python | 0.000003 | |
3ff18745a561ab28e04d9218e00fc0aa367631f5 | add `solution` module | src/obpds/solution.py | src/obpds/solution.py | #
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
import numpy
__all__ = ['Solution', 'EquilibriumSolution']
class Solution(object):
    """Abstract base for solution containers."""
    pass


class FlatbandSolution(Solution):
    """Container for a flat-band solution (T, N, x, Ev, Ec, Ei)."""

    def __init__(self, T, N, x, Ev, Ec, Ei):
        # Store every argument verbatim on the instance.
        self.T, self.N, self.x = T, N, x
        self.Ev, self.Ec, self.Ei = Ev, Ec, Ei


class EquilibriumSolution(Solution):
    """Container for an equilibrium solution, plus a zero-initialised Ef."""

    def __init__(self, T, N, x, Na, Nd, Ev, Ec, Ei, psi, n, p):
        names = ('T', 'N', 'x', 'Na', 'Nd', 'Ev', 'Ec', 'Ei', 'psi', 'n', 'p')
        for name, value in zip(names, (T, N, x, Na, Nd, Ev, Ec, Ei, psi, n, p)):
            setattr(self, name, value)
        # Ef placeholder (presumably the Fermi level -- confirm), one zero
        # per mesh point.
        self.Ef = numpy.zeros(N)
9a2f68d14ae2d576c59035c67ffa12c96b4f748a | Add provider tests | test_saau.py | test_saau.py | from saau.loading import load_image_providers, load_service_providers
def test_load_service_providers():
    """Loading service providers should return a truthy (non-empty) result."""
    assert load_service_providers(None)


def test_load_image_providers():
    """Loading image providers should return a truthy (non-empty) result."""
    assert load_image_providers(None)
018be657ea3e088b3116e8a78fe81713a2a30e29 | Add tifftopdf, a frontend for tiff2pdf and tiffinfo. | tifftopdf.py | tifftopdf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# 2012-06-29
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to NAME. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/
'''Description.'''
import os
import sys
import subprocess
from multiprocessing import Pool, Lock
globallock = Lock()
def checkfor(args):
"""Make sure that a program necessary for using this script is available."""
try:
f = open('/dev/null')
subprocess.call(args, stderr=subprocess.STDOUT, stdout=f)
f.close()
except:
print "Required program '{}' not found! exiting.".format(args[0])
sys.exit(1)
def process(fname):
    """Convert the TIFF file *fname* into a PDF of the same physical size.

    Queries `tiffinfo` for width/length/resolution, then invokes `tiff2pdf`.
    Runs inside a multiprocessing Pool worker; console output is serialized
    through the module-level `globallock`.
    """
    try:
        args = ['tiffinfo', fname]
        # Gather information about the TIFF file.
        # NOTE(review): relies on fixed token positions in tiffinfo's output;
        # raise early when the expected 'Width:' marker is not where expected.
        txt = subprocess.check_output(args).split()
        if not txt[7] == 'Width:':
            raise ValueError
        width = float(txt[8])
        length = float(txt[11])
        # txt[13] carries a trailing comma, hence the [:-1] strip.
        xres = float(txt[13][:-1])
        yres = float(txt[14])
        # Create the output file name.
        if fname.endswith(('.tif', '.TIF')):
            outname = fname[:-4]
        elif fname.endswith(('.tiff', '.TIFF')):
            outname = fname[:-5]
        outname = outname.replace(' ', '_') + '.pdf'
        # Page size in inches = pixels / resolution.
        # NOTE(review): length is divided by xres rather than yres here —
        # looks suspicious for non-square pixels; confirm intent.
        args = ['tiff2pdf', '-w', str(width/xres), '-l', str(length/xres),
                '-x', str(xres), '-y', str(yres), '-o', outname, fname]
        subprocess.call(args)
        globallock.acquire()
        print "File '{}' converted to '{}'.".format(fname, outname)
        globallock.release()
    except:
        # Any failure (bad TIFF, missing tool, parse error) is reported, not raised.
        globallock.acquire()
        print "Converting {} failed.".format(fname)
        globallock.release()
## This is the main program ##
if __name__ == '__main__':
    if len(sys.argv) == 1:
        # Called without file arguments: show usage and exit cleanly.
        path, binary = os.path.split(sys.argv[0])
        print "Usage: {} [file ...]".format(binary)
        sys.exit(0)
    # Abort early when the required external tools are missing.
    checkfor(['tiffinfo'])
    checkfor(['tiff2pdf'])
    # Convert all given files in parallel (one worker per CPU by default).
    p = Pool()
    p.map(process, sys.argv[1:])
    p.close()
| Python | 0 | |
6dcbb2004271860b7d2e8bf0d12da46c925f151c | add a utility to show/set/clear software write protect on a lun | tools/swp.py | tools/swp.py | #!/usr/bin/env python
# coding: utf-8
#
# A simple example to show/set/clear the software write protect flag SWP
#
import sys
from pyscsi.pyscsi.scsi import SCSI
from pyscsi.pyscsi.scsi_device import SCSIDevice
from pyscsi.pyscsi import scsi_enum_modesense6 as MODESENSE6
def usage():
    # Print the command-line synopsis for this tool.
    print 'Usage: swp.py [--help] [--on|--off] <device>'
def main():
    """Show, set or clear the SWP (software write protect) flag on a LUN.

    Parses --help/--on/--off from sys.argv, reads the CONTROL mode page via
    MODE SENSE(6) and, when requested, rewrites it with MODE SELECT(6).
    """
    swp_on = 0
    swp_off = 0
    # Manual argv scan: option flags are deleted in place so the device
    # name ends up at sys.argv[1] regardless of option order.
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == '--help':
            return usage()
        if sys.argv[i] == '--on':
            del sys.argv[i]
            swp_on = 1
            continue
        if sys.argv[i] == '--off':
            del sys.argv[i]
            swp_off = 1
            continue
        i += 1
    if len(sys.argv) < 2:
        return usage()
    device = sys.argv[1]
    sd = SCSIDevice(device)
    s = SCSI(sd)
    # Fetch the CONTROL mode page; 'swp' lives in its first mode-page entry.
    i = s.modesense6(page_code=MODESENSE6.PAGE_CODE.CONTROL).result
    if swp_on:
        i['mode_pages'][0]['swp'] = 1
        s.modeselect6(i)
        print 'Set SWP ON'
        return
    if swp_off:
        i['mode_pages'][0]['swp'] = 0
        s.modeselect6(i)
        print 'Set SWP OFF'
        return
    # No flag given: just report the current state.
    print 'SWP is %s' % ("ON" if i['mode_pages'][0]['swp'] else "OFF")

if __name__ == "__main__":
    main()
| Python | 0.000001 | |
80d2fa29185e9c3c54ed1e173122bbe5a78624a4 | Create tutorial4.py | tutorial4.py | tutorial4.py | Python | 0 | ||
f8d4596db159f143d51c62ea2a097a72f9877ee6 | Add test for clusqmgr | test/clusqmgr.py | test/clusqmgr.py | import unittest
from testbase import MQWebTest
class TestQueueActions(MQWebTest):
	"""Checks the /api/clusqmgr inquire endpoint of the MQWeb REST API."""
	def testInquire(self):
		"""Inquire cluster queue managers and validate the JSON response."""
		data = self.getJSON('/api/clusqmgr/inquire/' + self.qmgr)
		self.assertFalse('mqweb' not in data, 'No mqweb data returned')
		# An 'error' key means WebSphere MQ rejected the request; fail with its code.
		if 'error' in data:
			self.assertFalse(True, 'Received a WebSphere MQ error:' + str(data['error']['reason']['code']))
		self.assertFalse('clusqmgrs' not in data, 'No clusqmgrs array returned')
		self.assertFalse(len(data['clusqmgrs']) == 0, 'No cluster information found')
		self.assertTrue(self.checkIds(data['clusqmgrs'][0]), 'There are unmapped Websphere MQ attributes')
# Run this module's tests directly (not via a test runner) with verbose output.
suite = unittest.TestLoader().loadTestsFromTestCase(TestQueueActions)
unittest.TextTestRunner(verbosity=2).run(suite)
| Python | 0 | |
e21e04436f0596f25ca3fb75a9fe15916687c955 | Add utils.py tests | monasca_persister/tests/test_utils.py | monasca_persister/tests/test_utils.py | # (C) Copyright 2019 Fujitsu Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
from oslotest import base
from monasca_persister.repositories import utils
class TestUtils(base.BaseTestCase):
    """Unit tests for the message-parsing helpers in repositories.utils."""
    def setUp(self):
        super(TestUtils, self).setUp()
    def tearDown(self):
        super(TestUtils, self).tearDown()
    def test_parse_measurement_message(self):
        """Metric message parses into (dimensions, name, region, tenant, ts, value, meta)."""
        message = Mock()
        message.message.value = """{
            "metric": {
                "name": "metric_name",
                "timestamp": "metric_timestamp",
                "value": "0.0",
                "value_meta": {
                },
                "dimensions": {}
                },
            "meta": {
                "region": "meta_region",
                "tenantId": "meta_tenantId"
            }
        }"""
        data = utils.parse_measurement_message(message)
        self.assertEqual(data[0], {})
        self.assertEqual(data[1], 'metric_name')
        self.assertEqual(data[2], 'meta_region')
        self.assertEqual(data[3], 'meta_tenantId')
        self.assertEqual(data[4], 'metric_timestamp')
        # The string "0.0" must be converted to a float.
        self.assertEqual(data[5], 0.0)
        self.assertEqual(data[6], {})
    def test_parse_alarm_state_hist_message(self):
        """Alarm-transition message parses; subAlarms keys are renamed to snake_case."""
        message = Mock()
        message.message.value = """{
            "alarm-transitioned": {
                "alarmId": "dummyid",
                "metrics": "dummymetrics",
                "newState": "dummynewState",
                "oldState": "dummyoldState",
                "link": "dummylink",
                "lifecycleState": "dummylifecycleState",
                "stateChangeReason": "dummystateChangeReason",
                "tenantId": "dummytenantId",
                "timestamp": "dummytimestamp",
                "subAlarms": {
                    "subAlarmExpression": "dummy_sub_alarm",
                    "currentValues": "dummy_values",
                    "metricDefinition": "dummy_definition",
                    "subAlarmState": "dummy_state"
                }
            }
        }"""
        # Expected renamed sub-alarm fragments inside the serialized data[7].
        output = ['"sub_alarm_expression":"dummy_sub_alarm"',
                  '"current_values":"dummy_values"',
                  '"metric_definition":"dummy_definition"',
                  '"sub_alarm_state":"dummy_state"']
        data = utils.parse_alarm_state_hist_message(message)
        self.assertEqual(data[0], 'dummyid')
        self.assertEqual(data[1], 'dummymetrics')
        self.assertEqual(data[2], 'dummynewState')
        self.assertEqual(data[3], 'dummyoldState')
        self.assertEqual(data[4], 'dummylink')
        self.assertEqual(data[5], 'dummylifecycleState')
        self.assertEqual(data[6], "dummystateChangeReason")
        for elem in output:
            self.assertIn(elem, data[7])
        self.assertEqual(data[8], 'dummytenantId')
        self.assertEqual(data[9], 'dummytimestamp')
    def test_parse_events_message(self):
        """Event message parses into (project_id, timestamp, type, payload, dimensions)."""
        message = Mock()
        message.message.value = """{
            "event": {
                "event_type": "dummy_event_type",
                "timestamp": "dummy_timestamp",
                "payload": "dummy_payload",
                "dimensions": "dummy_dimensions"
            },
            "meta": {
                "project_id": "dummy_project_id"
            }
        }"""
        project_id, timestamp, event_type, payload, dimensions = utils.parse_events_message(message)
        self.assertEqual(project_id, "dummy_project_id")
        self.assertEqual(timestamp, "dummy_timestamp")
        self.assertEqual(event_type, "dummy_event_type")
        self.assertEqual(payload, "dummy_payload")
        self.assertEqual(dimensions, "dummy_dimensions")
| Python | 0.000001 | |
66443f49c932fba9203b878b7be5f8c1a99a4e9e | make pacbio like names | iron/utilities/rename_to_pacbio.py | iron/utilities/rename_to_pacbio.py | #!/usr/bin/python
import sys,argparse
from SequenceBasics import FastaHandleReader, FastqHandleReader
def main():
  """Rename FASTA/FASTQ entries to PacBio-style CCS read names.

  Reads entries from a file (or STDIN with '-') and reprints them with names
  of the form m150101_..._s1_p0/<serial>/ccs.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('input',help="Use - for STDIN")
  # Exactly one of --fasta/--fastq must be given.
  group = parser.add_mutually_exclusive_group(required=True)
  group.add_argument('--fasta',action='store_true')
  group.add_argument('--fastq',action='store_true')
  args = parser.parse_args()
  if args.input=='-': args.input = sys.stdin
  else: args.input= open(args.input)
  if args.fasta:
    args.input = FastaHandleReader(args.input)
  elif args.fastq:
    args.input = FastqHandleReader(args.input)
  z = 0
  while True:
    e = args.input.read_entry()
    if not e: break
    z+=1
    # Fabricated PacBio movie/ZMW name; z is a 1-based serial number.
    name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
    if args.fastq:
      print '@'+name
      print e['seq']
      print '+'
      print e['qual']
    elif args.fasta:
      print '>'+name
      print e['seq']

if __name__=="__main__":
  main()
| Python | 0.02221 | |
b3633655ce700adfe3bd5390735edf799fd56624 | add missing migration | gunnery/core/migrations/0003_auto__add_field_server_port.py | gunnery/core/migrations/0003_auto__add_field_server_port.py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the SSH 'port' column to core_server."""
    def forwards(self, orm):
        # Adding field 'Server.port'
        # Existing rows receive the standard SSH port (22) as their value.
        db.add_column(u'core_server', 'port',
                      self.gf('django.db.models.fields.IntegerField')(default=22),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Server.port'
        db.delete_column(u'core_server', 'port')
models = {
u'core.application': {
'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'Application'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': u"orm['core.Department']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.department': {
'Meta': {'object_name': 'Department'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.environment': {
'Meta': {'unique_together': "(('application', 'name'),)", 'object_name': 'Environment'},
'application': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environments'", 'to': u"orm['core.Application']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_production': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.server': {
'Meta': {'unique_together': "(('environment', 'name'),)", 'object_name': 'Server'},
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'servers'", 'to': u"orm['core.Environment']"}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '22'}),
'roles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'servers'", 'symmetrical': 'False', 'to': u"orm['core.ServerRole']"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'core.serverrole': {
'Meta': {'unique_together': "(('department', 'name'),)", 'object_name': 'ServerRole'},
'department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'serverroles'", 'to': u"orm['core.Department']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['core'] | Python | 0.000258 | |
dddac1090fae15edb9a8d2a2781bb80989a0bc84 | add eventrange control | pilot/control/eventrange.py | pilot/control/eventrange.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, wen.guan@cern.ch, 2018
import json
import Queue
import os
import time
from pilot.util import https
from pilot.util.config import config
import logging
logger = logging.getLogger(__name__)
def download_event_ranges(job, num_ranges=None):
"""
Download event ranges
:param job:
:param num_ranges:
:return: List of event ranges.
"""
log = logger.getChild(str(job['PandaID']))
try:
if num_ranges == None:
# ToBeFix num_ranges with corecount
num_ranges = 1
data = {'pandaID': job['PandaID'],
'jobsetID': job['jobsetID'],
'taskID': job['taskID'],
'nRanges': num_ranges}
log.info("Downloading new event ranges: %s" % data)
res = https.request('{pandaserver}/server/panda/getEventRanges'.format(pandaserver=config.Pilot.pandaserver),
data=data)
log.info("Downloaded event ranges: %s" % res)
if res['StatusCode'] == 0 or str(res['StatusCode']) == '0':
return res['eventRanges']
return []
except Exception, e:
log.error("Failed to download event ranges: %s" % (e.get_detail()))
return None
def update_event_ranges(job, event_ranges, version=1):
"""
Update an event range on the Event Server
:param event_ranges:
"""
log = logger.getChild(str(job['PandaID']))
log.info("Updating event ranges: %s" % event_ranges)
try:
if version:
data = {'eventRanges': json.dumps(event_ranges), 'version': 1}
else:
data = {'eventRanges': json.dumps(event_ranges)}
log.info("Updating event ranges: %s" % data)
res = https.request('{pandaserver}/server/panda/updateEventRanges'.format(pandaserver=config.Pilot.pandaserver),
data=data)
log.info("Updated event ranges status: %s" % res)
except Exception, e:
log.error("Failed to update event ranges: %s" % (e.get_detail()))
| Python | 0 | |
42d6f1d17ea0f0117a82eb1933a5150b5eb1e29a | add missing is_context_manager | pikos/_internal/util.py | pikos/_internal/util.py | import inspect
def is_context_manager(obj):
    """ Return True when *obj* exposes the context-manager protocol. """
    # FIXME: duck-typed check -- only looks for the two required hooks.
    required_hooks = ('__enter__', '__exit__')
    return all(hasattr(obj, hook) for hook in required_hooks)
| Python | 0.999274 | |
ec2310dc42ccdeaafc74c232fad3199dcd22e252 | Create EPICLocSearch_parse-intron.py | EPICLocSearch_parse-intron.py | EPICLocSearch_parse-intron.py | " " " this file was created in november 2014
as part of a de novo search for EPIC loci in
the chaetognath species Pterosagitta draco
property of dr. Ferdinand Marlétaz
" " "
#!/usr/bin/env python
import sys
import re
from collections import defaultdict
def reverse(ali,taxa,clust):
	"""Scan a column-wise view of the alignment for conserved intron positions.

	ali maps row name -> aligned string; taxa lists the taxon rows; clust is
	the cluster id used in the report lines.  Flanking sequences of
	qualifying introns are printed and written to the global 'out' handle.
	"""
	alen=len(ali[taxa[0]])
	#print alen
	# Transpose the alignment: positions[i] holds column i across all rows.
	positions=['']*alen
	for tax in taxa:
		seq=ali[tax]
		#print tax, len(seq)
		for i,res in enumerate(seq):
			positions[i]+=res
	#print taxa
	n_int=0
	onset=50
	maxgaps=20
	#We selection introns with at least 30 flanking positions out of 50
	#print ','.join(taxa)
	# Prefer a row whose name starts with 'Lgi' as the representative sequence.
	msk=[tax for tax in taxa if tax.startswith('Lgi')]
	id_msk=''.join(msk[0]) if len(msk)>0 else 'NA'
	for i,pos in enumerate(positions):
		# A column made entirely of '(' marks an annotated intron position.
		if ''.join(set(pos))=='(':
			#print '('*len(taxa)
			items=dict((e,positions[i+1].count(e)) for e in set(positions[i+1]))
			sum_pres=sum([items[v] for v in ['0','1','2'] if v in items])
			sum_tot=sum(items.values())
			# Keep the intron only when at most 4 rows lack a 0/1/2 phase mark.
			if sum_pres>sum_tot-5:
				cons_left=ali['cons'][i-onset:i]
				cons_right=ali['cons'][i+3:i+onset+3]
				cons_left_sc=cons_left.count('+')
				cons_right_sc=cons_right.count('+')
				seq_left=ali[id_msk][i-onset:i].replace(']',')').split(')')[-1] if ')' in ali[id_msk][i-onset:i].replace(']',')') else ali[id_msk][i-onset:i]
				seq_right=ali[id_msk][i+3:i+onset+3].replace('[','(').split('(')[0]
				gap_left=cons_left.count('-')
				gap_right=cons_right.count('-')
				# Require long enough, mostly gap-free flanks on both sides.
				if len(seq_left.replace('-',''))>=onset-maxgaps and len(seq_right.replace('-',''))>=onset-maxgaps:
					if gap_left<maxgaps and gap_right<maxgaps:
						print "{0}\t{1}\t{2}/{3}\t{4}/{5}\t{6} / {7}\t{8} / {9}".format(clust,i,sum_pres,sum_tot,cons_left_sc,cons_right_sc,cons_left,cons_right,seq_left,seq_right)
						out.write('>{0}_{1}_left\n{2}\n'.format(clust,i,seq_left))
						out.write('>{0}_{1}_right\n{2}\n'.format(clust,i,seq_right))
						#print '\n'.join(positions[i-10:i+11])
						n_int+=1
		#print i,pos[0:50]
	#print n_int
gene=''
aliSet=defaultdict(str)
taxa=[]
# Row labels that are annotation tracks rather than taxa.
elt=['cons','insert','sites','intron','sfilt']
out=open('flanks.fa','w')
# Stream the concatenated alignment file; a './METFAM...' line starts a new cluster.
for line in open(sys.argv[1]):
	if line.startswith('./METFAM'):
		clust=line.rstrip().split('/')[3]
		gene=''
		if len(aliSet)>0:
			#print "\n",clust
			#for tax in taxa:
				#print "{0}\t{1}".format(tax,len(aliSet[tax]))
			# Process the previously accumulated alignment before resetting.
			rev=reverse(aliSet,taxa,clust.split('.')[0])
			aliSet=defaultdict(str)
			taxa=[]
	alin=re.search(r'([^\s]+)(\s+)(.+)\n', line)
	if alin:
		# Fixed-width format: name in the first 40 columns, sequence after.
		name=line[0:40].split()[0]
		seq=line.rstrip()[40:]
		#print name,seq[0:40]
		aliSet[name]+=seq
		if not name in taxa and not name in elt:
			taxa.append(name)
		#print alin.groups()
#Bfl-G461809
#cons
| Python | 0 | |
c0a809ff79d90712a5074d208193ac9fd2af9901 | Add haproxy parser | playback/cli/haproxy.py | playback/cli/haproxy.py | import sys
from playback.api import HaproxyInstall
from playback.api import HaproxyConfig
from playback.templates.haproxy_cfg import conf_haproxy_cfg
from playback.cliutil import priority
def install(args):
    """CLI handler: install HAProxy on the target hosts."""
    try:
        target = HaproxyInstall(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
    except AttributeError as e:
        # str(e) works on both Python 2 and 3; e.message was removed in Python 3.
        sys.stderr.write(str(e))
        sys.exit(1)
    target.install()
def config(args):
    """CLI handler: configure HAProxy and optionally Keepalived."""
    try:
        target = HaproxyConfig(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
    except AttributeError:
        sys.stderr.write('No hosts found. Please using --hosts param.')
        sys.exit(1)
    # The two sub-actions are independent; both may run in one invocation.
    if args.upload_conf:
        target.upload_conf(args.upload_conf)
    if args.configure_keepalived:
        target.configure_keepalived(args.router_id, args.priority,
                                    args.state, args.interface, args.vip)
def gen_conf():
    # Write the bundled example haproxy.cfg template to the current directory.
    with open('haproxy.cfg', 'w') as f:
        f.write(conf_haproxy_cfg)
@priority(12)
def make(parser):
    """provision HAProxy with Keepalived"""
    # One subcommand per action; each wraps its module-level handler above.
    s = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    def install_f(args):
        install(args)
    install_parser = s.add_parser('install', help='install HAProxy')
    install_parser.set_defaults(func=install_f)
    def config_f(args):
        config(args)
    config_parser = s.add_parser('config', help='configure HAProxy')
    config_parser.add_argument('--upload-conf', help='upload configuration file to the target host',
                               action='store', default=False, dest='upload_conf')
    config_parser.add_argument('--configure-keepalived', help='configure keepalived',
                               action='store_true', default=False, dest='configure_keepalived')
    # Keepalived parameters; all optional, defaulting to False when omitted.
    config_parser.add_argument('--router_id', help='Keepalived router id e.g. lb1',
                               action='store', default=False, dest='router_id')
    config_parser.add_argument('--priority', help='Keepalived priority e.g. 150',
                               action='store', default=False, dest='priority')
    config_parser.add_argument('--state', help='Keepalived state e.g. MASTER',
                               action='store', default=False, dest='state')
    config_parser.add_argument('--interface', help='Keepalived binding interface e.g. eth0',
                               action='store', default=False, dest='interface')
    config_parser.add_argument('--vip', help='Keepalived virtual ip e.g. CONTROLLER_VIP',
                               action='store', default=False, dest='vip')
    config_parser.set_defaults(func=config_f)
    def gen_conf_f(args):
        gen_conf()
    gen_conf_parser = s.add_parser('gen-conf', help='generate the example configuration to the current location')
    gen_conf_parser.set_defaults(func=gen_conf_f)
| Python | 0.000001 | |
acc5c52011db4c8edc615ae3e0cad9cea4fe58b8 | Add basic test for filesystem observer source | spreadflow_observer_fs/test/test_source.py | spreadflow_observer_fs/test/test_source.py | # -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
    """
    Spawn process method signature.

    Intentionally empty: used only as the `spec` for a Mock of
    reactor.spawnProcess so that call signatures are validated.
    """
class SpreadflowSourceIntegrationTestCase(TestCase):
    """
    Integration tests for spreadflow filesystem observer source.
    """
    @run_test_with(AsynchronousDeferredRunTest)
    @defer.inlineCallbacks
    def test_source_process(self):
        """Attaching spawns the observer process; BSON input reaches the scheduler."""
        source = FilesystemObserverSource('*.txt', '/some/directory')
        reactor = Mock()
        reactor.spawnProcess = Mock(spec=_spawnProcess)
        scheduler = Mock()
        scheduler.send = Mock(spec=Scheduler.send)
        # Attach source to the scheduler.
        yield source.attach(scheduler, reactor)
        self.assertEquals(reactor.spawnProcess.call_count, 1)
        # Simulate a message directed to the source.
        msg = {
            'port': 'default',
            'item': {
                'type': 'delta',
                'date': datetime(2010, 10, 20, 20, 10),
                'inserts': ['abcdefg'],
                'deletes': ['hiklmno'],
                'data': {
                    'abcdefg': {
                        'path': '/some/directory/xyz.txt'
                    }
                }
            }
        }
        # deepcopy so the matcher keeps an unmodified reference of the item.
        matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
        source.peer.dataReceived(BSON.encode(msg))
        self.assertEquals(scheduler.send.call_count, 1)
        self.assertThat(scheduler.send.call_args, matches)
| Python | 0 | |
4d1c81af1d028b2d0fd58f8bab7e7e0246c04f3b | Create alternative_matching.py | hacker_rank/regex/grouping_and_capturing/alternative_matching.py | hacker_rank/regex/grouping_and_capturing/alternative_matching.py | Regex_Pattern = r'^(Mr\.|Mrs\.|Ms\.|Dr\.|Er\.)[a-zA-Z]{1,}$' # Do not delete 'r'.
| Python | 0.00001 | |
8033f8a033ddc38c3f1e2276c8c2b4f50c8360fb | Add Python template | src/template.py | src/template.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import argparse
def main(filename=None):
    """Print a greeting and report whether *filename* exists and has a
    valid (.txt/.text) extension."""
    print("Hello world!")
    # Missing files are flagged in the status suffix instead of aborting.
    if not os.path.isfile(filename):
        file_status = ' (file does not exist)'
    else:
        file_status = ''
    print("Input File = '{}'{}".format(filename,file_status))
    _, file_ext = os.path.splitext(filename)
    # Only plain-text extensions are accepted.
    if file_ext not in ['.txt','.text']:
        print("File extension '{}' is invalid".format(file_ext))
if __name__ == "__main__":
    # Parse the single positional argument and delegate to main().
    parser = argparse.ArgumentParser(description='Starting Template for Python 3 Programs')
    parser.add_argument('file',help='Input file')
    args = parser.parse_args()
    main(args.file)
| Python | 0.000001 | |
11603040c58e27ebb109275bd4454a54e0c61d42 | Test examples | tests/acceptance/test_examples.py | tests/acceptance/test_examples.py | from typing import Dict
from miniworld.util import JSONConfig
# TODO: examples/batman_adv.json, problem is configurator
def test_snapshot_boot_single_scenario(image_path, runner):
    """Repeatedly boot, step and stop one scenario within a single runner."""
    with runner() as r:
        for _ in range(5):
            scenario = JSONConfig.read_json_config('examples/nb_bridged_lan.json') # type: Dict
            r.start_scenario(scenario)
            r.step()
            r.step()
            # Stop without hard teardown before the next iteration.
            r.stop(hard=False)
# TODO: test stop/step
def test_snapshot_boot_multiple_scenarios(image_path, runner):
    """Boot three different example scenarios back-to-back in one runner."""
    with runner() as r:
        scenario = JSONConfig.read_json_config('examples/batman_adv.json') # type: Dict
        r.start_scenario(scenario)
        for _ in range(5):
            r.step()
        r.stop(hard=False)
        scenario = JSONConfig.read_json_config('examples/nb_bridged_lan.json') # type: Dict
        r.start_scenario(scenario)
        for _ in range(5):
            r.step()
        r.stop(hard=False)
        scenario = JSONConfig.read_json_config('examples/nb_bridged_wifi.json') # type: Dict
        r.start_scenario(scenario)
        for _ in range(5):
            r.step()
        r.stop(hard=False)
| Python | 0.000001 | |
d2c5462c5677d7674921f02687017f4128f219f7 | Create while_loop_else.py | while_loop_else.py | while_loop_else.py | // You can actually do a while loop that ends with an else //
while True:
...
...
...
...
else:
| Python | 0.000058 | |
0322e1c51fe07cc9707a687ab309a00ca374a1af | Add a cleanup_test_data management command to remove old test data from dev and stage | moztrap/model/core/management/commands/cleanup_test_data.py | moztrap/model/core/management/commands/cleanup_test_data.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
from optparse import make_option
from django.core.management.base import BaseCommand
from moztrap.model.core import models as core_models
from moztrap.model.environments import models as env_models
class Command(BaseCommand):
    """Management command that deletes objects whose names start with
    'Test <Type> ' (generated test data) across the core/environment models."""
    help = 'Deletes old test data'
    option_list = BaseCommand.option_list + (
        make_option('--permanent',
            action='store_true',
            dest='permanent',
            default=True,
            help='Permanently delete records?'),)
    def handle(self, *args, **options):
        """Delete matching records for each model type and log the counts."""
        for model in (core_models.Product,
                      env_models.Category,
                      env_models.Element,
                      env_models.Profile):
            obj_type = model._meta.object_name
            # NOTE(review): the 'everything' manager presumably also covers
            # soft-deleted rows — confirm against the model definition.
            objects_to_delete = model.everything.filter(name__startswith='Test %s ' % obj_type)
            obj_count = objects_to_delete.count()
            objects_to_delete.delete(permanent=options['permanent'])
            self.stdout.write('%s: %s test %s object(s) deleted\n' %
                (datetime.now().isoformat(), obj_count, obj_type))
| Python | 0 | |
fe7bc09a19caac8fdc205d9e72a5b05f4688db02 | add south migrations | referral/migrations/0002_auto__add_field_campaign_pattern.py | referral/migrations/0002_auto__add_field_campaign_pattern.py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the 'pattern' column to referral_campaign."""
    def forwards(self, orm):
        # Adding field 'Campaign.pattern'
        # Existing rows receive an empty string as their pattern.
        db.add_column('referral_campaign', 'pattern',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Campaign.pattern'
        db.delete_column('referral_campaign', 'pattern')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'referral.campaign': {
'Meta': {'ordering': "['name']", 'object_name': 'Campaign'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'pattern': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'referral.referrer': {
'Meta': {'ordering': "['name']", 'object_name': 'Referrer'},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'referrers'", 'null': 'True', 'to': "orm['referral.Campaign']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'referral.userreferrer': {
'Meta': {'ordering': "['referrer__name']", 'object_name': 'UserReferrer'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['referral.Referrer']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'referrer'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['referral'] | Python | 0.001497 | |
e2004076b1e04df21d9122d94e8ac00776542483 | Create new package. (#6044) | var/spack/repos/builtin/packages/r-allelicimbalance/package.py | var/spack/repos/builtin/packages/r-allelicimbalance/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAllelicimbalance(RPackage):
    """AllelicImbalance: a framework for allele-specific expression
    investigation using RNA-seq data."""

    homepage = "http://bioconductor.org/packages/AllelicImbalance/"
    url      = "https://git.bioconductor.org/packages/AllelicImbalance"

    # Bioconductor release pinned by git commit hash.
    version('1.14.0', git='https://git.bioconductor.org/packages/AllelicImbalance', commit='35958534945819baafde0e13d1eb4d05a514142c')

    # R version constraint for this release.
    depends_on('r@3.4.0:3.4.9', when='@1.14.0')
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-summarizedexperiment', type=('build', 'run'))
    depends_on('r-genomicalignments', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-bsgenome', type=('build', 'run'))
    depends_on('r-variantannotation', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-rsamtools', type=('build', 'run'))
    depends_on('r-genomicfeatures', type=('build', 'run'))
    depends_on('r-gviz', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-latticeextra', type=('build', 'run'))
    depends_on('r-gridextra', type=('build', 'run'))
    depends_on('r-seqinr', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r-nlme', type=('build', 'run'))
| Python | 0 | |
483cdf6b4dd846d9da11788ae98d86d373fb5c49 | add analyze script | app/lda/scripts/analyze.py | app/lda/scripts/analyze.py | from __future__ import print_function
import numpy as np
import sys
import pandas as pd
phi_path = '/users/wdai/bosen/app/lda/output/lda.S0.M4.T32/lda_out.phi'
num_topics = 100
num_words = 52210
top_k = 10
dict_path = '/users/wdai/bosen/app/lda/datasets/words_freq.tsv'
topk_file = '/users/wdai/bosen/app/lda/output/topk.tsv'
def read_dict():
    """Build an ``id -> word`` mapping from the word-frequency TSV.

    Words occurring fewer than 10 times, or more than 1e6 times (super
    frequent words), are dropped before the mapping is built.
    """
    freq = pd.read_csv(dict_path, sep='\t')
    min_occur = 10
    # Keep words that are neither too rare nor super frequent.
    freq = freq[(freq['count'] >= min_occur) & (freq['count'] <= 1e6)]
    print('# of words occuring at least 10 times:', len(freq.index))
    # TODO(wdai): remap the word ID after truncation.
    return dict(zip(freq['id'].as_matrix(), freq['word'].tolist()))
if __name__ == '__main__':
    # Load the topic-word matrix: one line per topic, num_words floats each.
    phi = np.zeros((num_topics, num_words))
    with open(phi_path, 'r') as f:
        lines = f.readlines()
        for topic, line in enumerate(lines):
            fields = [float(field.strip()) for field in line.split()]
            assert len(fields) == num_words, 'len(fields): %d vs num_words %d' % \
                (len(fields), num_words)
            phi[topic, :] = fields
    # top-k words
    #topk = np.zeros((num_topics, top_k))
    i2w = read_dict()
    with open(topk_file, 'w') as f:
        for t in range(num_topics):
            # argpartition selects the top-k indices (unordered); the argsort
            # pass then orders them by descending topic weight.
            ind = np.argpartition(phi[t,:], -top_k, axis=0)[-top_k:]
            ind = ind[np.argsort(phi[t,ind])[::-1]]
            # One tab-separated "word:weight" line per topic.
            for n in ind:
                f.write('%s:%.2f\t' % (i2w[n], phi[t,n]))
            f.write('\n')
    print('Output top %d words to %s' % (top_k, topk_file))
| Python | 0.000001 | |
fcb02edeb8fafa8c297d48edc8ebf6b389321430 | add test | test_iris.py | test_iris.py | import unittest
from sklearn import datasets
from sklearn.utils.validation import check_random_state
from stacked_generalization import StackedClassifier, FWLSClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.tree.tree import SPARSE_SPLITTERS
class TestStackedClassfier(unittest.TestCase):
    """Smoke test: stacking several sklearn classifiers on shuffled iris
    should comfortably beat 0.8 accuracy, in-sample and out-of-bag."""
    def setUp(self):
        # Shuffle iris with a fixed seed so the test is deterministic.
        iris = datasets.load_iris()
        rng = check_random_state(0)
        perm = rng.permutation(iris.target.size)
        iris.data = iris.data[perm]
        iris.target = iris.target[perm]
        self.iris = iris
    def test_stacked_classfier(self):
        bclf = LogisticRegression(random_state=1)
        clfs = [RandomForestClassifier(n_estimators=50, criterion = 'gini', random_state=1),
                ExtraTreesClassifier(n_estimators=50, criterion = 'gini', random_state=2),
                ExtraTreesClassifier(n_estimators=40, criterion = 'gini', random_state=3),
                GradientBoostingClassifier(n_estimators=25, random_state=1),
                GradientBoostingClassifier(n_estimators=40, random_state=1),
                RidgeClassifier(random_state=1)]
        sl = StackedClassifier(bclf, clfs, n_folds=3, verbose=0, stack_by_proba=True, oob_score_flag=True)
        sl.fit(self.iris.data, self.iris.target)
        score = sl.score(self.iris.data, self.iris.target)
        self.assertGreater(score, 0.8, "Failed with score = {0}".format(score))
        # Fix: this assertion previously re-checked `score` while formatting
        # the OOB score into the message; assert the OOB score itself.
        self.assertGreater(sl.oob_score_, 0.8,
                           "Failed with oob_score = {0}".format(sl.oob_score_))
if __name__ == '__main__':
unittest.main()
| Python | 0.000002 | |
f43ac6c526aceddea81c8fccf99e33bd7cb917c4 | fix api wrapper | src/sentry/debug/middleware.py | src/sentry/debug/middleware.py | from __future__ import absolute_import
import json
import re
from debug_toolbar.toolbar import DebugToolbar
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.html import escape
from six.moves import _thread as thread
WRAPPER = """
<!DOCTYPE html>
<html>
<body>
<pre>{content}</pre>
</body>
</html>
"""
class ToolbarCache(object):
    """Per-thread registry of active DebugToolbar instances.

    Entries are keyed by thread ident so concurrent requests each see
    only their own toolbar.
    """

    def __init__(self):
        self._toolbars = {}

    def create(self, request):
        # Build a toolbar for this request and remember it for this thread.
        ident = thread.get_ident()
        new_toolbar = DebugToolbar(request)
        self._toolbars[ident] = new_toolbar
        return new_toolbar

    def pop(self):
        # Remove and return this thread's toolbar (None if absent).
        return self._toolbars.pop(thread.get_ident(), None)

    def get(self):
        # Return this thread's toolbar without removing it (None if absent).
        return self._toolbars.get(thread.get_ident(), None)
toolbar_cache = ToolbarCache()
class DebugMiddleware(object):
    """Django middleware that splices the debug toolbar into responses for
    superusers when ``settings.SENTRY_DEBUGGER`` is enabled.

    Non-HTML responses (e.g. JSON API output) are wrapped in the WRAPPER
    HTML shell so the toolbar has a document to attach to.
    """
    # Case-insensitive split point used to inject the toolbar markup.
    _body_regexp = re.compile(re.escape('</body>'), flags=re.IGNORECASE)
    def show_toolbar_for_request(self, request):
        """Only superusers with the debugger on and an HTML-accepting
        client get the toolbar."""
        if not settings.SENTRY_DEBUGGER:
            return False
        if not request.is_superuser():
            return False
        if 'text/html' not in request.META.get('HTTP_ACCEPT', '*/*'):
            return False
        return True
    def show_toolbar_for_response(self, response):
        """Only HTML and JSON responses are decorated."""
        content_type = response['Content-Type']
        for type in ('text/html', 'application/json'):
            if type in content_type:
                return True
        return False
    def process_request(self, request):
        """Create the per-thread toolbar and run panel request hooks."""
        # Decide whether the toolbar is active for this request.
        if not self.show_toolbar_for_request(request):
            return
        toolbar = toolbar_cache.create(request)
        # Activate instrumentation ie. monkey-patch.
        for panel in toolbar.enabled_panels:
            panel.enable_instrumentation()
        # Run process_request methods of panels like Django middleware.
        response = None
        for panel in toolbar.enabled_panels:
            response = panel.process_request(request)
            if response:
                break
        return response
    def process_view(self, request, view_func, view_args, view_kwargs):
        """Run panel view hooks; no-op when no toolbar is active."""
        toolbar = toolbar_cache.get()
        if not toolbar:
            return
        # Run process_view methods of panels like Django middleware.
        response = None
        for panel in toolbar.enabled_panels:
            response = panel.process_view(request, view_func, view_args, view_kwargs)
            if response:
                break
    def process_response(self, request, response):
        """Finish panel processing and splice the toolbar into the body."""
        toolbar = toolbar_cache.pop()
        if not toolbar:
            return response
        if not self.show_toolbar_for_response(response):
            return response
        # Run process_response methods of panels like Django middleware.
        for panel in reversed(toolbar.enabled_panels):
            new_response = panel.process_response(request, response)
            if new_response:
                response = new_response
        # Deactivate instrumentation ie. monkey-unpatch. This must run
        # regardless of the response. Keep 'return' clauses below.
        # (NB: Django's model for middleware doesn't guarantee anything.)
        for panel in reversed(toolbar.enabled_panels):
            panel.disable_instrumentation()
        # Collapse the toolbar by default if SHOW_COLLAPSED is set.
        if 'djdt' in request.COOKIES:
            response.delete_cookie('djdt')
        try:
            content = force_text(response.content, encoding='utf-8')
        except UnicodeDecodeError:
            # Make sure we at least just return a response on an encoding issue
            return response
        if 'text/html' not in response['Content-Type']:
            # Wrap non-HTML content (pretty-printing JSON first) so the
            # toolbar markup has an HTML document to live in.
            if 'application/json' in response['Content-Type']:
                content = json.dumps(json.loads(content), indent=2)
            content = WRAPPER.format(
                content=escape(content),
            )
            response['Content-Type'] = 'text/html'
        # Insert the toolbar in the response.
        bits = self._body_regexp.split(content)
        if len(bits) > 1:
            bits[-2] += toolbar.render_toolbar()
            content = '</body>'.join(bits)
        response.content = content
        response['Content-Length'] = len(content)
        return response
| from __future__ import absolute_import
import json
import re
from debug_toolbar.toolbar import DebugToolbar
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from six.moves import _thread as thread
class ToolbarCache(object):
    """Stores the active DebugToolbar per thread (keyed by thread ident)
    so concurrent requests don't see each other's toolbar."""
    def __init__(self):
        self._toolbars = {}
    def create(self, request):
        # Build a toolbar for this request and remember it for this thread.
        toolbar = DebugToolbar(request)
        self._toolbars[thread.get_ident()] = toolbar
        return toolbar
    def pop(self):
        # Remove and return the current thread's toolbar, or None.
        return self._toolbars.pop(thread.get_ident(), None)
    def get(self):
        # Return the current thread's toolbar without removing it, or None.
        return self._toolbars.get(thread.get_ident(), None)
toolbar_cache = ToolbarCache()
class DebugMiddleware(object):
    """Django middleware that splices the debug toolbar into responses for
    superusers when ``settings.SENTRY_DEBUGGER`` is enabled.

    Non-HTML responses are wrapped via the ``debug_toolbar/wrapper.html``
    template so the toolbar has a document to attach to.
    """
    # Case-insensitive split point used to inject the toolbar markup.
    _body_regexp = re.compile(re.escape('</body>'), flags=re.IGNORECASE)
    def show_toolbar_for_request(self, request):
        """Only superusers with the debugger on and an HTML-accepting
        client get the toolbar."""
        if not settings.SENTRY_DEBUGGER:
            return False
        if not request.is_superuser():
            return False
        if 'text/html' not in request.META.get('HTTP_ACCEPT', '*/*'):
            return False
        return True
    def show_toolbar_for_response(self, response):
        """Only HTML and JSON responses are decorated."""
        content_type = response['Content-Type']
        for type in ('text/html', 'application/json'):
            if type in content_type:
                return True
        return False
    def process_request(self, request):
        """Create the per-thread toolbar and run panel request hooks."""
        # Decide whether the toolbar is active for this request.
        if not self.show_toolbar_for_request(request):
            return
        toolbar = toolbar_cache.create(request)
        # Activate instrumentation ie. monkey-patch.
        for panel in toolbar.enabled_panels:
            panel.enable_instrumentation()
        # Run process_request methods of panels like Django middleware.
        response = None
        for panel in toolbar.enabled_panels:
            response = panel.process_request(request)
            if response:
                break
        return response
    def process_view(self, request, view_func, view_args, view_kwargs):
        """Run panel view hooks; no-op when no toolbar is active."""
        toolbar = toolbar_cache.get()
        if not toolbar:
            return
        # Run process_view methods of panels like Django middleware.
        response = None
        for panel in toolbar.enabled_panels:
            response = panel.process_view(request, view_func, view_args, view_kwargs)
            if response:
                break
    def process_response(self, request, response):
        """Finish panel processing and splice the toolbar into the body."""
        toolbar = toolbar_cache.pop()
        if not toolbar:
            return response
        if not self.show_toolbar_for_response(response):
            return response
        # Run process_response methods of panels like Django middleware.
        for panel in reversed(toolbar.enabled_panels):
            new_response = panel.process_response(request, response)
            if new_response:
                response = new_response
        # Deactivate instrumentation ie. monkey-unpatch. This must run
        # regardless of the response. Keep 'return' clauses below.
        # (NB: Django's model for middleware doesn't guarantee anything.)
        for panel in reversed(toolbar.enabled_panels):
            panel.disable_instrumentation()
        # Collapse the toolbar by default if SHOW_COLLAPSED is set.
        if 'djdt' in request.COOKIES:
            response.delete_cookie('djdt')
        try:
            content = force_text(response.content, encoding='utf-8')
        except UnicodeDecodeError:
            # Make sure we at least just return a response on an encoding issue
            return response
        if 'text/html' not in response['Content-Type']:
            # Wrap non-HTML content (pretty-printing JSON first) in the
            # toolbar wrapper template.
            if 'application/json' in response['Content-Type']:
                content = json.dumps(json.loads(content), indent=2)
            content = render_to_string('debug_toolbar/wrapper.html', {
                'content': content,
            })
            response['Content-Type'] = 'text/html'
        # Insert the toolbar in the response.
        bits = self._body_regexp.split(content)
        if len(bits) > 1:
            bits[-2] += toolbar.render_toolbar()
            content = '</body>'.join(bits)
        response.content = content
        response['Content-Length'] = len(content)
        return response
| Python | 0.000015 |
2d0f76538f8927a85a2c51b0b6c34f54c775b883 | Add kmeans receiver | lexos/receivers/kmeans_receiver.py | lexos/receivers/kmeans_receiver.py | from lexos.receivers.base_receiver import BaseReceiver
class KmeansOption:
    """Container for the K-means options collected from the front end."""
    def __init__(self):
        # Fix: the original `def __init__(self,):` had no body, which is a
        # SyntaxError and prevented the module from even importing. No
        # options are implemented yet; fields will be added as the
        # front-end form is fleshed out.
        pass
class KmeansReceiver(BaseReceiver):
    def options_from_front_end(self) -> KmeansOption:
        """Get the Kmeans option from front end.

        NOTE(review): stub -- the method has no body yet, so it currently
        returns None despite the KmeansOption annotation; implement the
        front-end option parsing before use.

        :return: a KmeansOption object to hold all the options.
        """
| Python | 0.001009 | |
ab87f960ecb6f330f4574d2e8dc6b3d4cc96c40f | add solution for Spiral Matrix II | src/spiralMatrixII.py | src/spiralMatrixII.py | class Solution:
# @return a list of lists of integer
def generateMatrix(self, n):
if n == 0:
return []
dirs = [[0, 1], [1, 0], [0, -1], [-1, 0]]
cur = cur_d = 0
cur_x = cur_y = 0
matrix = [[0 for col in xrange(n)] for row in xrange(n)]
while cur != n*n:
cur += 1
matrix[cur_x][cur_y] = cur
nx = cur_x + dirs[cur_d][0]
ny = cur_y + dirs[cur_d][1]
if nx < 0 or ny < 0 or nx == n or ny == n or matrix[nx][ny]:
cur_d = (cur_d+1) % 4
nx = cur_x + dirs[cur_d][0]
ny = cur_y + dirs[cur_d][1]
cur_x, cur_y = nx, ny
return matrix
| Python | 0 | |
69a031db7d83254291349804ee5f59fe9972f181 | Add simple jitclass example | examples/jitclass.py | examples/jitclass.py | """
A simple jitclass example.
"""
import numpy as np
from numba import jitclass # import the decorator
from numba import int32, float32 # import the types
# Field specification for the jitclass: each attribute must be declared
# with an explicit Numba type before the class is compiled.
spec = [
    ('value', int32),  # a simple scalar field
    ('array', float32[:]),  # an array field
]
@jitclass(spec)
class Bag(object):
    """A tiny jit-compiled container demonstrating jitclass basics."""
    def __init__(self, value):
        self.value = value
        self.array = np.zeros(value, dtype=np.float32)
    @property
    def size(self):
        # Number of elements in the backing array.
        return self.array.size
    def increment(self, val):
        # Add `val` to every element in place and return the array.
        for i in range(self.size):
            self.array[i] += val
        return self.array
# Exercise the compiled class and show it behaves like a normal object.
mybag = Bag(21)
print('isinstance(mybag, Bag)', isinstance(mybag, Bag))
print('mybag.value', mybag.value)
print('mybag.array', mybag.array)
print('mybag.size', mybag.size)
print('mybag.increment(3)', mybag.increment(3))
| Python | 0.00001 | |
5273a97ab1da4b809573617d3fc01705c322992f | Add tests for form mixin. | thecut/authorship/tests/test_forms.py | thecut/authorship/tests/test_forms.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
    """Minimal ModelForm using AuthorshipMixin, for exercising the mixin."""
    class Meta:
        model = AuthorshipModel
        fields = []
class DummyUser(object):
    """Stand-in for a user object; only identity comparisons are used."""
    pass
class TestAuthorshipMixin(TestCase):
    """Constructor behaviour: the mixin demands a `user` keyword."""
    def test_requires_an_extra_argument_on_creating_an_instance(self):
        self.assertRaises(TypeError, AuthorshipModelForm)
    def test_sets_user_attribute(self):
        dummy_user = DummyUser()
        form = AuthorshipModelForm(user=dummy_user)
        self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
    """Stand-in instance with pk=None, i.e. one that looks unsaved."""
    def __init__(self):
        self.pk = None
class TestAuthorshipMixinSave(TestCase):
    """save() behaviour: created_by/updated_by stamping around the
    (patched-out) ModelForm.save."""
    @patch('django.forms.ModelForm.save')
    def test_calls_super_class_save_method(self, superclass_save):
        form = AuthorshipModelForm(user=UserFactory())
        form.instance = DummyUnsavedModel()
        form.save()
        self.assertTrue(superclass_save.called)
    @patch('django.forms.ModelForm.save')
    def test_sets_updated_by_to_given_user(self, superclass_save):
        user = DummyUser()
        form = AuthorshipModelForm(user=user)
        form.instance = DummyUnsavedModel()
        form.cleaned_data = {}
        form.save()
        self.assertEqual(user, form.instance.updated_by)
    @patch('django.forms.ModelForm.save')
    def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
        user = DummyUser()
        form = AuthorshipModelForm(user=user)
        form.instance = DummyUnsavedModel()
        form.cleaned_data = {}
        form.save()
        self.assertEqual(user, form.instance.created_by)
    @patch('django.forms.ModelForm.save')
    def test_does_not_set_created_by_if_instance_is_saved(self,
                                                          superclass_save):
        # A non-None pk marks the instance as already saved, so its
        # pre-existing created_by must be left untouched.
        class DummySavedModel(object):
            def __init__(self):
                self.pk = 'arbitrary-value'
                self.created_by = 'arbitrary-value'
        user = DummyUser()
        form = AuthorshipModelForm(user=user)
        form.instance = DummySavedModel()
        form.cleaned_data = {}
        form.save()
        self.assertNotEqual(user, form.instance.created_by)
| Python | 0 | |
e838b6d53f131badfbb7b51b4eb268ebb5d7c450 | Add tests for using the new Entity ID tracking in the rule matcher | spacy/tests/matcher/test_entity_id.py | spacy/tests/matcher/test_entity_id.py | from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
    # Fresh English vocab per test, without loading full models.
    return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
    # A new matcher has no patterns and matches nothing.
    matcher = Matcher(en_vocab)
    assert matcher.n_patterns == 0
    assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
    # Registering an entity without patterns must not create patterns.
    matcher = Matcher(en_vocab)
    matcher.add_entity('TestEntity')
    assert matcher.n_patterns == 0
    assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
    # Attrs are stored per entity; entities without attrs yield {}.
    matcher = Matcher(en_vocab)
    matcher.add_entity('TestEntity')
    entity = matcher.get_entity('TestEntity')
    assert entity == {}
    matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
    entity = matcher.get_entity('TestEntity2')
    assert entity == {'Hello': 'World'}
    assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
    # A match yields (ent_id, label, start, end); the ent_id can then be
    # used to look the entity's attrs back up.
    matcher = Matcher(en_vocab)
    matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
    assert matcher.n_patterns == 0
    assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
    matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
    assert matcher.n_patterns == 1
    matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
    assert len(matches) == 1
    assert len(matches[0]) == 4
    ent_id, label, start, end = matches[0]
    assert ent_id == matcher.vocab.strings[u'TestEntity']
    assert label == 0
    assert start == 0
    assert end == 2
    attrs = matcher.get_entity(ent_id)
    assert attrs == {u'Hello': u'World'}
| Python | 0 | |
2cf812ba2015bfcc392a2f401c253850b31060c7 | Make sure all tags are alphanumeric | perf_insights/perf_insights/upload.py | perf_insights/perf_insights/upload.py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
import webapp2
import uuid
from perf_insights import trace_info
sys.path.append('third_party')
import cloudstorage as gcs
default_retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)
class UploadPage(webapp2.RequestHandler):
    """Accepts trace uploads: stores the payload in Cloud Storage and the
    metadata in a TraceInfo datastore entity keyed by a fresh UUID."""
    def get(self):
        # Minimal manual-upload form for testing from a browser.
        self.response.out.write("""
<html><body>
<head><title>Performance Insights - Trace Uploader</title></head>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><input type="file" name="trace"/></div>
<div><input type="submit" value="Upload"></div>
</form><hr>
</body></html>""")
    def post(self):
        # The UUID doubles as the GCS object name and the datastore key.
        trace_uuid = str(uuid.uuid4())
        bucket_name = ('/performance-insights/' + trace_uuid)
        gcs_file = gcs.open(bucket_name,
                            'w',
                            content_type='application/octet-stream',
                            options={},
                            retry_params=default_retry_params)
        gcs_file.write(self.request.get('trace'))
        gcs_file.close()
        trace_object = trace_info.TraceInfo(id=trace_uuid)
        trace_object.prod = self.request.get('prod')
        trace_object.remote_addr = os.environ["REMOTE_ADDR"]
        tags_string = self.request.get('tags')
        # Tags are only accepted when the entire comma-separated string is
        # alphanumeric; otherwise all tags are dropped.
        if re.match('^[a-zA-Z0-9,]+$', tags_string): # ignore non alpha-numeric tags
            trace_object.tags = tags_string.split(',')
        trace_object.user_agent = self.request.headers.get('User-Agent')
        trace_object.ver = self.request.get('product_version')
        trace_object.put()
        # Respond with the UUID so the client can reference the trace later.
        self.response.write(trace_uuid)
app = webapp2.WSGIApplication([('/upload', UploadPage)])
| # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import webapp2
import uuid
from perf_insights import trace_info
sys.path.append('third_party')
import cloudstorage as gcs
default_retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)
class UploadPage(webapp2.RequestHandler):
    """Accepts trace uploads: stores the payload in Cloud Storage and the
    metadata in a TraceInfo datastore entity keyed by a fresh UUID."""
    def get(self):
        # Minimal manual-upload form for testing from a browser.
        self.response.out.write("""
<html><body>
<head><title>Performance Insights - Trace Uploader</title></head>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><input type="file" name="trace"/></div>
<div><input type="submit" value="Upload"></div>
</form><hr>
</body></html>""")
    def post(self):
        # The UUID doubles as the GCS object name and the datastore key.
        trace_uuid = str(uuid.uuid4())
        bucket_name = ('/performance-insights/' + trace_uuid)
        gcs_file = gcs.open(bucket_name,
                            'w',
                            content_type='application/octet-stream',
                            options={},
                            retry_params=default_retry_params)
        gcs_file.write(self.request.get('trace'))
        gcs_file.close()
        trace_object = trace_info.TraceInfo(id=trace_uuid)
        trace_object.prod = self.request.get('prod')
        trace_object.remote_addr = os.environ["REMOTE_ADDR"]
        tags_string = self.request.get('tags')
        # Any non-empty tag string is accepted verbatim (no validation).
        if len(tags_string):
            trace_object.tags = tags_string.split(',')
        trace_object.user_agent = self.request.headers.get('User-Agent')
        trace_object.ver = self.request.get('product_version')
        trace_object.put()
        # Respond with the UUID so the client can reference the trace later.
        self.response.write(trace_uuid)
app = webapp2.WSGIApplication([('/upload', UploadPage)])
| Python | 0 |
cf31fd1afabb1c37d5e6f1033ded440889f5949b | Create thonplate.py | thonplate.py | thonplate.py | class BaseTag(object):
children = None
parent = None
name = None
indent_string = ' '
    def __init__(self):
        # Give each instance its own children list (the class-level
        # attributes above are only declarations/defaults).
        self.children = []
        self.parent = None
    def __str__(self):
        # str()-ing a tag and rendering it are the same operation.
        return self.render()
def add(self, *args):
for child in args:
if isinstance(child, (str, unicode, long, int, float)):
self.children.append(StringTag(child))
elif isinstance(child, BaseTag):
self.children.append(child)
elif child is None:
# default args are None, so ignoring them is probably the best thing to do
pass
elif isinstance(child, (list, tuple)) or child.__class__.__name__ == 'generator':
for grandchild in child:
self.children.append(grandchild)
else:
raise Exception("Cannot add item to %s of type %s" % (self.name, child.__class__))
return self
    def add_if(self, condition, *args):
        # Conditionally add children; always returns self for chaining.
        if condition:
            return self.add(*args)
        else:
            return self
    def render(self, depth=0):
        # The base tag renders to nothing; subclasses override.
        return ''
    def _get_indent(self, depth):
        # Leading whitespace for a node `depth` levels deep.
        return self.indent_string * depth
class Template(BaseTag):
    """Root of a document tree: renders its children and tracks the page
    title and the tags destined for <head>."""
    template = None
    head_tags = None
    def __init__(self):
        super(Template, self).__init__()
        self.title = None
        self.head_tags = []
    def render(self, depth=-1):
        # The template itself emits no markup; children start at depth 0
        # (depth + 1) and their output is concatenated.
        pieces = [child.render(depth + 1) for child in self.children]
        return ''.join(pieces)
    def get_head_tags(self):
        return self.head_tags
    def add_head_tag(self, tag):
        self.head_tags.append(tag)
    def add_head_tags(self, *tags):
        for extra in tags:
            self.head_tags.append(extra)
    def get_title(self):
        return self.title
class Tag(BaseTag):
    """A generic element: a `name` plus an attribute dict.

    Renders self-closing when it has no children.
    """
    name = None
    attrs = None
    def __init__(self, *args, **kwargs):
        # Positional args must be dicts of attributes; keyword args become
        # attributes too, with `cls` mapped to the reserved word `class`.
        super(Tag, self).__init__()
        self.attrs = {}
        for arg in args:
            if isinstance(arg, dict):
                for key, value in arg.iteritems():
                    self.attrs[key] = value
            else:
                raise Exception('Do not recognize arg of type %s' % arg.__class__)
        for key, value in kwargs.iteritems():
            if key == 'cls':
                key = 'class'
            if value is None:
                value = ''
            self.attrs[key] = value
    def get_attr_string(self):
        # NOTE(review): attribute values are not escaped -- confirm inputs
        # are trusted before rendering user-supplied values.
        attr_string = ''
        for key, value in self.attrs.iteritems():
            attr_string += ' %s="%s"' % (key, value)
        return attr_string
    def get_open_tag(self):
        attr_string = self.get_attr_string()
        return '<%s%s>' % (self.name, attr_string)
    def get_self_closing_tag(self):
        attr_string = self.get_attr_string()
        return '<%s%s/>' % (self.name, attr_string)
    def get_close_tag(self):
        return '</%s>' % self.name
    def render(self, depth=0):
        # Children each render on their own line, one indent level deeper.
        indent = self._get_indent(depth)
        if len(self.children) == 0:
            str = indent + self.get_self_closing_tag() + '\n'
        else:
            str = indent + self.get_open_tag() + '\n'
            for child in self.children:
                str += child.render(depth + 1)
            str += indent + self.get_close_tag() + '\n'
        return str
class StringTag(Tag):
    """Leaf node wrapping a plain text value, rendered verbatim."""
    value = None
    name = 'string_tag'
    def __init__(self, value):
        # Coerce to unicode before Tag.__init__ so value is always text.
        self.value = unicode(value)
        super(StringTag, self).__init__()
    def render(self, depth=0):
        # Text is emitted as-is: no indentation, no trailing newline, and
        # NOTE(review): no HTML escaping -- confirm inputs are trusted.
        return self.value
class CloseTagRequired(Tag):
    """A Tag that always emits an explicit close tag, even when empty.

    A single string child is rendered inline (no surrounding whitespace)
    so e.g. <title>text</title> stays on one line.
    """
    def render(self, depth=0):
        indent = self._get_indent(depth)
        result = indent + self.get_open_tag()
        if len(self.children) == 1 and isinstance(self.children[0], StringTag):
            # if there is only one child and it's a string, do not pad it with whitespace
            for child in self.children:
                result += child.render(depth + 1)
            result += self.get_close_tag() + '\n'
        elif len(self.children) > 0:
            result += '\n'
            for child in self.children:
                result += child.render(depth + 1)
            result += indent + self.get_close_tag() + '\n'
        else:
            result += self.get_close_tag() + '\n'
        return result
# Concrete HTML elements. Plain `Tag` subclasses may render self-closing
# when empty; `CloseTagRequired` subclasses always emit an explicit close
# tag, matching the elements for which browsers require one.
class a(CloseTagRequired):
    name = 'a'
class b(CloseTagRequired):
    name = 'b'
class button(CloseTagRequired):
    name = 'button'
class body(Tag):
    name = 'body'
class em(CloseTagRequired):
    name = 'em'
class div(CloseTagRequired):
    name = 'div'
class form(Tag):
    name = 'form'
class h1(CloseTagRequired):
    name = 'h1'
class h2(CloseTagRequired):
    name = 'h2'
class h3(CloseTagRequired):
    name = 'h3'
class h4(CloseTagRequired):
    name = 'h4'
class h5(CloseTagRequired):
    name = 'h5'
class h6(CloseTagRequired):
    name = 'h6'
class hr(Tag):
    name = 'hr'
class head(Tag):
    name = 'head'
class html(Tag):
    name = 'html'
class i(CloseTagRequired):
    name = 'i'
class label(CloseTagRequired):
    name = 'label'
class li(Tag):
    name = 'li'
class link(Tag):
    name = 'link'
class meta(Tag):
    name = 'meta'
class nav(Tag):
    name = 'nav'
class ol(CloseTagRequired):
    name = 'ol'
class p(CloseTagRequired):
    name = 'p'
class pre(CloseTagRequired):
    name = 'pre'
class strong(CloseTagRequired):
    name = 'strong'
class script(CloseTagRequired):
    name = 'script'
class span(CloseTagRequired):
    name = 'span'
class textarea(CloseTagRequired):
    name = 'textarea'
class title(CloseTagRequired):
    name = 'title'
class ul(Tag):
    name = 'ul'
class input(Tag):
    name = 'input'
class css(link):
    """A <link> tag that loads the stylesheet at `url`."""
    def __init__(self, url, *args):
        super(css, self).__init__(*args)
        self.attrs.update({
            'href': url,
            'type': 'text/css',
            'rel': 'stylesheet',
        })
class js(script):
    """A <script> tag that loads the JavaScript file at `url`."""
    def __init__(self, url, *args, **kwargs):
        super(js, self).__init__(*args, **kwargs)
        self.attrs.update({
            'src': url,
            'type': 'text/javascript',
            'charset': 'utf-8',
        })
class doctype(Tag):
    """The <!DOCTYPE ...> declaration."""
    name = '!DOCTYPE'
    def render(self, depth=0):
        attr_string = ''
        for key, value in self.attrs.iteritems():
            # Empty/None values render as bare attributes, giving the usual
            # <!DOCTYPE html> form.
            if value is None or value == '':
                attr_string += ' %s' % key
            else:
                attr_string += ' %s="%s"' % (key, value)
        indent = depth * Tag.indent_string
        return '%s<!DOCTYPE%s>\n\n' % (indent, attr_string)
| Python | 0.000002 | |
084f9bb8333a7cfb3f4247afbcae62375060fa2b | Add rude graphics mode tester | tests/testpic.py | tests/testpic.py | import serial
import time
import random
import sys
# Give port name of your UART as first argument. No error checking
# here, sorry.
#
# Open the UART and reset the display into a known state.
ser = serial.Serial(sys.argv[1], 9600, timeout = 1)
# Send a str as raw bytes over the serial line.
serwrite = lambda x: ser.write(bytearray(map(ord, x)))
move_to = lambda x, y: serwrite("\x1B[{0};{1}H".format(y, x))
serwrite("xxxxx\x08") # dismiss if we're left in ANSI mode...
serwrite("\x1B[0]") # Text mode
serwrite("\x1B[2J") # Clear screen
serwrite("\x1B[m") # Reset colors
serwrite("\x1B[?7l") # disable wrap
image=[
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"............................... ...............................",
".............................. ..............................",
"............................. .............................",
"............................. .............................",
"............................ ............................",
"........................... ...........................",
"........................... ...........................",
".......................... ..........................",
".......................... .. ..........................",
"......................... .. .........................",
"........................ .... ........................",
"........................ ...... ........................",
"....................... ...... .......................",
"...................... ........ ......................",
"...................... ........ ......................",
"..................... .......... .....................",
".................... ............ ....................",
".................... ............ ....................",
"................... ........ ... ...................",
"................... ........ ... ...................",
".................. ........ ... ..................",
"................. ......... ..... .................",
"................. ............ ....... .................",
"................ .... ......... ................",
"............... .... ......... ...............",
"............... ... ........ ...............",
".............. ... . ......... ..............",
".............. ... .. .......... ..............",
"............. ... . ......... .............",
"............ ..... . .......... ............",
"............ ........ ... ........... ............",
"........... .......... ... ........... ...........",
".......... ........ ... . ............ ..........",
".......... ........ ... ........... ..........",
"......... ......... ... ..... ..... .........",
"........ ......... .... .. ..... ........",
"........ ......... ... ..... ..... ........",
"....... .......... .... ....... ..... .......",
"....... .......... .... ...... ...... .......",
"...... .......... ..... .... ..... ......",
"..... ........... ..... ... ...... .....",
"..... ............ ...... ... ...... .....",
".... ........... ...... ... ...... ....",
"... ............ ....... .. ...... ...",
"... ............. ........ ... ...... ...",
".. ................................................ ..",
".. .................................................. ..",
". .",
" ",
" ",
" ",
". .",
"................................................................",
"................................................................",
"................................................................",
"................................................................",
"................................................................"
]
nums = [ "..", ". ", " .", " " ]
# Pack the 64x64 two-level image into bytes: each output byte encodes a
# 2x4 cell, two bits per 2-char pair looked up in `nums`. Bytes that
# collide with control characters are escaped with ESC (0x1B).
for row in range(16):
    for char in range(32):
        num = 0
        for index in range (row * 4, row * 4 + 4):
            num <<= 2
            num |= nums.index(image[index][(char * 2):(char * 2) + 2][:2])
        if num in [8,10,13,27,127]:
            serwrite(chr(27) + chr(num))
        else:
            serwrite(chr(num))
    if row < 15:
        serwrite(chr(13))
ser.flush()
time.sleep(2)
# Cycle the ESC[n] parameter up and down a few times, then once more up.
for i in range(5):
    for x in range(17):
        serwrite("\x1B[{0}]".format(x))
        ser.flush()
        time.sleep(0.05)
    for x in range(16,-1,-1):
        serwrite("\x1B[{0}]".format(x))
        ser.flush()
        time.sleep(0.05)
for x in range(17):
    serwrite("\x1B[{0}]".format(x))
    ser.flush()
    time.sleep(0.05)
ser.flush()
ser.close()
| Python | 0 | |
a1eff713339d528720ed5999d05a85066018f070 | Add visualise.py | visualise.py | visualise.py | # visualise.py
# Imports
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from uniform_bspline import Contour
# main
def main():
    """Load a fitted uniform B-spline contour from JSON and plot it.

    Shows the data points, their residual segments to the curve, the
    control polygon, and a densely sampled curve (2-D or 3-D per `dim`).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input_path')
    parser.add_argument('--num-samples', type=int, default=1024)
    args = parser.parse_args()
    print 'Input:', args.input_path
    with open(args.input_path, 'rb') as fp:
        z = json.load(fp)
    degree, num_control_points, dim, is_closed = (
        z['degree'], z['num_control_points'], z['dim'], z['is_closed'])
    print ' degree:', degree
    print ' num_control_points:', num_control_points
    print ' dim:', dim
    print ' is_closed:', is_closed
    c = Contour(degree, num_control_points, dim, is_closed=is_closed)
    Y, w, u, X = map(np.array, [z['Y'], z['w'], z['u'], z['X']])
    print ' num_data_points:', Y.shape[0]
    # Use 3-D axes only when the contour lives in three dimensions.
    kw = {}
    if Y.shape[1] == 3:
        kw['projection'] = '3d'
    f = plt.figure()
    ax = f.add_subplot(111, **kw)
    ax.set_aspect('equal')
    def plot(X, *args, **kwargs):
        # Splat coordinate columns so the same call works in 2-D and 3-D.
        ax.plot(*(tuple(X.T) + args), **kwargs)
    plot(Y, 'ro')
    # Residual segment from each data point to its point on the curve.
    for m, y in zip(c.M(u, X), Y):
        plot(np.r_['0,2', m, y], 'k-')
    # Control polygon, then the densely sampled curve itself.
    plot(X, 'bo--', ms=8.0)
    plot(c.M(c.uniform_parameterisation(args.num_samples), X), 'b-', lw=2.0)
    plt.show()
if __name__ == '__main__':
main()
| Python | 0.000369 | |
ee5089a6a16c5a6142444a0ad312fdb641aa845c | Fix tests | test/test.py | test/test.py | #!/usr/bin/env python
import locale
import os
import sys
import unittest
from tempfile import TemporaryFile
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from argparse import ArgumentParser
from argcomplete import *
IFS = '\013'
class TestArgcomplete(unittest.TestCase):
    """Drives argcomplete the way bash would: via the _ARGCOMPLETE and
    COMP_* environment variables."""
    @classmethod
    def setUpClass(cls):
        # Pretend we were invoked by the shell completion hook.
        os.environ['_ARGCOMPLETE'] = "yes"
        os.environ['_ARC_DEBUG'] = "yes"
        os.environ['IFS'] = IFS
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def run_completer(self, parser, command, point=None):
        """Run autocomplete() for `command` and return the completions.

        `point` defaults to the end of the command line. autocomplete()
        exits via sys.exit, so the SystemExit is expected and swallowed.
        """
        with TemporaryFile() as t:
            os.environ['COMP_LINE'] = command
            os.environ['COMP_POINT'] = point if point else str(len(command))
            os.environ['COMP_WORDBREAKS'] = '"\'@><=;|&(:'
            with self.assertRaises(SystemExit):
                autocomplete(parser, output_stream=t, exit_method=sys.exit)
            t.seek(0)
            return t.read().decode(locale.getpreferredencoding()).split(IFS)
    def test_basic_completion(self):
        p = ArgumentParser()
        p.add_argument("--foo")
        p.add_argument("--bar")
        completions = self.run_completer(p, "prog ")
        assert(set(completions) == set(['-h', '--help', '--foo', '--bar']))
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
import locale
import os
import sys
import unittest
from tempfile import TemporaryFile
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from argparse import ArgumentParser
from argcomplete import *
IFS = '\013'
class TestArgcomplete(unittest.TestCase):
@classmethod
def setUpClass(cls):
os.environ['_ARGCOMPLETE'] = "yes"
os.environ['_ARC_DEBUG'] = "yes"
os.environ['IFS'] = IFS
def setUp(self):
pass
def tearDown(self):
pass
def run_completer(self, parser, command, point=None):
with TemporaryFile() as t:
os.environ['COMP_LINE'] = command
os.environ['COMP_POINT'] = point if point else str(len(command))
with self.assertRaises(SystemExit):
autocomplete(parser, output_stream=t, exit_method=sys.exit)
t.seek(0)
return t.read().decode(locale.getpreferredencoding()).split(IFS)
def test_basic_completion(self):
p = ArgumentParser()
p.add_argument("--foo")
p.add_argument("--bar")
completions = self.run_completer(p, "prog ")
assert(set(completions) == set(['-h', '--help', '--foo', '--bar']))
if __name__ == '__main__':
unittest.main()
| Python | 0.000003 |
a10648569bbd5dca44adc3cfd5a128703325932b | Create dihedral_tent.py | dihedral_tent.py | dihedral_tent.py | import numpy as np
import mdtraj as md
import argparse, cPickle
from multiprocessing import Pool
from itertools import product
from itertools import combinations_with_replacement as combinations
from contextlib import closing
def rbins(n=30):
    """Return ``n + 1`` interior bin edges spanning the dihedral range.

    An ``(n + 3)``-point linspace over [-pi, pi] is built and its two
    outermost edges are dropped, giving ``n`` histogram bins strictly
    inside the angular range.
    """
    edges = np.linspace(-np.pi, np.pi, n + 3)
    return edges[1:-1]
def ent(H):
    """Return the Shannon entropy (in bits) of the histogram ``H``.

    ``H`` is an array of non-negative counts; it is normalized to a
    probability distribution before the entropy is computed.

    Fixes two defects of the original ``H /= H.sum()`` implementation:
    it mutated the caller's array in place, and under Python 2 integer
    semantics an integer count array was floor-divided, collapsing the
    distribution (and the entropy) to zero.  Casting to float first makes
    the division true division everywhere and leaves the input untouched.
    """
    p = np.asarray(H, dtype=float)
    p = p / p.sum()
    # 0 * log2(0) is taken as 0 for entropy; nan_to_num maps the nan/-inf
    # terms from empty bins to 0 so they contribute nothing.
    return -np.sum(p * np.nan_to_num(np.log2(p)))
def ent1D(X, r=rbins()):
    """Entropy (bits) of the 1-D histogram of ``X`` over the edges ``r``."""
    counts, _ = np.histogram(X, r)
    return ent(counts)
def ent2D(X, Y, r=rbins()):
    """Joint entropy (bits) of the 2-D histogram of ``(X, Y)``."""
    counts, _, _ = np.histogram2d(X, Y, 2 * [r])
    return ent(counts)
def ent3D(X, Y, Z, r=rbins()):
    """Joint entropy (bits) of the 3-D histogram of ``(X, Y, Z)``."""
    samples = np.vstack((X, Y, Z)).T
    counts, _ = np.histogramdd(samples, 3 * [r])
    return ent(counts)
def ce(X, Y):
    """Conditional entropy H(X|Y) = H(X, Y) - H(Y), in bits."""
    joint = ent2D(X, Y)
    marginal = ent1D(Y)
    return joint - marginal
def cmi(X, Y, Z):
    """Conditional mutual information I(X;Y|Z), in bits."""
    xz = ent2D(X, Z)
    yz = ent2D(Y, Z)
    xyz = ent3D(X, Y, Z)
    z = ent1D(Z)
    return xz + yz - xyz - z
def dihedrals(traj):
    """Return a list of per-kind dihedral-angle arrays for *traj*.

    Each mdtraj ``compute_*`` helper returns a (indices, angles) pair; only
    the angles are kept, transposed so axis 0 indexes dihedrals and axis 1
    indexes frames.  chi1/chi2 are currently disabled.
    """
    kinds = [md.compute_phi,
             md.compute_psi]
             #md.compute_chi1,
             #md.compute_chi2]
    return [kind(traj)[1].T for kind in kinds]
def f(cD, pD):
    """Build the per-index-pair transfer-entropy summand.

    cD/pD are lists (one entry per dihedral kind) of (n, n_frames) angle
    arrays for the current and past windows.  The returned callable maps an
    index pair ``i = (i0, i1)`` to the sum, over dihedral-kind pairs, of
    cmi(current[i0], past[i1] | past[i0]).
    """
    g = lambda i: sum([cmi(cD[d[0]][i[0]], pD[d[1]][i[1]], pD[d[0]][i[0]]) for d in combinations(range(len(cD)), 2)])
    # NOTE(review): plain lambdas are not picklable, which Pool.map normally
    # requires; presumably this relies on fork-based workers -- confirm.
    g.__name__ = 'g'
    return g
def h(cD, pD):
    """Build the per-index conditional-entropy normalizer.

    The returned callable maps an index pair ``i`` (only ``i[0]`` is used)
    to the sum over dihedral-kind pairs of ce(current[i0], past[i0]),
    i.e. H(current | past), used by run() to normalize the CMI.
    """
    q = lambda i: sum([ce(cD[d[0]][i[0]], pD[d[0]][i[0]]) for d in combinations(range(len(cD)), 2)])
    q.__name__ = 'q'
    return q
def run(current, past, iter, N):
    """Compute the directed transfer-entropy matrix between dihedral indices.

    current/past: mdtraj trajectories for the two time windows.
    iter: number of shuffle iterations used to estimate the background.
    N: worker-process count.

    Returns an antisymmetric (n, n) matrix of normalized net transfer
    entropies (T.T - T; the sign convention for direction should be
    confirmed against downstream use).
    """
    cD = dihedrals(current)
    pD = dihedrals(past)
    n = cD[0].shape[0]
    R = []
    q = h(cD, pD)
    # First pass uses the real data; the remaining `iter` passes shuffle the
    # angle arrays to build a null distribution of the CMI.
    for i in range(iter+1):
        g = f(cD, pD)
        with closing(Pool(processes=N)) as pool:
            R.append(np.reshape(pool.map(g, product(range(n), range(n))), (n, n)))
            pool.terminate()
        [np.random.shuffle(d) for d in cD]
        [np.random.shuffle(d) for d in pD]
    # Excess CMI over the mean of the shuffled backgrounds.
    CMI = R[0] - np.mean(R[1:], axis = 0)
    with closing(Pool(processes=N)) as pool:
        # Diagonal conditional entropies, broadcast across columns and
        # transposed so row i is normalized by H(current_i | past_i).
        CH = (pool.map(q, zip(*(2*[range(n)])))*np.ones((n,n))).T
        pool.terminate()
    T = CMI/CH
    # Net (directed) flow is the difference between the two directions.
    return T.T - T
def parse_cmdln():
    """Parse the command-line options for the transfer-entropy calculation."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-c', '--current', dest='current',
                        help='File containing current step states.')
    parser.add_argument('-p', '--past', dest='past',
                        help='File containing past step states.')
    parser.add_argument('-t', '--topology', dest='top',
                        help='File containing topology.', default=None)
    parser.add_argument('-s', '--shuffle-iter', dest='iter',
                        help='Number of shuffle iterations.', default=100, type=int)
    parser.add_argument('-n', '--n-proc', dest='N',
                        help='Number of processors', default=4, type=int)
    parser.add_argument('-o', '--output', dest='out',
                        help='Name of output file.', default='tent.pkl')
    return parser.parse_args()
if __name__=="__main__":
options = parse_cmdln()
current = md.load(options.current, top = options.top)
past = md.load(options.past, top = options.top)
D = run(current, past, options.iter, options.N)
cPickle.dump(D, open(options.out, 'wb'))
| Python | 0.000042 | |
13e45a8578e57e2cb55b29980b0f3326dd393a20 | Create sump_monitor.py | sump_monitor.py | sump_monitor.py | #Import the required modules
import RPi.GPIO as GPIO
import time
import requests
import math
#Setup the GPIO (BCM pin numbering)
GPIO.setmode(GPIO.BCM)
#Define the TRIG and ECHO pins - these are labeled on the sensor
TRIG = 23
ECHO = 24
#Number of readings to take; the middle of the sorted readings is kept
#to reject outliers
numreadings = 7
#Alert that we are starting the measurement
print "Distance Measurement In Progress"
#Take numreadings ultrasonic measurements
distancearray=[]
count = 0
while (count < numreadings):
    #Setup the two pins for reading
    GPIO.setup(TRIG,GPIO.OUT)
    GPIO.setup(ECHO,GPIO.IN)
    GPIO.output(TRIG, False)
    print "Waiting For Sensor To Settle"
    time.sleep(2)
    #A short trigger pulse starts one measurement cycle
    GPIO.output(TRIG, True)
    time.sleep(0.00001)
    GPIO.output(TRIG, False)
    # NOTE(review): these busy-wait loops can hang forever if the echo line
    # never changes state, and pulse_start/pulse_end remain unbound if a
    # loop body never executes -- consider adding a timeout.
    while GPIO.input(ECHO)==0:
        pulse_start = time.time()
    while GPIO.input(ECHO)==1:
        pulse_end = time.time()
    pulse_duration = pulse_end - pulse_start
    # presumably echo time * speed-of-sound/2 (17150 cm/s) -- confirm
    # against the sensor datasheet
    distance = pulse_duration * 17150
    distance = round(distance, 2)
    print "Distance:",distance,"cm"
    distancearray.append(distance)
    count = count + 1
#Get the half of the reading number and round up
# NOTE(review): for numreadings=7 this yields index 4, one above the true
# median index 3 of the sorted 7-element list.
mid = numreadings / 2
mid = int(math.ceil(mid))
#Sort the array so the middle element approximates the median
distancearray.sort()
#Just for debugging
print distancearray
print distancearray[mid]
#Put the middle value back into the distance variable
distance = distancearray[mid]
#Write the data to the influxdb instance using the HTTP line protocol
data = 'environment,host=rpi1,location=basement,type=sumppump value=' + str(distance)
print data
output = requests.post('http://192.168.9.42:8086/write?db=home', data=data)
print output
#Release connections to the GPIO pins
GPIO.cleanup()
| Python | 0.000002 | |
9a6ca54f7cca0bd5f21f0bc590a034e7e3e05b6e | Add migration to add userprofiles to existing users | src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py | src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
    """Create an empty UserProfile for every existing User.

    Uses the historical models supplied by the migration framework via
    ``apps.get_model`` rather than importing the current models directly.
    # NOTE(review): assumes no User already has a profile; a pre-existing
    # one-to-one profile would make the create() fail -- confirm.
    """
    User = apps.get_model('auth', 'User')
    UserProfile = apps.get_model('user', 'UserProfile')
    for user in User.objects.all():
        UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
    """Data migration backfilling UserProfile rows for pre-existing users."""
    # Must run after the migration that created the UserProfile model.
    dependencies = [
        ('user', '0001_initial')
    ]
    operations = [
        # NOTE(review): no reverse function is supplied, so this migration
        # is irreversible -- confirm that is intended.
        migrations.RunPython(create_user_profiles_for_existing_users)
    ]
| Python | 0 | |
a47d2654a5e23417c9e23f2ad19ed1b150524337 | add new mtc script | trunk/mtc.py | trunk/mtc.py | #!/usr/bin/python
"""
(C) Legoktm, 2008
Distributed under the terms of the MIT license.
__version__ = '$Id: $'
"""
import urllib, re, time
import os, sys
sys.path.append(os.environ['HOME'] + '/stuffs/pywiki/pylegoktm')
import wikipedia, pagegenerators, catlib
from image import *
from upload import UploadRobot
def delink(name):
    """Strip wiki link brackets: ``[[Target]]`` becomes ``Target``.

    ``name`` may be any object (callers pass page objects as well as
    strings); it is coerced to ``str`` first.
    """
    text = str(name)
    link_pattern = re.compile(r'\[\[(.*?)\]\]', re.IGNORECASE)
    return link_pattern.sub(r'\1', text)
def defilelink(name):
    """Strip a file link: ``[[File:Name.jpg]]`` becomes ``Name.jpg``."""
    text = str(name)
    file_pattern = re.compile(r'\[\[File:(.*?)\]\]', re.IGNORECASE)
    return file_pattern.sub(r'\1', text)
#SITES
wikien = wikipedia.getSite(code = 'en', fam = 'wikipedia')
commons = wikipedia.getSite(code = 'commons', fam = 'commons')
#FIX ERRORS that CommonsHelper makes
def fixdescrip(des):
    """Repair common mistakes in CommonsHelper-generated descriptions.

    Applies a fixed sequence of case-insensitive substitutions (order
    matters: the more specific interwiki-prefixed forms must be rewritten
    before the generic ones) and drops the "uncategorized" placeholder
    once the text actually mentions a category.
    """
    substitutions = [
        (r'\[\[wikipedia:commons:Category:(.*?)\]\]', r'[[Category:\1]]'),
        (r'\[\[commons:Category:(.*?)\]\]', r'[[Category:\1]]'),
        (r'\[\[wikipedia:commons:(.*?)\]\]', r'[[\1]]'),
        (r'\[\[:en:commons:(.*?)\]\]', r'[[\1]]'),
        (r'index.php\?title=Image', r'index.php?title=File'),
        (r'\[http://en.wikipedia.org en.wikipedia\]', r'[[:en:w|en.wikipedia]]'),
        (r'was stated to be made', r'was made'),
    ]
    for pattern, replacement in substitutions:
        des = re.compile(pattern, re.IGNORECASE).sub(replacement, des)
    if re.search('category', des, re.I):
        des = re.compile(
            r'\{\{subst:Unc\}\} <!\-\- Remove this line once you have added categories \-\->',
            re.IGNORECASE).sub(r'', des)
    return des
#Get the description from CH
def ch2(name):
params = {
'language' : 'en',
'image' : defilelink(name),
'project' : 'wikipedia',
'username' : 'Legoktm',
'doit' : 'Get_text',
}
print 'The parameters are:\n%s' %(str(params))
params = urllib.urlencode(params)
f = urllib.urlopen("http://toolserver.org/~magnus/commonshelper.php", params)
ch2text = f.read()
f.close()
tablock = ch2text.split('<textarea ')[1].split('>')[0]
descrip = ch2text.split('<textarea '+tablock+'>')[1].split('</textarea>')[0]
print 'Recieved info from CommonsHelper about %s:' %(delink(name))
descrip = fixdescrip(descrip)
print descrip
time.sleep(15)
return descrip
#Upload the image
def upload(name):
    """Upload the enwiki image *name* to Commons with a CommonsHelper description."""
    descrip = ch2(name)
    print 'Uploading %s to commons:commons.' %(delink(name))
    #wikipedia.showDiff('', descrip)
    # Pause before uploading to stay within bot rate limits.
    time.sleep(20)
    bot = UploadRobot(name.fileUrl(), description=descrip, useFilename=name.fileUrl(), keepFilename=True, verifyDescription=False, targetSite = commons)
    bot.run()
    print '%s was uploaded to commons:commons.' %(delink(name))
#Edit enwiki page to reflect movement
def ncd(name):
    """Tag the enwiki file page with {{subst:ncd}} after a Commons copy.

    Removes any move-to-Commons request templates from the page text and
    prepends the "Now Commons" notice, then saves the page.
    """
    name = delink(name)
    page = wikipedia.Page(wikien, name)
    wikitext = page.get()
    state0 = wikitext
    moveToCommonsTemplate = [r'Commons ok', r'Copy to Wikimedia Commons', r'Move to commons', r'Movetocommons', r'To commons', r'Copy to Wikimedia Commons by BotMultichill']
    for moveTemplate in moveToCommonsTemplate:
        wikitext = re.sub(r'\{\{' + moveTemplate + r'\}\}', u'', wikitext)
    wikitext = '{{subst:ncd}}\n' + wikitext
    print 'about to ncd'
    wikipedia.showDiff(state0, wikitext)
    # Throttle before the page save.
    time.sleep(15)
    page.put(wikitext, u'File is now available on Wikimedia Commons.')
def moveimage(name):
    """Copy one enwiki image to Commons (if needed) and tag the local page."""
    #HACK
    name = str(name)
    name = re.compile(r'\[\[(.*?)\]\]', re.IGNORECASE).sub(r'\1', name)
    name = wikipedia.ImagePage(wikien, name)
    if wikipedia.Page(commons, delink(name)).exists():
        # Already on Commons: just tag the local description page.
        print 'pre ncd'
        print delink(name)
        ncd(name)
        return
    upload(name)
    # NOTE(review): `page` is undefined here, so this line raises NameError
    # after every upload; it presumably should be ncd(name).
    ncd(page)
#Use the gen and go!
def findimages():
    """Iterate every page transcluding {{Commons ok}} and move its image."""
    wikien = wikipedia.getSite(code = 'en', fam = 'wikipedia')
    commons = wikipedia.getSite(code = 'commons', fam = 'commons')
    transclusionPage = wikipedia.Page(wikien, 'Template:Commons ok')
    gen = pagegenerators.ReferringPageGenerator(transclusionPage, onlyTemplateInclusion = True)
    # Alternative source: the "Copy to Wikimedia Commons" category.
#	category = catlib.Category(wikien, 'Copy to Wikimedia Commons')
#	gen = pagegenerators.CategorizedPageGenerator(category, recurse=True)
    for page in gen:
        print page
        moveimage(page)
if __name__ == "__main__":
try:
findimages()
finally:
wikipedia.stopme() | Python | 0.000001 | |
c2d658ed1caa91eb963a3df850b5cf9b99633f69 | Add missing transpose.py | python/bifrost/transpose.py | python/bifrost/transpose.py |
# Copyright (c) 2016, The Bifrost Authors. All rights reserved.
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from libbifrost import _bf, _check, _get, _string2space
from ndarray import asarray
import ctypes
def transpose(dst, src, axes=None):
    """Permute the axes of *src* into *dst* via the Bifrost Transpose call.

    axes: iterable giving the source axis for each destination axis;
    defaults to a full reversal of the axes, matching numpy.transpose.
    """
    if axes is None:
        axes = reversed(range(len(dst.shape)))
    dst_bf = asarray(dst).as_BFarray()
    src_bf = asarray(src).as_BFarray()
    # NOTE(review): the default axes length comes from dst.shape while the
    # ctypes array is sized by src.ndim -- fine when the ranks match (as a
    # transpose requires), but worth confirming.
    array_type = ctypes.c_int*src.ndim
    axes_array = array_type(*axes)
    _check(_bf.Transpose(src_bf, dst_bf, axes_array))
| Python | 0.006976 | |
f46b08ce3d45b44d3f71759705e8045322c6155d | Create __init__.py | pythainlp/spell/__init__.py | pythainlp/spell/__init__.py | # TODO
| Python | 0.000429 | |
1d70b3600ed7e56ad610787d1d5f8c7980121b8f | Add lzyf compreesion | yay0/lzyf.py | yay0/lzyf.py | # Compressor for LZYF
import yay0, logging, struct
maxOffsets = [16, 32, 1024]
maxLengths = {16: 513, 32: 4, 1024: 17}
log = logging.getLogger("lzyf")
def compress(src):
    """LZYF-compress *src* (a bytes-like object).

    Alternates literal "copy runs" (a length byte followed by raw bytes)
    with back-references found by yay0.checkRunlength, choosing between
    three encodings by offset/length range:
      A: 1 byte,  offset <= 32,   length <= 4
      B: 2 bytes, offset <= 1024, length <= 17
      C: 2 bytes, offset <= 16,   length <= 513
    Returns (dst_size, src_size, dst).
    """
    src_size = len(src)
    dst_size = 0
    dst = bytearray()
    src_pos = 0
    rl = 0
    ctrl_byte = 0
    buf = bytearray()
    # Start a copy-run
    buf.append(src[src_pos])
    src_pos += 1
    rl += 1
    while src_pos < src_size:
        # Candidate back-references: short-offset/long-length (C) and
        # long-offset (A/B) windows.
        pos1, len1 = yay0.checkRunlength(src_pos, src_size, src, maxOffsets[0], maxLengths[maxOffsets[0]])
        pos2, len2 = yay0.checkRunlength(src_pos, src_size, src, maxOffsets[2], maxLengths[maxOffsets[2]])
        if len1 < 2 and len2 < 2:
            # No repeat pattern, add to or create copy run
            buf.append(src[src_pos])
            rl += 1
            src_pos +=1
            if rl == 0x1F:
                # Copy-run length field is 5 bits, so flush at 31 bytes.
                log.info("Copy run of {} ({}) from {} to {} at {} to {}".format(rl, len(buf), src_pos-rl, src_pos, dst_size, dst_size+rl+1))
                dst.append(rl)
                dst.extend(buf)
                dst_size += len(buf) + 1
                buf = bytearray()
                rl = 0
        else:
            # output existing copy run, if any
            if rl != 0:
                log.info("Copy run of {} ({}) from {} to {} at {} to {}".format(rl, len(buf), src_pos-rl, src_pos, dst_size, dst_size+rl+1))
                dst.append(rl)
                dst.extend(buf)
                dst_size += len(buf) + 1
                buf = bytearray()
                rl = 0
            # log
            if len1 > len2:
                # encode pos1, len1 using C
                v = src_pos-pos1-1
                ctrl_byte = 0x2000 | ((v & 0x0F) << 9) | ((len1-2) & 0x1FF)
                dst.extend(ctrl_byte.to_bytes(2, byteorder='big'))
                dst_size += 2
                src_pos += len1
            elif len2 <= maxLengths[maxOffsets[1]] and pos2 <= maxOffsets[1]:
                # encode pos2, len2 using A
                # NOTE(review): the offset check compares the absolute
                # position pos2 rather than the distance src_pos - pos2 - 1
                # that is actually encoded -- confirm this is intended.
                v = src_pos - pos2 - 1
                ctrl_byte = 0x80 | ((v<<2) & 0x7c) | ((len2-1) & 0x03)
                dst.append(ctrl_byte)
                dst_size += 1
                src_pos += len2
            else:
                # encode pos2, len2 using B
                v = src_pos - pos2 - 1
                ctrl_byte = 0x4000 | ((v<<4) & 0x3FF0) | ((len2-2) & 0x0F)
                dst.extend(ctrl_byte.to_bytes(2, byteorder='big'))
                dst_size += 2
                src_pos += len2
    # Flush any trailing copy run.
    if rl != 0:
        log.info("Copy run of {} ({}) from {} to {} at {} to {}".format(rl, len(buf), src_pos-rl, src_pos, dst_size, dst_size+rl+1))
        dst.append(rl)
        dst.extend(buf)
        dst_size += len(buf) + 1
        buf = bytearray()
        rl = 0
    log.info("Encoded {} into {} bytes.".format(src_size, dst_size))
    return (dst_size, src_size, dst)
def analyzeRuns(data):
    """Log every back-reference run of length > 1 found in *data* (debug aid)."""
    for i in range(len(data)):
        p, l = yay0.checkRunlength(i, len(data), data, 1024, 513)
        if l>1:
            log.info("{}: Found run of {} at {}".format(i, l, p))
        # i += l
| Python | 0.999393 | |
82069f44f8b8bcb9f7b4df9a267a8641c54b0442 | convert dwt_idwt doctests to nose tests. | pywt/tests/test_dwt_idwt.py | pywt/tests/test_dwt_idwt.py | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (run_module_suite, assert_allclose, assert_,
assert_raises, dec)
import pywt
def test_dwt_idwt_basic():
    """dwt with db2 yields known coefficients and idwt round-trips them."""
    x = [3, 7, 1, 1, -2, 5, 4, 6]
    cA, cD = pywt.dwt(x, 'db2')
    cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
    cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
                 1.22474487]
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)
    x_roundtrip = pywt.idwt(cA, cD, 'db2')
    assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_wavelet_kwd():
    """dwt accepts a Wavelet object via keyword and the 'cpd' mode."""
    x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
    w = pywt.Wavelet('sym3')
    cA, cD = pywt.dwt(x, wavelet=w, mode='cpd')
    cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
                 7.81994027]
    cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
                 -0.09722957, -0.07045258]
    assert_allclose(cA, cA_expect)
    assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
    """dwt_coeff_len returns the expected length for each padding mode."""
    x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
    w = pywt.Wavelet('sym3')
    ln = pywt.dwt_coeff_len(data_len=len(x), filter_len=w.dec_len, mode='sym')
    assert_(ln == 6)
    ln_modes = [pywt.dwt_coeff_len(len(x), w.dec_len, mode) for mode in
                pywt.MODES.modes]
    assert_allclose(ln_modes, [6, 6, 6, 6, 6, 4])
@dec.knownfailureif(True, "None input not yet working")
def test_idwt_none_input():
    """A None coefficient array should act as an all-zeros array."""
    # None input equals arrays of zeros of the right length
    res1 = pywt.idwt([1,2,0,1], None, 'db2', 'sym')
    res2 = pywt.idwt([1, 2, 0, 1], [0, 0, 0, 0], 'db2', 'sym')
    assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
    res1 = pywt.idwt(None, [1, 2, 0, 1], 'db2', 'sym')
    res2 = pywt.idwt([0, 0, 0, 0], [1, 2, 0, 1], 'db2', 'sym')
    assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
    # Only one argument at a time can be None
    assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'sym')
def test_idwt_correct_size_kw():
    """correct_size=True allows cA to be one element longer than cD."""
    res = pywt.idwt([1, 2, 3, 4, 5], [1, 2, 3, 4], 'db2', 'sym',
                    correct_size=True)
    expected = [1.76776695, 0.61237244, 3.18198052, 0.61237244, 4.59619408,
                0.61237244]
    assert_allclose(res, expected)
    # Mismatched lengths without the keyword, or cD longer than cA, fail.
    assert_raises(ValueError, pywt.idwt,
                  [1, 2, 3, 4, 5], [1, 2, 3, 4], 'db2', 'sym')
    assert_raises(ValueError, pywt.idwt, [1, 2, 3, 4], [1, 2, 3, 4, 5], 'db2',
                  'sym', correct_size=True)
def test_idwt_invalid_input():
    """Coefficient arrays shorter than the filter length are rejected."""
    # Too short, min length is 4 for 'db4':
    assert_raises(ValueError, pywt.idwt, [1,2,4], [4,1,3], 'db4', 'sym')
if __name__ == '__main__':
run_module_suite()
| Python | 0 | |
34001081c2cfaa86d85f7a5b51925dca4a6e1a9f | Use Python 3 type syntax in `zerver/webhooks/yo/view.py`. | zerver/webhooks/yo/view.py | zerver/webhooks/yo/view.py | # Webhooks for external integrations.
from typing import Optional
import ujson
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_private_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_user
@api_key_only_webhook_view('Yo')
@has_request_variables
def api_yo_app_webhook(request: HttpRequest, user_profile: UserProfile,
                       email: str = REQ(default=""),
                       username: str = REQ(default='Yo Bot'),
                       topic: Optional[str] = REQ(default=None),
                       user_ip: Optional[str] = REQ(default=None)) -> HttpResponse:
    """Handle an incoming Yo webhook by sending a private "Yo" message.

    email identifies the Zulip recipient, looked up in the bot owner's
    realm.  topic and user_ip are accepted from the request but unused.
    """
    body = ('Yo from %s') % (username,)
    receiving_user = get_user(email, user_profile.realm)
    check_send_private_message(user_profile, request.client, receiving_user, body)
    return json_success()
| # Webhooks for external integrations.
from typing import Optional
import ujson
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_private_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_user
@api_key_only_webhook_view('Yo')
@has_request_variables
def api_yo_app_webhook(request, user_profile, email=REQ(default=""),
username=REQ(default='Yo Bot'), topic=REQ(default=None),
user_ip=REQ(default=None)):
# type: (HttpRequest, UserProfile, str, str, Optional[str], Optional[str]) -> HttpResponse
body = ('Yo from %s') % (username,)
receiving_user = get_user(email, user_profile.realm)
check_send_private_message(user_profile, request.client, receiving_user, body)
return json_success()
| Python | 0 |
8c98d12a08617b9a1ab1a264b826f5e9046eca05 | Add getHWND/getAllWindows utility functions for bots. | assisstant/bots/utility.py | assisstant/bots/utility.py | import subprocess
# criteria: dictionary that has key/values to match against.
# e.g. {"wm_class": "Navigator.Firefox"}
def getHWND(criteria):
    """Return the first window whose properties include all of *criteria*.

    criteria: dict of key/value pairs that must all be present in the
    window dict, e.g. ``{"wm_class": "Navigator.Firefox"}``.  Returns
    ``None`` when no window matches.
    """
    for candidate in getAllWindows():
        if criteria.items() <= candidate.items():
            return candidate
    return None
def getAllWindows():
    """Return a dict per window as reported by ``wmctrl -l -p -x``."""
    windows = []
    with subprocess.Popen(["wmctrl", "-l", "-p", "-x"], stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:
        for line in p.stdout:
            tokens = line.split()
            # tokens[4] is skipped (presumably the host-name column of
            # wmctrl's output -- confirm); the rest forms the title, which
            # is re-joined so internal whitespace runs are collapsed.
            windows.append({"hwnd": tokens[0], "workspace": tokens[1], "pid": tokens[2], "wm_class": tokens[3], "title": " ".join(tokens[5:])})
    return windows
| Python | 0 | |
d9133f865c8f0c64e589e902c88a8e85feb77963 | remove call to the deleted function. | tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py | tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Moves source files to match Arduino library conventions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import os
import six
def rename_example_subfolder_files(library_dir):
    """Moves source files in example subfolders to equivalents at root.

    Arduino examples may not contain subdirectories, so a file such as
    ``examples/foo/bar/baz.cpp`` is renamed to ``examples/foo/bar_baz.cpp``.
    """
    for pattern in ('*.h', '*.cpp', '*.c'):
        search_path = os.path.join(library_dir, 'examples/*/*', pattern)
        for old_path in glob.glob(search_path):
            parent_dir, base_name = os.path.split(old_path)
            os.rename(old_path, parent_dir + '_' + base_name)
def move_person_data(library_dir):
    """Moves the downloaded person model into the examples folder.

    If the downloaded model source exists under src/, relocate it next to
    the person_detection example and rewrite its #include to the local
    header name (Arduino examples cannot use the deep repo-relative path).
    """
    old_person_data_path = os.path.join(
        library_dir, 'src/tensorflow/lite/micro/tools/make/downloads/' +
        'person_model_int8/person_detect_model_data.cpp')
    new_person_data_path = os.path.join(
        library_dir, 'examples/person_detection/person_detect_model_data.cpp')
    if os.path.exists(old_person_data_path):
        os.rename(old_person_data_path, new_person_data_path)
        # Update include.
        with open(new_person_data_path, 'r') as source_file:
            file_contents = source_file.read()
        file_contents = file_contents.replace(
            six.ensure_str('#include "tensorflow/lite/micro/examples/' +
                           'person_detection/person_detect_model_data.h"'),
            '#include "person_detect_model_data.h"')
        with open(new_person_data_path, 'w') as source_file:
            source_file.write(file_contents)
def rename_example_main_inos(library_dir):
    """Makes sure the .ino sketch files match the example name.

    The Arduino IDE requires ``examples/foo/foo.ino``, so every
    ``examples/foo/main.ino`` is renamed after its parent directory.
    """
    for main_ino in glob.glob(os.path.join(library_dir, 'examples/*', 'main.ino')):
        example_dir = os.path.dirname(main_ino)
        sketch_name = os.path.basename(example_dir) + '.ino'
        os.rename(main_ino, os.path.join(example_dir, sketch_name))
def main(unparsed_args):
    """Control the rewriting of source files.

    unparsed_args: positional CLI arguments; the first one is the Arduino
    library directory to rewrite in place.
    """
    library_dir = unparsed_args[0]
    rename_example_subfolder_files(library_dir)
    rename_example_main_inos(library_dir)
    move_person_data(library_dir)
def parse_args():
    """Converts the raw arguments into accessible flags."""
    parser = argparse.ArgumentParser()
    _, unparsed_args = parser.parse_known_args()
    # NOTE(review): despite its name, this function also drives the whole
    # rewrite by invoking main() with the leftover positional arguments.
    main(unparsed_args)
if __name__ == '__main__':
parse_args()
| # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Moves source files to match Arduino library conventions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import os
import six
def rename_example_subfolder_files(library_dir):
"""Moves source files in example subfolders to equivalents at root."""
patterns = ['*.h', '*.cpp', '*.c']
for pattern in patterns:
search_path = os.path.join(library_dir, 'examples/*/*', pattern)
for source_file_path in glob.glob(search_path):
source_file_dir = os.path.dirname(source_file_path)
source_file_base = os.path.basename(source_file_path)
new_source_file_path = source_file_dir + '_' + source_file_base
os.rename(source_file_path, new_source_file_path)
def move_person_data(library_dir):
"""Moves the downloaded person model into the examples folder."""
old_person_data_path = os.path.join(
library_dir, 'src/tensorflow/lite/micro/tools/make/downloads/' +
'person_model_int8/person_detect_model_data.cpp')
new_person_data_path = os.path.join(
library_dir, 'examples/person_detection/person_detect_model_data.cpp')
if os.path.exists(old_person_data_path):
os.rename(old_person_data_path, new_person_data_path)
# Update include.
with open(new_person_data_path, 'r') as source_file:
file_contents = source_file.read()
file_contents = file_contents.replace(
six.ensure_str('#include "tensorflow/lite/micro/examples/' +
'person_detection/person_detect_model_data.h"'),
'#include "person_detect_model_data.h"')
with open(new_person_data_path, 'w') as source_file:
source_file.write(file_contents)
def rename_example_main_inos(library_dir):
"""Makes sure the .ino sketch files match the example name."""
search_path = os.path.join(library_dir, 'examples/*', 'main.ino')
for ino_path in glob.glob(search_path):
example_path = os.path.dirname(ino_path)
example_name = os.path.basename(example_path)
new_ino_path = os.path.join(example_path, example_name + '.ino')
os.rename(ino_path, new_ino_path)
def main(unparsed_args):
"""Control the rewriting of source files."""
library_dir = unparsed_args[0]
rename_example_subfolder_files(library_dir)
rename_example_main_inos(library_dir)
move_person_data(library_dir)
move_image_data_experimental(library_dir)
def parse_args():
"""Converts the raw arguments into accessible flags."""
parser = argparse.ArgumentParser()
_, unparsed_args = parser.parse_known_args()
main(unparsed_args)
if __name__ == '__main__':
parse_args()
| Python | 0 |
4912c8261dba456e8e4a62051afdf01565f20ae9 | Add first iteration of raw_to_average_jpegs.py. | raw_to_average_jpegs.py | raw_to_average_jpegs.py | #! /usr/bin/env python
#
# Tested on Macs. First run `brew install ufraw exiftool`
import argparse
import glob
import multiprocessing as mp
import os
import subprocess
def parseArgs():
    """Parse the command line; returns (parser, args).

    The parser is returned alongside the namespace so callers could print
    help, though main() currently ignores it.
    """
    desc = 'Auto-white-balance raw images and create average-sized JPEG files with their EXIF info.'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-p', '--path', dest='imagesPath', default=os.getcwd(),
                        help='Sets the path containing the DNG images. Default is the current ' + \
                             'working directory, which is: %(default)s')
    return parser, parser.parse_args()
def processFiles(fname):
    """Convert one DNG to a 2048px auto-white-balanced JPEG, copying EXIF.

    Runs ufraw-batch for the conversion, then exiftool to transfer the EXIF
    tags (except Orientation) from the raw file onto the new .jpg sibling.
    """
    subprocess.check_call(['ufraw-batch', '--wb=auto', '--overwrite',
                           '--size=2048', '--out-type=jpeg', fname])
    subprocess.check_call(['exiftool', '-overwrite_original', '-q', '-x', 'Orientation',
                           '-TagsFromFile', fname, fname.replace('.DNG', '.jpg')])
def workingProgramCheck(prog):
    '''Raise an Exception if *prog* is not accessible on the system PATH.

    The original implementation shelled out to ``which -s``, a BSD/macOS
    specific flag that fails on GNU systems; ``shutil.which`` is portable,
    avoids the subprocess entirely, and reports the same condition.  The
    generic Exception type is kept for backward compatibility with callers.
    '''
    import shutil
    if shutil.which(prog) is None:
        raise Exception(prog + ' is not accessible on the system.')
raise Exception(prog + ' is not accessible on the system.')
def main():
    """Validate required tools, then convert all DNGs in parallel."""
    # parser is unused here; parseArgs returns it alongside the namespace.
    parser, args = parseArgs()
    # Check whether ufraw and exiftool are working properly.
    workingProgramCheck('ufraw-batch')
    workingProgramCheck('exiftool')
    pool = mp.Pool(mp.cpu_count())
    for fname in glob.glob(os.path.normpath(os.path.join(args.imagesPath, '*.DNG'))):
        pool.apply_async(processFiles, [fname])
    pool.close()
    # Block until every queued conversion has finished.
    pool.join()
if __name__ == '__main__':
main()
| Python | 0.000001 | |
6aaa08a48dade981de18b117363357fdffaeb641 | add python_capstone_setup.py | suite/python_capstone_setup.py | suite/python_capstone_setup.py | #!/bin/sh
# this prints out Capstone setup & core+Python-binding versions
python -c "import capstone; print capstone.debug()"
| Python | 0.000012 | |
077cf46ab42c76bf3a854142a4f530625a377837 | Create tutorial2.py | tutorial2.py | tutorial2.py | Python | 0 | ||
1bb1ececfcd548d52a28b713f4ee7eb4e710da85 | Add an example of using fchollet multi_gpu_model on InceptionV3. | keras_tf_multigpu/examples/fchollet_inception3_multigpu.py | keras_tf_multigpu/examples/fchollet_inception3_multigpu.py | import tensorflow as tf
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model
import numpy as np
# Benchmark configuration: synthetic ImageNet-sized inputs.
num_samples = 1000
height = 224
width = 224
num_classes = 1000
gpu_count = 2
# Instantiate the base model
# (here, we do it on CPU, which is optional).
with tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):
    model = InceptionV3(weights=None,
                        input_shape=(height, width, 3),
                        classes=num_classes)
# Replicates the model on N GPUs.
# This assumes that your machine has N available GPUs.
if gpu_count > 1:
    parallel_model = multi_gpu_model(model, gpus=gpu_count)
else:
    parallel_model = model
parallel_model.compile(loss='categorical_crossentropy',
                       optimizer='rmsprop')
# Generate dummy data.
# NOTE(review): y is uniform random rather than one-hot, which is fine for
# a throughput benchmark but meaningless as a training target.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on N GPUs.
# Since the batch size is N*32, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)
| Python | 0 | |
0aa472a110308c8d8ccafd080c5f3d73a8d8098d | add azure role assignment module (#52623) | lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py | lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py | #!/usr/bin/python
#
# Copyright (c) 2018 Yunge Zhu, (@yungezz)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_roleassignment
version_added: "2.8"
short_description: Manage Azure Role Assignment.
description:
- Create and delete instance of Azure Role Assignment.
options:
name:
description:
- Unique name of role assignment.
assignee_object_id:
description:
- The object id of assignee. This maps to the ID inside the Active Directory.
- It can point to a user, service principal or security group.
- Required when creating role assignment.
role_definition_id:
description:
- The role definition id used in the role assignment.
- Required when creating role assignment.
scope:
description:
- The scope of the role assignment to create.
- For example, use /subscriptions/{subscription-id}/ for subscription,
- /subscriptions/{subscription-id}/resourceGroups/{resource-group-name} for resource group,
- /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name} for resource.
state:
description:
- Assert the state of the role assignment.
- Use 'present' to create or update a role assignment and 'absent' to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
author:
- "Yunge Zhu(@yungezz)"
'''
EXAMPLES = '''
- name: Create a role assignment
azure_rm_roleassignment:
scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
assignee_object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
role_definition_id:
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
- name: Delete a role assignment
azure_rm_roleassignment:
name: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
'''
RETURN = '''
id:
description: Id of current role assignment.
returned: always
type: str
sample:
"/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
'''
import uuid
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from msrest.serialization import Model
from azure.mgmt.authorization import AuthorizationManagementClient
from azure.mgmt.authorization.models import RoleAssignmentCreateParameters
except ImportError:
# This is handled in azure_rm_common
pass
def roleassignment_to_dict(assignment):
    """Flatten an SDK role-assignment object into a plain result dict."""
    return {
        'id': assignment.id,
        'name': assignment.name,
        'type': assignment.type,
        'assignee_object_id': assignment.principal_id,
        'role_definition_id': assignment.role_definition_id,
        'scope': assignment.scope,
    }
class AzureRMRoleAssignment(AzureRMModuleBase):
    """Ansible module implementation: create or delete an Azure RM role
    assignment at a given scope (subscription, resource group or resource).
    """

    def __init__(self):
        # Module argument specification; see DOCUMENTATION above.
        self.module_arg_spec = dict(
            name=dict(
                type='str'
            ),
            scope=dict(
                type='str'
            ),
            assignee_object_id=dict(
                type='str'
            ),
            role_definition_id=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.name = None
        self.scope = None
        self.assignee_object_id = None
        self.role_definition_id = None

        self.results = dict(
            changed=False,
            id=None,
        )
        self.state = None
        self._client = None

        super(AzureRMRoleAssignment, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                    supports_check_mode=True,
                                                    supports_tags=False)

    def exec_module(self, **kwargs):
        """Main module execution method"""

        # Copy validated module parameters onto the matching attributes.
        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        # get management client
        self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
                                                base_url=self._cloud_environment.endpoints.resource_manager,
                                                api_version="2018-01-01-preview")

        # build scope (defaults to the whole subscription when not provided)
        self.scope = self.build_scope()

        # Role assignment names are GUIDs; generate one when not supplied.
        if self.name is None:
            self.name = str(uuid.uuid4())

        # get existing role assignment
        old_response = self.get_roleassignment()

        if old_response:
            self.results['id'] = old_response['id']

        if self.state == 'present':
            # check if the role assignment exists
            if not old_response:
                self.log("Role assignment doesn't exist in this scope")

                self.results['changed'] = True

                if self.check_mode:
                    return self.results
                response = self.create_roleassignment()
                self.results['id'] = response['id']

            else:
                # Existing assignments cannot be updated in place.
                self.log("Role assignment already exists, not updatable")
                self.log('Result: {0}'.format(old_response))

        elif self.state == 'absent':
            if old_response:
                self.log("Delete role assignment")
                self.results['changed'] = True

                if self.check_mode:
                    return self.results

                self.delete_roleassignment()

                self.log('role assignment deleted')

            else:
                self.fail("role assignment {0} does not exist.".format(self.name))

        return self.results

    def build_scope(self):
        """Return the scope to operate on.

        Defaults to the whole subscription when no scope was given.  The
        documented scope format is '/subscriptions/{subscription-id}/';
        the previous code built '/subscription/' (singular), which is not
        a valid ARM scope.
        """
        subscription_scope = '/subscriptions/' + self.subscription_id
        if self.scope is None:
            return subscription_scope
        return self.scope

    def create_roleassignment(self):
        '''
        Creates role assignment.

        :return: deserialized role assignment
        '''
        self.log("Creating role assignment {0}".format(self.name))
        try:
            parameters = RoleAssignmentCreateParameters(role_definition_id=self.role_definition_id,
                                                        principal_id=self.assignee_object_id)
            response = self._client.role_assignments.create(scope=self.scope,
                                                            role_assignment_name=self.name,
                                                            parameters=parameters)
        except CloudError as exc:
            self.log('Error attempting to create role assignment.')
            self.fail("Error creating role assignment: {0}".format(str(exc)))

        return roleassignment_to_dict(response)

    def delete_roleassignment(self):
        '''
        Deletes specified role assignment.

        :return: True
        '''
        self.log("Deleting the role assignment {0}".format(self.name))
        try:
            # The SDK operation takes 'role_assignment_name', not 'name'
            # (matching the create()/get() calls above).
            self._client.role_assignments.delete(scope=self.scope,
                                                 role_assignment_name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the role assignment.')
            self.fail("Error deleting the role assignment: {0}".format(str(e)))

        return True

    def get_roleassignment(self):
        '''
        Gets the properties of the specified role assignment.

        :return: deserialized role assignment dictionary, or False when the
                 assignment does not exist (the service raises CloudError).
        '''
        self.log("Checking if the role assignment {0} is present".format(self.name))
        try:
            response = self._client.role_assignments.get(scope=self.scope, role_assignment_name=self.name)
            return roleassignment_to_dict(response)
        except CloudError:
            self.log("Didn't find role assignment {0} in scope {1}".format(self.name, self.scope))
        return False
def main():
    """Main execution"""
    # Instantiating the module class is sufficient; the AzureRMModuleBase
    # constructor presumably drives argument parsing and exec_module() --
    # confirm in module_utils.azure_rm_common.
    AzureRMRoleAssignment()
if __name__ == '__main__':
    main()
| Python | 0 | |
9ffafa9c11e71c176adb4056fbc780e450cc0d82 | Add experimental queries module. | databroker/queries.py | databroker/queries.py | """
This module is experimental.
"""
import collections.abc
import abc
class Query(collections.abc.Mapping):
    """
    This represents a MongoDB query.

    MongoDB queries are typically encoded as simple dicts. This object supports
    the dict interface in a read-only fashion. Subclasses add a nice __repr__
    and mutable attributes from which the contents of the dict are derived.
    """
    # abc.abstractproperty is deprecated (since Python 3.3); the documented
    # replacement is stacking @property on @abc.abstractmethod.
    @property
    @abc.abstractmethod
    def _query(self):
        """Return the query as a plain dict (implemented by subclasses)."""
        ...

    def __iter__(self):
        # Iterate the top-level keys of the underlying query dict.
        return iter(self._query)

    def __getitem__(self, key):
        return self._query[key]

    def __len__(self):
        return len(self._query)
class TimeRange(Query):
    """
    A search query representing a time range.
    """
    def __init__(self, since=None, until=None):
        self.since = since
        self.until = until

    @property
    def _query(self):
        # Build the bound operators first, then wrap them under 'time'.
        # An open bound (None) is simply omitted from the operator dict.
        bounds = {}
        if self.since is not None:
            bounds['$gte'] = self.since
        if self.until is not None:
            bounds['$lt'] = self.until
        return {'time': bounds}

    def __repr__(self):
        return f"{type(self).__name__}(since={self.since}, until={self.until})"
| Python | 0 | |
177f198a1efb99da592d96a2d5d259722b8a47ee | Add a test of switching back and forth between Decider() values (specifically 'MD5' and 'timestamp-match'), copied from back when this functionality was configured with the SourceSignatures() function. | test/Decider/switch-rebuild.py | test/Decider/switch-rebuild.py | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that switching Decider() types between MD5 and timestamp-match
does not cause unnecessary rebuilds.
"""
import TestSCons
# Drive SCons through a Decider('MD5') -> 'timestamp-match' -> 'MD5' cycle
# and assert exactly which transitions cause a rebuild.
test = TestSCons.TestSCons(match=TestSCons.match_re_dotall)
# SConstruct template; %s is filled with the Decider() type under test.
base_sconstruct_contents = """\
Decider('%s')
def build(env, target, source):
    open(str(target[0]), 'wt').write(open(str(source[0]), 'rt').read())
B = Builder(action=build)
env = Environment(BUILDERS = { 'B' : B })
env.B(target='switch.out', source='switch.in')
"""
def write_SConstruct(test, sig_type):
    # Regenerate SConstruct in the sandbox with the given decider type.
    contents = base_sconstruct_contents % sig_type
    test.write('SConstruct', contents)
# Build first MD5 checksums.
write_SConstruct(test, 'MD5')
test.write('switch.in', "switch.in\n")
switch_out_switch_in = test.wrap_stdout(r'build\(\["switch.out"\], \["switch.in"\]\)\n')
test.run(arguments='switch.out', stdout=switch_out_switch_in)
test.up_to_date(arguments='switch.out')
# Now rebuild with timestamp-match. Because we always store timestamps,
# even when making the decision based on MD5 checksums, the build is
# still up to date.
write_SConstruct(test, 'timestamp-match')
test.up_to_date(arguments='switch.out')
# Now switch back to MD5 checksums. When we rebuilt with the timestamp,
# it wiped out the MD5 value (because the point of timestamps is to not
# open up and checksum the contents), so the file is considered *not*
# up to date and must be rebuilt to generate a checksum.
write_SConstruct(test, 'MD5')
test.not_up_to_date(arguments='switch.out')
# And just for good measure, make sure that we now rebuild in response
# to a content change.
test.write('switch.in', "switch.in 2\n")
test.run(arguments='switch.out', stdout=switch_out_switch_in)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Python | 0 | |
e547b5a202d13ba9dc66451f959a8044844de4a6 | add simple test code for distributions.Independent with Normal | tests/chainer_tests/distributions_tests/test_independent.py | tests/chainer_tests/distributions_tests/test_independent.py | import functools
import itertools
import operator
import numpy
from chainer import distributions
from chainer import testing
from chainer.testing import array
from chainer.testing import attr
from chainer import utils
def skip_not_in_params(property):
    """Decorator factory: skip the wrapped test method unless *property*
    is one of the keys of ``self.params``.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            if property in self.params.keys():
                f(self, *args, **kwargs)
            else:
                self.skipTest(
                    "\'%s\' does not exist in params.keys()." % property)
        return wrapper
    return decorator
def _generate_valid_shape_pattern(
inner_shape, inner_event_shape, reinterpreted_batch_ndims):
shape_pattern = []
for bs, es, m in itertools.product(
inner_shape, inner_event_shape, reinterpreted_batch_ndims):
if (m is not None) and (m > len(bs)):
continue
shape_pattern.append({
'full_shape': bs + es,
'inner_shape': bs,
'inner_event_shape': es,
'reinterpreted_batch_ndims': m
})
return shape_pattern
def _generate_test_parameter(
        parameter_list, inner_shape, inner_event_shape,
        reinterpreted_batch_ndims):
    """Cross every parameter dict with every valid shape pattern.

    Each result merges one entry of ``parameter_list`` with one shape
    pattern (the shape-pattern keys win on collision).
    """
    shape_pattern = _generate_valid_shape_pattern(
        inner_shape, inner_event_shape, reinterpreted_batch_ndims)
    return [dict(params, **shapes)
            for params, shapes in itertools.product(parameter_list, shape_pattern)]
@testing.parameterize(*_generate_test_parameter(
    testing.product({
        'sample_shape': [(3, 2), ()],
        'is_variable': [True, False]
    }),
    inner_shape=[(4, 5), (5,), ()],
    inner_event_shape=[()],
    reinterpreted_batch_ndims=[1, 0, None]
))
@testing.fix_random()
@testing.with_requires('scipy')
class TestIndependentNormal(testing.distribution_unittest):
    """Tests for distributions.Independent wrapping a Normal distribution,
    parameterized over batch shapes and reinterpreted_batch_ndims, using
    scipy distributions as the reference implementation."""
    # Framework option; presumably compares against scipy sample-by-sample
    # -- confirm in chainer.testing.distribution_unittest.
    scipy_onebyone = True
    def _build_inner_distribution(self):
        pass
    def setUp_configure(self):
        from scipy import stats
        self.dist = lambda **params: distributions.Independent(
            distributions.Normal(**params), self.reinterpreted_batch_ndims)
        self.test_targets = set([
            "batch_shape", "entropy", "event_shape", "log_prob",
            "support"])
        # Random loc and a strictly positive (exp of uniform) scale.
        loc = utils.force_array(numpy.random.uniform(
            -1, 1, self.full_shape).astype(numpy.float32))
        scale = utils.force_array(numpy.exp(numpy.random.uniform(
            -1, 1, self.full_shape)).astype(numpy.float32))
        # None means "use the default": all but one batch dimension.
        if self.reinterpreted_batch_ndims is None:
            reinterpreted_batch_ndims = max(0, len(self.inner_shape) - 1)
        else:
            reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        # Split the inner shape into remaining batch dims and the dims
        # reinterpreted as event dims.
        batch_ndim = len(self.inner_shape) - reinterpreted_batch_ndims
        self.shape = self.inner_shape[:batch_ndim]
        self.event_shape = \
            self.inner_shape[batch_ndim:] + self.inner_event_shape
        # d = total number of event elements.
        d = functools.reduce(operator.mul, self.event_shape, 1)
        if self.event_shape == ():
            # Scalar event: plain univariate normal is the reference.
            self.scipy_dist = stats.norm
            self.params = {"loc": loc, "scale": scale}
            self.scipy_params = {"loc": loc, "scale": scale}
        else:
            # Non-scalar event: the reference is a multivariate normal with
            # a diagonal covariance built from the per-element scales.
            self.scipy_dist = stats.multivariate_normal
            scale_tril = numpy.eye(d).astype(numpy.float32) * \
                scale.reshape(self.shape + (d,))[..., None]
            cov = numpy.einsum('...ij,...jk->...ik', scale_tril, scale_tril)
            self.params = {"loc": loc, "scale": scale}
            self.scipy_params = {"mean": numpy.reshape(
                loc, self.shape + (d,)), "cov": cov}
    def sample_for_test(self):
        smp = numpy.random.normal(
            size=self.sample_shape + self.full_shape
        ).astype(numpy.float32)
        return smp
    def test_batch_ndim_error(self):
        # Requesting more reinterpreted dims than the batch rank must fail.
        with self.assertRaises(ValueError):
            distributions.Independent(
                distributions.Normal(**self.params),
                len(self.inner_shape) + 1)
    def check_covariance(self, is_gpu):
        if is_gpu:
            cov1 = self.gpu_dist.covariance.array
        else:
            cov1 = self.cpu_dist.covariance.array
        # NOTE(review): setUp_configure never puts 'cov' into self.params
        # (only into self.scipy_params), so the two tests below always skip
        # via @skip_not_in_params('cov') -- confirm whether
        # self.scipy_params['cov'] was intended here.
        cov2 = self.params['cov']
        array.assert_allclose(cov1, cov2)
    @skip_not_in_params('cov')
    def test_covariance_cpu(self):
        self.check_covariance(False)
    @skip_not_in_params('cov')
    @attr.gpu
    def test_covariance_gpu(self):
        self.check_covariance(True)
testing.run_module(__name__, __file__)
| Python | 0 | |
666cd734270f7d7487815c7805940cb8a2d99c2c | Add script for downloading and patching the TIGER corpus | confopy/localization/de/corpus_de/tiger_dl_patch.py | confopy/localization/de/corpus_de/tiger_dl_patch.py | #!/usr/bin/python
# coding: utf-8
'''
File: tiger_release_aug07.corrected.16012013_patch.py
Author: Oliver Zscheyge
Description:
Fixes wrong morph values in the TIGER corpus:
tiger_release_aug07.corrected.16012013.xml
Also converts XML file to utf-8 encoding.
'''
import urllib
import tarfile
import codecs
import fileinput
import os
# Upstream archive location and the filenames of each pipeline stage:
# download -> extracted XML -> UTF-8 re-encoding -> patched final corpus.
TIGER_URL = "http://www.ims.uni-stuttgart.de/forschung/ressourcen/korpora/TIGERCorpus/download/tigercorpus-2.2.xml.tar.gz"
TIGER_PKG_FILE = "tiger.tar.gz"
TIGER_FILE = "tiger_release_aug07.corrected.16012013.xml"
TIGER_FILE_UTF8 = "tiger_release_aug07.corrected.16012013_utf8.xml"
TIGER_FILE_UTF8_PATCHED = "tiger_release_aug07.corrected.16012013_utf8_patched.xml"
# The corpus ships ISO-8859-1 encoded; we convert it to UTF-8.
SOURCE_ENC = "iso-8859-1"
TARGET_ENC = "utf-8"
def main():
    """Download the TIGER corpus, convert it to UTF-8, patch known bad
    annotations, and remove the intermediate files."""
    print("Downloading and extracting TIGER corpus...")
    download_extract()
    print("Converting the corpus to UTF-8 and fixing strings...")
    convert_to_utf8()
    fix_strings()
    print("Cleaning up downloaded and generated files...")
    cleanup()
    print("Done!")
def download_extract():
    """Fetch the gzipped TIGER corpus archive and unpack it in-place.

    The archive is opened via a ``with`` block so it is closed even when
    extraction raises.
    """
    urllib.urlretrieve(TIGER_URL, TIGER_PKG_FILE)
    # NOTE: extractall() trusts member paths inside the archive; acceptable
    # here because the source is a fixed, known URL.
    with tarfile.open(TIGER_PKG_FILE) as tar:
        tar.extractall()
def convert_to_utf8():
    """Re-encode TIGER_FILE (ISO-8859-1) into TIGER_FILE_UTF8 (UTF-8).

    Streams the file in fixed-size chunks so the whole corpus never has to
    be held in memory.  (Adapted from
    http://stackoverflow.com/questions/191359/how-to-convert-a-file-to-utf-8-in-python)
    """
    chunk_size = 1048576  # 1 MiB per read
    with codecs.open(TIGER_FILE, "r", SOURCE_ENC) as src, \
            codecs.open(TIGER_FILE_UTF8, "w", TARGET_ENC) as dst:
        while True:
            chunk = src.read(chunk_size)
            if not chunk:
                break
            dst.write(chunk)
def fix_strings():
    """Patch known-bad lines of the UTF-8 corpus into TIGER_FILE_UTF8_PATCHED.

    Fixes the XML encoding declaration on line 1 and four wrong morph
    values ("Pl.N.Pres.Ind" instead of "N.Pl.Pres.Ind") at fixed line
    numbers of the 16012013 release.  Windows carriage returns are
    stripped from every line.
    """
    # line number -> (old substring, new substring); applied at most once.
    replacements = {
        1: (u"ISO-8859-1", u"utf-8"),
        293648: (u"Pl.1.Pres.Ind", u"1.Pl.Pres.Ind"),
        543756: (u"Pl.3.Pres.Ind", u"3.Pl.Pres.Ind"),
        1846632: (u"Pl.3.Pres.Ind", u"3.Pl.Pres.Ind"),
        2634040: (u"Pl.3.Pres.Ind", u"3.Pl.Pres.Ind"),
    }
    with codecs.open(TIGER_FILE_UTF8_PATCHED, "w", TARGET_ENC) as outfile:
        with codecs.open(TIGER_FILE_UTF8, "r", TARGET_ENC) as infile:
            # enumerate() replaces the previous hand-maintained counter.
            for linenr, line in enumerate(infile, start=1):
                # Strip Windows carriage returns.
                line = unicode(line).replace(u"\r", u"")
                replacement = replacements.get(linenr)
                if replacement is not None:
                    line = line.replace(replacement[0], replacement[1], 1)
                outfile.write(line)
def cleanup():
    """Delete the downloaded archive and the intermediate conversion files,
    leaving only the patched corpus."""
    for path in (TIGER_PKG_FILE, TIGER_FILE, TIGER_FILE_UTF8):
        os.remove(path)
if __name__ == '__main__':
main()
| Python | 0 | |
8c7fc2382db0ec9c901f6c2c2b00971f3ee7c3cc | Add tests for custom authentication backend | logintokens/tests/test_backends.py | logintokens/tests/test_backends.py | """logintokens app unittests for backends
"""
from time import sleep
from django.test import TestCase, Client
from django.contrib.auth import get_user_model, authenticate
from logintokens.tokens import default_token_generator
USER = get_user_model()
class EmailOnlyAuthenticationBackendTest(TestCase):
    """Tests for email only authentication backend
    """
    def setUp(self):
        self.client = Client()
        self.generator = default_token_generator
        # Username with no USER row yet; used by test_new_visitor_creates_user.
        self.new_username = 'newvisitor'
        self.existing_user = USER._default_manager.create_user('existinguser')
    def test_different_tokens_usable(self):
        """Two differing tokens should both be usable to authenticate.
        """
        username = self.existing_user.get_username()
        token1 = self.generator.make_token(username)
        # Presumably tokens embed a timestamp; sleeping forces the second
        # token to differ from the first -- confirm in tokens.py.
        sleep(1)
        token2 = self.generator.make_token(username)
        self.assertNotEqual(token1, token2)
        self.assertEqual(authenticate(token=token1), self.existing_user)
        self.assertEqual(authenticate(token=token2), self.existing_user)
    def test_login_invalidates_tokens(self):
        """Tokens generated before a successful login should become invalid.
        """
        username = self.existing_user.get_username()
        token1 = self.generator.make_token(username)
        sleep(1)
        token2 = self.generator.make_token(username)
        self.assertNotEqual(token1, token2)
        # A successful login must invalidate all previously issued tokens.
        self.client.force_login(self.existing_user)
        self.assertIsNone(authenticate(token=token1))
        self.assertIsNone(authenticate(token=token2))
    def test_new_visitor_creates_user(self):
        """Using a token from a new visitor should create their user object.
        """
        token = self.generator.make_token(self.new_username)
        user = authenticate(token=token)
        self.assertIsInstance(user, USER)
| Python | 0 | |
f746c2a8a59342060d404944a586b11e1f46df5a | Merge with lp:openobject-addons | addons/product_visible_discount/__openerp__.py | addons/product_visible_discount/__openerp__.py | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Prices Visible Discounts',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Sales Management',
'description': """
This module lets you calculate discounts on Sale Order lines and Invoice lines base on the partner's pricelist.
===============================================================================================================
To this end, a new check box named 'Visible Discount' is added to the pricelist form.
**Example:**
For the product PC1 and the partner "Asustek": if listprice=450, and the price
calculated using Asustek's pricelist is 225. If the check box is checked, we
will have on the sale order line: Unit price=450, Discount=50,00, Net price=225.
If the check box is unchecked, we will have on Sale Order and Invoice lines:
Unit price=225, Discount=0,00, Net price=225.
""",
'depends': ["sale","purchase"],
'demo': [],
'data': ['product_visible_discount_view.xml'],
'auto_install': False,
'installable': True,
'certificate' : "001144718884654279901",
'images': ['images/pricelists_visible_discount.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Prices Visible Discounts',
'version': '1.0',
'author': 'OpenERP SA',
'category': 'Sales Management',
'description': """
This module lets you calculate discounts on Sale Order lines and Invoice lines base on the partner's pricelist.
===============================================================================================================
To this end, a new check box named 'Visible Discount' is added to the pricelist form.
**Example:**
For the product PC1 and the partner "Asustek": if listprice=450, and the price
calculated using Asustek's pricelist is 225. If the check box is checked, we
will have on the sale order line: Unit price=450, Discount=50,00, Net price=225.
If the check box is unchecked, we will have on Sale Order and Invoice lines:
Unit price=225, Discount=0,00, Net price=225.
""",
"depends": ["sale","purchase"],
"demo_xml": [],
"update_xml": ['product_visible_discount_view.xml'],
"auto_install": False,
"installable": True,
"certificate" : "001144718884654279901",
'images': ['images/pricelists_visible_discount.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 |
54ca48a2b8cbd53cd6506fdbce47d16f03a28a7d | Add unit tests for bubble sort | tests/test_sorting_and_searching/test_bubble_sort.py | tests/test_sorting_and_searching/test_bubble_sort.py | import unittest
from aids.sorting_and_searching.bubble_sort import bubble_sort
class BubbleSortTestCase(unittest.TestCase):
    '''
    Unit tests for bubble sort (sorts its argument in place).
    '''
    def setUp(self):
        self.example_1 = [2, 5, 4, 3, 1]

    def test_bubble_sort(self):
        bubble_sort(self.example_1)
        self.assertEqual(self.example_1, [1, 2, 3, 4, 5])

    def test_bubble_sort_edge_cases(self):
        # Empty and single-element inputs must be handled without error,
        # and an already-sorted list must stay unchanged.
        for data, expected in ([], []), ([7], [7]), ([1, 2, 3], [1, 2, 3]):
            bubble_sort(data)
            self.assertEqual(data, expected)


if __name__ == '__main__':
    unittest.main()
| Python | 0 | |
f24bdbbd0a7abc32d49f844a4b97b128a3adc299 | add tests for KnownIssues | test/core/test_known_issues.py | test/core/test_known_issues.py | from django.test import TestCase
from django.utils import timezone
from unittest.mock import patch
from squad.core.models import Group, KnownIssue
class KnownIssueTest(TestCase):
    """Exercise KnownIssue.active_by_environment and
    KnownIssue.active_by_project_and_test for both active and deactivated
    issues.  The four tests share one fixture helper instead of repeating
    the same build/test/issue setup verbatim."""

    def setUp(self):
        self.group = Group.objects.create(slug='mygroup')
        self.project = self.group.projects.create(slug='myproject')
        self.env1 = self.project.environments.create(slug='env1')
        self.suite1 = self.project.suites.create(slug="suite1")
        self.date = timezone.now()

    def _create_issue_for_failed_test(self, active=True):
        """Create a build with one failed test and a KnownIssue linked to
        the test's environment.  Deactivates the issue when active=False.

        Returns (test, known_issue).
        """
        build = self.project.builds.create(
            datetime=self.date,
            version=self.date.strftime("%Y%m%d"),
        )
        test_run = build.test_runs.create(environment=self.env1)
        # create failed test
        test = test_run.tests.create(suite=self.suite1, name="test_foo", result=False)
        known_issue = KnownIssue.objects.create(
            title="foo",
            test_name=test.full_name
        )
        known_issue.save()
        known_issue.environment.add(test_run.environment)
        if not active:
            known_issue.active = False
            known_issue.save()
        return test, known_issue

    def test_active_known_issue(self):
        self._create_issue_for_failed_test()
        self.assertEqual(1, len(KnownIssue.active_by_environment(self.env1)))

    def test_inactive_known_issue(self):
        self._create_issue_for_failed_test(active=False)
        self.assertEqual(0, len(KnownIssue.active_by_environment(self.env1)))

    def test_active_by_project(self):
        test, _ = self._create_issue_for_failed_test()
        self.assertEqual(1, len(KnownIssue.active_by_project_and_test(self.project, test.full_name)))

    def test_inactive_by_project(self):
        test, _ = self._create_issue_for_failed_test(active=False)
        self.assertEqual(0, len(KnownIssue.active_by_project_and_test(self.project, test.full_name)))
| Python | 0 | |
38e75951570be46f6a36eeb000a4621bc76bf02a | Move history plugin to learn phase. | flexget/plugins/output/history.py | flexget/plugins/output/history.py | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, Unicode, desc
from flexget import options, plugin
from flexget.event import event
from flexget.manager import Base, Session
from flexget.utils.tools import console
log = logging.getLogger('history')
class History(Base):
    """ORM row recording one accepted entry for later lookup."""
    __tablename__ = 'history'
    id = Column(Integer, primary_key=True)
    # NOTE: the DB column is named 'feed' while the attribute is 'task' --
    # presumably kept for schema backwards compatibility; confirm before
    # renaming.
    task = Column('feed', String)
    filename = Column(String)
    url = Column(String)
    title = Column(Unicode)
    time = Column(DateTime)
    details = Column(String)
    def __init__(self):
        # Stamp the record with its creation time.
        self.time = datetime.now()
    def __str__(self):
        return '<History(filename=%s,task=%s)>' % (self.filename, self.task)
class PluginHistory(object):
    """Records all accepted entries for later lookup"""
    # Plain boolean config: set to False to disable this builtin plugin.
    schema = {'type': 'boolean'}
    def on_task_learn(self, task, config):
        """Add accepted entries to history.

        One History row is written per entry still accepted when the task
        reaches the learn phase.
        """
        if config is False:
            return # Explicitly disabled with configuration
        for entry in task.accepted:
            item = History()
            item.task = task.name
            # 'output' is presumably set by output plugins when a file was
            # written; None otherwise -- confirm against those plugins.
            item.filename = entry.get('output', None)
            item.title = entry['title']
            item.url = entry['url']
            reason = ''
            if 'reason' in entry:
                reason = ' (reason: %s)' % entry['reason']
            item.details = 'Accepted by %s%s' % (entry.get('accepted_by', '<unknown>'), reason)
            task.session.add(item)
def do_cli(manager, options):
    """CLI handler for 'flexget history': print the most recent accepted
    entries, optionally filtered by a search term."""
    session = Session()
    try:
        console('-- History: ' + '-' * 67)
        query = session.query(History)
        if options.search:
            # Treat spaces and dots as wildcards so loose terms still match.
            term = options.search.replace(' ', '%').replace('.', '%')
            query = query.filter(History.title.like('%' + term + '%'))
        # Fetch the newest N rows, then print them oldest-first.
        newest_first = query.order_by(desc(History.time)).limit(options.limit)
        for item in reversed(newest_first.all()):
            console(' Task : %s' % item.task)
            console(' Title : %s' % item.title)
            console(' Url : %s' % item.url)
            if item.filename:
                console(' Stored : %s' % item.filename)
            console(' Time : %s' % item.time.strftime("%c"))
            console(' Details : %s' % item.details)
            console('-' * 79)
    finally:
        session.close()
@event('options.register')
def register_parser_arguments():
    """Register the 'history' CLI subcommand with --limit and --search."""
    parser = options.register_command('history', do_cli, help='view the history of entries that FlexGet has accepted')
    parser.add_argument('--limit', action='store', type=int, metavar='NUM', default=50,
                        help='limit to %(metavar)s results')
    parser.add_argument('--search', action='store', metavar='TERM', help='limit to results that contain %(metavar)s')
@event('plugin.register')
def register_plugin():
    """Register PluginHistory under the name 'history' as a builtin plugin."""
    plugin.register(PluginHistory, 'history', builtin=True, api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, Unicode, desc
from flexget import options, plugin
from flexget.event import event
from flexget.manager import Base, Session
from flexget.utils.tools import console
log = logging.getLogger('history')
class History(Base):
__tablename__ = 'history'
id = Column(Integer, primary_key=True)
task = Column('feed', String)
filename = Column(String)
url = Column(String)
title = Column(Unicode)
time = Column(DateTime)
details = Column(String)
def __init__(self):
self.time = datetime.now()
def __str__(self):
return '<History(filename=%s,task=%s)>' % (self.filename, self.task)
class PluginHistory(object):
"""Records all accepted entries for later lookup"""
schema = {'type': 'boolean'}
@plugin.priority(-255)
def on_task_output(self, task, config):
"""Add accepted entries to history"""
if config is False:
return # Explicitly disabled with configuration
for entry in task.accepted:
item = History()
item.task = task.name
item.filename = entry.get('output', None)
item.title = entry['title']
item.url = entry['url']
reason = ''
if 'reason' in entry:
reason = ' (reason: %s)' % entry['reason']
item.details = 'Accepted by %s%s' % (entry.get('accepted_by', '<unknown>'), reason)
task.session.add(item)
def do_cli(manager, options):
session = Session()
try:
console('-- History: ' + '-' * 67)
query = session.query(History)
if options.search:
search_term = options.search.replace(' ', '%').replace('.', '%')
query = query.filter(History.title.like('%' + search_term + '%'))
query = query.order_by(desc(History.time)).limit(options.limit)
for item in reversed(query.all()):
console(' Task : %s' % item.task)
console(' Title : %s' % item.title)
console(' Url : %s' % item.url)
if item.filename:
console(' Stored : %s' % item.filename)
console(' Time : %s' % item.time.strftime("%c"))
console(' Details : %s' % item.details)
console('-' * 79)
finally:
session.close()
@event('options.register')
def register_parser_arguments():
    """Register the ``history`` CLI command and its arguments."""
    parser = options.register_command('history', do_cli, help='view the history of entries that FlexGet has accepted')
    parser.add_argument('--limit', action='store', type=int, metavar='NUM', default=50,
                        help='limit to %(metavar)s results')
    parser.add_argument('--search', action='store', metavar='TERM', help='limit to results that contain %(metavar)s')
@event('plugin.register')
def register_plugin():
    """Register the history plugin with FlexGet (builtin, plugin API v2)."""
    plugin.register(PluginHistory, 'history', builtin=True, api_ver=2)
| Python | 0 |
70e1910ef01c6313360dff3f3e728e4f5f404f38 | Allow history to be filtered by task | flexget/plugins/output/history.py | flexget/plugins/output/history.py | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, Unicode, desc
from flexget import options, plugin
from flexget.event import event
from flexget.logger import console
from flexget.manager import Base, Session
log = logging.getLogger('history')
class History(Base):
    """ORM model: one row per entry that FlexGet has accepted."""
    __tablename__ = 'history'

    id = Column(Integer, primary_key=True)
    # Python attribute is 'task' but the underlying DB column is named 'feed'.
    task = Column('feed', String)
    filename = Column(String)  # taken from the entry's 'output' field, if any
    url = Column(String)
    title = Column(Unicode)
    time = Column(DateTime)  # set to creation time in __init__
    details = Column(String)  # free-form description text

    def __init__(self):
        # Stamp the record with the moment it was created.
        self.time = datetime.now()

    def __str__(self):
        return '<History(filename=%s,task=%s)>' % (self.filename, self.task)
class PluginHistory(object):
    """Records all accepted entries for later lookup"""

    # Boolean config: ``history: false`` disables recording.
    schema = {'type': 'boolean'}

    def on_task_learn(self, task, config):
        """Add accepted entries to history"""
        if config is False:
            return  # Explicitly disabled with configuration
        for entry in task.accepted:
            record = History()
            record.task = task.name
            record.filename = entry.get('output', None)
            record.title = entry['title']
            record.url = entry['url']
            # Only mention a reason when the entry actually carries one.
            reason = ' (reason: %s)' % entry['reason'] if 'reason' in entry else ''
            record.details = 'Accepted by %s%s' % (entry.get('accepted_by', '<unknown>'), reason)
            task.session.add(record)
def do_cli(manager, options):
    """Handle the ``history`` CLI command: print recently accepted entries.

    Honors --limit (row cap), --search (title match) and --task (task match).
    """
    session = Session()
    try:
        console('-- History: ' + '-' * 67)
        query = session.query(History)
        if options.search:
            # Spaces and dots become SQL wildcards so loose terms still match.
            term = options.search.replace(' ', '%').replace('.', '%')
            query = query.filter(History.title.like('%' + term + '%'))
        if options.task:
            query = query.filter(History.task.like('%' + options.task + '%'))
        newest_first = query.order_by(desc(History.time)).limit(options.limit)
        # Reverse so the most recent entry ends up at the bottom of the output.
        for entry in reversed(newest_first.all()):
            console(' Task : %s' % entry.task)
            console(' Title : %s' % entry.title)
            console(' Url : %s' % entry.url)
            if entry.filename:
                console(' Stored : %s' % entry.filename)
            console(' Time : %s' % entry.time.strftime("%c"))
            console(' Details : %s' % entry.details)
            console('-' * 79)
    finally:
        # Always release the DB session, even if printing fails.
        session.close()
@event('options.register')
def register_parser_arguments():
    """Register the ``history`` CLI command and its filtering arguments."""
    parser = options.register_command('history', do_cli, help='view the history of entries that FlexGet has accepted')
    parser.add_argument('--limit', action='store', type=int, metavar='NUM', default=50,
                        help='limit to %(metavar)s results')
    parser.add_argument('--search', action='store', metavar='TERM', help='limit to results that contain %(metavar)s')
    parser.add_argument('--task', action='store', metavar='TASK', help='limit to results in specified %(metavar)s')
@event('plugin.register')
def register_plugin():
    """Register the history plugin with FlexGet (builtin, plugin API v2)."""
    plugin.register(PluginHistory, 'history', builtin=True, api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime
from sqlalchemy import Column, String, Integer, DateTime, Unicode, desc
from flexget import options, plugin
from flexget.event import event
from flexget.logger import console
from flexget.manager import Base, Session
log = logging.getLogger('history')
class History(Base):
    """ORM model: one row per entry that FlexGet has accepted."""
    __tablename__ = 'history'

    id = Column(Integer, primary_key=True)
    # Python attribute is 'task' but the underlying DB column is named 'feed'.
    task = Column('feed', String)
    filename = Column(String)  # taken from the entry's 'output' field, if any
    url = Column(String)
    title = Column(Unicode)
    time = Column(DateTime)  # set to creation time in __init__
    details = Column(String)  # free-form description text

    def __init__(self):
        # Stamp the record with the moment it was created.
        self.time = datetime.now()

    def __str__(self):
        return '<History(filename=%s,task=%s)>' % (self.filename, self.task)
class PluginHistory(object):
    """Records all accepted entries for later lookup"""

    # Boolean config: ``history: false`` disables recording.
    schema = {'type': 'boolean'}

    def on_task_learn(self, task, config):
        """Add accepted entries to history"""
        if config is False:
            return  # Explicitly disabled with configuration
        for entry in task.accepted:
            item = History()
            item.task = task.name
            item.filename = entry.get('output', None)
            item.title = entry['title']
            item.url = entry['url']
            reason = ''
            if 'reason' in entry:
                reason = ' (reason: %s)' % entry['reason']
            item.details = 'Accepted by %s%s' % (entry.get('accepted_by', '<unknown>'), reason)
            task.session.add(item)
def do_cli(manager, options):
    """Handle the ``history`` CLI command: print recently accepted entries.

    Honors --limit (row cap) and --search (title substring match).
    """
    session = Session()
    try:
        console('-- History: ' + '-' * 67)
        query = session.query(History)
        if options.search:
            # Turn spaces and dots into SQL wildcards so loose search
            # terms still match stored titles.
            search_term = options.search.replace(' ', '%').replace('.', '%')
            query = query.filter(History.title.like('%' + search_term + '%'))
        # Newest first, capped at --limit rows...
        query = query.order_by(desc(History.time)).limit(options.limit)
        # ...then reversed so the most recent entry prints last.
        for item in reversed(query.all()):
            console(' Task : %s' % item.task)
            console(' Title : %s' % item.title)
            console(' Url : %s' % item.url)
            if item.filename:
                console(' Stored : %s' % item.filename)
            console(' Time : %s' % item.time.strftime("%c"))
            console(' Details : %s' % item.details)
            console('-' * 79)
    finally:
        # Always release the DB session, even if printing fails.
        session.close()
@event('options.register')
def register_parser_arguments():
    """Register the ``history`` CLI command and its arguments."""
    parser = options.register_command('history', do_cli, help='view the history of entries that FlexGet has accepted')
    parser.add_argument('--limit', action='store', type=int, metavar='NUM', default=50,
                        help='limit to %(metavar)s results')
    parser.add_argument('--search', action='store', metavar='TERM', help='limit to results that contain %(metavar)s')
@event('plugin.register')
def register_plugin():
    """Register the history plugin with FlexGet (builtin, plugin API v2)."""
    plugin.register(PluginHistory, 'history', builtin=True, api_ver=2)
| Python | 0.000008 |
f08a01021d697fbf34a8f23b7cc51a566619a1d2 | Increase unit test coverage for common utils. | murano/tests/unit/common/test_utils.py | murano/tests/unit/common/test_utils.py | # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
from murano.common import utils
from murano.tests.unit import base
class UtilsTests(base.MuranoTestCase):
    """Unit tests for helpers in murano.common.utils."""

    def test_validate_quotes(self):
        """A properly quoted string passes validation."""
        self.assertTrue(utils.validate_quotes('"ab"'))

    def test_validate_quotes_not_closed_quotes(self):
        """A quote opened but never closed raises ValueError."""
        self.assertRaises(ValueError, utils.validate_quotes, '"ab","b""')

    def test_validate_quotes_not_opened_quotes(self):
        """A stray closing quote raises ValueError."""
        self.assertRaises(ValueError, utils.validate_quotes, '""ab","b"')

    def test_validate_quotes_no_coma_before_opening_quotes(self):
        """Adjacent quoted tokens without a separating comma are rejected."""
        self.assertRaises(ValueError, utils.validate_quotes, '"ab""b"')

    def test_split_for_quotes(self):
        """Commas inside quotes do not split; surrounding quotes are stripped."""
        self.assertEqual(["a,b", "ac"], utils.split_for_quotes('"a,b","ac"'))

    def test_split_for_quotes_with_backslash(self):
        """Escaped quotes and doubled backslashes survive splitting."""
        self.assertEqual(['a"bc', 'de', 'fg,h', r'klm\\', '"nop'],
                         utils.split_for_quotes(r'"a\"bc","de",'
                                                r'"fg,h","klm\\","\"nop"'))

    def test_validate_body(self):
        """validate_body accepts JSON bodies, including a literal 'body' key."""
        json_schema = json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
        self.assertIsNotNone(utils.validate_body(json_schema))
        json_schema = json.dumps(['body', {'body': ('baz', None, 1.0, 2)}])
        self.assertIsNotNone(utils.validate_body(json_schema))

    def test_build_entity_map(self):
        """Entities are indexed by their '?'->'id' value; no id -> empty map."""
        entity = {"?": {"fun": "id"}}
        self.assertEqual({}, utils.build_entity_map(entity))
        entity = {"?": {"id": "id"}}
        self.assertEqual({'id': {'?': {'id': 'id'}}},
                         utils.build_entity_map(entity))
        entity = [{"?": {"id": "id1"}}, {"?": {"id": "id2"}}]
        self.assertEqual({'id1': {'?': {'id': 'id1'}},
                          'id2': {'?': {'id': 'id2'}}},
                         utils.build_entity_map(entity))

    def test_is_different(self):
        """is_different performs deep comparison, tolerating cyclic refs."""
        t1 = "Hello"
        t2 = "World"
        self.assertTrue(utils.is_different(t1, t2))
        t1 = "Hello"
        t2 = "Hello"
        self.assertFalse(utils.is_different(t1, t2))
        t1 = {1, 2, 3, 4}
        t2 = t1
        self.assertFalse(utils.is_different(t1, t2))
        t2 = {1, 2, 3}
        self.assertTrue(utils.is_different(t1, t2))
        # Self-referential list must not send the comparison into a loop.
        t1 = [1, 2, {1, 2, 3, 4}]
        t1[0] = t1
        self.assertTrue(utils.is_different(t1, t2))
        # Mutually referential lists.
        t1 = [t2]
        t2 = [t1]
        self.assertTrue(utils.is_different(t1, t2))
        t1 = [{1, 2, 3}, {1, 2, 3}]
        t2 = [{1, 2, 3}, {1, 2}]
        self.assertTrue(utils.is_different(t1, t2))
        t1 = datetime.date(2016, 8, 8)
        t2 = datetime.date(2016, 8, 7)
        self.assertTrue(utils.is_different(t1, t2))
        t1 = {1: 1, 2: 2, 3: 3}
        t2 = {1: 1, 2: 4, 3: 3}
        self.assertTrue(utils.is_different(t1, t2))
        # Nested values of differing types.
        t1 = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": [1, 2, 3]}}
        t2 = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": "world\n\n\nEnd"}}
        self.assertTrue(utils.is_different(t1, t2))
        t1 = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": [1, 2, 5]}}
        t2 = {1: 1, 2: 2, 3: 3, 4: {"a": "hello", "b": [1, 3, 2, 5]}}
        self.assertTrue(utils.is_different(t1, t2))

        # Objects using __slots__ (no __dict__) must still compare by fields.
        class ClassA(object):
            __slots__ = ['x', 'y']

            def __init__(self, x, y):
                self.x = x
                self.y = y

        t1 = ClassA(1, 1)
        t2 = ClassA(1, 2)
        self.assertTrue(utils.is_different(t1, t2))
        t1 = [1, 2, 3]
        t1.append(t1)
        t2 = [1, 2, 4]
        t2.append(t2)
        self.assertTrue(utils.is_different(t1, t2))
        t1 = [1, 2, 3]
        t2 = [1, 2, 4]
        t2.append(t1)
        t1.append(t2)
        self.assertTrue(utils.is_different(t1, t2))
        # Module objects compare by identity-ish semantics.
        t1 = utils
        t2 = datetime
        self.assertTrue(utils.is_different(t1, t2))
        t2 = "Not a module"
        self.assertTrue(utils.is_different(t1, t2))
| # Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from murano.common import utils
from murano.tests.unit import base
class UtilsTests(base.MuranoTestCase):
    """Unit tests for helpers in murano.common.utils."""

    def test_validate_quotes(self):
        """A properly quoted string passes validation."""
        # assertTrue gives a clearer failure message than
        # assertEqual(True, ...) and matches the boolean intent.
        self.assertTrue(utils.validate_quotes('"ab"'))

    def test_validate_quotes_not_closed_quotes(self):
        """A quote opened but never closed raises ValueError."""
        self.assertRaises(ValueError, utils.validate_quotes, '"ab","b""')

    def test_validate_quotes_no_coma_before_opening_quotes(self):
        """Adjacent quoted tokens without a separating comma are rejected."""
        self.assertRaises(ValueError, utils.validate_quotes, '"ab""b"')

    def test_split_for_quotes(self):
        """Commas inside quotes do not split; surrounding quotes are stripped."""
        self.assertEqual(["a,b", "ac"], utils.split_for_quotes('"a,b","ac"'))

    def test_split_for_quotes_with_backslash(self):
        """Escaped quotes and doubled backslashes survive splitting."""
        self.assertEqual(['a"bc', 'de', 'fg,h', r'klm\\', '"nop'],
                         utils.split_for_quotes(r'"a\"bc","de",'
                                                r'"fg,h","klm\\","\"nop"'))
0b047f5b6123d851916ed12114512ddebec58225 | Add 20150509 question. | LeetCode/add_two_numbers.py | LeetCode/add_two_numbers.py | """
You are given two linked lists representing two non-negative numbers.
The digits are stored in reverse order and each of their nodes contain a single
digit. Add the two numbers and return it as a linked list.
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Note: the sum may produce a final carry, so the result can be one digit longer than either input.
"""
class ListNode:
    """
    Definition for singly-linked list.
    """

    def __init__(self, x):
        # Store the payload; a freshly built node is not linked to anything.
        self.val, self.next = x, None
class Solution:
    # @param {ListNode} l1
    # @param {ListNode} l2
    # @return {ListNode}
    def addTwoNumbers(self, l1, l2):
        """Add two numbers stored as reversed-digit linked lists.

        Returns a new list with the digits of the sum, least significant
        first.  Returns None when both inputs are None/empty (matching the
        previous behavior).

        The original used three separate loops (common prefix, leftover
        tail, final carry); a single carry loop handles all three cases
        with divmod, which also tolerates node values >= 10.
        """
        dummy = ListNode(0)   # sentinel so we never special-case the head
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
| Python | 0.000001 | |
e5502056a36507bd6d77bc814014b45cd9afc8bf | make a version bumping and version sorting system | dishpub/versioning.py | dishpub/versioning.py | import re
regdelexp = re.compile('[-,.\/]')
regnumeric = re.compile('[0-9]+')
def split_line_by_delimiter(line, regex):
    """Split *line* into an ordered list of pieces, keeping the delimiters.

    *regex* is a compiled pattern; matched spans and the text between them
    are returned as alternating list elements.  Empty pieces are never
    emitted, so '' yields [].
    """
    pieces = []
    cursor = 0
    for match in regex.finditer(line):
        start, end = match.span()
        if cursor != start:
            pieces.append(line[cursor:start])  # text before this match
        pieces.append(line[start:end])         # the match itself
        cursor = end
    if cursor != len(line):
        pieces.append(line[cursor:])           # trailing unmatched text
    return pieces
def string_sort(x, y):
    """cmp-style comparator for strings with embedded numbers.

    Both strings are split into alternating text/digit runs and compared
    piece by piece: digit runs compare by integer value, and a digit run
    orders ahead of a text run.  Returns negative when *x* should come
    first, positive when *y* should, 0 when equal.

    NOTE(review): -1 is returned when x's piece is the *larger* value, so
    the resulting order is descending (biggest version first) — confirm
    this is intentional.  Under Python 3 wrap with functools.cmp_to_key
    to use with sorted().
    """
    xsplit = split_line_by_delimiter(x, regnumeric)
    ysplit = split_line_by_delimiter(y, regnumeric)
    ysplitlen = len(ysplit)
    xsplitlen = len(xsplit)
    # Compare only as many pieces as both strings have.
    minsplitlen = ysplitlen
    if xsplitlen < ysplitlen:
        minsplitlen = xsplitlen
    for i in range(minsplitlen):
        if xsplit[i] == ysplit[i]:
            continue
        if (xsplit[i].isdigit() and ysplit[i].isdigit()):
            # Numeric vs numeric: compare as integers (descending).
            rc = int(0)
            if int(xsplit[i]) > int(ysplit[i]):
                rc = -1
            if int(xsplit[i]) < int(ysplit[i]):
                rc = 1
            return rc
        # Numeric beats non-numeric.
        if xsplit[i].isdigit():
            return -1
        if ysplit[i].isdigit():
            return 1
        # Plain lexicographic comparison, reversed to stay descending.
        if xsplit[i] > ysplit[i]:
            return -1
        if xsplit[i] < ysplit[i]:
            return 1
    # All shared pieces equal: the longer string sorts first.
    if xsplitlen < ysplitlen:
        return 1
    if xsplitlen > ysplitlen:
        return -1
    return 0
def split_numeric_sort(x, y):
    """cmp-style comparator for version-like strings.

    Splits on the delimiters [-,./] (delimiters are kept as pieces) and
    compares field by field: numeric fields by integer value, everything
    else via string_sort.  Like string_sort, the order produced is
    descending — larger versions compare as "smaller".
    """
    xsplit = split_line_by_delimiter(x, regdelexp)
    ysplit = split_line_by_delimiter(y, regdelexp)
    ysplitlen = len(ysplit)
    xsplitlen = len(xsplit)
    # Compare only as many pieces as both strings have.
    minsplitlen = ysplitlen
    if xsplitlen < ysplitlen:
        minsplitlen = xsplitlen
    for i in range(minsplitlen):
        if xsplit[i] == ysplit[i]:
            continue
        if (xsplit[i].isdigit() and ysplit[i].isdigit()):
            # Numeric vs numeric: compare as integers (descending).
            rc = int(0)
            if int(xsplit[i]) > int(ysplit[i]):
                rc = -1
            if int(xsplit[i]) < int(ysplit[i]):
                rc = 1
            return rc
        # Numeric beats non-numeric.
        if xsplit[i].isdigit():
            return -1
        if ysplit[i].isdigit():
            return 1
        # Mixed text fields fall back to the number-aware string comparator.
        rc = string_sort(xsplit[i], ysplit[i])
        if rc != 0:
            return rc
    # All shared pieces equal: the longer string sorts first.
    if xsplitlen < ysplitlen:
        return 1
    if xsplitlen > ysplitlen:
        return -1
    return 0
def bumpVersion(versionString, versionLevel=0):
    """Return *versionString* with one numeric component incremented.

    versionLevel counts numeric fields from the right:
    0 = patch, 1 = minor, 2 = major, and so on.
    Returns None when the string contains fewer numeric fields than
    versionLevel requires.

    Fixes: the original did ``indexs = range(...); indexs.reverse()``,
    which raises AttributeError on Python 3 (range objects have no
    ``reverse``); it also carried an unused ``indexToBeBumped`` variable.
    """
    # Split into alternating text/number pieces; the capturing group keeps
    # the numeric runs so the string can be reassembled unchanged.
    pieces = re.split('([0-9]+)', versionString)
    numericSeen = -1
    output = ""
    # Walk right-to-left, rebuilding the string and bumping the
    # versionLevel-th numeric field encountered.
    for piece in reversed(pieces):
        if piece.isdigit():
            numericSeen += 1
            if numericSeen == versionLevel:
                piece = str(int(piece) + 1)
        output = piece + output
    if numericSeen < versionLevel:
        # Not enough numeric fields to bump the requested level.
        return None
    return output
if __name__ == "__main__":
result = bumpVersion("0.0.1", 0)
if "0.0.2" != result:
print "Fail"
result = bumpVersion("0.0.1a", 0)
if "0.0.2a" != result:
print "Fail"
result = bumpVersion("0.0.1a", 1)
if "0.1.1a" != result:
print "Fail"
result = bumpVersion("0.0.1a", 2)
if "1.0.1a" != result:
print "Fail"
result = bumpVersion("0.0.1a", 3)
if None != result:
print "Fail"
| Python | 0.000014 | |
e46da8f316485c7c9e11ffe751108539f9254a68 | Create ClientUDP.py | ClientUDP.py | ClientUDP.py | '''
Created on 12 Feb 2015
@author: shutebt01
'''
#!/bin/env/python3
'''
Packet format (JSON-encoded list):
[type, src-name, src-group, data]
'''
import socket, threading, json
name = input("Enter User Name: ")
port = 16500
#host = input("Enter host: ")
room = "Global"
showall = False
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind(('', port))
#s.connect((host, port))
class InputThread(threading.Thread):
    """Reads lines from stdin and broadcasts them over UDP.

    Lines starting with '!' are commands (!pm, !room, !broadcast/!bcast);
    anything else is a chat message for the current room.

    Fixes: the original indexed ``split[1]`` / ``pmsplit[1]`` without
    length checks, so a bare ``!pm``, ``!room`` or ``!bcast`` (or a pm
    with no message body) raised IndexError and killed the thread.
    Malformed commands are now silently ignored instead.
    """

    def __init__(self):
        threading.Thread.__init__(self, target=self.input, name="Thread-Input")

    def input(self):
        global room
        while True:
            inp = input()
            data = None
            if not inp.startswith('!'):
                # Plain text: a chat message for the current room.
                data = json.dumps(["Message", name, room, inp])
            else:
                # Commands carry the sender's name/room for tracking.
                packet = ["Event", name, room]
                split = inp.split(' ', 1)
                if split[0] == "!pm" and len(split) == 2:
                    pmsplit = split[1].split(' ', 1)
                    # Require both a target name and a message body.
                    if len(pmsplit) == 2:
                        packet.append("pm")
                        packet.append(pmsplit[0])
                        packet.append(pmsplit[1])
                        data = json.dumps(packet)
                elif split[0] == "!room" and len(split) == 2:
                    # Room changes are purely local; nothing is sent.
                    room = split[1]
                    print("You changed to room:" + room)
                elif split[0] in ("!broadcast", "!bcast") and len(split) == 2:
                    packet.append("bcast")
                    packet.append(split[1])
                    data = json.dumps(packet)
            if data:
                s.sendto(data.encode("ascii"), ("<broadcast>", port))
class OutputThread(threading.Thread):
    """Receives UDP packets and prints the ones relevant to this client."""

    def __init__(self):
        threading.Thread.__init__(self, target=self.output, name="Thread-Output")

    def output(self):
        while True:
            raw = s.recv(2048).decode("ascii")
            packet = json.loads(raw)
            kind = packet[0]
            if kind == "Message" and packet[2] == room:
                # Room chat: only show traffic for the room we are in.
                print(packet[1] + " (" + packet[2] + "):" + packet[3])
            elif kind == "Event":
                if packet[3] == "pm" and packet[4] == name:
                    # Private message addressed to this user.
                    print(packet[1] + " (" + packet[2] + ") -> You: " + packet[5])
                elif packet[3] == "bcast":
                    # Broadcasts are shown regardless of room.
                    print(packet[1] + " (" + "*" + "):" + packet[4])
# Start the reader (stdin -> UDP) and writer (UDP -> stdout) loops;
# both threads run forever.
Inp = InputThread()
Inp.start()
Out = OutputThread()
Out.start()
| Python | 0 | |
bf4b4ae886d8b631c443d1d992159f1922232dca | Create code.py | Code/code.py | Code/code.py | from django.db import models
class Address(models.Model):
    """A postal address.

    Fix: the original annotated the ``state`` field with a C-style ``//``
    comment, which is a SyntaxError in Python; replaced with ``#``.
    """
    address = models.CharField(max_length=255, blank=True)
    city = models.CharField(max_length=150, blank=True)
    # Two-letter code, e.g. US for United States of America, IN for India.
    state = models.CharField(max_length=2, blank=True)
    pin = models.CharField(max_length=15, blank=True)
class Contact(models.Model):
    """A person and how to reach them."""
    first_name = models.CharField(max_length=255, blank=True)
    last_name = models.CharField(max_length=255, blank=True)
    email = models.EmailField(blank=True)
    phone = models.CharField(max_length=150, blank=True)
    # Fix: original was CharField(auto_now_add=True) — CharField requires
    # max_length and does not accept auto_now_add, so the model could not
    # load.  DateField supports auto_now_add.  NOTE(review): auto_now_add
    # records the row-creation date, which is odd for a *birth* date —
    # confirm the intended semantics.
    birthdate = models.DateField(auto_now_add=True)
    address = models.ForeignKey(Address, null=True)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.