commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
0091af78bd191e34ecb621b20e79d6dd3d32ebb6 | Add unit tests for VocabularySet | tests/test_core.py | tests/test_core.py | #!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
    """Unit tests for VocabularySet construction and dict-like access."""

    def setUp(self):
        """Initialize data used in the tests."""
        self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
        self.multi_vocab = {
            'vocab_1': VOCAB_1.split('\n'),
            'vocab_2': VOCAB_2.split('\n')
        }
        self.multi_vocab_inst = VocabularySet(self.multi_vocab)

    def test_init_empty(self):
        """Test constructing an instance with no vocabs."""
        obs = VocabularySet({})
        self.assertEqual(len(obs), 0)

    def test_init_single(self):
        """Test constructing an instance with a single vocab."""
        obs = VocabularySet(self.single_vocab)
        self.assertEqual(len(obs), 1)
        # assertIn/assertNotIn report the container on failure, unlike
        # assertTrue(x in y), which only reports "False is not true".
        self.assertIn('vocab_1', obs)

    def test_init_multi(self):
        """Test constructing an instance with multiple vocabs."""
        self.assertEqual(len(self.multi_vocab_inst), 2)
        self.assertIn('vocab_1', self.multi_vocab_inst)
        self.assertIn('vocab_2', self.multi_vocab_inst)

    def test_contains(self):
        """Test membership based on ID."""
        self.assertIn('vocab_1', self.multi_vocab_inst)
        self.assertIn('vocab_2', self.multi_vocab_inst)
        self.assertNotIn('vocab_3', self.multi_vocab_inst)

    def test_getitem(self):
        """Test retrieving vocab based on ID."""
        obs = self.multi_vocab_inst['vocab_1']
        self.assertEqual(obs, set(['foo', 'bar', 'baz']))
        obs = self.multi_vocab_inst['vocab_2']
        self.assertEqual(obs, set(['xyz', '123', 'abc']))

    def test_getitem_nonexistent(self):
        """Test retrieving vocab based on nonexistent ID."""
        with self.assertRaises(KeyError):
            _ = self.multi_vocab_inst['vocab_3']

    def test_len(self):
        """Test retrieving the number of vocabs."""
        self.assertEqual(len(self.multi_vocab_inst), 2)
# Sample vocabulary files for the tests. Entries deliberately carry
# whitespace-only lines, literal tab escapes and mixed case; test_getitem
# expects stripped, lower-cased values, so VocabularySet presumably
# normalizes entries — NOTE(review): confirm against metasane.core.
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
| Python | 0 | |
1f9240f0b954afa9f587f468872c3e1e215f2eaa | Implement channel mode +s (or what's left of it) | txircd/modules/cmode_s.py | txircd/modules/cmode_s.py | from txircd.modbase import Mode
class SecretMode(Mode):
    """Channel mode +s: hides secret channels from users who are not in them."""

    def listOutput(self, command, data):
        """LIST hook: blank out channel data for secret channels the
        requesting user is not a member of, then hand the data back."""
        if command != "LIST":
            return data
        cdata = data["cdata"]
        if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
            data["cdata"] = {}
        # Bug fix: the LIST path previously fell through and returned None,
        # while the non-LIST path returned `data`; return it on both paths.
        return data
    # other +s stuff is hiding in other modules.
class Spawner(object):
    """Module spawner: wires the +s channel mode and its LIST hook into the ircd."""

    def __init__(self, ircd):
        self.ircd = ircd
        self.mode_s = None

    def spawn(self):
        """Instantiate the mode and describe what this module provides."""
        self.mode_s = SecretMode()
        # Bug fix: the returned dict literal was missing its closing brace,
        # which made the whole module a SyntaxError.
        return {
            "modes": {
                "cns": self.mode_s
            },
            "actions": {
                "commandextra": [self.mode_s.listOutput]
            }
        }

    def cleanup(self):
        """Unregister the channel mode and its LIST hook."""
        self.ircd.removeMode("cns")
        self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
9f59cf074c4f64616bf3a31fd5c6fc649e99e4ae | Checks whether all the case-based letters of the strings are uppercase | techgig_isupper.py | techgig_isupper.py | def main():
s=raw_input()
if s.isupper():
print "True"
else:
print "False"
main()
| Python | 0.999999 | |
2a26fc7f0ac6223ebcb20eb1de550e899e5728db | add beginnings of script for ball identification | scripts/hist.py | scripts/hist.py | import cv2
import numpy as np
# Load the pool-table photo; NOTE(review): hard-coded absolute path — this
# script only runs on the author's machine as-is.
frame = cv2.imread('/mnt/c/Users/T-HUNTEL/Desktop/hackathon/table3.jpg')
h,w,c = frame.shape
print frame.shape

# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

# Pixel value treated as the region boundary by flood_fill below.
BORDER_COLOR = 0
def flood_fill(image, x, y, value, border_color=None):
    """Flood fill the 4-connected region of non-border pixels containing
    (x, y), painting it with *value*.

    Args:
        image: 2-D array indexed as image[row, col]; modified in place.
        x, y: seed coordinates (row, column).
        value: fill value written into the region.
        border_color: pixel value treated as the boundary; defaults to the
            module-level BORDER_COLOR (new optional parameter, backward
            compatible with existing callers).

    Returns:
        (count, points): number of filled pixels and their coordinates, or
        (None, None) if the seed is out of bounds or on a border pixel.
    """
    if border_color is None:
        border_color = BORDER_COLOR
    rows, cols = image.shape[:2]
    # Bug fixes vs. the original: the seed bounds test compared the row
    # against the column count (shape[1]) and ignored negative indices, and
    # the neighbour test compared the wrong variable (`y` instead of `t`)
    # with `<=`, permitting out-of-range and wrap-around indexing.
    if not (0 <= x < rows and 0 <= y < cols) or image[x, y] == border_color:
        return None, None
    count = 1
    points = [(x, y)]
    edge = [(x, y)]
    image[x, y] = value
    while edge:
        newedge = []
        for (px, py) in edge:
            for (s, t) in ((px + 1, py), (px - 1, py), (px, py + 1), (px, py - 1)):
                if (0 <= s < rows and 0 <= t < cols
                        and image[s, t] not in (border_color, value)):
                    image[s, t] = value
                    points.append((s, t))
                    count += 1
                    newedge.append((s, t))
        edge = newedge
    return count, points
# thresholds for different balls / background (HSV, uint8 ranges)
low_bkg = np.array([15, 40, 50], dtype=np.uint8)
high_bkg = np.array([40, 190, 200], dtype=np.uint8)

lower_blue = np.array([110,50,50], dtype=np.uint8)
upper_blue = np.array([130,255,255], dtype=np.uint8)

low_yellow = np.array([20, 30, 30], dtype=np.uint8)
high_yellow = np.array([30, 255, 255], dtype=np.uint8)

# mask out the background
mask = cv2.inRange(hsv, low_bkg, high_bkg)
mask = np.invert(mask)

# Bitwise-AND mask and original image
objects = cv2.bitwise_and(frame,frame, mask= mask)
hsv = cv2.cvtColor(objects, cv2.COLOR_BGR2HSV)

# mask the yellow balls
mask = cv2.inRange(hsv, low_yellow, high_yellow)
yellows = cv2.bitwise_and(objects, objects, mask=mask)

# find the biggest cloud of 1's in the yellow mask
biggest_cloud = []
biggest_count = 0
# Normalize the 0/255 mask to 0.0/1.0 so filled pixels can be marked with 2.
image = mask / 255.
while len(np.where(image == 1)[0]) > 0:
    loc = np.where(image == 1)
    # loc[0] are row indices, loc[1] are column indices.
    y = loc[0][0]
    x = loc[1][0]
    # NOTE(review): flood_fill can return (None, None) for a border seed;
    # `None > int` is only tolerated on Python 2 — confirm intent.
    count, cloud = flood_fill(image, y, x, 2)
    if count > biggest_count:
        print count
        biggest_count = count
        biggest_cloud = cloud

print biggest_cloud
print biggest_count

# Dump intermediate images for visual inspection.
cv2.imwrite('mask.jpg', mask)
cv2.imwrite('yellows.jpg', yellows)
cv2.imwrite('frame.jpg', frame)
| Python | 0 | |
6234d8942e77ef2fda05cc7e15b901a788418070 | Create Benchmark.py | qiskit/aqua/components/optimizers/Benchmark.py | qiskit/aqua/components/optimizers/Benchmark.py | # -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from qiskit.aqua.components.variational_forms import RY
from qiskit.aqua.operator import Operator
from qiskit.aqua.components.optimizers import (CG, COBYLA, L_BFGS_B,
POWELL, SLSQP, TNC)
from qiskit.aqua.algorithms import VQE
from qiskit.aqua import QuantumInstance
from qiskit import Aer
class Benchmark():
    '''
    Class for benchmarking optimizers on various algorithms. VQE algorithm is the default.
    '''
    def __init__(self, num_qubits = 2, backend = 'statevector_simulator', disp = True,
                 input_optimizer = None, input_algorithm = None,
                 algorithm_cost_function = None, pauli_dict = None, var_form = RY,
                 benchmark_algorithms = [VQE],
                 benchmark_optimizers = [COBYLA, TNC, SLSQP, CG, L_BFGS_B, POWELL],
                 shots = 1024, maxiter = 1000):
        '''
        Setup for benchmarking class
        Args:
            num_qubits (int): number of qubits
            backend (str): backend for simulator
            disp (bool): Whether or not to display which algorithm and optimizer is currently being run
            input_optimizer (optimizer class): custom optimizer class for benchmarking against default optimizers
            input_algorithm (algorithm class): custom algorithm class for benchmarking against default algorithms
            algorithm_cost_function (operator class): cost function for custom algorithm class
            pauli_dict (dict): pauli dictionary corresponding to the number of qubits in the system for VQE
            var_form (VaritionalForm): Variational form for ansatz if algorithm is VQE
            benchmark_algorithms (list): default algorithms for benchmarking against
            benchmark_optimizers (list): default optimizers for benchmarking against
            shots (int): number of runs in each quantum instance
            maxiter (int): maximum number of itterations for convergence on each optimizer
        '''
        # NOTE(review): benchmark_algorithms/benchmark_optimizers are mutable
        # default arguments shared across instances; safe only while the
        # lists are never mutated in place.
        self._num_qubits = num_qubits
        self._backend = Aer.get_backend(backend)
        self._shots = shots
        self._maxiter = maxiter
        self._disp = disp
        # A custom optimizer/algorithm is benchmarked *in addition to* the
        # defaults, and is placed first in the run order.
        self.optimizers = benchmark_optimizers if input_optimizer is None else [input_optimizer]+benchmark_optimizers
        if len(self.optimizers) == 0:
            raise ValueError("No optimizers to test.")
        self.custom_cost = algorithm_cost_function
        self.custom_algo = input_algorithm.__name__ if input_algorithm is not None else None
        if self.custom_cost is None and self.custom_algo is not None:
            raise ValueError("A cost function must be input for a custom algorithm.")
        self.algorithms = benchmark_algorithms if input_algorithm is None else [input_algorithm]+benchmark_algorithms
        if len(self.algorithms) == 0:
            raise ValueError("No algorithms to run.")
        if VQE in self.algorithms:
            self._pauli_dict = pauli_dict
            self._ansatz = var_form(num_qubits)

    def _create_pauli_cost_function(self, pauli_dict = None):
        '''
        Creates a pauli dictionary cost function
        Args:
            pauli_dict (dict): A pauli dictionary to use as the Hamiltonian if
                               running VQE. If None, one will be made.
        Returns:
            (operator class) Pauli dictionary as an operator
        '''
        # pauli operators for a hamiltonian
        if pauli_dict is None:
            if self._num_qubits == 2:
                # diatomic hydrogen (H-H system)
                pauli_dict = {'paulis':
                              [{"coeff": {"imag": 0.0, "real": -1.052373245772859}, "label": "II"},
                               {"coeff": {"imag": 0.0, "real": 0.39793742484318045}, "label": "IZ"},
                               {"coeff": {"imag": 0.0, "real": -0.39793742484318045}, "label": "ZI"},
                               {"coeff": {"imag": 0.0, "real": -0.01128010425623538}, "label": "ZZ"},
                               {"coeff": {"imag": 0.0, "real": 0.18093119978423156}, "label": "XX"}]
                              }
            else:
                # Fallback Hamiltonian: a single all-X Pauli string.
                pauli_dict = {'paulis': [{"coeff": {"imag": 0.0, "real": 1.0}, "label": "X"*self._num_qubits}]}
        return Operator.load_from_dict(pauli_dict)

    def _run_algorithm(self, algorithm, optimizer, it, returns = ['eval_count','eval_time','energy']):
        '''
        Runs an instance of a specified algorithm and optimizer and returns statistics
        Args:
            algorithm (algorithm class): algorithm to run
            optimizer (optimizer class): optimizer to use for the algorithm
            it (int): Current run number
            returns (list of str): Values to obtain from algorithm run results
        Returns:
            (dict) Results of algorithm optimization.
        '''
        if self._disp:
            print("Algorithm: "+algorithm.__name__+
                  '\t| Optimizer: '+optimizer.__name__+
                  '\t| Iteration: '+str(it+1))
        if algorithm.__name__ == 'VQE':
            cost = self._create_pauli_cost_function(self._pauli_dict)
        elif algorithm.__name__ == self.custom_algo:
            cost = self.custom_cost
        else:
            raise ValueError("Algorithm name not recognized.")
        # Capture the mean cost per optimizer iteration; keys are zero-padded
        # so they sort lexicographically.
        save_iterations = {}
        def callbackF(ind, par, m, s):
            save_iterations["iter_"+str(ind).zfill(len(str(self._maxiter)))] = m
        opt = optimizer(maxiter=self._maxiter)
        if algorithm.__name__ == 'VQE':
            algo = algorithm(cost, self._ansatz, opt, callback=callbackF)
        else:
            algo = algorithm(cost, opt, callback=callbackF)
        qinstance = QuantumInstance(self._backend, shots=self._shots)
        result = algo.run(qinstance)
        result_dict = {k: result[k] for k in returns}
        result_dict['iteration_data'] = save_iterations
        return result_dict

    def _benchmark_algorithm(self, algo, nruns):
        '''
        Runs all optimizers for a specified algorithm.
        Args:
            algo (algorithm class): Algorithm to run.
            nruns (int): Number of runs per optimizer
        '''
        return {opt.__name__: {"run_"+str(i): self._run_algorithm(algo, opt, i)
                               for i in range(nruns)}
                for opt in self.optimizers}

    def benchmark(self, nruns = 1):
        '''
        Runs all instances of algorithm and optimizer pairs and returns a dictionary of the results.
        Args:
            nruns (int): Number of runs to perform per optimizer per algorithm. Default is 1.
        Returns:
            (dict) Results of all the algorithm and optimizer runs.
        '''
        self.results = {algo.__name__: self._benchmark_algorithm(algo,nruns) for algo in self.algorithms}
        # TODO: add plotting functionality
        return self.results
| Python | 0 | |
6a686a800a3579970a15fa9552b2eb4e1b6b3ed9 | add some tools for ml scoring | corgi/ml.py | corgi/ml.py | import numpy as np
import pandas as pd
from scipy.stats import kendalltau, spearmanr
from sklearn.metrics import (accuracy_score, f1_score, log_loss,
mean_squared_error, precision_score, recall_score)
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
# Classification metrics keyed by display name; multi-class metrics use
# 'weighted' averaging so they tolerate label imbalance.
classifier_scoring = {
    'accuracy': accuracy_score,
    'log_loss': log_loss,
    'f1_score': lambda x, y: f1_score(x, y, average='weighted'),
    'precision': lambda x, y: precision_score(x, y, average='weighted'),
    'recall': lambda x, y: recall_score(x, y, average='weighted'),
}

# Regression / ranking metrics: MSE plus two rank-correlation coefficients.
regression_scoring = {
    'mean_squared_error': mean_squared_error,
    'kendalltau': lambda x, y: kendalltau(x, y).correlation,
    'spearmanr': lambda x, y: spearmanr(x, y)[0],
}
def scores(y, y_pred, scoring=None):
    """Score predictions *y_pred* against ground truth *y* with every metric
    in *scoring*.

    Args:
        y: ground-truth labels/values.
        y_pred: predicted labels/values.
        scoring: mapping of metric name -> callable(y, y_pred).

    Returns:
        dict mapping each metric name to its score.

    Raises:
        ValueError: if no scoring dict is supplied.
    """
    if scoring is None:
        # Bug fix: the message used to name cross_val_scores (copy-paste).
        # ValueError is a subclass of Exception, so existing callers
        # catching Exception keep working.
        raise ValueError("scores requires a dict of measures.")
    return {name: metric(y, y_pred) for name, metric in scoring.items()}
def cross_val_scores(clf, X, y, cv=3, scoring=None):
    """Stratified k-fold cross-validation of *clf*, scored with every metric
    in *scoring*.

    Args:
        clf: estimator with sklearn-style fit/predict.
        X, y: data and labels (converted to numpy arrays).
        cv: number of stratified folds.
        scoring: mapping of metric name -> callable(y_true, y_pred).

    Returns:
        pandas.DataFrame with one row per fold and one column per metric;
        a metric that raises on a fold is simply omitted for that fold.

    Raises:
        ValueError: if no scoring dict is supplied.
    """
    if scoring is None:
        raise ValueError("cross_val_scores requires a dict of measures.")
    X, y = np.array(X), np.array(y)
    skf = StratifiedKFold(n_splits=cv)
    scores = []
    for train, test in tqdm(skf.split(X, y)):
        clf.fit(X[train], y[train])
        y_pred = clf.predict(X[test])
        score = {}
        for k, metric in scoring.items():
            try:
                score[k] = metric(y[test], y_pred)
            except Exception:
                # Bug fix: the bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit. Best-effort behaviour is
                # preserved: metrics failing on a fold are skipped.
                pass
        scores.append(score)
    return pd.DataFrame(scores)
| Python | 0 | |
2d7d4987eb06372496ce4a5b7b961a12deba9574 | add windows-specific tests for shell_{quote,split} | tests/util_test.py | tests/util_test.py | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from nanoemoji.util import shell_quote, shell_split
import pytest
# Source:
# https://github.com/python/cpython/blob/653e563/Lib/test/test_subprocess.py#L1198-L1214
# (argv list, expected Windows command line) pairs, borrowed from CPython's
# subprocess.list2cmdline tests.
# Source:
# https://github.com/python/cpython/blob/653e563/Lib/test/test_subprocess.py#L1198-L1214
LIST2CMDLINE_TEST_DATA = [
    (["a b c", "d", "e"], '"a b c" d e'),
    (['ab"c', "\\", "d"], 'ab\\"c \\ d'),
    (['ab"c', " \\", "d"], 'ab\\"c " \\\\" d'),
    (["a\\\\\\b", "de fg", "h"], 'a\\\\\\b "de fg" h'),
    (['a\\"b', "c", "d"], 'a\\\\\\"b c d'),
    (["a\\\\b c", "d", "e"], '"a\\\\b c" d e'),
    (["a\\\\b\\ c", "d", "e"], '"a\\\\b\\ c" d e'),
    (["ab", ""], 'ab ""'),
]

# The same pairs flipped for the split direction (cmdline -> argv list).
CMDLINE2LIST_TEST_DATA = [(cmdline, args) for args, cmdline in LIST2CMDLINE_TEST_DATA]
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows only")
@pytest.mark.parametrize(
    "args, expected_cmdline",
    LIST2CMDLINE_TEST_DATA,
    ids=[s for _, s in LIST2CMDLINE_TEST_DATA],
)
def test_windows_shell_quote(args, expected_cmdline):
    # Quoting each argument and joining must reproduce the Windows cmdline.
    quoted = [shell_quote(arg) for arg in args]
    assert " ".join(quoted) == expected_cmdline
@pytest.mark.skipif(not sys.platform.startswith("win"), reason="Windows only")
@pytest.mark.parametrize(
    "cmdline, expected_args",
    CMDLINE2LIST_TEST_DATA,
    ids=[s for s, _ in CMDLINE2LIST_TEST_DATA],
)
def test_windows_shell_split(cmdline, expected_args):
    # Splitting the command line must recover the original argument list.
    result = shell_split(cmdline)
    assert result == expected_args
| Python | 0 | |
a6ff8a5838f82be3d5b0b4196c03fbf7c15aff7a | Test dat.info | test.py | test.py | import unittest
import requests
port = 'http://localhost:6461'
def info():
    """GET the dat node's /api endpoint, print the body and return the
    HTTP status code."""
    endpoint = port + '/api'
    response = requests.get(endpoint, stream=True)
    print(response.content)
    return response.status_code
class DatTest(unittest.TestCase):
    """Smoke test against a locally running dat node (port 6461)."""

    def test_info(self):
        # The /api endpoint should answer 200 when the node is up.
        self.assertEqual(info(), 200)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
26c49015b0c3be8045423306abb74eb7ea080f0b | Create test.py | test.py | test.py | # -*- coding: utf-8 -*-
import sys
import time
import feedparser
import nltk
import coding
import numpy as np
import nltk
import string
from nltk.corpus import stopwords
#coding.setup_console("utf8")
if __name__ == "__main__":
    start_time = time.time()
    # len(sys.argv) is always >= 1, so this branch always runs.
    if len(sys.argv) >= 1:
        # Banner: "Start <timestamp>" (message kept in Russian on purpose).
        print "Старт " + str(start_time)
        #app = locomotive.app.Application()
        # ... additional logic ...
        #print feedparser.parse("http://feeds.nytimes.com/nyt/rss/Technology")
        #print nltk.corpus.stopwords.words('russian')
        #print nltk.download()
def read_data_file(file_name="data.csv"):
    """Read *file_name* and decode its cp1251 bytes to unicode text."""
    # Bug fix: open(...).read() leaked the file handle; a context manager
    # guarantees it is closed. Binary mode makes the subsequent cp1251
    # decode correct (and portable to Python 3, where text mode would
    # already have decoded the bytes).
    with open(file_name, 'rb') as fh:
        raw = fh.read()
    # Decode from cp1251 (external encoding) into unicode (working encoding).
    return raw.decode('cp1251')
def save_result_file(file_name="data.csv", text=""):
    """Encode *text* to utf8 and write it to "new_" + *file_name*."""
    # Encode from unicode (working encoding) to utf8 (external encoding).
    data = text.encode('utf8')
    # Bug fix: open(...).write() leaked the file handle (and relied on GC
    # to flush); the context manager closes and flushes deterministically.
    # Binary mode writes the encoded bytes verbatim on Python 2 and 3.
    with open("new_" + file_name, 'wb') as fh:
        fh.write(data)
def tokenize_me(file_text):
    """Tokenize *file_text*, dropping punctuation tokens, Russian stop
    words (plus a few extra fillers) and guillemet quote characters."""
    # NLTK performs the base tokenization.
    words = nltk.word_tokenize(file_text)
    # Drop punctuation tokens.
    words = [w for w in words if w not in string.punctuation]
    # Drop Russian stop words, extended with common filler words.
    stop_words = stopwords.words('russian')
    stop_words.extend([u'что', u'это', u'так', u'вот', u'быть', u'как', u'в', u'—', u'к', u'на'])
    words = [w for w in words if w not in stop_words]
    # Strip guillemets from the surviving tokens.
    return [w.replace(u"«", "").replace(u"»", "") for w in words]
# Driver: read the raw file, tokenize it, and save one token per line.
text = read_data_file("data1.csv")
#s = text.rstrip(";")
print text
#d = np.array(text)
#d = ['Тест','списка']
tokens = tokenize_me(text)
#print ','.join(d)
print ','.join(tokens)
# The helper prefixes "new_", so this writes "new_data1.csv".
save_result_file("data1.csv",'\n'.join(tokens))
| Python | 0.000005 | |
18df3284fd6dc176b71c41599d02a24dc021f8db | add file that is useful for testing; but will be much more useful when I figure out how to turn of debugging output in Flask. | test.py | test.py | #!/usr/bin/env python
import os
from doctest import testmod, NORMALIZE_WHITESPACE, ELLIPSIS
import backend, client, frontend, misc, model, session
def tm(module):
    """Run *module*'s doctests, normalizing whitespace and allowing
    ellipsis wildcards in expected output."""
    testmod(module, optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)
def run_doctests():
    """Run the doctests of every project module, in a fixed order."""
    for module in (backend, client, frontend, misc, model, session):
        tm(module)
if __name__ == '__main__':
run_doctests()
| Python | 0 | |
ae6184a023f9a14c54663270d4a4294b8c3832f4 | Create test.py | test.py | test.py | import os
# Trivial smoke-test script: just prints a greeting.
print("hello there")
| Python | 0.000002 | |
9309f7190314abdd8b56368147862453d17d97b5 | Create test.py | test.py | test.py | Python | 0.000005 | ||
d527bc83d44b91bb827c02907faf8cd7e7d49544 | Add dateutil gist | dateutil.py | dateutil.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8:et
"""Date and Time util
"""
__author__ = ["Jianlong Chen <jianlong99@gmail.com>"]
__date__ = "2013-07-17"
import datetime
def year():
    """Current year as a 4-digit string, e.g. '2013'."""
    now = datetime.datetime.now()
    return now.strftime('%Y')
def date_time():
    """Current local timestamp formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')
def date():
    """Current local date formatted as 'YYYY-MM-DD'."""
    now = datetime.datetime.now()
    return now.strftime('%Y-%m-%d')
def hour():
    """Current local hour as a zero-padded 2-digit string ('00'-'23')."""
    now = datetime.datetime.now()
    return now.strftime('%H')
| Python | 0 | |
75a61dfe788102d04e1cc3b151e839fa9add724f | Fix review requests | tools/export/cdt/__init__.py | tools/export/cdt/__init__.py | import re
from os.path import join, exists, realpath, relpath, basename
from os import makedirs
from tools.export.makefile import Makefile, GccArm, Armc5, IAR
class Eclipse(Makefile):
    """Generic Eclipse project. Intended to be subclassed by classes that
    specify a type of Makefile.
    """
    def generate(self):
        """Generate Makefile, .cproject & .project Eclipse project file,
        py_ocd_settings launch file, and software link .p2f file
        """
        super(Eclipse, self).generate()
        # Matches a leading "./" (or a bare "."), to be replaced with the
        # project name so Eclipse include paths resolve inside the project.
        starting_dot = re.compile(r'(^[.]/|^[.]$)')
        # Template context consumed by the cdt/*.tmpl templates below; key
        # names must match the template placeholders exactly.
        ctx = {
            'name': self.project_name,
            'elf_location': join('BUILD',self.project_name)+'.elf',
            'c_symbols': self.toolchain.get_symbols(),
            'asm_symbols': self.toolchain.get_symbols(True),
            'target': self.target,
            'include_paths': [starting_dot.sub('%s/' % self.project_name, inc) for inc in self.resources.inc_dirs],
            'load_exe': str(self.LOAD_EXE).lower()
        }

        if not exists(join(self.export_dir,'eclipse-extras')):
            makedirs(join(self.export_dir,'eclipse-extras'))

        self.gen_file('cdt/pyocd_settings.tmpl', ctx,
                      join('eclipse-extras',self.target+'_pyocd_settings.launch'))
        self.gen_file('cdt/necessary_software.tmpl', ctx,
                      join('eclipse-extras','necessary_software.p2f'))

        self.gen_file('cdt/.cproject.tmpl', ctx, '.cproject')
        self.gen_file('cdt/.project.tmpl', ctx, '.project')
class EclipseGcc(Eclipse, GccArm):
    # LOAD_EXE controls whether the debug launcher loads the executable.
    LOAD_EXE = True
    NAME = "Eclipse-GCC-ARM"

class EclipseArmc5(Eclipse, Armc5):
    LOAD_EXE = False
    NAME = "Eclipse-Armc5"

class EclipseIAR(Eclipse, IAR):
    LOAD_EXE = True
    NAME = "Eclipse-IAR"
| import re
from os.path import join, exists, realpath, relpath, basename
from os import makedirs
from tools.export.makefile import Makefile, GccArm, Armc5, IAR
class Eclipse(Makefile):
    """Generic Eclipse project. Intended to be subclassed by classes that
    specify a type of Makefile.
    """
    def generate(self):
        """Generate Makefile, .cproject & .project Eclipse project file,
        py_ocd_settings launch file, and software link .p2f file
        """
        super(Eclipse, self).generate()
        # Matches a leading "./" (or a bare "."), to be replaced with the
        # project name so Eclipse include paths resolve inside the project.
        include_paths_replace_re = re.compile(r'(^[.]/|^[.]$)')
        ctx = {
            'name': self.project_name,
            'elf_location': join('BUILD',self.project_name)+'.elf',
            'c_symbols': self.toolchain.get_symbols(),
            'asm_symbols': self.toolchain.get_symbols(True),
            'target': self.target,
            # Fix: a list comprehension instead of map(lambda ...) — on
            # Python 3 map() returns a lazy iterator, which the template
            # engine could exhaust or fail to render; the comprehension is
            # also the idiomatic form.
            'include_paths': [include_paths_replace_re.sub('%s/' % self.project_name, s)
                              for s in self.resources.inc_dirs],
            'load_exe': str(self.LOAD_EXE).lower()
        }

        if not exists(join(self.export_dir,'eclipse-extras')):
            makedirs(join(self.export_dir,'eclipse-extras'))

        self.gen_file('cdt/pyocd_settings.tmpl', ctx,
                      join('eclipse-extras',self.target+'_pyocd_settings.launch'))
        self.gen_file('cdt/necessary_software.tmpl', ctx,
                      join('eclipse-extras','necessary_software.p2f'))

        self.gen_file('cdt/.cproject.tmpl', ctx, '.cproject')
        self.gen_file('cdt/.project.tmpl', ctx, '.project')
class EclipseGcc(Eclipse, GccArm):
    # LOAD_EXE controls whether the debug launcher loads the executable.
    LOAD_EXE = True
    NAME = "Eclipse-GCC-ARM"

class EclipseArmc5(Eclipse, Armc5):
    LOAD_EXE = False
    NAME = "Eclipse-Armc5"

class EclipseIAR(Eclipse, IAR):
    LOAD_EXE = True
    NAME = "Eclipse-IAR"
5186f3e4bfcaf033c4012e72c8cb766a0b903296 | Add file for updating strains | set_isotypes.py | set_isotypes.py | from gcloud import datastore
import requests
import time
ds = datastore.Client(project='andersen-lab')

# Published Google Sheet (TSV) mapping wild-isolate strains to isotypes.
url = "https://docs.google.com/spreadsheets/d/1V6YHzblaDph01sFDI8YK_fP0H7sVebHQTXypGdiQIjI/pub?gid=0&single=true&output=tsv"
gs = requests.get(url).text.encode("utf-8").splitlines()
gs = [str(x, 'utf-8').strip().split("\t") for x in gs]
# Keep rows that have an isotype (column 3) and reduce each row to
# (strain, isotype, previous_names).
gs = [x for x in gs if x[2]]
gs = [(x[0], x[2], x[3]) for x in gs]

# Lookup tables: any known name (current strain, previous name, or the
# isotype itself) -> canonical isotype / strain.
WI_ISOTYPE = {}
WI_STRAIN = {}
for strain, isotype, prev_names in gs:
    if prev_names != "NA":
        # Previous names are pipe-delimited; map each back to the current
        # strain and isotype.
        prev_names = prev_names.split("|")
        for p in prev_names:
            if p:
                WI_ISOTYPE[p] = isotype
                WI_STRAIN[p] = strain
    if strain and isotype:
        WI_ISOTYPE[strain] = isotype
        WI_STRAIN[strain] = strain
    if isotype:
        WI_ISOTYPE[isotype] = isotype

# Large/verbose fields excluded from datastore indexing.
exclude_indices = ['most_abundant_sequence',
                   'fastqc_per_base_sequence_quality_data',
                   'fastqc_per_tile_sequence_quality_data',
                   'fastqc_per_sequence_quality_scores_data',
                   'fastqc_per_base_sequence_content_data',
                   'fastqc_per_sequence_gc_content_data',
                   'fastqc_per_base_n_content_data',
                   'fastqc_sequence_length_distribution_data',
                   'fastqc_sequence_duplication_levels_data',
                   'fastqc_overrepresented_sequences_data',
                   'fastqc_adapter_content_data',
                   'fastqc_kmer_content_data',
                   'fastqc_error']
def query_item(kind, filters=None, projection=()):
    """Fetch datastore entities of *kind*.

    filters, when given, is a list of (property, operator, value) triples,
    e.g. [("var_name", "=", 1)].
    """
    query = ds.query(kind=kind, projection=projection)
    for prop, op, val in (filters or []):
        query.add_filter(prop, op, val)
    return query.fetch()
def get_item(kind, name):
    """Fetch a single datastore entity by kind and key name (None if absent)."""
    key = ds.key(kind, name)
    return ds.get(key)
def update_item(kind, name, **kwargs):
    """Merge *kwargs* into the datastore entity (kind, name), creating it
    if needed, and bump its fq_profile_count.

    Merge rules per key:
      - str: overwrite.
      - list: extend, then de-duplicate (order is not preserved).
      - 'date_created': keep the earlier of the stored and incoming values.
      - anything else: overwrite.
    """
    item = get_item(kind, name)
    # Fix: both branches built an identical Entity; construct it once and
    # seed it from the existing item when there is one.
    m = datastore.Entity(key=ds.key(kind, name),
                         exclude_from_indexes=exclude_indices)
    if item is not None:
        m.update(dict(item))
    for key, value in kwargs.items():
        # Fix: isinstance() instead of the type(x) == T anti-pattern.
        if isinstance(value, str):
            m[key] = value
        elif isinstance(value, list):
            if key in m:
                m[key] += value
            else:
                m[key] = value
            m[key] = list(set(m[key]))
        # If date created of file is earlier, keep the earlier timestamp.
        elif key == 'date_created' and item:
            vtimestamp = time.mktime(value.timetuple())
            dstimestamp = time.mktime(m['date_created'].timetuple())
            if vtimestamp < dstimestamp:
                m[key] = value
        else:
            m[key] = value
    # Count how many times this entity has been profiled/updated.
    if 'fq_profile_count' in m:
        m['fq_profile_count'] += 1
    else:
        m['fq_profile_count'] = 1
    ds.put(m)
# Re-key every usable wild-isolate fastq record to the canonical strain and
# isotype names, fix libraries that drifted from their barcodes, and write
# the records back.
fastqs = query_item('fastq', filters = [['strain_type', '=', 'WI'], ['use', '=', True]])
for fq in fastqs:
    if 'original_strain' in fq.keys():
        if fq['original_strain'] in WI_STRAIN.keys():
            fq['strain'] = WI_STRAIN[fq['original_strain']]
        if fq['original_strain'] in WI_ISOTYPE.keys():
            fq['isotype'] = WI_ISOTYPE[fq['original_strain']]
            print([fq.key.name, fq['isotype'], fq['strain'], fq['original_strain']])
    if 'seq_folder' in fq.keys():
        # Original sequencing runs are left untouched; for newer runs the
        # library should equal the barcode with '+' separators removed.
        if fq['seq_folder'] != "original_wi_seq":
            if fq['library'] != fq['barcode'].replace("+", ""):
                print(fq['library'] + "=>" + fq['barcode'].replace("+", ""))
                fq['library'] = fq['barcode'].replace("+", "")
    update_item('fastq', fq.key.name, **fq)
| Python | 0 | |
39fa13cf9b12f3828d4776d10532405c0ea43603 | Add an example | examples/example.py | examples/example.py | """
Flow as follows:
Create Service -> Create User -> Initiate Authentication -> Verify Pin
"""
from messente.verigator.api import Api
# Placeholder credentials — replace with real Messente account values.
api = Api("username", "password")

service = api.services.create("http://example.com", "service_name")
user = api.users.create(service.id, "+xxxxxxxxxxx", "username")
# Kick off an SMS-based authentication; the PIN arrives by text message.
auth_id = api.auth.initiate(service.id, user.id, api.auth.METHOD_SMS)

while True:
    try:
        input = raw_input  # Python 2 compatibility
    except NameError:
        pass
    token = input("Enter Sms Pin: ")
    # Retry until the entered PIN verifies successfully.
    auth_res, error = api.auth.verify(service.id, user.id, api.auth.METHOD_SMS, token, auth_id)
    if auth_res:
        break
    print("Not Verified... Reason: {}".format(error['result']))

print("Verified Successfully!")
| Python | 0 | |
265f8c48f4b257287dd004ba783a8aa6f94bb870 | Add Latin params file | cltk/tokenize/latin/params.py | cltk/tokenize/latin/params.py | """ Params: Latin
"""
__author__ = ['Patrick J. Burns <patrick@diyclassics.org>']
__license__ = 'MIT License.'
# Roman praenomen abbreviations (lower-cased). NOTE(review): the list
# contains two overlapping runs of praenomina; the duplicates are harmless
# because ABBREVIATIONS below is a set, but the source data could be
# de-duplicated.
PRAENOMINA = ['a', 'agr', 'ap', 'c', 'cn', 'd', 'f', 'k', 'l', "m'", 'm', 'mam', 'n', 'oct', 'opet', 'p', 'post', 'pro', 'q', 's', 'ser', 'sert', 'sex', 'st', 't', 'ti', 'v', 'vol', 'vop', 'a', 'ap', 'c', 'cn', 'd', 'f', 'k', 'l', 'm', "m'", 'mam', 'n', 'oct', 'opet', 'p', 'paul', 'post', 'pro', 'q', 'ser', 'sert', 'sex', 'sp', 'st', 'sta', 't', 'ti', 'v', 'vol', 'vop']

# Month abbreviations plus Roman calendar day markers (kalends, nones,
# ides, "ante diem").
CALENDAR = ['ian', 'febr', 'mart', 'apr', 'mai', 'iun', 'iul', 'aug', 'sept', 'oct', 'nov', 'dec'] \
            + ['kal', 'non', 'id', 'a.d']

# Miscellaneous institutional abbreviations (consul, senatus consultum, etc.).
MISC = ['coll', 'cos', 'ord', 'pl.', 's.c', 'suff', 'trib']

# Union of all abbreviation tokens used by the Latin sentence tokenizer.
ABBREVIATIONS = set(
    PRAENOMINA +
    CALENDAR +
    MISC
)
| Python | 0.000001 | |
8f6e10a6fe5d76a27369801ae998e3d7e30b667e | Implement Zhai_Luo support. | colour/adaptation/zhai2018.py | colour/adaptation/zhai2018.py | import numpy as np
def chromatic_adaptation_forward_Zhai2018(XYZb,
                                          XYZwb,
                                          Db,
                                          XYZws,
                                          Ds,
                                          XYZwo,
                                          CAT="CAT02"):
    """Forward chromatic adaptation using the Zhai and Luo (2018) two-step
    model via a baseline illuminant.

    Args:
        XYZb: sample tristimulus values under the beta (input) illuminant.
        XYZwb: white point of the beta illuminant.
        Db: degree of adaptation under the beta illuminant, in [0, 1].
        XYZws: white point of the delta (output) illuminant.
        Ds: degree of adaptation under the delta illuminant, in [0, 1].
        XYZwo: tristimulus values of the baseline illuminant.
        CAT: chromatic adaptation transform, "CAT02" or "CAT16".

    Returns:
        Sample tristimulus values adapted to the delta illuminant.

    Raises:
        ValueError: if *CAT* is neither "CAT02" nor "CAT16".
    """
    Ywo = XYZwo[1]
    Ywb = XYZwb[1]
    Yws = XYZws[1]
    if CAT == "CAT02":
        Mt = np.array([
            [0.7328, 0.4296, -0.1624],
            [-0.7036, 1.6975, 0.0061],
            [0.0030, 0.0136, 0.9834],
        ])
    elif CAT == "CAT16":
        Mt = np.array([
            [0.401288, 0.650173, -0.051461],
            [-0.250268, 1.204414, 0.045854],
            [-0.002079, 0.048952, 0.953127],
        ])
    else:
        # Bug fix: an unknown CAT previously fell through and crashed later
        # with UnboundLocalError on Mt; fail fast with a clear message.
        raise ValueError("CAT must be 'CAT02' or 'CAT16', got %r" % (CAT,))
    # Cone-like responses for the sample and the three white points.
    RGBb = Mt @ XYZb
    RGBwb = Mt @ XYZwb
    RGBws = Mt @ XYZws
    RGBwo = Mt @ XYZwo
    # Per-channel adaptation factors for the beta and delta illuminants.
    Drgbb = Db * (Ywb / Ywo) * (RGBwo / RGBwb) + 1 - Db
    Drgbs = Ds * (Yws / Ywo) * (RGBwo / RGBws) + 1 - Ds
    Drgb = (Drgbb / Drgbs)
    RGBs = Drgb * RGBb
    XYZs = np.linalg.inv(Mt) @ RGBs
    return XYZs
"""
XYZb = np.array([48.900,43.620,6.250])
XYZwb = np.array([109.850,100,35.585])
Db = 0.9407
XYZws = np.array([95.047,100,108.883])
Ds = 0.9800
XYZwo = np.array([100,100,100])
Zhai_Luo2(XYZb, XYZwb, Db, XYZws, Ds, XYZwo, 'CAT16')
"""
def chromatic_adaptation_inverse_Zhai2018(XYZs,
                                          XYZwb,
                                          Db,
                                          XYZws,
                                          Ds,
                                          XYZwo,
                                          CAT="CAT02"):
    """Inverse chromatic adaptation using the Zhai and Luo (2018) two-step
    model: recover the sample under the beta illuminant from its adapted
    tristimulus values under the delta illuminant.

    Args:
        XYZs: sample tristimulus values under the delta (output) illuminant.
        XYZwb: white point of the beta illuminant.
        Db: degree of adaptation under the beta illuminant, in [0, 1].
        XYZws: white point of the delta illuminant.
        Ds: degree of adaptation under the delta illuminant, in [0, 1].
        XYZwo: tristimulus values of the baseline illuminant.
        CAT: chromatic adaptation transform, "CAT02" or "CAT16".

    Returns:
        Sample tristimulus values under the beta illuminant.

    Raises:
        ValueError: if *CAT* is neither "CAT02" nor "CAT16".
    """
    Ywo = XYZwo[1]
    Ywb = XYZwb[1]
    Yws = XYZws[1]
    if CAT == "CAT02":
        Mt = np.array([
            [0.7328, 0.4296, -0.1624],
            [-0.7036, 1.6975, 0.0061],
            [0.0030, 0.0136, 0.9834],
        ])
    elif CAT == "CAT16":
        Mt = np.array([
            [0.401288, 0.650173, -0.051461],
            [-0.250268, 1.204414, 0.045854],
            [-0.002079, 0.048952, 0.953127],
        ])
    else:
        # Bug fix: an unknown CAT previously fell through and crashed later
        # with UnboundLocalError on Mt; fail fast with a clear message.
        raise ValueError("CAT must be 'CAT02' or 'CAT16', got %r" % (CAT,))
    RGBwb = Mt @ XYZwb
    RGBws = Mt @ XYZws
    RGBwo = Mt @ XYZwo
    # Per-channel adaptation factors for the beta and delta illuminants.
    Drgbb = Db * (Ywb / Ywo) * (RGBwo / RGBwb) + 1 - Db
    Drgbs = Ds * (Yws / Ywo) * (RGBwo / RGBws) + 1 - Ds
    Drgb = (Drgbb / Drgbs)
    RGBs = Mt @ XYZs
    RGBb = RGBs / Drgb
    # (Removed dead statement: the original recomputed RGBs = Drgb * RGBb,
    # which is numerically a no-op and was never used afterwards.)
    XYZb = np.linalg.inv(Mt) @ RGBb
    return XYZb
"""
XYZs = np.array([40.374,43.694,20.517])
XYZwb = np.array([109.850,100,35.585])
Db = 0.9407
XYZws = np.array([95.047,100,108.883])
Ds = 0.9800
XYZwo = np.array([100,100,100])
Zhai_Luo_inverse2(XYZs, XYZwb, Db, XYZws, Ds, XYZwo, 'CAT16')
"""
| Python | 0 | |
0a9efede94c64d114cf536533b94a47210a90604 | Add viper.common.constants.py | viper/common/constants.py | viper/common/constants.py | # This file is part of Viper - https://github.com/botherder/viper
# See the file 'LICENSE' for copying permission.
import os
# Absolute path of the directory containing this module (viper/common/).
_current_dir = os.path.abspath(os.path.dirname(__file__))
# Repository root: two levels up from viper/common/.
VIPER_ROOT = os.path.normpath(os.path.join(_current_dir, "..", ".."))
| Python | 0 | |
2d7ea21c2d9171a79298866bf02abf64b849be0e | add a simple info cog | dog/ext/info.py | dog/ext/info.py | from textwrap import dedent
import discord
from discord.ext.commands import guild_only
from lifesaver.bot import Cog, group, Context
from lifesaver.utils import human_delta
class Info(Cog):
    """A cog that provides information about various entities like guilds or members."""

    @group(aliases=['guild', 'guild_info', 'server_info'], invoke_without_command=True)
    @guild_only()
    async def server(self, ctx: Context):
        """Views information about this server."""
        embed = discord.Embed(title=ctx.guild.name)
        embed.set_thumbnail(url=ctx.guild.icon_url)
        embed.set_footer(text=f'Owned by {ctx.guild.owner}', icon_url=ctx.guild.owner.avatar_url)

        g: discord.Guild = ctx.guild

        # Split member count into humans vs. bots.
        n_humans = sum(1 for m in g.members if not m.bot)
        n_bots = len(g.members) - n_humans

        # dedent() strips the common leading whitespace of the f-string, so
        # the embed text is flush-left regardless of source indentation.
        embed.description = dedent(f"""\
            {n_humans} humans, {n_bots} bots ({n_humans + n_bots} members)
            Created {g.created_at}
            {human_delta(g.created_at)} ago
        """)

        embed.add_field(name='Entities', value=dedent(f"""\
            {len(g.text_channels)} text channels, {len(g.voice_channels)} voice channels, {len(g.categories)} categories
            {len(g.roles)} roles
        """))

        await ctx.send(embed=embed)

    @server.command(aliases=['icon_url'])
    @guild_only()
    async def icon(self, ctx: Context):
        """Sends this server's icon."""
        if not ctx.guild.icon_url:
            await ctx.send('No server icon.')
            return
        await ctx.send(ctx.guild.icon_url_as(format='png'))
def setup(bot):
    """discord.py extension entry point: register the Info cog."""
    bot.add_cog(Info(bot))
| Python | 0 | |
eff85f039674ca9fe69294ca2e81644dc4ff4cb6 | add celery for all notification mail | gnowsys-ndf/gnowsys_ndf/ndf/views/tasks.py | gnowsys-ndf/gnowsys_ndf/ndf/views/tasks.py | from celery import task
from django.contrib.auth.models import User
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from gnowsys_ndf.notification import models as notification
from gnowsys_ndf.ndf.models import Node
from gnowsys_ndf.ndf.models import node_collection, triple_collection
import json
try:
    from bson import ObjectId
except ImportError:  # old pymongo bundles ObjectId under pymongo.objectid
    from pymongo.objectid import ObjectId

# First Site entry is assumed to be the current deployment's site.
sitename = Site.objects.all()[0]
@task
def task_set_notify_val(request_user_id, group_id, msg, activ, to_user):
    '''
    Attach notification mail to celery task.

    Args:
        request_user_id: id of the User triggering the notification.
        group_id: ObjectId string of the group node the activity concerns.
        msg: notice-type message text.
        activ: human-readable activity description for the template.
        to_user: id of the User receiving the notification.

    Returns:
        True on success, False if sending failed (error is printed).
    '''
    request_user = User.objects.get(id=request_user_id)
    to_send_user = User.objects.get(id=to_user)
    try:
        group_obj = node_collection.one({'_id': ObjectId(group_id)})
        site = sitename.name.__str__()
        # NOTE(review): the object link is a hard-coded placeholder — the
        # rendered notification always points at "http://test".
        objurl = "http://test"
        render = render_to_string(
            "notification/label.html",
            {
                'sender': request_user.username,
                'activity': activ,
                'conjunction': '-',
                'object': group_obj,
                'site': site,
                'link': objurl
            }
        )
        notification.create_notice_type(render, msg, "notification")
        notification.send([to_send_user], render, {"from_user": request_user})
        return True
    except Exception as e:
        # Best-effort: report the failure rather than crashing the worker.
        print "Error in sending notification- "+str(e)
        return False
| Python | 0 | |
f956af85b27d104e84754b4d93a761b82ae39831 | add external_iterate.py | external_iterate.py | external_iterate.py | #!/usr/bin/env python
"""Compile a Myrial program into logical relational algebra."""
import raco.myrial.interpreter as interpreter
import raco.myrial.parser as parser
import raco.scheme
from raco import algebra
from raco import myrialang
from raco.compile import optimize
from raco.language import MyriaAlgebra
import argparse
import json
import os
import sys
def evaluate(plan):
    """Recursively walk `plan`, compiling and printing each leaf operator.

    DoWhile and Sequence nodes are traversed; every other node is optimized
    to the Myria physical algebra, compiled to JSON and printed (Python 2
    print statement).
    """
    if isinstance(plan, algebra.DoWhile):
        evaluate(plan.left)
        evaluate(plan.right)
    elif isinstance(plan, algebra.Sequence):
        for child in plan.children():
            evaluate(child)
    else:
        logical = str(plan)
        physical = optimize([('', plan)], target=MyriaAlgebra, source=algebra.LogicalAlgebra)
        phys = myrialang.compile_to_json(logical, logical, physical)
        print phys
        # NOTE(review): the result of json.dumps is discarded — presumably a
        # serializability smoke test; confirm, or drop the call.
        json.dumps(phys)
def print_pretty_plan(plan, indent=0):
if isinstance(plan, algebra.DoWhile):
print '%s%s' % (' ' * indent, plan.shortStr())
print_pretty_plan(plan.left, indent + 4)
print_pretty_plan(plan.right, indent + 4)
elif isinstance(plan, algebra.Sequence):
print '%s%s' % (' ' * indent, plan.shortStr())
for child in plan.children():
print_pretty_plan(child, indent + 4)
else:
print '%s%s' % (' ' * indent, plan)
def parse_options(args):
    """Parse command-line arguments.

    Args:
        args: argument list excluding the program name (e.g. sys.argv[1:]).

    Returns:
        argparse.Namespace with `parse_only` (bool) and `file` (str).
    """
    # Renamed from `parser`: that local shadowed the module-level
    # `raco.myrial.parser` import, which was easy to misread.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-p', dest='parse_only',
                            help="Parse only", action='store_true')
    arg_parser.add_argument('file', help='File containing Myrial source program')
    return arg_parser.parse_args(args)
class FakeCatalog(object):
    """Minimal catalog backed by a plain dict mapping relation keys to schemes."""

    def __init__(self, catalog):
        # catalog: dict of relation_key -> scheme definition (see catalog.py).
        self.catalog = catalog

    def get_scheme(self, relation_key):
        # NOTE(review): relies on `raco.Scheme` being exposed by the raco
        # package (only `raco.scheme` is imported above) — verify.
        return raco.Scheme(self.catalog[relation_key])

    @classmethod
    def load_from_file(cls, path):
        """Build a catalog by evaluating the literal dict in `path`.

        SECURITY: eval() executes arbitrary code from the file — acceptable
        only because catalog.py is a trusted, developer-authored file.
        """
        with open(path) as fh:
            return cls(eval(fh.read()))
def main(args):
    """Parse, and optionally evaluate, the Myrial program named in `args`.

    Returns 0 on success (shell exit code).
    """
    opt = parse_options(args)

    # Search for a catalog definition file next to the source program.
    catalog_path = os.path.join(os.path.dirname(opt.file), 'catalog.py')
    catalog = None
    if os.path.exists(catalog_path):
        catalog = FakeCatalog.load_from_file(catalog_path)

    _parser = parser.Parser()
    processor = interpreter.StatementProcessor(catalog)

    with open(opt.file) as fh:
        statement_list = _parser.parse(fh.read())

        if opt.parse_only:
            # Python 2 print statement: dump the raw statement list only.
            print statement_list
        else:
            processor.evaluate(statement_list)
            plan = processor.get_physical_plan()
            evaluate(plan)

    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| Python | 0 | |
40146a54f9857aaaf252f3e1e5de7dc73c6cd181 | add pl.proportions | scvelo/plotting/proportions.py | scvelo/plotting/proportions.py | from ..preprocessing.utils import sum_var
import matplotlib.pyplot as pl
import numpy as np
def proportions(adata, groupby='clusters', layers=['spliced', 'unspliced', 'ambigious'], highlight='unspliced',
                add_labels_pie=True, add_labels_bar=True, fontsize=8, figsize=(10, 2), dpi=100, show=True):
    """Plot the fractions of counts per layer: overall pie + per-group bars.

    Parameters
    ----------
    adata
        Annotated data object; the requested entries are looked up in
        ``adata.layers`` and the grouping key in ``adata.obs``.
    groupby
        Key of ``adata.obs`` used for the grouped bar chart; the bar chart
        is skipped if the key is missing or None.
    layers
        Layer names to aggregate; names missing from ``adata.layers`` are
        silently skipped.  NOTE(review): the default ``'ambigious'`` looks
        like a typo for ``'ambiguous'``, but it is matched verbatim against
        ``adata.layers`` keys — confirm the upstream spelling before fixing.
        NOTE(review): mutable default argument — safe only because the list
        is never mutated in this function.
    highlight
        Layer name (or string containing layer names) whose pie wedge is
        exploded.
    add_labels_pie, add_labels_bar
        Whether to draw percentage labels on the pie / bar chart.
    fontsize, figsize, dpi
        Matplotlib styling, passed through to the figure.
    show
        If True, call ``pl.show()`` (and return None); otherwise return the
        axis (or ``[ax, ax2]`` when the bar chart was drawn).
    """
    # get counts per cell for each layer
    layers_keys = [key for key in layers if key in adata.layers.keys()]
    counts_layers = [sum_var(adata.layers[key]) for key in layers_keys]

    counts_total = np.sum(counts_layers, 0)
    # Bump zero totals to 1 so the normalization below never divides by zero.
    counts_total += counts_total == 0
    counts_layers = np.array([counts / counts_total for counts in counts_layers])

    gspec = pl.GridSpec(1, 2, pl.figure(None, figsize, dpi=dpi))
    colors = pl.get_cmap('tab20b')(np.linspace(0.10, 0.65, len(layers_keys)))

    # pie chart of total abundances
    ax = pl.subplot(gspec[0])
    # NOTE(review): `mean_abundances` is computed but never used — the pie
    # call below recomputes np.mean itself.
    mean_abundances = np.mean(counts_layers, axis=1)

    if highlight is None: highlight = 'none'
    # Explode a wedge when its layer equals `highlight` or is a substring of it.
    explode = [.1 if (l == highlight or l in highlight) else 0 for l in layers_keys]

    autopct = '%1.0f%%' if add_labels_pie else None
    pie = ax.pie(np.mean(counts_layers, axis=1), colors=colors, explode=explode,
                 autopct=autopct, shadow=True, startangle=45)
    if autopct is not None:
        # Recolor the percentage texts for contrast against each wedge.
        for pct, color in zip(pie[-1], colors):
            r, g, b, _ = color
            pct.set_color('white' if r * g * b < 0.5 else 'darkgrey')
            pct.set_fontweight('bold')
            pct.set_fontsize(fontsize)
    ax.legend(layers_keys, ncol=len(layers_keys), bbox_to_anchor=(0, 1), loc='lower left', fontsize=fontsize)

    # bar chart of abundances per category
    if groupby is not None and groupby in adata.obs.keys():
        counts_groups = dict()
        for cluster in adata.obs[groupby].cat.categories:
            counts_groups[cluster] = np.mean(counts_layers[:, adata.obs[groupby] == cluster], axis=1)

        labels = list(counts_groups.keys())
        data = np.array(list(counts_groups.values()))
        data_cum = data.cumsum(axis=1)

        ax2 = pl.subplot(gspec[1])
        # NOTE(review): the inner loop reuses the name `i` from the outer
        # enumerate — harmless here (outer `i` is rebound each iteration
        # before use) but easy to trip over.
        for i, (colname, color) in enumerate(zip(layers_keys, colors)):
            starts, widths = data_cum[:, i] - data[:, i], data[:, i]
            xpos = starts + widths / 2
            curr_xpos = xpos[0]
            # Nudge label x-positions so stacked labels do not overlap.
            for i, (x, w) in enumerate(zip(xpos, widths)):
                curr_xpos = curr_xpos if x - w / 2 + .05 < curr_xpos < x + w / 2 - .05 else x
                xpos[i] = curr_xpos
            ax2.barh(labels, widths, left=starts, height=0.9, label=colname, color=color)

            if add_labels_bar:
                r, g, b, _ = color
                text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
                for y, (x, c) in enumerate(zip(xpos, widths)):
                    ax2.text(x, y, '{:.0f}%'.format(c * 100), ha='center', va='center',
                             color=text_color, fontsize=fontsize, fontweight='bold')

        ax2.legend(ncol=len(layers_keys), bbox_to_anchor=(0, 1), loc='lower left', fontsize=fontsize)
        ax2.invert_yaxis()
        ax2.set_xlim(0, np.nansum(data, axis=1).max())
        ax2.margins(0)

        ax2.set_xlabel('proportions', fontweight='bold', fontsize=fontsize * 1.2)
        ax2.set_ylabel(groupby, fontweight='bold', fontsize=fontsize * 1.2)
        ax2.tick_params(axis='both', which='major', labelsize=fontsize)
        ax = [ax, ax2]

    if show:
        pl.show()
    else:
        return ax
| Python | 0.999557 | |
7f78d4ea286d9827aaa47022077de286195c2cd9 | Add a Fast(er) DS1820 reader | wipy/flash/lib/FDS1820.py | wipy/flash/lib/FDS1820.py | from onewire import *
import machine
import time
class FDS1820(object):
    """Fast(er) driver for DS18x20 1-Wire temperature sensors.

    Batches conversions for all sensors on the bus before reading them back.
    All temperatures are returned as integer centi-degrees Celsius
    (e.g. 2500 == 25.00 C) to stay float-free on small MicroPython targets.
    """

    def __init__(self, onewire):
        self.ow = onewire
        # Keep only DS18S20 (family 0x10) and DS18B20 (family 0x28) devices.
        self.roms = [rom for rom in self.ow.scan() if rom[0] == 0x10 or rom[0] == 0x28]

    def read_temp(self, rom=None):
        """
        Read and return the temperature of one DS18x20 device.
        Pass the 8-byte bytes object with the ROM of the specific device you
        want to read.  If only one DS18x20 device is attached to the bus you
        may omit the rom parameter.
        """
        rom = rom or self.roms[0]
        ow = self.ow
        ow.reset()
        ow.select_rom(rom)
        ow.write_byte(0x44)  # Convert Temp
        while not ow.read_bit():
            pass  # device holds the line low while converting
        ow.reset()
        ow.select_rom(rom)
        ow.write_byte(0xbe)  # Read scratchpad
        return self.convert_temp(rom[0], ow.read_bytes(9))

    def read_temps(self):
        """
        Read and return the temperatures of all attached DS18x20 devices.
        """
        ow = self.ow
        # Start (and wait out) a conversion on every device...
        for rom in self.roms:
            ow.reset()
            ow.select_rom(rom)
            ow.write_byte(0x44)  # Convert Temp
            while not ow.read_bit():
                pass
        # ...then read every scratchpad back.
        temps = []
        for rom in self.roms:
            # BUG FIX: every ROM selection needs a fresh bus reset; the old
            # code only reset before the first device, so reads from the
            # second sensor onward returned garbage.
            ow.reset()
            ow.select_rom(rom)
            ow.write_byte(0xbe)  # Read scratchpad
            temps.append(self.convert_temp(rom[0], ow.read_bytes(9)))
        return temps

    def slow_read_temps(self):
        """One full convert+read cycle per device; kept for benchmarking against read_temps()."""
        return [self.read_temp(rom) for rom in self.roms]

    def convert_temp(self, rom0, data):
        """
        Convert a raw 9-byte scratchpad into integer centi-degrees Celsius.
        `rom0` is the family code (first ROM byte) selecting the chip format.
        """
        temp_lsb = data[0]
        temp_msb = data[1]
        if rom0 == 0x10:
            # DS18S20: 9-bit reading plus COUNT registers for extra resolution.
            if temp_msb != 0:
                # convert negative number
                temp_read = temp_lsb >> 1 | 0x80  # truncate bit 0 by shifting, fill high bit with 1.
                temp_read = -((~temp_read + 1) & 0xff)  # two's complement
            else:
                temp_read = temp_lsb >> 1  # truncate bit 0 by shifting
            count_remain = data[6]
            count_per_c = data[7]
            # Extended-resolution formula from the DS18S20 datasheet, scaled
            # by 100.  BUG FIX: the fractional term must be scaled by 100 as
            # well — the old `(count_per_c - count_remain) // count_per_c`
            # was always 0 (or 1), silently dropping the sub-degree bits.
            return (100 * temp_read - 25 +
                    (100 * (count_per_c - count_remain)) // count_per_c)
        elif rom0 == 0x28:
            # DS18B20: 16-bit two's-complement value, 1/16 C per LSB.
            value = temp_msb << 8 | temp_lsb
            if value & 0x8000:
                value -= 1 << 16  # BUG FIX: sign-extend negative readings
            return value * 100 // 16
        else:
            # __init__ only keeps family codes 0x10/0x28, so this is an
            # internal invariant (note: assert is stripped under -O).
            assert False
def tst():
    """Crude benchmark on a WiPy: compare slow_read_temps() vs read_temps().

    Uses the 1-Wire data line on pin GP30 and prints elapsed milliseconds
    (via time.ticks_diff) for three rounds of each read style.
    """
    dat = machine.Pin('GP30')
    ow = OneWire(dat)
    ds = FDS1820(ow)
    print('devices:', ds.roms)

    start=time.ticks_ms()
    for x in range(0,3):
        print('temperatures:', ds.slow_read_temps())
    print(time.ticks_diff(start,time.ticks_ms()))

    start=time.ticks_ms()
    for x in range(0,3):
        print('temperatures:', ds.read_temps())
    print(time.ticks_diff(start,time.ticks_ms()))
| Python | 0 | |
9184d4cebf95ee31836970bedffaddc3bfaa2c2d | Prepare v2.20.8.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.8.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.20.7'
| Python | 0.000004 |
1bf65c4b18b1d803b9515f80056c4be5790e3bde | Prepare v1.2.276.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.276.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.275'
| Python | 0.000002 |
f4a4b733445abba45a0a168dde9b7c10248688a6 | Prepare v1.2.318.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.318.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.317'
| Python | 0.000002 |
860cf7b9743744c9d21796b227cf21d684fb5519 | Add test_modulepickling_change_cache_dir | test/test_cache.py | test/test_cache.py | from jedi import settings
from jedi.cache import ParserCacheItem, _ModulePickling
ModulePickling = _ModulePickling()
def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
    """
    Changing ``settings.cache_directory`` must invalidate previously saved
    caches: a module pickled under the old directory may not be served once
    the directory has changed.

    See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_
    """
    first_dir = str(tmpdir.mkdir('first'))
    second_dir = str(tmpdir.mkdir('second'))
    first_item = ParserCacheItem('fake parser 1')
    second_item = ParserCacheItem('fake parser 2')
    first_path = 'fake path 1'
    second_path = 'fake path 2'

    # Save under the first cache directory; it must be retrievable there.
    monkeypatch.setattr(settings, 'cache_directory', first_dir)
    ModulePickling.save_module(first_path, first_item)
    assert ModulePickling.load_module(first_path, first_item.change_time - 1) == first_item.parser

    # After switching cache directories, the old entry must not be found.
    monkeypatch.setattr(settings, 'cache_directory', second_dir)
    ModulePickling.save_module(second_path, second_item)
    assert ModulePickling.load_module(first_path, first_item.change_time - 1) is None
| Python | 0.000003 | |
8a511662948bff2f878d5af31fd45d02eee6dd4b | MigrationHistory.applied should be NOT NULL | south/models.py | south/models.py | from django.db import models
class MigrationHistory(models.Model):
    """One row per applied South migration, keyed by (app_name, migration)."""

    app_name = models.CharField(max_length=255)   # app label, from Migration.app_name()
    migration = models.CharField(max_length=255)  # migration name, from Migration.name()
    # blank=True only relaxes form-level validation; null stays False, so
    # the column is NOT NULL at the database level.
    applied = models.DateTimeField(blank=True)

    class Meta:
        unique_together = (('app_name', 'migration'),)

    @classmethod
    def for_migration(cls, migration):
        """Return the existing history row for `migration`, or a new unsaved one."""
        try:
            return cls.objects.get(app_name=migration.app_name(),
                                   migration=migration.name())
        except cls.DoesNotExist:
            return cls(app_name=migration.app_name(),
                       migration=migration.name())

    def get_migrations(self):
        # Local import — presumably to break a circular import with
        # south.migration; confirm before hoisting it to module level.
        from south.migration import Migrations
        return Migrations(self.app_name)

    def get_migration(self):
        """Resolve this history row back to its Migration object."""
        return self.get_migrations().migration(self.migration)
| from django.db import models
class MigrationHistory(models.Model):
    """One row per applied South migration, keyed by (app_name, migration)."""

    app_name = models.CharField(max_length=255)   # app label, from Migration.app_name()
    migration = models.CharField(max_length=255)  # migration name, from Migration.name()
    # null=True: the column is nullable at the database level (and blank=True
    # additionally allows empty form input).
    applied = models.DateTimeField(blank=True, null=True)

    class Meta:
        unique_together = (('app_name', 'migration'),)

    @classmethod
    def for_migration(cls, migration):
        """Return the existing history row for `migration`, or a new unsaved one."""
        try:
            return cls.objects.get(app_name=migration.app_name(),
                                   migration=migration.name())
        except cls.DoesNotExist:
            return cls(app_name=migration.app_name(),
                       migration=migration.name())

    def get_migrations(self):
        # Local import — presumably to break a circular import with
        # south.migration; confirm before hoisting it to module level.
        from south.migration import Migrations
        return Migrations(self.app_name)

    def get_migration(self):
        """Resolve this history row back to its Migration object."""
        return self.get_migrations().migration(self.migration)
| Python | 1 |
d082eb41c2ccef7178d228896a7658fe52bcbdec | Create directory for useless symbols remove | tests/UselessSymbolsRemove/__init__.py | tests/UselessSymbolsRemove/__init__.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
""" | Python | 0 | |
8dc6a8088a42d836d9182592bb3e7f2f3440ccc2 | Add ptpython config | .config/ptpython/config.py | .config/ptpython/config.py | """
Configuration example for ``ptpython``.
Copy this file to $XDG_CONFIG_HOME/ptpython/config.py
On Linux, this is: ~/.config/ptpython/config.py
"""
from prompt_toolkit.filters import ViInsertMode
from prompt_toolkit.key_binding.key_processor import KeyPress
from prompt_toolkit.keys import Keys
from prompt_toolkit.styles import Style
from ptpython.layout import CompletionVisualisation
__all__ = ["configure"]
def configure(repl):
    """
    Configuration method. This is called during the start-up of ptpython.

    Every attribute set below is a ptpython setting; the triple-quoted
    blocks further down are inert example snippets (plain string
    expressions), kept purely as copy-paste documentation.

    :param repl: `PythonRepl` instance.
    """
    # Show function signature (bool).
    repl.show_signature = True

    # Show docstring (bool).
    repl.show_docstring = True

    # Show the "[Meta+Enter] Execute" message when pressing [Enter] only
    # inserts a newline instead of executing the code.
    repl.show_meta_enter_message = True

    # Show completions. (NONE, POP_UP, MULTI_COLUMN or TOOLBAR)
    repl.completion_visualisation = CompletionVisualisation.POP_UP

    # When CompletionVisualisation.POP_UP has been chosen, use this
    # scroll_offset in the completion menu.
    repl.completion_menu_scroll_offset = 0

    # Show line numbers (when the input contains multiple lines.)
    repl.show_line_numbers = False

    # Show status bar.
    repl.show_status_bar = True

    # When the sidebar is visible, also show the help text.
    repl.show_sidebar_help = True

    # Swap light/dark colors on or off
    repl.swap_light_and_dark = False

    # Highlight matching parenthesis.
    repl.highlight_matching_parenthesis = True

    # Line wrapping. (Instead of horizontal scrolling.)
    repl.wrap_lines = True

    # Mouse support.
    repl.enable_mouse_support = True

    # Complete while typing. (Don't require tab before the
    # completion menu is shown.)
    repl.complete_while_typing = False

    # Fuzzy and dictionary completion.
    repl.enable_fuzzy_completion = False
    repl.enable_dictionary_completion = False

    # Vi mode.
    repl.vi_mode = False

    # Paste mode. (When True, don't insert whitespace after new line.)
    repl.paste_mode = False

    # Use the classic prompt. (Display '>>>' instead of 'In [1]'.)
    repl.prompt_style = "classic"  # 'classic' or 'ipython'

    # Don't insert a blank line after the output.
    repl.insert_blank_line_after_output = False

    # History Search.
    # When True, going back in history will filter the history on the records
    # starting with the current input. (Like readline.)
    # Note: When enabled, please disable the `complete_while_typing` option.
    # otherwise, when there is a completion available, the arrows will
    # browse through the available completions instead of the history.
    repl.enable_history_search = True

    # Enable auto suggestions. (Pressing right arrow will complete the input,
    # based on the history.)
    repl.enable_auto_suggest = False

    # Enable open-in-editor. Pressing C-x C-e in emacs mode or 'v' in
    # Vi navigation mode will open the input in the current editor.
    repl.enable_open_in_editor = True

    # Enable system prompt. Pressing meta-! will display the system prompt.
    # Also enables Control-Z suspend.
    repl.enable_system_bindings = True

    # Ask for confirmation on exit.
    repl.confirm_exit = True

    # Enable input validation. (Don't try to execute when the input contains
    # syntax errors.)
    repl.enable_input_validation = True

    # Use this colorscheme for the code.
    repl.use_code_colorscheme("default")
    # repl.use_code_colorscheme("pastie")

    # Set color depth (keep in mind that not all terminals support true color).
    # repl.color_depth = "DEPTH_1_BIT"  # Monochrome.
    # repl.color_depth = "DEPTH_4_BIT"  # ANSI colors only.
    repl.color_depth = "DEPTH_8_BIT"  # The default, 256 colors.
    # repl.color_depth = "DEPTH_24_BIT"  # True color.

    # Min/max brightness
    repl.min_brightness = 0.0  # Increase for dark terminal backgrounds.
    repl.max_brightness = 1.0  # Decrease for light terminal backgrounds.

    # Syntax.
    repl.enable_syntax_highlighting = True

    # Get into Vi navigation mode at startup
    repl.vi_start_in_navigation_mode = False

    # Preserve last used Vi input mode between main loop iterations
    repl.vi_keep_last_used_mode = False

    # Install custom colorscheme named 'my-colorscheme' and use it.
    """
    repl.install_ui_colorscheme("my-colorscheme", Style.from_dict(_custom_ui_colorscheme))
    repl.use_ui_colorscheme("my-colorscheme")
    """

    # Add custom key binding for PDB.
    """
    @repl.add_key_binding("c-b")
    def _(event):
        " Pressing Control-B will insert "pdb.set_trace()" "
        event.cli.current_buffer.insert_text("\nimport pdb; pdb.set_trace()\n")
    """

    # Typing ControlE twice should also execute the current command.
    # (Alternative for Meta-Enter.)
    """
    @repl.add_key_binding("c-e", "c-e")
    def _(event):
        event.current_buffer.validate_and_handle()
    """

    # Typing 'jj' in Vi Insert mode, should send escape. (Go back to navigation
    # mode.)
    """
    @repl.add_key_binding("j", "j", filter=ViInsertMode())
    def _(event):
        " Map 'jj' to Escape. "
        event.cli.key_processor.feed(KeyPress(Keys("escape")))
    """

    # Custom key binding for some simple autocorrection while typing.
    """
    corrections = {
        "impotr": "import",
        "pritn": "print",
    }

    @repl.add_key_binding(" ")
    def _(event):
        " When a space is pressed. Check & correct word before cursor. "
        b = event.cli.current_buffer
        w = b.document.get_word_before_cursor()

        if w is not None:
            if w in corrections:
                b.delete_before_cursor(count=len(w))
                b.insert_text(corrections[w])

        b.insert_text(" ")
    """

    # Add a custom title to the status bar. This is useful when ptpython is
    # embedded in other applications.
    """
    repl.title = "My custom prompt."
    """
# Custom colorscheme for the UI. See `ptpython/layout.py` and
# `ptpython/style.py` for all possible tokens.
# NOTE: this dict is only referenced from the commented-out
# `install_ui_colorscheme` example inside configure() above, so it has no
# effect until that snippet is enabled.
_custom_ui_colorscheme = {
    # Blue prompt.
    "prompt": "bg:#eeeeff #000000 bold",
    # Make the status toolbar red.
    "status-toolbar": "bg:#ff0000 #000000",
}
| Python | 0.000002 | |
0c3a1d6df2350692755b33abadaa6d4355c8134c | Create separate_genes.py | separate_genes.py | separate_genes.py | """
Script to separate genes from MiSeq run fastQ files (NEXTERA style)
Use it after the tail and adapter have been trimmed from the seqs (see trim_adaptors.py),
and merged paired-end reads (I've been using flash).
This script also trims forward and reverse primers.
It will produce one file for each gene (4 here - 16S, 18S, UPA, tufA)
Created on Mon Jul 28 17:27:33 2014
@author: VanessaRM
"""
""" USAGE:
python speratae_genes.py my_fastq_file.fastq
"""
from Bio import SeqIO
import regex
import sys
# Help text (printed when run without arguments; Python 2 print statements).
if len(sys.argv) == 1:
    print ""
    print "Script to separate genes and trim primers from a fastQ file."
    print "Allows one mismatch in the primer sequence"
    print ""
    print "Usage: supply the file name"
    # NOTE(review): "speratae_genes.py" is a typo for separate_genes.py, but
    # it is a runtime string so it is left untouched here.
    print "ex: python speratae_genes.py my_fastq_file.fastq"
    print ""
    print "If you want to run it for multiple files, use the shell:"
    print "for file in *_R1_001.fastq; do python separate_genes.py $file; done >> screen.out 2>> screen.err &"
    print ""
    sys.exit()

# input files and arguments:
# One output file per amplicon, named after the input, plus one for
# unmatched reads.
input_file = str(sys.argv[1])
output_file_16S = input_file + "_16S.fastq"
output_file_18S = input_file + "_18S.fastq"
output_file_UPA = input_file + "_UPA.fastq"
output_file_tufA = input_file + "_tufA.fastq"
output_file_umatched = input_file + "_unmatched.fastq"

# Globals
finder = ""  # NOTE(review): never used anywhere in this script

# Create lists to store seqs
# primer_finder() below appends trimmed records into these module globals.
#all_seqs =[] #just to see if the code behaves well
s16_list =[]
s18_list = []
UPA_list=[]
tufA_list =[]
unmatched=[]

# Forward Primers (\w = ambiguous positions, only 12 "internal" bases used), allowing for 1 mismatch.
# The {e<=1} suffix is the third-party `regex` module's fuzzy-match syntax
# (at most one edit).
p16S_f = '(GTGCCAGC\wGCCGCGGTAA){e<=1}' #F515
p18S_f = '(GGTGGTGCATGGCCGTTCTTAGTT){e<=1}' #NF1
pUPA_f = '(GGACAGAAAGACCCTATGAA){e<=1}' #p23SrV_f1
ptufA_f = '(AC\wGG\wCG\wGG\wAC\wGT){e<=1}' #Oq_tuf2F
#Reverse primers(their reverse complement):
p16S_r = '(ATTAGA\wACCC\w\wGTAGTCC){e<=1}' #R806 rc ATTAGAWACCCBDGTAGTCC
p18S_r = '(ATTACGTCCCTGCCCTTTGTA){e<=1}' # B18Sr2B rc
pUPA_r = '(CTCTAGGGATAACAGGCTGA){e<=1}' #p23SrV_r1 rc
ptufA_r = '(GCG\wTT\wGC\wATTCG\wGAAGG){e<=1}' #tufAR rc
#define primer_finder function
def primer_finder(records, primer_f1, primer_r1, primer_f2, primer_r2, primer_f3, primer_r3, primer_f4, primer_r4):
    """Trim primers and bin each read into its amplicon list.

    Primer pairs are tried in order 16S, 18S, UPA, tufA; the first pair with
    a forward OR reverse hit claims the read.  Matched (trimmed) records are
    appended to the module-level lists s16_list, s18_list, UPA_list and
    tufA_list; reads matching nothing go to `unmatched`.  Returns None.
    """
    # (forward pattern, reverse pattern, destination list), in priority
    # order.  This replaces four copy-pasted if/else branches, and removes
    # the old `if index_f or index_r != None` precedence trap (which only
    # worked because match objects are always truthy).
    primer_sets = (
        (primer_f1, primer_r1, s16_list),
        (primer_f2, primer_r2, s18_list),
        (primer_f3, primer_r3, UPA_list),
        (primer_f4, primer_r4, tufA_list),
    )
    for record in records:
        sequence = str(record.seq)
        for primer_f, primer_r, bucket in primer_sets:
            # Defaults keep the whole read when a primer end is not found.
            cut_off_f = 0
            cut_off_r = len(sequence)
            index_f = regex.search(primer_f, sequence)
            index_r = regex.search(primer_r, sequence)
            if index_f is not None:
                # Trim everything up to and including the forward primer.
                cut_off_f = int(index_f.span()[1])
            if index_r is not None:
                # NOTE(review): the +1 keeps the first base of the reverse
                # primer in the read (slice end is exclusive) — preserved
                # from the original; confirm this off-by-one is intended.
                cut_off_r = int(index_r.span()[0] + 1)
            if index_f is not None or index_r is not None:
                bucket.append(record[cut_off_f:cut_off_r])
                break
        else:
            # No primer pair matched this read at all.
            unmatched.append(record)
#Iterate over fastq file
print ("separating genes... It can take a while...")
original_reads = SeqIO.parse(input_file, "fastq")  # lazy iterator over reads
# primer_finder has no return statement, so `do_it` is always None; the
# assignment only forces the call to run.
do_it = primer_finder(original_reads,p16S_f,p16S_r,p18S_f,p18S_r,pUPA_f,pUPA_r,ptufA_f,ptufA_r)

# SeqIO.write returns the number of records written to each file.
count_um = SeqIO.write(unmatched, output_file_umatched, "fastq")
print ""
print "%i sequences did not match the primers and were stored in the %s file." %(count_um, output_file_umatched)
print ""

count_16S = SeqIO.write(s16_list, output_file_16S, "fastq")
# NOTE(review): stray trailing comma — prints "" without a newline (py2).
print"",
print "Saved %i reads in the %s file." %(count_16S, output_file_16S)

count_18S = SeqIO.write(s18_list, output_file_18S, "fastq")
print""
print "Saved %i reads in the %s file." %(count_18S, output_file_18S)

count_UPA = SeqIO.write(UPA_list, output_file_UPA, "fastq")
print""
print "Saved %i reads in the %s file." %(count_UPA, output_file_UPA)

count_tufA = SeqIO.write(tufA_list, output_file_tufA, "fastq")
print""
print "Saved %i reads in the %s file." %(count_tufA, output_file_tufA)

print""
print "Done!"
| Python | 0 | |
3c82f0228095b2616b35a2881f51c93999fdd79b | Test models/FieldMapper | tests/test_models/test_field_mapper.py | tests/test_models/test_field_mapper.py | import json
import jsonschema
from django.test import TestCase
from core.models import FieldMapper
from tests.utils import json_string
class FieldMapperTestCase(TestCase):
    """Tests for core.models.FieldMapper: string form, dict form and config validation."""

    @classmethod
    def setUpTestData(cls):
        # Class-level fixture shared (read-only) by all tests below.
        cls.attributes = {
            'name': 'Test Field Mapper',
            'config_json': json_string({"add_literals":{"foo":"bar"}}),
            'field_mapper_type': 'xml2kvp'
        }
        cls.field_mapper = FieldMapper(**cls.attributes)

    def test_str(self):
        # Expected format: "<name>, FieldMapper: #<id>"
        self.assertEqual('Test Field Mapper, FieldMapper: #{}'.format(FieldMapperTestCase.field_mapper.id),
            format(FieldMapperTestCase.field_mapper))

    def test_as_dict(self):
        # as_dict() must echo back every constructor attribute unchanged.
        as_dict = FieldMapperTestCase.field_mapper.as_dict()
        for k, v in FieldMapperTestCase.attributes.items():
            self.assertEqual(as_dict[k], v)

    def test_config(self):
        # .config is the parsed (dict) form of config_json.
        self.assertEqual(json.loads(FieldMapperTestCase.attributes['config_json']),
            FieldMapperTestCase.field_mapper.config)

    def test_config_none(self):
        # Without config_json, .config is None rather than raising.
        no_config_mapper = FieldMapper(name='new field mapper')
        self.assertIsNone(no_config_mapper.config)

    def test_validate_config_json(self):
        # A schema-valid config validates silently (returns None).
        self.assertIsNone(FieldMapperTestCase.field_mapper.validate_config_json())

    def test_validate_config_json_invalid(self):
        # "add_literals" must map to an object; a bare string violates the schema.
        invalid_config_mapper = FieldMapper(config_json=json_string({"add_literals": "invalid value"}))
        self.assertRaises(jsonschema.exceptions.ValidationError,
            invalid_config_mapper.validate_config_json)

    def test_validate_config_json_provided(self):
        # A valid JSON argument overrides the (invalid) stored config_json.
        invalid_config_mapper = FieldMapper(config_json=json_string({"add_literals": "invalid value"}))
        self.assertIsNone(invalid_config_mapper.validate_config_json(json_string({"add_literals":{"foo":"bar"}})))
| Python | 0 | |
ebe10d39064410fc49ac90e38339a54d0ed47c80 | update hooks for sqlalchemy | setup/hooks/hook-sqlalchemy.py | setup/hooks/hook-sqlalchemy.py | __author__ = 'stephanie'
# Copyright (C) 2009, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# Contributed by Greg Copeland
from PyInstaller.hooks.hookutils import exec_statement
# include most common database bindings
# some database bindings are detected and include some
# are not. We should explicitly include database backends.
hiddenimports = ['pysqlite2', 'MySQLdb', 'psycopg2', 'pyodbc', 'pymysql']
print "in custom sql alchemy hook "
# sqlalchemy.databases package from pre 0.6 sqlachemy versions
databases = exec_statement("import sqlalchemy.databases;print sqlalchemy.databases.__all__")
databases = eval(databases.strip())
for n in databases:
hiddenimports.append("sqlalchemy.databases." + n)
# sqlalchemy.orm package from pre 0.6 sqlachemy versions
orm = exec_statement("import sqlalchemy.ormprint sqlalchemy.orm.__all__")
orm = eval(orm.strip())
for n in orm:
hiddenimports.append("sqlalchemy.orm." + n)
# sqlalchemy.dialects package from 0.6 and newer sqlachemy versions
version = exec_statement('import sqlalchemy; print sqlalchemy.__version__')
is_alch06 = version >= '0.6'
if is_alch06:
dialects = exec_statement("import sqlalchemy.dialects;print sqlalchemy.dialects.__all__")
dialects = eval(dialects.strip())
for n in databases:
hiddenimports.append("sqlalchemy.dialects." + n)
| Python | 0 | |
7f8f5e14f88304b272423ab12728d5329a2ba808 | use raw strings for urls | shop/urls/cart.py | shop/urls/cart.py | from django.conf.urls.defaults import url, patterns
from shop.views.cart import CartDetails, CartItemDetail
# Cart URL routes; the HTTP-method comments mark which verb each
# action-specific view handles.
urlpatterns = patterns('',
    url(r'^delete/$', CartDetails.as_view(action='delete'), # DELETE
        name='cart_delete'),
    url(r'^item/$', CartDetails.as_view(action='post'), # POST
        name='cart_item_add'),
    url(r'^$', CartDetails.as_view(), name='cart'), # GET
    url(r'^update/$', CartDetails.as_view(action='put'),
        name='cart_update'),

    # CartItems
    url(r'^item/(?P<id>[0-9]+)$', CartItemDetail.as_view(),
        name='cart_item'),
    url(r'^item/(?P<id>[0-9]+)/delete$',
        CartItemDetail.as_view(action='delete'),
        name='cart_item_delete'),
)
| from django.conf.urls.defaults import url, patterns
from shop.views.cart import CartDetails, CartItemDetail
# Cart URL routes. All patterns now use raw strings (r'...') so regex
# escapes can never be swallowed as Python string escapes, and the block is
# internally consistent (some entries previously used plain strings).
urlpatterns = patterns('',
    url(r'^delete/$', CartDetails.as_view(action='delete'), # DELETE
        name='cart_delete'),
    url(r'^item/$', CartDetails.as_view(action='post'), # POST
        name='cart_item_add'),
    url(r'^$', CartDetails.as_view(), name='cart'), # GET
    url(r'^update/$', CartDetails.as_view(action='put'),
        name='cart_update'),

    # CartItems
    url(r'^item/(?P<id>[0-9]+)$', CartItemDetail.as_view(),
        name='cart_item'),
    url(r'^item/(?P<id>[0-9]+)/delete$',
        CartItemDetail.as_view(action='delete'),
        name='cart_item_delete'),
)
| Python | 0.000009 |
45869cdf6087cd625db385ef52475d98c9842efa | add migen_local_install script | migen_local_install.py | migen_local_install.py | import os
# Fetch migen and keep only the inner `migen` package directory in ./migen.
# NOTE(review): os.system return codes are ignored and `mv` makes this
# POSIX-only — consider subprocess.check_call / shutil.move. The clone also
# uses plain http; https would be safer.
os.system("git clone http://github.com/m-labs/migen")
os.system("mv migen migen_tmp")
os.system("mv migen_tmp/migen migen")
os.system("rm -rf migen_tmp") | Python | 0.000001 | |
5f31e729ce6752c2f0a6b7f19f76c2a7e95636b9 | Create friends-of-appropriate-ages.py | Python/friends-of-appropriate-ages.py | Python/friends-of-appropriate-ages.py | # Time: O(a^2 + n), a is the number of ages,
# n is the number of people
# Space: O(a)
# Some people will make friend requests.
# The list of their ages is given and ages[i] is the age of the ith person.
#
# Person A will NOT friend request person B (B != A)
# if any of the following conditions are true:
#
# age[B] <= 0.5 * age[A] + 7
# age[B] > age[A]
# age[B] > 100 && age[A] < 100
# Otherwise, A will friend request B.
#
# Note that if A requests B, B does not necessarily request A.
# Also, people will not friend request themselves.
#
# How many total friend requests are made?
#
# Example 1:
#
# Input: [16,16]
# Output: 2
# Explanation: 2 people friend request each other.
# Example 2:
#
# Input: [16,17,18]
# Output: 2
# Explanation: Friend requests are made 17 -> 16, 18 -> 17.
# Example 3:
#
# Input: [20,30,100,110,120]
# Output:
# Explanation: Friend requests are made 110 -> 100, 120 -> 110, 120 -> 100.
#
# Notes:
# - 1 <= ages.length <= 20000.
# - 1 <= ages[i] <= 120.
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
import collections
class Solution(object):
    def numFriendRequests(self, ages):
        """
        Count friend requests: A requests B iff
        0.5 * age[A] + 7 < age[B] <= age[A].

        :type ages: List[int]
        :rtype: int
        """
        age_counts = collections.Counter(ages)
        total = 0
        for age_a, count_a in age_counts.items():
            for age_b, count_b in age_counts.items():
                if 0.5 * age_a + 7 < age_b <= age_a:
                    # Same-age pairs exclude self-requests.
                    others = count_b - 1 if age_a == age_b else count_b
                    total += count_a * others
        return total
| Python | 0.000029 | |
9caf9d3bfaaff9d7721f611d9c351dd14f67daa6 | add log progress | log_progress.py | log_progress.py | def log_progress(sequence, every=None, size=None):
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = size / 200 # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{index} / ?'.format(index=index)
else:
progress.value = index
label.value = u'{index} / {size}'.format(
index=index,
size=size
)
yield record
except:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = unicode(index or '?') | Python | 0.000001 | |
d9c7ce7f2b47bee3b2e657157fe4df8f9a00973a | Create smiles_preview.py | smiles_preview.py | smiles_preview.py | import sublime
import sublime_plugin
import base64
import os
import re
class SmilesPreview(sublime_plugin.EventListener):
def on_hover(self, view, point, hover_zone):
if (hover_zone == sublime.HOVER_TEXT):
# locate smiles in the string. smiles string should be at the beginning and followed by tab (cxsmiles)
hovered_line_text = view.substr(view.line(point)).strip()
smiles_regex = re.compile(r'^([^J][A-Za-z0-9@+\-\[\]\(\)\\\/%=#$]+)\t', re.IGNORECASE)
if (smiles_regex.match(hovered_line_text)):
smiles_string = smiles_regex.match(hovered_line_text).group(0)
file_name = "1.png"
os.system("obabel -ismi -:" + smiles_string + "-opng -O " + file_name)
# Check that file exists
if (file_name and os.path.isfile(file_name)):
encoded = str(base64.b64encode(
open(file_name, "rb").read()
), "utf-8")
view.show_popup('<img src="data:image/png;base64,' +
encoded +
'">',
flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
location=point)
return
return
return
| Python | 0.000658 | |
fbeb3d04b16afa0b2daf49597a07c32b0d72630c | Add missing mica.report __init__ to project | mica/report/__init__.py | mica/report/__init__.py | from .report import main, update
| Python | 0.000019 | |
dca9931e894c1e5cae9f5229b04cc72c31eef5f5 | Create a.py | a.py | a.py | # this code is wrote on python
a = 3
print a
| Python | 0.000489 | |
2c0a06a8e460de06dd9a929baa02e2d369fbe0a6 | Prepare v2.17.4.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.17.4.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.17.3'
| Python | 0.000003 |
b815b2e94814e86ba2e4713d15aa2143594344bc | Prepare v2.13.13.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.13.13.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.13.12'
| Python | 0.000004 |
a30a6104554fb39d068fd8aadbb128dff1d482fb | Create dl.py | dl.py | dl.py | #!/usr/bin/env python
import requests, urllib2, os, shutil, sys, futures
from time import sleep
download_board = sys.argv[1]
def download(**kwargs):
with open('./'+download_board+'/'+kwargs['filename'], 'wb') as handle:
request = requests.get(kwargs['url'], stream=True)
for block in request.iter_content(1024):
if not block:
break
handle.write(block)
if os.path.exists("stopcron.txt"):
print "stopcron.txt exists, downloader is aborting"
exit()
if not os.path.exists(download_board+"-modified.txt"):
shutil.copy(".backup_modified.txt", download_board+"-modified.txt")
if os.path.getsize(download_board+"-modified.txt") == 0:
shutil.copy(".backup_modified.txt", download_board+"-modified.txt")
pages = []
with open(download_board+"-modified.txt", 'r') as f:
modified = [s.strip("\n") for s in f.readlines()]
realch = 0
for a in xrange(15):
p = requests.get("http://a.4cdn.org/"+download_board+"/%s.json" % str(a), headers={'If-Modified-Since': str(modified[a])})
if p.status_code == 200 or len(modified[a]) == 0:
pages.append(p.json())
modified[a] = p.headers['Last-Modified']
sleep(1.0)
a = a + 1
with open(download_board+"-modified.txt", 'w') as f:
for a in modified:
f.write(a+"\n")
links = []
already = 0
links = []
filenames = []
for page in pages:
for thread in page['threads']:
for post in thread['posts']:
if u'filename' in post:
filename_clean = post[u'filename']
ext_clean = post[u'ext']
if 'filename' in post and not os.path.exists("./"+download_board+"/"+filename_clean+ext_clean):
links.append("http://i.4cdn.org/"+download_board+"/"+filename_clean+ext_clean)
filenames.append(filename_clean+ext_clean)
if not os.path.exists("./"+download_board+"/"):
os.makedirs("./"+download_board+"/")
with futures.ThreadPoolExecutor(max_workers=10) as e:
for i in xrange(len(links)):
e.submit(download, url=links[i], filename=filenames[i])
print "[chanrip] %s downloaded" % (str(len(links)))
| Python | 0 | |
0b90446471805276ed141800337e6044ce130b93 | Test for the bugfix of Project.last_update | akvo/rsr/tests/models/test_project.py | akvo/rsr/tests/models/test_project.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from unittest import TestCase
from django.contrib.auth import get_user_model
from akvo.rsr.models import Project
from akvo.rsr.models import ProjectUpdate, PayPalGateway, MollieGateway, PaymentGatewaySelector
class ProjectModelTestCase(TestCase):
"""Tests for the project model"""
def test_project_last_update(self):
""" Test Project.last_update. The field is a denormalization keeping track of the latest
update for a project, if any. When deletion of updates was introduced, a bug occurs when
deleting the latest update, as the Project.last_update field was set to None in that case.
The tests check that the fix for this bug works correctly
"""
# setup needed model instances
paypal = PayPalGateway.objects.create(name='paypal')
mollie = MollieGateway.objects.create(name='mollie')
project_1 = Project.objects.create(title="Test project 1")
user_1 = get_user_model().objects.create(email='user1@com.com')
update_1 = ProjectUpdate.objects.create(title="Test update 1", project=project_1, user=user_1)
update_2 = ProjectUpdate.objects.create(title="Test update 2", project=project_1, user=user_1)
# check that update_2 is the latest
self.assertTrue(update_1.created_at < update_2.created_at)
# check that update_2 is Project.last_update
self.assertEqual(project_1.last_update, update_2)
update_2.delete()
# now update_1 should be last_update
self.assertEqual(project_1.last_update, update_1)
update_1.delete()
# now last_update is None
self.assertEqual(project_1.last_update, None)
| Python | 0 | |
30381ced0d7535428398b3df5f1caffd684b20d5 | Implement K means network. | KMeansnet.py | KMeansnet.py | import numpy as np
class Kmeansnet(object):
def __init__(self, data, clusters, eta):
self.data = data
self.n_dim = data.shape[1]
self.num_clusters = clusters
self.weights = np.random.rand(self.num_clusters, self.n_dim)
self.eta = eta
def calc_dist(self, inp, weights):
return np.sum((weights * inp), axis=1)
def normalise_data(self, data):
normalisers = np.sqrt(np.sum(data ** 2, axis=1)).reshape(self.data.shape[0], 1)
return data / normalisers
def train(self, epochs):
self.data = self.normalise_data(self.data)
for i in range(epochs):
for d in range(self.data.shape[0]):
dist = self.calc_dist(self.data[d, :], self.weights)
cluster = np.argmax(dist)
self.weights[cluster, :] += self.eta * self.data[d, :] - self.weights[cluster, :]
def predict(self, inp):
dist = self.calc_dist(inp, self.weights)
best = np.argmax(dist)
return best
def predict_all(self, data):
best = np.zeros((data.shape[0], 1))
for i in range(data.shape[0]):
best[i] = self.predict(data[i, :])
return best
| Python | 0 | |
b876332debd21edb3e3b84f01bb8aec5196bd8d8 | add enumerating partition | resource-4/combinatorics/integer-partitions/enumerating/partition.py | resource-4/combinatorics/integer-partitions/enumerating/partition.py | #zero
if n == 0:
yield []
return
#modify
for ig in partitions(n-1):
yield [1] + ig
if ig and (len(ig) < 2 or ig[1] > ig[0]):
yield [ig[0] + 1] + ig[1:]
| Python | 0.000044 | |
39714efdbfb9620acb1bb43fa8a3dbf59bfbef85 | add shortnaturaltime template filter | wypok/templatetags/shortnaturaltime.py | wypok/templatetags/shortnaturaltime.py | # from: https://github.com/ollieglass/django-shortnaturaltime
from django import template
from django.utils.timezone import utc
import time
from datetime import datetime, timedelta, date
register = template.Library()
def _now():
return datetime.utcnow().replace(tzinfo=utc)
# return datetime.now()
def abs_timedelta(delta):
"""Returns an "absolute" value for a timedelta, always representing a
time distance."""
if delta.days < 0:
now = _now()
return now - (now + delta)
return delta
def date_and_delta(value):
"""Turn a value into a date and a timedelta which represents how long ago
it was. If that's not possible, return (None, value)."""
now = _now()
if isinstance(value, datetime):
date = value
delta = now - value
elif isinstance(value, timedelta):
date = now - value
delta = value
else:
try:
value = int(value)
delta = timedelta(seconds=value)
date = now - delta
except (ValueError, TypeError):
return (None, value)
return date, abs_timedelta(delta)
def shortnaturaldelta(value, months=True):
"""Given a timedelta or a number of seconds, return a natural
representation of the amount of time elapsed. This is similar to
``naturaltime``, but does not add tense to the result. If ``months``
is True, then a number of months (based on 30.5 days) will be used
for fuzziness between years."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
use_months = months
seconds = abs(delta.seconds)
days = abs(delta.days)
years = days // 365
days = days % 365
months = int(days // 30.5)
if not years and days < 1:
if seconds == 0:
return "1s"
elif seconds == 1:
return "1s"
elif seconds < 60:
return "%ds" % (seconds)
elif 60 <= seconds < 120:
return "1m"
elif 120 <= seconds < 3600:
return "%dm" % (seconds // 60)
elif 3600 <= seconds < 3600*2:
return "1h"
elif 3600 < seconds:
return "%dh" % (seconds // 3600)
elif years == 0:
if days == 1:
return "1d"
if not use_months:
return "%dd" % days
else:
if not months:
return "%dd" % days
elif months == 1:
return "1m"
else:
return "%dm" % months
elif years == 1:
if not months and not days:
return "1y"
elif not months:
return "1y %dd" % days
elif use_months:
if months == 1:
return "1y, 1m"
else:
return "1y %dm" % months
else:
return "1y %dd" % days
else:
return "%dy" % years
@register.filter
def shortnaturaltime(value, future=False, months=True):
"""Given a datetime or a number of seconds, return a natural representation
of that time in a resolution that makes sense. This is more or less
compatible with Django's ``naturaltime`` filter. ``future`` is ignored for
datetimes, where the tense is always figured out based on the current time.
If an integer is passed, the return value will be past tense by default,
unless ``future`` is set to True."""
now = _now()
date, delta = date_and_delta(value)
if date is None:
return value
# determine tense by value only if datetime/timedelta were passed
if isinstance(value, (datetime, timedelta)):
future = date > now
delta = shortnaturaldelta(delta)
if delta == "a moment":
return "now"
return delta
| Python | 0 | |
d9c95fcf89f0e72c3504a4988e6d4fb6ef2ae6cd | Add the timeseries neural network | src/backend/timeseries_nnet.py | src/backend/timeseries_nnet.py | # Modified code from https://github.com/hawk31/nnet-ts
import logging
import numpy as np
from keras.optimizers import SGD
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from sklearn.preprocessing import StandardScaler
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class TimeSeriesNnet(object):
def __init__(self, timeseries, hidden_layers=[20, 15, 5],
activation_functions=['relu', 'relu', 'relu'],
optimizer=SGD(), loss='mean_absolute_error',
lag=11):
self._hidden_layers = hidden_layers
self._activation_functions = activation_functions
self._optimizer = optimizer
self._loss = loss
self._lag = lag
self._timeseries = self._prepare_data(timeseries)
self._scaler = StandardScaler()
self._nn = Sequential()
if len(self._hidden_layers) != len(self._activation_functions):
raise Exception('hidden_layers size must match'
'activation_functions size')
def _prepare_data(self, timeseries):
return np.array(timeseries, dtype='float64')
def fit(self, epochs=10000, verbose=0):
timeseries_len = len(self._timeseries)
if self._lag >= timeseries_len:
raise ValueError('Lag is higher than length of the timeseries')
X = np.zeros((timeseries_len - self._lag, self._lag), dtype='float64')
y = np.log(self._timeseries[self._lag:])
# Building X matrixs
logging.info('Building regressor matrix')
for i in range(0, timeseries_len - self._lag):
X[i, :] = self._timeseries[range(i, i + self._lag)]
logging.info('Scaling data')
self._scaler.fit(X)
X = self._scaler.transform(X)
# Neural net architecture
logging.info('Checking network consistency')
self._nn.add(Dense(self._hidden_layers[0], input_shape=(X.shape[1],)))
self._nn.add(Activation(self._activation_functions[0]))
for layer_size, activation_function in zip(
self._hidden_layers[1:], self._activation_functions[1:]):
self._nn.add(Dense(layer_size))
self._nn.add(Activation(activation_function))
# Add final node
self._nn.add(Dense(1))
self._nn.add(Activation('linear'))
self._nn.compile(loss=self._loss, optimizer=self._optimizer)
# Train neural net
logging.info('Training neural net')
self._nn.fit(X, y, nb_epoch=epochs, verbose=verbose)
def predict_ahead(self, n_ahead=1):
# Store predictions and predict iteratively
predictions = np.zeros(n_ahead)
timeseries = self._timeseries
for i in range(n_ahead):
current_x = self._scaler.transform(
timeseries[-self._lag:].reshape((1, self._lag)))
next_pred = self._nn.predict(current_x)
predictions[i] = np.exp(next_pred[0, 0])
timeseries = np.concatenate((
timeseries, np.exp(next_pred[0, :])), axis=0)
return predictions
| Python | 0.999953 | |
fefb9a9fa5a7c6080bc52896e2d1517828b01a3d | Add all PLs to db | migrations/versions/299e1d15a55f_populate_provincial_legislatures.py | migrations/versions/299e1d15a55f_populate_provincial_legislatures.py | """populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
| Python | 0 | |
253ff8bc8f848effea6ad7602b6424cf997c926c | rename celeba_multitask_acc to celeba_multilabel_acc | caffe/result/celeba_multilabel_acc.py | caffe/result/celeba_multilabel_acc.py | import os
import numpy as np
import sys
label_file = open('/home/hypan/data/celebA/test.txt', 'r')
lines = label_file.readlines()
label_file.close()
acc = np.zeros(40)
cou = 0
for line in lines:
info = line.strip('\r\n').split()
name = info[0].split('.')[0]
gt_labels = info[1: ]
feat_path = '/home/hypan/data/celebA/result/' + sys.argv[1] + '/test_feature/' + name + '.npy'
if not os.path.exists(feat_path):
print '{} has not predict feature.'.format(name)
pd_labels = np.load(feat_path)
cnt = len(pd_labels)
for i in range(cnt):
gt_label = int(gt_labels[i])
pd_label = pd_labels[i]
if pd_label > 0:
pd_label = 1
else:
pd_label = -1
if gt_label == pd_label:
acc[i] += 1
cou += 1
for i in range(40):
print i, acc[i] * 1.0 / cou
| Python | 0.999996 | |
d2e165ace4fc26b51e18494c4878f95ebcefa20a | add api | web/routers/api.py | web/routers/api.py | # coding: utf-8
import os
import json
import time
import datetime
import humanize
import flask
from flask import request, flash, redirect, url_for, render_template
import models
import gcfg
bp = flask.Blueprint('api', __name__)
@bp.route('/')
def home():
return flask.render_template('api.html')
@bp.route('/v1/repolist')
@models.db_session
def repolist():
goos='windows'
goarch='amd64'
data = []
for r in models.select(r for r in models.Recommend)[:]:
item = dict(
reponame=r.repo.name,
alias=r.name,
author=r.repo.author,
description=r.repo.description,
offical=r.repo.offcial,
category=r.category.name if r.category else None,
stars=r.repo.stars,
osarch=goos+'-'+goarch,
)
files = []
for b in r.repo.builds:
if not b.downloadable:
continue
# actually only one loop
file = {'label':b.tag, 'updated':b.updated}
for f in models.select(f for f in models.File \
if f.build==b and f.os == goos and f.arch == goarch)[:1]:
file.update({'binfiles': [os.path.basename(f.reponame)], # FIXME: need to parse from gobuildrc
'size': f.size, 'url': f.outlink, 'sha1': f.sha})
files.append(file)
if files:
item['files'] = files
data.append(item)
data.append(dict(
reponame = 'github.com/codeskyblue/cgotest',
description='this is is just a test program',
alias='cgotest', # this could be null
author='unknown,lunny',
offical=True,
category='music',
stars=18,
files=[
{'label': 'branch:master', 'url': 'http://gobuild3.qiniudn.com/github.com/gogits/gogs/branch-v-master/gogs-linux-386.tar.gz', 'binfiles': ['gogs'], 'sha1': '408eebced1c2cdbd363df2fe843831bf337d4273', 'size': 7000000},
{'label': 'tag:v0.5.2', 'url': 'http://gobuild3.qiniudn.com/github.com/gogits/gogs/tag-v-v0.5.2/gogs-linux-386.tar.gz', 'binfiles': ['gogs'], 'sha1': '960e329d46ec7a79745cf3438eaf3c3151d38d97', 'size': 7100000}],
))
return flask.jsonify({'status': 0, 'message': 'success', 'osarch': 'linux-386', 'data': data})
| Python | 0 | |
edbdf0d955eb387d74a73997cd11a2d05550e05a | add new action plugin junos_config | lib/ansible/plugins/action/junos_config.py | lib/ansible/plugins/action/junos_config.py | #
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import os
import time
import glob
import urlparse
from ansible.plugins.action import ActionBase
from ansible.utils.unicode import to_unicode
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
result['changed'] = False
src = self._task.args.get('src')
if src:
if src.endswith('.xml'):
fmt = 'xml'
elif src.endswith('.set'):
fmt = 'set'
else:
fmt = 'text'
if self._task.args.get('format') is None:
self._task.args['format'] = fmt
try:
self._handle_source()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
if self._task.args.get('comment') is None:
self._task.args['comment'] = self._task.name
result.update(self._execute_module(module_name=self._task.action,
module_args=self._task.args, task_vars=task_vars))
if self._task.args.get('backup') and result.get('_backup'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
self._write_backup(task_vars['inventory_hostname'], result['_backup'])
if '_backup' in result:
del result['_backup']
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
def _handle_source(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlparse.urlsplit('src').scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'files', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('Unable to load source file')
try:
with open(source, 'r') as f:
template_data = to_unicode(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
self._task.args['src'] = self._templar.template(template_data)
| Python | 0 | |
6c9760b328716d6b2e099698293c93cba9361932 | Add script for testing error reporting. | checkserver/testchecks/check_error.py | checkserver/testchecks/check_error.py | #!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
| Python | 0 | |
bc651b5ca15cf41eece321b77142c2973bd41ede | Add a sqlite config | zinnia/tests/implementations/sqlite.py | zinnia/tests/implementations/sqlite.py | """Settings for testing zinnia on SQLite"""
from zinnia.tests.implementations.settings import * # noqa
DATABASES = {
'default': {
'NAME': 'zinnia.db',
'ENGINE': 'django.db.backends.sqlite3'
}
}
| Python | 0.000002 | |
b546ac87cd3e3821619a5ac7ed7806c1f569a3cd | Create PySMS.py | PySMS.py | PySMS.py | # -*- coding: utf-8 -*-
import smtplib
from time import strftime
# User account credentials -- (gmail username and password)
USERNAME = ''
PASSWORD = ''
# Routing -- (FROMADDR can be null iirc)
FROMADDR = ''
TOADDRS = ''
# Message Body
MESSAGE = ''
def SendMessage(MESSAGE):
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(USERNAME, PASSWORD)
server.sendmail(FROMADDR, TOADDRS, MESSAGE)
server.quit()
def TimeStamp():
return strftime('%-I:%M %p - %b %d %Y')
| Python | 0.000001 | |
36a8a2f52f1b85d70cda0bf399a371a4c04d0ccd | add utility script to easily launch the bottle development server | util/dev_runner.py | util/dev_runner.py | import os, dmon, bottle
os.chdir(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
bottle.run(host='localhost', port=8001) | Python | 0 | |
2fdace2e358ede8da1a6f569b063548f8969d825 | Add supervisor config generator | util/supervisor.py | util/supervisor.py | from util.config import Configuration
from configparser import ConfigParser
import platform
import os
class Supervisor:
__config = Configuration()
def __init__(self):
self.__config_file = self.__config.get_config_dir() + '/supervisor.conf'
def generate_config(self, servers):
parser = ConfigParser()
config_dir = self.__config.get_config_dir()
parser.add_section('unix_http_server')
parser.set('unix_http_server', 'file', config_dir + '/supervisor.sock')
parser.set('unix_http_server', 'chmod', '0700')
parser.add_section('supervisord')
parser.set('supervisord', 'logfile', config_dir + '/supervisor_error.log')
parser.set('supervisord', 'pidfile', config_dir + '/supervisor.pid')
parser.add_section('rpcinterface:supervisor')
parser.set('rpcinterface:supervisor', 'supervisor.rpcinterface_factory', 'supervisor.rpcinterface:make_main_rpcinterface')
parser.add_section('supervisorctl')
parser.set('supervisorctl', 'serverurl', 'unix://' + config_dir + '/supervisor.sock')
ql_executable = self.get_ql_executable()
for sid,data in servers.items():
name = 'qlds_' + sid
section = 'program:' + name
parser.add_section(section)
parser.set(section, 'command', self.build_command_line(data, ql_executable))
parser.set(section, 'process_name', name)
parser.set(section, 'autorestart', 'true')
if os.path.isfile(self.__config_file) and not os.access(self.__config_file, os.W_OK):
raise IOError('Cannot write to file ' + self.__config_file)
with (open(self.__config_file, 'w+')) as config_fp:
parser.write(config_fp)
def build_command_line(self, server, executable):
command_line = [executable]
for k,v in server.items():
command_line.append('+set %s %s' % (k, v))
return ' '.join(command_line)
def get_ql_executable(self):
if platform.architecture()[0] == '64bit':
executable = 'run_server_x64.sh'
else:
executable = 'run_server_x86.sh'
return os.path.expanduser(self.__config.get('dir', 'ql')) + '/' + executable
def get_config_location(self):
return self.__config_file
| Python | 0 | |
9c731cd17ccc853207b715b778622274b28e9efd | Create clientsocket.py | clientsocket.py | clientsocket.py |
#!/usr/bin/env python
import socket
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect(("www.google.com", 80))
request = "GET / HTTP/1.0\n\n"
clientSocket.sendall(request)
response = bytearray()
while True:
part = clientSocket.recv(1024)
if (part):
response.extend(part)
else:
break
print response
| Python | 0.000002 | |
885ff9c8886abd30518d2cd149f37f0ba507bb71 | add 6 | 006.py | 006.py | def sum_squares(l):
return reduce(lambda x, y: x + y**2, l)
def square_sums(l):
return reduce(lambda x, y: x + y, l) ** 2
r = range(1, 101)
ssum = sum_squares(r)
ssquare = square_sums(r)
delta = ssquare - ssum
print ssum, ssquare, delta
| Python | 0.999998 | |
a03da2611de32a38ab5b505f85136a3a9c5345f3 | add ycm config | .ycm_extra_conf.py | .ycm_extra_conf.py | # This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
# source code needs it.
'-DUSE_CLANG_COMPLETER',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++14',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
# c/c++ include path
'-isystem',
'/usr/include/c++/4.8',
'-isystem',
'/usr/include/c++/4.8.5',
'-isystem',
'/usr/include/c++/4.9.3',
'-isystem',
'/usr/include/c++/5',
'-isystem',
'/usr/include/c++/6',
'-isystem',
'/usr/include/c++/7',
'-isystem',
'/usr/include/c++/8',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
# 3rdparty
'-isystem',
'/usr/local/3rdparty/include',
# project
'-isystem',
'./',
#'-isystem',
#'../BoostParts',
#'-isystem',
# This path will only work on OS X, but extra paths that don't exist are not
# harmful
#'/System/Library/Frameworks/Python.framework/Headers',
#'-isystem',
#'../llvm/include',
#'-isystem',
#'../llvm/tools/clang/include',
#'-I',
#'.',
#'-I',
#'./ClangCompleter',
#'-isystem',
#'./tests/gmock/gtest',
#'-isystem',
#'./tests/gmock/gtest/include',
#'-isystem',
#'./tests/gmock',
#'-isystem',
#'./tests/gmock/include',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
    """Return the absolute path of the directory holding this script."""
    here = os.path.abspath( __file__ )
    return os.path.dirname( here )
def IsHeaderFile( filename ):
    """Return True when *filename* carries a C/C++ header extension."""
    _, extension = os.path.splitext( filename )
    return extension in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
    """Look up compilation flags for *filename* in the compilation database.

    The compile_commands.json generated by CMake has no entries for header
    files, so for a header we probe sibling source files (same basename,
    each extension in SOURCE_EXTENSIONS) and borrow the first non-empty
    flag set we find; returns None when nothing matches.
    """
    if not IsHeaderFile( filename ):
        return database.GetCompilationInfoForFile( filename )
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
        candidate = basename + extension
        if not os.path.exists( candidate ):
            continue
        info = database.GetCompilationInfoForFile( candidate )
        if info.compiler_flags_:
            return info
    return None
def FlagsForFile( filename, **kwargs ):
    """YouCompleteMe entry point: return the compiler flags for *filename*."""
    if not database:
        # No compilation database configured -- use the static flag list.
        return {
            'flags': flags,
            'include_paths_relative_to_dir': DirectoryOfThisScript()
        }

    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
        return None

    # compiler_flags_ is a "list-like" StringVec, not a real Python list,
    # so materialise it before mutating.
    final_flags = list( compilation_info.compiler_flags_ )

    # NOTE: This is just for YouCompleteMe; it's highly likely that your
    # project does NOT need to remove the stdlib flag. DO NOT USE THIS IN
    # YOUR ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
    try:
        final_flags.remove( '-stdlib=libc++' )
    except ValueError:
        pass

    return {
        'flags': final_flags,
        'include_paths_relative_to_dir': compilation_info.compiler_working_dir_
    }
| Python | 0.000001 | |
39798f325aa0fdcfeb24ae4db97107c26e88fec9 | Create month_comparison.py | month_comparison.py | month_comparison.py | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 18 10:30:22 2015
Last updated on Tue Jul 30 12:36 2015
@author: O. B. Alam
@email: oalam@haystack.mit.edu
"""
import numpy as np
import matplotlib.pyplot as plt
import shlex
###############################################################################
# Module-level accumulators shared by the plotting code below.
content = [] # list for all data
lt = [] # list for ozone localtime values
mt = [] # list for ozone month values
vel = [] # list for ozone velocities
err = [] # list for ozone errors
plotvel = [] # list for plotting the weighted averages
plotstd = [] # list for plotting the errorbars
plotvel2 = [] # list for plotting the weighted averages
plotstd2 = [] # list for plotting the errorbars
month = [] # list for lidar months
# X-axis tick labels: one letter per month, and a half-month variant.
plotx = ['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D']
plotxhalf = ['J', '', 'F', '', 'M', '', 'A', '', 'M', '', 'J', '', 'J', '', 'A', '', 'S', '', 'O', '', 'N', '', 'D', '']
###############################################################################
''' returns the weighted average of a data set, given its error set '''
def wavg(data, err):
    """Inverse-variance weighted mean: each point is weighted by 1/err^2."""
    numerator = sum(x*(1/y)*(1/y) for x, y in zip(data, err))
    denominator = sum((1/y)*(1/y) for y in err)
    return numerator/denominator
''' returns a normal average of a data set with weight w = 1 '''
def navg(data, err):
    """Plain arithmetic mean of *data*; *err* is accepted but unused (w = 1)."""
    total = sum(data)
    return total / len(data)
''' returns the weighted standard deviation of a data set, given its error set '''
def wstd(data, err):
    """Weighted sample standard deviation with weights 1/err^2.

    The weighted mean is computed once up front: the previous version
    re-evaluated wavg() twice for every element, which made this loop
    quadratic in len(data) without changing the result.
    """
    mean = wavg(data, err)
    num1 = sum((1/y)*(1/y)*(x-mean)*(x-mean)
               for x, y in zip(data, err))
    num2 = sum((1/y)*(1/y) for y in err)
    num3 = sum((1/y)*(1/y)*(1/y)*(1/y) for y in err)
    return np.sqrt(num1*num2/(num2*num2-num3))
''' returns the weighted standard deviation of a data set, given its error set
and with weight w = 1. '''
def wstd1(data, err):
    """Standard deviation of *data* under unit weights (w = 1 for all points).

    With every weight equal to 1 the weighted-std formula reduces to
    sqrt(sum((x - mean)^2) * n / (n^2 - n)).  *err* is kept in the
    signature for compatibility but does not enter the result: the old
    code plugged sum(err) in where the point count n belongs, which
    skewed the estimate whenever the errors did not sum to n.
    """
    n = len(data)
    mean = sum(data) / n
    spread = sum((x - mean) * (x - mean) for x in data)
    return np.sqrt(spread * n / (n * n - n))
''' returns the standard deviation of a data set, given its error set '''
def std(data, err):
    """Sample standard deviation of *data* about its weighted mean.

    Hoists the weighted mean out of the loop: the previous version called
    wavg() twice per element, an O(n^2) recomputation of a loop-invariant.
    """
    mean = wavg(data, err)
    num = sum((x-mean)*(x-mean) for x in data)
    denom = len(data) - 1
    return np.sqrt(num/denom)
###############################################################################
''' plots ozone spectrometer velocity measurements vs. month '''
def plot_aeer_monthly(mt, vel, err):
    """Read vel_vs_month.txt and plot monthly mean ozone velocities.

    Appends parsed values to the lists passed in (they are mutated in
    place), sorts by day of year, maps day-of-year offsets to month
    numbers, then draws per-month averages with error bars on subplot 211.
    """
    # Whitespace-separated columns; field 1 = day of year, 3 = velocity,
    # 6 = error (per the appends below).
    with open('vel_vs_month.txt') as q: content = q.readlines()
    content = [','.join(shlex.split(x)) for x in content]
    for x in content:
        eachRow = x.split(',')
        mt.append(float(eachRow[1]))
        vel.append(float(eachRow[3]))
        err.append(float(eachRow[6]))
    # Sort all three series together by day of year.
    mt = np.array(mt)
    vel = np.array(vel)
    err = np.array(err)
    inds = mt.argsort()
    mt = mt[inds].tolist()
    vel = vel[inds].tolist()
    err = err[inds].tolist()
    mt = [int(x) for x in mt]
    # convert days of the year to month number
    # (the values are cumulative day offsets for month starts in a
    # non-leap year; January entries are already < 31 -- presumably 0 --
    # TODO confirm against the input file)
    for i in xrange(len(mt)):
        if mt[i] == 31: mt[i] = 2
        if mt[i] == 59: mt[i] = 3
        if mt[i] == 90: mt[i] = 4
        if mt[i] == 120: mt[i] = 5
        if mt[i] == 151: mt[i] = 6
        if mt[i] == 181: mt[i] = 7
        if mt[i] == 212: mt[i] = 8
        if mt[i] == 243: mt[i] = 9
        if mt[i] == 273: mt[i] = 10
        if mt[i] == 304: mt[i] = 11
        if mt[i] == 334: mt[i] = 12
    # Average each month's slice; December runs from the last index found
    # to the end of the data (reuses `ij` from the i == 11 iteration).
    for i in range(1,13):
        if i != 12:
            ii = mt.index(i)
            ij = mt.index(i+1)
            plotvel.append(navg(vel[ii:ij], err[ii:ij]))
            plotstd.append(wstd(vel[ii:ij], err[ii:ij]))
        else:
            plotvel.append(navg(vel[ij:len(vel)], err[ij:len(vel)]))
            plotstd.append(wstd(vel[ij:len(vel)], err[ij:len(vel)]))
    plt.subplot(211)
    plt.xticks(range(1,13), plotx)
    plt.plot(range(1,13), plotvel, '.', label = "Ozone", c = 'blue')
    plt.errorbar(range(1,13), plotvel, yerr = plotstd, fmt = '.', c = 'blue')
    plt.legend(loc=3) # place the plot legend at the bottom right corner
    plt.xlim(0,13)
###############################################################################
# --- script body: ozone plot on top, Fabry-Perot half-month data below ---
plot_aeer_monthly(mt, vel, err)
vel = []
err = []
min_month = 1
max_month = 13
dt = 0.5  # half-month resolution on the x axis
# Fabry-Perot velocities and errors, one value per half-month, one per line.
with open('out_halfmon.txt') as q: vel = q.readlines()
vel = [x.strip('\n') for x in vel]
vel = [x.strip(' ') for x in vel]
vel = [float(x) for x in vel]
with open('out_err_halfmon.txt') as q: err = q.readlines()
err = [x.strip('\n') for x in err]
err = [x.strip(' ') for x in err]
err = [float(x) for x in err]
# Blank out the final point so it is not drawn.
vel[-1] = np.nan
err[-1] = np.nan
plt.subplot(212)
plt.xticks(np.arange(min_month, max_month, dt), plotxhalf)
plt.plot(np.arange(min_month, max_month, dt), vel, '.', c = 'red', label = 'Fabry-Perot')
plt.errorbar(np.arange(min_month, max_month, dt), vel, yerr = err, fmt = '.', c = 'red')
plt.ylim(-30,30)
plt.xlim(0,13)
plt.legend(loc=3)
plt.xlabel('Month')
plt.ylabel('Velocity (m/s)')
plt.show()
| Python | 0.000001 | |
7ce57e27265d4ea7639aaf6f806b9312d17c5c5a | Create HR_pythonSwapCase.py | HR_pythonSwapCase.py | HR_pythonSwapCase.py | #pythonSwapCase.py
def swap_case(s):
    """Return *s* with the case of every cased character inverted."""
    return str.swapcase(s)
| Python | 0.000165 | |
104ce4eb41a8d1d8307618f619dbf5336af1056d | Add CVE plugin. | plumeria/plugins/cve.py | plumeria/plugins/cve.py | import re
import urllib.parse
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
# Matches a full CVE identifier such as "CVE-2010-3213"; group 1 captures it.
# NOTE(review): the {4,5} quantifier sits on the year field, but CVE years
# are 4 digits while the trailing sequence number is the variable-length
# part -- confirm the quantifiers are on the intended segments.
CVE_PATTERN = re.compile("^(CVE-\\d{4,5}-\d+)$", re.IGNORECASE)


@commands.register("cve", category="Search")
@rate_limit()
async def cve(message):
    """
    Look up information about a CVE.

    Example::

        /cve CVE-2010-3213

    Response::

        CVE-2010-3213 - Cross-site request forgery (CSRF) vulner[...]
        Auth: NONE / Complexity: MEDIUM / Vector: NETWORK
        https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-3213
        • (462) Cross-Domain Search Timing
        • (467) Cross Site Identification
        • (62) Cross Site Request Forgery (aka Session Riding)
        • (111) JSON Hijacking (aka JavaScript Hijacking)
    """
    q = message.content.strip()
    if not q:
        raise CommandError("Search term required!")
    m = CVE_PATTERN.search(q)
    if not m:
        raise CommandError("No CVE found in the given input")
    # cve.circl.lu serves MITRE/NVD CVE records as JSON.
    r = await http.get("https://cve.circl.lu/api/cve/{}".format(m.group(1).upper()))
    data = r.json()
    # An unknown CVE id comes back as an empty JSON object.
    if len(data.keys()):
        # One bullet line per related CAPEC attack-pattern entry.
        capecs = "\n".join(
            map(lambda e: "\u2022 ({id}) {name}".format(id=e['id'], name=e['name']), data.get("capec", [])))
        return "**{cve}** [{cvss}] - {summary}\n*Auth: {auth} / Complexity: {complexity} / Vector: {vector}*\n<{url}>\n{capecs}".format(
            cve=data['id'],
            cvss=data['cvss'],
            summary=data['summary'],
            auth=data['access']['authentication'],
            complexity=data['access']['complexity'],
            vector=data['access']['vector'],
            capecs=capecs,
            url="https://cve.mitre.org/cgi-bin/cvename.cgi?name={}".format(urllib.parse.quote(data['id'])))
    else:
        raise CommandError("no results found")
| Python | 0 | |
ac7c5f51e270e48d3be9363a7c65b4b2f019c90c | Add tests for xkcd bot. | contrib_bots/bots/xkcd/test_xkcd.py | contrib_bots/bots/xkcd/test_xkcd.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
# NOTE(review): the parent directory of an existing file always exists, so
# this condition is effectively always true and '..' is always prepended.
if os.path.exists(os.path.join(our_dir, '..')):
    sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
    """Exercise the xkcd bot's command responses for private and stream messages."""

    bot_name = "xkcd"

    # logging.exception is patched so the invalid-id cases don't spam the
    # test output when the bot logs its lookup failure.
    @mock.patch('logging.exception')
    def test_bot(self, mock_logging_exception):
        help_txt = "xkcd bot supports these commands:"
        err_txt = "xkcd bot only supports these commands:"
        commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
        invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
        # Map of message content -> expected bot reply.
        expected = {
            "": err_txt+commands,
            "help": help_txt+commands,
            "x": err_txt+commands,
            "0": invalid_id_txt + "0",
            "1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
                  "(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
            "1800": ("#1800: **Chess Notation**\n"
                     "[I've decided to score all my conversations "
                     "using chess win-loss notation. (??)]"
                     "(https://imgs.xkcd.com/comics/chess_notation.png)"),
            "999999999": invalid_id_txt + "999999999",
        }
        # Every case is checked for both private and stream message types.
        for m, r in expected.items():
            self.assert_bot_output(
                {'content': m, 'type': "private", 'sender_email': "foo"}, r)
            self.assert_bot_output(
                {'content': m, 'type': "stream", 'sender_email': "foo"}, r)
| Python | 0 | |
552e2381b25c9d3591e7b4bf4a4c5796744b15ba | Add demo configuration | .salt/files/demo.py | .salt/files/demo.py | from .prod import *
LEAFLET_CONFIG['TILES'] = [
(gettext_noop('Scan'), 'http://{s}.livembtiles.makina-corpus.net/makina/OSMTopo/{z}/{x}/{y}.png', 'OSM Topo'),
(gettext_noop('Ortho'), 'https://{s}.tiles.mapbox.com/v3/makina-corpus.i3p1001l/{z}/{x}/{y}.png', '© MapBox Satellite'),
]
LEAFLET_CONFIG['SRID'] = 3857
ALTIMETRIC_PROFILE_COLOR = '#F77E00'
MAPENTITY_CONFIG['MAP_BACKGROUND_FOGGED'] = False
| Python | 0 | |
4b4bfd8d1bfb5e6db7ac5d24be526f188ceb6e68 | add payout exceptions | bluebottle/payouts_dorado/exceptions.py | bluebottle/payouts_dorado/exceptions.py | class PayoutException(Exception):
def __init__(self, message, error_list=None):
self.message = message
self.error_list = error_list
def __str__(self):
return str(self.message)
def __unicode__(self):
return unicode(self.message)
| Python | 0 | |
41e775e0a1b7cfeaeedd078913a2f8e004999b3a | Create linkchecker.py | linkchecker.py | linkchecker.py | #!/usr/bin/env python
import pbs as sh
import os
import commands
import sys
import urllib2
class linkchecker(object):
    """Checks URLs via shell tools (host, ping, traceroute) and HTTP HEAD.

    Python 2 code relying on the `pbs` module (imported as `sh`) to run
    external commands.  Caches url -> hostname and url -> hop-count lookups
    on the instance.
    """

    def __init__(self):
        # Cache: url -> hostname extracted from it.
        self.urlhostnames = {}
        # Cache: url -> traceroute hop count.
        self.tracing = {}
        # Toggle for the debug() helper's output.
        self.printdebug = True

    def debug(self, text):
        """Print *text* prefixed with DEBUG: when printdebug is enabled."""
        if self.printdebug:
            print "DEBUG:", text

    def hostexist(self, url):
        """Return True when `host` resolves the URL's hostname."""
        hostname = self.hostfromurl(url)
        self.debug("Hostname: " + hostname)
        checkhost = ""
        try:
            checkhost = str(sh.host(hostname))
        except Exception as ex:
            print "no host", ex
        self.debug("Reply from host command: " + checkhost)
        # Empty output or the literal "not found" both count as unresolved.
        if "not found" in checkhost or checkhost == "":
            return False
        else:
            return True

    def isurl(self, url):
        """Loose check that *url* looks like an http(s) URL.

        NOTE(review): when the string contains no "." the method falls
        through and implicitly returns None (falsy, so callers still work,
        but it is inconsistent with the explicit True/False paths).
        """
        if url in self.urlhostnames:
            return True
        else:
            if "." in url:
                urlparts = url.split("/")
                if (urlparts[0] == "http:" or urlparts[0] == "https:"):
                    return True
                else:
                    return False

    def urlexist(self, url):
        """HEAD-request *url*; True on HTTP 200, otherwise print the code."""
        check = urllib2.urlopen(headrequest(url))
        if check.getcode() == 200:
            return True
        else:
            print "Meldung vom Webserver:", check.getcode()
            return False

    def hostfromurl(self, url):
        """Extract (and cache) the hostname part of *url*.

        NOTE(review): returns None (after printing) for invalid URLs, and
        callers concatenate the result into strings without checking.
        """
        urlparts = url.split("/")
        if url in self.urlhostnames:
            return self.urlhostnames[url]
        else:
            if self.isurl(url):
                host = urlparts[2]
                self.urlhostnames[url] = host
                return host
            else:
                print "Kein valider URL erkannt!"

    def urlspeed(self, url):
        """Ping the URL's host once and return the reported time in ms.

        Returns None implicitly when ping fails or prints no "time=" line.
        """
        hostname = self.hostfromurl(url)
        pingresult = ""
        try:
            pingresult = sh.ping("-c1", hostname)
        except Exception as ex:
            self.debug(ex)
        pingresult = pingresult.split("\n")
        resultlen = len(pingresult)
        for pingline in pingresult:
            if "time=" in pingline:
                # Last two space-separated fields are "time=<x>" and "ms".
                pinglineparts = pingline.split(" ")
                measure = pinglineparts.pop()
                pingtime = pinglineparts.pop().replace("time=", "")
                return float(pingtime)

    def hostip(self, url):
        """Return (hostname, ip) from `host` output; ip is "nf" if not found."""
        hostname = self.hostfromurl(url)
        hostanswer = sh.host(hostname)
        hostip = ""
        if "not found" in hostanswer:
            hostip = "nf"
        else:
            self.debug(hostanswer)
            # First output line looks like "<name> has address <ip>".
            hostanswer = hostanswer.split("\n")
            hostanswer = hostanswer[0].split(" ")
            hostip = hostanswer[3]
        return hostname, hostip

    def trace(self, url):
        """Return (hostname, hop count) from traceroute, cached per url."""
        hostname = self.hostfromurl(url)
        if url in self.tracing:
            return hostname, self.tracing[url]
        else:
            hosttrace = sh.traceroute(hostname)
            hosttrace = hosttrace.split("\n")
            # Minus the header line and the trailing empty line.
            hops = len(hosttrace) - 2
            self.tracing[url] = hops
            return hostname, hops

    def checkall(self, url, savefile=""):
        """Run all checks on *url*; print results, or append them to *savefile*.

        Returns True on success; NOTE(review): returns None (the bare
        `pass`) when the host does not exist, so callers relying on a
        boolean get a falsy-but-not-False value.
        """
        if self.hostexist(url):
            if savefile == "":
                if self.urlexist(url):
                    print "Webadresse", url, "ist vorhanden."
                else:
                    print "Huch? Wo ist denn", url, "abgeblieben?"
                responsetime = "Antwortzeit von " + self.hostfromurl(url) + ": " + str(self.urlspeed(url))
                print responsetime
                hostname, hostip = self.hostip(url)
                if hostip == "nf":
                    print "Diesen Host gibt es nicht."
                else:
                    print "Die IP zu " + hostname + " lautet: '" + hostip.strip() + "'."
                # hostname, hops = self.trace(url)
                # print "Von hier aus sind es", hops, "Hops bis", hostname
                return True
            else:
                outfile = open(savefile, "a")
                if self.urlexist(url):
                    outtext = "Webadresse " + url + " ist vorhanden.\n"
                    outfile.write(outtext)
                else:
                    outtext = "Huch? Wo ist denn " + url + " abgeblieben?\n"
                    outfile.write(outtext)
                responsetime = "Antwortzeit von " + self.hostfromurl(url) + ": " + str(self.urlspeed(url))
                outfile.write(responsetime)
                hostname, hostip = self.hostip(url)
                if hostip == "nf":
                    outtext = "Diesen Host gibt es nicht.\n"
                else:
                    outtext = "Die IP zu " + hostname + " lautet: '" + hostip.strip() + "'.\n"
                outfile.write(outtext)
                # hostname, hops = self.trace(url)
                # outtext = "Von hier aus sind es " + str(hops) + " Hops bis " + hostname + ".\n\n"
                # outfile.write(outtext)
                outfile.close()
                return True
        else:
            pass
class headrequest(urllib2.Request):
    """urllib2.Request that issues HEAD instead of GET (no response body)."""

    def get_method(self):
        return "HEAD"
# --- command-line entry: parse -u/-f/-l/-h flags and run the checks ---
myLinkchecker = linkchecker()
url = ""
savefile = ""
sourcefile = ""
do = False
argcount = 0
# Hand-rolled option parsing: each flag takes the *next* argv entry.
for pars in sys.argv:
    if pars[0] == "-":
        if pars == "-u":
            url = sys.argv[argcount+1]
        elif pars == "-f":
            savefile = sys.argv[argcount+1]
        elif pars == "-l":
            sourcefile = sys.argv[argcount+1]
        elif pars == "-h":
            print "-f Filename: filename for saving"
            print "-h: this help"
            print "-l Filename: use list of URLs in this file for checking"
            print "-u URL: check this URL"
    argcount += 1
# Start from a fresh output file since checkall() appends.
if savefile != "":
    if os.path.isfile(savefile):
        os.remove(savefile)
if sourcefile == "":
    # Single-URL mode.
    if url != "" and savefile != "":
        do = myLinkchecker.checkall(url, savefile)
    elif url != "" and savefile == "":
        do = myLinkchecker.checkall(url)
    elif url == "":
        print "No URL given. Nothing to do."
else:
    # List mode: one URL per line in the source file.
    sf = open(sourcefile)
    urllist = sf.read()
    sf.close()
    urllist = urllist.split("\n")
    for url in urllist:
        if myLinkchecker.isurl(url):
            if savefile == "":
                myLinkchecker.checkall(url)
            else:
                myLinkchecker.checkall(url, savefile)
        else:
            outtext = "'" + url + "' does not exist!"
            # NOTE(review): with -f set the message is silently dropped
            # (bare `pass`) instead of being written to the save file.
            if savefile == "":
                print outtext
            else:
                pass
if do:
    print "Done."
| Python | 0 | |
5908d941fc113ee02b7d5962f0209a528ab9ecb1 | Add cross-site css module | core/modules/uses_stylesheet_naver.py | core/modules/uses_stylesheet_naver.py | from bs4 import BeautifulSoup
"""
Sites that are in the Naver domain are already checked by is_masquerading. So no need to check url again
"""
def uses_stylesheet_naver(resp):
    """Return "P" when the page links any stylesheet from naver.com, else "U"."""
    print('uses_stylesheet_naver')
    soup = BeautifulSoup(resp.text, 'lxml')
    links = soup.find_all('link', rel="stylesheet")
    if any("naver.com" in link['href'] for link in links):
        return "P"
    return "U"
| Python | 0.000001 | |
d217ee9c830a6cccb70155ceff44746b4e5215d6 | Add missing csv migration | saleor/csv/migrations/0004_auto_20200604_0633.py | saleor/csv/migrations/0004_auto_20200604_0633.py | # Generated by Django 3.0.6 on 2020-06-04 11:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("csv", "0003_auto_20200520_0247"),
]
operations = [
migrations.AlterField(
model_name="exportevent",
name="type",
field=models.CharField(
choices=[
("export_pending", "Data export was started."),
("export_success", "Data export was completed successfully."),
("export_failed", "Data export failed."),
("export_deleted", "Export file was started."),
(
"exported_file_sent",
"Email with link to download file was sent to the customer.",
),
(
"Export_failed_info_sent",
"Email with info that export failed was sent to the customer.",
),
],
max_length=255,
),
),
]
| Python | 0.000011 | |
6dde0b138a34698e113a3f6b017956958a4335d4 | rework blink.py | blink.py | blink.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
####
import pygame as pyg
import itertools as it
####
class Grid(object):
    """Maps logical grid cells onto pixel positions on the canvas."""

    def __init__(self, dx=1, dy=1, width=1, height=1, xoff=0, yoff=0):
        """dx, dy should be > 0"""
        self.dx = dx
        self.dy = dy
        self.width = width
        self.height = height
        self.xoff = xoff
        self.yoff = yoff

    def __call__(self, x, y):
        """Convert grid coordinates (x, y) to canvas pixel coordinates."""
        px = self.xoff + x * self.dx
        py = self.yoff + y * self.dy
        return px, py

    def box(self, x, y):
        """Rectangle (left, top, width, height) suitable for pygame drawing."""
        left, top = self(x, y)
        return (left, top, self.width, self.height)
####
class Display(object):
    """Pygame front end: owns the window, the main loop and the drawing calls."""

    def __init__(self, controller, grid, width, height, backcol=(0, 0, 0), fps=30):
        self.controller = controller
        self.grid = grid
        self.width = width
        self.height = height
        self.backcol = backcol      # background fill colour between frames
        self.fps = fps
        self.quit_keys = pyg.K_ESCAPE, pyg.K_q
        pyg.init()
        self.canvas = pyg.display.set_mode((width, height), pyg.FULLSCREEN|pyg.DOUBLEBUF)

    def run(self):
        """Main loop: tick at fps, dispatch events, let the controller draw."""
        running = True
        clock = pyg.time.Clock()
        while running:
            clock.tick(self.fps)
            running = self.dispatch_events()
            self.controller.process()
            self.flip()
        # while/else: runs once the loop condition becomes false.
        else:
            self.quit()

    def dispatch_events(self):
        """Return False when a quit event or quit key is seen, else True."""
        for event in pyg.event.get():
            if event.type == pyg.QUIT:
                return False
            if event.type == pyg.KEYDOWN:
                if event.key in self.quit_keys:
                    return False
        # for/else: reached when no event requested quitting.
        else:
            return True

    def set_color(self, rgb):
        # Colour used by subsequent rect() calls.
        self.act_color = rgb

    def rect(self, x, y):
        """Draw a filled rectangle at grid cell (x, y) in the active colour."""
        pyg.draw.rect(self.canvas, self.act_color, self.grid.box(x, y))

    def flip(self):
        """Swap buffers, then clear the back buffer for the next frame."""
        pyg.display.flip()
        self.canvas.fill(self.backcol)

    def quit(self):
        pyg.quit()
####
class LedColumn(object):
    """One column of LED cells: a fixed colour plus an on/off state per cell."""

    def __init__(self, colors):
        self.colors = colors
        self.states = [0 for _ in colors]

    def __getitem__(self, i):
        """Return the (color, state) pair for cell *i*."""
        return (self.colors[i], self.states[i])

    def __setitem__(self, i, on_off):
        """Set cell *i*'s on/off state."""
        self.states[i] = on_off
####
class LedMatrix(object):
    """A matrix of LedColumn objects, drawn one column per draw_column() call."""

    def __init__(self, columns, rows, colors):
        self.rows = rows
        self.columns = columns
        # colors is indexed per column here even though the loop variable is
        # named `row` -- presumably colors is a columns x rows nested list;
        # TODO confirm against the caller (Demo builds it that way).
        self.mat = [LedColumn(colors[row]) for row in xrange(columns)]
        # Endless round-robin over column indices for draw_column().
        self.act_column = it.cycle(range(columns))

    def __getitem__(self, column):
        return self.mat[column]

    def set_led(self, column, row, on_off):
        """Switch a single LED on (1) or off (0)."""
        self[column].states[row] = on_off

    def draw_column(self, device):
        """Draw the next column in the cycle: only lit cells are rendered."""
        column = self.act_column.next()
        for i, (col, state) in enumerate(self[column]):
            if state:
                device.set_color(col)
                device.rect(column, i)
####
class Controller(object):
    """The C in MVC: wires the model (demo) to the pygame display."""

    def __init__(self, demo, grid, width, height, fps):
        self.demo = demo
        # Pass fps by keyword: positionally it would land in Display's
        # `backcol` parameter, silently turning the frame rate into the
        # background colour and leaving fps at its default.
        self.display = Display(self, grid, width, height, fps=fps)

    def process(self):
        """One frame: let the model draw itself onto the display."""
        self.demo.animate(self.display)

    def run(self):
        """Hand control to the display's main loop."""
        self.display.run()
####
class Demo(object):
    """The model: lights the matrix cell by cell in a checkerboard palette."""

    def __init__(self, columns, rows, speed=100, one_column=False):
        # Alternating green/red per cell: (c+r) % 2 picks from the pair.
        colors = [[((0, 255, 0), (255, 0, 0))[(c+r) % 2] for r in xrange(rows)]
                  for c in xrange(columns)]
        self.matrix = LedMatrix(columns, rows, colors)
        # Endless generators for the active cell coordinates and frame pacing.
        self.xs = it.cycle(xrange(columns))
        self.ys = it.cycle(xrange(rows))
        self.duration = it.cycle(xrange(max(1, speed)))
        self.ax = 0
        self.ay = 0
        self.columns = columns
        self.one_column = one_column

    def animate(self, display):
        """Advance the active cell every `speed` frames and draw the matrix."""
        # duration yields 0 once per cycle, acting as a frame-count gate.
        if not self.duration.next():
            self.ax = self.xs.next()
            if not self.ax:
                self.ay = self.ys.next()
            # NOTE(review): indentation reconstructed -- this clears the LED
            # *after* ax/ay advance, i.e. the new cell; confirm whether the
            # previous cell was meant to be cleared instead.
            self.matrix.set_led(self.ax, self.ay, 0)
        if self.one_column:
            self.matrix.draw_column(display)
        else:
            for _ in self.matrix:
                self.matrix.draw_column(display)
        self.matrix.set_led(self.ax, self.ay, 1)
####
def main():
    """Build grid/model/controller from the tunables below and run the demo."""
    width = 800
    height = 600
    ## experimentation values
    columns = 12
    rows = 8
    xspace = 10   # horizontal gap between cells, in pixels
    yspace = 10   # vertical gap between cells, in pixels
    fps = 30
    speedfac = 0.03   # fraction of fps between active-cell advances
    # 1 column per frame flag
    one_column = False #True
    dx = width // columns
    dy = height // rows
    # Half the gap on each side centres the cell within its slot.
    grid = Grid(dx, dy, dx - xspace, dy - yspace, xspace // 2, yspace // 2)
    demo = Demo(columns, rows, int(fps * speedfac), one_column)
    Controller(demo, grid, width, height, fps).run()
####
if __name__ == '__main__':
    main()
| Python | 0.000003 | |
2b86b727cd701464969de5679d30f9bea38a08f3 | Create TheDescent.py | Easy/TheDescent/TheDescent.py | Easy/TheDescent/TheDescent.py | import sys
import math
while True:
tallest_index = -1
tallest_height = -1
for i in range(8):
mountain_h = int(input()) # represents the height of one mountain.
if(tallest_height != -1):
if(mountain_h > tallest_height):
tallest_index = i
tallest_height = mountain_h
else:
tallest_index = i
tallest_height = mountain_h
print(tallest_index)
| Python | 0.000001 | |
2a01ffdbac602873615925e6f99fab801a324fef | Create minesweeper.py | minesweeper.py | minesweeper.py | import tkinter
import random
def pol_param():
    """Game settings: [cell size in px, columns, rows, bomb count]."""
    cell_size = 50
    cols = 8
    rows = 8
    bombs = 10
    return [cell_size, cols, rows, bombs]
def perevod(a, v, d):
    """Flatten 2-D coordinates (v, d) into a single index on a board a wide."""
    return a * v + d
def sozd_bomb(a, b, n):
    """Pick n distinct bomb cell indices, uniformly from [0, a*b)."""
    bombs = []
    # Rejection sampling: keep drawing until a fresh index appears.
    while len(bombs) < n:
        candidate = random.randrange(a * b)
        if candidate not in bombs:
            bombs.append(candidate)
    return bombs
def prov(m, f):
    """Return 1 when flat index f holds a bomb (is in m), else 0."""
    return 1 if f in m else 0
def sh_kl(m, f, a):
    """Cell value for flat index f: 9 for a bomb, else neighbouring-bomb count."""
    if f in m:
        return(9)
    # Sum over the 8 neighbouring flat indices (a = board width).
    # NOTE(review): pure index arithmetic wraps across row edges -- a cell in
    # column 0 counts cells from the end of the previous row as neighbours;
    # confirm whether edge clamping was intended.
    c = prov (m, (f + 1)) + prov (m, (f - 1)) + prov (m, (f + a)) + prov (m, (f - a)) + prov (m, (f + a + 1)) + prov (m, (f + 1 - a)) + prov (m, (f - 1 - a)) + prov (m, (f - 1 + a))
    return c
def sh_znach(m, a, b):
    """Build the board: pole[x][y] = [cell value, opened flag, flagged flag].

    The last element appended to pole is a scalar game-over flag (0 = running).
    The original line `pole.append([0; 0; 0] * a)` was a syntax error and,
    read as `[[0, 0, 0]] * a`, would have aliased one cell list across the
    whole row; each cell gets its own fresh list here.  Rows are also built
    for range(a) (not range(b)) so that the pole[x][y] indexing below stays
    in bounds even when a != b.
    """
    pole = []
    for x in range(a):
        pole.append([[0, 0, 0] for _ in range(b)])
    for x in range(a):
        for y in range(b):
            pole[x][y][0] = sh_kl(m, perevod(a, x, y), a)
    pole.append(0)
    return pole
def right_click(a, b, pole, k=None):
    """Toggle the flag marker on cell (column a, row b); returns pole.

    `k` is unused; it is kept with a default so the three-argument calls
    made by the event handlers work (the old required parameter made every
    such call raise TypeError).
    """
    if pole[-1] == 0:              # ignore clicks once the game is over
        if pole[b][a][1] == 0:     # presumably "not yet opened" -- TODO confirm
            if pole[b][a][2] == 0:
                pole[b][a][2] = 1
            else:
                pole[b][a][2] = 0
            # NOTE(review): paint() is not defined anywhere in this file.
            paint(pole)
    return (pole)
def left_click(a, b, pole, k=None):
    """Open cell (column a, row b); set the game-over flag if it is a bomb.

    `k` is unused; defaulted for compatibility -- the event handlers call
    with three arguments, which the old required parameter rejected.
    """
    if pole[-1] == 0:              # ignore clicks once the game is over
        if pole[b][a][2] == 0:     # flagged cells cannot be opened
            pole[b][a][1] = 0
            # NOTE(review): paint() is not defined anywhere in this file.
            paint(pole)
            if pole[b][a][0] == 9:
                pole[-1] = 1       # opened a bomb -> game over
    return (pole)
def LClick(event, size, pole, k=None):
    """Button-1 handler: translate the click position to a cell and open it.

    The old signature (x, y, size, pole, k) never matched the
    `lambda evt: LClick(evt, size, pole)` binding below and then read an
    undefined global `event`; the handler now takes the event directly.
    """
    x, y = event.x, event.y
    b = y // size
    a = x // size
    left_click(a, b, pole)
def RClick(event, size, pole, k=None):
    """Right-button handler: translate the click position to a cell and flag it.

    `k` is defaulted because the binding below passes only three arguments;
    the old required parameter made the handler raise TypeError on use.
    """
    x, y = event.x, event.y
    b = y // size
    a = x // size
    right_click(a, b, pole)
# --- script entry point ---
# Read settings first: the original assigned `size = dan[0]` before `dan`
# existed, raising NameError on startup.
dan = pol_param()
size = dan[0]
m = sozd_bomb(dan[1], dan[2], dan[3])
canvas = tkinter.Canvas()
canvas.pack()
pole = sh_znach(m, dan[1], dan[2])
# NOTE(review): <Button-2> is the *middle* button on most platforms; the
# right button is usually <Button-3> -- confirm the intended binding.
canvas.bind("<Button-2>", lambda evt: RClick(evt, size, pole))
canvas.bind("<Button-1>", lambda evt: LClick(evt, size, pole))
canvas.mainloop()
print(m)
print(pole)
| Python | 0.000001 | |
280aa4c8db7b5580b73ab6980f10d21a6ef2d761 | Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes. | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/PyGameOutput.py | Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/PyGameOutput.py | import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
    """Audio sink: plays float arrays from "inbox" through the pygame mixer."""

    # Samples per mixer buffer; larger values add latency but reduce dropouts.
    bufferSize = 1024
    sampleRate = 44100

    def __init__(self, **argd):
        super(PyGameOutput, self).__init__(**argd)
        # 16-bit signed, mono.
        pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)

    def main(self):
        while 1:
            if not pygame.mixer.get_init():
                # Mixer lost/not ready -- retry initialisation.
                pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
            else:
                if self.dataReady("inbox"):
                    numpyArray = self.recv("inbox")
                    # Scale to 16 bit int
                    # (input is presumably floats in [-1, 1] -- TODO confirm)
                    numpyArray *= 2**15-1
                    numpyArray = numpyArray.astype("int16")
                    # pygame.sndarray here works with Numeric arrays, so
                    # convert from numpy before building the Sound.
                    numericArray = Numeric.asarray(numpyArray)
                    sound = pygame.sndarray.make_sound(numericArray)
                    sound.play()
            if not self.anyReady():
                self.pause()
# Demo wiring: sine oscillator -> audio output (requires Kamaelia installed).
if __name__ == "__main__":
    from Kamaelia.Chassis.Pipeline import Pipeline
    from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc

    Pipeline(SineOsc(), PyGameOutput()).run()
| Python | 0 | |
4433cadaa39dd84b922329c84a7e791d81cac7c6 | Add a very simple test that *must* always pass. * Useful for testing the newstyle API | nettests/simpletest.py | nettests/simpletest.py | from ooni import nettest
class SimpleTest(nettest.TestCase):
    """Trivial always-passing test exercising the new-style nettest API.

    Each test method runs once per value in `inputs` (available as
    self.input) and records results in the shared self.report dict.
    """

    # The framework iterates these values, one test run per input.
    inputs = range(1,100)
    # CLI options: [long name, short name, default, help text].
    optParameters = [['asset', 'a', None, 'Asset file'],
                     ['controlserver', 'c', 'google.com', 'Specify the control server'],
                     ['resume', 'r', 0, 'Resume at this index'],
                     ['other', 'o', None, 'Other arguments']]

    def test_foo(self, *arg, **kw):
        print "Running %s with %s" % ("test_foo", self.input)
        self.report['test_foo'] = 'Antani'
        # 'shared' is written by both tests; last writer wins.
        self.report['shared'] = "sblinda"
        self.assertEqual(1,1)

    def test_f4oo(self, *arg, **kw):
        print "Running %s with %s" % ("test_f4oo", self.input)
        self.report['test_f4oo'] = 'Antani'
        self.report['shared'] = "sblinda2"
        self.assertEqual(1,1)
| Python | 0.000001 | |
73b085113b70df87098190669cfad7867d7ad57c | Create nshield-main.py | nshield-main.py | nshield-main.py | #!/usr/bin/python
# Author: Sami Yessou - samiii@protonmail.com
# nShield - An Easy and Simple Anti-DDoS solution for VPS,Dedicated Servers and IoT devices
# (To be)*Features: Blocks known attackers from the Web and allows users to CDN/Proxying their site with an offsite VPS/Servers
# Still in beta
import os
# enable from init script os.popen("""echo ':msg, contains, "nShield" /var/log/nshield.log' >> /etc/rsyslog.conf && service rsyslog restart""")
#read conf and save variables
dryrun = os.popen("cat /etc/nshield/nshield.conf | grep dry | awk '{print $3}'").read()
basic_ddos = os.popen("cat /etc/nshield/nshield.conf | grep basic_ddos | awk '{print $3}'").read()
under_attack = os.popen("cat /etc/nshield/nshield.conf | grep under_attack | awk '{print $3}'").read()
resolve_asn = os.popen("cat /etc/nshield/nshield.conf | grep resolve_asn | awk '{print $3}'").read()
#Update firehol ip/netsets
os.popen("rm -rf firehol_level1.netset && wget -qN https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/firehol_level1.netset")
os.popen("rm -rf botscout_1d.ipset && wget -qN https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/botscout_1d.ipset")
os.popen("rm -rf bi_any_2_30d.ipset && wget -qN https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/bi_any_2_30d.ipset")
os.popen("rm -rf snort_ipfilter.ipset && wget -qN https://raw.githubusercontent.com/firehol/blocklist-ipsets/master/snort_ipfilter.ipset")
#Load all sets from cwd
blocklist_ipset=os.popen("cat *.ipset").read()
blocklist_netset=os.popen("cat *.netset").read()
whitelist=os.popen("cat /etc/nshield/whitelist").read()
# Get top 10 Nginx reqs
nginx_iplist=os.popen("cat /var/log/nginx/access.log | awk ' { print $1}' | sort -n | uniq -c | sort -rn | head").read()
splitted_nginx_iplist = nginx_iplist.split()
# For every IP check ASN & Reputation
print "Top 10 NGINX Requests are coming from these IPs : \n"+nginx_iplist
print "Top 10 ASN by NGINX Requests: \n"
for ip in splitted_nginx_iplist:
if "." in ip:
print ip+" - MNT BY: "+os.popen("curl -s ipinfo.io/"+ip+"/org").read()
ipt_iplist=os.popen("cat /var/log/nshield.log | awk '{ print $12 }' | sed s'/SRC=//' | sort -n | uniq -c | grep -v DST").read()
top_ipt_iplist=os.popen("cat /var/log/nshield.log | awk '{ print $12 }' | sed s'/SRC=//' | sort -n | uniq -c | sort -rn | grep -v DST | head").read()
splitted_ipt_iplist=ipt_iplist.split()
splitted_top_ipt_iplist=top_ipt_iplist.split()
print "Top 10 TCP Requests are coming from these IPs : \n"+top_ipt_iplist
print "Top 10 ASN of ipt logged Requests: \n"
for ip in splitted_top_ipt_iplist:
if "." in ip:
print ip+" - MNT BY: "+os.popen("curl -s ipinfo.io/"+ip+"/org").read()
# Walk the alternating (count, ip) tokens from the iptables log summary.
# NOTE(review): `conns` is only set in the else branch (the count token that
# precedes each IP) and stays a *string*; `conns >= 10` compares str to int
# (always True on Python 2, TypeError on Python 3), and if the first token
# were an IP this would raise NameError -- confirm intended behaviour.
for ip in splitted_ipt_iplist:
    if "." in ip:
        if ip in blocklist_ipset and conns >= 10:
            print "Blocking "+ip+" because found in ipsets and more than 10 reqs"
            iptblock="iptables -I INPUT -s "+ip+" -m comment --comment nShield-Blocked-from-ipset+10reqs -j DROP"
            # NOTE(review): dryrun is a string read from the config file, so
            # `dryrun is 1` can never be true -- the rule is always applied.
            if dryrun is 1:
                print "Dry Run.."
            else:
                os.popen(iptblock)
        # Block the whole /24 when its prefix appears in the netset text.
        subnet=ip.split('.')
        netset=subnet[0]+"."+subnet[1]+"."+subnet[2]
        if netset in blocklist_netset:
            print "Blocking "+ip+" because found in netsets"
            iptblock="iptables -I INPUT -s "+netset+".0/24 -m comment --comment nShield-Blocked-from-netsets -j DROP"
            if dryrun is 1:
                print "Dry Run.."
            else:
                os.popen(iptblock)
    else:
        conns=ip
print "Setting up whitelist .."
os.popen("iptables -I INPUT -i lo -j ACCEPT && iptables -I INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT")
#check if its real ip before
for ip in whitelist.split():
    print ip
    os.popen("iptables -I INPUT -s "+ip+" -j ACCEPT -m comment --comment nShield-whitelist")
# NOTE(review): basic_ddos/under_attack are raw config strings; any
# non-empty value (including "0\n") enables these branches.
if basic_ddos:
    print "Setting up Basic DDoS Protection"
    # Block SYN FLOOD
    os.popen("iptables -A INPUT -p tcp ! --syn -m state --state NEW -j DROP")
    # Block XMAS Scan
    os.popen("iptables -A INPUT -p tcp --tcp-flags ALL ALL -j DROP")
    # Smurf attack protection
    os.popen("iptables -A INPUT -p icmp -m icmp --icmp-type timestamp-request -j DROP && iptables -A INPUT -p icmp -m limit --limit 1/second -j ACCEPT")
    os.popen("/sbin/sysctl -w net/netfilter/nf_conntrack_tcp_loose=0")
    os .popen("echo 1000000 > /sys/module/nf_conntrack/parameters/hashsize && /sbin/sysctl -w net/netfilter/nf_conntrack_max=2000000 && /sbin/sysctl -w net.ipv4.tcp_syn_retries=2 && /sbin/sysctl -w net.ipv4.tcp_rfc1337=1 && /sbin/sysctl -w net.ipv4.tcp_synack_retries=1")
    print "\nBlocking XMAS scan, Smurf, ICMP attacks & SYN flood"
if under_attack:
    # burst connections and add rate limits
    os.popen('iptables -A INPUT -p tcp --syn -m hashlimit --hashlimit 15/s --hashlimit-burst 30 --hashlimit-mode srcip --hashlimit-name synattack -j ACCEPT && iptables -A INPUT -p tcp --syn -j DROP')
| Python | 0 | |
6c7e3c4e56151dc4723d0c6ae64de6b1acfc0673 | Add some prelimenary tests for new action classes. | st2common/tests/test_shell_action_system_model.py | st2common/tests/test_shell_action_system_model.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pwd
import os
import copy
import unittest2
from st2common.models.system.action import ShellCommandAction
from st2common.models.system.action import ShellScriptAction
LOGGED_USER_USERNAME = pwd.getpwuid(os.getuid())[0]
class ShellCommandActionTestCase(unittest2.TestCase):
    """Verify the sudo/user handling in ShellCommandAction's command string."""

    def setUp(self):
        # Baseline kwargs for ShellCommandAction; tests copy and override
        # `sudo` / `user` per scenario.
        self._base_kwargs = {
            'name': 'test action',
            'action_exec_id': '1',
            'command': 'ls -la',
            'env_vars': {},
            'timeout': None
        }

    def test_user_argument(self):
        # User is the same as logged user, no sudo should be used
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = LOGGED_USER_USERNAME
        action = ShellCommandAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, 'ls -la')

        # User is different, sudo should be used
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = 'mauser'
        action = ShellCommandAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, 'sudo -u mauser -- bash -c \'ls -la\'')

        # sudo is used, it doesn't matter what user is specified since the
        # command should run as root
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = True
        kwargs['user'] = 'mauser'
        action = ShellCommandAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, 'sudo -- bash -c \'ls -la\'')
class ShellScriptActionTestCase(unittest2.TestCase):
    """Tests for the command string assembled by ShellScriptAction.

    NOTE(review): the expected strings below assume a specific iteration
    order of the ``named_args`` dict ('key2' before 'key1') - presumably
    matching the interpreter these tests were written for; confirm that
    ShellScriptAction orders named arguments deterministically.
    """

    def setUp(self):
        # Baseline keyword arguments; each scenario deep-copies and tweaks.
        self._base_kwargs = {
            'name': 'test action',
            'action_exec_id': '1',
            'script_local_path_abs': '/tmp/foo.sh',
            'named_args': {},
            'positional_args': [],
            'env_vars': {},
            'timeout': None
        }

    def test_user_argument(self):
        # User is the same as logged user, no sudo should be used
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = LOGGED_USER_USERNAME
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, '/tmp/foo.sh')

        # User is different, sudo should be used
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = 'mauser'
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, 'sudo -u mauser -- bash -c /tmp/foo.sh')

        # sudo is used, it doesn't matter what user is specified since the
        # command should run as root
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = True
        kwargs['user'] = 'mauser'
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, 'sudo -- bash -c /tmp/foo.sh')

    def test_command_construction_with_parameters(self):
        # same user, named args, no positional args
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = LOGGED_USER_USERNAME
        kwargs['named_args'] = {'key1': 'value1', 'key2': 'value2'}
        kwargs['positional_args'] = []
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, '/tmp/foo.sh key2=value2 key1=value1')

        # different user, named args, no positional args
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = 'mauser'
        kwargs['named_args'] = {'key1': 'value1', 'key2': 'value2'}
        # NOTE(review): '' here vs [] in the base kwargs - presumably
        # deliberate to exercise a falsy non-list value; confirm.
        kwargs['positional_args'] = ''
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        expected = 'sudo -u mauser -- bash -c \'/tmp/foo.sh key2=value2 key1=value1\''
        self.assertEqual(command, expected)

        # same user, positional args, no named args
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = LOGGED_USER_USERNAME
        kwargs['named_args'] = {}
        kwargs['positional_args'] = 'ein zwei drei'
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, '/tmp/foo.sh ein zwei drei')

        # different user, positional args, no named args
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = 'mauser'
        kwargs['named_args'] = {}
        kwargs['positional_args'] = 'ein zwei drei'
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        expected = 'sudo -u mauser -- bash -c \'/tmp/foo.sh ein zwei drei\''
        self.assertEqual(command, expected)

        # same user, positional and named args
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = LOGGED_USER_USERNAME
        kwargs['named_args'] = {'key1': 'value1', 'key2': 'value2'}
        kwargs['positional_args'] = 'ein zwei drei'
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        self.assertEqual(command, '/tmp/foo.sh key2=value2 key1=value1 ein zwei drei')

        # different user, positional and named args
        kwargs = copy.deepcopy(self._base_kwargs)
        kwargs['sudo'] = False
        kwargs['user'] = 'mauser'
        kwargs['named_args'] = {'key1': 'value1', 'key2': 'value2'}
        kwargs['positional_args'] = 'ein zwei drei'
        action = ShellScriptAction(**kwargs)
        command = action.get_full_command_string()
        expected = ('sudo -u mauser -- bash -c \'/tmp/foo.sh key2=value2 '
                    'key1=value1 ein zwei drei\'')
        self.assertEqual(command, expected)
| Python | 0 | |
124c4f30455d0892608622ddd09a0e7d83c3e8da | Create xmltodict_implementation.py | Useful-Libs/xmltodict_implementation.py | Useful-Libs/xmltodict_implementation.py | import xmltodict
# Parse the whole XML document into a nested dict structure.
# NOTE(review): 'path/to/file.xml' is a placeholder path - replace it
# with a real file before running.
with open('path/to/file.xml') as fd:
    doc = xmltodict.parse(fd.read())

# As the trailing comments show: attributes are keyed with an '@' prefix,
# element text with '#text', and repeated elements become lists.
doc['mydocument']['@has'] # == u'an attribute'
doc['mydocument']['and']['many'] # == [u'elements', u'more elements']
doc['mydocument']['plus']['@a'] # == u'complex'
doc['mydocument']['plus']['#text'] # == u'element as well'
| Python | 0.000006 | |
3971a15aea15e097fac00f680068b505cc9047b8 | Add new descend functionality. | python/seqan/descend.py | python/seqan/descend.py | #
# Copyright John Reid 2014
#
"""
Code to descend indexes with top-down and top-down-history iterators.
"""
def depthpredicate(maxdepth):
    """Create a predicate that only descends the tree to a maximum depth.

    The returned callable is truthy for an iterator whose representative
    is still shorter than maxdepth.
    """
    def within_depth(it):
        return it.repLength < maxdepth
    return within_depth
def suffixpredicate(suffix):
    """Create a predicate that only descends the part of the tree that
    matches the given suffix.

    Only the first min(len(suffix), it.repLength) characters are compared,
    so every vertex on the path to (and below) the suffix passes.
    """
    def matches_suffix(it):
        n = min(it.repLength, len(suffix))
        return it.representative[:n] == suffix[:n]
    return matches_suffix
class Descender(object):
    """Mix-in class to descend an index. The class using this mix-in must
    provide a visitvertex(it) method (note: the code calls ``visitvertex``,
    not ``visit_node``); its return value decides whether the subtree below
    the vertex is explored. A predicate can be supplied to filter parts of
    the tree from the descent. See depthpredicate() and suffixpredicate().
    """
    def descend(self, it):
        """Depth-first, pre-order descent from the vertex at ``it``.

        Children are visited only when visitvertex() returns a truthy
        value; ``it`` is restored to its starting vertex before returning.
        """
        if self.visitvertex(it):
            if it.goDown():
                while True:
                    self.descend(it)
                    # Advance to the next sibling; stop when there is none.
                    if not it.goRight():
                        break
                # Undo the goDown() so the caller's iterator is unchanged.
                it.goUp()
class ParallelDescender(object):
    """Descends two indexes (primary and secondary) in parallel. Each vertex in
    the primary is visited and the corresponding vertices (or closest vertices)
    in the secondary are simultaneously visited. The two iterators are called
    synchronised if and only if the representative of the primary iterator
    matches the start of the representative of the secondary iterator.
    """
    def descend(self, primit, secit, stillsynced=True):
        """Recursively co-descend from the vertices at primit/secit.

        A ``visitvertex(primit, secit, stillsynced)`` method must be
        provided by the subclass or instance; it is called once for every
        primary vertex.  Both iterators are restored to their starting
        vertices before this method returns.  ``stillsynced`` must describe
        the iterators truthfully on entry (enforced by the assert below).
        """
        self.visitvertex(primit, secit, stillsynced)
        assert stillsynced == (
            primit.repLength <= secit.repLength
            and primit.representative == secit.representative[:primit.repLength])
        if primit.goDown():
            while True:
                # We have moved the primary iterator,
                # we should check if we are still synchronised
                if stillsynced:
                    parentstart = primit.repLength - primit.parentEdgeLength
                    end = min(primit.repLength, secit.repLength)
                    newstillsynced = (
                        primit.representative[parentstart:end]
                        == secit.representative[parentstart:end])
                else:
                    newstillsynced = False
                # Count the number of vertexes we descend
                numgodowns = 0
                # Only move the secondary iterator if we are still
                # synchronised with primary iterator
                if newstillsynced:
                    # Move secondary iterator to same (or similar) position
                    # as primary iterator
                    while secit.repLength < primit.repLength:
                        # Try and descend along the next primary character
                        if not secit.goDown(
                                primit.representative[secit.repLength]):
                            #logger.debug('Could not goDown()')
                            newstillsynced = False
                            break
                        numgodowns += 1
                        # Check we descended successfully
                        start = secit.repLength - secit.parentEdgeLength + 1
                        end = min(primit.repLength, secit.repLength)
                        if secit.representative[start:end] != primit.representative[start:end]:
                            # We did not manage to match primary's parent edge
                            #logger.debug('Parent edge mismatch')
                            newstillsynced = False
                            break
                # recurse
                self.descend(primit, secit, newstillsynced)
                # Move secondary iterator back up to original position
                # (xrange: this module targets Python 2)
                for i in xrange(numgodowns):
                    secit.goUp()
                # Go to next vertex in primary index
                if not primit.goRight():
                    break
            primit.goUp()
class CallbackParallelDescender(ParallelDescender):
    """Parallel top-down descender that forwards every vertex visit to a
    user-supplied callback."""

    def __init__(self, callback):
        super(CallbackParallelDescender, self).__init__()
        # The callback receives (primit, secit, stillsynced), i.e. the
        # same signature descend() uses for visitvertex.
        self.visitvertex = callback
| Python | 0 | |
1e6956fb793e12b720b521feb4c0eeabaf490cea | add cache.py to cleanup cache | cache.py | cache.py | #!/usr/bin/python
import os
import time
from workflow import Workflow
# Maximum cache file age in seconds (one day) before clean() removes it.
AGE = 3600 * 24
# Module-level logger; assigned from the Workflow instance in Cache.__init__.
LOG = None
class Cache(object):
    """Helper around an Alfred Workflow cache directory.

    Wires the module logger to the workflow's logger and replaces the
    workflow's ``cached_data_age`` with a filesystem-mtime based version.
    """

    def __init__(self):
        global LOG
        self.wf = Workflow()
        LOG = self.wf.logger  # publish the workflow logger module-wide
        self.cachedir = self.wf.cachedir
        # Patch the workflow so wf.cached_data_fresh() (used in clean())
        # measures age with our implementation below.
        self.wf.cached_data_age = self.cached_data_age

    def cached_data_age(self, name):
        """Return the age in seconds of cache file ``name`` (0 if absent)."""
        cache_path = self.wf.cachefile(name)
        if not os.path.exists(cache_path):
            return 0
        return time.time() - os.stat(cache_path).st_mtime

    def clean(self):
        """Delete every cache file older than AGE, keeping ``*.log`` files."""
        # Renamed loop variable: the original used ``file``, shadowing the
        # Python 2 builtin of the same name.
        for filename in os.listdir(self.wf.cachedir):
            # Log files are never expired.
            if filename.endswith(".log"):
                continue
            if not self.wf.cached_data_fresh(filename, AGE):
                # Lazy %s formatting avoids building the message when the
                # debug level is disabled.
                LOG.debug("deleting cache file: %s", filename)
                os.remove(self.wf.cachefile(filename))
if __name__ == "__main__":
    # One-shot invocation: build the helper and purge stale cache files.
    Cache().clean()
| Python | 0.000001 | |
25e09e4dbbc6dbc87c3b1cc2833021a9ae022a0e | Create compact/struct.py for python2/3 compatibility | pyvisa/compat/struct.py | pyvisa/compat/struct.py | # -*- coding: utf-8 -*-
"""
pyvisa.compat.struct
~~~~~~~~~~~~~~~~~~~~~~~~~
Python 2/3 compatibility for struct module
:copyright: 2015, PSF
:license: PSF License
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import sys
import struct
# Re-export struct.error so callers can always catch it from this module.
error = struct.error

# Unicode format strings are only accepted by struct from Python 2.7.8 on
# (hexversion 0x02070800).  Older interpreters need the format coerced to
# a native str; newer ones can simply re-export the struct functions.
if sys.hexversion < 0x02070800:
    def pack(fmt, *args):
        return struct.pack(str(fmt), *args)

    def pack_into(fmt, *args, **argk):
        return struct.pack_into(str(fmt), *args, **argk)

    def unpack(fmt, string):
        return struct.unpack(str(fmt), string)

    def unpack_from(fmt, *args, **kwargs):
        return struct.unpack_from(str(fmt), *args, **kwargs)

    def calcsize(fmt):
        return struct.calcsize(str(fmt))
else:
    # Modern enough: plain aliases, no wrapping needed.
    pack = struct.pack
    pack_into = struct.pack_into
    unpack = struct.unpack
    unpack_from = struct.unpack_from
    calcsize = struct.calcsize
| Python | 0 | |
ef8bc0ddffa142e8580606377bff1d2737365711 | add various utilities in dog.util | dog/util.py | dog/util.py | import discord
def make_profile_embed(member):
    """Return an embed whose author line shows ``member``'s tag and avatar."""
    author_name = f'{member.name}#{member.discriminator}'
    embed = discord.Embed()
    embed.set_author(name=author_name, icon_url=member.avatar_url)
    return embed
def american_datetime(datetime):
    """Format a datetime as US-style ``MM/DD/YYYY HH:MM:SS AM/PM``.

    (The parameter is named ``datetime`` by the original API; it is the
    object to format, not the module.)
    """
    return '{:%m/%d/%Y %I:%M:%S %p}'.format(datetime)
def pretty_timedelta(delta):
    """Return ``delta`` as a compact human readable string.

    Durations of a week or more get a coarse prefix ("1.14 weeks",
    "2.0 months", "1 year") followed by the sub-day remainder rendered as
    ``HHhMMmSSs``.  Shorter durations show only the ``HHhMMmSSs`` part
    (previously such deltas were rendered with a stray leading comma,
    e.g. ", 01h02m03s").
    """
    big = ''
    if 7 <= delta.days < 21:
        weeks = round(delta.days / 7, 2)
        plural = 's' if weeks == 0 or weeks > 1 else ''
        big = f'{weeks} week{plural}'
    elif 21 <= delta.days < 365:
        # Approximate a month as 31 days; intentionally rough.
        months = round(delta.days / 31, 2)
        plural = 's' if months == 0 or months > 1 else ''
        big = f'{months} month{plural}'
    elif delta.days >= 365:
        years = round(delta.days / 365)
        plural = 's' if years == 0 or years > 1 else ''
        big = f'{years} year{plural}'
    m, s = divmod(delta.seconds, 60)
    h, m = divmod(m, 60)
    small = '{:02d}h{:02d}m{:02d}s'.format(h, m, s)
    # Only join with ", " when there is a coarse prefix to join to.
    return f'{big}, {small}' if big else small
| Python | 0.000251 | |
976eda9cbbce4a4fad759c4197b630209dd5a2bd | add programmer wrapper script | dist/tools/programmer/programmer.py | dist/tools/programmer/programmer.py | #!/usr/bin/env python3
# Copyright (C) 2021 Inria
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import time
import shlex
import subprocess
import argparse
from contextlib import contextmanager
# ANSI-coloured status glyphs: green check for success, red cross for failure.
SUCCESS = "\033[32;1m✓\033[0m"
FAILED = "\033[31;1m×\033[0m"
# Braille spinner frames cycled while the programmer subprocess runs.
SPIN = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
class Programmer:
    """Runs an external flashing/programming command with a progress UI.

    The instance attributes ``action``, ``cmd``, ``programmer`` and
    ``verbose`` are not set by a constructor: they are injected by
    argparse via ``parse_args(namespace=programmer)`` (see main()).
    """

    @contextmanager
    def spawn_process(self):
        """Yield a subprocess running in background.

        In quiet mode stdout and stderr are captured together through a
        single pipe; in verbose mode output goes straight to the console.
        """
        kwargs = {} if self.verbose else {
            "stdout": subprocess.PIPE,
            "stderr": subprocess.STDOUT
        }
        yield subprocess.Popen(shlex.split(self.cmd), **kwargs)

    def spin(self, process):
        """Print a spinning icon while programmer process is running."""
        while process.poll() is None:
            for index in range(len(SPIN)):
                # "\r" rewrites the same console line on every frame.
                sys.stdout.write(
                    "\r \033[36;1m{}\033[0m {} in progress "
                    "(programmer: '{}')"
                    .format(SPIN[index], self.action, self.programmer)
                )
                sys.stdout.flush()
                time.sleep(0.1)

    def print_status(self, process, elapsed):
        """Print status of the finished programmer process (quiet mode)."""
        # "\u001b[2K" clears the spinner line before printing the result.
        print(
            "\r \u001b[2K{} {} {} (programmer: '{}' - duration: {:0.2f}s)"
            .format(
                FAILED if process.returncode != 0 else SUCCESS,
                self.action,
                "failed!" if process.returncode != 0 else "done!",
                self.programmer,
                elapsed
            )
        )
        # Print content of stdout (which also contain stderr) when the
        # subprocess failed
        if process.returncode != 0:
            print(process.stdout.read().decode())
        else:
            print(
                "(for full programmer output add PROGRAMMER_QUIET=0 or "
                "QUIET=0 to the make command line)"
            )

    def run(self):
        """Run the programmer in a background process.

        Returns the subprocess exit code (0 when the command is empty).
        """
        if not self.cmd.strip():
            # Do nothing if programmer command is empty
            return 0
        if self.verbose:
            print(self.cmd)
        start = time.time()
        with self.spawn_process() as proc:
            try:
                if self.verbose:
                    proc.communicate()
                else:
                    self.spin(proc)
            except KeyboardInterrupt:
                # Ctrl-C: stop the programmer instead of leaving it running.
                proc.terminate()
                proc.kill()
            elapsed = time.time() - start
            if not self.verbose:
                # When using the spinning icon, print the programmer status
                self.print_status(proc, elapsed)
            return proc.returncode
def main(parser):
    """Parse the command line into a Programmer instance and execute it."""
    prog = Programmer()
    parser.parse_args(namespace=prog)
    # Exit with the same return code as the programmer subprocess.
    sys.exit(prog.run())
def parser():
    """Build and return the command line argument parser for this script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--action", help="Programmer action")
    arg_parser.add_argument("--cmd", help="Programmer command")
    arg_parser.add_argument("--programmer", help="Programmer")
    arg_parser.add_argument(
        "--verbose", action='store_true', default=False, help="Verbose output"
    )
    return arg_parser
if __name__ == "__main__":
    # Script entry point: build the CLI parser and run the programmer.
    main(parser())
| Python | 0.000001 | |
595356b13c68dbd3ecd50fe4eede1b479e918056 | This is junk | django-hq/apps/transformers/junk.py | django-hq/apps/transformers/junk.py | # This is a test
# for linefeeds | Python | 0.999993 | |
a7f97bbb5019b073d211c999d05b0500434d3c75 | Use six.moves.http_client instead of httplib. | oscar/test/testcases.py | oscar/test/testcases.py | from six.moves import http_client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django_webtest import WebTest
from purl import URL
from oscar.core.compat import get_user_model
# Resolve the (possibly custom) Django user model once at import time.
User = get_user_model()
def add_permissions(user, permissions):
    """
    Grant the listed permissions to ``user``.

    :param permissions: e.g. ['partner.dashboard_access']
    """
    for label in permissions:
        # Each entry is "<app_label>.<codename>".
        app_label, _, codename = label.partition('.')
        matching = Permission.objects.get(
            content_type__app_label=app_label, codename=codename)
        user.user_permissions.add(matching)
class WebTestCase(WebTest):
    """Base class for WebTest based functional tests.

    setUp() creates (and logs in) a user according to the class-level
    flags below; the assert* helpers wrap common HTTP response checks.
    """

    # Class-level knobs; override in subclasses.
    is_staff = False
    is_anonymous = False
    username = 'testuser'
    email = 'testuser@buymore.com'
    password = 'somefancypassword'
    is_superuser = False
    permissions = []  # e.g. ['partner.dashboard_access']

    def setUp(self):
        self.user = None
        if not self.is_anonymous or self.is_staff:
            self.user = User.objects.create_user(self.username, self.email,
                                                 self.password)
            self.user.is_staff = self.is_staff
            perms = self.permissions
            add_permissions(self.user, perms)
            self.user.save()
            self.login()

    def get(self, url, **kwargs):
        # Requests run as self.user unless an explicit user kwarg is given.
        kwargs.setdefault('user', self.user)
        return self.app.get(url, **kwargs)

    def post(self, url, **kwargs):
        kwargs.setdefault('user', self.user)
        return self.app.post(url, **kwargs)

    def login(self, username=None, password=None):
        username = username or self.username
        password = password or self.password
        self.client.login(username=username, password=password)

    # Custom assertions

    def assertIsRedirect(self, response, expected_url=None):
        self.assertTrue(response.status_code in (
            http_client.FOUND, http_client.MOVED_PERMANENTLY))
        if expected_url:
            location = URL.from_string(response['Location'])
            self.assertEqual(expected_url, location.path())

    def assertRedirectsTo(self, response, url_name):
        # Accept any 3xx status, then compare the redirect path only.
        self.assertTrue(str(response.status_code).startswith('3'))
        location = response.headers['Location']
        redirect_path = location.replace('http://localhost:80', '')
        self.assertEqual(reverse(url_name), redirect_path)

    def assertNoAccess(self, response):
        self.assertContext(response)
        self.assertTrue(response.status_code in (http_client.NOT_FOUND,
                                                 http_client.FORBIDDEN))

    def assertRedirectUrlName(self, response, name, kwargs=None):
        self.assertIsRedirect(response)
        location = response['Location'].replace('http://testserver', '')
        self.assertEqual(location, reverse(name, kwargs=kwargs))

    def assertIsOk(self, response):
        self.assertEqual(http_client.OK, response.status_code)

    def assertContext(self, response):
        self.assertTrue(response.context is not None,
                        'No context was returned')

    def assertInContext(self, response, key):
        self.assertContext(response)
        self.assertTrue(key in response.context,
                        "Context should contain a variable '%s'" % key)
| from six.moves import http_client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Permission
from django_webtest import WebTest
from purl import URL
from oscar.core.compat import get_user_model
# Resolve the (possibly custom) Django user model once at import time.
User = get_user_model()
def add_permissions(user, permissions):
    """
    Grant the listed permissions to ``user``.

    :param permissions: e.g. ['partner.dashboard_access']
    """
    for perm_label in permissions:
        app_label, _, codename = perm_label.partition('.')
        permission = Permission.objects.get(
            content_type__app_label=app_label, codename=codename)
        user.user_permissions.add(permission)
class WebTestCase(WebTest):
    """Base class for WebTest based functional tests.

    setUp() creates (and logs in) a user according to the class-level
    flags below; the assert* helpers wrap common HTTP response checks.
    """

    # Class-level knobs; override in subclasses.
    is_staff = False
    is_anonymous = False
    username = 'testuser'
    email = 'testuser@buymore.com'
    password = 'somefancypassword'
    is_superuser = False
    permissions = []  # e.g. ['partner.dashboard_access']

    def setUp(self):
        self.user = None
        if not self.is_anonymous or self.is_staff:
            self.user = User.objects.create_user(self.username, self.email,
                                                 self.password)
            self.user.is_staff = self.is_staff
            perms = self.permissions
            add_permissions(self.user, perms)
            self.user.save()
            self.login()

    def get(self, url, **kwargs):
        # Requests run as self.user unless an explicit user kwarg is given.
        kwargs.setdefault('user', self.user)
        return self.app.get(url, **kwargs)

    def post(self, url, **kwargs):
        kwargs.setdefault('user', self.user)
        return self.app.post(url, **kwargs)

    def login(self, username=None, password=None):
        username = username or self.username
        password = password or self.password
        self.client.login(username=username, password=password)

    # Custom assertions

    def assertIsRedirect(self, response, expected_url=None):
        self.assertTrue(response.status_code in (
            http_client.FOUND, http_client.MOVED_PERMANENTLY))
        if expected_url:
            location = URL.from_string(response['Location'])
            self.assertEqual(expected_url, location.path())

    def assertRedirectsTo(self, response, url_name):
        # Accept any 3xx status, then compare the redirect path only.
        self.assertTrue(str(response.status_code).startswith('3'))
        location = response.headers['Location']
        redirect_path = location.replace('http://localhost:80', '')
        self.assertEqual(reverse(url_name), redirect_path)

    def assertNoAccess(self, response):
        self.assertContext(response)
        # Fixed: previously referenced the undefined name ``httplib`` (the
        # import at the top of this file is six.moves' http_client), which
        # raised NameError whenever this assertion ran.
        self.assertTrue(response.status_code in (http_client.NOT_FOUND,
                                                 http_client.FORBIDDEN))

    def assertRedirectUrlName(self, response, name, kwargs=None):
        self.assertIsRedirect(response)
        location = response['Location'].replace('http://testserver', '')
        self.assertEqual(location, reverse(name, kwargs=kwargs))

    def assertIsOk(self, response):
        # Fixed: ``httplib`` -> ``http_client`` here as well.
        self.assertEqual(http_client.OK, response.status_code)

    def assertContext(self, response):
        self.assertTrue(response.context is not None,
                        'No context was returned')

    def assertInContext(self, response, key):
        self.assertContext(response)
        self.assertTrue(key in response.context,
                        "Context should contain a variable '%s'" % key)
| Python | 0 |
d9c197840282c6bdedf5e001a1092aa707ae139c | update email field length | corehq/apps/data_analytics/migrations/0008_auto_20161114_1903.py | corehq/apps/data_analytics/migrations/0008_auto_20161114_1903.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2016-11-14 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen MaltRow.email to 254 characters."""

    dependencies = [
        ('data_analytics', '0007_auto_20160819_1423'),
    ]

    operations = [
        migrations.AlterField(
            model_name='maltrow',
            name='email',
            field=models.EmailField(max_length=254),
        ),
    ]
| Python | 0.000001 | |
0c2f730ad2e4db7f53b2a867711e00048428d82d | add rest api | src/emuvim/examples/simple_topology_restapi.py | src/emuvim/examples/simple_topology_restapi.py | """
This is an example topology for the distributed cloud emulator (dcemulator).
(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
This is an example that shows how a user of the emulation tool can
define network topologies with multiple emulated cloud data centers.
The definition is done with a Python API which looks very similar to the
Mininet API (in fact it is a wrapper for it).
We only specify the topology *between* data centers not within a single
data center (data center internal setups or placements are not of interest,
we want to experiment with VNF chains deployed across multiple PoPs).
The original Mininet API has to be completely hidden and not be used by this
script.
"""
import logging
from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.compute import RestApiEndpoint
#from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
from emuvim.api.zerorpc.network import ZeroRpcApiEndpointDCNetwork
# Show INFO-level messages from the emulator on the console.
logging.basicConfig(level=logging.INFO)
def create_topology1():
    """Build and run the example multi-PoP emulation topology.

    Four emulated data centers are interconnected through one extra SDN
    switch, a ZeroRPC monitoring endpoint and a REST compute endpoint are
    attached, and the emulator is started.  Blocks in the Mininet CLI
    until the user types "exit".
    """
    # 1. Create a data center network object (DCNetwork).
    net = DCNetwork()

    # 1b. Add a monitoring agent to the DCNetwork.
    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
    mon_api.connectDCNetwork(net)
    mon_api.start()

    # 2. Add (logical) data centers to the topology; each data center is
    #    one "bigswitch" in this simplified first prototype.
    dc1 = net.addDatacenter("datacenter1")
    dc2 = net.addDatacenter("datacenter2")
    dc3 = net.addDatacenter("long_data_center_name3")
    dc4 = net.addDatacenter(
        "datacenter4",
        metadata={"mydata": "we can also add arbitrary metadata to each DC"})

    # 3. Additional SDN switches may be used to interconnect data centers.
    s1 = net.addSwitch("s1")

    # 4. Wire up data centers and switches to define the topology.  These
    #    links can use Mininet's features to limit bw, add delay or jitter.
    net.addLink(dc1, dc2)
    net.addLink("datacenter1", s1)
    net.addLink(s1, dc3)
    net.addLink(s1, "datacenter4")

    # 5. Expose the data centers through a REST API endpoint so that an
    #    external orchestrator can start/stop/list compute resources
    #    (VNFs, represented by Docker containers in the emulation).
    api1 = RestApiEndpoint("127.0.0.1", 5000)
    api1.connectDatacenter(dc1)
    api1.connectDatacenter(dc2)
    api1.connectDatacenter(dc3)
    api1.connectDatacenter(dc4)
    api1.start()  # runs in another thread, does not block

    # 6. Start the emulator and hand control to the interactive CLI.
    net.start()
    net.CLI()
    # When the user types "exit" in the CLI we stop the emulator.
    net.stop()
def main():
    """Set the Mininet log level and run the example topology."""
    setLogLevel('info')
    create_topology1()


if __name__ == '__main__':
    main()
| Python | 0 | |
8ccf3d937d25ec93d1ce22d60735ffbcaf776fe3 | Add a script for plotting distance to target. | analysis/plot-target-distance.py | analysis/plot-target-distance.py | import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
    root='plot data from this experiment subjects',
    pattern=('plot data from files matching this pattern', 'option'),
    markers=('plot data for these mocap markers', 'option'),
    target_num=('plot data for this target', 'option', None, int),
    approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
    # For every matching trial, plot the distance-to-target curve for each
    # of the 11 targets (one colour per target) on a single set of axes.
    # NOTE(review): the ``markers``, ``target_num`` and ``approach_sec``
    # options are declared but never used in the body - presumably
    # leftovers from a planned feature; confirm before removing.
    with plots.plot() as ax:
        for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
            # ``i`` is unused; enumerate() kept as-is.
            for t in range(11):
                s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
                ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])


if __name__ == '__main__':
    climate.call(main)
| Python | 0 | |
d8c18d9244ca09e942af57d74a407498c25d05ce | Add Linear Discriminant Analaysis. | LDA.py | LDA.py | import numpy as np
from scipy import linalg as LA
class LDA(object):
    """Linear Discriminant Analysis via the generalized eigenproblem on the
    within-class (Sw) and between-class (Sb) scatter matrices.

    NOTE(review): this implementation solves ``eig(Sw, Sb)`` and keeps the
    largest eigenvalues, following the original code - confirm this is the
    intended criterion before relying on the discriminant directions.
    """

    def __init__(self, data_inputs, data_labels):
        """``data_inputs``: (n_samples, n_features) array-like;
        ``data_labels``: one label per sample."""
        self.data_inputs = np.array(data_inputs)
        # Convert labels as well so the boolean masking in analyse() works
        # even when a plain Python list is passed (previously
        # ``np.where(list == label)`` silently selected no rows).
        self.data_labels = np.asarray(data_labels)
        self.test_cases = self.data_inputs.shape[0]
        self.labels = np.unique(data_labels)
        self.Sw = np.zeros((self.data_inputs.shape[1], self.data_inputs.shape[1]))
        self.Sb = self.Sw.copy()

    def analyse(self):
        """Accumulate Sw/Sb and solve the generalized eigenproblem.

        Results are stored in ``self.eigen_vals`` / ``self.eigen_vecs``
        (eigenvectors as columns, ordered by descending eigenvalue).
        """
        C = np.cov(self.data_inputs.T)
        for label in self.labels:
            indices = np.where(self.data_labels == label)
            points = self.data_inputs[indices[0]]
            classcov = np.cov(points.T)
            # ``float`` instead of ``np.float`` (removed in NumPy >= 1.24).
            self.Sw += (float(points.shape[0]) / self.test_cases) * classcov
        self.Sb = C - self.Sw
        evals, evecs = LA.eig(self.Sw, self.Sb)
        indices = np.argsort(evals)
        indices = indices[::-1]
        evals = evals[indices]
        # scipy.linalg.eig returns eigenvectors as the *columns* of evecs,
        # so reorder columns; the previous row indexing scrambled them.
        evecs = evecs[:, indices]
        self.eigen_vals = evals
        self.eigen_vecs = evecs

    def reduce_dim(self, red_n, data_inputs=None):
        """Project ``data_inputs`` (default: the training data) onto the
        top ``red_n`` discriminant directions."""
        w = self.eigen_vecs[:, :red_n]
        if data_inputs is None:
            data_inputs = self.data_inputs
        return np.dot(data_inputs, w)

    def expand_dim(self, red_data):
        """Map reduced data of shape (n, red_n) back into feature space."""
        red_n = red_data.shape[1]
        return np.transpose(np.dot(self.eigen_vecs[:, :red_n], red_data.T))
| Python | 0.000016 | |
3cc1bceaca2fe74d3d9f9fa846f976ba99cc7dee | Create RDF.py | RDF.py | RDF.py | from sys import argv
import pandas as pd
import numpy as np
from functions import crdf
import time
import accelerate as acc
import matplotlib
from matplotlib import pyplot as plt
fn = argv[1]
print('Box origin must be at the center!')
# Particle coordinates: one row per particle, whitespace separated columns.
pos = pd.read_csv(fn, delim_whitespace=True, squeeze=1, header=None).values
# NOTE(review): duplicate imports - time and numpy are already imported above.
import time
import numpy as np
# NOTE(review): ``box`` (the simulation box edge lengths, apparently an
# ndarray given box.max() below) is used throughout but never defined in
# this script - running it as-is raises NameError.  Presumably it should
# be read from the command line or the input file; confirm.
Ndim = 500 # Fineness of the binning grid (delta-function resolution)
V = box[0]*box[1]*box[2]
rho_bins = Ndim**3/V # Number density of bins
rho = pos.shape[0]/V
s = time.time()
p, e = np.histogramdd(pos, bins=(Ndim, Ndim, Ndim), range=((-box[0]/2, box[0]/2), (-box[1]/2, box[1]/2),(-box[2]/2, box[2]/2)))
print('Binning particles: %s' % (time.time()-s))
p = np.fft.fftshift(p) # POS is center-origin; move the origin to a corner.
s = time.time()
fp = acc.mkl.fftpack.fftn(p) # MKL FFT from the (third-party) accelerate package
print('FFT time: %s' % (time.time()-s))
FP = fp*fp.conj()
s = time.time()
RDF = np.fft.ifftn(FP).real # IFFT{<rho(K)rho(-K)>}, 1/N\sum_i......(see numpy.fft, so rho_bins is needed)
print('IFFT time: %s' % (time.time()-s))
RDF[0,0,0] -= pos.shape[0]
RDF = np.fft.fftshift(RDF)
rbin = 0.2 # radial bin width; must be >= box / Ndim
# Bin centres along each axis (edge + half an edge spacing).
rx = e[0][:Ndim] + 0.5*(e[0][-1]-e[0][-2])
ry = e[1][:Ndim] + 0.5*(e[1][-1]-e[1][-2])
rz = e[2][:Ndim] + 0.5*(e[2][-1]-e[2][-2])
from numba import jit
@jit # spherically average g(R) on the grid into radial bins g(r)
def norm_r(RDF, rbin, rx, ry, rz):
    # NOTE(review): np.float is removed in NumPy >= 1.24; this needs to
    # become plain ``float`` (or np.float64) on modern NumPy.
    rdf = np.zeros(int(box.max()/2*3**0.5/rbin)+1, dtype=np.float)
    cter = np.zeros(rdf.shape, dtype=np.float)
    for i in range(Ndim):
        for j in range(Ndim):
            for k in range(Ndim):
                rr = rx[i]**2+ry[j]**2+rz[k]**2
                r = int(rr**0.5/rbin)
                rdf[r] += RDF[i,j,k]
                cter[r] += 1
    # nan_to_num guards the empty-bin 0/0 divisions.
    return np.nan_to_num(rdf/cter)
rdf = norm_r(RDF, rbin, rx,ry,rz)
rdf /= pos.shape[0] * rho # NA*NB/V for gAB(r)
rdf *= rho_bins # normalise by the bin number density
rr = np.arange(rdf.shape[0])*rbin
o = open('rdf.txt', 'w')
for i, y in enumerate(rdf):
    o.write('%.8f %.8f\n' % ((i+0.5) * rbin, y))
o.close()
| Python | 0.000004 | |
bce02c436479adf3bf3deae22704be5cf1cace89 | set addr_list and contact_list attr #5635 | erpnext/utilities/address_and_contact.py | erpnext/utilities/address_and_contact.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def load_address_and_contact(doc, key):
    """Loads address list and contact list in `__onload`"""
    from erpnext.utilities.doctype.address.address import get_address_display

    doc.get("__onload")["addr_list"] = [
        a.update({"display": get_address_display(a)})
        for a in frappe.get_all(
            "Address", fields="*", filters={key: doc.name},
            order_by="is_primary_address desc, modified desc")]

    # Contacts are not loaded for Lead documents.
    if doc.doctype != "Lead":
        doc.get("__onload")["contact_list"] = frappe.get_all(
            "Contact", fields="*", filters={key: doc.name},
            order_by="is_primary_contact desc, modified desc")
def has_permission(doc, ptype, user):
    """Permission check driven by the document's link fields (Customer,
    Supplier, Company, Sales Partner)."""
    links = get_permitted_and_not_permitted_links(doc.doctype)
    if not links.get("not_permitted_links"):
        # optimization: don't determine permissions based on link fields
        return True

    linked_names = []
    for df in links.get("permitted_links") + links.get("not_permitted_links"):
        linked_name = doc.get(df.fieldname)
        linked_names.append(linked_name)
        if linked_name and frappe.has_permission(df.options, ptype, doc=linked_name):
            return True

    # True when every link field was empty, False otherwise.
    return not any(linked_names)
def get_permission_query_conditions_for_contact(user):
    """Permission query filter for Contact (``user`` is unused)."""
    return get_permission_query_conditions("Contact")


def get_permission_query_conditions_for_address(user):
    """Permission query filter for Address (``user`` is unused)."""
    return get_permission_query_conditions("Address")
def get_permission_query_conditions(doctype):
    """Build the SQL filter restricting list queries by link-field access."""
    links = get_permitted_and_not_permitted_links(doctype)
    permitted = links.get("permitted_links")
    not_permitted = links.get("not_permitted_links")

    if not not_permitted:
        # when everything is permitted, don't add additional condition
        return ""

    if not permitted:
        # Nothing is permitted: require every link field to be empty,
        # e.g. ifnull(customer, '')='' and ifnull(supplier, '')=''
        clauses = ["ifnull(`tab{doctype}`.`{fieldname}`, '')=''".format(
            doctype=doctype, fieldname=df.fieldname) for df in not_permitted]
        return "( " + " and ".join(clauses) + " )"

    # Otherwise require at least one permitted link field to be set,
    # e.g. ifnull(customer, '')!='' or ifnull(supplier, '')!=''
    clauses = ["ifnull(`tab{doctype}`.`{fieldname}`, '')!=''".format(
        doctype=doctype, fieldname=df.fieldname) for df in permitted]
    return "( " + " or ".join(clauses) + " )"
def get_permitted_and_not_permitted_links(doctype):
    """Split the doctype's relevant link fields by whether the session user
    has permission on the linked doctype."""
    permitted_links = []
    not_permitted_links = []

    for df in frappe.get_meta(doctype).get_link_fields():
        # Only these link targets participate in the permission scheme.
        if df.options not in ("Customer", "Supplier", "Company", "Sales Partner"):
            continue
        bucket = permitted_links if frappe.has_permission(df.options) else not_permitted_links
        bucket.append(df)

    return {
        "permitted_links": permitted_links,
        "not_permitted_links": not_permitted_links
    }
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def load_address_and_contact(doc, key):
    """Loads address list and contact list in `__onload`"""
    from erpnext.utilities.doctype.address.address import get_address_display

    doc.get("__onload").addr_list = [
        a.update({"display": get_address_display(a)})
        for a in frappe.get_all(
            "Address", fields="*", filters={key: doc.name},
            order_by="is_primary_address desc, modified desc")]

    # Contacts are not loaded for Lead documents.
    if doc.doctype != "Lead":
        doc.get("__onload").contact_list = frappe.get_all(
            "Contact", fields="*", filters={key: doc.name},
            order_by="is_primary_contact desc, modified desc")
def has_permission(doc, ptype, user):
    """Document-level permission hook based on party link fields.

    Returns True when the user can read at least one linked party record,
    or when every party link field on the document is empty.
    """
    links = get_permitted_and_not_permitted_links(doc.doctype)
    if not links.get("not_permitted_links"):
        # optimization: no restricted link fields, nothing to check
        return True

    linked_names = []
    for df in links.get("permitted_links") + links.get("not_permitted_links"):
        name = doc.get(df.fieldname)
        linked_names.append(name)
        if name and frappe.has_permission(df.options, ptype, doc=name):
            # access to any one linked party record is enough
            return True

    # permitted only if all link fields were empty; otherwise denied
    return not any(linked_names)
def get_permission_query_conditions_for_contact(user):
	"""Permission-query hook for Contact; delegates to the shared builder."""
	return get_permission_query_conditions("Contact")
def get_permission_query_conditions_for_address(user):
	"""Permission-query hook for Address; delegates to the shared builder."""
	return get_permission_query_conditions("Address")
def get_permission_query_conditions(doctype):
    """Build a SQL condition string restricting *doctype* rows by the party
    link fields (Customer/Supplier/...) the session user may read.

    Returns "" when no restriction is needed.
    """
    links = get_permitted_and_not_permitted_links(doctype)

    if not links.get("not_permitted_links"):
        # when everything is permitted, don't add additional condition
        return ""

    if not links.get("permitted_links"):
        # nothing is permitted: every restricted link field must be empty,
        # e.g. ifnull(customer, '')='' and ifnull(supplier, '')=''
        fragments = [
            "ifnull(`tab{0}`.`{1}`, '')=''".format(doctype, df.fieldname)
            for df in links["not_permitted_links"]
        ]
        return "( {0} )".format(" and ".join(fragments))

    # mixed case: at least one permitted link field must be filled,
    # e.g. ifnull(customer, '')!='' or ifnull(supplier, '')!=''
    fragments = [
        "ifnull(`tab{0}`.`{1}`, '')!=''".format(doctype, df.fieldname)
        for df in links["permitted_links"]
    ]
    return "( {0} )".format(" or ".join(fragments))
def get_permitted_and_not_permitted_links(doctype):
    """Split *doctype*'s party link fields by user read permission.

    Only link fields pointing at Customer, Supplier, Company or
    Sales Partner are considered; everything else is ignored.
    """
    permitted = []
    not_permitted = []
    relevant_targets = ("Customer", "Supplier", "Company", "Sales Partner")

    for df in frappe.get_meta(doctype).get_link_fields():
        if df.options not in relevant_targets:
            continue
        # bucket by whether the current user can read the target doctype
        bucket = permitted if frappe.has_permission(df.options) else not_permitted
        bucket.append(df)

    return {
        "permitted_links": permitted,
        "not_permitted_links": not_permitted
    }
| Python | 0 |
10dbbe5b10abf954ab912fc3a2cdfe1532bf71cf | test file added | cortex-py/test/test_cortex.py | cortex-py/test/test_cortex.py | import time
import cortex
class MyDataHandler:
    """Collects motion-capture frame data delivered by Cortex callbacks.

    Fixes over the original: the class mixed Python-2 ``print`` statements
    with ``print()`` calls (a syntax error on Python 3) and used a bare
    ``except:`` that silently swallowed every error.
    """

    def __init__(self):
        # accumulated unidentified-marker samples, one per received frame
        self.alldata = []

    def MyErrorHandler(self, iLevel, msg):
        """Error callback: log the level and message text, return 0 (handled)."""
        print("ERROR: ")
        print(iLevel, msg.contents)
        return 0

    def MyDataHandler(self, Frame):
        """Frame callback: dump body/marker info and record one sample.

        Always returns 0, as the Cortex callback contract in this file expects.
        """
        print("got called")
        try:
            print("Received multi-cast frame no %d\n" % (Frame.contents.iFrame))
            print("Bodies: ", Frame.contents.nBodies)
            print("BodyData: ", Frame.contents.BodyData[0].szName)
            print("Number of Markers of Body[0]: ", Frame.contents.BodyData[0].nMarkers)
            for i in range(Frame.contents.BodyData[0].nMarkers):
                print("MarkerX ", Frame.contents.BodyData[0].Markers[i][0])
                print("MarkerY ", Frame.contents.BodyData[0].Markers[i][1])
                print("MarkerZ ", Frame.contents.BodyData[0].Markers[i][2])
            print("BodyMarker[2].x: ", Frame.contents.BodyData[0].Markers[3][0])
            print("Unidentified markers: ", Frame.contents.nUnidentifiedMarkers)
            print("Delay: ", Frame.contents.fDelay)
            print("", Frame.contents.UnidentifiedMarkers[0][0])
            self.alldata.append(Frame.contents.UnidentifiedMarkers[0][0])
        except Exception:
            # ctypes attribute/index access fails when the frame carries no data
            print("Frame empty")
        return 0
if __name__ == "__main__":
    # NOTE(review): the Cortex_* functions below are referenced unqualified
    # even though only `import cortex` appears above -- presumably this was
    # meant to be `from cortex import *`; confirm before running.
    my_obj = MyDataHandler()
    Cortex_SetErrorMsgHandlerFunc(my_obj.MyErrorHandler)
    Cortex_SetDataHandlerFunc(my_obj.MyDataHandler)

    if Cortex_Initialize() != 0:
        print("ERROR: unable to initialize")
        Cortex_Exit()
        exit(0)

    # Fetch and dump the body definitions (marker names per tracked body).
    pBodyDefs = Cortex_GetBodyDefs()
    if pBodyDefs == None:
        print("Failed to get body defs")
    else:
        print("Got body defs")
        print("bodydefs: ", pBodyDefs.contents.nBodyDefs)
        # NOTE(review): Python-2 `print` statements below -- this script will
        # not parse on Python 3 as written.
        print "Marker names: "
        print "", pBodyDefs.contents.BodyDefs[0].szName
        for i in range(pBodyDefs.contents.BodyDefs[0].nMarkers):
            print "Marker: ", pBodyDefs.contents.BodyDefs[0].szMarkerNames[i]
        Cortex_FreeBodyDefs(pBodyDefs)
        pBodyDefs = None

    # NOTE(review): c_void_p / c_int are ctypes names but ctypes is never
    # imported here, and the *types* (not instances) are passed -- confirm
    # the intended out-parameter convention of Cortex_Request.
    pResponse = c_void_p
    nBytes = c_int
    retval = Cortex_Request("GetContextFrameRate", pResponse, nBytes)
    if retval != 0:
        print("ERROR, GetContextFrameRate")

    #contextFrameRate = cast(pResponse, POINTER(c_float))
    #print("ContextFrameRate = %3.1f Hz", contextFrameRate)

    # Briefly enter live mode so the data handler receives frames, then pause.
    print("*** Starting live mode ***")
    retval = Cortex_Request("LiveMode", pResponse, nBytes)
    time.sleep(1.0)
    retval = Cortex_Request("Pause", pResponse, nBytes)
    print("*** Paused live mode ***")

    print("****** Cortex_Exit ******")
    retval = Cortex_Exit();
    # NOTE(review): Python-2 print statement again.
    print my_obj.alldata
| Python | 0 | |
8b257c2a4b8f949f81965b7ffaa80d18c48974a4 | add app framework | app.py | app.py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Handles GET requests with a plain-text greeting."""
    def get(self):
        self.write("Hello, world")
# Route table: the root path is served by MainHandler.
application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    # Listen on port 8888 and run the event loop until interrupted.
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
| Python | 0.000003 | |
e258b608c40b2abca30fbc85601e05c48558fff9 | add weird migration | webapp/calendars/migrations/0023_auto_20160109_1307.py | webapp/calendars/migrations/0023_auto_20160109_1307.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; avoid hand-editing the
    # operations, as applied migrations must stay byte-stable.

    dependencies = [
        ('calendars', '0022_auto_20151121_1628'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name_plural': 'Categories'},
        ),
        # choices are user-facing Polish color labels; keys are bootstrap
        # context classes used for rendering
        migrations.AlterField(
            model_name='category',
            name='color',
            field=models.CharField(max_length=16, default='primary', choices=[('primary', 'Niebieski'), ('success', 'Zielony'), ('info', 'Jasno niebieski'), ('warning', 'Żółty'), ('danger', 'Czerwony')]),
        ),
    ]
| Python | 0.000002 | |
7b15ad790631926030f8b0b6c32f214f2c8001b1 | Create __init__.py | cno/boolean/__init__.py | cno/boolean/__init__.py | Python | 0.000429 | ||
edf6e9ceacab9aa2d8795340089182ead07c30b3 | Add ipopt v3.12.4 package. | var/spack/repos/builtin/packages/ipopt/package.py | var/spack/repos/builtin/packages/ipopt/package.py | from spack import *
class Ipopt(Package):
    """Ipopt (Interior Point OPTimizer, pronounced eye-pea-Opt) is a
    software package for large-scale nonlinear optimization."""
    homepage = "https://projects.coin-or.org/Ipopt"
    url = "http://www.coin-or.org/download/source/Ipopt/Ipopt-3.12.4.tgz"

    version('3.12.4', '12a8ecaff8dd90025ddea6c65b49cb03')
    version('3.12.3', 'c560cbfa9cbf62acf8b485823c255a1b')
    version('3.12.2', 'ec1e855257d7de09e122c446506fb00d')
    version('3.12.1', 'ceaf895ce80c77778f2cab68ba9f17f3')
    version('3.12.0', 'f7dfc3aa106a6711a85214de7595e827')

    depends_on("blas")
    depends_on("lapack")
    depends_on("pkg-config")
    # sequential (no-MPI) double-precision MUMPS is the linear solver
    # wired up in install() below
    depends_on("mumps+double~mpi")

    def install(self, spec, prefix):
        """Configure, build, test and install Ipopt (spack entry point)."""
        # Dependency directories
        blas_dir = spec['blas'].prefix
        lapack_dir = spec['lapack'].prefix
        mumps_dir = spec['mumps'].prefix

        # Add directory with fake MPI headers in sequential MUMPS
        # install to header search path
        mumps_flags = "-ldmumps -lmumps_common -lpord -lmpiseq"
        mumps_libcmd = "-L%s " % mumps_dir.lib + mumps_flags

        # By convention, spack links blas & lapack libs to libblas & liblapack
        blas_lib = "-L%s" % blas_dir.lib + " -lblas"
        lapack_lib = "-L%s" % lapack_dir.lib + " -llapack"

        configure_args = [
            "--prefix=%s" % prefix,
            "--with-mumps-incdir=%s" % mumps_dir.include,
            "--with-mumps-lib=%s" % mumps_libcmd,
            "--enable-shared",
            "--with-blas-incdir=%s" % blas_dir.include,
            "--with-blas-lib=%s" % blas_lib,
            "--with-lapack-incdir=%s" % lapack_dir.include,
            "--with-lapack-lib=%s" % lapack_lib
        ]
        configure(*configure_args)

        # IPOPT does not build correctly in parallel on OS X
        make(parallel=False)
        make("test", parallel=False)
        make("install", parallel=False)
| Python | 0 | |
b69476c28ed3e67d77c93f8c1fc75fde0cb33f2a | Add WikiaSearch module | commands/WikiaSearch.py | commands/WikiaSearch.py | import requests
from CommandTemplate import CommandTemplate
from IrcMessage import IrcMessage
import Constants
class Command(CommandTemplate):
    """IRC command that searches a wiki hosted on Wikia.com.

    NOTE(review): this module is Python-2 only as written (bare ``print``
    statement and ``dict.keys()[0]`` below); confirm the target runtime
    before modernizing.
    """
    triggers = ['wikiasearch']
    helptext = "Searches a wiki on Wikia.com. Usage: '{commandPrefix}wikiasearch [wiki-name] [search]'. Wiki names aren't case-sensitive, but searches are, sorry"

    def execute(self, message):
        """Reply to the IRC message with an article abstract or an error text.

        :type message: IrcMessage
        """
        #First check if enough parameters were passed
        if message.messagePartsLength == 0:
            return message.reply("Please tell me which Wikia wiki you want me to search, there's a BILLION of 'em", "say")
        elif message.messagePartsLength == 1:
            return message.reply("What do you want me to search for on the {} Wikia wiki?".format(message.messageParts[0]), "say")

        # first word is the wiki name, the remainder is the article to look up
        searchterm = " ".join(message.messageParts[1:])
        articleSearchResult = self.retrieveArticleAbstract(message.messageParts[0], searchterm)
        # the second tuple element is a user-facing text in both the
        # success and the failure case
        message.reply(articleSearchResult[1], "say")

    @staticmethod
    def retrieveArticleAbstract(wikiName, articleName):
        """Fetch a short abstract of *articleName* from the given Wikia wiki.

        Returns a (success, text) tuple where *text* is always suitable to
        show the user. Follows REDIRECT pages recursively.
        """
        #Retrieve the page, if we can
        try:
            r = requests.get("http://{}.wikia.com/api/v1/Articles/Details".format(wikiName), params={"titles": articleName.replace(" ", "_"), "abstract": "200"}, timeout=10.0)
        except requests.exceptions.Timeout:
            return (False, "Apparently Wikia got caught up reading that article, because it didn't get back to me. Maybe try again later")
        #If the wiki doesn't exist, we get redirected to a different page
        if r.url == "http://community.wikia.com/wiki/Community_Central:Not_a_valid_community?from={}.wikia.com".format(wikiName.lower()):
            return (False, "Apparently the wiki '{}' doesn't exist on Wikia. You invented a new fandom!".format(wikiName))

        #Request succeeded, wiki exists
        apireply = r.json()
        #If the requested page doesn't exist, the return is empty
        if len(apireply['items']) == 0:
            return (False, "Apparently the page '{}' doesn't exist. Seems you know more about {} than the fandom. Or maybe you made a typo?".format(articleName, wikiName))

        # exactly one item is expected; grab its id
        # (NOTE(review): .keys()[0] is Python-2 only)
        articleId = apireply['items'].keys()[0]
        articleInfo = apireply['items'][articleId]
        print "[WikiaSearch] article info:", articleInfo
        #Apparently the page exists. It could still be a redirect page though
        if articleInfo['abstract'].startswith("REDIRECT "):
            redirectArticleName = articleInfo['abstract'].split(' ', 1)[1]
            # recurse once per redirect hop
            return Command.retrieveArticleAbstract(wikiName, redirectArticleName)

        #From here it's a success. We need the URL to append
        url = "{}{}".format(apireply['basepath'], articleInfo['url'])
        #Check if it isn't a disambiguation page
        if articleInfo['abstract'].startswith("{} may refer to:".format(articleInfo['title'])):
            return (True, "Apparently '{}' can mean multiple things. Who knew? Here's the list of what it can mean: {}".format(articleName, url))

        #Seems we got an article start! Return that
        return (True, articleInfo['abstract'] + Constants.GREY_SEPARATOR + url)
| Python | 0 | |
fe32ab94bbf36621fa926d565a7720b52f1d5f11 | 268. Missing Number. In-place | p268_inplace.py | p268_inplace.py | import unittest
class Solution(object):
    def missingNumber(self, nums):
        """Return the single value in 0..n missing from nums (LeetCode 268).

        O(n) time, O(1) extra space: treats each value as an index and
        stamps visited slots with -1 in place (the input list is modified).
        The one index never stamped is the missing number; if all slots get
        stamped, the missing value is n itself.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        for start in nums:
            cursor = start
            # walk the chain value -> index, stamping each visited slot;
            # stop at n (no slot for it) or at an already-stamped slot
            while cursor != -1 and cursor != n and nums[cursor] != -1:
                following = nums[cursor]
                nums[cursor] = -1
                cursor = following
        for idx, value in enumerate(nums):
            if value != -1:
                return idx
        return n
class Test(unittest.TestCase):
    """Unit tests for Solution.missingNumber."""

    def test(self):
        for case, answer in (([0, 1, 3], 2), ([0, 1, 2], 3)):
            self._test(case, answer)

    def _test(self, nums, expected):
        # fresh Solution per call; missingNumber mutates its input
        self.assertEqual(Solution().missingNumber(nums), expected)
if __name__ == '__main__':
    # run the unittest test runner when executed as a script
    unittest.main()
| Python | 0.999999 | |
533aeb6cdc045f7d4cbfc4bc20dd89da4179ab35 | Add application class to hold all the resources pertaining to an end-user application including RPC servers, HTTP servers etc. | app/core/services/application.py | app/core/services/application.py | from threading import RLock
from uuid import uuid4
from app.core.messaging import Sender
class Application(object):
APPLICATION_INFO_QUEUE = "/_system/applications"
def __init__(self):
self.unique_id = str(uuid4())
self.rpc_servers = {}
self.app_servers = {}
self.info_lock = RLock()
def register_rpc_server(self, rpc_server):
with self.info_lock:
names = {x.name for x in self.rpc_servers.values()}
if rpc_server.name in names:
raise ValueError("Name already exists: " + rpc_server.name)
self.rpc_servers[rpc_server.queue_name] = rpc_server
self.push_update()
def register_application_server(self, server):
with self.info_lock:
self.app_servers[server.id] = server
self.push_update()
def push_update(self):
sender = Sender(self.APPLICATION_INFO_QUEUE)
sender.start()
sender.send(self.info_message, headers={"KEY": self.unique_id})
sender.close()
@property
def info_message(self):
with self.info_lock:
return {
"apps": {x.unique_id: x.info_message for x in self.app_servers},
"rpc": {x.unique_id: x.info_message for x in self.rpc_servers}
}
| Python | 0 | |
1df5619347b8f3e2a9fd49c95455e8b3aba07cf9 | Add example of desired new quick server usage | examples/quick_server.py | examples/quick_server.py | import hug
@hug.get()
def quick():
    """Respond to GET requests with a static status string."""
    return "Serving!"

if __name__ == '__main__':
    # NOTE(review): __hug__ is injected into the module namespace by the hug
    # framework -- confirm it exists when running this file directly rather
    # than via the hug CLI.
    __hug__.serve()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.