commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
b5d1be9069507feaeb41cfcf9cd774a244ffe49c | Add Activity model | sqlalchemy_continuum/ext/activity_stream.py | sqlalchemy_continuum/ext/activity_stream.py | import sqlalchemy as sa
from sqlalchemy_utils import generic_relationship, JSONType
class Activity(object):
@declared_attr
def actor_id(self):
return sa.Column(
sa.Integer,
sa.ForeignKey('user.id'),
index=True
)
@declared_attr
def actor(self):
return sa.orm.relationship('User')
verb = sa.Column(sa.Unicode(255))
data = sa.Column(JSONType)
# This is used to discriminate between the linked tables.
object_type = sa.Column(sa.Unicode(255))
# This is used to point to the primary key of the linked row.
object_id = sa.Column(sa.Integer)
object = generic_relationship(object_type, object_id)
# This is used to discriminate between the linked tables.
target_type = sa.Column(sa.Unicode(255))
# This is used to point to the primary key of the linked row.
target_id = sa.Column(sa.Integer)
target = generic_relationship(target_type, target_id)
| Python | 0.000001 | |
98b738e21918d1b6c4f2193cf229c518c9913974 | add standalone affordance server script | src/python/scripts/affordanceServer.py | src/python/scripts/affordanceServer.py | from ddapp import consoleapp
from ddapp import lcmobjectcollection
from ddapp.timercallback import TimerCallback
import datetime
def main():
app = consoleapp.ConsoleApp()
meshCollection = lcmobjectcollection.LCMObjectCollection('MESH_COLLECTION_COMMAND')
affordanceCollection = lcmobjectcollection.LCMObjectCollection('AFFORDANCE_COLLECTION_COMMAND')
meshCollection.sendEchoRequest()
affordanceCollection.sendEchoRequest()
def printCollection():
print
print '----------------------------------------------------'
print datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print '%d affordances' % len(affordanceCollection.collection)
for desc in affordanceCollection.collection.values():
print
print 'name:', desc['Name']
print 'type:', desc['classname']
timer = TimerCallback(targetFps=0.2)
timer.callback = printCollection
timer.start()
#app.showPythonConsole()
app.start()
if __name__ == '__main__':
main()
| Python | 0 | |
8e4d60645fb45e37c7a947b3a86219e5fd15c194 | Add py-geeup package (#12367) | var/spack/repos/builtin/packages/py-geeup/package.py | var/spack/repos/builtin/packages/py-geeup/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGeeup(PythonPackage):
"""Simple Client for Earth Engine Uploads with Selenium Support."""
homepage = "https://github.com/samapriya/geeup"
url = "https://pypi.io/packages/source/g/geeup/geeup-0.2.4.tar.gz"
version('0.2.4', sha256='20f62306ea900d7fa28a97cc92204716212dc030c50a6ac8214772a61a1a83fe')
depends_on('py-setuptools@38.3.0:', type='build')
depends_on('py-earthengine-api@0.1.87:', type=('build', 'run'))
depends_on('py-requests@2.10.0:', type=('build', 'run'))
depends_on('py-retrying@1.3.3:', type=('build', 'run'))
depends_on('py-beautifulsoup4@4.5.1:', type=('build', 'run'))
depends_on('py-pandas@0.23.0:', type=('build', 'run'))
depends_on('py-psutil@5.4.5:', type=('build', 'run'))
depends_on('py-requests-toolbelt@0.7.0:', type=('build', 'run'))
depends_on('py-pytest@3.0.0:', type=('build', 'test'))
depends_on('py-future@0.16.0:', type=('build', 'run'))
depends_on('py-google-cloud-storage@1.1.1:', type=('build', 'run'))
depends_on('py-selenium@3.13.0:', type=('build', 'run'))
depends_on('py-pysmartdl', type=('build', 'run'))
depends_on('py-pysmartdl@1.2.5', type=('build', 'run'), when='^python@:3.3')
depends_on('py-pysmartdl@1.3.1:', type=('build', 'run'), when='^python@3.4:')
depends_on('py-pathlib@1.0.1:', type=('build', 'run'))
depends_on('py-lxml@4.1.1:', type=('build', 'run'))
depends_on('py-oauth2client@4.1.3:', type=('build', 'run'))
| Python | 0 | |
eb4bcaf1a94963bc1af697180a31a48a84333eb6 | Add exclusive state for /* */ comments to the curly lexer. Seems to fix leaf node comments too. | libraries/vyconf/configfile/curly/lexer.py | libraries/vyconf/configfile/curly/lexer.py | # vyconf.configfile.curly.lexer: lexer for the curly config
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import ply.lex as lex
class Lexer(object):
# Multiline comment can't be extracted with regex,
# so we have exclusive state for it
states = (
('COMMENT', 'exclusive'),
)
tokens = (
'LBRACE',
'RBRACE',
'IDENTIFIER',
'STRING',
'NODE_COMMENT',
'SEMICOLON',
'NEWLINE'
)
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_SEMICOLON = r';'
# /* */ comment. This is a bit complicated.
# VyConf is supposed to store node comments along with nodes
# and display them in the config etc., that's why all the hassle
def t_COMMENT(self, t):
r'/\*'
t.lexer.code_start = t.lexer.lexpos
t.lexer.level = 1
t.lexer.begin('COMMENT')
t_COMMENT_ignore = '\n'
def t_COMMENT_anything(self, t):
r'(\s|\w)+'
def t_COMMENT_error(self, t):
print("Illegal character '{0}'".format(t.value[0]))
t.lexer.skip(1)
def t_COMMENT_end(self, t):
r'\*/'
tmp_str = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-2]
t.value = tmp_str.strip()
t.type = "NODE_COMMENT"
t.lexer.lineno += t.value.count('\n')
t.lexer.begin('INITIAL')
return t
# The comment stuff is over
# Define a rule so we can track line numbers
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
return t
def t_IDENTIFIER(self, t):
r'[^\s;{}\"\']+'
return t
def t_STRING(self, t):
r'\"([^\\"]|(\\.))*\"'
escaped = 0
str = t.value[1:-1]
new_str = ""
for i in range(0, len(str)):
c = str[i]
if escaped:
if c == "n":
c = "\n"
elif c == "t":
c = "\t"
new_str += c
escaped = 0
else:
if c == "\\":
escaped = 1
else:
new_str += c
t.value = new_str
return t
t_ignore = ' \t\n'
# Error handling rule
def t_error(self, t):
print("Illegal character '{0}'".format(t.value[0]))
t.lexer.skip(1)
# Build the lexer
def build(self, **kwargs):
self.lexer = lex.lex(module=self, **kwargs)
def input(self, text):
self.lexer.input(text)
def token(self):
self.last_token = self.lexer.token()
return self.last_token
| # vyconf.configfile.curly.lexer: lexer for the curly config
#
# Copyright (C) 2014 VyOS Development Group <maintainers@vyos.net>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
# USA
import ply.lex as lex
class Lexer(object):
tokens = (
'LBRACE',
'RBRACE',
'IDENTIFIER',
'STRING',
'NODE_COMMENT',
'SEMICOLON',
'NEWLINE'
)
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_SEMICOLON = r';'
# TODO: add multiline comment support
def t_NODE_COMMENT(self, t):
r'/\*(.*)\*/'
str = t.value[2:-2] # Strip off /* and */
str = str.strip()
t.value = str
return t
# Define a rule so we can track line numbers
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
return t
def t_IDENTIFIER(self, t):
r'[^\s;{}\"\']+'
return t
def t_STRING(self, t):
r'\"([^\\"]|(\\.))*\"'
escaped = 0
str = t.value[1:-1]
new_str = ""
for i in range(0, len(str)):
c = str[i]
if escaped:
if c == "n":
c = "\n"
elif c == "t":
c = "\t"
new_str += c
escaped = 0
else:
if c == "\\":
escaped = 1
else:
new_str += c
t.value = new_str
return t
t_ignore = ' \t\n'
# Error handling rule
def t_error(self, t):
print("Illegal character '{0}'".format(t.value[0]))
t.lexer.skip(1)
# Build the lexer
def build(self, **kwargs):
self.lexer = lex.lex(module=self, **kwargs)
def input(self, text):
self.lexer.input(text)
def token(self):
self.last_token = self.lexer.token()
return self.last_token
| Python | 0 |
09aada1e7b734bde947a7031f97ecce34b8c65b2 | Create magical_marvelous_tour.py | Google_Code_Jam/2014/Round_3/A/magical_marvelous_tour.py | Google_Code_Jam/2014/Round_3/A/magical_marvelous_tour.py | #!/usr/bin/python -tt
"""Solves Google Code Jam 2014 Round 3 Problem A
(https://code.google.com/codejam/contest/3024486/dashboard#s=p0)
"Magical, Marvelous Tour"
"""
import sys
def read_input():
"""Parses problem data from stdin.
Args:
None
Returns:
List of test cases, in order specified, each of which is list of
integers representing values of n, p, q, r, s, in that order
"""
lines = sys.stdin.read().splitlines()
num_test_cases = int(lines[0])
assert num_test_cases == len(lines) - 1
test_cases = []
for line in lines[1:]:
test_cases.append([int(x) for x in line.split()])
return test_cases
def get_best_range(devices):
"""Determines best choice of range for Arnar.
Args:
devices: list of integers where value of element i is number of
transistors in device i
Returns:
Tuple consisting of tuple of indices defining range and integer of
number of transistors in range Arnar will choose
"""
# Since Solveig will always choose interval with most transistors, Arnar's
# best chance of winning is with whatever partition which maximizes the
# number of transistors in the interval with the second-highest number of
# transistors.
# brute-force approach: try all possible partitions; however, we don't
# recompute the number of devices in each of the three intervals from
# scratch; rather, we set them at the start and update them with at most a
# one addition or subtraction as we change the partitions
num_best = 0
range_best = (0, 0)
# generate array with cumulative transistor sum for each device index, i.e.
# element i reflects number of transistors in devices with indices < i
cumulative_sums = [0]
for i in range(1, len(devices) + 1):
cumulative_sums.append(cumulative_sums[i - 1] + devices[i - 1])
for i in range(len(devices)):
for j in range(i, len(devices)):
interval_sums = [cumulative_sums[i],
cumulative_sums[j + 1] - cumulative_sums[i],
cumulative_sums[len(devices)] -
cumulative_sums[j + 1]]
assert sum(interval_sums) == cumulative_sums[len(devices)]
# NOTE: following is faster than sorting list of 3 elements and
# adding elements 0 and 1
num_arnar = cumulative_sums[len(devices)] - max(interval_sums)
if num_arnar > num_best:
num_best = num_arnar
range_best = (i, j)
return (range_best, num_best)
def main():
test_cases = read_input()
i = 1
for test_case in test_cases:
(n, p, q, r, s) = test_case
devices = [(x * p + q) % r + s for x in range(n)]
num_transistors_total = sum(devices)
((range_start, range_end), num_transistors_arnar) = \
get_best_range(devices)
probability_win = num_transistors_arnar / float(num_transistors_total)
print 'Case #%d: %.10f' % (i, probability_win)
i += 1
if __name__ == '__main__':
main()
| Python | 0.999893 | |
c50628d1cf984be774cdf1bc6728b9c1cb3f94fa | Create Assignment2Solution.py | Assignments/Assignment2Solution.py | Assignments/Assignment2Solution.py | # Your name here
# Assignment 2: Process a folder of shapefiles
# Using the os library, find all shapefiles,and only shapefiles in a given folder and buffer them as before.
# Catch exceptions to handle invalid shapefiles.
import arcpy
import os
def main(inputfolder,prefix,outputfolder):
"""Buffer all shapefiles in inputfolder, appending with prefix and output to outputfolder."""
filelist = os.listdir(inf)
for f in filelist:
if f.endswith('.shp'):
try:
input = inputfolder + f
output = outputfolder + prefix + f
arcpy.Buffer_analysis (input, output, u'500 Feet')
except Exception as e:
print "Unable to buffer", f
print e
return outputfolder
if __name__ == '__main__':
# Arguments must be supplied in the __main__ block, not in the function called.
inf = u'C:\\Facilities\\'
p = u'Buffered_'
outf = u'C:\\Facilities\\'
# Print output location to standard output
print "Output written to", main(inf, p, outf)
| Python | 0 | |
4f32369efb0b2cd8540cc78132cadfbed6e68ae8 | Read and write xls files | src/petlx/xls.py | src/petlx/xls.py | """
Read and write xls files, using xlrd.
"""
import os
import petl
from petlx.util import UnsatisfiedDependency
dep_message = """
The package xlrd is required. pip install xlrd.
"""
def fromxls(filename, sheetname):
"""
Extract a table from a sheet in an Excel (.xls) file.
N.B., the sheet name is case sensitive, so watch out for, e.g., 'Sheet1'.
The package xlrd is required. Try ``pip install xlrd``.
"""
return XLSView(filename, sheetname)
class XLSView(petl.util.RowContainer):
def __init__(self, filename, sheetname='Sheet1'):
self.filename = filename
self.sheetname = sheetname
def __iter__(self):
try:
import xlrd
except ImportError as e:
raise UnsatisfiedDependency(e, dep_message)
wb = xlrd.open_workbook(filename=self.filename)
ws = wb.sheet_by_name(self.sheetname)
return (ws.row_values(rownum) for rownum in range(0,ws.nrows))
import sys
from petlx.integration import integrate
integrate(sys.modules[__name__]) | Python | 0 | |
24d742e444c84df99629d8a6aff7ca7e6c90f995 | Add adhoc script to detect jobs with stuck ActiveInvocations list. | scheduler/misc/detect_stuck_active_invs.py | scheduler/misc/detect_stuck_active_invs.py | #!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
| Python | 0 | |
cf78037980a9345c12b1e2562bc4eda63cea95b3 | Add a simple regression test to go with r143260. CommandInterpreter::PreprocessCommand() should not infinite loop when a target has not been specified yet. | test/functionalities/backticks/TestBackticksWithoutATarget.py | test/functionalities/backticks/TestBackticksWithoutATarget.py | """
Test that backticks without a target should work (not infinite looping).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):
mydir = "functionalities/backticks"
def test_backticks_no_target(self):
"""A simple test of backticks without a target."""
self.expect("print `1+2-3`",
substrs = [' = 0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| Python | 0.000178 | |
905bccf6f66020939202a4be50cffade071881dc | Create multi_time_frame_strategy.py | vnpy/app/cta_strategy/strategies/multi_time_frame_strategy.py | vnpy/app/cta_strategy/strategies/multi_time_frame_strategy.py | from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
Direction,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager,
)
class MultiTimeframeStrategy(CtaTemplate):
""""""
author = '用Python的交易员'
rsi_signal = 20
rsi_window = 14
fast_window = 5
slow_window = 20
fixed_size = 1
rsi_value = 0
rsi_long = 0
rsi_short = 0
fast_ma = 0
slow_ma = 0
ma_trend = 0
parameters = [ 'rsi_signal', 'rsi_window', 'fast_window', 'slow_window','fixed_size']
variables = ['rsi_value','rsi_long','rsi_short','fast_ma','slow_ma','ma_trend']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(MultiTimeframeStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.rsi_long = 50 + self.rsi_signal
self.rsi_short = 50 - self.rsi_signal
self.bg5 = BarGenerator(self.on_bar,5, self.on_5min_bar)
self.am5 = ArrayManager()
self.bg15 = BarGenerator(self.on_bar,15, self.on_15min_bar)
self.am15 = ArrayManager()
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
self.load_bar(10)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.bg5.update_tick(tick)
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
self.bg5.update_bar(bar)
self.bg15.update_bar(bar)
def on_5min_bar(self, bar:BarData):
""""""
self.cancel_all()
self.am5.update_bar(bar)
if not self.am5.inited:
return
if not self.ma_trend:
return
self.rsi_value = self.am5.rsi(self.rsi_window)
if self.pos == 0:
if self.ma_trend > 0 and self.rsi_value >= self.rsi_long:
self.buy(bar.close_price+5, self.fixed_size)
elif self.ma_trend < 0 and self.rsi_value <= self.rsi_short:
self.short(bar.close_price-5, self.fixed_size)
elif self.pos > 0:
if self.ma_trend < 0 or self.rsi_value < 50:
self.sell(bar.close_price-5, abs(self.pos))
elif self.pos < 0:
if self.ma_trend > 0 or self.rsi_value > 50:
self.cover(bar.close_price+5, abs(self.pos))
self.put_event()
def on_15min_bar(self, bar:BarData):
""""""
self.am15.update_bar(bar)
if not self.am15.inited:
return
self.fast_ma = self.am15.sma(self.fast_window)
self.slow_ma = self.am15.sma(self.slow_window)
if self.fast_ma > self.slow_ma:
self.ma_trend = 1
else:
self.ma_trend = -1
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
| Python | 0.000113 | |
28e226a47d16fb6a52c937031be19d8832e7e5c4 | Bump development version | ckeditor_filebrowser_filer/__init__.py | ckeditor_filebrowser_filer/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.2.0.b1'
| # -*- coding: utf-8 -*-
__version__ = '0.1.1'
| Python | 0 |
dc200e50020637650c8a5dfe76895b0a033a8cea | Add tests for verifying that deactivating password works | akvo/rsr/tests/models/test_login_logging.py | akvo/rsr/tests/models/test_login_logging.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
from datetime import timedelta
from django.forms import ValidationError
from django.conf import settings
from django.test import Client
from akvo.rsr.models import LoginLog
from akvo.rsr.models.login_log import MAX_FAILED_LOGINS
from akvo.rsr.tests.base import BaseTestCase
class LoginLoggingTestCase(BaseTestCase):
"""Tests for the login logging model"""
def setUp(self):
self.email = 'frank@example.com'
self.password = 'password'
self.user = self.create_user(self.email, self.password)
self.c = Client(HTTP_HOST=settings.RSR_DOMAIN)
def test_successful_login_creates_log_entry(self):
# When
self.c.login(username=self.email, password=self.password)
# Then
logs = LoginLog.objects.filter(email=self.email)
self.assertTrue(logs.exists())
self.assertTrue(logs.first().success)
def test_failed_login_creates_log_entry(self):
# When
with self.assertRaises(ValidationError):
self.c.login(username=self.email, password='')
# Then
logs = LoginLog.objects.filter(email=self.email)
self.assertTrue(logs.exists())
self.assertFalse(logs.first().success)
def test_password_deactivates_after_max_attempts(self):
# Given
for _ in range(MAX_FAILED_LOGINS - 1):
with self.assertRaises(ValidationError):
self.c.login(username=self.email, password='')
# When
with self.assertRaises(ValidationError) as assertion:
self.c.login(username=self.email, password='')
# Then
self.assertIn('Login has been disabled', assertion.exception.message)
def test_logins_post_password_deactivation_ignored(self):
# When
for _ in range(MAX_FAILED_LOGINS + 10):
with self.assertRaises(ValidationError):
self.c.login(username=self.email, password='')
with self.assertRaises(ValidationError) as assertion:
self.c.login(username=self.email, password=self.password)
# Then
self.assertIn('Login has been disabled', assertion.exception.message)
logs = LoginLog.objects.filter(email=self.email)
self.assertEqual(MAX_FAILED_LOGINS, logs.count())
def test_login_works_after_deactivation_time(self):
# Given
for _ in range(MAX_FAILED_LOGINS + 10):
with self.assertRaises(ValidationError):
self.c.login(username=self.email, password='')
# HACK: Set the creation time of these login attempts to older than login_disable_time
time_delta = settings.LOGIN_DISABLE_TIME * 2
creation_time = LoginLog.objects.first().created_at - timedelta(seconds=time_delta)
LoginLog.objects.update(created_at=creation_time)
# When
self.c.login(username=self.email, password=self.password)
# Then
log_entry = LoginLog.objects.filter(email=self.email).first()
self.assertTrue(log_entry.success)
| Python | 0 | |
e28a6423f63a169b46ebe46e9690d3858f953909 | Add tests | apps/commons/tests/test_accepted_locales.py | apps/commons/tests/test_accepted_locales.py | import os
import shutil
from django.conf import settings
import test_utils
import manage
class AcceptedLocalesTest(test_utils.TestCase):
"""Test lazy evaluation of locale related settings.
Verify that some localization-related settings are lazily evaluated based
on the current value of the DEV variable. Depending on the value,
DEV_LANGUAGES or PROD_LANGUAGES should be used.
"""
locale = manage.path('locale')
locale_bkp = manage.path('locale_bkp')
@classmethod
def setup_class(cls):
"""Create a directory structure for locale/.
Back up the existing locale/ directory and create the following
hierarchy in its place:
- locale/en-US/LC_MESSAGES
- locale/fr/LC_MESSAGES
- locale/templates/LC_MESSAGES
- locale/empty_file
Also, set PROD_LANGUAGES to ('en-US',).
"""
if os.path.exists(cls.locale_bkp):
raise Exception('A backup of locale/ exists at %s which might '
'mean that previous tests didn\'t end cleanly. '
'Skipping the test suite.' % cls.locale_bkp)
cls.DEV = settings.DEV
cls.PROD_LANGUAGES = settings.PROD_LANGUAGES
cls.DEV_LANGUAGES = settings.DEV_LANGUAGES
settings.PROD_LANGUAGES = ('en-US',)
os.rename(cls.locale, cls.locale_bkp)
for loc in ('en-US', 'fr', 'templates'):
os.makedirs(os.path.join(cls.locale, loc, 'LC_MESSAGES'))
open(os.path.join(cls.locale, 'empty_file'), 'w').close()
@classmethod
def teardown_class(cls):
"""Remove the testing locale/ dir and bring back the backup."""
settings.DEV = cls.DEV
settings.PROD_LANGUAGES = cls.PROD_LANGUAGES
settings.DEV_LANGUAGES = cls.DEV_LANGUAGES
shutil.rmtree(cls.locale)
os.rename(cls.locale_bkp, cls.locale)
def test_build_dev_languages(self):
"""Test that the list of dev locales is built properly.
On dev instances, the list of accepted locales should correspond to
the per-locale directories in locale/.
"""
settings.DEV = True
assert (settings.DEV_LANGUAGES == ['en-US', 'fr'] or
settings.DEV_LANGUAGES == ['fr', 'en-US']), \
'DEV_LANGUAGES do not correspond to the contents of locale/.'
def test_dev_languages(self):
"""Test the accepted locales on dev instances.
On dev instances, allow locales defined in DEV_LANGUAGES.
"""
settings.DEV = True
# simulate the successful result of the DEV_LANGUAGES list
# comprehension defined in settings.
settings.DEV_LANGUAGES = ['en-US', 'fr']
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US', 'fr': 'fr'}, \
('DEV is True, but DEV_LANGUAGES are not used to define the '
'allowed locales.')
def test_prod_languages(self):
"""Test the accepted locales on prod instances.
On stage/prod instances, allow locales defined in PROD_LANGUAGES.
"""
settings.DEV = False
assert settings.LANGUAGE_URL_MAP == {'en-us': 'en-US'}, \
('DEV is False, but PROD_LANGUAGES are not used to define the '
'allowed locales.')
| Python | 0.000001 | |
bb9fd71dc06ac39b461b4109e341fe7cd4172c76 | use self.create_socket() | tests/twisted/file-transfer/test-receive-file-and-disconnect.py | tests/twisted/file-transfer/test-receive-file-and-disconnect.py | import socket
from file_transfer_helper import exec_file_transfer_test, ReceiveFileTest
class ReceiveFileAndDisconnectTest(ReceiveFileTest):
def receive_file(self):
s = self.create_socket()
s.connect(self.address)
# disconnect
self.conn.Disconnect()
self.q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
return True
if __name__ == '__main__':
exec_file_transfer_test(ReceiveFileAndDisconnectTest)
| import socket
from file_transfer_helper import exec_file_transfer_test, ReceiveFileTest
class ReceiveFileAndDisconnectTest(ReceiveFileTest):
def receive_file(self):
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(self.address)
# disconnect
self.conn.Disconnect()
self.q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
return True
if __name__ == '__main__':
exec_file_transfer_test(ReceiveFileAndDisconnectTest)
| Python | 0.000001 |
5dfd7b1534e19242ab778d535e2de13b424578f7 | Add examples | example.py | example.py | #!/usr/bin/python3
#
# Copyright (c) 2016, Fabian Affolter <fabian@affolter-engineering.ch>
# Released under the MIT license. See LICENSE file for details.
#
import fixerio
# Our base currency is the Czech Koruna instead of the default (EUR).
BASE = 'CZK'
exchange = fixerio.Fixer(base=BASE)
print('Current exchange rates:')
for currency, rate in exchange.convert().get('rates').items():
print('{} : {}'.format(currency, rate))
print('Current exchange rates for CHF:')
# Check if the target currency exists
if exchange.currency_available('CHF'):
print(exchange.convert().get('rates')['CHF'])
| Python | 0 | |
5afdd7775dd1aa232d3ca8fa2852f4a36918f224 | add management command to fix forms whose non-ascii chars are corrupted | corehq/apps/cleanup/management/commands/fix_corrupted_forms.py | corehq/apps/cleanup/management/commands/fix_corrupted_forms.py | # encoding: utf-8
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.management import BaseCommand
from six.moves import input
from corehq.apps.app_manager.dbaccessors import get_apps_by_id
SUSPICIOUS_STRINGS = [
international_character.encode('utf-8').decode('latin1')
for international_character in [
'á', 'é', 'í', 'ó', 'ú',
'Á', 'É', 'Í', 'Ó', 'Ú',
'’',
] # TODO - add more common non-ascii characters
]
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('app_id')
parser.add_argument('form_id')
parser.add_argument(
'--cleanup',
action='store_true',
dest='cleanup',
default=False,
)
# https://dimagi-dev.atlassian.net/browse/HI-747
def handle(self, domain, app_id, form_id, cleanup=False, **options):
app = get_apps_by_id(domain, app_id)[0]
form = app.get_form(form_id)
source = form.source
if any(suspicious_string in source for suspicious_string in SUSPICIOUS_STRINGS):
print('FORM CONTAINS SUSPICIOUS STRING')
if cleanup:
if 'y' == input('Did you confirm that there are no app updates to publish? [y/N]'):
print('Cleaning form...')
form.source = source.encode('latin1').decode('utf-8')
app.save()
print('Done.')
else:
print('Aborting...')
| Python | 0 | |
3cf56093b9d132a5089a70a12feb73c4be987da8 | Add mtnpatch.py, a script to parse and import a full monotone diff | contrib/mtnpatch.py | contrib/mtnpatch.py | #!/usr/bin/env python
import sys, os, string, getopt
mtncmd = "monotone"
def main(argv = None):
if argv is None:
argv = sys.argv
opts, list = getopt.getopt(sys.argv[1:], ':R')
if len(list) < 1:
print "You must specify a file"
return 2
reverse = False
for o, a in opts:
if o == "-R":
reverse = True
if os.path.exists(list[0]):
input = open(list[0], 'r')
renameFrom = ""
cmd = ""
if reverse:
print "patch -R -p0 < %s" % list[0]
else:
print "patch -p0 < %s" % list[0]
for line in input:
if len(line) > 0:
if line[0] == '#':
parts = line.split()
if len(parts) > 2:
cmd = parts[1]
# deal with whilespace in filenames (badly)
fileName = parts[2]
i = 3
while i < len(parts) and fileName.count('"') % 2:
fileName += " %s" % parts[i]
if cmd == "delete_file":
if reverse:
print "%s add %s" % (mtncmd, fileName)
else:
print "%s drop -e %s" % (mtncmd, fileName)
elif cmd == "add_file":
if reverse:
print "%s drop -e %s" % (mtncmd, fileName)
else:
print "%s add %s" % (mtncmd, fileName)
elif cmd == "rename_file":
renameFrom = fileName
elif cmd == "to" and renameFrom != "":
if reverse:
print "%s rename -e %s %s" % (mtncmd, fileName, renameFrom)
else:
print "%s rename -e %s %s" % (mtncmd, renameFrom, fileName)
renameFrom = ""
else:
cmd = ""
if __name__ == "__main__":
sys.exit(main())
| Python | 0 | |
4430b1957b642e87cd263455e371bf1d634101b0 | Add buildone command | cerbero/commands/buildone.py | cerbero/commands/buildone.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#from cerbero.oven import Oven
from cerbero.commands import Command, register_command
from cerbero.cookbook import CookBook
from cerbero.errors import FatalError
from cerbero.oven import Oven
from cerbero.utils import _, N_, ArgparseArgument
class BuildOne(Command):
    """Cerbero command that cooks exactly one recipe, skipping its dependencies."""

    doc = N_('Build or rebuild a single recipe without its dependencies')
    name = 'buildone'

    def __init__(self):
        arguments = [
            ArgparseArgument('recipe', nargs=1,
                help=_('name of the recipe to build')),
        ]
        Command.__init__(self, arguments)

    def run(self, config, args):
        # Look the requested recipe up in the cookbook built from the config.
        cookbook = CookBook.load(config)
        name = args.recipe[0]
        recipe = cookbook.get_recipe(name)
        if recipe is None:
            raise FatalError(_("Recipe %s not found" % name))
        # Force a rebuild and do not cook any of the recipe's dependencies.
        Oven(recipe, cookbook, force=True, no_deps=True).start_cooking()
register_command(BuildOne)
| Python | 0.000005 | |
6244e0b40d847687b7ff875a48fb08060efc97bf | Solve Within PyCharm | Newsolver.py | Newsolver.py | from __future__ import division
from pyomo.environ import *
from pyomo.opt import SolverFactory
# Abstract Miller-Tucker-Zemlin (MTZ) formulation of the travelling
# salesman problem; data is loaded from "salesman.dat".
model = AbstractModel()

model.M = Set()    # origin cities
model.N = Set()    # destination cities
model.n = Param()  # number of cities

model.c = Param(model.M, model.N)                   # travel cost c[i, j]
model.x = Var(model.M, model.N, domain=Binary)      # 1 if arc (i, j) is used
model.u = Var(model.M, domain=NonNegativeIntegers)  # MTZ ordering variables

# Renamed from `object` (which shadowed the `object` builtin).
def objective_rule(model):
    """Total cost of the selected arcs (self-loops excluded)."""
    return sum(model.c[i,j]*model.x[i,j] for (i,j) in model.M*model.N if i!=j)
model.obj = Objective(rule=objective_rule)

def const1(model,j):
    """Exactly one arc enters every city j."""
    return sum(model.x[i,j] for i in model.M if i!=j) == 1
model.cons = Constraint(model.N, rule= const1)

def const2(model,i):
    """Exactly one arc leaves every city i."""
    return sum(model.x[i,j] for j in model.N if j!=i) ==1
model.cons2 = Constraint(model.M, rule=const2)

def const3(model,i,j):
    """MTZ subtour-elimination constraints (skipped for the first city and self-loops)."""
    if i==j or i <2 or j<2:
        return Constraint.Skip
    return model.u[i]-model.u[j]+model.n*model.x[i,j] <= model.n-1
model.cons3 = Constraint(model.M, model.N, rule=const3)

instance = model.create("salesman.dat")
instance.pprint()

opt = SolverFactory('glpk')
results = opt.solve(instance, tee=True)
results.write()

# Report the value of every decision variable of the solved instance.
instance.solutions.load_from(results)
for v in instance.component_objects(Var, active=True):
    print ("Variable",v)
    varobject = getattr(instance, str(v))
    for index in varobject:
        print ("   ",index, varobject[index].value)
00f766b24865e8010411105794f20bc0ef39a6dc | Add py-sphinxcontrib-devhelp package (#13278) | var/spack/repos/builtin/packages/py-sphinxcontrib-devhelp/package.py | var/spack/repos/builtin/packages/py-sphinxcontrib-devhelp/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySphinxcontribDevhelp(PythonPackage):
    """sphinxcontrib-devhelp is a sphinx extension which outputs
    Devhelp document."""

    homepage = "http://sphinx-doc.org/"
    url      = "https://pypi.io/packages/source/s/sphinxcontrib-devhelp/sphinxcontrib-devhelp-1.0.1.tar.gz"

    version('1.0.1', sha256='6c64b077937330a9128a4da74586e8c2130262f014689b4b89e2d08ee7294a34')

    # Python 3.5+ is required at build and run time; setuptools only at build.
    depends_on('python@3.5:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')

    def test(self):
        # Requires sphinx, creating a circular dependency
        pass
| Python | 0 | |
fe14781a46a60a4fdd0101468ae487a691a2154a | Add ontap_command.py Module (#44190) | lib/ansible/modules/storage/netapp/na_ontap_command.py | lib/ansible/modules/storage/netapp/na_ontap_command.py | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
author: NetApp Ansible Team (ng-ansibleteam@netapp.com)
description:
- "Run system-cli commands on ONTAP"
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_command
short_description: "NetApp ONTAP Run any cli command"
version_added: "2.7"
options:
command:
description:
- a comma separated list containing the command and arguments.
'''
EXAMPLES = """
- name: run ontap cli command
na_ontap_command:
hostname: "{{ hostname }} "
username: "{{ admin username }}"
password: "{{ admin password }}"
command: ['version']
- name: run ontap cli command
na_ontap_command:
hostname: "{{ hostname }} "
username: "{{ admin username }}"
password: "{{ admin password }}"
command: ['network', 'interface', 'show']
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPCommand(object):
    """Run an arbitrary 'system-cli' command against ONTAP over ZAPI."""

    def __init__(self):
        # Base connection options plus the required 'command' argument list.
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            command=dict(required=True, type='list')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.command = self.module.params['command']
        if not HAS_NETAPP_LIB:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)

    def run_command(self):
        """Build the system-cli ZAPI request and return its XML output as a string."""
        cli_request = netapp_utils.zapi.NaElement("system-cli")
        cli_args = netapp_utils.zapi.NaElement("args")
        for argument in self.command:
            cli_args.add_new_child('arg', argument)
        cli_request.add_child_elem(cli_args)
        try:
            reply = self.server.invoke_successfully(cli_request, True)
            return reply.to_string()
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error running command %s: %s' %
                                  (self.command, to_native(error)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Module entry point: always reports changed=True with the CLI output."""
        self.module.exit_json(changed=True, msg=self.run_command())
def main():
    """Instantiate the command runner and execute it (playbook entry point)."""
    NetAppONTAPCommand().apply()
if __name__ == '__main__':
main()
| Python | 0 | |
295afe540c24ded86353402d87c42e072f7a64fa | Initialize makePublicPrivateKeys | books/CrackingCodesWithPython/Chapter23/makePublicPrivateKeys.py | books/CrackingCodesWithPython/Chapter23/makePublicPrivateKeys.py | # Public Key Generator
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import random, sys, os, primeNum, cryptomath
def main():
    # Generate a 1024-bit RSA key pair and write it to al_sweigart_*.txt files.
    print('Making key files...')
    makeKeyFiles('al_sweigart', 1024)
    print('Key files made.')
def generateKey(keySize):
    """Create a (publicKey, privateKey) pair of keySize-bit RSA keys.

    Each key is an (n, exponent) tuple: the shared modulus together with
    the public exponent e or the private exponent d, respectively.
    """
    # Step 1: Create two prime numbers, p and q. Calculate n = p * q:
    print('Generating p prime...')
    p = q = 0
    while p == q:  # keep drawing until the two primes differ
        p = primeNum.generateLargePrime(keySize)
        q = primeNum.generateLargePrime(keySize)
    n = p * q

    # Step 2: Create a number e that is relatively prime to (p-1)*(q-1):
    print('Generating e that is relatively prime to (p-1)*(q-1)...')
    phi = (p - 1) * (q - 1)
    while True:
        # Keep trying random numbers for e until one is valid:
        e = random.randrange(2 ** (keySize - 1), 2 ** (keySize))
        if cryptomath.gcd(e, phi) == 1:
            break

    # Step 3: Calculate d, the mod inverse of e:
    print('Calculating d that is mod inverse of e...')
    d = cryptomath.findModInverse(e, phi)

    publicKey = (n, e)
    privateKey = (n, d)

    print('Public key:', publicKey)
    print('Private key:', privateKey)

    return (publicKey, privateKey)
def makeKeyFiles(name, keySize):
    """Create '<name>_pubkey.txt' and '<name>_privkey.txt'.

    Each file holds 'keySize,n,x' (comma-delimited) where x is the public
    exponent e or the private exponent d, respectively.
    """
    # Our safety check will prevent us from overwriting our old key files:
    if os.path.exists('%s_pubkey.txt' % (name)) or os.path.exists('%s_privkey.txt' % (name)):
        sys.exit('WARNING: The file %s_pubkey.txt or %s_privkey.txt already exists! Use a different name or delete these files and rerun this program.' % (name, name))

    publicKey, privateKey = generateKey(keySize)

    print()
    print('The public key is a %s and a %s digit number.' % (len(str(publicKey[0])), len(str(publicKey[1]))))
    print('Writing public key to file %s_pubkey.txt...' % (name))
    # 'with' guarantees the handle is closed even if the write fails.
    with open('%s_pubkey.txt' % (name), 'w') as fo:
        fo.write('%s,%s,%s' % (keySize, publicKey[0], publicKey[1]))

    print()
    # Bug fix: report the digit counts of the *private* key here — the
    # original printed publicKey's lengths for both messages.
    print('The private key is a %s and a %s digit number.' % (len(str(privateKey[0])), len(str(privateKey[1]))))
    print('Writing private key to file %s_privkey.txt...' % (name))
    with open('%s_privkey.txt' % (name), 'w') as fo:
        fo.write('%s,%s,%s' % (keySize, privateKey[0], privateKey[1]))
# If makePublicPrivateKeys.py is run (instead of imported as a module),
# call the main() function:
if __name__ == '__main__':
main() | Python | 0.000004 | |
3e97731449027e5ac0d3a047e1b872956feac528 | Create cracking-the-safe.py | Python/cracking-the-safe.py | Python/cracking-the-safe.py | # Time: O(k^n)
# Space: O(k^n)
# There is a box protected by a password.
# The password is n digits, where each letter can be one of the first k digits 0, 1, ..., k-1.
#
# You can keep inputting the password,
# the password will automatically be matched against the last n digits entered.
#
# For example, assuming the password is "345",
# I can open it when I type "012345", but I enter a total of 6 digits.
#
# Please return any string of minimum length that is guaranteed to open the box after the entire string is inputted.
#
# Example 1:
# Input: n = 1, k = 2
# Output: "01"
# Note: "10" will be accepted too.
#
# Example 2:
# Input: n = 2, k = 2
# Output: "00110"
# Note: "01100", "10011", "11001" will be accepted too.
#
# Note:
# - n will be in the range [1, 4].
# - k will be in the range [1, 10].
# - k^n will be at most 4096.
# https://en.wikipedia.org/wiki/De_Bruijn_sequence
class Solution(object):
    """Iterative de Bruijn sequence construction (see module comment for the link)."""

    def crackSafe(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: str
        """
        # M counts the (n-1)-digit prefixes; P threads every n-digit state
        # into cycles by mapping state j -> its successor.
        M = k**(n-1)
        # Fix: range() instead of the Python-2-only xrange(), so the code
        # runs identically on Python 2 and Python 3.
        P = [q*k+i for i in range(k) for q in range(M)]
        result = []
        for i in range(k**n):
            j = i
            # Walk the cycle starting at i, marking each state consumed (-1).
            while P[j] >= 0:
                result.append(str(j//M))
                P[j], j = -1, P[j]
        return "".join(result) + "0"*(n-1)
# Time: O(n *k^n)
# Space: O(n *k^n)
class Solution2(object):
    """Recursive DFS construction of a de Bruijn sequence."""

    def crackSafe(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: str
        """
        def dfs(k, node, lookup, result):
            # Visit every unseen n-digit combination reachable by appending
            # one digit to the current (n-1)-digit node; record the digit on
            # the way back out (post-order), yielding an Eulerian path.
            # Fix: range() instead of the Python-2-only xrange().
            for i in range(k):
                neigbor = node + str(i)
                if neigbor not in lookup:
                    lookup.add(neigbor)
                    dfs(k, neigbor[1:], lookup, result)
                    result.append(str(i))

        lookup = set()
        result = []
        dfs(k, "0"*(n-1), lookup, result)
        return "".join(result) + "0"*(n-1)
| Python | 0.000004 | |
9c2487ab2c3b8d12e5a5f0f179b2a1fd79496b17 | add tests | doajtest/unit/event_consumers/test_application_publisher_in_progress_notify.py | doajtest/unit/event_consumers/test_application_publisher_in_progress_notify.py | from portality import models
from portality import constants
from portality.bll import exceptions
from doajtest.helpers import DoajTestCase
from doajtest.fixtures import ApplicationFixtureFactory
import time
from portality.events.consumers.application_publisher_inprogress_notify import ApplicationPublisherInprogresNotify
class TestApplicationPublisherInProgressNotify(DoajTestCase):
    """Tests for the consumer that notifies publishers when an application
    moves from 'pending' to 'in progress'."""

    def setUp(self):
        super(TestApplicationPublisherInProgressNotify, self).setUp()

    def tearDown(self):
        super(TestApplicationPublisherInProgressNotify, self).tearDown()

    def test_consumes(self):
        # Only pending -> in progress status-change events should be consumed.
        source = ApplicationFixtureFactory.make_application_source()  # NOTE(review): unused here
        event = models.Event(constants.EVENT_APPLICATION_STATUS, context={"application": {}, "old_status": "pending", "new_status": "in progress"})
        assert ApplicationPublisherInprogresNotify.consumes(event)

        event = models.Event(constants.EVENT_APPLICATION_STATUS,
                             context={"application": {}, "old_status": "in progress", "new_status": "in progress"})
        assert not ApplicationPublisherInprogresNotify.consumes(event)

        event = models.Event("test:event", context={"application" : {}})
        assert not ApplicationPublisherInprogresNotify.consumes(event)

        event = models.Event(constants.EVENT_APPLICATION_STATUS)
        assert not ApplicationPublisherInprogresNotify.consumes(event)

    def test_consume_success(self):
        self._make_and_push_test_context("/")

        acc = models.Account()
        acc.set_id("publisher")
        acc.set_email("test@example.com")
        acc.save()

        source = ApplicationFixtureFactory.make_application_source()
        event = models.Event(constants.EVENT_APPLICATION_STATUS,
                             context={"application": source, "old_status": "pending",
                                      "new_status": "in progress"})
        # event = models.Event(constants.EVENT_APPLICATION_STATUS, context={"application": "abcdefghijk", "old_status": "in progress", "new_status": "revisions_required"})
        ApplicationPublisherInprogresNotify.consume(event)

        # NOTE(review): fixed sleep to let the notification be written/indexed —
        # timing-based, could be flaky on slow machines.
        time.sleep(2)
        ns = models.Notification.all()
        assert len(ns) == 1

        n = ns[0]
        assert n.who == "publisher", "Expected: {}, Received: {}".format("publisher", n.who)
        assert n.created_by == ApplicationPublisherInprogresNotify.ID, "Expected: {}, Received: {}".format(ApplicationPublisherInprogresNotify.ID, n.created_by)
        assert n.classification == constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE, "Expected: {}, Received: {}".format(constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE, n.classification)
        assert n.message is not None
        assert n.action is None
        assert not n.is_seen()

    def test_consume_fail(self):
        # An event whose 'application' payload has no resolvable object must raise.
        event = models.Event(constants.EVENT_APPLICATION_ASSED_ASSIGNED, context={"application": {"dummy" : "data"}})
        with self.assertRaises(exceptions.NoSuchObjectException):
            ApplicationPublisherInprogresNotify.consume(event)
| Python | 0 | |
73ae4839941b802870eaba29b67c8b8a89e43c71 | add backend_service_migration script to call the migration handler | backend_service_migration.py | backend_service_migration.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The script takes the arguments and run the backend service migration handler.
Before running:
1. If not already done, enable the Compute Engine API
and check the quota for your project at
https://console.developers.google.com/apis/api/compute
2. This sample uses Application Default Credentials for authentication.
If not already done, install the gcloud CLI from
https://cloud.google.com/sdk and run
`gcloud beta auth application-default login`.
For more information, see
https://developers.google.com/identity/protocols/application-default-credentials
3. Install the Python client library for Google APIs by running
`pip install --upgrade google-api-python-client`
Run the script by terminal, for example:
python3 instance_group_migration.py --project_id=test-project
--zone=us-central1-a --instance_group_name=test-group --network=test-network
--subnetwork=test-network --preserve_external_ip=False
"""
import warnings
import argparse
from vm_network_migration.handlers.backend_service_migration import BackendServiceMigration
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--project_id',
                        help='The project ID of the backend service.')
    parser.add_argument('--region', default=None,
                        help='The region of the the backend service.')
    parser.add_argument('--backend_service_name',
                        help='The name of the the backend service')
    parser.add_argument('--network', help='The name of the new network')
    parser.add_argument(
        '--subnetwork',
        default=None,
        help='The name of the subnetwork. For auto mode networks,'
             ' this field is optional')
    parser.add_argument(
        '--preserve_external_ip',
        default=False,
        help='Preserve the external IP address')

    args = parser.parse_args()
    # argparse delivers the flag as a string; anything other than the exact
    # string 'True' is treated as False.
    if args.preserve_external_ip == 'True':
        args.preserve_external_ip = True
    else:
        args.preserve_external_ip = False

    if args.preserve_external_ip:
        # Warn and ask for interactive confirmation before promoting an
        # ephemeral IP to a reserved static one.
        warnings.warn(
            'You choose to preserve the external IP. If the original instance '
            'has an ephemeral IP, it will be reserved as a static external IP after the '
            'execution.',
            Warning)
        continue_execution = input(
            'Do you still want to preserve the external IP? y/n: ')
        if continue_execution == 'n':
            args.preserve_external_ip = False

    backend_service_migration = BackendServiceMigration(args.project_id,
                                                        args.backend_service_name,
                                                        args.network,
                                                        args.subnetwork,
                                                        args.preserve_external_ip,
                                                        args.region)
    backend_service_migration.network_migration()
| Python | 0.000001 | |
a1b88f50edf9f30f3840c50067545f2d315596aa | create compare.py | part-1/compare.py | part-1/compare.py | # coding: utf8
print '''
CPython implementation detail: Objects of different types except numbers are ordered by their type names; objects of the same types that don’t support proper comparison are ordered by their address.
>>> 5 < 'foo' # <type 'int'> < <type 'str'>
True
>>> 5 < (1, 2)
True
>>> 5 < {}
True
>>> 5 < [1, 2]
True
>>> [1, 2] > 'foo' # 'list' < 'str'
False
>>> (1, 2) > 'foo' # 'tuple' > 'str'
True
'''
| Python | 0.000001 | |
696960eba9da48a8eb4830f464ccacb792e0c435 | Get the list of entities found by the name given in input | sara_flexbe_states/src/sara_flexbe_states/List_Entities_By_Name.py | sara_flexbe_states/src/sara_flexbe_states/List_Entities_By_Name.py | #!/usr/bin/env python
from flexbe_core.proxy import ProxySubscriberCached
from flexbe_core import EventState, Logger
from sara_msgs.msg import Entities
from geometry_msgs.msg import Pose
from tf.transformations import euler_from_quaternion
import math
class list_found_entities(EventState):
    '''
    List the entities whose name matches the name given in input, sorted by a
    blend of distance and how centered they are in front of the robot.

    -- frontality_level     float    weight in [0, 1]: 1 ranks by alignment with
                                     the robot's heading, 0 by plain distance

    ># name                 string   name of the entities to look for

    #> list_found_entities  object   matching entities, best candidate first
    #> number               int      how many entities matched

    <= found       at least one matching entity was found
    <= not_found   no matching entity was found
    '''

    def __init__(self, frontality_level):
        '''
        Constructor
        '''
        super(list_found_entities, self).__init__(outcomes=['found', 'not_found'], output_keys=['list_found_entities', 'number'], input_keys=['name'])
        self._sub = ProxySubscriberCached({'/entities': Entities})
        self._topic = "/robot_pose"
        self._subpos = ProxySubscriberCached({self._topic: Pose})
        self.frontality_level = frontality_level
        self.mypose = None
        self.message = None

    def execute(self, userdata):
        # Refresh the robot pose and the latest entity list when available.
        if self._subpos.has_msg(self._topic):
            self.mypose = userdata.pose = self._subpos.get_last_msg(self._topic)

        if self._sub.has_msg('/entities'):
            Logger.loginfo('getting message')
            self.message = self._sub.get_last_msg('/entities')
            self._sub.remove_last_msg('/entities')

        if self.message is not None and self.mypose is not None:
            found_entities = self.list(userdata.name)
            userdata.list_found_entities = found_entities
            userdata.number = len(found_entities)
            if len(found_entities) != 0:
                return 'found'
            else:
                return 'not_found'

    def list(self, name=None):
        # Bug fix: filter on the name supplied through userdata instead of the
        # literal string 'name', which could never match a real entity name.
        found_entities = []
        wraps = []
        for entity in self.message.entities:
            if entity.name == name:
                wrap = wrapper()
                wrap.init(self.mypose, entity, self.frontality_level)
                wraps.append(wrap)
        # Best (lowest) score first.
        wraps.sort(key=wrapper.key)
        for wrap in wraps:
            found_entities.append(wrap.entity)
        return found_entities
class wrapper():
    """Pairs an entity with a ranking score (lower is better)."""

    def init(self, mypose, entity, frontality_level):
        self.entity = entity
        # Entity position relative to the robot.
        x = entity.position.x - mypose.position.x
        y = entity.position.y - mypose.position.y
        quat = [mypose.orientation.x, mypose.orientation.y, mypose.orientation.z, mypose.orientation.w]
        euler = euler_from_quaternion(quat)
        A = euler[2]      # robot yaw
        a = math.tan(A)   # slope of the heading line (undefined near +/-90 deg yaw)
        # Bug fix: the original derived the intercept b from the entity's own
        # coordinates, which made |y - a*x - b| identically zero, and divided
        # by (1 + b**2) instead of (1 + a**2). In robot-relative coordinates
        # the heading line passes through the origin, so the perpendicular
        # distance to it is |y - a*x| / sqrt(1 + a**2).
        self.dist = (abs(y - a * x) / (1 + a ** 2) ** 0.5) * frontality_level
        # Blend in the plain Euclidean distance to the entity.
        self.dist += ((x ** 2 + y ** 2) ** 0.5) * (1 - frontality_level)
        # Favour entities detected with high confidence.
        self.dist /= entity.probability ** 2

    def key(self):
        """Sort key: the blended frontality/distance score."""
        return self.dist
| Python | 0.999836 | |
c7c4a3c68e4950049db4b113576cfa3b2f6748f5 | add test data | corehq/apps/reports/tests/data/case_list_report_data.py | corehq/apps/reports/tests/data/case_list_report_data.py | dummy_user_list = [
{
'domain': 'case-list-test',
'username': 'active-worker-1',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': 'activeworker@commcarehq.com',
'uuid': 'active1',
'is_active': True,
'doc_type': 'CommcareUser'
},
{
'domain': 'case-list-test',
'username': 'active-worker-2',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': 'activeworker2@commcarehq.com',
'uuid': 'active2',
'is_active': True,
'doc_type': 'CommcareUser'
},
{
'domain': 'case-list-test',
'username': 'deactivated-worker-1',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': 'deactiveworker1@commcarehq.com',
'uuid': 'deactive1',
'is_active': False,
'doc_type': 'CommcareUser'
},
{
'domain': 'case-list-test',
'username': 'web-worker-1',
'password': 'Some secret Pass',
'created_by': None,
'created_via': None,
'email': 'webworker@commcarehq.com',
'uuid': 'web1',
'is_active': True,
"timezone": "UTC",
'doc_type': 'WebUser'
},
]
# Test fixture: one case per owner from dummy_user_list above (owner_id
# matches the users' uuid values), plus a second case for 'active1'.
dummy_case_list = [
    {
        '_id': 'id-1',
        'domain': 'case-list-test',
        'name': 'Deactivated Owner case 1',
        'owner_id': 'deactive1',
        'user_id': 'deactivated-worker-1@commcarehq.org',
        'type': 'case',
        'opened_on': '2021-02-24T00:00:00.000000Z',
        'modified_on': None,
        'closed_on': None,
    },
    {
        '_id': 'id-2',
        'domain': 'case-list-test',
        'name': 'Active Owner case 1',
        'owner_id': 'active1',
        'user_id': 'active-worker-1@commcarehq.org',
        'type': 'case',
        'opened_on': '2021-02-24T00:00:00.000000Z',
        'modified_on': None,
        'closed_on': None,
    },
    {
        '_id': 'id-3',
        'domain': 'case-list-test',
        'name': 'Active Owner case 2',
        'owner_id': 'active1',
        'user_id': 'active-worker-1@commcarehq.org',
        'type': 'case',
        'opened_on': '2021-02-24T00:00:00.000000Z',
        'modified_on': None,
        'closed_on': None,
    },
    {
        '_id': 'id-4',
        'domain': 'case-list-test',
        'name': 'Web Owner case 1',
        'owner_id': 'web1',
        'user_id': 'active-worker-1@commcarehq.org',
        'type': 'case',
        'opened_on': '2021-02-24T00:00:00.000000Z',
        'modified_on': None,
        'closed_on': None,
    },
    {
        '_id': 'id-5',
        'domain': 'case-list-test',
        # NOTE(review): this name duplicates case id-3's — confirm intentional.
        'name': 'Active Owner case 2',
        'owner_id': 'active2',
        'user_id': 'active-worker-2@commcarehq.org',
        'type': 'case',
        'opened_on': '2021-02-24T00:00:00.000000Z',
        'modified_on': None,
        'closed_on': None,
    },
]
| Python | 0.000006 | |
3f24e7b51281031fa9713b737a9647b305105a89 | Write unittest for parse_file() in ConfigReader.py | src/unittests.py | src/unittests.py | from ConfigReader import ConfigReader as cr
import unittest
import os
class testConfigReader(unittest.TestCase):
    """Test cases for configReader"""
    def setUp(self):
        """Set up some important variables"""
        self.example_config_filename = 'testConfig.config'

        # Set some values
        # *_string variables are what gets written to the file; the unsuffixed
        # / typed variants are what parse_file() is expected to produce.
        oauth_string = 'xxxxxxxxxxx'
        nick_string = 'justinfan4242'
        channels_string = 'channel1 channel2'
        channels_list = ['channel1', 'channel2']
        log_string = 'default.log'
        # time_format is quoted in the file; the parser should strip the quotes.
        time_format_string = "'[%Y-%m-%d %H:%M:%S]'"
        time_format_value = '[%Y-%m-%d %H:%M:%S]'
        host_string = 'irc.twitch.tv'
        port_string = '6667'
        port_int = 6667
        block_size_string = '4096'
        block_size_int = 4096
        reconnect_timer_string = '600'
        reconnect_timer_int = 600
        stayalive_timer_string = '0'
        stayalive_timer_int = 0
        connect_timeout_string = '10'
        # NOTE(review): despite the name this is an int literal — presumably
        # the parser's numeric coercion makes it compare equal; confirm.
        connect_timeout_float = 10
        receive_timeout_string = '0.1'
        receive_timeout_float = 0.1

        # Write a config file
        config_file_string = 'oauth: ' + oauth_string + '\n'
        config_file_string += 'nick: ' + nick_string + '\n'
        config_file_string += 'channels: ' + channels_string + '\n'
        config_file_string += 'log: ' + log_string + '\n'
        config_file_string += 'time_format: ' + time_format_string + '\n'
        config_file_string += 'host: ' + host_string + '\n'
        config_file_string += 'port: ' + port_string + '\n'
        config_file_string += 'block_size: ' + block_size_string + '\n'
        config_file_string += 'reconnect_timer: ' + reconnect_timer_string + '\n'
        config_file_string += 'stayalive_timer: ' + stayalive_timer_string + '\n'
        config_file_string += 'connect_timeout: ' + connect_timeout_string + '\n'
        config_file_string += 'receive_timeout: ' + receive_timeout_string + '\n'

        config_example = open(self.example_config_filename,'w')
        config_example.write(config_file_string)
        config_example.close()

        # The parsed configuration parse_file() should yield.
        self.exemplar_config = {
            'oauth': oauth_string,
            'nick': nick_string,
            'channels': channels_list,
            'log': log_string,
            'time_format': time_format_value,
            'host': host_string,
            'port': port_int,
            'block_size': block_size_int,
            'reconnect_timer': reconnect_timer_int,
            'stayalive_timer': stayalive_timer_int,
            'connect_timeout': connect_timeout_float,
            'receive_timeout': receive_timeout_float
        }

    def test_parse_file(self):
        """Test parse_file()"""
        reader = cr()
        reader.parse_file(self.example_config_filename)
        self.assertEqual(reader.configuration, self.exemplar_config)

    def tearDown(self):
        """Delete the example config file, etc"""
        os.remove(self.example_config_filename)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | |
bbd6e538ec45c3650b7b3b7d520613fb4967236a | Print 4x4 grid | python/reddit/think_python_grid.py | python/reddit/think_python_grid.py | def grid():
delimiter_row = ('{}{}'.format('+ ', '- ' * 4) * 4) + '+'
openspace_row = ('{}{}'.format('|', ' ' * 9) * 4) + '|'
for box_row in range(4 * 4):
if box_row % 4 == 0:
print(delimiter_row)
print(openspace_row)
else:
print(openspace_row)
print(delimiter_row)
grid()
| Python | 0.000017 | |
6e535a2d597f172d9342fb8a547335890c474b49 | Add a sample config file | src/config-sample.py | src/config-sample.py | FLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
| Python | 0.000001 | |
7b545e210aa534b5d76e30769a125285cb40bfa8 | Create PrintFunctionBancorFormula.py | solidity/python/constants/PrintFunctionBancorFormula.py | solidity/python/constants/PrintFunctionBancorFormula.py | from math import factorial
MIN_PRECISION = 32
MAX_PRECISION = 127
NUM_OF_PRECISIONS = 128
NUM_OF_COEFS = 34

# coefficients[i] = 34!/i!, so fixedExpUnsafe can evaluate the Taylor series
# of e^x in pure integer arithmetic (Python 2 '/' keeps these exact ints).
maxFactorial = factorial(NUM_OF_COEFS)
coefficients = [maxFactorial/factorial(i) for i in range(NUM_OF_COEFS)]
def fixedExpUnsafe(x,precision):
    # Fixed-point Taylor approximation of e^x, with x and the result scaled
    # by 2**precision. "Unsafe": safeMul/safeAdd assert on 256-bit overflow
    # rather than wrapping, which binarySearch below relies on to find the
    # largest admissible input per precision.
    xi = x
    res = safeMul(coefficients[0],1 << precision)
    for i in range(1,NUM_OF_COEFS-1):
        res = safeAdd(res,safeMul(xi,coefficients[i]))
        xi = safeMul(xi,x) >> precision
    res = safeAdd(res,safeMul(xi,coefficients[-1]))
    # Integer division under Python 2 (all operands are ints).
    return res / coefficients[0]
def safeMul(x,y):
    """Multiply x and y, asserting the product fits in 256 bits."""
    product = x * y
    assert product < (1 << 256)
    return product
def safeAdd(x,y):
    """Add x and y, asserting the sum fits in 256 bits."""
    total = x + y
    assert total < (1 << 256)
    return total
def binarySearch(func,args):
    """Return the largest v in [1, 2**256) for which func(v, args) does not raise.

    Assumes func succeeds on a prefix of the range and fails from some point
    onward (monotone), as fixedExpUnsafe does via its overflow assertions.
    """
    lo = 1
    hi = 1 << 256
    while lo+1 < hi:
        # Fix: '//' keeps mid an int under Python 3 as well ('/' would yield
        # a float there); identical to the original on Python 2.
        mid = (lo+hi)//2
        try:
            func(mid,args)
            lo = mid
        # Fix: 'except Exception,error' is a syntax error on Python 3; the
        # bound name was unused anyway.
        except Exception:
            hi = mid
    try:
        func(hi,args)
        return hi
    except Exception:
        func(lo,args)
        return lo
# For every precision, find the largest input fixedExpUnsafe accepts
# without overflowing 256 bits.
maxExpArray = [0]*NUM_OF_PRECISIONS
for precision in range(NUM_OF_PRECISIONS):
    maxExpArray[precision] = binarySearch(fixedExpUnsafe,precision)

# Emit the Solidity constructor that hard-codes the table; entries outside
# [MIN_PRECISION, MAX_PRECISION] are emitted commented-out ('//').
print '    function BancorFormula() {'
for precision in range(NUM_OF_PRECISIONS):
    prefix = '  ' if MIN_PRECISION <= precision <= MAX_PRECISION else '//'
    print '    {} maxExpArray[{:3d}] = 0x{:x};'.format(prefix,precision,maxExpArray[precision])
print '    }'
| Python | 0.000001 | |
8b0130ccb318f7f04daf8e8fa7532c88afb9f7c2 | convert eexec doctests into eexec_test.py | Tests/misc/eexec_test.py | Tests/misc/eexec_test.py | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.eexec import decrypt, encrypt
def test_decrypt():
testStr = b"\0\0asdadads asds\265"
decryptedStr, R = decrypt(testStr, 12321)
assert decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
assert R == 36142
def test_encrypt():
testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
encryptedStr, R = encrypt(testStr, 12321)
assert encryptedStr == b"\0\0asdadads asds\265"
assert R == 36142
| Python | 0.000319 | |
9ea8b1ea9eaf7906abaf9cfe73bbe19b581fa562 | Add TVA. | inspectors/tva.py | inspectors/tva.py | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://oig.tva.gov
# Oldest report: 1998
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://oig.tva.gov/reports/{year}.html"
SEMIANNUAL_REPORTS_URL = "http://oig.tva.gov/reports/oig-reports.xml"
PDF_REPORT_FORMAT = "http://oig.tva.gov/reports/node/semi/{report_number}/semi{report_number}.pdf"
def run(options):
  """Scrape the yearly audit pages and the semiannual XML feed, saving each report."""
  year_range = inspector.year_range(options)

  # Pull the reports
  for year in year_range:
    if year < 2005:  # This is the earliest audits go back
      continue
    url = AUDIT_REPORTS_URL.format(year=year)
    doc = BeautifulSoup(utils.download(url))
    results = doc.select("div.content")
    for result in results:
      report = report_from(result, url, year_range)
      if report:
        inspector.save_report(report)

  # Semiannual reports come from a single XML feed with <report> elements.
  doc = BeautifulSoup(utils.download(SEMIANNUAL_REPORTS_URL))
  results = doc.select("report")
  for result in results:
    report = semiannual_report_from(result, year_range)
    if report:
      inspector.save_report(report)
def report_from(result, landing_url, year_range):
  """Parse one audit entry ('div.content') into a report dict.

  Returns None when the report's year is outside the requested range.
  """
  header = result.find_previous("p", class_="heading")
  # Header text is "<date> - <title> - <report id>"; split at most twice so
  # dashes inside the id survive.
  published_on_text, title, report_id = header.text.split("-", 2)
  title = title.strip()
  report_id = report_id.strip().replace("/", "-")

  # "Summary only" entries have no downloadable PDF.
  if "summary only" in result.text.lower():
    unreleased = True
    report_url = None
  else:
    unreleased = False
    report_url = urljoin(landing_url, result.find("a").get('href'))

  # Skip the last 'p' since it is just the report link
  summary_text = [paragraph.text for paragraph in result.findAll("p")[:-1]]
  summary = "\n".join(summary_text)

  # Some reports list multiple dates. Split on '&' to get the latter.
  published_on_text = published_on_text.split("&")[-1].strip()
  published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')

  if published_on.year not in year_range:
    logging.debug("[%s] Skipping, not in requested range." % report_url)
    return

  report = {
    'inspector': 'tva',
    'inspector_url': 'http://oig.tva.gov',
    'agency': 'tva',
    'agency_name': 'Tennessee Valley Authority',
    'report_id': report_id,
    'url': report_url,
    'title': title,
    'summary': summary,
    'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
  }
  if unreleased:
    report['unreleased'] = unreleased
    report['landing_url'] = landing_url
  return report
def semiannual_report_from(result, year_range):
  """Parse one <report> feed element into a report dict.

  Returns None when the report's year is outside the requested range.
  """
  report_url = urljoin(SEMIANNUAL_REPORTS_URL, result.get('pdfurl'))
  if report_url.endswith("index.html"):
    # Sometime they link to the landing page instead of the report. We convert
    # the url to get the actual report.
    report_number = report_url.split("/")[-2]
    report_url = PDF_REPORT_FORMAT.format(report_number=report_number)

  # The report id is the PDF filename without its extension.
  report_filename = report_url.split("/")[-1]
  report_id, _ = os.path.splitext(report_filename)

  published_on_text = result.find("date").text
  published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')

  if published_on.year not in year_range:
    logging.debug("[%s] Skipping, not in requested range." % report_url)
    return

  title = "Semiannual Report {}".format(published_on_text)
  # Prefer the feed's own title, keeping the date-based one as a suffix.
  alternative_title = result.find("title").text.strip()
  if alternative_title:
    title = "{} ({})".format(alternative_title, title)
  summary = result.find("summary").text.strip()

  report = {
    'inspector': 'tva',
    'inspector_url': 'http://oig.tva.gov',
    'agency': 'tva',
    'agency_name': 'Tennessee Valley Authority',
    'report_id': report_id,
    'url': report_url,
    'title': title,
    'summary': summary,
    'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
  }
  return report
utils.run(run) if (__name__ == "__main__") else None
| Python | 0.000001 | |
6515e45e6d717ed2c84789a5d0941533465496b7 | update test | h2o-py/tests/testdir_munging/pyunit_insert_missing.py | h2o-py/tests/testdir_munging/pyunit_insert_missing.py | from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def insert_missing():
    """Pyunit check for H2OFrame.insert_missing_values().

    Builds a small mixed-type frame, then verifies that fraction=0.0
    inserts no NAs and that fraction=1.0 turns every cell into an NA.
    """
    # Connect to a pre-existing cluster
    data = [[1, 2, 3, 1, 'a', 1, 9],
            [1, 6, 4, 2, 'a', 1, 9],
            [2, 3, 8, 6, 'b', 1, 9],
            [3, 4, 3, 2, 'b', 3, 8],
            [4, 5, 9, 5, 'c', 2, 8],
            [5, 7, 10,7, 'b', 8, 8]]
    h2o_data = h2o.H2OFrame(data)

    h2o_data.insert_missing_values(fraction = 0.0)
    print(h2o_data)
    # Iterating the frame appears to yield per-column frames whose NA counts
    # are summed -- TODO confirm H2OFrame iteration semantics.
    num_nas = sum([v.isna().sum() for v in h2o_data])
    assert num_nas == 0, "Expected no missing values inserted, but got {0}".format(num_nas)

    h2o_data.insert_missing_values(fraction = 1.0)
    print(h2o_data)
    num_nas = sum([v.isna().sum() for v in h2o_data])
    assert num_nas == h2o_data.nrow*h2o_data.ncol, "Expected all missing values inserted, but got {0}".format(num_nas)


if __name__ == "__main__":
    # Run under the pyunit harness when invoked directly.
    pyunit_utils.standalone_test(insert_missing)
else:
    insert_missing()
| from builtins import zip
from builtins import range
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def insert_missing():
# Connect to a pre-existing cluster
data = [[1, 2, 3, 1, 'a', 1, 9],
[1, 6, 4, 2, 'a', 1, 9],
[2, 3, 8, 6, 'b', 1, 9],
[3, 4, 3, 2, 'b', 3, 8],
[4, 5, 9, 5, 'c', 2, 8],
[5, 7, 10,7, 'b', 8, 8]]
h2o_data = h2o.H2OFrame(data)
h2o_data.insert_missing_values(fraction = 0.0)
num_nas = sum([h2o_data[c].isna().sum() for c in range(h2o_data.ncol)])
assert num_nas == 0, "Expected no missing values inserted, but got {0}".format(num_nas)
h2o_data.insert_missing_values(fraction = 1.0)
num_nas = sum([h2o_data[c].isna().sum() for c in range(h2o_data.ncol)])
assert num_nas == h2o_data.nrow*h2o_data.ncol, "Expected all missing values inserted, but got {0}".format(num_nas)
if __name__ == "__main__":
pyunit_utils.standalone_test(insert_missing)
else:
insert_missing()
| Python | 0.000001 |
1de77375a12e26693c89f5fe824df82719bc8632 | Add dummy directory | jacquard/directory/dummy.py | jacquard/directory/dummy.py | from .base import Directory
class DummyDirectory(Directory):
    """In-memory Directory backed by a fixed collection of users, keyed by id."""

    def __init__(self, users=()):
        self.users = {}
        for user in users:
            self.users[user.id] = user

    def lookup(self, user_id):
        """Return the user with the given id; raises KeyError when absent."""
        return self.users[user_id]

    def all_users(self):
        """Return a view of every known user."""
        return self.users.values()
| Python | 0.000001 | |
90eda86a7bbd1dc28023a6c5df1f964add3ddf55 | add client test for oaipmh endpoint. | test/oaipmh_client_test.py | test/oaipmh_client_test.py | import requests
from lxml import etree
NS = "{http://www.openarchives.org/OAI/2.0/}"
JOURNAL_BASE_URL = "http://localhost:5004/oai"
ARTICLE_BASE_URL = "http://localhost:5004/oai.article"
def harvest(base_url, resToken=None):
    """Issue one OAI-PMH ListRecords request (Python 2 script).

    Follows the resumption-token protocol: pass the token from the previous
    page, or None for the first page.  Returns the next resumption token,
    or None when the list is complete.
    """
    url = base_url + "?verb=ListRecords"
    if resToken is not None:
        url += "&resumptionToken=" + resToken
    else:
        # The metadata prefix is only supplied on the first request.
        url += "&metadataPrefix=oai_dc"
    print "harvesting " + url
    resp = requests.get(url)
    assert resp.status_code == 200, resp.text
    # The [39:] slice presumably skips the XML declaration so lxml accepts
    # the (already decoded) unicode body -- TODO confirm the offset.
    xml = etree.fromstring(resp.text[39:])
    rtel = xml.find(".//" + NS + "resumptionToken")
    if rtel is not None and (rtel.text is not None and rtel.text != ""):
        print "resumption token", rtel.text, "cursor", rtel.get("cursor") + "/" + rtel.get("completeListSize")
        return rtel.text
    print "no resumption token, complete"
    return None
# journals
rt = None
while True:
rt = harvest(JOURNAL_BASE_URL, rt)
if rt is None:
break
# articles
rt = None
while True:
rt = harvest(ARTICLE_BASE_URL, rt)
if rt is None:
break | Python | 0 | |
294f8721799f6562b7d7f3f31a68f25cb24c964f | Add Spanish Código Cuenta Corriente (CCC) | stdnum/es/ccc.py | stdnum/es/ccc.py | # ccc.py - functions for handling Spanish CCC bank account code
# coding: utf-8
#
# Copyright (C) 2016 David García Garzón
# Copyright (C) 2016 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""CCC (Código Cuenta Corriente, Spanish Bank Account Code)
CCC code is the country-specific part in Spanish IBAN codes. In order to
fully validate an Spanish IBAN you have to validate as well the country
specific part as a valid CCC. It was used for home banking transactions until
February 1st 2014 when IBAN codes started to be used as an account ID.
The CCC has 20 digits, all being numbers: EEEE OOOO DD NNNNNNNNNN
* EEEE: banking entity
* OOOO: office
* DD: check digits
* NNNNN NNNNN: account identifier
This module does not check whether the bank code exists. Existing bank codes are
published on the 'Registro de Entidades' by 'Banco de España' (Spanish
Central Bank).
More information:
* https://es.wikipedia.org/wiki/Código_cuenta_cliente
* http://www.bde.es/bde/es/secciones/servicios/Particulares_y_e/Registros_de_Ent/
>>> validate('1234-1234-16 1234567890')
'12341234161234567890'
>>> validate('134-1234-16 1234567890') # wrong length
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('12X4-1234-16 1234567890') # non numbers
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('1234-1234-00 1234567890') # invalid check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('12341234161234567890')
'1234 1234 16 12345 67890'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
    """Convert the number to the minimal representation. This strips the
    number of any valid separators and removes surrounding whitespace."""
    cleaned = clean(number, ' -')
    return cleaned.strip().upper()
def format(number):
    """Reformat the passed number to the standard format."""
    number = compact(number)
    # Standard grouping: bank (4), office (4), check digits (2), account (5+5).
    groups = (number[0:4], number[4:8], number[8:10],
              number[10:15], number[15:20])
    return ' '.join(groups)
def _calc_check_digit(number):
"""Calculate a single check digit on the provided part of the number."""
check = sum(int(n) * 2 ** i for i, n in enumerate(number)) % 11
return str(check if check < 2 else 11 - check)
def calc_check_digits(number):
    """Calculate the check digits for the number. The supplied number should
    have check digits included but are ignored."""
    number = compact(number)
    # The first check digit covers the bank+office codes zero-padded to ten
    # digits; the second covers the ten-digit account number.
    return (
        _calc_check_digit('00' + number[:8]) + _calc_check_digit(number[10:]))
def validate(number):
    """Checks to see if the number provided is a valid CCC.

    Returns the compacted number; raises InvalidLength, InvalidFormat or
    InvalidChecksum on failure.
    """
    number = compact(number)
    # Layout is EEEE OOOO DD NNNNNNNNNN: 20 digits total.
    if len(number) != 20:
        raise InvalidLength()
    if not number.isdigit():
        raise InvalidFormat()
    # Positions 8-9 hold the two check digits.
    if number[8:10] != calc_check_digits(number):
        raise InvalidChecksum()
    return number
def is_valid(number):
    """Checks to see if the number provided is a valid CCC."""
    try:
        validate(number)
    except ValidationError:
        return False
    return True
| Python | 0.99999 | |
cb18bcb02b86c185d946f9bf74d3e846fff7205c | fix error message | src/python/importer.py | src/python/importer.py | # Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Simple importer that allows python to import data from a dict of
# code objects. The keys are the module path, and the items are the
# filename and bytecode of the file.
class CodeImporter(object):
    """PEP 302 meta-path importer serving modules from an in-memory dict.

    Keys are dotted module paths; values are (filename, abspath, code)
    tuples registered via add_module().  (Python 2 codebase.)
    """

    def __init__(self):
        self.modules = {}

    def add_module(self, filename, abspath, modpath, code):
        """Register a compiled module under its dotted path `modpath`."""
        if modpath in self.modules:
            raise AttributeError, "%s already found in importer" % modpath
        self.modules[modpath] = (filename, abspath, code)

    def find_module(self, fullname, path):
        """PEP 302 hook: claim the import only if we hold the module."""
        if fullname in self.modules:
            return self
        return None

    def load_module(self, fullname):
        """PEP 302 hook: create, register and execute the module."""
        # Because the importer is created and initialized in its own
        # little sandbox (in init.cc), the globals that were available
        # when the importer module was loaded and CodeImporter was
        # defined are not available when load_module is actually
        # called. Soooo, the imports must live here.
        import imp
        import os
        import sys
        mod = imp.new_module(fullname)
        # Register before exec so recursive imports of `fullname` resolve.
        sys.modules[fullname] = mod
        try:
            mod.__loader__ = self
            srcfile,abspath,code = self.modules[fullname]
            # Optionally recompile from on-disk source instead of the baked-in
            # bytecode, controlled by the M5_OVERRIDE_PY_SOURCE env var.
            override = os.environ.get('M5_OVERRIDE_PY_SOURCE', 'false').lower()
            if override in ('true', 'yes') and os.path.exists(abspath):
                src = file(abspath, 'r').read()
                code = compile(src, abspath, 'exec')
            # Packages need a __path__ so submodule imports work.
            if os.path.basename(srcfile) == '__init__.py':
                mod.__path__ = fullname.split('.')
            mod.__file__ = srcfile
            exec code in mod.__dict__
        except Exception:
            # Undo the sys.modules registration on failure so a retry is clean.
            del sys.modules[fullname]
            raise
        return mod
# Create an importer and add it to the meta_path so future imports can
# use it. There's currently nothing in the importer, but calls to
# add_module can be used to add code.
import sys
importer = CodeImporter()
add_module = importer.add_module
sys.meta_path.append(importer)
| # Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# Simple importer that allows python to import data from a dict of
# code objects. The keys are the module path, and the items are the
# filename and bytecode of the file.
class CodeImporter(object):
def __init__(self):
self.modules = {}
def add_module(self, filename, abspath, modpath, code):
if modpath in self.modules:
raise AttributeError, "%s already found in importer"
self.modules[modpath] = (filename, abspath, code)
def find_module(self, fullname, path):
if fullname in self.modules:
return self
return None
def load_module(self, fullname):
# Because the importer is created and initialized in its own
# little sandbox (in init.cc), the globals that were available
# when the importer module was loaded and CodeImporter was
# defined are not available when load_module is actually
# called. Soooo, the imports must live here.
import imp
import os
import sys
mod = imp.new_module(fullname)
sys.modules[fullname] = mod
try:
mod.__loader__ = self
srcfile,abspath,code = self.modules[fullname]
override = os.environ.get('M5_OVERRIDE_PY_SOURCE', 'false').lower()
if override in ('true', 'yes') and os.path.exists(abspath):
src = file(abspath, 'r').read()
code = compile(src, abspath, 'exec')
if os.path.basename(srcfile) == '__init__.py':
mod.__path__ = fullname.split('.')
mod.__file__ = srcfile
exec code in mod.__dict__
except Exception:
del sys.modules[fullname]
raise
return mod
# Create an importer and add it to the meta_path so future imports can
# use it. There's currently nothing in the importer, but calls to
# add_module can be used to add code.
import sys
importer = CodeImporter()
add_module = importer.add_module
sys.meta_path.append(importer)
| Python | 0.000002 |
0ba3dff1e150d534e4eda086ebbd53ec3789d82c | Add alg_balance_symbols.py | alg_max_connected_colors.py | alg_max_connected_colors.py | def max_connected_colors():
pass
def main():
# A grid of connected colors: 5 (of 2's).
grid = [[1, 1, 2, 3],
[1, 2, 3, 2],
[3, 2, 2, 2]]
if __init__ == '__main__':
main()
| Python | 0.00058 | |
ce6052ee9df9ca83ac2da691eb51a8eaea0ab603 | Comment model migration | comments/migrations/0001_initial.py | comments/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-10 22:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: create the Comment model linked to User and Post."""

    initial = True

    dependencies = [
        ('blog', '0007_post_tags'),
        # Depend on whatever model AUTH_USER_MODEL resolves to.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField(max_length=500)),
                # Moderation flags: soft-delete and visibility.
                ('is_removed', models.BooleanField(default=False)),
                ('is_public', models.BooleanField(default=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
| Python | 0.000001 | |
74260fbf266628d4f8afbbab61bbd6de0ddfe7fe | Remove unused constant | dragonflow/neutron/common/constants.py | dragonflow/neutron/common/constants.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
DF_REMOTE_PORT_TYPE = 'remote_port'
DF_BINDING_PROFILE_PORT_KEY = 'port_key'
DF_BINDING_PROFILE_HOST_IP = 'host_ip'
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
DF_REMOTE_PORT_TYPE = 'remote_port'
DF_BINDING_PROFILE_PORT_KEY = 'port_key'
DF_BINDING_PROFILE_HOST_IP = 'host_ip'
DF_PORT_BINDING_PROFILE = portbindings.PROFILE
| Python | 0.000247 |
22298d91fff788c37395cdad9245b3e7ed20cfdf | Add a snippet (Python OpenCV). | python/opencv/opencv_2/images/display_image_with_matplotlib.py | python/opencv/opencv_2/images/display_image_with_matplotlib.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
    """Load the image given by --infile in grayscale and show it with matplotlib."""
    # Parse the program options (get the path of the image file to display)
    parser = argparse.ArgumentParser(description='An opencv snippet.')
    parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
    args = parser.parse_args()
    infile_str = args.infile

    # OpenCV
    # imread_flags is a flag which specifies the way image should be read:
    # - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
    # - cv.IMREAD_GRAYSCALE loads image in grayscale mode
    # - cv.IMREAD_UNCHANGED loads image as such including alpha channel
    imread_flags = cv.IMREAD_GRAYSCALE
    img_np = cv.imread(infile_str, imread_flags)

    plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
    plt.xticks([]) # to hide tick values on X axis
    plt.yticks([]) # to hide tick values on Y axis
    plt.show()


if __name__ == '__main__':
    main()
| Python | 0.000018 | |
ba9e4c6b003cc002e5bc7216da960e47f9fe5424 | Print information about all nitrogens. | copper_imidazole_csv_allnitrogen.py | copper_imidazole_csv_allnitrogen.py | #!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
| Python | 0 | |
eb0772fc6c30d98b83bf1c8e7d83af21066ae45b | Add peek method and implementation | data_structures/Stack/Python/Stack.py | data_structures/Stack/Python/Stack.py | # Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
    """LIFO stack backed by a Python list (top of the stack is the list's end)."""

    def __init__(self):
        # Initialize stack as empty list.
        self.stack = []

    def pop(self):
        """Remove and return the top element, or None when the stack is empty."""
        # Bug fix: Python lists have no `.length` attribute; the original
        # `self.stack.length > 0` raised AttributeError on every call.
        if len(self.stack) > 0:
            return self.stack.pop()

    def push(self, element):
        """Add an element on top of the stack."""
        self.stack.append(element)

    def peek(self):
        """Return the top element without removing it, or None when empty
        (guarded for consistency with pop())."""
        if self.stack:
            return self.stack[-1]
return self.stack[-1] | # Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
if self.stack.length > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element) | Python | 0 |
825c4d613915d43aea2e6ee0bc5d5b49ed0a4500 | Create a simple method to segment a trip into sections | emission/analysis/classification/segmentation/section_segmentation.py | emission/analysis/classification/segmentation/section_segmentation.py | # Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
def segment_into_sections(trip):
    """Split a trip into sections at confident activity transitions.

    Drops TILTING activity points, then walks the remaining points in order,
    closing the current section and opening a new one whenever the activity
    changes with confidence > 60 to something other than UNKNOWN/STILL.
    Timestamps appear to be milliseconds since the epoch (note the /1000
    conversions) -- TODO confirm.  Returns the list of section AttrDicts.
    """
    points_df = lq.get_activities_for_section(trip)
    no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
    section_list = []
    # The first section starts with the trip and inherits the first
    # observed activity.
    curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
                                "start_ts": trip.start_ts, "start_time": trip.start_time,
                                "activity": no_tilting_points_df.iloc[0].activity})
    for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
        if row["activity"] != curr_section.activity:
            # Let's add a second check here for confidence and types of activities
            # ('agc' is presumably the activity confidence -- TODO confirm).
            if (row['agc'] > 60 and
                row['activity'] != lq.Activities.UNKNOWN and
                row['activity'] != lq.Activities.STILL):
                # Because the first section is initialized with the first activity.
                # So when idx == 0, the activities will be equal and this is
                # guaranteed to not be invoked
                assert(idx > 0)
                # The previous point's timestamp marks the section boundary.
                prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
                print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
                      (str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
                       row["activity"], curr_section.activity,
                       str(pydt.datetime.fromtimestamp(prev_ts/1000))))
                # complete this section
                curr_section.end_ts = prev_ts
                curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
                section_list.append(curr_section)
                # make a new section
                curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
                                            "start_ts": prev_ts,
                                            "start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
                                            "activity": row["activity"]})
            else:
                print("At %s, retained existing activity %s" %
                      (str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
        else:
            print("At %s, retained existing activity %s" %
                  (str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
    print("Detected trip end! Ending section at %s" % trip.end_time)
    # End the last section at the same time as the trip
    curr_section.end_ts = trip.end_ts
    curr_section.end_time = trip.end_time
    section_list.append(curr_section)
    return section_list
| Python | 0 | |
5f2cd26054adff5a1fbf9ba5d56766b972f46670 | Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading. | leakcheck/thread-key-gen.py | leakcheck/thread-key-gen.py | # Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
    """Generate 100 RSA-1024 keys, holding them all to stress allocation."""
    keys = []
    for i in range(100):
        key = PKey()
        key.generate_key(TYPE_RSA, 1024)
        keys.append(key)
def generate_dsa():
    """Generate 100 DSA-512 keys, holding them all to stress allocation."""
    keys = []
    for i in range(100):
        key = PKey()
        key.generate_key(TYPE_DSA, 512)
        keys.append(key)
def main():
    """Run three RSA and three DSA generator threads concurrently.

    Exercises GIL release during key generation; threads are started but
    deliberately not joined -- the interpreter waits for them at exit.
    """
    threads = []
    for i in range(3):
        t = Thread(target=generate_rsa, args=())
        threads.append(t)
        t = Thread(target=generate_dsa, args=())
        threads.append(t)
    for t in threads:
        t.start()
main()
| Python | 0 | |
c87be0f98295d64addc01529999996b566c80f2c | add sent notification status | migrations/versions/00xx_add_sent_notification_status.py | migrations/versions/00xx_add_sent_notification_status.py | """empty message
Revision ID: 00xx_add_sent_notification_status
Revises: 0075_create_rates_table
Create Date: 2017-04-24 16:55:20.731069
"""
# revision identifiers, used by Alembic.
revision = '00xx_sent_notification_status'
down_revision = '0075_create_rates_table'
from alembic import op
import sqlalchemy as sa
enum_name = 'notify_status_type'
tmp_name = 'tmp_' + enum_name
old_options = (
'created',
'sending',
'delivered',
'pending',
'failed',
'technical-failure',
'temporary-failure',
'permanent-failure'
)
new_options = old_options + ('sent',)
old_type = sa.Enum(*old_options, name=enum_name)
new_type = sa.Enum(*new_options, name=enum_name)
alter_str = 'ALTER TABLE {table} ALTER COLUMN status TYPE {enum} USING status::text::notify_status_type '
def upgrade():
    """Recreate notify_status_type with the extra 'sent' value.

    Postgres enums cannot simply gain a value in-place here, so the old type
    is renamed aside, the new one created, and both status columns cast over.
    """
    op.execute('ALTER TYPE {enum} RENAME TO {tmp_name}'.format(enum=enum_name, tmp_name=tmp_name))
    new_type.create(op.get_bind())
    op.execute(alter_str.format(table='notifications', enum=enum_name))
    op.execute(alter_str.format(table='notification_history', enum=enum_name))
    op.execute('DROP TYPE ' + tmp_name)
def downgrade():
    """Remove 'sent' from notify_status_type, folding those rows into 'sending'."""
    op.execute('ALTER TYPE {enum} RENAME TO {tmp_name}'.format(enum=enum_name, tmp_name=tmp_name))
    # Convert 'sent' rows into 'sending' before the columns are cast back to
    # the old enum, which has no 'sent' member.
    # Bug fix: valid SQL is "UPDATE <table> SET ...", not "UPDATE TABLE ...".
    update_str = "UPDATE {table} SET status='sending' where status='sent'"
    op.execute(update_str.format(table='notifications'))
    op.execute(update_str.format(table='notification_history'))
    old_type.create(op.get_bind())
    op.execute(alter_str.format(table='notifications', enum=enum_name))
    op.execute(alter_str.format(table='notification_history', enum=enum_name))
    op.execute('DROP TYPE ' + tmp_name)
| Python | 0 | |
44526603b2ab388fceb99de625f3282c4a2a8d99 | write the test with the proper constants | tests/modules/test_math.py | tests/modules/test_math.py | import math
from ..base import BaseTopazTest
class TestMath(BaseTopazTest):
def assert_float_equal(self, result, expected, eps=1e-15):
assert abs(result - expected) < eps
def test_domain_error(self, space):
space.execute("Math::DomainError")
def test_pi(self, space):
w_res = space.execute("return Math::PI")
assert space.float_w(w_res) == math.pi
def test_exp(self, space):
w_res = space.execute("return [Math.exp(0.0), Math.exp(1)]")
assert self.unwrap(space, w_res) == [1, math.exp(1)]
def test_sqrt(self, space):
w_res = space.execute("return [Math.sqrt(4), Math.sqrt(28)]")
assert self.unwrap(space, w_res) == [2, math.sqrt(28)]
def test_e(self, space):
w_res = space.execute("return Math::E")
assert space.float_w(w_res) == math.e
def test_log(self, space):
w_res = space.execute("return Math.log(4, 10)")
self.assert_float_equal(space.float_w(w_res), math.log(4, 10))
w_res = space.execute("return Math.log(28)")
self.assert_float_equal(space.float_w(w_res), math.log(28))
w_res = space.execute("return Math.log(3, 4)")
self.assert_float_equal(space.float_w(w_res), math.log(3, 4))
def test_gamma(self, space):
w_res = space.execute("return Math.gamma(5.0)")
self.assert_float_equal(space.float_w(w_res), 24.0)
w_res = space.execute("return Math.gamma(6.0)")
self.assert_float_equal(space.float_w(w_res), 120.0)
w_res = space.execute("return Math.gamma(0.5)")
self.assert_float_equal(space.float_w(w_res), math.pi ** 0.5)
w_res = space.execute("return Math.gamma(1000)")
assert space.float_w(w_res) == float('inf')
w_res = space.execute("return Math.gamma(0.0)")
assert space.float_w(w_res) == float('inf')
w_res = space.execute("return Math.gamma(-0.0)")
assert space.float_w(w_res) == float('-inf')
w_res = space.execute("return Math.gamma(Float::INFINITY)")
assert space.float_w(w_res) == float('inf')
with self.raises(space, "DomainError", 'Numerical argument is out of domain - "gamma"'):
space.execute("""Math.gamma(-1)""")
with self.raises(space, "DomainError", 'Numerical argument is out of domain - "gamma"'):
space.execute("""Math.gamma(-Float::INFINITY)""")
w_res = space.execute("return Math.gamma(Float::NAN)")
assert math.isnan(space.float_w(w_res))
| import math
from ..base import BaseTopazTest
class TestMath(BaseTopazTest):
def assert_float_equal(self, result, expected, eps=1e-15):
assert abs(result - expected) < eps
def test_domain_error(self, space):
space.execute("Math::DomainError")
def test_pi(self, space):
w_res = space.execute("return Math::PI")
assert space.float_w(w_res) == math.pi
def test_exp(self, space):
w_res = space.execute("return [Math.exp(0.0), Math.exp(1)]")
assert self.unwrap(space, w_res) == [1, math.exp(1)]
def test_sqrt(self, space):
w_res = space.execute("return [Math.sqrt(4), Math.sqrt(28)]")
assert self.unwrap(space, w_res) == [2, math.sqrt(28)]
def test_e(self, space):
w_res = space.execute("return Math::E")
assert space.float_w(w_res) == math.e
def test_log(self, space):
w_res = space.execute("return Math.log(4, 10)")
self.assert_float_equal(space.float_w(w_res), math.log(4, 10))
w_res = space.execute("return Math.log(28)")
self.assert_float_equal(space.float_w(w_res), math.log(28))
w_res = space.execute("return Math.log(3, 4)")
self.assert_float_equal(space.float_w(w_res), math.log(3, 4))
def test_gamma(self, space):
w_res = space.execute("return Math.gamma(5.0)")
self.assert_float_equal(space.float_w(w_res), 24.0)
w_res = space.execute("return Math.gamma(6.0)")
self.assert_float_equal(space.float_w(w_res), 120.0)
w_res = space.execute("return Math.gamma(0.5)")
self.assert_float_equal(space.float_w(w_res), math.pi ** 0.5)
w_res = space.execute("return Math.gamma(1000)")
assert space.float_w(w_res) == float('inf')
w_res = space.execute("return Math.gamma(0.0)")
assert space.float_w(w_res) == float('inf')
w_res = space.execute("return Math.gamma(-0.0)")
assert space.float_w(w_res) == float('-inf')
# inf
w_res = space.execute("return Math.gamma(1e1000)")
assert space.float_w(w_res) == float('inf')
with self.raises(space, "DomainError", 'Numerical argument is out of domain - "gamma"'):
space.execute("""Math.gamma(-1)""")
with self.raises(space, "DomainError", 'Numerical argument is out of domain - "gamma"'):
# -inf
space.execute("""Math.gamma(-1e1000)""")
# nan
w_res = space.execute("return Math.gamma(1e1000 - 1e1000)")
assert math.isnan(space.float_w(w_res))
| Python | 0 |
7597e834288c21065703bcdc86530a0ad5414a95 | backup strategy tasks | nodeconductor/backup/tasks.py | nodeconductor/backup/tasks.py | from celery import shared_task
@shared_task
def backup_task(backupable_instance):
    """Celery task: run the instance's backup strategy.

    NOTE(review): get_backup_strategy is accessed as an attribute, not
    called -- if it is a method this never invokes it; TODO confirm.
    """
    backupable_instance.get_backup_strategy.backup()
@shared_task
def restore_task(backupable_instance):
    """Celery task: run the instance's restore strategy.

    NOTE(review): get_backup_strategy is accessed as an attribute, not
    called -- if it is a method this never invokes it; TODO confirm.
    """
    backupable_instance.get_backup_strategy.restore()
@shared_task
def delete_task(backupable_instance):
    """Celery task: run the instance's delete strategy.

    NOTE(review): get_backup_strategy is accessed as an attribute, not
    called -- if it is a method this never invokes it; TODO confirm.
    """
    backupable_instance.get_backup_strategy.delete()
| Python | 0.000002 | |
4820013e207947fe7ff94777cd8dcf1ed474eab1 | Add migration for account lockout fields on User | migrations/versions/fb6a6554b21_add_account_lockout_fields_to_user.py | migrations/versions/fb6a6554b21_add_account_lockout_fields_to_user.py | """Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add account-lockout tracking columns to ``users``.

    All three columns are nullable, so existing rows need no backfill.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
    op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
    op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the account-lockout columns from ``users``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'locked_out')
    op.drop_column('users', 'last_failed_login_attempt')
    op.drop_column('users', 'failed_login_attempts')
    ### end Alembic commands ###
| Python | 0 | |
cee7f23df93f4a09550348e30709aa1e6e6969fc | use net ip availability api def from neutron-lib | neutron/extensions/network_ip_availability.py | neutron/extensions/network_ip_availability.py | # Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import network_ip_availability as apidef
from neutron_lib.api import extensions as api_extensions
import neutron.api.extensions as extensions
import neutron.api.v2.base as base
import neutron.services.network_ip_availability.plugin as plugin
class Network_ip_availability(api_extensions.APIExtensionDescriptor):
    """API extension exposing per-network IP availability information."""

    api_definition = apidef

    @classmethod
    def get_resources(cls):
        """Return the extension resource wired to the service plugin."""
        attr_map = apidef.RESOURCE_ATTRIBUTE_MAP[apidef.RESOURCE_PLURAL]
        service_plugin = plugin.NetworkIPAvailabilityPlugin.get_instance()
        controller = base.create_resource(
            apidef.RESOURCE_PLURAL,
            apidef.RESOURCE_NAME,
            service_plugin,
            attr_map)
        resource = extensions.ResourceExtension(
            apidef.COLLECTION_NAME, controller, attr_map=attr_map)
        return [resource]
| # Copyright 2016 GoDaddy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api import extensions as api_extensions
import neutron.api.extensions as extensions
import neutron.api.v2.base as base
import neutron.services.network_ip_availability.plugin as plugin
RESOURCE_NAME = "network_ip_availability"
RESOURCE_PLURAL = "network_ip_availabilities"
COLLECTION_NAME = RESOURCE_PLURAL.replace('_', '-')
EXT_ALIAS = RESOURCE_NAME.replace('_', '-')
RESOURCE_ATTRIBUTE_MAP = {
RESOURCE_PLURAL: {
'network_id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'network_name': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'tenant_id': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'total_ips': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'used_ips': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'subnet_ip_availability': {'allow_post': False, 'allow_put': False,
'is_visible': True},
# TODO(wwriverrat) Make composite attribute for subnet_ip_availability
}
}
class Network_ip_availability(api_extensions.ExtensionDescriptor):
    """Extension class supporting network ip availability information."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Network IP Availability"

    @classmethod
    def get_alias(cls):
        """Extension alias used in API URLs / capability lists."""
        return EXT_ALIAS

    @classmethod
    def get_description(cls):
        """One-line description shown in the extension listing."""
        return "Provides IP availability data for each network and subnet."

    @classmethod
    def get_updated(cls):
        """Timestamp of the last definition change (ISO 8601)."""
        return "2015-09-24T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Extended Resource for service type management."""
        resource_attributes = RESOURCE_ATTRIBUTE_MAP[RESOURCE_PLURAL]
        controller = base.create_resource(
            RESOURCE_PLURAL,
            RESOURCE_NAME,
            plugin.NetworkIPAvailabilityPlugin.get_instance(),
            resource_attributes)
        return [extensions.ResourceExtension(COLLECTION_NAME,
                                             controller,
                                             attr_map=resource_attributes)]

    def get_extended_resources(self, version):
        """Expose the attribute map for API v2.0 only."""
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}
| Python | 0.000003 |
6fabbe85bb74788641897daf8b162eac3d47b0aa | Add script for downloading Indonesia price data | data_crunching/indonesia_timeseries/download_indonesia_prices.py | data_crunching/indonesia_timeseries/download_indonesia_prices.py | #!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This scripts downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""
def download_table(date):
    """Download and print the daily food-price table for *date*.

    POSTs the report form to the mobile site, scrapes the result table
    out of the HTML with a regex, parses it with lxml and prints a
    semicolon-separated header followed by one row per line.
    Python 2 only (urllib2, print statements).
    """
    main_url = 'http://m.pip.kementan.org/index.php'
    params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
    req = urllib2.Request(main_url, params)
    response = urllib2.urlopen(req)
    html_code = response.read()
    # The table sits inside the #content div; DOTALL lets '.' span newlines.
    regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
    match = regex.search(html_code)
    if not match:
        print "ERROR: table not detected"
        sys.exit(1)
    table_html = match.group(1)
    # Strip thousands separators so numbers parse cleanly downstream.
    table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
    table = etree.XML(table_html)
    rows = iter(table)
    # First row holds the (Indonesian) column headers; we print English ones.
    actual_headers = [col.text for col in next(rows)]
    # TODO: verify these English renderings of the original column headers.
    headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
    print "; ".join(headers), "\n"
    # Print table
    for row in rows:
        # Skip entirely empty rows.
        if all(v.text is None for v in row):
            continue
        print ('''"%s"''') % row[0].text,
        for col in row[1:]:
            print col.text,
        print
def parse_date(date_string):
    """Parse a DD/MM/YYYY string into a ``datetime.date``.

    Exits the process with an error message when the string does not
    match the expected format.
    """
    parsed = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
    if parsed is None:
        sys.exit("ERROR: invalid date")
    day, month, year = (int(group) for group in parsed.groups())
    return datetime.date(year, month, day)
def usage():
    # Print the CLI help text defined at module level.
    print usage_str


if __name__ == "__main__":
    # No argument, or an explicit help flag: show usage and exit cleanly.
    if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
        usage()
        sys.exit(0)
    date_string = sys.argv[1]
    date = parse_date(date_string)
    download_table(date)
| Python | 0 | |
4fdf2c32bcd937ba2fc21dbaad8a81620c02fb17 | Fix part of #5134: Add test for core.storage.config.gae_models (#5565) | core/storage/config/gae_models_test.py | core/storage/config/gae_models_test.py | # coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core.platform import models
from core.tests import test_utils
import feconf
(config_models,) = models.Registry.import_models([models.NAMES.config])
class ConfigPropertyModelUnitTests(test_utils.GenericTestBase):
    """Unit tests for ConfigPropertyModel."""

    def test_create_model(self):
        model = config_models.ConfigPropertyModel(value='b')
        self.assertEqual(model.value, 'b')

    def test_commit(self):
        original = config_models.ConfigPropertyModel(
            id='config_model1', value='c')
        original.commit(feconf.SYSTEM_COMMITTER_ID, [])
        version_one = config_models.ConfigPropertyModel.get_version(
            'config_model1', 1)
        self.assertEqual(version_one.value, 'c')
        # Committing a change creates a second, independently retrievable
        # version.
        version_one.value = 'd'
        version_one.commit(feconf.SYSTEM_COMMITTER_ID, [])
        version_two = config_models.ConfigPropertyModel.get_version(
            'config_model1', 2)
        self.assertEqual(version_two.value, 'd')
| Python | 0.000001 | |
65969d0251dc5031328132cf2043f1f76ee90d72 | Include the demo as a separate file | demo.py | demo.py |
import sys, curses
from cwidgets import *
from cwidgets import _LOG
def demo(window):
    """Build and run the cwidgets demo UI inside *window* (a curses window).

    The order in which widgets are added below determines the layout, so
    the construction sequence is significant.
    """
    # Create the root of the widget hierarchy.
    root = WidgetRoot(window)
    # Wrap the UI in a Viewport to avoid crashes at small resolutions.
    vp = root.add(Viewport())
    # Push the UI together to avoid spreading everything over the screen.
    cont = vp.add(AlignContainer())
    # The user-visible "window"; with a border and the bottom line pushed
    # inside by one line height.
    win = cont.add(MarginContainer(border=True, insets=(0, 0, 1, 0)))
    # Decoratively enclose the title
    title_wrapper = win.add(TeeContainer(), slot=MarginContainer.POS_TOP)
    # Add the title
    title = title_wrapper.add(Label('cwidgets demo'))
    # Add the content. This could also be a nested Viewport containing
    # a more complex UI.
    # When text is typed into the entry box, it will increase smoothly (along
    # with the remaining UI) until it's 70 columns or 20 rows (because of the
    # multiline setting, it can have multiple lines) large, then, it will not
    # grow further (along the corresponding axis), and scroll instead.
    content = win.add(EntryBox('Lorem ipsum dolor sit amet', multiline=True,
                               cmaxsize=(70, 20)))
    # Bind a vertical scrollbar to the content
    sbv = win.add(content.bind(Scrollbar(Scrollbar.DIR_VERTICAL)),
                  slot=MarginContainer.POS_RIGHT)
    # The bottom contains a line of buttons stacked below a scrollbar.
    bottom = win.add(VerticalContainer(), slot=MarginContainer.POS_BOTTOM)
    # Add the horizontal scrollbar.
    sbh = bottom.add(content.bind(Scrollbar(Scrollbar.DIR_HORIZONTAL)))
    # The buttons are laid out horizontally.
    buttons = bottom.add(HorizontalContainer())
    # A bare Widget as "glue" to fill the space. An AlignContainer would
    # have been possible as well.
    buttons.add(Widget(), weight=1)
    # The first button
    buttons.add(Button('OK', sys.exit))
    # A little spacer between the buttons
    buttons.add(Widget(cminsize=(1, 1)))
    # The second button
    buttons.add(Button('Cancel', lambda: sys.exit(1)))
    # Another glue
    buttons.add(Widget(), weight=1)
    # Run it.
    root.main()
try:
    init()
    curses.wrapper(demo)
finally:
    # Once curses has restored the terminal, flush any log lines cwidgets
    # collected to stderr so they remain readable.
    if _LOG:
        _LOG.append('')
        sys.stderr.write('\n'.join(map(str, _LOG)))
        sys.stderr.flush()
| Python | 0 | |
3968c53c4577b2efe9ef3cd2de76b688a26517d9 | Add gpio example | chapter2/gpio.py | chapter2/gpio.py | import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(5, GPIO.OUT)
GPIO.output(5, GPIO.HIGH)
GPIO.output(5, GPIO.LOW)
| Python | 0.000001 | |
dc0bb07da52fd11a7980b9f36c38fcdb7f9c6ba5 | Add `edit.py` to be able to edit a view asynchronously | edit.py | edit.py | # edit.py
# buffer editing for both ST2 and ST3 that "just works"
import sublime
import sublime_plugin
from collections import defaultdict
# Shared registry surviving plugin reloads: only create sublime.edit_storage
# when a previous load of this module has not already attached it.
try:
    sublime.edit_storage
except AttributeError:
    sublime.edit_storage = {}
class EditStep:
    """A single deferred buffer operation (insert/erase/replace/callback)."""

    def __init__(self, cmd, *args):
        self.cmd = cmd
        self.args = args

    def run(self, view, edit):
        """Apply this step to *view* using the given edit token."""
        if self.cmd == 'callback':
            # Callbacks receive the view and the edit token directly.
            return self.args[0](view, edit)
        dispatch = {
            'insert': view.insert,
            'erase': view.erase,
            'replace': view.replace,
        }
        method = dispatch.get(self.cmd)
        if method is not None:
            method(edit, *self.args)
class Edit:
    """Queue of EditStep operations, applied atomically on context exit.

    Usage: ``with Edit(view) as edit: edit.insert(...)``. On ST2 the steps
    run inside a begin_edit/end_edit pair; on ST3 the queued runner is
    stashed in the module-global ``sublime.edit_storage`` and invoked via
    the ``apply_edit`` TextCommand, which supplies a valid edit token.
    """

    defer = defaultdict(dict)

    def __init__(self, view):
        self.view = view
        self.steps = []

    def step(self, cmd, *args):
        """Queue a raw EditStep."""
        step = EditStep(cmd, *args)
        self.steps.append(step)

    def insert(self, point, string):
        self.step('insert', point, string)

    def erase(self, region):
        self.step('erase', region)

    def replace(self, region, string):
        self.step('replace', region, string)

    def callback(self, func):
        """Queue an arbitrary ``func(view, edit)`` to run with the edit token."""
        self.step('callback', func)

    def run(self, view, edit):
        """Apply all queued steps against *view* with *edit*."""
        for step in self.steps:
            step.run(view, edit)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        view = self.view
        if sublime.version().startswith('2'):
            edit = view.begin_edit()
            # Bug fix: run() takes (view, edit); the original called
            # self.run(edit), which raised a TypeError on ST2.
            self.run(view, edit)
            view.end_edit(edit)
        else:
            # Key the stored runner by the identity hash of the queued
            # steps so apply_edit can retrieve (and pop) it.
            key = str(hash(tuple(self.steps)))
            sublime.edit_storage[key] = self.run
            view.run_command('apply_edit', {'key': key})
class apply_edit(sublime_plugin.TextCommand):
    """Pop the stored runner for *key* and call it with a valid edit token."""
    def run(self, edit, key):
        sublime.edit_storage.pop(key)(self.view, edit)
| Python | 0.000001 | |
a795d94a9c885b97ab5bffc313524ae46626d556 | Add simple function-size analysis tool. | tools/analyze_code_size.py | tools/analyze_code_size.py |
import os
import re
import sys
import optparse
# Sentinels emitted by emscripten around the generated function section.
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"

# Captures each "function NAME..." up to the next "function" keyword (or
# end of input) via lookahead, so consecutive definitions do not overlap.
FUNCTION_CODE_RE = re.compile(
    r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
def analyze_code_size(fileobj, opts):
    """Report per-function sizes for an emscripten output file.

    Prints one line per function (largest first) and a grand total.
    Only functions whose name matches ``opts.grep`` (case-insensitive)
    are included when that option is given. Python 2 only (itervalues,
    print statements).
    """
    funcs = {}
    name_re = None
    if opts.grep is not None:
        name_re = re.compile(opts.grep, re.I)
    # Split out and analyze the code for each individual function.
    # XXX TODO: read incrementally to reduce memory usage.
    data = fileobj.read()
    pre_code, data = data.split(MARKER_START_FUNCS, 1)
    data, post_code = data.split(MARKER_END_FUNCS, 1)
    for match in FUNCTION_CODE_RE.finditer(data):
        name = match.group("name")
        defn = match.group("defn")
        if name_re and not name_re.search(name):
            continue
        funcs[name] = FunctionMetrics(name, defn)
    # Print summary metrics.
    total = 0
    funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
    for (size, name) in sorted(funcs_by_size, reverse=True):
        print size, name, human_readable(size)
        total += size
    print "Total size:", total, human_readable(total)
class FunctionMetrics(object):
    """Size metrics for a single emscripten-generated function."""

    def __init__(self, name, defn):
        self.name = name
        self.defn = defn
        # Code size, in characters of the function's source text.
        self.size = len(self.defn)
def human_readable(size):
    """Format *size* (bytes) as e.g. "(1.50M)" or "(0.50k)".

    Returns an empty string when the size is at most a tenth of a
    kilobyte, mirroring the 0.1 threshold used for the larger unit.
    """
    for unit, scale in (("M", 1024.0 * 1024.0), ("k", 1024.0)):
        scaled = size / scale
        if scaled > 0.1:
            return "(%.2f%s)" % (scaled, unit)
    return ""
def main(args=None):
    """Parse command-line options and analyze the file given as argument."""
    usage = "usage: %prog [options] file"
    descr = "Analyze code size and complexity for emscripten-compiled output"
    parser = optparse.OptionParser(usage=usage, description=descr)
    parser.add_option("-g", "--grep", metavar="REGEXP",
                      help="only analyze functions matching this regexp")
    opts, positional = parser.parse_args(args)
    with open(positional[0], "r") as infile:
        analyze_code_size(infile, opts)
    return 0
if __name__ == "__main__":
    try:
        exitcode = main()
    except KeyboardInterrupt:
        # Ctrl-C exits with a failure code rather than a traceback.
        exitcode = 1
    sys.exit(exitcode)
| Python | 0 | |
2ce7bcdd6606cb1590febf6430a7635462b09d74 | fix #61: prefer configuration files under script dir | lixian_config.py | lixian_config.py |
import os
import os.path
def get_config_path(filename):
    """Locate *filename*: working directory first, then the script
    directory, then $LIXIAN_HOME (falling back to the user's home)."""
    if os.path.exists(filename):
        return filename
    import sys
    script_local = os.path.join(sys.path[0], filename)
    if os.path.exists(script_local):
        return script_local
    home = os.getenv('USERPROFILE') or os.getenv('HOME')
    base = os.getenv('LIXIAN_HOME') or home
    return os.path.join(base, filename)
LIXIAN_DEFAULT_CONFIG = get_config_path('.xunlei.lixian.config')
LIXIAN_DEFAULT_COOKIES = get_config_path('.xunlei.lixian.cookies')
def load_config(path):
    """Parse a config file of ``--opt`` / ``--no-opt`` / ``--key=value``
    lines into a dict mapping option name to True/False/str."""
    values = {}
    if not os.path.exists(path):
        return values
    with open(path) as config_file:
        for raw in config_file.readlines():
            stripped = raw.strip()
            if not stripped:
                continue
            if not stripped.startswith('--'):
                raise NotImplementedError(stripped)
            option = stripped.lstrip('-')
            if option.startswith('no-'):
                values[option[3:]] = False
            elif '=' in option:
                key, value = option.split('=', 1)
                values[key] = value
            else:
                values[option] = True
    return values
def dump_config(path, values):
    """Write *values* back out in the ``--opt`` config-file format."""
    lines = []
    for key, value in values.items():
        if value is True:
            lines.append('--%s\n' % key)
        elif value is False:
            lines.append('--no-%s\n' % key)
        else:
            lines.append('--%s=%s\n' % (key, value))
    with open(path, 'w') as config_file:
        config_file.writelines(lines)
class Config:
    """Dict-like wrapper over the lixian config file.

    Values are loaded once at construction; every mutation rewrites the
    whole file immediately via dump_config.
    """
    def __init__(self, path=LIXIAN_DEFAULT_CONFIG):
        self.path = path
        self.values = load_config(path)
    def put(self, k, v=True):
        # Set option k and persist immediately.
        self.values[k] = v
        dump_config(self.path, self.values)
    def get(self, k, v=None):
        # Return option k, or the default v when unset.
        return self.values.get(k, v)
    def delete(self, k):
        # Remove option k if present; note the file is rewritten even
        # when k was absent.
        if k in self.values:
            del self.values[k]
        dump_config(self.path, self.values)
    def source(self):
        # Raw text of the config file, or None when it does not exist.
        if os.path.exists(self.path):
            with open(self.path) as x:
                return x.read()
    def __str__(self):
        return '<Config{%s}>' % self.values
# Module-level singleton used by the convenience wrappers below.
global_config = Config()
def put_config(k, v=True):
    """Set an option; 'no-xxx' keys with v=True are normalized to xxx=False."""
    if k.startswith('no-') and v is True:
        k = k[3:]
        v = False
    global_config.put(k, v)
def get_config(k, v=None):
    """Read an option from the global config (default v)."""
    return global_config.get(k, v)
def delete_config(k):
    """Remove an option from the global config."""
    return global_config.delete(k)
def source_config():
    """Return the raw config file contents, or None if missing."""
    return global_config.source()
|
import os
import os.path
def get_config_path(filename):
    """Resolve *filename*: use it as-is when present in the working
    directory, otherwise place it under $LIXIAN_HOME (or the home dir)."""
    if os.path.exists(filename):
        return filename
    home = os.getenv('USERPROFILE') or os.getenv('HOME')
    base = os.getenv('LIXIAN_HOME') or home
    return os.path.join(base, filename)
LIXIAN_DEFAULT_CONFIG = get_config_path('.xunlei.lixian.config')
LIXIAN_DEFAULT_COOKIES = get_config_path('.xunlei.lixian.cookies')
def load_config(path):
    """Parse a config file of ``--opt`` / ``--no-opt`` / ``--key=value``
    lines into a dict mapping option name to True/False/str."""
    parsed = {}
    if not os.path.exists(path):
        return parsed
    with open(path) as config_file:
        for raw in config_file.readlines():
            stripped = raw.strip()
            if not stripped:
                continue
            if not stripped.startswith('--'):
                raise NotImplementedError(stripped)
            option = stripped.lstrip('-')
            if option.startswith('no-'):
                parsed[option[3:]] = False
            elif '=' in option:
                key, value = option.split('=', 1)
                parsed[key] = value
            else:
                parsed[option] = True
    return parsed
def dump_config(path, values):
    """Write *values* back out in the ``--opt`` config-file format."""
    rendered = []
    for key, value in values.items():
        if value is True:
            rendered.append('--%s\n' % key)
        elif value is False:
            rendered.append('--no-%s\n' % key)
        else:
            rendered.append('--%s=%s\n' % (key, value))
    with open(path, 'w') as config_file:
        config_file.writelines(rendered)
class Config:
    """Dict-like wrapper over the lixian config file.

    Values are loaded once at construction; every mutation rewrites the
    whole file immediately via dump_config.
    """
    def __init__(self, path=LIXIAN_DEFAULT_CONFIG):
        self.path = path
        self.values = load_config(path)
    def put(self, k, v=True):
        # Set option k and persist immediately.
        self.values[k] = v
        dump_config(self.path, self.values)
    def get(self, k, v=None):
        # Return option k, or the default v when unset.
        return self.values.get(k, v)
    def delete(self, k):
        # Remove option k if present; note the file is rewritten even
        # when k was absent.
        if k in self.values:
            del self.values[k]
        dump_config(self.path, self.values)
    def source(self):
        # Raw text of the config file, or None when it does not exist.
        if os.path.exists(self.path):
            with open(self.path) as x:
                return x.read()
    def __str__(self):
        return '<Config{%s}>' % self.values
# Module-level singleton used by the convenience wrappers below.
global_config = Config()
def put_config(k, v=True):
    """Set an option; 'no-xxx' keys with v=True are normalized to xxx=False."""
    if k.startswith('no-') and v is True:
        k = k[3:]
        v = False
    global_config.put(k, v)
def get_config(k, v=None):
    """Read an option from the global config (default v)."""
    return global_config.get(k, v)
def delete_config(k):
    """Remove an option from the global config."""
    return global_config.delete(k)
def source_config():
    """Return the raw config file contents, or None if missing."""
    return global_config.source()
| Python | 0 |
b0b304e58b964ef87c61a3b258b1b7a675e217f2 | Add a copy of our Trac integration (with example constants) to the API examples | api/examples/humbug_trac.py | api/examples/humbug_trac.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright © 2012 Humbug, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Humbug trac plugin -- sends humbugs when tickets change.
#
# Install by copying this file to the trac plugins/ subdirectory,
# customizing the constants below this comment, and then adding
# "humbug_trac" to the [components] section of the conf/trac.ini
# file, like so:
#
# [components]
# humbug_trac = enabled
#
# You may then need to restart trac (or restart Apache) for the bot
# (or changes to the bot) to actually be loaded by trac.
# Change these constants:
HUMBUG_API_PATH = "/path/to/humbug/api"
HUMBUG_SITE = "https://example.humbughq.com"
HUMBUG_USER = "trac_user@example.com"
HUMBUG_API_KEY = "0123456789abcdef0123456789abcdef"
TRAC_BASE_TICKET_URL = "https://trac.example.com/ticket"
from trac.core import Component, implements
from trac.ticket import ITicketChangeListener
import sys
sys.path.append(HUMBUG_API_PATH)
import humbug
client = humbug.Client(
email=HUMBUG_USER,
site=HUMBUG_SITE,
api_key=HUMBUG_API_KEY)
def markdown_ticket_url(ticket, heading="ticket"):
    """Return a Markdown link to *ticket* in the configured trac instance."""
    link_text = "%s #%s" % (heading, ticket.id)
    link_target = "%s/%s" % (TRAC_BASE_TICKET_URL, ticket.id)
    return "[%s](%s)" % (link_text, link_target)
def markdown_block(desc):
    """Render *desc* as a Markdown block quote, preceded by a blank line."""
    quoted_lines = "\n> ".join(desc.split("\n"))
    return "\n\n>" + quoted_lines + "\n"
def truncate(string, length):
    """Truncate *string* to at most *length* characters.

    Longer strings are cut and suffixed with "..." so the result is
    exactly *length* characters. For length <= 3 there is no room for
    the ellipsis, so a plain prefix is returned instead; the original
    code produced results *longer* than *length* in that case (e.g.
    truncate("abcdef", 2) returned "abcde...").
    """
    if len(string) <= length:
        return string
    if length <= 3:
        # No room for the "..." marker; hard cut.
        return string[:length]
    return string[:length - 3] + "..."
def trac_subject(ticket):
    """Build a stream subject line for *ticket*, capped at 60 characters."""
    summary = ticket.values.get("summary")
    return truncate("#{0}: {1}".format(ticket.id, summary), 60)
def send_update(ticket, content):
    """Send *content* to the "trac" stream, subject derived from *ticket*."""
    client.send_message({
        "type": "stream",
        "to": "trac",
        "content": content,
        "subject": trac_subject(ticket)
        })
class HumbugPlugin(Component):
    """Trac component that mirrors ticket changes to a Humbug stream."""
    implements(ITicketChangeListener)

    def ticket_created(self, ticket):
        """Called when a ticket is created."""
        content = "%s created %s in component **%s**, priority **%s**:\n" % \
            (ticket.values.get("reporter"), markdown_ticket_url(ticket),
             ticket.values.get("component"), ticket.values.get("priority"))
        # Only quote the description when the reporter actually wrote one.
        if ticket.values.get("description") != "":
            content += "%s" % markdown_block(ticket.values.get("description"))
        send_update(ticket, content)

    def ticket_changed(self, ticket, comment, author, old_values):
        """Called when a ticket is modified.

        `old_values` is a dictionary containing the previous values of the
        fields that have changed.
        """
        if not comment and set(old_values.keys()) <= set(["priority", "milestone",
                                                          "cc", "keywords",
                                                          "component"]):
            # This is probably someone going through trac and updating
            # the priorities; this can result in a lot of messages
            # nobody wants to read, so don't send them without a comment.
            return

        content = "%s updated %s" % (author, markdown_ticket_url(ticket))
        if comment:
            content += ' with comment: %s\n\n' % (markdown_block(comment,))
        else:
            content += ":\n\n"
        field_changes = []
        for key in old_values.keys():
            # Descriptions get full before/after block quotes; other
            # fields are summarized inline as old => new.
            if key == "description":
                content += '- Changed %s from %s to %s' % (key, markdown_block(old_values.get(key)),
                                                           markdown_block(ticket.values.get(key)))
            elif old_values.get(key) == "":
                field_changes.append('%s: => **%s**' % (key, ticket.values.get(key)))
            elif ticket.values.get(key) == "":
                field_changes.append('%s: **%s** => ""' % (key, old_values.get(key)))
            else:
                field_changes.append('%s: **%s** => **%s**' % (key, old_values.get(key),
                                                               ticket.values.get(key)))
        content += ", ".join(field_changes)
        send_update(ticket, content)

    def ticket_deleted(self, ticket):
        """Called when a ticket is deleted."""
        content = "%s was deleted." % markdown_ticket_url(ticket, heading="Ticket")
        send_update(ticket, content)
| Python | 0 | |
2ba9fb77ddcf1a5cc8b923ab46e50c4b17c36447 | add readme update tool | hiho.py | hiho.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import codecs
import sys
import argparse
class Entry:
    """One solved-problem row: a problem name plus its hihocoder number."""

    URL = r'http://hihocoder.com/problemset/problem/'
    # Matches a rendered README table row: |order|[name](...)|[number](...)|
    LINE_RE = re.compile(r'\|.*?\|\[(?P<name>.*?)\].*?\|\[(?P<number>.*?)\].*?\|', re.U)

    def __init__(self, name=None, number=0):
        self.name = name
        self.number = number

    def parse(self, line):
        """Extract name and number from a rendered README table row."""
        match = Entry.LINE_RE.match(line)
        assert match, u'Malformed line: {}'.format(line).encode('utf-8')
        self.name = match.group('name')
        self.number = match.group('number')

    def __str__(self):
        template = u'[{name}]({url}{number})|[{number}](solutions/{number})'
        return template.format(name=self.name, url=Entry.URL,
                               number=self.number).encode('utf-8')
class Table:
    """The README's problem table, keyed by problem number."""

    def __init__(self):
        self.entries = {}

    def parse(self, lines):
        """Load existing rows; a later duplicate overwrites an earlier one."""
        for line in lines:
            entry = Entry()
            entry.parse(line)
            self.entries[entry.number] = entry

    def add(self, entry):
        """Add *entry* unless its number is already present; return success."""
        if self.entries.get(entry.number, None):
            return False
        self.entries[entry.number] = entry
        return True

    def __str__(self):
        rows = []
        for order, number in enumerate(sorted(self.entries), start=1):
            rows.append('|{order}|{content}|'.format(
                order=order, content=str(self.entries[number])))
        return '\n'.join(rows)
class ReadMe:
    """In-memory model of the README: header text, problem table, trailer."""

    def __init__(self, path):
        self.path = path
        self.header = []
        self.table = Table()
        self.trailor = []
        self.parse()

    def parse(self):
        """Split the README into header / table rows / trailer.

        table_start flips on after the markdown separator row; table_end
        flips back on at the first blank line after the table.
        """
        table_start = False
        table_end = True
        with codecs.open(self.path, mode='r', encoding='utf-8') as fin:
            for line in fin.readlines():
                if not table_end and line == '\n':
                    table_end = True
                    # Preserve an extra blank line before the trailer.
                    line = '\n\n'
                if not table_start:
                    self.header.append(line)
                elif not table_end:
                    e = Entry()
                    e.parse(line)
                    self.table.add(e)
                else:
                    self.trailor.append(line)
                if not table_start and line.startswith('|---|-------|----------|'):
                    table_start = True
                    table_end = False

    def write(self, path=None):
        """Serialize back to *path* (defaults to the file we were read from)."""
        if not path:
            path = self.path
        with open(path, mode='w') as fout:
            fout.write(str(self))

    def add_solution(self, title, number):
        """Register a new solution row; False when the number already exists."""
        return self.table.add(Entry(title, number))

    def __str__(self):
        ret = []
        ret.append(''.join(self.header).encode('utf-8'))
        ret.append(str(self.table))
        # NOTE(review): extend() with a string appends it character by
        # character; harmless here because of the final join, but it
        # looks accidental -- append() was probably intended.
        ret.extend(''.join(self.trailor).encode('utf-8'))
        return ''.join(ret)
if __name__ == '__main__':
    # CLI entry point: add a (number, title) row to README.md.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description="""description:
    hiho facilitates update README
""",
                                     epilog="""examples:
""")
    parser.add_argument("-n", "--number", type=int, metavar="<number>", required=True, help="Specify the question number")
    parser.add_argument("-t", "--title", type=str, metavar="<title>", required=True, help="Specify the question title")
    args = parser.parse_args(sys.argv[1:])
    r = ReadMe(r'./README.md')
    # Only rewrite the file when the entry was actually new.
    if r.add_solution(args.title, args.number):
        print 'Add success'
        r.write()
| Python | 0 | |
dfe2bd52fd2e561a79c91d4ff34fbead8a26c1c3 | Create init.py | init.py | init.py | #!/usr/bin/env python
import sys
import os
import psycopg2
def dump_table(table_name, conn):
    """Print a SELECT that renders each row of *table_name* as an INSERT.

    Inspects the cursor description of a one-row probe query to build,
    per column, a CASE expression that quotes non-NULL values.

    WARNING: *table_name* and column names are concatenated directly into
    SQL -- do not pass untrusted input (SQL injection risk).
    """
    query = "SELECT * FROM "+table_name+" LIMIT 1"
    cur = conn.cursor()
    cur.execute(query)
    rows = cur.fetchall()
    description = cur.description
    columns = "'INSERT INTO "+table_name+" VALUES ('"
    for desc in description:
        columns += "||CASE WHEN "+desc.name+" IS NULL THEN 'NULL' ELSE ''''||"+desc.name+"::VARCHAR||'''' END ||','"
    # Drop the trailing ||',' and close the VALUES list.
    columns = columns[0:len(columns)-3]
    columns += "')'"
    print "SELECT "+columns+" FROM "+table_name
def update_flex_version(vl_flex_version, hostname, conn):
    """Set the Flex version for one contract host and commit.

    The short *hostname* is expanded to its full asset domain; "alpha"
    maps to the bare asset host, everything else to a suffixed variant.
    """
    if (hostname == "alpha"):
        hostname = "alpha-asset.valebroker.com.br"
    else:
        hostname = "alpha-asset-"+hostname+".valebroker.com.br"
    cur = conn.cursor()
    cur.execute("UPDATE tb_contract_host SET vl_flex_version = %s WHERE hostname = %s", (vl_flex_version, hostname))
    conn.commit()
    print "Host "+hostname+" updated to Flex version "+vl_flex_version
def show_error(conn):
    """Print the stack trace and detail of the most recent logged error.

    NOTE(review): investor id 5801 is hard-coded -- confirm whether this
    should be parameterized.
    """
    cur = conn.cursor()
    cur.execute("SELECT stack_trace, detail FROM tb_log_error WHERE id_investor = 5801 ORDER BY dt_error DESC LIMIT 1")
    rows = cur.fetchall()
    print rows[0][0]
    print rows[0][1]
def get_connection():
    """Open a psycopg2 connection from postgres_* environment variables.

    Raises KeyError when any of the required variables is unset.
    """
    postgres_database = os.environ['postgres_database']
    postgres_user = os.environ['postgres_user']
    postgres_password = os.environ['postgres_password']
    postgres_host = os.environ['postgres_host']
    postgres_port = os.environ['postgres_port']
    return psycopg2.connect(database=postgres_database, user=postgres_user, password=postgres_password, host=postgres_host, port=postgres_port)
# def set_enviroment_vars():
# f = open('/tmp/envs.conf')
# for line in f:
def init(args):
    """Dispatch on the 'action' environment variable, then close the DB.

    *args* supplies the positional arguments of the selected action.
    """
    conn = get_connection()
    # docker-compose up
    if (os.environ['action'] == "dump_table"):
        # docker-compose run dump_table tb_asset_operation
        dump_table(args[0], conn)
    if (os.environ['action'] == "update_flex_version"):
        # docker-compose run update_flex_version 4324 alpha/rf/support
        update_flex_version(args[0], args[1], conn)
    if (os.environ['action'] == "show_error"):
        # docker-compose run show_error
        show_error(conn)
    conn.close()


if __name__ == "__main__":
    init(sys.argv[1:])
| Python | 0.000001 | |
bfaeeec3f5f5582822e2918491090815a606ba44 | Add test to make sure imports and __all__ matches | test/test_api.py | test/test_api.py | # -*- coding: utf-8 -*-
import warthog.api
def test_public_exports():
    """The module's __all__ must list exactly its public attributes."""
    public = {name for name in dir(warthog.api) if not name.startswith('_')}
    advertised = set(warthog.api.__all__)
    assert public == advertised, 'Exports and __all__ members should match'
| Python | 0.000037 | |
48857638694ceca08c64d7b9c6825e2178c53279 | Add function decorator to improve functools.wraps | pylearn2/utils/doc.py | pylearn2/utils/doc.py | """
Documentation-related helper classes/functions
"""
class soft_wraps:
    """
    A Python decorator which concatenates two functions' docstrings: one
    function is defined at initialization and the other one is defined
    when soft_wraps is called.

    This helps reduce the amount of documentation to write: one can use
    this decorator on a child class's method when its implementation is
    similar to the parent's. Conversely, if a method departs from its
    parent's implementation, one can simply explain the differences in a
    'Notes' section without re-writing the whole docstring.

    Examples
    --------
    >>> def parent(x):
    ...     '''Adds 1 to x.'''
    ...     return x + 1
    >>> @soft_wraps(parent)
    ... def child(x):
    ...     '''Notes: also prints the result.'''
    ...     rval = x + 1
    ...     print(rval)
    ...     return rval
    >>> child.__doc__
    'Adds 1 to x.Notes: also prints the result.'
    """
    def __init__(self, f, append=False):
        """
        Parameters
        ----------
        f : function
            Function whose docstring will be concatenated with the
            decorated function's docstring.
        append : bool, optional
            If True, appends f's docstring to the decorated function's
            docstring instead of prepending it. Defaults to False.
        """
        self.f = f
        self.append = append

    def __call__(self, f):
        """
        Prepend self.f's docstring to f's docstring (or append it if
        `self.append == True`). Missing docstrings are treated as empty
        strings instead of raising a TypeError.

        Parameters
        ----------
        f : function
            Function to decorate

        Returns
        -------
        f : function
            The same function, with its __doc__ updated.
        """
        own_doc = f.__doc__ or ''
        other_doc = self.f.__doc__ or ''
        if self.append:
            # Bug fix: the original read ``f.__doc__ += + self.f.__doc__``;
            # the stray unary ``+`` on a str raised TypeError, so the
            # append branch could never work.
            f.__doc__ = own_doc + other_doc
        else:
            f.__doc__ = other_doc + own_doc
        return f
| Python | 0.000001 | |
dfca9c3d7dbbe97516a24bea89b917f7282c7dc7 | Add problem rotate image | python/rotateImage.py | python/rotateImage.py | # https://leetcode.com/problems/rotate-image/
class Solution(object):
    def rotate(self, matrix):
        """
        Rotate the n x n *matrix* 90 degrees clockwise, in place.

        Works ring by ring: for each layer, performs a 4-way cyclic swap
        of the corresponding elements on its four sides.

        :type matrix: List[List[int]]
        :rtype: void Do not return anything, modify matrix in-place instead.
        """
        size = len(matrix)
        # xrange/size/2 -> range/size//2: identical results on Python 2
        # and keeps the code runnable on Python 3.
        for layer in range(size // 2):
            for j in range(layer, size - 1 - layer):
                tmp = matrix[layer][j]
                matrix[layer][j] = matrix[size - j - 1][layer]
                matrix[size - j - 1][layer] = matrix[size - layer - 1][size - j - 1]
                matrix[size - layer - 1][size - j - 1] = matrix[j][size - layer - 1]
                matrix[j][size - layer - 1] = tmp
# Ad-hoc smoke test for Solution.rotate: rotate a 5x5 and a 4x4 matrix
# in place and print the results.
matrix = [
    [1, 2, 3, 4, 5],
    [6, 7, 8, 9, 10],
    [11, 12, 13, 14, 15],
    [16, 17, 18, 19, 20],
    [21, 22, 23, 24, 25],
]

s = Solution()
s.rotate(matrix)
# Portability fix: `print matrix` is a syntax error on Python 3;
# `print(<single arg>)` behaves identically under Python 2 and 3.
print(matrix)

matrix = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]

s.rotate(matrix)
print(matrix)
| Python | 0.000003 | |
cce6a4c2efe62c267b04f6ce75019d577428e2c9 | add sensu_check_dict module | library/sensu_check_dict.py | library/sensu_check_dict.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Blue Box Group, Inc.
# Copyright 2014, Craig Tracey <craigtracey@gmail.com>
# Copyright 2016, Paul Czarkowski <pczarkow@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import traceback
from hashlib import md5
from jinja2 import Environment
def main():
    """Ansible module entry point: create or delete a Sensu check file.

    Writes/removes ``<check_dir>/<name>.json`` containing the check
    definition nested as ``{"checks": {"<name>": {...}}}``.

    NOTE(review): control flow relies on ``module.exit_json()`` not
    returning (AnsibleModule terminates the process there) — confirm
    against the Ansible version in use.
    """
    module = AnsibleModule(
        argument_spec=dict(
            # Name of the check; also used as the JSON file's base name.
            name=dict(default=None, required=True),
            check_dir=dict(default='/etc/sensu/conf.d/checks', required=False),
            state=dict(default='present', required=False, choices=['present','absent']),
            # The check definition itself, nested under "checks"."<name>" below.
            check=dict(type='dict', required=True)
        )
    )

    if module.params['state'] == 'present':
        try:
            changed = False  # NOTE(review): assigned but never read
            check_path = '%s/%s.json' % (module.params['check_dir'], module.params['name'])
            # Sensu expects {"checks": {"<name>": {...}}} as the file layout.
            check=dict({
                'checks': {
                    module.params['name']: module.params['check']
                }
            })
            if os.path.isfile(check_path):
                with open(check_path) as fh:
                    # Only rewrite when content differs so repeat runs are
                    # idempotent (changed=False).
                    if json.load(fh) == check:
                        module.exit_json(changed=False, result="ok")
                    else:
                        with open(check_path, "w") as fh:
                            fh.write(json.dumps(check, indent=4))
                        module.exit_json(changed=True, result="changed")
            else:
                with open(check_path, "w") as fh:
                    fh.write(json.dumps(check, indent=4))
                module.exit_json(changed=True, result="created")
        except Exception as e:
            formatted_lines = traceback.format_exc()
            module.fail_json(msg="creating the check failed: %s %s" % (e,formatted_lines))
    else:
        # state == 'absent': remove the check file if present.
        try:
            changed = False  # NOTE(review): assigned but never read
            check_path = '%s/%s.json' % (module.params['check_dir'], module.params['name'])
            if os.path.isfile(check_path):
                os.remove(check_path)
                module.exit_json(changed=True, result="changed")
            else:
                module.exit_json(changed=False, result="ok")
        except Exception as e:
            formatted_lines = traceback.format_exc()
            module.fail_json(msg="removing the check failed: %s %s" % (e,formatted_lines))
# this is magic, see lib/ansible/module_common.py
# NOTE(review): Ansible substitutes common module boilerplate for this
# wildcard import at runtime; AnsibleModule comes from it.
from ansible.module_utils.basic import *
# Executed unconditionally: Ansible runs this file as a standalone script.
main()
| Python | 0.000001 | |
5dfa0e6333403c1082be0d276f9775c55ba0e74e | add settings | net1/settings.py | net1/settings.py | import astrometry.net1.secrets.django_db as secrets
#GMAPS_API_KEY = 'ABQIAAAA7dWWcc9pB-GTzZE7CvT6SRTpFHQyuc9zMeXV0wfLqFiAr83b_xQ02hHbHnV4CjuIf4L-3WI_XATmBQ'
# edge.astrometry.net
#GMAPS_API_KEY = 'ABQIAAAA7dWWcc9pB-GTzZE7CvT6SRRd6OZDG7afgOT-qBD56qXrD_4sXBRoWqZoA0bluUpwPo-gQuBqRm5Tug'
# oven.cosmo.fas.nyu.edu
GMAPS_API_KEY = 'ABQIAAAAdOKbO45hSEoNCGlzTiew7BRWPaQfegoYWoxyhnGKpr3zYcSQBxRo-Gk2drjLibynnK4VOeUcCehGxA'
GMAPS_HOSTS = ['edge%i.astrometry.net' % i for i in [1,2,3,4,5]]
W3C_VALIDATOR_URL = 'http://oven.cosmo.fas.nyu.edu:8888/w3c-markup-validator/check'
#UPLOADER_URL = '/test/uploader'
UPLOADER_URL = '/uploader'
UPLOAD_DIR = '/data2/TEMP-test'
#BASEDIR = '/home/gmaps/test/'
BASEDIR = '/home/dstn/astrometry/src/'
DATADIR = '/home/dstn/test/'
LOG_DIR = BASEDIR + 'astrometry/net1/log/'
ANDIR = BASEDIR + 'astrometry/'
UTIL_DIR = ANDIR + 'util/'
BLIND_DIR = ANDIR + 'blind/'
WEB_DIR = ANDIR + 'net1/'
JOB_QUEUE_DIR = DATADIR + 'job-queue/'
BASE_URL = 'http://oven.cosmo.fas.nyu.edu:9000/'
# for astrometry.net.server
MAIN_SERVER = 'http://oven.cosmo.fas.nyu.edu:8888'
SERVER_LOGFILE = LOG_DIR + 'server.log'
#BACKEND_CONFIG = '/home/dstn/go/django/backend-%s.cfg'
BACKEND_CONFIG = '/home/gmaps/shard-backend.cfg'
LOGFILE = LOG_DIR + 'django.log'
PORTAL_LOGFILE = LOG_DIR + 'portal.log'
VO_LOGFILE = LOG_DIR + 'vo.log'
HENRY_DRAPER_CAT = ANDIR + 'net1/hd.fits'
TYCHO_MKDT = ANDIR + 'net1/tycho.mkdt.fits'
TILERENDER = ANDIR + 'render/tilerender'
SIPSO = ANDIR + 'util/_sip.so'
CACHEDIR = DATADIR + 'tilecache/'
RENDERCACHEDIR = DATADIR + 'rendercache/'
TEMPDIR = '/tmp'
#SITE_NAME = 'test'
SITE_NAME = 'edge'
JOB_DIR = DATADIR + 'web-data/'
#FIELD_DIR = '/home/gmaps/test/web-data/fields'
FIELD_DIR = DATADIR + 'fields'
APPEND_SLASH=False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
ACCOUNT_ADMIN = 'dstn@cs.toronto.edu'
SESSION_COOKIE_NAME = 'AstrometryTestSession'
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
# Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'an-test',
'USER': secrets.DATABASE_USER,
'PASSWORD': secrets.DATABASE_PASSWORD,
'HOST': secrets.DATABASE_HOST,
'PORT': secrets.DATABASE_PORT,
}
}
## --> see astrometry/net/fixtures/site.json
# Load with: python manage.py loaddata fixtures/site.json
SITE_ID = 2
DEFAULT_FROM_EMAIL = 'Astrometry.net <alpha@astrometry.net>'
# Local time zone for this installation. Choices can be found here:
# http://www.postgresql.org/docs/8.1/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
# although not all variations may be possible on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Toronto'
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
MEDIA_ROOT = WEB_DIR + 'media/'
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
#MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin-media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '*pc#4fb*(%4gvp1-5yq6a_s&=4!gnui9r*53d+!*&s0=(@_ida'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
# default set:
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
# added:
'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.middleware.doc.XViewMiddleware',
)
ROOT_URLCONF = 'astrometry.net1.root-urls-test'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/job/newurl/'
TEMPLATE_DIRS = (
WEB_DIR,
)
AUTH_PROFILE_MODULE = 'portal.userprofile'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'astrometry.net1.tile',
'astrometry.net1.upload',
'astrometry.net1.portal',
#'astrometry.net1.vo',
#'astrometry.net1.testbed',
)
| Python | 0 | |
577b84cf124a35b49311e39ab4d40ef0f8af59ed | introduce proso.analysis module | proso/analysis.py | proso/analysis.py | import json
import hashlib
import os
def get_experiment_data(name, compute_fun, cache_dir, cached=True, **kwargs):
    """Compute (or load from a JSON cache) the data for an experiment.

    Parameters
    ----------
    name : str
        Experiment name, used as the cache-file prefix.
    compute_fun : callable
        Called as ``compute_fun(**kwargs)`` to produce the data.
    cache_dir : str
        Directory where the JSON cache file is stored.
    cached : bool
        When True, read/write the cache; when False, always recompute
        and do not touch the filesystem.

    Returns
    -------
    The computed (or cached) result; cached results have numeric-looking
    dict keys restored via ``_convert_json_keys``.
    """
    # Bug fix: the hash was computed but never used, so two experiments
    # with the same name but different kwargs collided on one cache file.
    # .encode() also makes hashlib work on both Python 2 and 3.
    kwargs_hash = hashlib.sha1(
        json.dumps(kwargs, sort_keys=True).encode('utf-8')).hexdigest()
    filename = '{}/{}_{}.json'.format(cache_dir, name, kwargs_hash)
    if cached and os.path.exists(filename):
        with open(filename, 'r') as f:
            # JSON stringifies all keys; convert numeric ones back.
            return _convert_json_keys(json.loads(f.read()))
    result = compute_fun(**kwargs)
    if cached:
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir)
        with open(filename, 'w') as f:
            f.write(json.dumps(result, sort_keys=True))
    return result
def _convert_json_keys(json_struct):
    """Recursively restore numeric dict keys after a JSON round-trip.

    JSON object keys are always strings; this walks the structure and
    converts keys that look like ints/floats back to numbers.
    """
    if isinstance(json_struct, list):
        # Portability fix: `map` returns an iterator on Python 3; a list
        # comprehension keeps the Python 2 behavior on both versions.
        return [_convert_json_keys(item) for item in json_struct]
    elif isinstance(json_struct, dict):
        # Portability fix: `.iteritems()` is Python-2-only.
        # Consistency fix: values are now recursed too, so keys of nested
        # dicts inside dicts are converted just like dicts inside lists.
        return {_maybe_convert_str(key): _convert_json_keys(val)
                for (key, val) in json_struct.items()}
    else:
        return json_struct
def _maybe_convert_str(x):
    """Return *x* converted to int or float when possible, else unchanged.

    Strings of pure digits are tried as int first; everything is then
    tried as float; non-numeric strings come back as-is.
    """
    if x.isdigit():
        candidates = (int, float)
    else:
        candidates = (float,)
    for convert in candidates:
        try:
            return convert(x)
        except ValueError:
            pass
    return x
| Python | 0 | |
45cb940db74d99b0dac31a2aace3d8505e4a9046 | Add empty file to contain main part of module | datac/main.py | datac/main.py | # -*- coding: utf-8 -*-
import copy
| Python | 0.000001 | |
323fb80744e63a322fe5ed70d86130aa61aa3c19 | Remove unused imports | examples/manifold/plot_swissroll.py | examples/manifold/plot_swissroll.py | """
===================================
Swiss Roll reduction with LLE
===================================

An illustration of Swiss Roll reduction
with locally linear embedding
"""

# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD, (C) INRIA 2011

print __doc__

import pylab as pl

#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll

from scikits.learn import manifold, datasets
# 1500 sample points on the roll, plus a color value per point for plotting.
X, color = datasets.samples_generator.swiss_roll(1500)

print "Computing LLE embedding"
# 12 neighbors, embed down to 2 dimensions.
X_r, err = manifold.locally_linear_embedding(X, 12, 2)
print "Done. Reconstruction error: %g" % err

#----------------------------------------------------------------------
# Plot result
fig = pl.figure()
# NOTE(review): the '3d' projection normally requires mpl_toolkits.mplot3d
# to be registered; with that import removed, confirm the installed
# matplotlib version still accepts projection='3d' here.
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:,0], X_r[:,1], c=color)
# Hide tick marks on the 2D embedding plot.
pl.xticks([]), pl.yticks([])
pl.show()
| """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD, (C) INRIA 2011
print __doc__
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from scikits.learn import manifold, datasets
X, color = datasets.samples_generator.swiss_roll(1500)
print "Computing LLE embedding"
X_r, err = manifold.locally_linear_embedding(X, 12, 2)
print "Done. Reconstruction error: %g" % err
#----------------------------------------------------------------------
# Plot result
fig = pl.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:,0], X_r[:,1], c=color)
pl.xticks([]), pl.yticks([])
pl.show()
| Python | 0 |
e8c75e84a158876e71a926bec244af43ad93cbc4 | add imu class | imu.py | imu.py | import serial
import math
import struct
class IMU:
    """Class for working with a Microstrain IMU

    Talks to the device over a serial port and parses the fixed-size
    binary response of the 0xC8 (accel/angular-rate/orientation) command.
    All multi-byte fields are big-endian ('>' in the struct formats).
    """
    def __init__(self):
        # Serial-port settings and the command/response sizes for the
        # accel/angular-rate/orientation request.
        self.IMU_PORT = '/dev/ttyS0'
        self.IMU_BAUD = 115200
        # 0xC8: presumably the Microstrain "acceleration, angular rate and
        # orientation matrix" command — confirm against the device manual.
        self.CMD_ACCEL_ANG_ORIENT = '\xC8'
        self.CMD_ACCEL_ANG_ORIENT_SIZE = 67
        self.IMU_COMMAND = self.CMD_ACCEL_ANG_ORIENT
        self.IMU_MESSAGE_SIZE = self.CMD_ACCEL_ANG_ORIENT_SIZE

    def open_imu(self):
        """Open the serial connection to the IMU."""
        self.imu = serial.Serial(self.IMU_PORT, self.IMU_BAUD)

    def close_imu(self):
        """Close the serial connection."""
        self.imu.close()

    def read_imu(self):
        """Request one sample and decode it.

        Returns accelerations, three orientation-matrix entries, angular
        rates, a timestamp in seconds, and the raw response bytes.
        """
        self.imu.write(self.IMU_COMMAND)
        #TODO check IMU write
        data = []
        data = self.imu.read(self.IMU_MESSAGE_SIZE)
        #TODO check read status, check first char, checksum

        #conversion to numbers
        # Big-endian IEEE-754 floats; byte 0 is the echoed command byte.
        accel_x = struct.unpack('>f', data[1:5])[0]
        accel_y = struct.unpack('>f', data[5:9])[0]
        accel_z = struct.unpack('>f', data[9:13])[0]

        ang_rate_x = struct.unpack('>f', data[13:17])[0]
        ang_rate_y = struct.unpack('>f', data[17:21])[0]
        ang_rate_z = struct.unpack('>f', data[21:25])[0]

        #orientation matrix
        # Only three entries of the matrix are decoded (offsets 33, 45, 57).
        m_1 = struct.unpack('>f', data[33:37])[0]
        m_2 = struct.unpack('>f', data[45:49])[0]
        m_3 = struct.unpack('>f', data[57:61])[0]

        #handle clock rollover outside of function
        t = 0
        # Device tick counter, unsigned 32-bit big-endian.
        t = struct.unpack('>I', data[61:65])[0]
        time = 0.0
        time = t / 62500.0 # convert time to seconds

        return accel_x, accel_y, accel_z, m_1, m_2, m_3, ang_rate_x, ang_rate_y, ang_rate_z, time, data
def main():
    """Smoke test: open the IMU, read one sample, print it, close up."""
    imu = IMU()
    imu.open_imu()
    accel_x, accel_y, accel_z, m_1, m_2, m_3, ang_rate_x, ang_rate_y, ang_rate_z, time, data = imu.read_imu()
    # Portability fix: `print x` statements are Python-2-only syntax;
    # print(<single arg>) behaves identically under Python 2 and 3.
    print(accel_x)
    print(accel_y)
    print(accel_z)
    print(ang_rate_x)
    print(ang_rate_y)
    print(ang_rate_z)
    print(time)
    imu.close_imu()

if __name__ == "__main__":
    main()
| Python | 0.000001 | |
fb37af691d63ab8a43d50701d6b1f8ae027e2e1b | Create dfirwizard.py | dfirwizard.py | dfirwizard.py | #!/usr/bin/python
# Sample program or step 1 in becoming a DFIR Wizard!
# No license as this code is simple and free!
# Opens a disk image with pytsk3 (The Sleuth Kit bindings) and prints
# one line per partition-table entry.
# NOTE(review): `sys` is imported but never used below.
import sys
import pytsk3
# Hard-coded image path; the file must exist in the working directory.
imagefile = "Stage2.vhd"
imagehandle = pytsk3.Img_Info(imagefile)
partitionTable = pytsk3.Volume_Info(imagehandle)
for partition in partitionTable:
    # start is in sectors; start * 512 presumably converts to a byte
    # offset assuming 512-byte sectors — confirm against the image.
    print partition.addr, partition.desc, "%ss(%s)" % (partition.start, partition.start * 512), partition.len
ac83a8bbef2c61021c39c77ef3c14675383edc62 | Fix a typo. | packs/st2/actions/lib/action.py | packs/st2/actions/lib/action.py | from st2actions.runners.pythonrunner import Action
from st2client.client import Client
from st2client.models.keyvalue import KeyValuePair # pylint: disable=no-name-in-module
from lib.utils import filter_none_values
__all__ = [
'St2BaseAction'
]
class St2BaseAction(Action):
    """Base class for st2 pack actions that talk to the StackStorm API."""

    def __init__(self, config):
        super(St2BaseAction, self).__init__(config)
        # Keep the client and key-value-pair classes on the instance so
        # subclasses (and tests) can reference or swap them.
        self._client = Client
        self._kvp = KeyValuePair
        self.client = self._get_client()

    def _get_client(self):
        """Build an st2 API client from the configured base_url.

        NOTE(review): on failure the exception object itself is returned
        rather than raised, so ``self.client`` may end up holding an
        Exception instance — callers should be aware.
        """
        host = self.config['base_url']

        try:
            return self._client(base_url=host)
        except Exception as e:
            return e

    def _run_client_method(self, method, method_kwargs, format_func):
        """
        Run the provided client method and format the result.

        :param method: Client method to run.
        :type method: ``func``

        :param method_kwargs: Keyword arguments passed to the client method.
        :type method_kwargs: ``dict``

        :param format_func: Function for formatting the result.
        :type format_func: ``func``

        :rtype: ``list`` of ``dict``
        """
        # Filter out parameters with string value of "None"
        # This is a work around since the default values can only be strings
        method_kwargs = filter_none_values(method_kwargs)
        method_name = method.__name__
        self.logger.debug('Calling client method "%s" with kwargs "%s"' % (method_name,
                                                                           method_kwargs))

        result = method(**method_kwargs)
        result = format_func(result)

        return result
| from st2actions.runners.pythonrunner import Action
from st2client.client import Client
from st2client.models.datastore import KeyValuePair # pylint: disable=no-name-in-module
from lib.utils import filter_none_values
__all__ = [
'St2BaseAction'
]
class St2BaseAction(Action):
    """Base class for st2 pack actions that talk to the StackStorm API."""

    def __init__(self, config):
        super(St2BaseAction, self).__init__(config)
        # Expose the client and key-value-pair classes on the instance so
        # subclasses can reference or substitute them.
        self._client = Client
        self._kvp = KeyValuePair
        self.client = self._get_client()

    def _get_client(self):
        """Build an st2 API client from the configured ``base_url``.

        Preserved behavior: on failure the exception object is returned,
        not raised.
        """
        base_url = self.config['base_url']
        try:
            return self._client(base_url=base_url)
        except Exception as exc:
            return exc

    def _run_client_method(self, method, method_kwargs, format_func):
        """Invoke *method* with cleaned *method_kwargs* and format the result.

        ``method_kwargs`` entries whose value is "None" are dropped first
        (defaults can only be strings), the call is logged at debug level,
        and ``format_func`` shapes the return value (a list of dicts).
        """
        cleaned_kwargs = filter_none_values(method_kwargs)
        self.logger.debug('Calling client method "%s" with kwargs "%s"' % (method.__name__,
                                                                           cleaned_kwargs))
        return format_func(method(**cleaned_kwargs))
| Python | 0.999957 |
7ff614950163b1fb6a8fe0fef5b8de9bfa3a9d85 | Add a test for the hard-coded re() partial frac form | transmutagen/tests/test_partialfrac.py | transmutagen/tests/test_partialfrac.py | from sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
    """Regression guard for the hard-coded re() partial-fraction form."""
    theta, alpha = symbols('theta, alpha')

    # The simplified real part must keep exactly this shape.
    actual = together(expand_complex(re(alpha / (t - theta))))
    numerator = t * re(alpha) - re(alpha) * re(theta) - im(alpha) * im(theta)
    denominator = (t - re(theta)) ** 2 + im(theta) ** 2
    assert actual == numerator / denominator
| Python | 0.000035 | |
352e2d053b8880e1e1a951be4338c188fee925d1 | order book testing first iteration | orderbooktest.py | orderbooktest.py | import time
try:
import ujson as json
except ImportError:
import json
from orderbook.book import Book
def dict_compare(new_dictionary, old_dictionary, price_map=False, order_map=False):
    """Diff two dictionaries key by key.

    :param new_dictionary: the dict taken as "new" state
    :param old_dictionary: the dict taken as "old" state
    :param price_map: unused legacy flag, kept for call compatibility
    :param order_map: unused legacy flag, kept for call compatibility
    :returns: tuple ``(added, removed, modified, same)`` where
        ``added``/``removed``/``same`` are sets of keys and ``modified``
        maps each common key with differing values to
        ``(new_value, old_value)``.
    """
    # Cleanup: removed the dead `modified = []` placeholder and the large
    # commented-out price-map verification block from the original.
    new_keys = set(new_dictionary.keys())
    old_keys = set(old_dictionary.keys())
    intersect_keys = new_keys.intersection(old_keys)
    added = new_keys - old_keys
    removed = old_keys - new_keys
    modified = {key: (new_dictionary[key], old_dictionary[key])
                for key in intersect_keys
                if new_dictionary[key] != old_dictionary[key]}
    same = set(key for key in intersect_keys
               if new_dictionary[key] == old_dictionary[key])
    return added, removed, modified, same
def test_orderbook():
    """Replay recorded messages into a Book and compare against a snapshot.

    Loads a level-3 snapshot plus the message stream recorded after it,
    applies the messages to one book, loads the post-stream snapshot into
    a second book, and reports differences between the two ask-side
    price maps.
    """
    variable_order_book = Book()
    control_order_book = Book()

    with open('testdata/messages.json') as messages_json_file:
        messages = json.load(messages_json_file)

    with open('testdata/beginning_level_3.json') as begin_json_file:
        beginning_level_3 = json.load(begin_json_file)

    with open('testdata/ending_level_3.json') as end_json_file:
        ending_level_3 = json.load(end_json_file)

    # Sanity-check the sequence numbers line up with the snapshots.
    # NOTE(review): AssertionError is swallowed here — a bad data set only
    # prints a warning and the test keeps running.
    try:
        assert beginning_level_3['sequence'] + 1 == messages[0]['sequence']
        assert ending_level_3['sequence'] == messages[-1]['sequence']
    except AssertionError:
        print("Problem with sample data sequences")

    variable_order_book.get_level3(beginning_level_3)
    start = time.time()
    # List comprehension used purely for its side effects (applies every
    # message to the book).
    [variable_order_book.process_message(message) for message in messages]
    end = time.time()
    # NOTE(review): if processing finishes within clock resolution,
    # end == start would raise ZeroDivisionError here.
    print('messages per sec: {0}'.format(int(len(messages)/(end-start))))
    control_order_book.get_level3(ending_level_3)
    # assert variable_order_book.asks.price_map == control_order_book.asks.price_map
    added, removed, modified, same = dict_compare(variable_order_book.asks.price_map, control_order_book.asks.price_map,
                                                  price_map=True)
    if added:
        print('superfluous entries: {0}'.format(added))
    if removed:
        print('missing entries: {0}'.format(removed))
    # if modified:
    #     print('modified entries: {0}'.format(modified))
    #
if __name__ == '__main__':
    # Manual entry point: runs the sample-data replay end to end.
    test_orderbook()
| Python | 0 | |
7522ffb9f6934de02d5d326d5f798d42a2da800d | add script to find old experimental apis | pdfium/find_old_experimental.py | pdfium/find_old_experimental.py | #!/usr/bin/env python3
#
# Copyright 2019 Miklos Vajna. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""Finds my old + experimental APIs."""
import subprocess
import time
def _get_author_and_date(path: str, line_num: str) -> tuple:
    """Return (author first name, author-time epoch) for one line via git blame."""
    blame_bytes = subprocess.check_output(
        ["git", "blame", "--porcelain", "-L", line_num + "," + line_num, path])
    date = 0
    author = ""
    # Porcelain output carries one "author ..." and one "author-time ..."
    # header line per commit.
    for line in blame_bytes.decode("utf-8").strip().split("\n"):
        if line.startswith("author-time"):
            date = int(line.split(" ")[1])
        elif line.startswith("author "):
            author = line.split(" ")[1]
    return author, date


def main() -> None:
    """Commandline interface to this module.

    Greps public/ for "Experimental API" markers, blames each hit, and
    prints my own markers older than three years.
    """
    apis_bytes = subprocess.check_output(["git", "grep", "-n", "Experimental API", "public/"])
    apis = apis_bytes.decode("utf-8").strip().split("\n")

    author_date_loc = []
    for api in apis:
        # git grep -n output is "path:line:match".
        tokens = api.split(":")
        path = tokens[0]
        line_num = tokens[1]
        author, date = _get_author_and_date(path, line_num)
        author_date_loc.append((author, date, path + ":" + line_num))

    # Oldest first.
    author_date_loc.sort(key=lambda entry: entry[1])

    # Hoisted out of the loop; 31536000 = seconds in a 365-day year.
    cutoff = time.time() - 3 * 31536000
    for author, date, loc in author_date_loc:
        if author != "Miklos":
            continue
        if date >= cutoff:
            continue
        date_string = time.strftime("%Y-%m-%d", time.localtime(date))
        # Fix: the original appended a pointless empty string (`+""`).
        print(f"date: '{date_string}', loc: {loc}")


if __name__ == "__main__":
    main()
# vim:set shiftwidth=4 softtabstop=4 expandtab:
| Python | 0 | |
05dd8bdfeab63b3096e8f7d98032088133d1f0e5 | Add function provider to get osm data | campaign_manager/provider.py | campaign_manager/provider.py | import json
import hashlib
import os
from reporter import config
from reporter.utilities import (
split_bbox,
)
from reporter.osm import (
load_osm_document
)
from urllib.parse import quote
from reporter.queries import TAG_MAPPING, OVERPASS_QUERY_MAP
def get_osm_data(bbox, feature):
    """Get osm data.

    :param bbox: String describing a bbox e.g. '106.78674459457397,
        -6.141301491467023,106.80691480636597,-6.133834354201348'

    :param feature: The type of feature to extract:
        buildings, building-points, roads, potential-idp, boundary-[1,11]
    :type feature: str

    :returns: A dict from retrieved OSM dataset.
    :rtype: dict

    .. note:: An invalid ``bbox`` is not reported to the caller; the
        query silently falls back to the default ``config.BBOX`` area.
    """
    server_url = 'http://overpass-api.de/api/interpreter?data='
    tag_name = feature
    overpass_verbosity = 'body'

    try:
        coordinates = split_bbox(bbox)
    except ValueError:
        # Cleanup: removed the dead `error = "Invalid area"` local that
        # was assigned but never read. Fall back to the default area.
        coordinates = split_bbox(config.BBOX)

    feature_type = TAG_MAPPING[tag_name]

    parameters = coordinates
    parameters['print_mode'] = overpass_verbosity
    query = OVERPASS_QUERY_MAP[feature_type].format(**parameters)

    # Query to returns json string
    query = '[out:json];' + query

    encoded_query = quote(query)
    url_path = '%s%s' % (server_url, encoded_query)

    # Cache key: md5 of the raw query, stored under config.CACHE_DIR.
    safe_name = hashlib.md5(query.encode('utf-8')).hexdigest() + '.osm'
    file_path = os.path.join(config.CACHE_DIR, safe_name)
    osm_document = load_osm_document(file_path, url_path)

    osm_data = json.loads(osm_document.read())
    return osm_data
| Python | 0 | |
9305f158b71f65923ee37de2805324db362e0db6 | Add DRF LocalDateTimeField | arcutils/drf/serializers.py | arcutils/drf/serializers.py | from django.utils import timezone
from rest_framework import serializers
class LocalDateTimeField(serializers.DateTimeField):
    """DRF datetime field that serializes values in the local time zone."""

    def to_representation(self, value):
        # Shift into the active/default time zone first, then let the
        # stock DateTimeField handle the actual formatting.
        local_value = timezone.localtime(value)
        return super().to_representation(local_value)
| Python | 0 | |
3f9aae149dba5c9b68ff6f7fd83cadf3fd6b1d7d | Add automorphic number implementation (#7978) | maths/automorphic_number.py | maths/automorphic_number.py | """
== Automorphic Numbers ==
A number n is said to be a Automorphic number if
the square of n "ends" in the same digits as n itself.
Examples of Automorphic Numbers: 0, 1, 5, 6, 25, 76, 376, 625, 9376, 90625, ...
https://en.wikipedia.org/wiki/Automorphic_number
"""
# Author : Akshay Dubey (https://github.com/itsAkshayDubey)
# Time Complexity : O(log10n)
def is_automorphic_number(number: int) -> bool:
    """Return True when *number*'s square ends in *number*'s own digits.

    Runs in O(log10 n): one digit pair is compared per iteration.

    >>> is_automorphic_number(-1)
    False
    >>> is_automorphic_number(0)
    True
    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    >>> is_automorphic_number(259918212890625)
    True
    >>> is_automorphic_number(5.0)
    Traceback (most recent call last):
        ...
    TypeError: Input value of [number=5.0] must be an integer
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 0:
        return False
    square = number * number
    remaining = number
    # Peel trailing digits off both values in lockstep; any mismatch
    # means the square does not end in the original digits.
    while remaining > 0:
        remaining, digit = divmod(remaining, 10)
        square, square_digit = divmod(square, 10)
        if digit != square_digit:
            return False
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| Python | 0 | |
34391723f44c81ceab77fd3200ee34c9f1b2d4b2 | add plugin factory | pilot/common/pluginfactory.py | pilot/common/pluginfactory.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, wen.guan@cern.ch, 2018
import logging
logger = logging.getLogger(__name__)
"""
A factory to manage plugins
"""
class PluginFactory(object):
def __init__(self, *args, **kwargs):
self.classMap = {}
def get_plugin(self, confs):
"""
Load plugin class
:param confs: a dict of configurations.
"""
class_name = confs['class']
if class_name is None:
logger.error("[class] is not defined in confs: %s" % confs)
return None
if class_name not in self.classMap:
logger.info("Trying to import %s" % class_name)
components = class_name.split('.')
mod = __import__('.'.join(components[:-1]))
for comp in components[1:]:
mod = getattr(mod, comp)
self.classMap[class_name] = mod
args = {}
for key in confs:
if key in ['class']:
continue
args[key] = confs[key]
cls = self.classMap[class_name]
logger.info("Importing %s with args: %s" % (cls, args))
impl = cls(**args)
return impl
| Python | 0 | |
9f016a58a98ba89b9feae68dd01e752d75a628ec | Update test_client.py | tests/test_client.py | tests/test_client.py | import pytest
from mock import patch, Mock
from plaid import Client, require_access_token
def test_require_access_token_decorator():
class TestClass(object):
access_token = 'foo'
@require_access_token
def some_func(self):
return True
obj = TestClass()
obj.some_func()
def test_require_access_token_decorator_raises():
class TestClass(object):
access_token = None
@require_access_token
def some_func(self):
return True
obj = TestClass()
with pytest.raises(Exception):
obj.some_func()
def test_connect():
with patch('requests.post') as mock_requests_post:
mock_response = Mock()
mock_response.content = '{}'
mock_requests_post.return_value = mock_response
client = Client('myclientid', 'mysecret')
account_type = 'bofa'
username = 'foo'
password = 'bar'
email = 'foo@bar.com'
response = client.connect(account_type, username, password, email)
assert mock_response == response
def test_step():
with patch('requests.post') as mock_requests_post:
client = Client('myclientid', 'mysecret', 'token')
client.step('bofa', 'foo')
assert mock_requests_post.called
def test_step_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.step('bofa', 'foo')
def test_delete_user():
with patch('requests.delete') as mock_requests_delete:
client = Client('myclientid', 'mysecret', 'token')
client.delete_user()
assert mock_requests_delete.called
def test_delete_user_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.delete_user('bofa', 'foo')
def test_transactions():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret', 'token')
ret = client.transactions()
assert mock_requests_get.called
assert ret is not None
def test_transactions_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.transactions()
def test_balance():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret', 'token')
ret = client.balance()
assert mock_requests_get.called
assert ret is not None
def test_balance_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.balance()
def test_entity():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret')
client.entity(1)
assert mock_requests_get.called
def test_categories():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret')
client.categories()
assert mock_requests_get.called
def test_category():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret')
client.category(1)
assert mock_requests_get.called
def test_categories_by_mapping():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret')
client.categories_by_mapping('Food > Spanish Restaurant', 'plaid')
assert mock_requests_get.called
| import pytest
from mock import patch, Mock
from plaid import Client, require_access_token
def test_require_access_token_decorator():
class TestClass(object):
access_token = 'foo'
@require_access_token
def some_func(self):
return True
obj = TestClass()
obj.some_func()
def test_require_access_token_decorator_raises():
class TestClass(object):
access_token = None
@require_access_token
def some_func(self):
return True
obj = TestClass()
with pytest.raises(Exception):
obj.some_func()
def test_connect():
with patch('requests.post') as mock_requests_post:
mock_response = Mock()
mock_response.content = '{}'
mock_requests_post.return_value = mock_response
client = Client('myclientid', 'mysecret')
account_type = 'bofa'
username = 'foo'
password = 'bar'
email = 'foo@bar.com'
response = client.connect(account_type, username, password, email)
assert mock_response == response
def test_step():
with patch('requests.post') as mock_requests_post:
client = Client('myclientid', 'mysecret', 'token')
client.step('bofa', 'foo')
assert mock_requests_post.called
def test_step_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.step('bofa', 'foo')
def test_delete_user():
with patch('requests.delete') as mock_requests_delete:
client = Client('myclientid', 'mysecret', 'token')
client.delete_user()
assert mock_requests_delete.called
def test_delete_user_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.delete_user('bofa', 'foo')
def test_transactions():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret', 'token')
ret = client.transactions()
assert mock_requests_get.called
assert ret is not None
def test_transactions_requires_access_token():
client = Client('myclientid', 'mysecret')
with pytest.raises(Exception):
client.transactions()
def test_balance():
with patch('requests.get') as mock_requests_get:
client = Client('myclientid', 'mysecret', 'token')
ret = client.balance()
assert mock_requests_get.called
assert not ret is None
def test_balance_requires_access_token():
    # Without an access token, balance() must raise.
    client = Client('myclientid', 'mysecret')
    with pytest.raises(Exception):
        client.balance()
def test_entity():
    # entity() is public (no token needed) and should GET from the API.
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.entity(1)
        assert mock_requests_get.called
def test_categories():
    # categories() is public and should GET from the API.
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.categories()
        assert mock_requests_get.called
def test_category():
    # category() looks up a single category by id via GET.
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.category(1)
        assert mock_requests_get.called
def test_categories_by_mapping():
    # categories_by_mapping() resolves a mapping string for a given type via GET.
    with patch('requests.get') as mock_requests_get:
        client = Client('myclientid', 'mysecret')
        client.categories_by_mapping('Food > Spanish Restaurant', 'plaid')
        assert mock_requests_get.called
| Python | 0.000002 |
9346ca997d723cbfedf383eb78db2f62552f8a7c | Fix empty image list test. | tests/test_comics.py | tests/test_comics.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012 Bastian Kleineidam
import tempfile
import shutil
from itertools import islice
from unittest import TestCase
from dosagelib import scraper
class _ComicTester(TestCase):
    """Base test class for comic scrapers.

    Subclasses are generated dynamically (see generate_comic_testers) with
    `scraperclass` set to a concrete scraper.  Python 2 syntax
    (`except Exception, msg`) is used throughout this file.
    """
    # Filled in by the generated subclass.
    scraperclass=None
    def setUp(self):
        self.name = self.scraperclass.get_name()
    def test_comic(self):
        # Test a scraper. It must be able to traverse backward for
        # at least 5 pages from the start, and find strip images
        # on at least 4 pages.
        scraperobj = self.scraperclass()
        num = empty = 0
        for strip in islice(scraperobj.getAllStrips(), 0, 5):
            # Count images while iterating so this also works when
            # getImages() yields lazily instead of returning a list.
            images = 0
            for image in strip.getImages():
                images += 1
                self.save(image)
            if not images:
                empty += 1
            num += 1
        self.check(num >= 4, 'traversal failed after %d strips.' % num)
        self.check(empty <= 1, 'failed to find images on %d pages.' % empty)
    def save(self, image):
        # create a temporary directory; always removed, even if saving fails
        tmpdir = tempfile.mkdtemp()
        try:
            image.save(tmpdir)
        except Exception, msg:
            self.check(False, 'could not save to %s: %s' % (tmpdir, msg))
        finally:
            shutil.rmtree(tmpdir)
    def check(self, condition, msg):
        # Prefix every failure message with the scraper's name.
        self.assertTrue(condition, "%s: %s" % (self.name, msg))
def generate_comic_testers():
    """Create one TestCase subclass per comic scraper.

    Each generated class is published into this module's namespace so that
    unittest discovery picks it up.  Only the first few scrapers are used
    to keep the test run short.
    """
    # Limit number of scraper tests for now
    limit = 10
    for cls in islice(scraper.get_scrapers(), 0, limit):
        testname = 'Test' + cls.__name__
        globals()[testname] = type(testname, (_ComicTester,),
                                   dict(scraperclass=cls))
generate_comic_testers()
| # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012 Bastian Kleineidam
import tempfile
import shutil
from itertools import islice
from unittest import TestCase
from dosagelib import scraper
class _ComicTester(TestCase):
    """Base test class for comic scrapers (pre-fix revision).

    NOTE(review): this revision calls len() on getImages(); that assumes a
    sequence is returned — presumably the later revision counts during
    iteration to also support generators.  Python 2 syntax is used.
    """
    # Filled in by the generated subclass.
    scraperclass=None
    def setUp(self):
        self.name = self.scraperclass.get_name()
    def test_comic(self):
        # Test a scraper. It must be able to traverse backward for
        # at least 5 pages from the start, and find strip images
        # on at least 4 pages.
        scraperobj = self.scraperclass()
        num = empty = 0
        for strip in islice(scraperobj.getAllStrips(), 0, 5):
            images = strip.getImages()
            if len(images) == 0:
                empty += 1
            for image in images:
                self.save(image)
            num += 1
        self.check(num >= 4, 'traversal failed after %d strips.' % num)
        self.check(empty <= 1, 'failed to find images on %d pages.' % empty)
    def save(self, image):
        # create a temporary directory; always removed, even if saving fails
        tmpdir = tempfile.mkdtemp()
        try:
            image.save(tmpdir)
        except Exception, msg:
            self.check(False, 'could not save to %s: %s' % (tmpdir, msg))
        finally:
            shutil.rmtree(tmpdir)
    def check(self, condition, msg):
        # Prefix every failure message with the scraper's name.
        self.assertTrue(condition, "%s: %s" % (self.name, msg))
def generate_comic_testers():
    """For each comic scraper, create a test class."""
    # Limit number of scraper tests for now
    max_scrapers = 10
    for scraperclass in islice(scraper.get_scrapers(), 0, max_scrapers):
        name = 'Test'+scraperclass.__name__
        # Publish the generated class so unittest discovery finds it.
        globals()[name] = type(name,
            (_ComicTester,),
            dict(scraperclass=scraperclass)
        )
generate_comic_testers()
| Python | 0.000076 |
dad70b067c1afcd44a37c30d21c0e6a35e12ce68 | Add boostrap script for buildout | bootstrap.py | bootstrap.py | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(urlopen('https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
).read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
    def _final_version(parsed_version):
        """Return True if *parsed_version* is a final release.

        setuptools' legacy parsed_version is a tuple of string parts where
        pre-release markers start with '*' (e.g. '*a', '*b'); only the
        sentinels in _final_parts mark a final release.
        """
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s",
repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| Python | 0 | |
acd33bdffb3302d2130505873a062fae39dcd976 | Add WikiText103 and WikiText2 Mocked Unit Tests (#1592) | test/datasets/test_wikitexts.py | test/datasets/test_wikitexts.py | import os
import random
import string
import zipfile
from collections import defaultdict
from unittest.mock import patch
from ..common.parameterized_utils import nested_params
from torchtext.datasets.wikitext103 import WikiText103
from torchtext.datasets.wikitext2 import WikiText2
from ..common.case_utils import TempDirMixin, zip_equal
from ..common.torchtext_test_case import TorchtextTestCase
def _get_mock_dataset(root_dir, base_dir_name):
    """Create a small fake WikiText dataset on disk and return its contents.

    root_dir: directory to the mocked dataset
    base_dir_name: WikiText103 or WikiText2

    Writes wiki.{train,valid,test}.tokens files with a few random lines,
    zips them into the archive name the real dataset loader expects, and
    returns a dict mapping split name -> list of written lines.
    """
    base_dir = os.path.join(root_dir, base_dir_name)
    temp_dataset_dir = os.path.join(base_dir, "temp_dataset_dir")
    os.makedirs(temp_dataset_dir, exist_ok=True)
    # `seed` doubles as the line length, so every line is unique per split.
    seed = 1
    mocked_data = defaultdict(list)
    file_names = ("wiki.train.tokens", "wiki.valid.tokens", "wiki.test.tokens")
    for file_name in file_names:
        csv_file = os.path.join(temp_dataset_dir, file_name)
        mocked_lines = mocked_data[os.path.splitext(file_name)[0]]
        with open(csv_file, "w") as f:
            for i in range(5):
                rand_string = " ".join(
                    random.choice(string.ascii_letters) for i in range(seed)
                )
                dataset_line = rand_string
                f.write(f'{rand_string}\n')
                # append line to correct dataset split
                mocked_lines.append(dataset_line)
                seed += 1
    if base_dir_name == WikiText103.__name__:
        compressed_file = "wikitext-103-v1"
    else:
        compressed_file = "wikitext-2-v1"
    compressed_dataset_path = os.path.join(base_dir, compressed_file + ".zip")
    # create zip file from dataset folder
    with zipfile.ZipFile(compressed_dataset_path, "w") as zip_file:
        for file_name in file_names:
            txt_file = os.path.join(temp_dataset_dir, file_name)
            # NOTE(review): every file is written under the same arcname;
            # presumably the loader only reads by position — confirm this
            # matches the real archive layout.
            zip_file.write(txt_file, arcname=compressed_file)
    return mocked_data
class TestWikiTexts(TempDirMixin, TorchtextTestCase):
    """Tests WikiText103/WikiText2 loaders against a mocked on-disk dataset."""
    root_dir = None
    samples = []
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.root_dir = cls.get_base_temp_dir()
        # Bypass torchdata's hash verification so the mocked archive is accepted.
        cls.patcher = patch(
            "torchdata.datapipes.iter.util.cacheholder._hash_check", return_value=True
        )
        cls.patcher.start()
    @classmethod
    def tearDownClass(cls):
        cls.patcher.stop()
        super().tearDownClass()
    @nested_params([WikiText103, WikiText2], ["train", "valid", "test"])
    def test_wikitexts(self, wikitext_dataset, split):
        # Loader output must match the lines written by the mock, in order.
        expected_samples = _get_mock_dataset(self.root_dir, base_dir_name=wikitext_dataset.__name__)[split]
        dataset = wikitext_dataset(root=self.root_dir, split=split)
        samples = list(dataset)
        for sample, expected_sample in zip_equal(samples, expected_samples):
            self.assertEqual(sample, expected_sample)
    @nested_params([WikiText103, WikiText2], ["train", "valid", "test"])
    def test_wikitexts_split_argument(self, wikitext_dataset, split):
        # A bare split string and a 1-tuple must yield identical datasets.
        # call `_get_mock_dataset` to create mock dataset files
        _ = _get_mock_dataset(self.root_dir, wikitext_dataset.__name__)
        dataset1 = wikitext_dataset(root=self.root_dir, split=split)
        (dataset2,) = wikitext_dataset(root=self.root_dir, split=(split,))
        for d1, d2 in zip_equal(dataset1, dataset2):
            self.assertEqual(d1, d2)
| Python | 0 | |
61cd24aef4c9c8ef72527e75991c23873892ec3b | Change listener module file | platform/listener/__init__.py | platform/listener/__init__.py | '''
Module to handle data synchronization with contacts.
'''
| Python | 0.000001 | |
18378b201cae7e23889031044fa6ddbaf50946c5 | check langauge detecting for lett files where we know the expetected language from the URL | baseline/check_lett_lang.py | baseline/check_lett_lang.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
doc2lang = {}
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('referencepairs', type=argparse.FileType('r'))
parser.add_argument('-slang', help='Source language', default='en')
parser.add_argument('-tlang', help='Non-english language', default='fr')
parser.add_argument('-prefix', help='prefix added to make filenames',
default="/fs/syn0/pkoehn/crawl/data/site-crawls")
args = parser.parse_args(sys.argv[1:])
# read all the .lett files from stdin
for line in sys.stdin:
line = line.split("\t")
if len(line) != 6:
# sys.stderr.write("broken format: %s\n" % line[0])
continue
lang = line[0]
filename = line[3].strip()
if filename in doc2lang:
sys.stderr.write("Duplicate entry: %s:%s\n" % (filename, lang))
doc2lang[filename] = lang
# print filename, lang
correct = 0
total = 0
unknown = 0
unknown_but_file = 0
wrong_lang_pair = 0
for line in args.referencepairs:
total += 1
domain, a, b = line.split("\t")
a = a.strip()
b = b.strip()
found = True
for f in (a, b):
if f not in doc2lang:
sys.stderr.write("unknown file %s\n" % (f))
unknown += 1
filename = os.path.join(args.prefix, f.split("/")[0], f)
if os.path.isfile(filename):
sys.stderr.write("but file %s exists\n" % (filename))
unknown_but_file += 1
found = False
elif doc2lang[f] not in (args.slang, args.tlang):
sys.stderr.write("%s detected as neither %s or %s\n"
% (f, args.slang, args.tland))
wrong_lang_pair += 1
found = False
if not found:
continue
if doc2lang[a] == doc2lang[b]:
sys.stderr.write("Found both %s and %s to be in %s\n"
% (a, b, doc2lang[b]))
wrong_lang_pair += 1
continue
correct += 1
print "Total: ", total
print "Possible: ", correct
print "Unknown: ", unknown
print "Unknown but file exists: ", unknown_but_file
print "Wrong_lang_pair: ", wrong_lang_pair
| Python | 0 | |
1bbfb6fe5080de9326bd7a35afe893bf59744bdf | add ASGI plugin/middleware tests. | honeybadger/tests/contrib/test_asgi.py | honeybadger/tests/contrib/test_asgi.py | import pprint
import unittest
from async_asgi_testclient import TestClient
import aiounittest
import mock
from honeybadger import contrib
class SomeError(Exception):
    """Marker exception raised by the demo ASGI app's error path."""
def asgi_app():
    """Build a minimal ASGI app used as the wrapped application in tests.

    The app raises SomeError for any path containing "error"; otherwise it
    responds 200 with an HTML dump of the ASGI scope.
    """
    async def app(scope, receive, send):
        if "error" in scope["path"]:
            raise SomeError("Some Error.")
        headers = [(b"content-type", b"text/html")]
        body = f"<pre>{pprint.PrettyPrinter(indent=2, width=256).pformat(scope)}</pre>"
        await send({"type": "http.response.start", "status": 200, "headers": headers})
        # The ASGI spec requires http.response.body "body" to be bytes;
        # the original sent a str, which conforming servers reject.
        await send({"type": "http.response.body", "body": body.encode("utf-8")})
    return app
class ASGIPluginTestCase(unittest.TestCase):
    """Exercises the Honeybadger ASGI middleware around the demo app."""
    def setUp(self):
        self.client = TestClient(contrib.ASGIHoneybadger(asgi_app(), api_key="abcd"))
    @mock.patch("honeybadger.contrib.asgi.honeybadger")
    def test_should_support_asgi(self, hb):
        # supports() must key off the presence of the "asgi" scope entry.
        asgi_context = {"asgi": {"version": "3.0"}}
        non_asgi_context = {}
        self.assertTrue(self.client.application.supports(hb.config, asgi_context))
        self.assertFalse(self.client.application.supports(hb.config, non_asgi_context))
    @aiounittest.async_test
    @mock.patch("honeybadger.contrib.asgi.honeybadger")
    async def test_should_notify_exception(self, hb):
        # The middleware must report the app's exception and re-raise it.
        with self.assertRaises(SomeError):
            await self.client.get("/error")
        hb.notify.assert_called_once()
        self.assertEqual(type(hb.notify.call_args.kwargs["exception"]), SomeError)
    @aiounittest.async_test
    @mock.patch("honeybadger.contrib.asgi.honeybadger")
    async def test_should_not_notify_exception(self, hb):
        # NOTE(review): this get() is not awaited, so the request may never
        # actually execute — confirm whether `await` is missing here.
        response = self.client.get("/")
        hb.notify.assert_not_called()
| Python | 0 | |
10dd7a4a70fe639b806e004bc0a0d6fb791279a3 | Add a utility script: | utils/misc/grep-svn-log.py | utils/misc/grep-svn-log.py | #!/usr/bin/env python
"""
Greps and returns the first svn log entry containing a line matching the regular
expression pattern passed as the only arg.
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h$'
"""
import fileinput, re, sys, StringIO
# Separator string for "svn log -v" output.
separator = '-' * 72
usage = """Usage: grep-svn-log.py line-pattern
Example:
svn log -v | grep-svn-log.py '^ D.+why_are_you_missing.h'"""
class Log(StringIO.StringIO):
    """Simple facade to keep track of the log content.

    Buffers lines of one "svn log -v" entry; each committed line is written
    into the underlying StringIO one step behind, so the most recent line can
    still be forgotten via del_line().  (Python 2: print >> file syntax.)
    """
    def __init__(self):
        self.reset()
    def add_line(self, a_line):
        """Add a line to the content, if there is a previous line, commit it."""
        global separator
        if self.prev_line != None:
            print >> self, self.prev_line
        self.prev_line = a_line
        self.separator_added = (a_line == separator)
    def del_line(self):
        """Forget about the previous line, do not commit it."""
        self.prev_line = None
    def reset(self):
        """Forget about the previous lines entered."""
        StringIO.StringIO.__init__(self)
        self.prev_line = None
    def finish(self):
        """Call this when you're finished with populating content."""
        if self.prev_line != None:
            print >> self, self.prev_line
            self.prev_line = None
def grep(regexp):
    """Scan "svn log -v" output on stdin; print the first entry whose line
    matches *regexp*, then return.  Returns silently on EOF without a match."""
    # The log content to be written out once a match is found.
    log = Log()
    # Two-state machine: collecting candidate lines vs. flushing the rest of
    # the matched entry.
    LOOKING_FOR_MATCH = 0
    FOUND_LINE_MATCH = 1
    state = LOOKING_FOR_MATCH
    while 1:
        line = sys.stdin.readline()
        if not line:
            return
        line = line.splitlines()[0]
        if state == FOUND_LINE_MATCH:
            # At this state, we keep on accumulating lines until the separator
            # is encountered. At which point, we can return the log content.
            if line == separator:
                print log.getvalue()
                return
            log.add_line(line)
        elif state == LOOKING_FOR_MATCH:
            # A separator starts a fresh entry; discard the buffered one.
            if line == separator:
                log.reset()
            log.add_line(line)
            # Update next state if necessary.
            if regexp.search(line):
                state = FOUND_LINE_MATCH
def main():
    # Usage check: exactly one argument, the line regex to search for.
    if len(sys.argv) != 2:
        print usage
        sys.exit(0)
    regexp = re.compile(sys.argv[1])
    grep(regexp)
if __name__ == '__main__':
main()
| Python | 0.000301 | |
8dc7a1e239dc22dd4eb69cfe1754586e3a1690dc | Test javascript using the "js" | tests/test_run_js.py | tests/test_run_js.py | import os
from py2js import JavaScript
def f(x):
    """Identity function: the sample being translated to JavaScript."""
    return x
def test(func, run):
    """Translate *func* to JavaScript, append the *run* snippet, and execute
    the result with the `js` shell; asserts the shell exits cleanly."""
    func_source = str(JavaScript(func))
    # NOTE(review): fixed path — concurrent runs would clobber each other.
    run_file = "/tmp/run.js"
    with open(run_file, "w") as f:  # local `f` shadows module-level f() only here
        f.write(func_source)
        f.write("\n")
        f.write(run)
    # Exit status 0 means the embedded JS assert() passed.
    r = os.system('js -f defs.js -f %s' % run_file)
    assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
| Python | 0 | |
bbed1fc6d144571f5cb69d1c1a54904857646d74 | Create redis-graphite.py | redis-graphite.py | redis-graphite.py | """
Redis Graphite Publisher
~~~~~~~~~~~~~~~~~~~~~~~~
Publishes stats from a redis server to a carbon server.
These stats include:
- Generic server stats (INFO command)
- Length of lists (useful for monitoring queues)
Requires redis and statsd:
https://pypi.python.org/pypi/redis
Example for a carbon storage schema:
[redis]
pattern = ^redis\.
retentions = 10s:24d,1m:30d,10m:1y
:license: MIT License
:author: Michael Mayr <michael@michfrm.net>
"""
import time
import socket
import logging
from argparse import ArgumentParser
from redis import Redis
log = logging.getLogger("redis-graphite")
stats_keys = [
# Clients
('connected_clients', int),
('client_longest_output_list', int),
('client_biggest_input_buf', int),
('blocked_clients', int),
# Memory
('used_memory', int),
('used_memory_rss', int),
('used_memory_peak', int),
('used_memory_lua', int),
('mem_fragmentation_ratio', lambda x: int(float(x) * 100)),
# Persistence
('rdb_bgsave_in_progress', int), # Nice for graphites render 0 as inf
('aof_rewrite_in_progress', int), # Nice for graphites render 0 as inf
('aof_base_size', int),
('aof_current_size', int),
# Stats
('total_connections_received', int),
('total_commands_processed', int),
]
parser = ArgumentParser()
# Connections
parser.add_argument('--redis-server', default="localhost")
parser.add_argument('--redis-port', type=int, default=6379)
parser.add_argument('--carbon-server', default="localhost")
parser.add_argument('--carbon-port', type=int, default=2003)
# Options
parser.add_argument('--no-server-stats', '-s', help="Disable graphing of server stats", action="store_true")
parser.add_argument('--lists', '-l', help="Watch the length of one or more lists", nargs="+")
parser.add_argument('--once', '-o', help="Run only once, then quit", action="store_true")
parser.add_argument('--interval', '-i', help="Check interval in seconds", type=int, default=10)
parser.add_argument('--verbose', '-v', help="Debug output", action="store_true")
def main():
    """Poll redis and publish the configured stats to carbon until stopped.

    Connects once to both redis and carbon, then loops every --interval
    seconds (or once with --once), sending plaintext carbon metrics.
    """
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    # All metric names are namespaced under "redis.<host>:<port>.".
    base_key = "redis.{}:{}.".format(args.redis_server, args.redis_port)
    log.debug("Base key:{}".format(base_key))
    log.debug("Connecting to redis")
    client = Redis(args.redis_server, args.redis_port)
    sock = socket.socket()
    sock.connect((args.carbon_server, args.carbon_port))
    def send(key, value):
        # Carbon plaintext protocol: "<key> <value> <unix-timestamp>\n".
        # NOTE(review): sendall() with a str assumes Python 2; Python 3
        # would need bytes here — confirm the target interpreter.
        cmd = "{} {} {}\n".format(key, value, int(time.time()))
        sock.sendall(cmd)
    log.debug("Starting mainloop")
    while True:
        info = client.info()
        log.debug("Got {} info keys from redis".format(len(info)))
        if not args.no_server_stats:
            for key, keytype in stats_keys:
                # Skip stats this redis version does not report.
                if key not in info:
                    log.debug("WARN:Key not supported by redis: {}".format(key))
                    continue
                value = keytype(info[key])
                log.debug("gauge {}{} -> {}".format(base_key, key, value))
                send(base_key + key, value)
        if args.lists:
            # Watched list lengths go under "<base>.list.<name>".
            lists_key = base_key + "list."
            for key in args.lists:
                length = client.llen(key)
                log.debug("Length of list {}: {}".format(key, length))
                send(lists_key + key, length)
        if args.once:
            break
        log.debug("Sleeping {} seconds".format(args.interval))
        time.sleep(args.interval)
    # NOTE(review): the socket is only closed on the clean-exit path; an
    # exception above leaks it — consider try/finally.
    sock.close()
main()
| Python | 0.000002 | |
dee49a5e023907d77e2598560d25480bc7f56e34 | Add k40 batch script | examples/offline_analysis/qk40calib.py | examples/offline_analysis/qk40calib.py | """
================================
K40 Calibration Batch Processing
================================
Standalone job submitter for K40 offline calibrations with KM3Pipe.
"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Filename: qk40calib.py
# Author: Tamas Gal <tgal@km3net.de>
"""
Standalone job submitter for K40 offline calibrations with KM3Pipe.
Usage:
qk40calib.py OUTPUT_PATH [options]
qk40calib.py (-h | --help)
Options:
OUTPUT_PATH Folder to store the calibration data.
-d DET_ID Detector ID [default: 29].
-t TMAX Coincidence time window [default: 10].
-n N_RUNS Number of runs to process per job [default: 10].
-e ET Estimated walltime per run in minutes [default: 8].
-m VMEM Estimated vmem for a job [default: 8G].
-s RUNSETUP Runsetup match [default: PHYS.1710v5-TUNED.HRV19.3D_T_S_MX.NBMODULE].
-j JOBNAME The name of the submitted jobs [default: k40calib].
-l LOG_PATH Path of the job log files [default: qlogs].
-q Dryrun: don't submit jobs, just print the first job script.
-h --help Show this screen.
"""
import os
import re
from glob import glob
import time
from km3pipe.shell import qsub
import km3pipe as kp
from docopt import docopt
def main():
    """Submit K40 offline-calibration batch jobs for all unprocessed runs.

    Queries the KM3NeT DB for physics runs matching the runsetup substring,
    subtracts runs that already have a *.k40_cal.p file in the output folder,
    and submits one qsub job per chunk of RUNS_PER_JOB runs.
    """
    args = docopt(__doc__)
    DET_ID = int(args['-d'])
    TMAX = int(args['-t'])
    ET_PER_RUN = int(args['-e'])*60  # [s]
    RUNS_PER_JOB = int(args['-n'])  # runs per job
    VMEM = args['-m']
    CWD = os.getcwd()
    LOG_PATH = args['-l']
    JOB_NAME = args['-j']
    CALIB_PATH = os.path.join(CWD, args['OUTPUT_PATH'])
    RUN_SUBSTR = args['-s']
    DRYRUN = args['-q']
    if not os.path.exists(CALIB_PATH):
        os.makedirs(CALIB_PATH)
    db = kp.db.DBManager()
    run_table = db.run_table(det_id=DET_ID)
    phys_run_table = run_table[run_table.RUNSETUPNAME.str.contains(RUN_SUBSTR)]
    phys_runs = set(phys_run_table.RUN)
    # Run numbers are recovered from filenames like *_<det>_<run8digits>*.k40_cal.p
    processed_runs = set(int(re.search("_\\d{8}_(\\d{8})", s).group(1))
                         for s in
                         glob(os.path.join(CALIB_PATH, '*.k40_cal.p')))
    remaining_runs = list(phys_runs - processed_runs)
    print("Remaining runs: {}".format(remaining_runs))
    cmds = []
    for job_id, runs_chunk in enumerate(kp.tools.chunks(remaining_runs,
                                                        RUNS_PER_JOB)):
        n_runs = len(runs_chunk)
        print("Preparing batch script for a chunk of {} runs."
              .format(len(runs_chunk)))
        # Work inside the node-local scratch area.
        cmds.append("cd $TMPDIR; mkdir -p $USER; cd $USER")
        for run in runs_chunk:
            cmds.append("echo Processing {}:".format(run))
            irods_path = kp.tools.irods_filepath(DET_ID, run)
            root_filename = os.path.basename(irods_path)
            calib_filename = root_filename + '.k40_cal.p'
            # Fetch the run file from iRODS, extract ctMin, run the
            # calibration, copy the result back, then clean up scratch.
            cmds.append("iget -v {}".format(irods_path))
            cmds.append("CTMIN=$(JPrint -f {}|grep '^ctMin'|awk '{{print $2}}')"
                        .format(root_filename))
            cmds.append("k40calib {} {} -t {} -c $CTMIN -o {}"
                        .format(root_filename, DET_ID, TMAX, calib_filename))
            cmds.append("cp {} {}".format(calib_filename, CALIB_PATH))
            cmds.append("rm -f {}".format(root_filename))
            cmds.append("rm -f {}".format(calib_filename))
            cmds.append("echo Run {} processed.".format(run))
            cmds.append("echo " + 42*"=")
        # NOTE(review): gmtime-based %H:%M:%S wraps for estimates over 24h.
        walltime = time.strftime('%H:%M:%S', time.gmtime(ET_PER_RUN * n_runs))
        script = '\n'.join(cmds)
        qsub(script, '{}_{}'.format(JOB_NAME, job_id), walltime=walltime,
             vmem=VMEM, log_path=LOG_PATH, irods=True, platform='sl6',
             dryrun=DRYRUN)
        if DRYRUN:
            # Only show the first job script when dry-running.
            break
        cmds = []
if __name__ == '__main__':
main()
| Python | 0 | |
e44bd0b5a5db15b99a06b7561b8146554b1419d2 | Add genesisbalance class #217 | bitshares/genesisbalance.py | bitshares/genesisbalance.py | # -*- coding: utf-8 -*-
from .account import Account
from .instance import BlockchainInstance
from graphenecommon.genesisbalance import (
GenesisBalance as GrapheneGenesisBalance,
GenesisBalances as GrapheneGenesisBalances,
)
from bitsharesbase.account import Address, PublicKey
from bitsharesbase import operations
@BlockchainInstance.inject
class GenesisBalance(GrapheneGenesisBalance):
    """ Read data about a Genesis Balances from the chain

        :param str identifier: identifier of the balance
        :param bitshares blockchain_instance: bitshares() instance to use when
            accessing a RPC
    """
    # Object type id used by the graphenecommon base to build object ids.
    type_id = 15
    def define_classes(self):
        # Bind the BitShares-specific classes the graphenecommon base
        # implementation needs (accounts, ops, key/address handling).
        self.account_class = Account
        self.operations = operations
        self.address_class = Address
        self.publickey_class = PublicKey

@BlockchainInstance.inject
class GenesisBalances(GrapheneGenesisBalances):
    """ List genesis balances that can be claimed from the
        keys in the wallet
    """
    def define_classes(self):
        # Same wiring as GenesisBalance, for the list variant.
        self.genesisbalance_class = GenesisBalance
        self.publickey_class = PublicKey
        self.address_class = Address
| Python | 0 | |
3dd71c02ea1fa9e39054bd82bf9e8657ec77d6b9 | Add a script to recover the chat_id | tools/get_chat_id.py | tools/get_chat_id.py | #! /usr/bin/python3
# -*- coding:utf-8 -*-
# by antoine@2ohm.fr
import sys
import time
import telepot
def handle(msg):
    """Telepot callback: print the chat_id of every message; reply to /start."""
    content_type, chat_type, chat_id = telepot.glance(msg)
    print("\tchat_id: {}".format(chat_id))
    if content_type == 'text' and msg['text'] == '/start':
        # Reply text is runtime data (sent verbatim as HTML) — do not edit.
        ans = """
    Hello <b>{first_name}</b>, nice to meet you!\n
    Your chat_id is <code>{chat_id}</code>.\n
    You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
    See you soon!
    """.format(first_name = msg['from']['first_name'],
               chat_id = chat_id)
        bot.sendMessage(chat_id, ans, parse_mode = "HTML")

# Paste your bot's API token here before running.
# NOTE(review): "TOKKEN" looks like a typo, but it is runtime data (a
# placeholder the user replaces), so it is left as-is.
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)  # telepot listens on a background thread
print ('Listening ...')
# Keep the program running.
while 1:
    try:
        time.sleep(10)
    except KeyboardInterrupt:
        print()
        sys.exit()
| Python | 0 | |
e950a53b2a392014fbfd7b9827a9f3f0b12a377b | add connector test class | connectortest.py | connectortest.py | import unittest
import threading
import re
import message
import StringIO
from connector import Connector, AppConnector
import SocketServer
from threadserver import DetailServer
from datetime import datetime
from PIL import Image
class App:
    """Minimal application stub handed to AppConnector in the test.

    (Python 2 file: print statements.)
    """
    def update_msg(self, txtmsg):
        # Echo the text message body and return the message unchanged.
        print txtmsg.get_body()
        return txtmsg
    def update_image(self, imgmsg):
        # Display the received image and return the message unchanged.
        img = imgmsg.get_image()
        img.show()
        return imgmsg
class ConnectorTest(unittest.TestCase):
    """Smoke test wiring an App into an AppConnector."""
    def setUp(self):
        self.app = App()
        self.c = AppConnector(app=self.app)
    def test_header(self):
        c = self.c
        # NOTE(review): serve_forever() presumably blocks; this "test" runs
        # the server interactively rather than asserting anything — confirm.
        c.serve_forever()
unittest.main()
| Python | 0 | |
f97868b89da50532413465250d84308b84276296 | add script | scripts/getliblist.py | scripts/getliblist.py | #!/usr/bin/env python
import sys
import os
def getlibs(invcf):
    """Return the unique read-group/library names listed in a tebreak VCF.

    :param invcf: path to a VCF file; the last entry of each record's sample
        column is expected to be a comma-separated list of read groups.
    :return: list of unique read-group names (order unspecified).
    """
    # Use a real set instead of a dict-of-True; also avoid unpacking into
    # names that shadow the builtins `id`, `filter` and `format`.
    rgs = set()
    with open(invcf, 'r') as vcf:
        for line in vcf:
            if line.startswith('#'):
                continue
            fields = line.strip().split('\t')
            # Sample column is the 10th VCF field; its last FORMAT entry
            # holds the comma-separated read groups.
            sample = fields[9]
            rgs.update(sample.split(':')[-1].split(','))
    return list(rgs)
# Read a file listing VCF paths (one per line) and print the sorted union of
# their read-group/library names.  (Python 2 script: print statements.)
if len(sys.argv) == 2:
    rgs = []
    with open(sys.argv[1], 'r') as vcflist:
        for vcf in vcflist:
            vcf = vcf.strip()
            assert os.path.exists(vcf), "VCF not found: " + vcf
            for rg in getlibs(vcf):
                rgs.append(rg)
    # De-duplicate across all VCFs before printing.
    print '\n'.join(sorted(list(set(rgs))))
else:
    print "usage:", sys.argv[0], "<tebreak output vcf list in a file>"
| Python | 0.000001 | |
fda7d76e4b10a1b43e3612742585d9abcc7b27da | Rename tags.py to search.py | tiddlywebplugins/tank/search.py | tiddlywebplugins/tank/search.py | """
Routines associated with finding and listing tags.
An experiment for now.
"""
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.policy import PermissionsError
from tiddlywebplugins.whoosher import get_searcher, query_parse
def list_tags(environ, start_response):
    """
    Plain text list of tags in a certain context.

    If a q query parameter is provided, then that is used to limit
    the search space for tags. For example q=modifier:cdent bag:foobar
    would return tags only from tiddlers in the bag foobar with most
    recent modifier of cdent.
    """
    config = environ['tiddlyweb.config']
    query = environ['tiddlyweb.query'].get('q', [None])[0]
    searcher = get_searcher(config)
    if query:
        # XXX this is not robust in the face of wacky inputs
        # (including quoted inputs), for now we ride.
        kwargs = dict([entry.split(':') for entry in query.split()])
        documents = searcher.documents(**kwargs)
    else:
        documents = searcher.documents()
    # As yet unknown if this will be slow or not.
    # Tags are stored as one comma-joined field per document.
    set_tags = set()
    for stored_fields in documents:
        set_tags.update(stored_fields['tags'].split(','))
    start_response('200 OK', [('Content-Type', 'text/plain; charset=UTF-8')])
    # NOTE(review): returning a bare str from a WSGI app iterates it
    # character by character; a one-element list would be cheaper.
    return '\n'.join(set_tags)
def get_comp_bags(store, config, usersign):
    """
    Saving for later. Return a list of bags that can be used in
    comps.

    Searches the index for tiddlers titled "app" and keeps only those
    bags *usersign* is allowed to read.
    """
    comp_bags = []
    for result in full_search(config, 'title:app'):
        # Search ids are "<bag>:<title>"; we only need the bag part.
        bag, _ = result['id'].split(':', 1)
        bag = store.get(Bag(bag))
        try:
            bag.policy.allows(usersign, 'read')
            comp_bags.append(bag)
        except PermissionsError:
            # Unreadable bags are silently skipped on purpose.
            pass
    return comp_bags
def full_search(config, query):
    """Parse *query* and run it against the whoosh index from *config*."""
    parsed_query = query_parse(config, query)
    return get_searcher(config).search(parsed_query)
| Python | 0.000003 | |
eac6545d0700d2a6c3de43db5ea8d46cfea12464 | Update link.py | link.py | link.py | from module import XMPPModule
import halutils
import re, requests
class Link(XMPPModule):
    """Watches chat messages for URLs and replies with the page's <title>."""

    def handleMessage(self, msg):
        """If msg['body'] contains a URL, fetch it and reply with its title."""
        match = re.match('.*(http[s]?://.*)+', msg['body'])
        if not match:
            return
        addr = match.group(1)
        # Bounded timeout so a hung server cannot stall the bot forever.
        webpage = requests.get(addr, timeout=10).content
        title_match = re.match('.*<title>(.*)</title>', str(webpage))
        # Pages without a <title> previously crashed with AttributeError
        # on .group(); now we simply stay silent for them.
        if title_match:
            self.xmpp.reply(msg, "Website: " + title_match.group(1).strip())
| Python | 0 | |
6dae3f3199b2828cef457b06b6a2aa50ae98cb55 | Add a convenient tool for sweeping over a bunch of configs. | tools/sweep_scene.py | tools/sweep_scene.py | #!/usr/bin/env python3
import sys
import os
import argparse
import json
import subprocess
import tempfile
import statistics
def compute_stddev(values):
    """Return (mean, sample standard deviation) of *values*.

    The deviation is reported as 0.0 when there are fewer than two samples,
    since statistics.stdev is undefined for a single observation.
    """
    if len(values) > 1:
        spread = statistics.stdev(values)
    else:
        spread = 0.0
    return statistics.mean(values), spread
def run_test(sweep, config, iterations, stat_file):
    """Run the *sweep* command *iterations* times and aggregate results.

    After each run, the averageFrameTimeUs value is read from *stat_file*;
    returns (mean, stddev) of the collected frame times.
    """
    frame_times = []
    for _ in range(iterations):
        print('Running scene with config:', config)
        subprocess.check_call(sweep)
        print('Ran scene ...')
        with open(stat_file, 'r') as stats:
            parsed = json.loads(stats.read())
        frame_times.append(parsed['averageFrameTimeUs'])
    return compute_stddev(frame_times)
def main():
    """Drive a performance sweep: optionally repack the scene, then run
    the headless viewer once per config (explicit files or an
    auto-generated matrix) and print (config, avg, stddev) per run."""
    # --- CLI definition -------------------------------------------------
    parser = argparse.ArgumentParser(description = 'Script for running automated performance tests.')
    parser.add_argument('--scene',
            help = 'The glTF/glB scene to test')
    parser.add_argument('--texcomp',
            help = 'Which texture compression to use for LDR textures',
            type = str)
    parser.add_argument('--optimized-scene',
            help = 'Path where a processed scene is placed.',
            type = str)
    parser.add_argument('--environment-texcomp',
            help = 'Which texture compression to use for environments',
            type = str)
    parser.add_argument('--environment-cube',
            help = 'Cubemap texture',
            type = str)
    parser.add_argument('--environment-reflection',
            help = 'Reflection texture',
            type = str)
    parser.add_argument('--environment-irradiance',
            help = 'Irradiance texture',
            type = str)
    parser.add_argument('--extra-lights',
            help = 'Extra lights',
            type = str)
    parser.add_argument('--extra-cameras',
            help = 'Extra cameras',
            type = str)
    parser.add_argument('--configs',
            help = 'Which config files to sweep through',
            type = str,
            nargs = '+')
    parser.add_argument('--gen-configs',
            help = 'Automatically generate configs to sweep through',
            action = 'store_true')
    parser.add_argument('--width',
            help = 'Resolution X',
            type = int)
    parser.add_argument('--height',
            help = 'Resolution Y',
            type = int)
    parser.add_argument('--frames',
            help = 'Number of frames',
            type = int)
    parser.add_argument('--iterations',
            help = 'Number of iterations',
            type = int)
    args = parser.parse_args()
    # --- Optional scene preprocessing ----------------------------------
    # If --optimized-scene is given, run the repacker first and sweep the
    # processed output; each optional flag is forwarded only when set.
    if args.optimized_scene:
        scene_build = ['./tools/gltf-repacker']
        scene_build.append(args.scene)
        scene_build.append('--output')
        scene_build.append(args.optimized_scene)
        if args.texcomp:
            scene_build.append('--texcomp')
            scene_build.append(args.texcomp)
        if args.environment_texcomp:
            scene_build.append('--environment-texcomp')
            scene_build.append(args.environment_texcomp)
        if args.environment_cube:
            scene_build.append('--environment-cube')
            scene_build.append(args.environment_cube)
        if args.environment_reflection:
            scene_build.append('--environment-reflection')
            scene_build.append(args.environment_reflection)
        if args.environment_irradiance:
            scene_build.append('--environment-irradiance')
            scene_build.append(args.environment_irradiance)
        if args.extra_lights:
            scene_build.append('--extra-lights')
            scene_build.append(args.extra_lights)
        if args.extra_cameras:
            scene_build.append('--extra-cameras')
            scene_build.append(args.extra_cameras)
        print('Building scene with arguments', scene_build)
        subprocess.check_call(scene_build)
        print('Built scene ...')
        sweep_path = args.optimized_scene
    else:
        sweep_path = args.scene
    # Temp files for the viewer's JSON stats output and for generated
    # configs; mkstemp opens them, so close the fds right away.
    f, stat_file = tempfile.mkstemp()
    f_c, config_file = tempfile.mkstemp()
    os.close(f)
    os.close(f_c)
    if (not args.width) or (not args.height) or (not args.frames):
        sys.stderr.write('Need width, height and frames.\n')
        sys.exit(1)
    # Common viewer invocation; each sweep appends its own --config.
    base_sweep = ['./viewer/gltf-viewer-headless', '--frames', str(args.frames),
            '--width', str(args.width),
            '--height', str(args.height), sweep_path,
            '--stat', stat_file]
    results = []
    iterations = args.iterations if args.iterations else 1
    if args.configs:
        # Explicit list of config files supplied on the command line.
        for config in args.configs:
            sweep = base_sweep + ['--config', config]
            avg, stddev = run_test(sweep, config, iterations, stat_file)
            results.append((config, avg, stddev))
    elif args.gen_configs:
        # Cartesian product of renderer options; MSAA and depth-prepass
        # combinations that don't apply to the deferred path are skipped.
        for renderer in ['forward', 'deferred']:
            for msaa in [1, 4]:
                for prepass in [False, True]:
                    if msaa != 1 and renderer == 'deferred':
                        continue
                    if prepass and renderer == 'deferred':
                        continue
                    for clustered in [False, True]:
                        for hdr_bloom in [False, True]:
                            for shadows in [False, True]:
                                for pos_shadows in [False, True]:
                                    c = {}
                                    c['renderer'] = renderer
                                    c['hdrBloom'] = hdr_bloom
                                    c['msaa'] = msaa
                                    c['clusteredLights'] = clustered
                                    c['directionalLightShadows'] = shadows
                                    c['forwardDepthPrepass'] = prepass
                                    c['clusteredLightsShadows'] = pos_shadows
                                    # Write the generated config where the
                                    # viewer will read it, then run.
                                    with open(config_file, 'w') as f:
                                        json.dump(c, f)
                                    sweep = base_sweep + ['--config', config_file]
                                    avg, stddev = run_test(sweep, config_file, iterations, stat_file)
                                    config_name = {}
                                    config_name['renderer'] = renderer
                                    config_name['msaa'] = msaa
                                    config_name['prepass'] = prepass
                                    config_name['clustered'] = clustered
                                    config_name['hdr_bloom'] = hdr_bloom
                                    config_name['shadows'] = shadows
                                    config_name['pos_shadows'] = pos_shadows
                                    results.append((config_name, avg, stddev))
    for res in results:
        print(res)
    os.remove(stat_file)
    os.remove(config_file)
if __name__ == '__main__':
main()
| Python | 0 | |
c29e430301dc854dc7bd83ebc2a588cea70589a6 | Fix has_perm issue in get_project_list | sentry/web/helpers.py | sentry/web/helpers.py | """
sentry.web.views
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.conf import settings as dj_settings
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponse
from django.template import loader
from sentry.conf import settings
from sentry.models import ProjectMember, Project
def get_project_list(user=None, flag=None):
    """
    Return a dict of ``{project_id: project}`` for every project the
    user has some level of access to: all public projects, plus any
    project the user is a member of (optionally restricted to
    memberships granting ``flag``).
    """
    projects = dict((p.pk, p) for p in Project.objects.filter(public=True))
    # ``user`` defaults to None; calling is_authenticated() on it would
    # raise AttributeError, so guard before querying memberships.
    if user is not None and user.is_authenticated():
        projects.update(dict(
            (pm.project_id, pm.project)
            for pm in ProjectMember.objects.filter(user=user).select_related('project')
            if (not flag or pm.has_perm(flag))))
    return projects
# Module-level cache for the computed login URL.
_LOGIN_URL = None
def get_login_url(reset=False):
    """
    Return the login URL, computing and caching it on first use.

    Prefers Django's ``LOGIN_URL`` when it resolves to a view, falls
    back to Sentry's own setting, and finally to the built-in login
    view. Pass ``reset=True`` to recompute the cached value.
    """
    global _LOGIN_URL
    if _LOGIN_URL is None or reset:
        # if LOGIN_URL resolves force login_required to it instead of our own
        # XXX: this must be done as late as possible to avoid idempotent requirements
        try:
            resolve(dj_settings.LOGIN_URL)
        except Exception:
            # A bare ``except:`` would also trap SystemExit and
            # KeyboardInterrupt; any resolver failure simply means the
            # URL is not routable here.
            _LOGIN_URL = settings.LOGIN_URL
        else:
            _LOGIN_URL = dj_settings.LOGIN_URL

        if _LOGIN_URL is None:
            _LOGIN_URL = reverse('sentry-login')
    return _LOGIN_URL
def iter_data(obj):
    """
    Yield ``(key, value)`` pairs from ``obj.data``, skipping private
    keys (leading underscore) and the reserved ``url`` key.
    """
    # .items() behaves identically for iteration on Python 2 and is the
    # only spelling that exists on Python 3; .iteritems() is Py2-only.
    for k, v in obj.data.items():
        if k.startswith('_') or k in ['url']:
            continue
        yield k, v
def render_to_string(template, context=None):
    """
    Render ``template`` with Sentry's common context variables merged
    into ``context``.

    The previous ``context={}`` default was a single shared dict that
    this function mutates via ``update()``, leaking state between
    calls; ``None`` sentinels avoid that.
    """
    if context is None:
        context = {}
    context.update({
        'has_search': False,
        'MESSAGES_PER_PAGE': settings.MESSAGES_PER_PAGE,
    })
    return loader.render_to_string(template, context)
def render_to_response(template, context=None, status=200):
    """
    Render ``template`` into an ``HttpResponse`` with the given HTTP
    status code.

    Uses ``None`` instead of a shared mutable ``{}`` default, since the
    dict is mutated downstream by ``render_to_string``.
    """
    if context is None:
        context = {}
    response = HttpResponse(render_to_string(template, context))
    response.status_code = status

    return response
| """
sentry.web.views
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from django.conf import settings as dj_settings
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponse
from django.template import loader
from sentry.conf import settings
from sentry.models import Project
def get_project_list(user=None, flag=None):
    """
    Return a dict of ``{project_id: project}`` for every project the
    user has some level of access to: all public projects, plus any
    project the user is a member of (optionally restricted to those
    granting ``flag``).
    """
    projects = dict((p.pk, p) for p in Project.objects.filter(public=True))
    # ``user`` defaults to None; calling is_authenticated() on it would
    # raise AttributeError, so guard before querying memberships.
    if user is not None and user.is_authenticated():
        projects.update(dict((p.pk, p) for p in Project.objects.filter(member_set__user=user) if (not flag or p.has_perm(flag))))
    return projects
# Module-level cache for the computed login URL.
_LOGIN_URL = None
def get_login_url(reset=False):
    """
    Return the login URL, computing and caching it on first use.

    Prefers Django's ``LOGIN_URL`` when it resolves to a view, falls
    back to Sentry's own setting, and finally to the built-in login
    view. Pass ``reset=True`` to recompute the cached value.
    """
    global _LOGIN_URL
    if _LOGIN_URL is None or reset:
        # if LOGIN_URL resolves force login_required to it instead of our own
        # XXX: this must be done as late as possible to avoid idempotent requirements
        try:
            resolve(dj_settings.LOGIN_URL)
        except Exception:
            # A bare ``except:`` would also trap SystemExit and
            # KeyboardInterrupt; any resolver failure simply means the
            # URL is not routable here.
            _LOGIN_URL = settings.LOGIN_URL
        else:
            _LOGIN_URL = dj_settings.LOGIN_URL

        if _LOGIN_URL is None:
            _LOGIN_URL = reverse('sentry-login')
    return _LOGIN_URL
def iter_data(obj):
    """
    Yield ``(key, value)`` pairs from ``obj.data``, skipping private
    keys (leading underscore) and the reserved ``url`` key.
    """
    # .items() behaves identically for iteration on Python 2 and is the
    # only spelling that exists on Python 3; .iteritems() is Py2-only.
    for k, v in obj.data.items():
        if k.startswith('_') or k in ['url']:
            continue
        yield k, v
def render_to_string(template, context=None):
    """
    Render ``template`` with Sentry's common context variables merged
    into ``context``.

    The previous ``context={}`` default was a single shared dict that
    this function mutates via ``update()``, leaking state between
    calls; ``None`` sentinels avoid that.
    """
    if context is None:
        context = {}
    context.update({
        'has_search': False,
        'MESSAGES_PER_PAGE': settings.MESSAGES_PER_PAGE,
    })
    return loader.render_to_string(template, context)
def render_to_response(template, context=None, status=200):
    """
    Render ``template`` into an ``HttpResponse`` with the given HTTP
    status code.

    Uses ``None`` instead of a shared mutable ``{}`` default, since the
    dict is mutated downstream by ``render_to_string``.
    """
    if context is None:
        context = {}
    response = HttpResponse(render_to_string(template, context))
    response.status_code = status
    return response
| Python | 0 |
1510a0faeff91f6f6ed7a1c5929628d430cb0506 | Update file identification tools | fpr/migrations/0010_update_fido_136.py | fpr/migrations/0010_update_fido_136.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def data_migration(apps, schema_editor):
    """Bump recorded tool versions: Fido 1.3.5 -> 1.3.6,
    Siegfried 1.6.7 -> 1.7.3."""
    tool_model = apps.get_model('fpr', 'IDTool')
    upgrades = (
        ('Fido', '1.3.5', '1.3.6'),
        ('Siegfried', '1.6.7', '1.7.3'),
    )
    for description, old_version, new_version in upgrades:
        tool_model.objects.filter(
            description=description, version=old_version
        ).update(version=new_version)
def reverse_migration(apps, schema_editor):
    """Undo data_migration: Fido 1.3.6 -> 1.3.5,
    Siegfried 1.7.3 -> 1.6.7."""
    tool_model = apps.get_model('fpr', 'IDTool')
    downgrades = (
        ('Fido', '1.3.6', '1.3.5'),
        ('Siegfried', '1.7.3', '1.6.7'),
    )
    for description, new_version, old_version in downgrades:
        tool_model.objects.filter(
            description=description, version=new_version
        ).update(version=old_version)
class Migration(migrations.Migration):
    # Must be applied after the PRONOM 90 signature migration.
    dependencies = [
        ('fpr', '0009_pronom_90'),
    ]
    # Data-only migration; reversible via reverse_migration.
    operations = [
        migrations.RunPython(data_migration, reverse_migration),
    ]
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.