commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
bdc04453938366e28ff91b6e16c536eca84d8bef | add summary generator | summary.py | summary.py | #!/usr/bin/env python
import os
import json
import argparse
from argparse import ArgumentDefaultsHelpFormatter
from time import gmtime, strftime, mktime
import datetime
class DatetimeConverter(object):
    """Static helpers converting between UTC time strings, datetimes and timestamps."""
    # Format used by all string conversions, e.g. '2020-01-02T03:04:05'.
    TIME_STR_FORMAT = '%Y-%m-%dT%H:%M:%S'
    @staticmethod
    def get_UTC():
        """Return the current UTC time as a time.struct_time."""
        return gmtime()
    @staticmethod
    def get_string_UTC():
        """Return the current UTC time formatted with TIME_STR_FORMAT."""
        return strftime(DatetimeConverter.TIME_STR_FORMAT, gmtime())
    @staticmethod
    def get_datetime_from_string(input_time_string):
        """Parse a TIME_STR_FORMAT string into a datetime.datetime.

        BUG FIX: the file does `import datetime`, so `datetime` names the
        *module*; the original `datetime.strptime(...)` raised AttributeError.
        """
        return datetime.datetime.strptime(input_time_string, DatetimeConverter.TIME_STR_FORMAT)
    @staticmethod
    def get_timestamp_from_string(input_time_string):
        """Convert a TIME_STR_FORMAT string to a timestamp (float seconds).

        NOTE(review): time.mktime interprets the tuple as *local* time even
        though the other helpers produce UTC — confirm that is intended.
        """
        return mktime(DatetimeConverter.get_datetime_from_string(input_time_string).timetuple())
class SummaryGenerator(object):
    """Builds a nested JSON-able summary of '*.time' files found under a root folder."""
    def __init__(self, root_folder):
        # Folder tree that will be scanned by generate_summary_dict().
        self.root_folder = root_folder
    def list_to_hierarchy_dict(self, dict_root, input_list):
        """Insert input_list into dict_root as a chain of nested dict keys.

        Non-string items are converted with str() before being used as keys.
        """
        current = dict_root
        for item in input_list:
            key = item if type(item) is str else str(item)
            current = current.setdefault(key, {})
    def generate_summary_dict(self):
        """Walk root_folder and collect integer values parsed from timing files.

        For each directory containing files whose names end with 'time', a
        nested dict mirroring the directory path is created; the leaf maps
        the stringified average to the list of parsed values.
        """
        summary = {}
        for current_root, _dirs, filenames in os.walk(self.root_folder):
            timing_files = [name for name in filenames if name.endswith('time')]
            if not timing_files:
                continue
            values = []
            for name in timing_files:
                try:
                    values.append(int(name.replace('.time', '')))
                except Exception:
                    # Non-numeric stems (e.g. 'abc.time') are skipped.
                    pass
            # Mirror the directory path as nested dict keys.
            path_parts = current_root.split(os.sep)
            self.list_to_hierarchy_dict(summary, path_parts)
            leaf = summary
            for part in path_parts:
                leaf = leaf[part]
            # Key is the stringified average of the parsed values.
            leaf[str(sum(values) / len(values))] = values
        return summary
    def run(self):
        """Print the summary plus the current UTC timestamp as indented JSON."""
        output = {
            'summary': self.generate_summary_dict(),
            'UTC': DatetimeConverter.get_string_UTC()
        }
        print(json.dumps(output, indent=4))
def main():
    """CLI entry point: print a JSON timing summary for a folder tree."""
    arg_parser = argparse.ArgumentParser(description='Summary Generator',
                                         formatter_class=ArgumentDefaultsHelpFormatter)
    # FIX: dropped `required=True` — it contradicted the declared default of
    # '.', which ArgumentDefaultsHelpFormatter advertises in the help text.
    arg_parser.add_argument('-d', '--dir', dest='root_folder', action='store', default='.',
                            help='the root folder')
    args = arg_parser.parse_args()
    sg = SummaryGenerator(args.root_folder)
    sg.run()
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
d75c519eb4c3b276f04ba58277d03801c8568ff0 | Create 4.py | solutions/4.py | solutions/4.py | # A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
# Find the largest palindrome made from the product of two 3-digit numbers.
def main():
    """Print and return the largest palindrome made from the product of two
    numbers in [901, 999] (Project Euler problem 4, restricted range).

    BUG FIX: the original stored the palindrome as a *string* and then
    compared `(i*j) > max` (int vs str), so after the first hit no larger
    palindrome could ever replace it; it also shadowed the builtin `max`.
    """
    best = 0
    for i in range(999, 900, -1):
        for j in range(999, 900, -1):
            product = i * j
            if product > best and str(product) == str(product)[::-1]:
                best = product
    print(best)
    return best
main()
| Python | 0.000001 | |
21ddecb7804501476d35290b0b0cb2b7311728ab | add hello world tornado | server.py | server.py | import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Root URL handler: answers every GET with a fixed greeting."""
    def data_received(self, chunk):
        # Abstract hook for streamed request bodies; this handler does not
        # stream, so the chunk is ignored.
        pass
    def get(self):
        # Respond to GET with a static plain-text body.
        self.write("Hello, world")
def make_app():
    """Build the Tornado application with its single-route routing table."""
    routes = [
        (r"/", MainHandler),
    ]
    return tornado.web.Application(routes)
if __name__ == "__main__":
    # Start the demo server: bind port 8888 and run the IOLoop until killed.
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| Python | 0.999999 | |
19cfaf8534626e5c6b5193da40a17cc092b24758 | Use tuple instead of a list for DEFAULT_VIEWPORT_VIRTUAL_TAGS | taskwiki/constants.py | taskwiki/constants.py | DEFAULT_VIEWPORT_VIRTUAL_TAGS = ("-DELETED", "-PARENT")
DEFAULT_SORT_ORDER = "due+,pri-,project+"
| DEFAULT_VIEWPORT_VIRTUAL_TAGS = ["-DELETED", "-PARENT"]
DEFAULT_SORT_ORDER = "due+,pri-,project+"
| Python | 0.000366 |
d93dada0fe434cd736d11b9cfb1635146130f24a | Add 031 | 031/main.py | 031/main.py | # Integers avoid having to rely on decimal.Decimal
# to handle rounding errors
COINS = 1, 2, 5, 10, 20, 50, 100, 200
TARGET = 200
visited = set()
solutions = []
stack = [(0, (0,) * len(COINS))]
while stack:
total, state = stack.pop()
for cn, coin in enumerate(COINS):
new_total = total + coin
if new_total > TARGET:
continue
new_state = list(state)
new_state[cn] += 1
new_state = tuple(new_state)
if new_state not in visited:
visited.add(new_state)
if new_total == TARGET:
solutions.append(new_state)
else: # < TARGET
stack.append((new_total, new_state))
print(len(solutions))
| Python | 0.00051 | |
1d51529add69d2ce43829d0f09f25f2ab897125f | Add info-bar-gtk. | src/info_bar_gtk.py | src/info_bar_gtk.py | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gtk
import pango
from event import *
class InfoBar(gtk.EventBox):
    """Represents a single horizontal bar of text, possibly with an
    icon, textual buttons, and a close button. Always visible."""
    def __init__(self, text = ""):
        gtk.EventBox.__init__(self)
        # Pale yellow "butter bar" background.
        self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("#FBE99C"))
        self._label = gtk.Label()
        self._label.set_property('ellipsize', pango.ELLIPSIZE_END)
        self._label.set_text(text)
        self._label.set_tooltip_text(text)
        self._label.set_size_request(-1,36)
        # _hbox1 lays out [icon | label | _hbox2]; _hbox2 holds the textual
        # buttons, while the close button is packed at the end of _hbox1.
        self._hbox1 = gtk.HBox()
        self._hbox2 = gtk.HBox()
        self.add(self._hbox1)
        self._icon_bin = gtk.Alignment(0,0.5,0,0)
        self._hbox1.pack_start(self._icon_bin, False, True, 5)
        self._hbox1.pack_start(self._label, True, True, 5)
        self._hbox1.pack_start(self._hbox2, False, True, 5)
        self._hbox1.show_all()
        self._has_close_button = False
        self._after_button_pressed = Event()
    def set_icon(self, image):
        """Show `image` as the bar's icon, or remove the current icon if None."""
        if image is None:
            # BUG FIX: the original referenced `gtk._icon_bin` (AttributeError
            # on the gtk module); operate on this bar's own icon bin.
            self._icon_bin.remove(self._icon_bin.get_children()[0])
        else:
            self._icon_bin.add(image)
    def set_stock_icon(self, stock):
        """Show a stock icon by id, or remove the current icon if None."""
        if stock is None:
            # BUG FIX: same `gtk._icon_bin` typo as in set_icon.
            self._icon_bin.remove(self._icon_bin.get_children()[0])
        else:
            image = gtk.image_new_from_stock(stock, gtk.ICON_SIZE_SMALL_TOOLBAR)
            self._icon_bin.add(image)
    def add_button(self, label, cb, *userdata):
        """Append a textual button; pressing it calls cb(*userdata) and then
        fires after_button_pressed."""
        bn = gtk.Button(label)
        bn.set_property('can-focus', False)
        self._hbox2.pack_start(self._mka(bn),False,False,2)
        def on_click(*args):
            cb(*userdata)
            self._after_button_pressed.fire()
        bn.connect('clicked', on_click)
    def add_close_button(self, cb = None, *userdata):
        """Append the (single) close button; cb(*userdata) is optional."""
        assert self._has_close_button == False
        self._has_close_button = True
        bn = gtk.Button()
        bn.set_property('relief', gtk.RELIEF_NONE)
        bn.set_property('can-focus', False)
        bn.set_image(gtk.image_new_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_SMALL_TOOLBAR))
        bn.modify_bg(gtk.STATE_PRELIGHT, gtk.gdk.color_parse("#E1D18C"))
        bn.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("#E1D18C"))
        self._hbox1.pack_end(self._mka(bn),False,False,2)
        def on_click(*args):
            if cb:
                cb(*userdata)
            self._after_button_pressed.fire()
        bn.connect('clicked', on_click)
    @property
    def after_button_pressed(self):
        # Event fired after any button (including close) is clicked.
        return self._after_button_pressed
    @property
    def has_close_button(self):
        return self._has_close_button
    @property
    def has_buttons(self):
        # True if add_button() has been called at least once.
        return len(self._hbox2.get_children()) != 0
    @property
    def text(self):
        return self._label.get_text()
    @text.setter
    def text(self, text):
        self._label.set_text(text)
    def _mka(self, bn):
        # Wrap a button in a left/vertically-centered Alignment container.
        a = gtk.Alignment(0,0.5,0,0.0)
        a.add(bn)
        return a
class _BSeparator(gtk.EventBox):
    """Thin horizontal rule drawn on the butter-bar background color."""
    def __init__(self):
        gtk.EventBox.__init__(self)
        # Match the InfoBar background so the separator blends in.
        self.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("#FBE99C"))
        self.add(gtk.HSeparator())
class InfoBarCollection(gtk.VBox):
    """A collection of butter bars"""
    def __init__(self):
        gtk.VBox.__init__(self)
        # Number of bars currently attached; drives separator bookkeeping.
        self._num_bars = 0
    def has_bar(self, bar):
        # Each bar is wrapped in a VBox group; compare against those wrappers.
        return bar.get_parent() in self.get_children()
    def close_bar(self, bar):
        # Remove `bar` together with its wrapper group.
        self.remove(bar.get_parent())
        self._num_bars -= 1
        if self._num_bars > 0:
            # remove the bsep from the topmost bar
            grp0 = self.get_children()[0]
            c0 = grp0.get_children()[0]
            if isinstance(c0, _BSeparator):
                grp0.remove(c0)
    def add_bar(self, bar):
        # Every bar gets a close button (a no-op one is added if missing),
        # and any button press on the bar dismisses it.
        assert isinstance(bar,InfoBar)
        if bar.has_close_button == False:
            bar.add_close_button(lambda: True)
        def close_bar():
            # Guard: the bar may already have been detached by another press.
            if bar.get_parent():
                self.close_bar(bar)
        bar.after_button_pressed.add_listener(close_bar)
        grp = gtk.VBox()
        if self._num_bars >= 1:
            # Every bar after the first is preceded by a separator.
            sep = _BSeparator()
            grp.pack_start(sep)
        grp.pack_start(bar)
        self.pack_start(grp)
        grp.show_all()
        self._num_bars += 1
    def __len__(self):
        return self._num_bars
    def __getitem__(self,i):
        # A wrapper group contains either [bar] or [_BSeparator, bar].
        grp = self.get_children()[i]
        if isinstance(grp.get_children()[0], InfoBar):
            return grp.get_children()[0]
        else:
            return grp.get_children()[1]
# Manual demo 1 (disabled via `and False`): a single bare InfoBar.
if __name__ == "__main__" and False:
    w = gtk.Window()
    w.set_size_request(400,-1)
    b = InfoBar()
    b.text = "blah blah blah"
    w.add(b)
    w.show_all()
    gtk.main()
# Manual demo 2 (enabled): an InfoBarCollection exercising icons and buttons.
if __name__ == "__main__" and True:
    w = gtk.Window()
    w.set_size_request(400,-1)
    bbc = InfoBarCollection()
    # bb1
    bb = InfoBar("blah blah blah")
    bbc.add_bar(bb)
    # bb1
    bb = InfoBar("this is informational")
    bb.set_stock_icon(gtk.STOCK_DIALOG_INFO)
    bbc.add_bar(bb)
    # bb1
    bb = InfoBar("blah blah blah")
    bb.add_button("Accept", lambda: True)
    bbc.add_bar(bb)
    # bb1
    bb = InfoBar("OMG you need to do somethnig")
    bb.set_stock_icon(gtk.STOCK_DIALOG_WARNING)
    bb.add_button("Accept", lambda: True)
    bb.add_close_button(lambda: True)
    bbc.add_bar(bb)
    w.add(bbc)
    w.show_all()
    gtk.main()
| Python | 0 | |
eaace54d5e7d8d2ebad42cf31cf071a9cf9d3e50 | test case for creating a new story | umklapp/test.py | umklapp/test.py | from django.test import TestCase
from django.test.utils import override_settings
from umklapp.models import *
class UmklappTestCase(TestCase):
    """Base test case that provisions a pool of seven throwaway users."""
    def addUsers(self):
        """Create users 'user0'..'user6' (shared dummy e-mail/password) on self.users."""
        self.users = [
            User.objects.create_user(
                "user%d" % i,
                "test@example.com",
                "p455w0rd"
            )
            for i in range(7)
        ]
class NewStoryTest(UmklappTestCase):
    """Smoke test for Story.create_new_story."""
    def setUp(self):
        self.addUsers()
    def testNewStory(self):
        # NOTE(review): no assertions — this only verifies creation does not
        # raise. Consider asserting on the created Story's fields.
        Story.create_new_story(self.users[0], self.users, "first")
| Python | 0.00035 | |
d20e468a32d1f476196525848688ae64845c4dce | Add Python solution | sg-ski.py | sg-ski.py | #!/usr/bin/env python
import sys
def parse_map_file(path):
    """Parse a map file: first line is 'width height', then one row of ints per line.

    Returns (width, height, map_grid) where map_grid is a list of row lists.
    Raises AssertionError if the declared dimensions do not match the data.
    """
    map_grid = []
    with open(path, 'r') as f:
        width, height = map(int, f.readline().split())
        for line in f:
            # FIX: materialize each row as a list — the original appended the
            # lazy map() object, which breaks len()/indexing under Python 3.
            map_grid.append([int(token) for token in line.split()])
    assert height == len(map_grid)
    assert width == len(map_grid[0])
    return width, height, map_grid
def make_grid(width, height, initial_value):
    """Return a height x width grid (list of independent row lists) filled with initial_value."""
    return [[initial_value] * width for _ in range(height)]
def get_length_and_elevation(x, y, map_grid, path_lengths, final_elevations):
    """Return (longest strictly-downhill path length, lowest final elevation)
    starting from cell (x, y); ties on length prefer the lower elevation.

    Results are memoized into path_lengths / final_elevations (-1 == unknown).
    """
    path_length = path_lengths[y][x]
    if path_length != -1:
        return path_length, final_elevations[y][x]
    current_elevation = map_grid[y][x]
    longest_path = 0
    lowest_elevation = current_elevation
    neighbors = [
        (x, y - 1),  # up
        (x, y + 1),  # down
        (x - 1, y),  # left
        (x + 1, y),  # right
    ]
    for xn, yn in neighbors:
        # BUG FIX: negative indices silently wrap around in Python, so cells
        # in the first row/column used to "see" cells from the opposite edge;
        # IndexError only protects the high side.
        if xn < 0 or yn < 0:
            continue
        try:
            neighbor = map_grid[yn][xn]
        except IndexError:
            continue
        if neighbor < current_elevation:
            path_length, final_elevation = get_length_and_elevation(xn, yn, map_grid, path_lengths, final_elevations)
            if path_length > longest_path or (path_length == longest_path and final_elevation < lowest_elevation):
                longest_path = path_length
                lowest_elevation = final_elevation
    path_length = longest_path + 1
    path_lengths[y][x] = path_length
    final_elevations[y][x] = lowest_elevation
    return path_length, lowest_elevation
def main():
    # CLI driver: reads the map path from argv[1], scores every cell and
    # reports the longest downhill run plus its elevation drop.
    # (Python 2 print statements below — this module predates Python 3.)
    sys.stdout.write('Processing...')
    sys.stdout.flush()
    try:
        width, height, map_grid = parse_map_file(sys.argv[1])
    except IOError as e:
        sys.exit('Unable to read map file: {}'.format(e))
    except ValueError as e:
        sys.exit('Invalid map file: {}'.format(sys.argv[1]))
    # Initialize corresponding grids for path lengths and final elevations
    path_lengths = make_grid(width, height, -1)
    final_elevations = make_grid(width, height, -1)
    longest_path = -1
    steepest_drop = -1
    # Evaluate every cell; ties on path length are broken by steeper drop.
    for y, row in enumerate(map_grid):
        for x, initial_elevation in enumerate(row):
            path_length, final_elevation = get_length_and_elevation(x, y, map_grid, path_lengths, final_elevations)
            drop = initial_elevation - final_elevation
            if path_length > longest_path or (path_length == longest_path and drop > steepest_drop):
                longest_path = path_length
                steepest_drop = drop
    print '\rProcessing... DONE.'
    print '\nlength = {}, drop = {}\n'.format(longest_path, steepest_drop)
if __name__ == '__main__':
    main()
| Python | 0.000444 | |
78e0135169d2c53b0b99c7811109eb1da040f14d | add bin2h.py | tools/bin2h.py | tools/bin2h.py | #!/usr/bin/env python
# vim: set expandtab ts=4 sw=4 tw=100:
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-b", "--before", dest="before", action="append",
help="text to put before, may be specified more than once")
parser.add_option("-a", "--after", dest="after", action="append",
help="text to put after, may be specified more than once")
(options, args) = parser.parse_args()
if options.before and len(options.before) > 0:
for b in options.before:
print b
offset = 0
f = bytearray(sys.stdin.read())
for c in f:
if offset != 0 and offset % 16 == 0:
print ""
print "%#04x," % c,
offset = offset + 1
print ""
if options.after and len(options.after) > 0:
for a in options.after:
print a
| Python | 0 | |
d68cfae0cac869d6676643e33479383bc11b086a | Add najeto.py script. | utils/najeto.py | utils/najeto.py | """
Calculate elapsed distance based on hJOP HV csv output.
"""
import sys
import csv
class HV:
    """Mutable record of one vehicle's odometer counters (meters, per direction)."""
    def __init__(self, addr: int, name: str, owner: str) -> None:
        self.address: int = addr
        self.name: str = name
        self.owner: str = owner
        # Distance counters at the start/end snapshots; filled in later by the
        # CSV-loading code, hence the zero defaults.
        self.start_forward: float = 0
        self.start_backward: float = 0
        self.end_forward: float = 0
        self.end_backward: float = 0
if __name__ == '__main__':
    # Usage: najeto.py start.csv end.csv output.csv
    if len(sys.argv) < 4:
        sys.stderr.write(f'Usage: {sys.argv[0]} start.csv end.csv output.csv')
        sys.exit(1)
    start_fn, end_fn, out_fn = sys.argv[1:]
    hvs = {}
    # NOTE(review): the start file is cp1250 with ';' delimiters while the end
    # file is utf-8 with ',' — presumably matching two different exporters;
    # confirm before changing.
    with open(start_fn, encoding='cp1250') as start_file:
        start_reader = csv.reader(start_file, delimiter=';')
        for i, line in enumerate(start_reader):
            if i == 0:
                continue
            addr, name, owner, forward, backward = int(line[0]), line[1], line[2], float(line[3]), float(line[4])
            assert addr not in hvs, f'{addr} duplicity'
            hvs[addr] = HV(addr, name, owner)
            hvs[addr].start_forward = forward
            hvs[addr].start_backward = backward
    with open(end_fn, encoding='utf8') as end_file:
        end_reader = csv.reader(end_file, delimiter=',')
        for i, line in enumerate(end_reader):
            if i == 0:
                continue
            addr, name, owner, forward, backward = int(line[0]), line[1], line[2], float(line[3]), float(line[4])
            if addr in hvs:
                # Same address in both snapshots: warn on metadata mismatches.
                if hvs[addr].name != name:
                    print(f'Warning: {addr}: name[begin] = {hvs[addr].name} != {name} = name[end]')
                if hvs[addr].owner != owner:
                    print(f'Warning: {addr}: owner[begin] = {hvs[addr].owner} != {owner} = owner[end]')
                hvs[addr].end_forward = forward
                hvs[addr].end_backward = backward
            else:
                # Vehicle only present at the end: start counters stay 0.
                hvs[addr] = HV(addr, name, owner)
                hvs[addr].end_forward = forward
                hvs[addr].end_backward = backward
    with open(out_fn, 'w', encoding='utf-8', newline='') as out_file:
        writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['adresa', 'nazev', 'majitel', 'najeto_metru_vpred', 'najeto_metru_vzad'])
        for hv in hvs.values():
            forward: float = round(hv.end_forward - hv.start_forward, 2)
            backward: float = round(hv.end_backward - hv.start_backward, 2)
            # Both directions must move the same way (both gained or both lost).
            assert (forward*backward) >= 0, f'HV {hv.address} has different signs for directions!'
            if forward > 0 and backward > 0:
                writer.writerow([hv.address, hv.name, hv.owner, forward, backward])
            elif forward < 0 and backward < 0:
                print(f'Omitting {hv.address} ({hv.name}) - negative diff')
| Python | 0 | |
cf8a0105e0c4fc6af04ede6c7ae4fe4f4dac048e | add migrations | accelerator/migrations/0103_update_startupupdate_model.py | accelerator/migrations/0103_update_startupupdate_model.py | # Generated by Django 2.2.28 on 2022-05-09 11:20
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2) migration: adds USD-denominated valuation /
    revenue / funding fields and a currency_type choice field to
    StartupUpdate, and relabels three existing decimal fields.  Generated
    migrations should not be hand-edited beyond comments."""
    dependencies = [
        ('accelerator', '0102_update_program_model'),
    ]
    operations = [
        migrations.AddField(
            model_name='startupupdate',
            name='acquired_valuation_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='active_annualized_revenue_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Annualized revenue (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='active_total_funding_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Total Funding Raised (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='active_valuation_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation (in US dollars)'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='currency_type',
            field=models.CharField(
                choices=[
                    ('USD', 'USD'), ('GBP', 'GBP'),
                    ('EUR', 'EUR'), ('JPY', 'JPY'),
                    ('AUD', 'AUD'), ('CAD', 'CAD'),
                    ('CHF', 'CHF'), ('NZD', 'NZD'),
                    ('NGN', 'NGN'), ('MXN', 'MXN')],
                default='USD',
                max_length=5,
                verbose_name='Status Currency'),
        ),
        migrations.AddField(
            model_name='startupupdate',
            name='ipo_valuation_usd',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation (in US dollars)'),
        ),
        migrations.AlterField(
            model_name='startupupdate',
            name='active_annualized_revenue',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Annualized revenue'),
        ),
        migrations.AlterField(
            model_name='startupupdate',
            name='active_total_funding',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Total Funding Raised'),
        ),
        migrations.AlterField(
            model_name='startupupdate',
            name='active_valuation',
            field=models.DecimalField(
                blank=True,
                decimal_places=2,
                max_digits=13,
                null=True,
                verbose_name='Valuation'),
        ),
    ]
| Python | 0 | |
7014c5affa780044fd46911287d883024bae3fae | Create ipy_custom_hbox.py | basics/layout/ipy_custom_hbox.py | basics/layout/ipy_custom_hbox.py |
from PySide import QtCore
from PySide import QtGui
class MyHBoxLayout(QtGui.QHBoxLayout):
    """QHBoxLayout exposing a 'margins' property over (get|set)ContentsMargins."""
    def __init__(self, *args, **kwargs):
        super(MyHBoxLayout, self).__init__(*args, **kwargs)
    @property
    def margins(self):
        # Returns whatever contentsMargins() yields (a QMargins object).
        return self.contentsMargins()
    @margins.setter
    def margins(self, margins):
        # Accepts any 4-item iterable: (left, top, right, bottom).
        self.setContentsMargins(*margins)
class MyWidget(QtGui.QWidget):
    """Always-on-top scratch widget using MyHBoxLayout, exposed via .lay."""
    def __init__(self, parent=None):
        super(MyWidget, self).__init__(parent)
        self.setLayout(MyHBoxLayout())
        # Keep the window above others for interactive experimentation.
        self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
    @property
    def lay(self):
        # Shorthand accessor for the custom layout.
        return self.layout()
# Interactive (IPython) scratch session: build the widget, then tweak the
# live layout cell-by-cell below.
self = MyWidget()
self.show()
##
self.lay.addWidget(QtGui.QPushButton('1'))
self.lay.addWidget(QtGui.QPushButton('2'))
# Exercise the custom 'margins' property setter.
self.lay.margins = [0] * 4
self.lay.setSpacing(15)
self.lay.addStretch()
self.lay.addWidget(QtGui.QPushButton('3'))
self.lay.setSpacing(0)
self.lay.setSpacing(10)
| Python | 0.000003 | |
29a8a4d6dc125785c450e66bd90239995ba50841 | Add clitest.py. | test/clitest.py | test/clitest.py | # -*- coding: utf-8 -*-
# Copyright 2014 Jan-Philip Gehrcke. See LICENSE file for details.
from __future__ import unicode_literals
import os
import shutil
import logging
import subprocess
import traceback
# Module-wide logging setup; all test-runner messages go through `log`.
logging.basicConfig(
    format='%(asctime)s,%(msecs)-6.1f %(funcName)s# %(message)s',
    datefmt='%H:%M:%S')
log = logging.getLogger("clitest")
class CmdlineTestError(Exception):
    """Base class for errors raised by this test harness."""
    pass
class WrongExitCode(CmdlineTestError):
    """Raised when the tested command exits with an unexpected return code."""
    pass
class CmdlineInterfaceTest(object):
    """Command line interface test abstraction for a given CLI program, called
    PROGRAM in the following paragraphs.
    Creates a run directory and a test shell script as a wrapper for the
    actual test (the command to be tested is provided as unicode string,
    and written to this shell script in a certain encoding as given by
    self.shellscript_encoding). This wrapper shell script becomes
    interpreted and executed by a shell of choice (e.g. bash or cmd.exe).
    stdout and stderr of this wrapper are streamed into real files in the
    file system.
    Other CLI program test environments directly use Python's subprocess module
    for invoking PROGRAM including corresponding command line arguments. While
    in real use cases PROGRAM might also become executed this way, the largest
    fraction of use cases is different: This automated method of testing the
    command line behavior of PROGRAM resembles actual user behavior as close as
    possible. In most cases, a user would either invoke PROGRAM directly by
    typing a command in a shell or write a shell script which he/she then
    executes later.
    This test environment here also uses Python's subprocess module for
    setting the current working directory for the test, for redirecting
    stdout and stderr to files, and for actually invoking the test shell
    script via a command as simple as
        /bin/bash test-shell-script.sh
    The issues prevented with this kind of wrapper shell script technique
    are all issues related to non-obvious argument interpretation magic
    due to argv encoding done by Python's subprocess module -- which
    differs from platform to platform and between Python 2 and 3. Through
    the wrapper script, written in a self-defined encoding, we can
    guarantee under which input conditions exactly the test is executed, and we
    can also guarantee that these conditions are as close as possible to
    the command line conditions in most use cases.
    On Unix systems, the shell works in byte mode, and usually expects
    characters to be UTF-8 encoded, as given by the LANG and LC_* environment
    variables (cf. `locale` command in your Unix shell).
    On Windows, cmd.exe can also be made to execute batch files encoded in
    UTF-8 (code page 65001, cf. https://www.google.de/#q=Code+Page+Identifiers)
    A systematic way to reproduce Unix shell behavior w.r.t to encoding:
        @chcp 65001 > nul
        @set PYTHONIOENCODING=utf-8
    See also http://superuser.com/q/269818
    This test runner catches the exit code of the shell script wrapper, which
    should always correspond to the exit code of the last command executed,
    which is PROGRAM. For bash, this is documented: "The equivalent of a bare
    exit is exit $? or even just omitting the exit."
    (http://tldp.org/LDP/abs/html/exit-status.html)
    TODO:
        - look up behavior of cmd.exe on Windows.
        - add features for test inspection/validation
    """
    # If required, these defaults should be overridden in a sub class.
    shellpath = "/bin/bash"
    rundirtop = "."
    shellscript_encoding = "utf-8"
    shellscript_ext = ".sh"
    def __init__(self, name):
        # `name` keys the run directory and all wrapper/output file names;
        # any pre-existing run directory for this name is wiped.
        self.name = name
        self.rundir = os.path.join(self.rundirtop, name)
        self.shellscript_name = "runtest_%s%s" % (name, self.shellscript_ext)
        errfilename = "runtest_%s.err" % (name)
        outfilename = "runtest_%s.out" % (name)
        self.errfilepath = os.path.join(self.rundir, errfilename)
        self.outfilepath = os.path.join(self.rundir, outfilename)
        self._clear_create_rundir()
    def _clear_create_rundir(self):
        # Start from a pristine, empty run directory.
        self.clear()
        os.makedirs(self.rundir)
    def clear(self):
        """Delete the run directory; missing directory is not an error."""
        try:
            shutil.rmtree(self.rundir)
        except OSError:
            # Does not exist, fine.
            pass
    def add_file(self, name, content_bytestring):
        """Write a raw byte string into the run directory under `name`.

        NOTE(review): `str` here is the Python 2 byte string type (the file
        imports unicode_literals); under Python 3 this check would need bytes.
        """
        assert isinstance(content_bytestring, str)  # TODO: Py3
        p = os.path.join(self.rundir, name)
        with open(p, 'w') as f:
            f.write(content_bytestring)
    def run(self, cmd_unicode, expect_rc=0, log_output=True):
        """Write `cmd_unicode` into the wrapper script, execute it via the
        configured shell in the run directory, capture stdout/stderr to
        files, and raise WrongExitCode unless the exit code equals expect_rc.
        """
        # Encode the command with the configured encoding and persist it as
        # the wrapper shell script.
        shellscript_content_bytes = cmd_unicode.encode(
            self.shellscript_encoding)
        self.add_file(self.shellscript_name, shellscript_content_bytes)
        cmd = [self.shellpath, self.shellscript_name]
        of = open(self.outfilepath, "w")
        ef = open(self.errfilepath, "w")
        try:
            sp = subprocess.Popen(cmd, stdout=of, stderr=ef, cwd=self.rundir)
            sp.wait()
            rc = sp.returncode
            log.info("Test returncode: %s", rc)
        except:
            log.error("Error running test subprocess. Traceback:\n%s",
                traceback.format_exc())
            raise CmdlineTestError("Error during attempt to run child.")
        finally:
            # Always close the capture files, even when Popen itself failed.
            of.close()
            ef.close()
        with open(self.outfilepath) as f:
            out = f.read()
        with open(self.errfilepath) as f:
            err = f.read()
        if log_output:
            log.info("Test stdout repr:\n%s", repr(out))
            log.info("Test stderr repr:\n%s", repr(err))
        if rc != expect_rc:
            raise WrongExitCode("Expected %s, got %s" % (expect_rc, rc))
| Python | 0.000005 | |
0454f0aaddb581f172e5a9f5d2f503bd711af86d | Create core framework for 2D truss evaluation | truss_struct.py | truss_struct.py | import numpy as np
from scipy.optimize import fsolve
from scipy.optimize import minimize
numerical_mult = 1e9
class truss:
def __init__(self, x1, x2, y1, y2, E, A,
node1, node2, stress=None):
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
self.E = E
self.A = A
self.l = ((x2-x1)**2+(y2-y1)**2)**(1/2)
self.cos_t = (x2-x1)/self.l
self.sin_t = (y2-y1)/self.l
self.L = [[self.cos_t, self.sin_t, 0, 0],
[0, 0, self.cos_t, self.sin_t]]
self.eL = [-self.cos_t, -self.sin_t, self.cos_t, self.sin_t]
# K1D definition assumes a linear approximation
# for the truss displacement function
self.K1D = [[self.E*self.A/self.l, -self.E*self.A/self.l],
[-self.E*self.A/self.l, self.E*self.A/self.l]]
self.K2D = (np.matrix(self.L).getT()*np.matrix(self.K1D)*\
np.matrix(self.L)).tolist()
self.node1 = node1 #zero-indexed
self.node2 = node2 #zero-indexed
class force:
def __init__(self, fx, fy, node):
self.fx = fx
self.fy = fy
self.node = node
class fixed_node:
def __init__(self, node, x_or_y, disp):
self.node = node
self.x_or_y = x_or_y
self.disp = disp
def compile_K(dof, trusses):
K = np.zeros((dof, dof))
for i in range(0,len(trusses)):
for j in range(0,2):
for k in range(0,2):
node1 = trusses[i].node1
node2 = trusses[i].node2
K[j+2*node1][k+2*node1] += trusses[i].K2D[j][k]
K[j+2*node1][k+2*node2] += trusses[i].K2D[j][k+2]
K[j+2*node2][k+2*node1] += trusses[i].K2D[j+2][k]
K[j+2*node2][k+2*node2] += trusses[i].K2D[j+2][k+2]
return K
def compile_F(dof, forces):
F = np.zeros((dof,1))
for i in range(0,len(forces)):
node = forces[i].node
F[node][0] += forces[i].fx
F[node+1][0] += forces[i].fy
return F
def fix_nodes(K, F, c, fixed_nodes):
for i in range(0,len(fixed_nodes)):
if (fixed_nodes[i].x_or_y == 'x'):
ind = 2*fixed_nodes[i].node
if (fixed_nodes[i].x_or_y == 'y'):
ind = 2*fixed_nodes[i].node+1
K[ind][ind] += c
F[ind][0] += c*fixed_nodes[i].disp
return K, F
def solve_u(K, F):
u_matrix = np.matrix(K).getI()*np.matrix(F)
u_list = u_matrix.tolist()
return u_list
def set_c(K,mult):
K_max = K[0][0]
for i in range(0,len(K)):
if (K[i][i]>K_max):
K_max = K[i][i] #check diagonals only
return mult*K_max
def assign_stresses(u, trusses):
for i in range(0,len(trusses)):
truss = trusses[i]
disp1 = u[truss.node1][0]
disp2 = u[truss.node2][0]
truss.stress = (truss.E/truss.l)*(disp2-disp1)
dof = int(input('Number of nodes: '))*2
A = truss(x1=0.5,x2=0,y1=0.3,y2=0.3,
E=70e9,A=200*1e-6,node1=0,node2=1)
B = truss(x1=0.5,x2=0.9,y1=0.3,y2=0,
E=70e9,A=200*1e-6,node1=0,node2=2)
trusses = [A,B]
f1 = force(fx=0,fy=-12e3,node=0)
forces = [f1]
n1 = fixed_node(1,'x',0)
n2 = fixed_node(1,'y',0)
n3 = fixed_node(2,'x',0)
n4 = fixed_node(2,'y',0)
fixed_nodes=[n1,n2,n3,n4]
K = compile_K(dof,trusses)
F = compile_F(dof,forces)
c = set_c(K,numerical_mult)
K, F = fix_nodes(K,F,c,fixed_nodes)
u = solve_u(K,F)
assign_stresses(u,trusses)
for i in range(0, len(trusses)):
print(trusses[i].stress)
print(K,len(K))
print(F,len(F))
print(u,len(u))
| Python | 0 | |
6c1b5d91a9119a073eb0d41626d7464b3ff96aee | Add module with IntervalTree class. | typhon/trees.py | typhon/trees.py | """
Module that provides classes for tree creation and handling.
Trees are powerful structures to sort a huge amount of data and to speed up performing query requests on them
significantly.
"""
import numpy as np
__all__ = [
"IntervalTree"
]
class IntervalTreeNode:
"""Helper class for IntervalTree.
"""
def __init__(self, center_point, center, left, right):
self.center_point = center_point
self.center = np.asarray(center)
self.left = left
self.right = right
class IntervalTree:
"""Tree to implement fast 1-dimensional interval searches.
Based on the description in Wikipedia (https://en.wikipedia.org/wiki/Interval_tree#Centered_interval_tree)
and the GitHub repository by tylerkahn (https://github.com/tylerkahn/intervaltree-python).
"""
def __init__(self, intervals):
"""Creates an IntervalTree object.
Args:
intervals: A numpy array containing the intervals (list of two numbers).
"""
self.left = np.min(intervals)
self.right = np.max(intervals)
# We want to return the indices of the intervals instead of their actual bounds. But the original indices will
# be lost due resorting. Hence, we add the original indices to the intervals themselves.
indices = np.arange(intervals.shape[0]).reshape(intervals.shape[0], 1)
indexed_intervals = np.hstack([intervals, indices])
self.root = self._build_tree(np.sort(indexed_intervals, axis=0))
def _build_tree(self, intervals):
if not intervals.any():
return None
center_point = self._get_center(intervals)
# Sort the intervals into bins
center = intervals[(intervals[:, 0] <= center_point) & (intervals[:, 1] >= center_point)]
left = intervals[intervals[:, 1] < center_point]
right = intervals[intervals[:, 0] > center_point]
return IntervalTreeNode(center_point, center, self._build_tree(left), self._build_tree(right))
@staticmethod
def _get_center(intervals):
return intervals[int(intervals.shape[0]/2), 0]
@staticmethod
def overlaps(interval1, interval2):
"""Checks whether two interval overlap each other.
Args:
x1: The lower bound of the first interval.
x2: The higher bound of the first interval.
y1: The lower bound of the second interval.
y2: The higher bound of the second interval.
Returns:
True if the interval overlap.
"""
return interval1[0] <= interval2[0] <= interval1[1] or \
interval1[0] <= interval2[1] <= interval1[1] or \
(interval2[0] <= interval1[0] and interval2[1] >= interval1[1])
@staticmethod
def contains(interval, point):
"""Checks whether a point lies in a interval.
Args:
x1: The lower bound of the interval.
x2: The higher bound of the interval.
y: The point.
Returns:
True if point lies in the interval.
"""
return interval[0] <= point <= interval[1]
def query(self, intervals):
"""Find all overlaps between this tree and a list of intervals.
Args:
intervals: A list of intervals. Each interval is a tuple/list of two elements: its lower and higher
boundary.
Returns:
List of lists which contain the overlapping intervals of this tree for each element in `intervals`.
"""
return [self._query(interval, self.root, check_extreme=True) for interval in intervals]
def _query(self, query_interval, node, check_extreme=False):
# Check this special case: the bounds of the query interval lie outside of the bounds of this tree:
if (check_extreme
and IntervalTree.contains(query_interval, self.left)
and IntervalTree.contains(query_interval, self.right)):
return [] # TODO: Return all intervals
# Let's start with the centered intervals
intervals = [int(interval[2]) for interval in node.center if IntervalTree.overlaps(interval, query_interval)]
#print("Query interval", query_interval)
#print(node.center_point, node.center, intervals)
# if node.left is not None:
# print("links:", node.left.center)
# else:
# print("Links ist leer.")
# if node.right is not None:
# print("rechts:", node.right.center)
# else:
# print("Rechts ist leer.")
if query_interval[0] <= node.center_point and node.left is not None:
#print("Gehe nach links!")
intervals.extend(self._query(query_interval, node.left))
if query_interval[1] >= node.center_point and node.right is not None:
#print("Gehe nach rechts!")
intervals.extend(self._query(query_interval, node.right))
return intervals
def query_points(self, points):
    """Find all intervals of this tree which contain one of those points.

    Args:
        points: A list of points.

    Returns:
        A list with one entry per input point: the intervals of this
        tree that enclose it.
    """
    enclosing = []
    for pt in points:
        enclosing.append(self._query_point(pt, self.root, check_extreme=True))
    return enclosing
def _query_point(self, point, node, check_extreme=False):
    """Recursively collect ids of stored intervals that contain *point*.

    Bug fix: the recursive calls previously passed ``node`` instead of
    ``node.left`` / ``node.right``, which recursed on the same node forever
    (compare the correct descent in ``_query``).
    """
    # Special case at the root: the point lies outside this tree's bounds.
    if check_extreme and not IntervalTree.contains((self.left, self.right), point):
        return []
    # Intervals stored at this node; each interval's third element is its id.
    intervals = [int(interval[2]) for interval in node.center if IntervalTree.contains(interval, point)]
    if point < node.center_point and node.left is not None:
        intervals.extend(self._query_point(point, node.left))
    if point > node.center_point and node.right is not None:
        intervals.extend(self._query_point(point, node.right))
    return intervals
| Python | 0 | |
17067d5d25c5ce755ba86505ffcf1dd6fd572deb | Initialize version.py | version.py | version.py | """python-tutorials version."""
__version__ = '1.0.1'
| Python | 0.000004 | |
bc518732086795c699290d49f30ad5d449b79f9e | add template for settings.py | proyecto_eees/settings-template.py | proyecto_eees/settings-template.py | """
Django settings for proyecto_eees project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = (
('Carlos Gutierrez', 'cgutierr3z@utp.edu.co'),
)
# Modelo de autenticacion de Usuarios
AUTH_USER_MODEL = 'eees.Usuario'
# Permite verificar si un usuario esta activo o no
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.AllowAllUsersModelBackend']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x9sa47hq)w24g2xg!&+gvs$!w@h3ubjeif+a@b-@!3d5h4k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'eees.apps.EeesConfig',
'widget_tweaks',
]
#AUTH_USER_MODEL = 'eees.Usuario'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proyecto_eees.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proyecto_eees.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db-eees-AQWED.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
#Configuraciones para enviar mensajes usando gmail
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
#EMAIL_USE_TLS = True
EMAIL_USE_SSL = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'email@gmail.com'
EMAIL_HOST_PASSWORD = 'pwd'
EMAIL_PORT = 465
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
SERVER_EMAIL = EMAIL_HOST_USER
| Python | 0 | |
c95ef4dc0771098e3589721851865f012b652136 | add requirements unit test | awx/main/tests/unit/test_python_requirements.py | awx/main/tests/unit/test_python_requirements.py | from pip.operations import freeze
from django.conf import settings
def test_req():
    """Check that every pinned entry in requirements.txt is installed at the
    expected version.

    Bug fix: the final ``raise RuntimeError`` was unconditional, so this test
    failed even when every requirement matched; it now only raises when
    something is actually missing.
    """
    def check_is_in(src, dests):
        # Pip normalizes '_' to '-' in distribution names, so try both.
        if src not in dests:
            src2 = [src[0].replace('_', '-'), src[1]]
            if src2 not in dests:
                return False
        return True

    base_dir = settings.BASE_DIR
    reqs_actual = []
    xs = freeze.freeze(local_only=True, requirement=base_dir + "/../requirements/requirements.txt")
    for x in xs:
        # Everything after this marker was added by pip, not by our file.
        if '## The following requirements were added by pip freeze' in x:
            break
        reqs_actual.append(x.split('=='))

    reqs_expected = []
    with open(base_dir + "/../requirements/requirements.txt") as f:
        for line in f:
            # TODO: process git requirements and use egg
            if line.strip().startswith('#') or line.strip().startswith('git'):
                continue
            if line.startswith('-e'):
                continue
            reqs_expected.append(line.rstrip().split('=='))

    not_found = [r for r in reqs_expected if not check_is_in(r, reqs_actual)]
    # Only fail when a pinned requirement is missing or at the wrong version.
    if not_found:
        raise RuntimeError("%s not found in \n\n%s" % (not_found, reqs_expected))
| Python | 0 | |
5f47cf46c82d9a48a9efe5ad11c6c3a55896da12 | Implement abstract class for csc and csr matrix | cupy/sparse/compressed.py | cupy/sparse/compressed.py | from cupy import cusparse
from cupy.sparse import base
from cupy.sparse import data as sparse_data
class _compressed_sparse_matrix(sparse_data._data_matrix):
    """Abstract base for compressed sparse matrices on the GPU.

    Subclasses implement :meth:`_swap` to order the (major, minor) axes,
    which distinguishes the CSR and CSC formats.
    """

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build the matrix from a ``(data, indices, indptr)`` triple.

        Raises:
            ValueError: on non-1-D inputs, mismatched sizes, unsupported
                dtypes, or an ``indptr`` inconsistent with ``shape``.
        """
        if isinstance(arg1, tuple) and len(arg1) == 3:
            data, indices, indptr = arg1
            if shape is not None and len(shape) != 2:
                raise ValueError(
                    'Only two-dimensional sparse arrays are supported.')
            if not(base.isdense(data) and data.ndim == 1 and
                   base.isdense(indices) and indices.ndim == 1 and
                   base.isdense(indptr) and indptr.ndim == 1):
                raise ValueError(
                    'data, indices, and indptr should be 1-D')
            if len(data) != len(indices):
                raise ValueError('indices and data should have the same size')
            if dtype is None:
                dtype = data.dtype
            if dtype != 'f' and dtype != 'd':
                raise ValueError('Only float32 and float64 are supported')
            sparse_data._data_matrix.__init__(self, data)
            # Index arrays are stored as 32-bit ints -- presumably a cuSPARSE
            # requirement; TODO confirm.
            self.indices = indices.astype('i', copy=copy)
            self.indptr = indptr.astype('i', copy=copy)
            if shape is None:
                # Infer the shape from the index arrays.
                shape = self._swap(len(indptr) - 1, int(indices.max()) + 1)
        else:
            raise ValueError(
                'Only (data, indices, indptr) format is supported')
        major, minor = self._swap(*shape)
        if len(indptr) != major + 1:
            raise ValueError('index pointer size (%d) should be (%d)'
                             % (len(indptr), major + 1))
        self._descr = cusparse.MatDescriptor.create()
        self._shape = shape

    def _with_data(self, data):
        """Return a matrix of the same format and shape with new data values."""
        return self.__class__(
            (data, self.indices.copy(), self.indptr.copy()), shape=self.shape)

    def _swap(self, x, y):
        # Implemented by CSR/CSC subclasses to order the (major, minor) axes.
        raise NotImplementedError

    def get_shape(self):
        """Shape of the matrix.

        Returns:
            tuple: Shape of the matrix.
        """
        return self._shape

    def getnnz(self, axis=None):
        """Number of stored values, including explicit zeros."""
        if axis is None:
            return self.data.size
        else:
            # Per-axis counts are not implemented.
            raise ValueError

    def sorted_indices(self):
        """Returns a copy of the matrix with sorted indices."""
        x = self.copy()
        x.sort_indices()
        return x
| Python | 0.000047 | |
e99af02407edf1424733350a359264c2202b27c3 | Add unit test for appending with write_raster_netcdf. | landlab/io/netcdf/tests/test_write_raster_netcdf.py | landlab/io/netcdf/tests/test_write_raster_netcdf.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io.netcdf import WITH_NETCDF4, NotRasterGridError, write_raster_netcdf
def test_append_with_time(tmpdir):
    """Appending to an existing raster NetCDF file grows the time dimension."""
    # Bug fix: ``nc`` was referenced but never imported anywhere in this
    # module; pull it in here and skip cleanly when netCDF4 is unavailable.
    nc = pytest.importorskip("netCDF4")
    field = RasterModelGrid(4, 3)
    field.add_field("node", "topographic__elevation", np.ones(12, dtype=np.int64))

    with tmpdir.as_cwd():
        write_raster_netcdf(
            "test.nc", field, append=False, format="NETCDF4", with_time=True
        )
        # Change the field and append a second time slice.
        field.at_node["topographic__elevation"] *= 2
        write_raster_netcdf("test.nc", field, append=True, format="NETCDF4")

        root = nc.Dataset("test.nc", "r", format="NETCDF4")
        for name in ["topographic__elevation"]:
            assert name in root.variables
            assert_array_equal(
                root.variables[name][:],
                [
                    [[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]],
                    [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]],
                ],
            )
            assert root.variables[name][:].dtype == "int64"
            assert "nt" in root.dimensions
            assert len(root.dimensions["nt"]) == 2
        root.close()
def test_without_time(tmpdir):
    """Writing with ``with_time=False`` still creates a length-1 time axis."""
    # Bug fix: ``nc`` was referenced but never imported anywhere in this
    # module; pull it in here and skip cleanly when netCDF4 is unavailable.
    nc = pytest.importorskip("netCDF4")
    field = RasterModelGrid(4, 3)
    field.add_field("node", "topographic__elevation", np.ones(12, dtype=np.int64))

    with tmpdir.as_cwd():
        write_raster_netcdf(
            "test.nc", field, append=False, format="NETCDF4", with_time=False
        )

        root = nc.Dataset("test.nc", "r", format="NETCDF4")
        for name in ["topographic__elevation"]:
            assert name in root.variables
            assert_array_equal(
                root.variables[name][:], [[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]]
            )
            assert root.variables[name][:].dtype == "int64"
            assert "nt" in root.dimensions
            assert len(root.dimensions["nt"]) == 1
        root.close()
def test_with_time(tmpdir):
    """Writing a single snapshot with ``with_time=True`` yields one time slice."""
    # Bug fix: ``nc`` was referenced but never imported anywhere in this
    # module; pull it in here and skip cleanly when netCDF4 is unavailable.
    nc = pytest.importorskip("netCDF4")
    field = RasterModelGrid(4, 3)
    field.add_field("node", "topographic__elevation", np.ones(12, dtype=np.int64))

    with tmpdir.as_cwd():
        write_raster_netcdf(
            "test.nc", field, append=False, format="NETCDF4", with_time=True
        )

        root = nc.Dataset("test.nc", "r", format="NETCDF4")
        for name in ["topographic__elevation"]:
            assert name in root.variables
            assert_array_equal(
                root.variables[name][:], [[[1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1]]]
            )
            assert root.variables[name][:].dtype == "int64"
            assert "nt" in root.dimensions
            assert len(root.dimensions["nt"]) == 1
        root.close()
| Python | 0 | |
36333c275f4d3a66c8f14383c3ada5a42a197bea | Add module for displaying RAM usage | bumblebee/modules/memory.py | bumblebee/modules/memory.py | import bumblebee.module
import psutil
def fmt(num, suffix='B'):
    """Format a byte count using binary (1024-based) unit prefixes.

    Bug fix: the original fallback used the invalid format spec
    ``{:05.2f%}`` (ValueError at runtime) and had already divided ``num``
    one step past the "Gi" scale it labelled it with.  Values below
    1024 GiB format exactly as before; larger values now render as a
    (possibly >=1024) GiB quantity instead of crashing.
    """
    for unit in ["", "Ki", "Mi"]:
        if num < 1024.0:
            return "{:.2f}{}{}".format(num, unit, suffix)
        num /= 1024.0
    # Terminal scale: do not divide further, report in GiB.
    return "{:.2f}{}{}".format(num, "Gi", suffix)
class Module(bumblebee.module.Module):
    """Displays available/total RAM and flags low-memory conditions.

    Bug fixes: ``data()`` computed ``free``/``total`` locals but never used
    them; ``warning``/``critical`` compared psutil's *used*-memory percent
    against low thresholds, while the displayed value is the *available*
    percentage -- the thresholds now test available memory as intended.
    """

    def __init__(self, args):
        super(Module, self).__init__(args)
        self._mem = psutil.virtual_memory()

    def data(self):
        """Return "<available>/<total> (<available-%>)" and refresh the sample."""
        self._mem = psutil.virtual_memory()
        free = self._mem.available
        total = self._mem.total
        # psutil's ``percent`` is the fraction *used*; show the free fraction.
        return "{}/{} ({:05.02f}%)".format(fmt(free), fmt(total), 100.0 - self._mem.percent)

    def warning(self):
        # Warn when less than 20% of memory is still available.
        return 100.0 - self._mem.percent < 20

    def critical(self):
        # Critical when less than 10% of memory is still available.
        return 100.0 - self._mem.percent < 10
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| Python | 0 | |
dd1bface79e3fcc53e9e8b1cc10ea9f467b757f2 | update use of AppCommand | django_extensions/management/commands/create_jobs.py | django_extensions/management/commands/create_jobs.py | # -*- coding: utf-8 -*-
import os
import sys
import shutil
from django.core.management.base import AppCommand
from django.core.management.color import color_style
from django_extensions.management.utils import _make_writeable, signalcommand
class Command(AppCommand):
    help = "Creates a Django jobs command directory structure for the given app name in the current directory."

    requires_system_checks = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = True

    @signalcommand
    def handle_app_config(self, app, **options):
        """Copy the jobs skeleton into the given app's directory."""
        copy_template('jobs_template', app.path, **options)
def copy_template(template_name, copy_to, **options):
    """Copy the named template directory tree from django_extensions' conf
    directory into *copy_to*.

    Existing files are never overwritten; a ``.tmpl`` suffix is removed from
    copied file names; hidden directories, ``.pyc`` and ``.DS_Store`` files
    are skipped.
    """
    import django_extensions
    style = color_style()
    ERROR = getattr(style, 'ERROR', lambda x: x)
    SUCCESS = getattr(style, 'SUCCESS', lambda x: x)
    template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
    # walks the template structure and copies it
    for d, subdirs, files in os.walk(template_dir):
        relative_dir = d[len(template_dir) + 1:]
        if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
            os.mkdir(os.path.join(copy_to, relative_dir))
        # Bug fix: deleting entries by index while enumerating skipped the
        # element after each removal.  Prune in place (slice assignment) so
        # os.walk also stops descending into hidden directories.
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
        for f in files:
            if f.endswith('.pyc') or f.startswith('.DS_Store'):
                continue
            path_old = os.path.join(d, f)
            path_new = os.path.join(copy_to, relative_dir, f)
            # Bug fix: ``rstrip(".tmpl")`` strips any trailing run of those
            # *characters* (e.g. "foo.html.tmpl" -> "foo.h"); remove the
            # exact suffix only.
            if path_new.endswith('.tmpl'):
                path_new = path_new[:-len('.tmpl')]
            if os.path.exists(path_new):
                if options.get('verbosity', 1) > 1:
                    print(ERROR("%s already exists" % path_new))
                continue
            if options.get('verbosity', 1) > 1:
                print(SUCCESS("%s" % path_new))
            with open(path_old, 'r') as fp_orig:
                with open(path_new, 'w') as fp_new:
                    fp_new.write(fp_orig.read())
            try:
                shutil.copymode(path_old, path_new)
                _make_writeable(path_new)
            except OSError:
                sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
| # -*- coding: utf-8 -*-
import os
import sys
import shutil
from django.core.management.base import AppCommand
from django.core.management.color import color_style
from django_extensions.management.utils import _make_writeable, signalcommand
class Command(AppCommand):
help = "Creates a Django jobs command directory structure for the given app name in the current directory."
args = "[appname]"
label = 'application name'
requires_system_checks = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = True
@signalcommand
def handle_app_config(self, app, **options):
copy_template('jobs_template', app.path, **options)
@signalcommand
def handle_app(self, app, **options):
# handle_app is RemovedInDjango19
app_dir = os.path.dirname(app.__file__)
copy_template('jobs_template', app_dir, **options)
def copy_template(template_name, copy_to, **options):
"""copies the specified template directory to the copy_to location"""
import django_extensions
style = color_style()
ERROR = getattr(style, 'ERROR', lambda x: x)
SUCCESS = getattr(style, 'SUCCESS', lambda x: x)
template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
# walks the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir) + 1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(copy_to, relative_dir, f).rstrip(".tmpl")
if os.path.exists(path_new):
if options.get('verbosity', 1) > 1:
print(ERROR("%s already exists" % path_new))
continue
if options.get('verbosity', 1) > 1:
print(SUCCESS("%s" % path_new))
with open(path_old, 'r') as fp_orig:
with open(path_new, 'w') as fp_new:
fp_new.write(fp_orig.read())
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
| Python | 0 |
06b536cdfd684d12ce64670bde50fdcbf7a71bd2 | Add a workspace_binary rule to run a binary from the workspace root | defs/run_in_workspace.bzl | defs/run_in_workspace.bzl | # Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This technique was inspired by the gazelle rule implementation in bazelbuild/rules_go:
# https://github.com/bazelbuild/rules_go/blob/86ade29284ca11deeead86c061e9ba9bd0d157e0/go/private/tools/gazelle.bzl
# Writes out a script which saves the runfiles directory,
# changes to the workspace root, and then runs a command.
def _workspace_binary_script_impl(ctx):
    """Emits a bash wrapper that cds to the workspace root before exec'ing."""
    # The script records the runfiles directory ($BASE), resolves the
    # WORKSPACE symlink to find the workspace root, and runs the wrapped
    # command from there so relative paths behave as if run from the root.
    content = """#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
BASE=$(pwd)
cd $(dirname $(readlink WORKSPACE))
"$BASE/{cmd}" $@
""".format(cmd=ctx.file.cmd.short_path)
    ctx.actions.write(output=ctx.outputs.executable, content=content, is_executable=True)
    # Both the wrapped command and the WORKSPACE file must be in runfiles so
    # the generated script can find them at run time.
    runfiles = ctx.runfiles(
        files = [ctx.file.cmd, ctx.file.workspace],
    )
    return [DefaultInfo(runfiles=runfiles)]

_workspace_binary_script = rule(
    attrs = {
        # The binary target to wrap.
        "cmd": attr.label(
            mandatory = True,
            allow_files = True,
            single_file = True,
        ),
        # The WORKSPACE file; its symlink is resolved to locate the root.
        "workspace": attr.label(
            mandatory = True,
            allow_files = True,
            single_file = True,
        ),
    },
    executable = True,
    implementation = _workspace_binary_script_impl,
)

# Wraps a binary to be run in the workspace root via bazel run.
#
# For example, one might do something like
#
# workspace_binary(
#     name = "dep",
#     cmd = "//vendor/github.com/golang/dep/cmd/dep",
# )
#
# which would allow running dep with bazel run.
def workspace_binary(name, cmd, visibility=None):
    """Macro: generates the wrapper script and exposes it as an sh_binary."""
    script_name = name + "_script"
    _workspace_binary_script(
        name=script_name,
        cmd=cmd,
        workspace = "//:WORKSPACE",
    )
    native.sh_binary(
        name = name,
        srcs = [":" + script_name],
        visibility = visibility,
    )
| Python | 0.000001 | |
eda3e6c005c1115a039f394d6f00baabebd39fee | Add command for full daily build process | calaccess_website/management/commands/updatebuildpublish.py | calaccess_website/management/commands/updatebuildpublish.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Update to the latest available CAL-ACCESS snapshot and publish the files to the
website.
"""
import logging
from django.core.management import call_command
from calaccess_raw.management.commands.updatecalaccessrawdata import Command as updatecommand
logger = logging.getLogger(__name__)
class Command(updatecommand):
    """
    Update to the latest available CAL-ACCESS snapshot and publish the files to
    the website.
    """
    # Implicit string concatenation: the original backslash continuation
    # embedded the source indentation (a run of spaces) inside the help text.
    help = ('Update to the latest available CAL-ACCESS snapshot and publish '
            'the files to the website.')

    def handle(self, *args, **options):
        """
        Make it happen.
        """
        # Run the parent update command first, then bake and publish.
        super(Command, self).handle(*args, **options)

        self.header('Creating latest file links')
        call_command('createlatestlinks')

        self.header('Baking downloads-website content')
        call_command('build')

        self.header('Publishing baked content to S3 bucket.')
        call_command('publish')

        self.success("Done!")
| Python | 0 | |
52637b519ca7e743b913f55c37a2ae952a520d9f | List commands of given apps | django_dev_commands/management/commands/commands.py | django_dev_commands/management/commands/commands.py | # -*- coding: utf-8 -*-
"""List commands of the specified applications.
By passing command line arguments that represents regexs of app names you can list the commands of
those apps only.
"""
import re
from django.core.management import get_commands, execute_from_command_line
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import color_style
from django.utils import six
def get_filtered_commands(*filters):
    """Yield ``(app, command)`` pairs whose app name matches any filter regex."""
    pattern = re.compile("|".join(filters))
    for command, app in six.iteritems(get_commands()):
        if pattern.search(app) is not None:
            yield app, command
class Command(BaseCommand):
    """List management commands, optionally filtered by app-name regexes."""

    args = '<app-name-regex ...>'
    help = __doc__

    def handle(self, *filters, **options):
        if filters:
            style = color_style()
            # Bug fix: the original line read ``output = set()p`` -- a stray
            # character that made the module a SyntaxError.
            seen_apps = set()
            for app, command in get_filtered_commands(*filters):
                # Print each app header once, before its first command.
                if app not in seen_apps:
                    self.stdout.write(style.NOTICE("\n[{}]\n".format(app)))
                    seen_apps.add(app)
                self.stdout.write("{}\n".format(command))
        else:
            # No filters given: fall back to Django's full command listing.
            execute_from_command_line(["manage.py"])
| Python | 0.99999 | |
056966052d0c23395a205511dce2e9577f376539 | Add Sequence | chainerrl/links/sequence.py | chainerrl/links/sequence.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import super
from future import standard_library
standard_library.install_aliases()
import inspect
import chainer
from chainerrl.recurrent import RecurrentChainMixin
class Sequence(chainer.ChainList, RecurrentChainMixin):
    """Applies a sequence of callables in order, feeding each one's output
    into the next.

    Layers may be arbitrary callables; only those that are chainer Links are
    registered with the underlying ChainList.
    """

    def __init__(self, *layers):
        self.layers = layers
        links = [layer for layer in layers if isinstance(layer, chainer.Link)]
        super().__init__(*links)

    def __call__(self, x, **kwargs):
        h = x
        for layer in self.layers:
            # Forward only the keyword arguments the layer's signature names.
            # NOTE(review): inspect.getargspec works on plain functions;
            # confirm its behaviour when ``layer`` is a Link instance.
            layer_argnames = inspect.getargspec(layer)[0]
            layer_kwargs = {k: v for k, v in kwargs.items()
                            if k in layer_argnames}
            h = layer(h, **layer_kwargs)
        return h
| Python | 0.000048 | |
b8acaf64187f5626ef6755ef00d2b2a1471d4914 | Add closure type inference test | numba/tests/closures/test_closure_type_inference.py | numba/tests/closures/test_closure_type_inference.py | import numpy as np
from numba import *
from numba.tests.test_support import *
@autojit
def test_cellvar_promotion(a):
    """
    >>> inner = test_cellvar_promotion(10)
    200.0
    >>> inner.__name__
    'inner'
    >>> inner()
    1000.0
    """
    # NOTE: the docstring above is executed as a doctest by testmod() below.
    # ``inner`` closes over the cell variables ``a`` and ``b``; the first call
    # sees the int values (10 * 20 -> printed as 200.0 by the jitted code),
    # the second call sees the float values reassigned afterwards
    # (10.0 * 100.0 -> 1000.0).
    b = int(a) * 2

    @jit(void())
    def inner():
        print a * b

    inner()
    a = float(a)
    b = a * a # + 1j # Promotion issue
    return inner

testmod()
79e3931d1a89fc1423e098b108a78302349c3f04 | Add a test scenario for establish of vpn connection | ec2api/tests/functional/scenario/test_vpn_routing.py | ec2api/tests/functional/scenario/test_vpn_routing.py | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ec2api.tests.functional import base
from ec2api.tests.functional import config
CONF = config.CONF
class VpnRoutingTest(base.EC2TestCase):
    """Checks that a static VPN route propagates into the VPC route table."""

    VPC_CIDR = '10.4.0.0/20'
    CUSTOMER_GATEWAY_IP = '198.51.100.77'
    CUSTOMER_VPN_CIDR = '172.16.25.0/24'

    @classmethod
    @base.safe_setup
    def setUpClass(cls):
        super(VpnRoutingTest, cls).setUpClass()
        if not base.TesterStateHolder().get_vpc_enabled():
            raise cls.skipException('VPC is disabled')

    def test_vpn_routing(self):
        vpc_id, _subnet_id = self.create_vpc_and_subnet(self.VPC_CIDR)
        # Customer gateway: the on-premises side of the VPN connection.
        data = self.client.create_customer_gateway(
            Type='ipsec.1', PublicIp=self.CUSTOMER_GATEWAY_IP, BgpAsn=65000)
        cgw_id = data['CustomerGateway']['CustomerGatewayId']
        self.addResourceCleanUpStatic(
            self.client.delete_customer_gateway, CustomerGatewayId=cgw_id)
        data = self.client.create_vpn_gateway(Type='ipsec.1')
        vgw_id = data['VpnGateway']['VpnGatewayId']
        self.addResourceCleanUpStatic(
            self.client.delete_vpn_gateway, VpnGatewayId=vgw_id)
        # Static routing only -- no BGP session is used in this test.
        data = self.client.create_vpn_connection(
            CustomerGatewayId=cgw_id, VpnGatewayId=vgw_id,
            Options={'StaticRoutesOnly': True}, Type='ipsec.1')
        vpn_id = data['VpnConnection']['VpnConnectionId']
        self.addResourceCleanUp(self.client.delete_vpn_connection,
                                VpnConnectionId=vpn_id)
        data = self.client.attach_vpn_gateway(VpnGatewayId=vgw_id,
                                              VpcId=vpc_id)
        self.addResourceCleanUp(self.client.detach_vpn_gateway,
                                VpnGatewayId=vgw_id, VpcId=vpc_id)
        # Wait for both the connection and the gateway attachment.
        vpn_waiter = self.get_vpn_connection_waiter()
        vpn_waiter.wait_available(vpn_id)
        attach_waiter = self.get_vpn_gateway_attachment_waiter()
        attach_waiter.wait_available(vgw_id, 'attached')

        # Enable propagation into the VPC's route table and add the static
        # route for the customer network.
        data = self.client.describe_route_tables(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}])
        rtb_id = data['RouteTables'][0]['RouteTableId']
        data = self.client.enable_vgw_route_propagation(RouteTableId=rtb_id,
                                                        GatewayId=vgw_id)
        data = self.client.create_vpn_connection_route(
            VpnConnectionId=vpn_id,
            DestinationCidrBlock=self.CUSTOMER_VPN_CIDR)
        route_waiter = self.get_vpn_connection_route_waiter(
            self.CUSTOMER_VPN_CIDR)
        route_waiter.wait_available(vpn_id)

        # The propagated route must appear in the route table as active.
        data = self.client.describe_route_tables(RouteTableIds=[rtb_id])
        route = next((r for r in data['RouteTables'][0]['Routes']
                      if r['DestinationCidrBlock'] == self.CUSTOMER_VPN_CIDR),
                     None)
        self.assertIsNotNone(route)
        self.assertEqual('active', route['State'])
        self.assertEqual('EnableVgwRoutePropagation', route['Origin'])
| Python | 0.000293 | |
3a04bff5a7940463d6429918215429700befb507 | add valid-number | valid-number.py | valid-number.py | # Link: https://oj.leetcode.com/problems/valid-number/
class Solution:
    """
    Notes please see https://blog.xiaoba.me/2014/11/10/leetcode-valid-number.html
    """
    # @param s, a string
    # @return a boolean
    def isNumber(self, s):
        # DFA transition table, indexed as stateTable[inputType][state].
        # Rows correspond to input classes from _getInputType:
        #   0 digit, 1 '.', 2 space, 3 'e'/'E', 4 sign ('+'/'-').
        # -1 marks an invalid transition; the exact meaning of each state is
        # described in the blog post linked above.
        stateTable = [
            [ 1, 1, 1, 3, 3, 7, 7, 7,-1],
            [ 4, 3, 4,-1,-1,-1,-1,-1,-1],
            [ 0, 8,-1, 8,-1,-1,-1, 8, 8],
            [-1, 5,-1, 5,-1,-1,-1,-1,-1],
            [ 2,-1,-1,-1,-1, 6,-1,-1,-1]
        ]
        i = 0
        state = 0
        while True:
            if i == len(s):
                break
            c = s[i]
            i += 1
            inputType = self._getInputType(c)
            # Any unrecognized character makes the whole string invalid.
            if inputType is None:
                return False
            state = stateTable[inputType][state]
            if state == -1:
                return False
        # States 1, 3, 7 and 8 are the accepting states of the automaton.
        return state == 1 or state == 3 or state == 7 or state == 8

    def _isDigit(self, c):
        # True for '0'..'9' only (plain ASCII comparison).
        return c >= '0' and c <= '9'

    def _getInputType(self, c):
        # Map a character to a row index of stateTable; falls through to an
        # implicit None for unsupported characters.
        if self._isDigit(c):
            return 0
        if c == '.':
            return 1
        if c == ' ':
            return 2
        if c.lower() == 'e':
            return 3
        if c == '+' or c == '-':
            return 4
| Python | 0.999993 | |
dec6ea168c68e267f15b74407f8745d242629d30 | Create tokens.py | tokens.py | tokens.py | C_KEY = ""
C_SECRET = ""
A_TOKEN = ""
A_TOKEN_SECRET = ""
| Python | 0.000001 | |
d56c3528ad8058231910fd3d06895f39174eeb6c | Prepare v2.16.2.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.16.2.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.16.1'
| Python | 0.000007 |
502beb022bb7d4e70f44e40411a1af2f7f08c14e | add test version | CexioAPI.py | CexioAPI.py | #
# CexioAPI class
#
# @author zhzhussupovkz@gmail.com
#
# The MIT License (MIT)
#
# Copyright (c) 2013 Zhussupov Zhassulan zhzhussupovkz@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import urllib
import urllib2
import hmac
import simplejson
import time
class CexioAPI(object):
    """Minimal client for the cex.io trading API (GHS/BTC pair).

    Bug fixes: every method was defined without ``self`` (so any bound call
    raised TypeError, and references to ``self.api_url`` etc. raised
    NameError); mutable dict default arguments were shared across calls; and
    ``hashlib`` was used for request signing but never imported.
    """

    def __init__(self, client_id, api_key, secret):
        self.api_url = "https://cex.io/api/"
        self.client_id = client_id
        self.api_key = api_key
        self.secret = secret

    # for public requests
    def __public_request(self, command, args=None):
        """POST *args* to a public endpoint and return the decoded JSON."""
        if args is None:
            args = {}
        args = urllib.urlencode(args)
        url = self.api_url + command
        req = urllib2.Request(url, args)
        opener = urllib2.build_opener(req)
        f = opener.open(req)
        res = simplejson.load(f)
        return res

    # for private requests
    def __private_request(self, command, args=None):
        """POST *args* to an authenticated endpoint, signing with HMAC-SHA256."""
        import hashlib  # needed for the HMAC digest; was never imported
        if args is None:
            args = {}
        nonce = str(time.time()).split('.')[0]
        message = nonce + self.client_id + self.api_key
        signature = hmac.new(self.secret, message, digestmod=hashlib.sha256).hexdigest().upper()
        args.update({'key': self.api_key, 'nonce': nonce, 'signature': signature})
        args = urllib.urlencode(args)
        url = self.api_url + command
        req = urllib2.Request(url, args)
        opener = urllib2.build_opener(req)
        f = opener.open(req)
        res = simplejson.load(f)
        return res

    ############### ticker ####################
    # Returns JSON dictionary:
    # last - last BTC price
    # high - last 24 hours price high
    # low - last 24 hours price low
    # volume - last 24 hours volume
    # bid - highest buy order
    # ask - lowest sell order
    def ticker(self):
        return self.__public_request('ticker/GHS/BTC')

    ############### order_book ###############
    # Returns JSON dictionary with "bids" and "asks".
    # Each is a list of open orders and each order is
    # represented as a list of price and amount.
    def order_book(self):
        return self.__public_request('order_book/GHS/BTC')

    ############### trade_history ###############
    # Returns a list of recent trades, where each trade is a JSON dictionary:
    # tid - trade id
    # amount - trade amount
    # price - price
    # date - UNIX timestamp
    def trade_history(self, since=1):
        args = {'since': since}
        return self.__public_request('trade_history/GHS/BTC', args)

    ############## balance ################
    # Returns JSON dictionary:
    # available - available balance
    # orders - balance in pending orders
    # bonus - referral program bonus
    def balance(self):
        return self.__private_request('balance')

    ############## open orders #############
    # Returns JSON list of open orders. Each order is represented as dictionary:
    # id - order id
    # time - timestamp
    # type - buy or sell
    # price - price
    # amount - amount
    # pending - pending amount (if partially executed)
    def open_orders(self):
        return self.__private_request('open_orders/GHS/BTC')

    ############## cancel order ############
    # Returns 'true' if order has been found and canceled.
    # Params:
    # id - order ID
    def cancel_order(self, order_id):
        args = {'order_id': order_id}
        return self.__private_request('cancel_order/GHS/BTC', args)

    ############ place order #############
    # Returns JSON dictionary representing order:
    # id - order id
    # time - timestamp
    # type - buy or sell
    # price - price
    # amount - amount
    # pending - pending amount (if partially executed)
    # Params:
    # type - 'buy' or 'sell'
    # amount - amount
    # price - price
    def place_order(self, p_type='buy', amount=1, price=1):
        args = {'type': p_type, 'amount': amount, 'price': price}
        return self.__private_request('place_order/GHS/BTC', args)
| Python | 0 | |
111966757a57d929263de342fa8c6a0af2f26869 | Add normalize_field processing algorithm | svir/processing/normalize_field.py | svir/processing/normalize_field.py | # -*- coding: utf-8 -*-
"""
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsFeature,
QgsField,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingUtils,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterBoolean,
QgsProcessingParameterFeatureSink)
from processing.tools import vector
class NormalizeFieldAlgorithm(QgsProcessingAlgorithm):
    """
    This algorithm takes a vector layer and normalizes the values of one of
    its fields in the interval 0-1 (or 1-0 if 'inverted' is checked).
    """

    # Constants used to refer to parameters and outputs. They will be
    # used when calling the algorithm from another algorithm, or when
    # calling from the QGIS console.
    INPUT = 'INPUT'
    FIELD_TO_NORMALIZE = 'FIELD_TO_NORMALIZE'
    INVERTED = 'INVERTED'
    OUTPUT = 'OUTPUT'

    def tr(self, string):
        """
        Returns a translatable string with the self.tr() function.
        """
        return QCoreApplication.translate('Processing', string)

    def createInstance(self):
        return NormalizeFieldAlgorithm()

    def name(self):
        """
        Returns the algorithm name, used for identifying the algorithm. This
        string should be fixed for the algorithm, and must not be localised.
        The name should be unique within each provider. Names should contain
        lowercase alphanumeric characters only and no spaces or other
        formatting characters.
        """
        return 'normalizefield'

    def displayName(self):
        """
        Returns the translated algorithm name, which should be used for any
        user-visible display of the algorithm name.
        """
        return self.tr('Normalize field values in the range 0-1')

    def group(self):
        """
        Returns the name of the group this algorithm belongs to. This string
        should be localised.
        """
        return self.tr('OpenQuake IRMT plugin')

    def groupId(self):
        """
        Returns the unique ID of the group this algorithm belongs to. This
        string should be fixed for the algorithm, and must not be localised.
        The group id should be unique within each provider. Group id should
        contain lowercase alphanumeric characters only and no spaces or other
        formatting characters.
        """
        return 'irmt'

    def shortHelpString(self):
        """
        Returns a localised short helper string for the algorithm. This string
        should provide a basic description about what the algorithm does and
        the parameters and outputs associated with it..
        """
        return self.tr("Normalize one of the fields of a vector layer")

    def initAlgorithm(self, config=None):
        """
        Here we define the inputs and output of the algorithm, along
        with some other properties.
        """
        # We add the input vector features source
        self.addParameter(
            QgsProcessingParameterFeatureSource(
                self.INPUT,
                self.tr('Input layer'),
                [QgsProcessing.TypeVector]
            )
        )
        self.addParameter(
            QgsProcessingParameterField(
                self.FIELD_TO_NORMALIZE,
                description=self.tr("Field to normalize"),
                defaultValue=None,
                parentLayerParameterName=self.INPUT,
                type=QgsProcessingParameterField.Numeric,
                allowMultiple=False,  # FIXME
                optional=False,
            )
        )
        self.addParameter(
            QgsProcessingParameterBoolean(
                self.INVERTED,
                description=self.tr("Invert transformation"),
                defaultValue=False,
            )
        )
        # We add a feature sink in which to store our processed features (this
        # usually takes the form of a newly created vector layer when the
        # algorithm is run in QGIS).
        self.addParameter(
            QgsProcessingParameterFeatureSink(
                self.OUTPUT,
                self.tr('Output layer')
            )
        )

    def processAlgorithm(self, parameters, context, feedback):
        """
        Here is where the processing itself takes place.
        """
        # Retrieve the feature source and sink. The 'dest_id' variable is used
        # to uniquely identify the feature sink, and must be included in the
        # dictionary returned by the processAlgorithm function.
        source = self.parameterAsSource(
            parameters,
            self.INPUT,
            context
        )
        # If source was not found, throw an exception to indicate that the
        # algorithm encountered a fatal error. The exception text can be any
        # string, but in this case we use the pre-built invalidSourceError
        # method to return a standard helper text for when a source cannot be
        # evaluated
        if source is None:
            raise QgsProcessingException(
                self.invalidSourceError(parameters, self.INPUT))
        fieldname_to_normalize = self.parameterAsString(
            parameters, self.FIELD_TO_NORMALIZE, context)
        field_to_normalize = [field for field in source.fields()
                              if field.name() == fieldname_to_normalize][0]
        # The output field keeps the type of the input field, with a
        # '_MIN_MAX' suffix appended to its name.
        normalized_field = QgsField(field_to_normalize)
        normalized_field_name = '%s_MIN_MAX' % fieldname_to_normalize
        normalized_field.setName(normalized_field_name)
        sink_fields = source.fields()
        sink_fields.append(normalized_field)
        (sink, self.dest_id) = self.parameterAsSink(
            parameters,
            self.OUTPUT,
            context,
            sink_fields,
            source.wkbType(),
            source.sourceCrs()
        )
        # Send some information to the user
        feedback.pushInfo('CRS is {}'.format(source.sourceCrs().authid()))
        # If sink was not created, throw an exception to indicate that the
        # algorithm encountered a fatal error. The exception text can be any
        # string, but in this case we use the pre-built invalidSinkError method
        # to return a standard helper text for when a sink cannot be evaluated
        if sink is None:
            raise QgsProcessingException(
                self.invalidSinkError(parameters, self.OUTPUT))

        # Compute the number of steps to display within the progress bar and
        # get features from source
        total = 100.0 / source.featureCount() if source.featureCount() else 0

        original_values = vector.values(
            source, fieldname_to_normalize)[fieldname_to_normalize]
        min_value = min(original_values)
        max_value = max(original_values)
        min_max_range = float(max_value - min_value)
        if min_max_range == 0:
            # BUG FIX: report invalid input through the processing framework
            # (QgsProcessingException) so the user sees a proper error message
            # instead of an unhandled ValueError traceback.
            raise QgsProcessingException(
                "The min_max transformation can not be performed"
                " if the range of valid values (max-min) is zero.")
        inverted = self.parameterAsBool(
            parameters, self.INVERTED, context)
        # Transform
        if inverted:
            normalized_values = [1.0 - ((x - min_value) / min_max_range)
                                 for x in original_values]
        else:
            normalized_values = [(x - min_value) / min_max_range
                                 for x in original_values]

        for current, source_feature in enumerate(source.getFeatures()):
            # Stop the algorithm if cancel button has been clicked
            if feedback.isCanceled():
                break
            # Copy all original attributes, then add the normalized value.
            sink_feature = QgsFeature(sink_fields)
            for field in source.fields():
                sink_feature[field.name()] = source_feature[field.name()]
            sink_feature[normalized_field_name] = normalized_values[current]
            sink_feature.setGeometry(source_feature.geometry())
            # Add a feature in the sink
            sink.addFeature(sink_feature, QgsFeatureSink.FastInsert)
            # Update the progress bar
            feedback.setProgress(int(current * total))

        # Return the results of the algorithm. In this case our only result is
        # the feature sink which contains the processed features, but some
        # algorithms may return multiple feature sinks, calculated numeric
        # statistics, etc. These should all be included in the returned
        # dictionary, with keys matching the feature corresponding parameter
        # or output names.
        return {self.OUTPUT: self.dest_id}

    def postProcessAlgorithm(self, context, feedback):
        processed_layer = QgsProcessingUtils.mapLayerFromString(
            self.dest_id, context)
        # Do smth with the layer, e.g. style it
        return {self.OUTPUT: processed_layer}
| Python | 0.000009 | |
a475d50d2b7b9febe5fb01bb185b63cbbe25f4d1 | add migration to remove fields | hoover/search/migrations/0006_auto_20200303_1309.py | hoover/search/migrations/0006_auto_20200303_1309.py | # Generated by Django 2.2.7 on 2020-03-03 13:09
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the loader, loader_state and options columns from Collection."""

    dependencies = [
        ('search', '0005_rename_user_hypens_to_dots'),
    ]

    # All three operations are RemoveField on the same model, so build them
    # from the list of field names being dropped.
    operations = [
        migrations.RemoveField(model_name='collection', name=field_name)
        for field_name in ('loader', 'loader_state', 'options')
    ]
| Python | 0.000001 | |
32108ccab67a76a05150e8cfb5bbdf2ff3477346 | Create minesweeper.py | game/minesweeper.py | game/minesweeper.py | from tkinter import *
# Build the main window: fixed size, titled, with a single stretchable
# grid cell holding the board frame.
root = Tk()
root.resizable(0, 0)
root.title("Minesweeper")
frame = Frame(root)
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
frame.grid(row=0, column=0)
class Tiles:
    """A size x size grid of minesweeper tile buttons laid out in *frame*."""

    def __init__(self, frame, size):
        """Create the button grid and make its cells stretch with the frame.

        BUG FIXES vs. the original: ``self.tiles[]`` was a syntax error,
        ``this`` is not Python (``self``), the Button(...) call was missing
        its closing parenthesis, and the buttons were never stored in the
        2-D list that clicked() reads.
        """
        self.size = size
        self.frame = frame
        # 2-D list of Button widgets, indexed as self.tiles[row][col].
        self.tiles = []
        for x in range(self.size):
            row_buttons = []
            for y in range(self.size):
                # Default arguments bind the *current* x/y for the callback
                # (avoids the late-binding closure pitfall).
                button = Button(self.frame, text=' ', width=2, bd=3,
                                command=lambda row=x, col=y: self.clicked(row, col))
                button.grid(row=x, column=y)
                row_buttons.append(button)
            self.tiles.append(row_buttons)
        # Let every grid cell stretch with the window.
        for x in range(self.size):
            Grid.columnconfigure(self.frame, x, weight=1)
        for y in range(self.size):
            Grid.rowconfigure(self.frame, y, weight=1)

    def clicked(self, x, y):
        """Mark the tile at row *x*, column *y* as revealed."""
        self.tiles[x][y]["text"] = '@'
        self.tiles[x][y]["relief"] = SUNKEN
# Hand control to Tk's event loop; blocks until the window is closed.
root.mainloop()
| Python | 0.000001 | |
a75e87fd3b4fc3f370554227cefc4687593621ca | fix merge fup | gdbpool/psyco_ge.py | gdbpool/psyco_ge.py | """A wait callback to allow psycopg2 cooperation with gevent.
Use `make_psycopg_green()` to enable gevent support in Psycopg.
"""
# Copyright (C) 2010 Daniele Varrazzo <daniele.varrazzo@gmail.com>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import psycopg2
from psycopg2 import extensions
from gevent.socket import wait_read, wait_write
def make_psycopg_green():
    """Configure Psycopg to be used with gevent in non-blocking way."""
    # Older Psycopg builds compiled without coroutine support lack the hook.
    if hasattr(extensions, 'set_wait_callback'):
        extensions.set_wait_callback(gevent_wait_callback)
        return
    raise ImportError(
        "support for coroutines not available in this Psycopg version (%s)"
        % psycopg2.__version__)
def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg.

    Polls the connection, yielding to the gevent hub whenever libpq
    reports it is waiting on the socket, until the operation completes.
    """
    while True:
        state = conn.poll()
        if state == extensions.POLL_OK:
            return
        if state == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif state == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise psycopg2.OperationalError(
                "Bad result from poll: %r" % state)
| Python | 0.000001 | |
5f503f0b9ab51ca2b1985fe88d5e84ff63b7d745 | Add sample playlists for testing features. | addplaylists.py | addplaylists.py | #!/usr/bin/env python2
from datetime import datetime
from datetime import timedelta
import random
from wuvt.trackman.lib import perdelta
from wuvt import db
from wuvt.trackman.models import DJSet, DJ
today = datetime.now()

# Create the DJ used as the author of all generated sets.
print("adding dj")
dj = DJ(u"Johnny 5", u"John")
db.session.add(dj)
db.session.commit()
print("djadded")

# Walk the last 500 days in 4-hour show slots and create a DJSet for
# roughly 40% of them.
for show in perdelta(today - timedelta(days=500), today, timedelta(hours=4)):
    if random.randint(0, 99) < 40:
        djset = DJSet(dj.id)
        djset.dtstart = show
        # BUG FIX: a set ends 4 *hours* after it starts (one show slot);
        # the original `timedelta(4)` meant 4 days.
        djset.dtend = show + timedelta(hours=4)
        db.session.add(djset)
        db.session.commit()
| Python | 0 | |
1905395783d5a0f5997e6e620ba09d41398840e0 | add test_vincia.py | test_vincia.py | test_vincia.py |
from deepjets.generate import generate_events
# Generate a single event with the W+Vincia configuration; the loop body is
# empty because we only care that generation completes without raising.
for _event in generate_events('w_vincia.config', 1, vincia=True):
    pass
| Python | 0.000004 | |
4fab31eef9ad80230b36039b66c70d94456e5f9b | Add missing tests file from previous commit. | tests/monad.py | tests/monad.py | '''Test case for monads and monoidic functions
'''
import unittest
from lighty import monads
class MonadTestCase(unittest.TestCase):
    '''Test case for value monads and their number-like behaviour
    '''

    def testNumberComparision(self):
        # A ValueMonad wrapping a number must compare like the number itself.
        monad = monads.ValueMonad(10)
        assert monad == 10, 'Number __eq__ error: %s' % monad
        assert monad > 9, 'Number __gt__ error: %s' % monad
        assert monad >= 10, 'Number __ge__ error: %s' % monad
        assert monad < 11, 'Number __lt__ error: %s' % monad
        assert monad <= 10, 'Number __le__ error: %s' % monad

    def testNumberActions(self):
        # Arithmetic and bit-shift operators must pass through to the value.
        monad = monads.ValueMonad(10)
        assert monad + 10 == 20, 'Number + error: %s' % (monad + 10)
        assert monad - 5 == 5, 'Number - error: %s' % (monad - 5)
        assert monad / 2 == 5, 'Number / error: %s' % (monad / 2)
        assert monad * 2 == 20, 'Number * error: %s' % (monad * 2)
        assert monad ** 2 == 100, 'Number pow error: %s' % (monad ** 2)
        assert monad << 1 == 10 << 1, 'Number << error: %s' % (monad << 1)
        assert monad >> 1 == 10 >> 1, 'Number >> error: %s' % (monad >> 1)

    def testNumberSeq(self):
        # A wrapped number behaves as a length-1 sequence; out-of-range
        # indexing yields a NoneMonad rather than raising.
        monad = monads.ValueMonad(10)
        assert len(monad) == 1, 'Number len error: %s' % len(monad)
        assert monad[0] == 10, 'Number [0] error: %s' % monad[0]
        # BUG FIX: the original message 'Number [1] error' had no %s
        # placeholder, so the '%' formatting itself raised TypeError
        # whenever this assertion failed.
        assert isinstance(monad[1], monads.NoneMonad), ('Number [1] error: %s'
                                                        % monad[1])
        assert not 10 in monad, 'Number in error: %s' % (10 in monad)
def test():
    """Build and return the suite containing all monad test methods."""
    suite = unittest.TestSuite()
    for method_name in ('testNumberComparision',
                        'testNumberActions',
                        'testNumberSeq'):
        suite.addTest(MonadTestCase(method_name))
    return suite
| Python | 0 | |
5e4a8e4e90bbf41442a384ab9323822a448c0941 | Add Exercises 8.1, 8.15. | Kane1985/Chapter4/Ex8.1_8.15.py | Kane1985/Chapter4/Ex8.1_8.15.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Exercises 8.1, 8.15 from Kane 1985.
"""
from __future__ import division
from collections import OrderedDict
from sympy import diff, factor, solve, simplify, symbols
from sympy import sin
from sympy.physics.mechanics import ReferenceFrame, Point
from sympy.physics.mechanics import dot
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics import partial_velocity
from sympy.physics.mechanics import MechanicsStrPrinter
def msprint(expr):
    """Return *expr* formatted with the sympy mechanics string printer."""
    return MechanicsStrPrinter().doprint(expr)
def subs(x, *args, **kwargs):
    """Apply .subs() to *x*, recursing elementwise over plain iterables.

    Objects without either a ``subs`` method or ``__iter__`` are returned
    unchanged.
    """
    if hasattr(x, 'subs'):
        return x.subs(*args, **kwargs)
    if hasattr(x, '__iter__'):
        return map(lambda e: subs(e, *args, **kwargs), x)
    return x
class PartialVelocity(dict):
    """A dict of partial velocities tagged with the reference frame they
    were computed in."""

    def __init__(self, frame, *args, **kwargs):
        self._set_frame(frame)
        dict.__init__(self, *args, **kwargs)

    def _set_frame(self, f):
        # Validate before storing so a bad frame fails fast.
        if isinstance(f, ReferenceFrame):
            self._frame = f
        else:
            raise TypeError(
                '{0} is not an instance of ReferenceFrame'.format(f))

    @property
    def frame(self):
        """Reference frame the partial velocities were taken in."""
        return self._frame
def partial_velocities(system, generalized_speeds, frame,
                       kde_map=None, constraint_map=None, express_frame=None):
    """Return the partial (angular) velocities of each Point/ReferenceFrame
    in *system* with respect to each generalized speed, in frame *frame*.

    kde_map / constraint_map are optional substitution dicts (kinematic
    differential equations and motion constraints) applied to each velocity
    before differentiating; express_frame defaults to *frame*.
    """
    partials = PartialVelocity(frame)
    if express_frame is None:
        express_frame = frame

    for p in system:
        # Points contribute translational velocity, frames angular velocity.
        if isinstance(p, Point):
            v = p.vel(frame)
        elif isinstance(p, ReferenceFrame):
            v = p.ang_vel_in(frame)
        if kde_map is not None:
            v = v.subs(kde_map)
        if constraint_map is not None:
            v = v.subs(constraint_map)

        v_r_p = OrderedDict()
        for u in generalized_speeds:
            v_r_p[u] = 0 if v == 0 else v.diff(u, express_frame)
        partials[p] = v_r_p
    return partials
def generalized_active_forces(partials, point_forces):
    """Return (Fr, ulist): the generalized active forces for each
    generalized speed, from partial velocities and (point, force) pairs.

    Each *point_forces* entry is (point, force) or (point, force, fn),
    where fn post-processes the contribution (e.g. an integral function).
    """
    # use the same frame used in calculating partial velocities
    v = partials.values()[0] # dict of partial velocities of the first item
    ulist = v.keys() # list of generalized speeds in case user wants it

    Fr = [0] * len(ulist)
    for ft in point_forces:
        p = ft[0]
        f = ft[1]
        for i, u in enumerate(ulist):
            if partials[p][u] != 0 and f != 0:
                r = dot(partials[p][u], f)
                if len(ft) == 2:
                    Fr[i] += r
                # if more than 2 args, 3rd is an integral function, where the
                # input is the integrand
                if len(ft) > 2:
                    Fr[i] += ft[2](r)
    return Fr, ulist
def generalized_inertia_forces(partials, point_masses, kde_map=None):
    """Return (Fr*, ulist): the generalized inertia forces for each
    generalized speed, from partial velocities and (point, mass) pairs.

    kde_map, if given, is substituted into each acceleration so the result
    is expressed in the generalized speeds.
    """
    # use the same frame used in calculating partial velocities
    v = partials.values()[0] # dict of partial velocities of the first item
    ulist = v.keys() # list of generalized speeds in case user wants it
    frame = partials.frame

    Fr_star = [0] * len(ulist)
    for p, m in point_masses:
        for i, u in enumerate(ulist):
            if partials[p][u] != 0 and m != 0:
                a = p.acc(frame)
                if kde_map is not None:
                    a = a.subs(kde_map)
                if a != 0:
                    # Contribution is v_r . (-m a), per Kane's method.
                    Fr_star[i] += dot(partials[p][u], -m*a)
    return Fr_star, ulist
# Symbols: gravity, rod length, two masses, drive rate, time, and the
# unknown constraint force C plus contact forces f1, f2.
g, L, m1, m2, omega, t = symbols('g L m1 m2 omega t')
C, f1, f2 = symbols('C f1 f2')
q1, q2, q3 = dynamicsymbols('q1:4')
q1d, q2d, q3d = dynamicsymbols('q1:4', level=1)
u1, u2, u3 = dynamicsymbols('u1:4')

# Frames: B rotates about A.y at constant rate omega; E rotates about B.z
# by the generalized coordinate q3.
A = ReferenceFrame('A')
B = A.orientnew('B', 'Axis', [omega * t, A.y])
E = B.orientnew('E', 'Axis', [q3, B.z])

# Points: P1 moves in the B plane; P2 sits at distance L from P1 along E.x.
pO = Point('O')
pO.set_vel(A, 0)
pO.set_vel(B, 0)

pP1 = pO.locatenew('P1', q1 * B.x + q2 * B.y)
pP2 = pP1.locatenew('P2', L * E.x)

pP1.set_vel(E, 0)
pP1.set_vel(B, pP1.pos_from(pO).diff(t, B))
pP1.v1pt_theory(pO, A, B)

pP2.set_vel(E, 0)
pP2.v2pt_theory(pP1, A, E)

print("velocities of points P1, P2 in rf A:\nv_P1_A = {0}\nv_P2_A = {1}".format(
    pP1.vel(A), pP2.vel(A)))

# three sets of generalized speeds
u_s1 = [dot(pP1.vel(A), A.x), dot(pP1.vel(A), A.y), q3d]
u_s2 = [dot(pP1.vel(A), E.x), dot(pP1.vel(A), E.y), q3d]
u_s3 = [q1d, q2d, q3d]

# f1, f2 are forces the panes of glass exert on P1, P2 respectively
R1 = f1*B.z + C*E.x - m1*g*B.y
R2 = f2*B.z - C*E.x - m2*g*B.y

forces = [(pP1, R1), (pP2, R2)]
point_masses = [(pP1, m1), (pP2, m2)]
torques = []

# Compute Fr and Fr* once for each choice of generalized speeds.
ulist = [u1, u2, u3]
for uset in [u_s1, u_s2, u_s3]:
    print("\nFor generalized speeds:\n[u1, u2, u3] = {0}".format(msprint(uset)))

    # solve for u1, u2, u3 in terms of q1d, q2d, q3d and substitute
    kde = [u_i - u_expr for u_i, u_expr in zip(ulist, uset)]
    kde_map = solve(kde, [q1d, q2d, q3d])
    # include second derivatives in kde map
    for k, v in kde_map.items():
        kde_map[k.diff(t)] = v.diff(t)

    partials = partial_velocities([pP1, pP2], ulist, A, kde_map)
    Fr, _ = generalized_active_forces(partials, forces + torques)
    Fr_star, _ = generalized_inertia_forces(partials, point_masses, kde_map)

    print("Generalized active forces:")
    for i, f in enumerate(Fr, 1):
        print("F{0} = {1}".format(i, msprint(simplify(f))))
    print("Generalized inertia forces:")
    for i, f in enumerate(Fr_star, 1):
        sub_map = {}
        if uset == u_s1: # make the results easier to read
            if i == 1 or i == 3:
                sub_map = solve([u1 - u_s1[0]], [omega*q1*sin(omega*t)])
        print("F{0}* = {1}".format(i, msprint(simplify(f.subs(sub_map)))))
| Python | 0.000206 | |
62207985d301dc9a47b0334f02a3f0c942e19d22 | Add packetStreamerClient example | example/packetStreamerClientExample.py | example/packetStreamerClientExample.py | #!/usr/bin/python
import urllib2
import json
import re
import sys
from optparse import OptionParser
sys.path.append('~/floodlight/target/gen-py')
sys.path.append('~/floodlight/thrift/lib/py')
from packetstreamer import PacketStreamer
from packetstreamer.ttypes import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# JSON key under which Floodlight returns the packet-trace session id.
SESSIONID = 'sessionId'

# Command-line options: controller IP (default localhost) and the host MAC
# whose OpenFlow packets should be traced.
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage, version="%prog 1.0")
parser.add_option("-c", "--controller", dest="controller", metavar="CONTROLLER_IP",
        default="127.0.0.1", help="controller's IP address")
parser.add_option("-m", "--mac", dest="mac", metavar="HOST_MAC",
        help="The host mac address to trace the OF packets")
(options, args) = parser.parse_args()
def validateIp(ip):
    """Return True if *ip* is a well-formed dotted-quad IPv4 address."""
    # BUG FIX: the pattern is now anchored at the end ('$'); re.match only
    # anchors the start, so trailing garbage such as "1.2.3.4x" or a fifth
    # octet used to be accepted.
    ipReg = ("(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
             "\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
             "\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
             "\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
    m = re.compile(ipReg).match(ip)
    if m:
        return True
    else :
        return False
def validateMac(mac):
    """Return True if *mac* is a well-formed xx:xx:xx:xx:xx:xx MAC address."""
    # BUG FIX: anchored at the end ('$') so extra octets or trailing
    # characters are rejected; re.match only anchors the start.
    macReg = '([a-fA-F0-9]{2}:){5}[a-fA-F0-9]{2}$'
    m = re.compile(macReg).match(mac)
    if m:
        return True
    else :
        return False
# Validate CLI arguments before contacting the controller.
if not validateIp(options.controller):
    parser.error("Invalid format for ip address.")

if not options.mac:
    parser.error("-m or --mac option is required.")

if not validateMac(options.mac):
    parser.error("Invalid format for mac address. Format: xx:xx:xx:xx:xx:xx")

controller = options.controller
host = options.mac

# Start a packet-trace session on the controller for this MAC, in both
# directions, for a 1000-second period.
url = 'http://%s:8080/wm/core/packettrace/json' % controller
filter = {'mac':host, 'direction':'both', 'period':1000}
post_data = json.dumps(filter)
request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
response_text = None
def terminateTrace(sid):
    """Ask the controller to end the packet-trace session *sid*
    (period=-1 means terminate)."""
    global controller
    filter = {SESSIONID:sid, 'period':-1}
    post_data = json.dumps(filter)
    url = 'http://%s:8080/wm/core/packettrace/json' % controller
    request = urllib2.Request(url, post_data, {'Content-Type':'application/json'})
    try:
        response = urllib2.urlopen(request)
        response_text = response.read()
    except Exception, e:
        # Floodlight may not be running, but we don't want that to be a fatal
        # error, so we just ignore the exception in that case.
        print "Exception:", e
# Start the trace session on the controller and remember its session id.
try:
    response = urllib2.urlopen(request)
    response_text = response.read()
except Exception, e:
    # Floodlight may not be running, but we don't want that to be a fatal
    # error, so we just ignore the exception in that case.
    print "Exception:", e
    # NOTE(review): bare ``exit`` references the builtin without calling it,
    # so execution falls through -- probably meant sys.exit().
    exit

if not response_text:
    print "Failed to start a packet trace session"
    sys.exit()

response_text = json.loads(response_text)

sessionId = None
if SESSIONID in response_text:
    sessionId = response_text[SESSIONID]
else:
    print "Failed to start a packet trace session"
    sys.exit()

# Stream the captured packets over Thrift until the filter times out or
# the user interrupts; always terminate the trace session on exit.
try:
    # Make socket
    transport = TSocket.TSocket('localhost', 9090)

    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TFramedTransport(transport)

    # Wrap in a protocol
    protocol = TBinaryProtocol.TBinaryProtocol(transport)

    # Create a client to use the protocol encoder
    client = PacketStreamer.Client(protocol)

    # Connect!
    transport.open()
    while 1:
        packets = client.getPackets(sessionId)
        for packet in packets:
            print "Packet: %s"% packet
            if "FilterTimeout" in packet:
                sys.exit()

except Thrift.TException, e:
    print '%s' % (e.message)
    terminateTrace(sessionId)

except KeyboardInterrupt, e:
    terminateTrace(sessionId)

# Close!
transport.close()
| Python | 0 | |
50141a66831d080ecc0791f94d1bd3bfec0aeb65 | Add migration for #465 | judge/migrations/0046_blogpost_authors.py | judge/migrations/0046_blogpost_authors.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-08 16:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many ``authors`` relation to BlogPost."""

    dependencies = [
        ('judge', '0045_organization_access_code'),
    ]

    operations = [
        migrations.AddField(
            model_name='blogpost',
            name='authors',
            field=models.ManyToManyField(
                to='judge.Profile',
                verbose_name='authors',
                blank=True,
                help_text='',
            ),
        ),
    ]
| Python | 0 | |
364f8fedb492c1eedd317729b05d3bd37c0ea4ad | add andromercury tool | andromercury.py | andromercury.py | #!/usr/bin/env python
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import sys, re, os

from optparse import OptionParser

from androguard.core.bytecodes import apk

# Bundled elsim (signatures) and mercury client libraries.
sys.path.append("./elsim/")
from elsim.elsign import dalvik_elsign

sys.path.append("./mercury/client")
from merc.lib.common import Session

# Command-line option descriptors consumed by the __main__ block below.
option_0 = { 'name' : ('-l', '--list'), 'help' : 'list all packages', 'nargs' : 1 }
option_1 = { 'name' : ('-i', '--input'), 'help' : 'get specific packages (a filter)', 'nargs' : 1 }
option_2 = { 'name' : ('-r', '--remotehost'), 'help' : 'specify ip of emulator/device', 'nargs' : 1 }
option_3 = { 'name' : ('-p', '--port'), 'help' : 'specify the port', 'nargs' : 1 }
option_4 = { 'name' : ('-o', '--output'), 'help' : 'output directory to write packages', 'nargs' : 1 }
option_5 = { 'name' : ('-b', '--database'), 'help' : 'database : use this database', 'nargs' : 1 }
option_6 = { 'name' : ('-c', '--config'), 'help' : 'use this configuration', 'nargs' : 1 }
option_7 = { 'name' : ('-v', '--verbose'), 'help' : 'display debug information', 'action' : 'count' }

options = [option_0, option_1, option_2, option_3, option_4, option_5, option_6, option_7]
def display(ret, debug) :
    # Print only the signature-match verdict; ``debug`` is currently unused.
    print "---->", ret[0],
def main(options, arguments) :
    """Connect to a mercury agent, list or download APKs, and optionally
    check each downloaded APK against an elsim signature database."""
    sessionip = "127.0.0.1"
    sessionport = 31415

    if options.remotehost :
        sessionip = options.remotehost

    if options.port :
        sessionport = int(options.port)

    newsession = Session(sessionip, sessionport, "bind")

    # Check if connection can be established
    if newsession.executeCommand("core", "ping", None).data == "pong":
        if options.list :
            # NOTE(review): ``request`` (with the -l filter) is built but an
            # empty dict is sent instead -- confirm whether the filter should
            # be passed here.
            request = {'filter': options.list, 'permissions': None }
            apks_info = newsession.executeCommand("packages", "info", {}).getPaddedErrorOrData()
            print apks_info
        elif options.input and options.output :
            # Optional signature engine, enabled when -b/-c are given.
            s = None
            if options.database != None or options.config != None :
                s = dalvik_elsign.MSignature( options.database, options.config, options.verbose != None, ps = dalvik_elsign.PublicSignature)

            request = {'filter': options.input, 'permissions': None }
            apks_info = newsession.executeCommand("packages", "info", request).getPaddedErrorOrData()
            print apks_info

            # Download every APK path reported, then scan it if enabled.
            for i in apks_info.split("\n") :
                if re.match("APK path:", i) != None :
                    name_app = i.split(":")[1][1:]
                    print name_app,
                    response = newsession.downloadFile(name_app, options.output)
                    print response.data, response.error,

                    if s != None :
                        a = apk.APK( options.output + "/" + os.path.basename(name_app) )
                        if a.is_valid_APK() :
                            display( s.check_apk( a ), options.verbose )
                        print
    else:
        print "\n**Network Error** Could not connect to " + sessionip + ":" + str(sessionport) + "\n"
if __name__ == "__main__" :
    parser = OptionParser()
    for option in options :
        # 'name' holds the flag strings; the remaining keys are kwargs.
        param = option['name']
        del option['name']
        parser.add_option(*param, **option)

    options, arguments = parser.parse_args()
    sys.argv[:] = arguments
    main(options, arguments)
| Python | 0 | |
e5931a5837b1574681757e2c6fc7260122b48746 | Add minify util | web-app/js/ofm/scripts/utils/minify.py | web-app/js/ofm/scripts/utils/minify.py | #!/usr/bin/python2.6
# Minify Filemanager javascript files
# Usage : $ python ./utils/minify.py
class bcolors:
    """ANSI escape codes used to colour console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'

    def disable(self):
        """Blank out every colour code (for terminals without ANSI support)."""
        for attr in ('HEADER', 'OKBLUE', 'OKGREEN', 'WARNING', 'FAIL', 'ENDC'):
            setattr(self, attr, '')
import httplib, urllib, sys, os

# Project root is the parent of this script's directory.
fmRootFolder = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + "/"
os.chdir(fmRootFolder) # set working directory

toMinify = ["filemanager.js"]

print bcolors.HEADER + "-------------------------------------" + bcolors.ENDC

# we loop on JS languages files
# NOTE(review): ``dir`` and ``file`` shadow Python builtins.
for index, item in enumerate(toMinify):
    # print index, item
    dir = os.path.dirname(item)
    file = os.path.basename(item)
    with open (fmRootFolder + item, "r") as myfile:
        js_input=myfile.read()

    # Define the parameters for the POST request and encode them in
    # a URL-safe format.
    params = urllib.urlencode([
        ('js_code', js_input),
        # ('compilation_level', 'WHITESPACE_ONLY'),
        ('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
        ('output_format', 'text'),
        ('output_info', 'compiled_code'),
      ])
    # Second request with the same source, asking for errors instead.
    params2 = urllib.urlencode([
        ('js_code', js_input),
        # ('compilation_level', 'WHITESPACE_ONLY'),
        ('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
        ('output_format', 'text'),
        ('output_info', 'errors'),
      ])

    # Always use the following value for the Content-type header.
    headers = { "Content-type": "application/x-www-form-urlencoded" }
    conn = httplib.HTTPConnection('closure-compiler.appspot.com')
    conn.request('POST', '/compile', params, headers)
    response = conn.getresponse()
    data = response.read()

    # we write the minified file - os.path.splitext(file)[0] return filename without extension
    with open(fmRootFolder + dir + '/' + os.path.splitext(file)[0] + ".min.js", "w") as text_file:
        text_file.write(data)

    # We retrieve errors
    conn.request('POST', '/compile', params2, headers)
    response = conn.getresponse()
    errors = response.read()

    if errors == "":
        print bcolors.OKBLUE + file + " has been minified. No error found."
    else:
        print bcolors.FAIL + file + " : the code contains errors : "
        print ""
        print errors + bcolors.ENDC

    conn.close()

print bcolors.HEADER + "-------------------------------------" + bcolors.ENDC
| Python | 0.002208 | |
348ffbf16fcb67768d72bd18167e6c70c99a27a1 | Add Homodyne node | gpi/Homodyne_GPI.py | gpi/Homodyne_GPI.py | # Author: Ashley Anderson III <aganders3@gmail.com>
# Date: 2015-10-10 21:13
# Copyright (c) 2015 Dignity Health
from __future__ import absolute_import, division, print_function, unicode_literals
import os
# gpi, future
import gpi
from bart.gpi.borg import IFilePath, OFilePath, Command
# bart
import bart
base_path = bart.__path__[0] # library base for executables
import bart.python.cfl as cfl
class ExternalNode(gpi.NodeAPI):
    '''Usage: homodyne dim fraction <input> <output>

    Perform homodyne reconstruction along dimension dim.
    '''

    def initUI(self):
        # Widgets: dimension to reconstruct along and the sampled fraction.
        self.addWidget('SpinBox', 'dim', min=0)
        self.addWidget('DoubleSpinBox', 'fraction', min=0.5, max=1.0,
                       decimals=3, singlestep=0.01)

        # IO Ports
        self.addInPort('kspace', 'NPYarray')
        self.addOutPort('out', 'NPYarray')

        return 0

    def compute(self):
        kspace = self.getData('kspace')

        # load up arguments list for the external bart 'homodyne' command
        args = [base_path+'/homodyne']
        args += [str(self.getVal('dim'))]
        args += [str(self.getVal('fraction'))]

        # setup file for passing data to external command
        in1 = IFilePath(cfl.writecfl, kspace, asuffix=['.cfl','.hdr'])
        args += [in1]
        out1 = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr'])
        args += [out1]

        # run commandline
        print(Command(*args))

        # Read the result back from the output cfl file, then clean up.
        self.setData('out', out1.data())
        in1.close()
        out1.close()

        return 0
| Python | 0.000022 | |
1a7fa8080d19909ccf8e8e89aa19c92c1413f1c1 | Add script to submite jobs again | apps/pyjob_submite_jobs_again.py | apps/pyjob_submite_jobs_again.py | #!/usr/bin/env python3
import os
import sys
import subprocess
# CLI: first arg is the job type ('ma', 'ex' or 'xy'), the rest are the
# integer seeds of the rmsNN subdirectories to resubmit.
right_inputs = False
if len(sys.argv) > 2 :
    tp = sys.argv[1]
    rms = [int(x) for x in sys.argv[2:]]
    if tp in ['ma', 'ex', 'xy']: right_inputs = True

curdir = os.getcwd()
if right_inputs:
    # Must be run from inside a 'trackcpp' directory.
    if curdir.endswith('trackcpp'):
        flatfile = 'flatfile.txt'
        input_file = 'input_' + tp.lower() + '.py'
        exec_file = 'runjob_' + tp.lower() + '.sh'
        dirs = curdir.split(os.sep)
        label = '-'.join(dirs[-5:]) + '-submitting_again.'
        for m in rms:
            mlabel = 'rms%02i'%m
            os.chdir(os.path.join(curdir, mlabel))
            files = os.listdir(os.getcwd())
            # Include any *_kicktable.txt files present as extra job inputs.
            kicktable_files = ','.join([f for f in files if f.endswith('_kicktable.txt')])
            if len(kicktable_files) != 0:
                inputs = ','.join([kicktable_files, flatfile,input_file])
            else:
                inputs = ','.join([flatfile,input_file])
            description = ': '.join([mlabel, tp.upper(), label])
            # Submit one job per rms seed and wait for the submission to finish.
            p = subprocess.Popen(['pyjob_qsub.py', '--inputFiles', inputs, '--exec', exec_file, '--description', description])
            p.wait()
            os.chdir(curdir)
    else:
        print('Change the current working directory to trackcpp directory.')
else:
    print('Invalid inputs')
| Python | 0 | |
1bf7439c67e2206acb0c6d285014261eeb18097f | Add coverage as single execution | coverage.py | coverage.py | from app import initialization
from app.configuration import add
from app.check import *
# Boot the application, then run the PHPUnit check with coverage enabled.
initialization.run()
# flag is stored as the string 'true' in the configuration layer
add('phpunit-coverage', 'true')
# `phpunit` is provided by the star import from app.check above
phpunit.execute()
| Python | 0.000009 | |
67f5e4a4ec4606d00fb94139b9c39c7abe0be33b | Add browse queue statistics sample | samples/python/queue_statistics.py | samples/python/queue_statistics.py | '''
This sample will read all queue statistic messages from SYSTEM.ADMIN.STATISTICS.QUEUE.
MQWeb runs on localhost and is listening on port 8081.
'''
import json
import httplib
import socket
import argparse
# Build the one-flag CLI: the target queue manager name is mandatory.
parser = argparse.ArgumentParser(
    description='MQWeb - Python sample - Browse statistic messages from SYSTEM.ADMIN.STATISTICS.QUEUE',
    epilog="For more information: http://www.mqweb.org"
)
parser.add_argument('-m', '--queuemanager', help='Name of the queuemanager', required=True)
args = parser.parse_args()
# maximum number of bytes to retrieve per browsed message
size = 1024 * 32
url = "/api/message/browse/" + args.queuemanager + '/SYSTEM.ADMIN.STATISTICS.QUEUE?size=' + str(size)
try:
    # MQWeb daemon is assumed to listen on localhost:8081 (see module docstring)
    conn = httplib.HTTPConnection('localhost', 8081)
    conn.request('GET', url)
    res = conn.getresponse()
    result = json.loads(res.read())
    if 'error' in result:
        # MQWeb reports WebSphere MQ failures inside the JSON payload
        print('Received a WebSphere MQ error: ' +
            str(result['error']['reason']['code'])
        )
    else:
        count = 0
        for message in result['data']:
            count += 1
            if 'admin' in message:
                # PCF admin message: extract the statistics parameters
                parameters = message['admin']['parameters']
                print(str(parameters['IntervalStartDate']['value']) + ' ' +
                    str(parameters['IntervalStartTime']['value']) + ' ' +
                    str(parameters['IntervalEndDate']['value']) + ' ' +
                    str(parameters['IntervalEndTime']['value']))
                if 'QStatisticsData' in parameters:
                    queues = {}
                    for statistics in parameters['QStatisticsData']['value']:
                        # Get/Put/Browse counters arrive as two-element lists
                        # (presumably [non-persistent, persistent] -- confirm
                        # against the MQ PCF documentation); they are summed
                        # into a single total per operation.
                        queue = {
                            'depth' : {
                                'min' : statistics['QMinDepth']['value'],
                                'max' : statistics['QMaxDepth']['value']
                            },
                            'get' : {
                                'count' : statistics['GetCount']['value'][0] + statistics['GetCount']['value'][1],
                                'bytes' : statistics['GetBytes']['value'][0] + statistics['GetBytes']['value'][1],
                                'fail' : statistics['GetFailCount']['value']
                            },
                            'put' : {
                                'count' : statistics['PutCount']['value'][0] + statistics['PutCount']['value'][1] + statistics['Put1Count']['value'][0] + statistics['Put1Count']['value'][1],
                                'bytes' : statistics['PutBytes']['value'][0] + statistics['PutBytes']['value'][1],
                                'fail' : statistics['PutFailCount']['value'] + statistics['Put1FailCount']['value']
                            },
                            'browse' : {
                                'count' : statistics['BrowseCount']['value'][0] + statistics['BrowseCount']['value'][1],
                                'bytes' : statistics['BrowseBytes']['value'][0] + statistics['BrowseBytes']['value'][1],
                                'fail' : statistics['BrowseFailCount']['value']
                            }
                        }
                        queues[statistics['QName']['value']] = queue
                    print(json.dumps(queues))
                else:
                    print(json.dumps(parameters))
        print('Number of messages: ' + str(count))
except httplib.HTTPException as e:
    # NOTE(review): HTTPException has no errno/strerror attributes, and
    # str + int concatenation would fail anyway -- this handler would
    # itself raise; confirm and fix upstream.
    print ('An HTTP error occurred while inquiring queuemanager: ' +
        e.errno + e.strerror
    )
except socket.error as e:
    print e.strerror
    print 'Is the MQWeb daemon running?'
| Python | 0 | |
884ae74bb75e5a0c60da74791a2e6fad9e4b83e5 | Add py solution for 436. Find Right Interval | py/find-right-interval.py | py/find-right-interval.py | from operator import itemgetter
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
    def findRightInterval(self, intervals):
        """
        For each interval, return the index of the interval whose start
        point is the smallest one >= its end point (the "right" interval),
        or -1 when no such interval exists.

        :type intervals: List[Interval]
        :rtype: List[int]
        """
        # (original_index, interval) pairs ordered by start point, so each
        # right interval can be found by binary search over the starts.
        # Materialized as a list (map() is a lazy iterator on Python 3 and
        # cannot be indexed, which broke the original implementation).
        sorted_itv = [pair[1:] for pair in
                      sorted((x.start, i, x) for i, x in enumerate(intervals))]
        size = len(intervals)
        ans = []
        for itv in intervals:
            # Invariant: entries at or before L have start < itv.end;
            # U is the best candidate found so far (size means "none").
            L, U = -1, size
            while L + 1 < U:
                # floor division keeps mid an int index on Python 3
                # (the original `/` produced a float and raised TypeError)
                mid = (L + U) // 2
                if sorted_itv[mid][1].start >= itv.end:
                    U = mid
                else:
                    L = mid
            ans.append(-1 if U == size else sorted_itv[U][0])
        return ans
| Python | 0.998967 | |
07f8fd56ab366a2d1365278c3310ade4b1d30c57 | Add functional test for version negotiation | heat_integrationtests/functional/test_versionnegotiation.py | heat_integrationtests/functional/test_versionnegotiation.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from heat_integrationtests.functional import functional_base
# Canonical body of the version-discovery response; `href` is filled in at
# test time, once the real endpoint root has been discovered.
expected_version_dict = {
    "versions": [
        {"links": [{"href": None, "rel": "self"}],
         "status": "CURRENT", "id": "v1.0"}
    ]
}
class VersionNegotiationTestCase(functional_base.FunctionalTestsBase):
    """Checks unauthenticated version negotiation on the Heat API root."""
    def test_authless_version_negotiation(self):
        # NOTE(pas-ha): this will grab the public endpoint by default
        heat_url = self.identity_client.get_endpoint_url(
            'orchestration', region=self.conf.region)
        # strip the /v1/... suffix to obtain the bare API root
        heat_api_root = heat_url.split('/v1')[0]
        # patch the expected payload with the endpoint discovered above
        expected_version_dict[
            'versions'][0]['links'][0]['href'] = heat_api_root + '/v1/'
        # deliberately no auth token: negotiation must work without one
        r = requests.get(heat_api_root)
        # 300 Multiple Choices is the version-negotiation status code
        self.assertEqual(300, r.status_code, 'got response %s' % r.text)
        self.assertEqual(expected_version_dict, r.json())
| Python | 0.000058 | |
4b8f7a4c97668b4dbd8634d6b01e30b71737c3bd | fix send_HTML_email to work when no email_from argument is supplied | dimagi/utils/django/email.py | dimagi/utils/django/email.py | from django.conf import settings
from django.core.mail import get_connection
from django.core.mail.message import EmailMultiAlternatives
# Plaintext fallback body used when a caller supplies no text_content and
# settings define no NO_HTML_EMAIL_MESSAGE override.
NO_HTML_EMAIL_MESSAGE = """
Your email client is trying to display the plaintext version of an email that
is only supported in HTML. Please set your email client to display this message
in HTML, or use an email client that supports HTML emails.
"""
def send_HTML_email(subject, recipient, html_content, text_content=None,
                    cc=None, email_from=None,
                    file_attachments=None):
    """Send a multipart (plaintext + HTML) email.

    :param subject: message subject line
    :param recipient: single destination address
    :param html_content: HTML body, attached as the rich alternative
    :param text_content: plaintext fallback; defaults to a canned notice
    :param cc: optional list of CC addresses
    :param email_from: sender address; defaults to settings.DEFAULT_FROM_EMAIL
    :param file_attachments: optional iterable of dicts with 'title',
        'file_obj' (file-like) and 'mimetype' keys
    """
    if email_from is None:
        # Resolve the default lazily instead of in the signature: a default
        # of `settings.DEFAULT_FROM_EMAIL` is evaluated once at import time
        # and goes stale when settings are overridden (e.g. in tests).
        email_from = settings.DEFAULT_FROM_EMAIL
    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)
    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    msg = EmailMultiAlternatives(subject, text_content, email_from,
                                 [recipient], headers=from_header,
                                 connection=connection, cc=cc)
    # `attachment` instead of `file`, which shadows a Python 2 builtin
    for attachment in (file_attachments or []):
        if attachment:
            msg.attach(attachment["title"], attachment["file_obj"].getvalue(),
                       attachment["mimetype"])
    msg.attach_alternative(html_content, "text/html")
    msg.send()
| from django.conf import settings
from django.core.mail import get_connection
from django.core.mail.message import EmailMultiAlternatives
NO_HTML_EMAIL_MESSAGE = """
Your email client is trying to display the plaintext version of an email that
is only supported in HTML. Please set your email client to display this message
in HTML, or use an email client that supports HTML emails.
"""
def send_HTML_email(subject, recipient, html_content, text_content=None, cc=None, email_from=None, file_attachments=None):
    """Send a multipart (plaintext + HTML) email (legacy variant).

    When no explicit ``email_from`` is given, both the visible From address
    and the SMTP envelope return path are derived from settings.
    """
    if not text_content:
        text_content = getattr(settings, 'NO_HTML_EMAIL_MESSAGE',
                               NO_HTML_EMAIL_MESSAGE)
    # If you get the return_path header wrong, this may impede mail delivery. It appears that the SMTP server
    # has to recognize the return_path as being valid for the sending host. If we set it to, say, our SMTP
    # server, this will always be the case (as the server is explicitly serving the host).
    if email_from is None:
        #todo: verify that this is even necessary here since it seems like email_return_path == email_from
        email_return_path = getattr(settings, 'EMAIL_RETURN_PATH', None)
        if email_return_path is None:
            email_return_path = settings.EMAIL_LOGIN
        email_from = getattr(settings, 'EMAIL_FROM', None)
        if email_from is None:
            email_from = email_return_path
    else:
        email_return_path = email_from
    from_header = {'From': email_from}  # From-header
    connection = get_connection()
    # the return path (envelope sender) is passed as the message's
    # from_email; the visible From: header is set separately above
    msg = EmailMultiAlternatives(subject, text_content, email_return_path, [recipient], headers=from_header, connection=connection, cc=cc)
    for file in (file_attachments or []):
        if file:
            msg.attach(file["title"], file["file_obj"].getvalue(), file["mimetype"])
    msg.attach_alternative(html_content, "text/html")
msg.send() | Python | 0 |
f7c4f8d43b30dfee36d4ff46e9133194a15b3e81 | Add tests for __unicode__ functions in model. (#1026) | tests/unit/accounts/test_models.py | tests/unit/accounts/test_models.py | from django.contrib.auth.models import User
from django.test import TestCase
from accounts.models import Profile, UserStatus
class BaseTestCase(TestCase):
    """Shared fixture: one plain auth User for the accounts model tests."""
    def setUp(self):
        # NOTE(review): objects.create() stores the password string as
        # given (unhashed); fine here since no test authenticates, but
        # switch to create_user() if that ever changes -- confirm.
        self.user = User.objects.create(
            username='user',
            email='user@test.com',
            password='password')
class UserStatusTestCase(BaseTestCase):
    """Tests the string representation of the UserStatus model."""

    def setUp(self):
        super(UserStatusTestCase, self).setUp()
        self.user_status = UserStatus.objects.create(
            name='user',
            status=UserStatus.UNKNOWN,
        )

    def test__str__(self):
        # str() is the idiomatic spelling; it dispatches to __str__ anyway
        self.assertEqual(self.user_status.name, str(self.user_status))
class ProfileTestCase(BaseTestCase):
    """Tests the string representation of the Profile model."""

    def setUp(self):
        super(ProfileTestCase, self).setUp()
        # the Profile row is fetched, not created -- presumably it is
        # auto-created per User (signal); confirm against the app's signals
        self.profile = Profile.objects.get(user=self.user)

    def test__str__(self):
        # str() is the idiomatic spelling; it dispatches to __str__ anyway
        self.assertEqual('{}'.format(self.profile.user), str(self.profile))
| Python | 0 | |
3e5b98c1a79f625fbf9f54af782e459de7fa5b1f | update migration with new filename and parent migration name | accelerator/migrations/0052_cleanup_twitter_urls.py | accelerator/migrations/0052_cleanup_twitter_urls.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from accelerator.twitter_handle_cleanup import (
clean_entrepreneur_profile_twitter_handles,
clean_expert_profile_twitter_handles,
clean_organization_twitter_handles
)
def clean_up_twitter_handles(apps, schema_editor):
    """Forward data migration: normalize stored twitter handles/URLs.

    ``schema_editor`` is unused but required by the RunPython calling
    convention.  Models are resolved through ``apps`` (historical model
    versions), never imported directly.
    """
    Organization = apps.get_model('accelerator', 'Organization')
    ExpertProfile = apps.get_model('accelerator', 'ExpertProfile')
    EntrepreneurProfile = apps.get_model(
        'accelerator',
        'EntrepreneurProfile')
    clean_entrepreneur_profile_twitter_handles(EntrepreneurProfile)
    clean_expert_profile_twitter_handles(ExpertProfile)
    clean_organization_twitter_handles(Organization)
class Migration(migrations.Migration):
    # Data-only migration: the cleanup cannot be undone, so the backwards
    # operation is an explicit no-op (keeps the migration reversible).
    dependencies = [
        (
            'accelerator',
            '0051_add_register_for_events_to_event_subnav_items'
        ),
    ]
    operations = [
        migrations.RunPython(
            clean_up_twitter_handles,
            migrations.RunPython.noop),
    ]
| Python | 0 | |
c97f648a012c38802d9637d4c573a4ca9c8e1633 | Create encoder.py | additional/customencoder/encoder.py | additional/customencoder/encoder.py | #!/usr/bin/python
#below is the shellcode for /bin/sh using execve sys call
shellcode = ("\x31\xc0\x50\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x50\x89\xe2\x53\x89\xe1\xb0\x0b\xcd\x80")
# accumulators: w = original bytes, t = ROR-2 bytes, z = ROR-2 bytes + 1
t=[]
w=[]
z=[]
# rotate-right of `val` by `r_bits` within a `max_bits`-wide word
ror = lambda val, r_bits, max_bits: \
    ((val & (2**max_bits-1)) >> r_bits%max_bits) | \
    (val << (max_bits-(r_bits%max_bits)) & (2**max_bits-1))
for i in range(0, len(shellcode)):
    s = ord(shellcode[i])
    # encode each byte: rotate right by 2 bits, then add 1
    # (a matching decoder stub must subtract 1 and rotate left by 2)
    # NOTE(review): y+1 is not masked to 8 bits; a byte rotating to 0xff
    # would encode to 0x100 -- confirm the payload cannot contain 0xff
    y = ror(s,2,8)
    b = y+1
    w.append(s)
    t.append(y)
    z.append(b)
print "length %d" %len(t)
print "[+] Original shellcode..:", (", ".join(hex(c) for c in w[0::]))
print "[+] ROR shellcode..:", (", ".join(hex(c) for c in t[0::]))
print "[+] ROR shellcode after adding 1 to each byte ..:", (", ".join(hex(c) for c in z[0::]))
| Python | 0.000004 | |
30bb33609d9e22b6999d086196ed622a456d7dc2 | Create incomplete_minimal_logger.py | lld_practice/incomplete_minimal_logger.py | lld_practice/incomplete_minimal_logger.py | # just trying to minimize what all it offers
# reference python standard logging module snippets
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelToName = {
CRITICAL: 'CRITICAL',
ERROR: 'ERROR',
WARNING: 'WARNING',
INFO: 'INFO',
DEBUG: 'DEBUG',
NOTSET: 'NOTSET',
}
_nameToLevel = {
'CRITICAL': CRITICAL,
'FATAL': FATAL,
'ERROR': ERROR,
'WARN': WARNING,
'WARNING': WARNING,
'INFO': INFO,
'DEBUG': DEBUG,
'NOTSET': NOTSET,
}
def getLevelName(level):
result = _levelToName.get(level)
if result is not None:
return result
result = _nameToLevel.get(level)
if result is not None:
return result
return "Level %s" % level
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in _nameToLevel:
raise ValueError("Unknown level: %r" % level)
rv = _nameToLevel[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
class my_logger:
def __init__(self, logger_name, logger_level):
# write to console and a text file is the req...
# error > warning > info > debug...
self.logger_name = logger_name
self.logger_level = _checkLevel(logger_level)
self._cache = {}
def info(self, msg, *args, **kwargs):
# check if we can allow this and then allow it
if self.isEnabledFor("INFO"):
self.log_me(log_level="INFO", log_msg=msg, *args, **kwargs)
def warn(self, msg, *args, **kwargs):
if self.isEnabledFor("WARN"):
self.log_me(log_level="WARN", log_msg=msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
if self.isEnabledFor("ERROR"):
self.log_me(log_level="ERROR", log_msg=msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
if self.isEnabledFor("DEBUG"):
self.log_me(log_level="DEBUG", log_msg=msg, *args, **kwargs)
def handler(self, msg_level, msg):
msg_level = msg_level
pass
def isEnabledFor(self, level):
# is the logger enabled for this level?
try:
return self._cache[level]
except KeyError:
try:
is_enabled = self._cache[level] = (level >= self.getEffectiveLevel())
finally:
pass
return is_enabled
def setLevel(self, level):
self.level = _checkLevel(level)
def log_me(self, log_level, log_msg, *args, **kwargs):
# we need to handle handlers here as well..
# create a log_record (prepare the message here)
#
pass
def getEffectiveLevel(self):
# get the effective level for this logger...
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def __repr__(self):
level = getLevelName(self.getEffectiveLevel())
return '<%s %s (%s)>' % (self.__class__.__name__, self.name, level)
| Python | 0.000147 | |
fb004f72c27b49ba9661e6a83b8f49be39757d22 | add changemath shell | math_change.py | math_change.py | import sys
# Module-level defaults; the CLI entry point below always supplies its own
# paths, so these only matter when the module is driven interactively.
filename = './Deterministic Policy Gradient Algorithms笔记.md'
outname = ''
def change(filename, outname):
    """Rewrite inline `$...$` math in a Markdown file as `$$` display math.

    Bare `$$` delimiter lines get a blank line on their outer side
    (before an opening delimiter, after a closing one); every other `$`
    is expanded into a `$$` block on its own line.
    """
    with open(filename, encoding='utf8') as src:
        lines = src.readlines()
    pieces = []
    display_delims = 0  # bare `$$` delimiter lines seen so far
    for line in lines:
        if line == '$$\n':
            display_delims += 1
            if display_delims % 2 == 0:
                # closing delimiter: blank line after it
                pieces.append('$$\n\n')
            else:
                # opening delimiter: blank line before it
                pieces.append('\n$$\n')
        elif '$' in line:
            # promote each `$` to a standalone `$$`, collapsing doubles
            pieces.append(line.replace('$', '\n$$\n').replace('$$$$', '$$'))
        else:
            pieces.append(line)
    with open(outname, 'w', encoding='utf8') as dst:
        dst.write(''.join(pieces))
if __name__=='__main__':
    # number of user-supplied CLI arguments (script name excluded)
    arglen = len(sys.argv) - 1
    if arglen == 2:
        # explicit source and destination paths
        change(*sys.argv[1:])
    if arglen == 1:
        # single path: rewrite the file in place
        filename = sys.argv[1]
        change(filename, filename)
| Python | 0.000009 | |
79ea8a3a9ac43ba5ab9789e4962b4fb0814dccc0 | clean up localsettings example | localsettings.example.py | localsettings.example.py | import os
import sys
####### Database config. This assumes Postgres #######
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'commcarehq',
'USER': 'postgres',
'PASSWORD': '******'
}
}
####### Couch Config ######
COUCH_SERVER_ROOT = '127.0.0.1:5984'
COUCH_USERNAME = 'admin'
COUCH_PASSWORD = '********'
COUCH_DATABASE_NAME = 'commcarehq'
####### # Email setup ########
# email settings: these ones are the custom hq ones
EMAIL_LOGIN = "notifications@dimagi.com"
EMAIL_PASSWORD="******"
EMAIL_SMTP_HOST="smtp.gmail.com"
EMAIL_SMTP_PORT=587
ADMINS = (('HQ Dev Team', 'commcarehq-dev+www-notifications@dimagi.com'),)
BUG_REPORT_RECIPIENTS = ['commcarehq-support@dimagi.com']
NEW_DOMAIN_RECIPIENTS = ['commcarehq-dev+newdomain@dimagi.com']
####### Log/debug setup ########
DEBUG = False
TEMPLATE_DEBUG = DEBUG
# The django logs will end up here
DJANGO_LOG_FILE = os.path.join('/opt/www.commcarehq.org_project/log',"www.commcarehq.org.django.log")
SEND_BROKEN_LINK_EMAILS = True
CELERY_SEND_TASK_ERROR_EMAILS = True
####### Static files ########
filepath = os.path.abspath(os.path.dirname(__file__))
# media for user uploaded media. in general this won't be used at all.
MEDIA_ROOT = os.path.join(filepath,'mediafiles')
STATIC_ROOT = os.path.join(filepath,'staticfiles')
####### Bitly ########
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*******'
####### Jar signing config ########
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
JAR_SIGN = dict(
jad_tool = os.path.join(_ROOT_DIR, "submodules", "core-hq-src", "corehq", "apps", "app_manager", "JadTool.jar"),
key_store = os.path.join(os.path.dirname(os.path.dirname(_ROOT_DIR)), "DimagiKeyStore"),
key_alias = "javarosakey",
store_pass = "*******",
key_pass = "*******",
)
####### XEP stuff - TODO: remove this section when we retire XEP ########
REFLEXIVE_URL_BASE = "https://localhost:8001"
def get_url_base():
return REFLEXIVE_URL_BASE
GET_URL_BASE = 'settings.get_url_base'
####### SMS Config ########
# Mach
SMS_GATEWAY_URL = "http://gw1.promessaging.com/sms.php"
SMS_GATEWAY_PARAMS = "id=******&pw=******&dnr=%(phone_number)s&msg=%(message)s&snr=DIMAGI"
# Unicel
UNICEL_CONFIG = {"username": "Dimagi",
"password": "******",
"sender": "Promo" }
####### Custom reports ########
CUSTOM_REPORT_MAP = {
"domain_name": [
'path.to.CustomReport',
]
}
####### Domain sync / de-id ########
DOMAIN_SYNCS = {
"domain_name": {
"target": "target_db_name",
"transform": "corehq.apps.domainsync.transforms.deidentify_domain"
}
}
DOMAIN_SYNC_APP_NAME_MAP = { "app_name": "new_app_name" }
####### Misc / HQ-specific Config ########
DEFAULT_PROTOCOL = "https" # or http
OVERRIDE_LOCATION="https://www.commcarehq.org"
GOOGLE_ANALYTICS_ID = '*******'
AXES_LOCK_OUT_AT_FAILURE = False
LUCENE_ENABLED = True
INSECURE_URL_BASE = "http://submit.commcarehq.org"
PREVIEWER_RE = r'^.*@dimagi\.com$'
GMAPS_API_KEY = '******'
FORMTRANSLATE_TIMEOUT = 5
LOCAL_APPS = ('django_cpserver','dimagi.utils', 'gunicorn', 'django_extensions')
| import os
# Postgres config
DATABASE_ENGINE = 'postgresql_psycopg2'
DATABASE_NAME = 'commcarehq'
DATABASE_USER = 'postgres'
DATABASE_PASSWORD = '*****'
DATABASE_HOST = ''
DATABASE_PORT = '5432'
DJANGO_LOG_FILE = "/var/log/datahq/datahq.django.log"
LOG_SIZE = 1000000
LOG_LEVEL = "DEBUG"
LOG_FILE = "/var/log/datahq/datahq.log"
LOG_FORMAT = "[%(name)s]: %(message)s"
LOG_BACKUPS = 256 # number of logs to keep
filepath = os.path.abspath(os.path.dirname(__file__))
STATIC_ROOT = os.path.join(filepath, 'staticmedia') #if you so wish to have your staticroot be defined here - this is necessary for staticfiles to merge all the static media from the management command.
####### Couch Forms ######
COUCH_SERVER_ROOT = 'localhost:5984'
COUCH_USERNAME = ''
COUCH_PASSWORD = ''
COUCH_DATABASE_NAME = 'commcarehq'
BITLY_LOGIN = 'dimagi'
BITLY_APIKEY = '*****'
EMAIL_LOGIN="user@gmail.com"
EMAIL_PASSWORD="******"
EMAIL_SMTP_HOST="smtp.gmail.com"
EMAIL_SMTP_PORT=587
JAR_SIGN = dict(
key_store = "/PATH/TO/KEY_STORE",
key_alias = "KEY",
store_pass = "*****",
key_pass = "*****",
)
# Link to XForm Editor
# in the future we will possible allow multiple
EDITOR_URL = 'http://localhost:8011/xep/initiate/'
XFORMPLAYER_URL = 'http://localhost:8888/play_remote/'
# A back door for phones that can't do SSL to access HQ through http
INSECURE_URL_BASE = "http://submit.mysite.com"
BUG_REPORT_RECIPIENTS = ['me@example.com']
PREVIEWER_RE = r'^.*@dimagi\.com$' | Python | 0.000001 |
cc6f30cb1c91b321db6bf77496c8b6fe7c56aabb | Add log parser | log_parser/parse_logs.py | log_parser/parse_logs.py | #!/usr/bin/env python
######################################################
# -*- coding: utf-8 -*-
# File Name: parse_logs.py
# Author: James Hong & Qian Li
# Created Date: 2017-10-28
# Description: Parse CloudWatch logs
######################################################
import argparse
import json
import os
import re
import sys
import boto3
import gzip
import numpy as np
import shutil
from collections import OrderedDict
# Scratch path each S3 log object is downloaded to before being parsed.
TEMP_INPUT = './download_log.gz'
def get_args():
    """Parse the required --bucket/--prefix/--outfile command-line options."""
    arg_parser = argparse.ArgumentParser()
    option_specs = [
        ('--bucket', '-b', 'S3 bucket where logs files are stored'),
        ('--prefix', '-p', 'S3 log files prefix'),
        ('--outfile', '-o', 'File to save parsed output'),
    ]
    for long_flag, short_flag, help_text in option_specs:
        arg_parser.add_argument(long_flag, short_flag, type=str,
                                required=True, help=help_text)
    return arg_parser.parse_args()
class StatsObject(object):
    """Accumulates per-metric samples across all parsed lambda log lines."""
    def __init__(self):
        self.numLambdas = 0
        # metric name -> list of observed values, in insertion order
        self.data = OrderedDict()
    def incrementNumLambdas(self):
        self.numLambdas += 1
    def record_key_value(self, k, v):
        if k not in self.data:
            self.data[k] = []
        self.data[k].append(v)
    def print_stats(self):
        """Print mean/stdev/median/min/max/percentiles for every metric."""
        print 'Parsed %d lambda logs' % self.numLambdas
        for k, v in self.data.iteritems():
            print k
            print ' mean:', np.mean(v)
            print ' stdev:', np.std(v)
            print ' median:', np.median(v)
            print ' min:', min(v)
            print ' max:', max(v)
            print ' 10th:', np.percentile(v, 10)
            print ' 25th:', np.percentile(v, 25)
            print ' 75th:', np.percentile(v, 75)
            print ' 90th:', np.percentile(v, 90)
            print ' 95th:', np.percentile(v, 95)
            print ' 99th:', np.percentile(v, 99)
    def dump_parsed_values(self, outfile):
        """Write the raw per-metric sample lists to *outfile* as JSON."""
        print >> sys.stderr, 'Writing parsed results to', outfile
        with open(outfile, 'w') as ofs:
            json.dump(self.data, ofs)
# Matches the end-of-invocation REPORT line Lambda emits to CloudWatch;
# groups: duration ms, billed duration ms, memory size MB, max memory MB.
REPORT_RE = re.compile(r'Duration: ([\d.]+) ms[\s]+Billed Duration: (\d+) ms[\s]+Memory Size: (\d+) MB[\s]+Max Memory Used: (\d+) MB')
def parse_line(line, stats):
    """Extract timing/billing metrics from one CloudWatch log line into *stats*."""
    if 'Timelist:' in line:
        try:
            _, timelist = line.split('Timelist:', 1)
            # the payload is double-encoded JSON, hence the nested loads
            timelistObj = json.loads(json.loads(timelist.strip()))
            for k, v in timelistObj.iteritems():
                stats.record_key_value(k, v)
        except Exception as e:
            print >> sys.stderr, e, line
    matchObj = REPORT_RE.search(line)
    if matchObj is not None:
        duration = float(matchObj.group(1))
        billedDuration = int(matchObj.group(2))
        memorySize = int(matchObj.group(3))
        maxMemoryUsed = int(matchObj.group(4))
        stats.record_key_value('duration', duration)
        stats.record_key_value('billed-duration', billedDuration)
        stats.record_key_value('memory-size', memorySize)
        stats.record_key_value('max-memory-used', maxMemoryUsed)
        # one REPORT line corresponds to exactly one lambda invocation
        stats.incrementNumLambdas()
def ensure_clean_state():
    """Delete a stale scratch download left over from a previous run."""
    if os.path.exists(TEMP_INPUT):
        os.remove(TEMP_INPUT)
def main(args):
    """Download every .gz log object under the S3 prefix and aggregate stats."""
    ensure_clean_state()
    print >> sys.stderr, 'Bucket: ', args.bucket
    print >> sys.stderr, 'Prefix: ', args.prefix
    stats = StatsObject()
    s3 = boto3.resource('s3')
    inputBucket = args.bucket
    inputPrefix = args.prefix
    # We need to fetch file from S3
    logsBucket = s3.Bucket(inputBucket)
    for obj in logsBucket.objects.filter(Prefix=inputPrefix):
        objKey = obj.key
        if objKey.endswith('.gz'):
            print >> sys.stderr, 'Parsing', objKey
            # every object reuses the same scratch path, one at a time
            s3.Object(logsBucket.name, objKey).download_file(TEMP_INPUT)
            try:
                with gzip.open(TEMP_INPUT, 'rb') as logFile:
                    for line in logFile:
                        parse_line(line, stats)
            except Exception as e:
                # a corrupt archive skips the object, not the whole run
                print >> sys.stderr, e
    print('S3 Bucket: {}'.format(args.bucket))
    print('File Prefix: {}'.format(args.prefix))
    stats.print_stats()
    if args.outfile is not None:
        stats.dump_parsed_values(args.outfile)
if __name__ == '__main__':
    # parse CLI arguments eagerly and hand them straight to main()
    main(get_args())
| Python | 0.000006 | |
f380db268eecec80a5ab12fcc553c0278452b18a | add file ProbModelXML.py along with an example of student model | pgmpy/readwrite/ProbModelXML.py | pgmpy/readwrite/ProbModelXML.py | """
ProbModelXML: http://leo.ugr.es/pgm2012/submissions/pgm2012_submission_43.pdf
For the student example the ProbModelXML file should be:
<?xml version="1.0" encoding="UTF-8"?>
<ProbModelXML formatVersion="1.0">
<ProbNet type=BayesianNetwork >
<AdditionalConstraints />
<Comment>
Student example model from Probabilistic Graphical Models: Principles and Techniques by Daphne Koller
</Comment>
<Language>
English
</Language>
<AdditionalProperties />
<Variable name="intelligence" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="smart"><AdditionalProperties /></State>
<State name="dumb"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="difficulty" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="difficult"><AdditionalProperties /></State>
<State name="easy"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="grade" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="grade_A"><AdditionalProperties /></State>
<State name="grade_B"><AdditionalProperties /></State>
<State name="grade_C"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="recommendation_letter" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="good"><AdditionalProperties /></State>
<State name="bad"><AdditionalProperties /></State>
</States>
</Variable>
<Variable name="SAT" type="FiniteState" role="Chance">
<Comment />
<Coordinates />
<AdditionalProperties />
<States>
<State name="high"><AdditionalProperties /></State>
<State name="low"><AdditionalProperties /></State>
</States>
</Variable>
<Links>
<Link var1="difficulty" var2="grade" directed=1>
<Comment>Directed Edge from difficulty to grade</Comment>
<Label>diff_to_grad</Label>
<AdditionalProperties />
</Link>
<Link var1="intelligence" var2="grade" directed=1>
<Comment>Directed Edge from intelligence to grade</Comment>
<Label>intel_to_grad</Label>
<AdditionalProperties />
</Link>
<Link var1="intelligence" var2="SAT" directed=1>
<Comment>Directed Edge from intelligence to SAT</Comment>
<Label>intel_to_sat</Label>
<AdditionalProperties />
</Link>
<Link var1="grade" var2="recommendation_letter" directed=1>
<Comment>Directed Edge from grade to recommendation_letter</Comment>
<Label>grad_to_reco</Label>
<AdditionalProperties />
</Link>
</Links>
<Potential type="Table" role="ConditionalProbability" label=string>
<Comment>CPDs in the form of table</Comment>
<AdditionalProperties />
<state>
<name>
</Potential>
</ProbNet>
<Policies />
<InferenceOptions />
<Evidence>
<EvidenceCase>
<Finding variable=string state=string stateIndex=integer numericValue=number/>
</EvidenceCase>
</Evidence>
</ProbModelXML>
""" | Python | 0 | |
394fed634204ea98afb443ab4574a7cc31c09529 | Create and delete objects in workspace context. Refs gh-185, gh-189 | tests/GIR/test_412_object_in_workspace_context.py | tests/GIR/test_412_object_in_workspace_context.py | # coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
from bookstorequery import BookStoreQuery
# In this test, we depend on workspace context test, which leaves
# undeleted workspaces (and thus contexts) in test database.
class TestObjectInWorkspaceContext(unittest.TestCase):
    """Create/update/purge one bookstore object across workspace contexts.

    The numeric suffixes in the method names enforce execution order
    (unittest runs methods alphabetically): 01 creates the object in /Foo,
    02/03 shadow it with updates in the child contexts, 04-06 purge the
    per-context copies from the deepest context upwards.
    """
    mgd = None
    manager = None
    bookstoreName = "BookStore In Foo"
    extraFoo = "extra Foo"
    extraFooBar = "extra FooBar"
    extraFooBarFooBar = "extra FooBarFooBar"
    def setUp(self):
        # NOTE(review): mgd/manager are class attributes but tearDown/setUp
        # assign instance attributes, so every test opens its own
        # connection -- confirm this is the intent.
        if self.mgd is None:
            self.mgd = TestConnection.openConnection()
            self.mgd.enable_workspace(True)
        if self.manager is None:
            self.manager = Midgard.WorkspaceManager(connection = self.mgd)
        # default workspace for each test is /Foo
        ws = Midgard.Workspace()
        self.manager.get_workspace_by_path(ws, "/Foo")
        self.mgd.set_workspace(ws)
    def tearDown(self):
        self.mgd.close()
        self.mgd = None
    def testCreate_01_ObjectInWorkspaceContext_Foo(self):
        # create the bookstore while the /Foo workspace is active
        bookstore = Midgard.Object.factory(self.mgd, "gir_test_book_store", None)
        bookstore.set_property("name", self.bookstoreName)
        bookstore.set_property("extra", self.extraFoo)
        self.assertTrue(bookstore.create())
        # set new context so we can query object from there
        # NOTE(review): the context is built but set_workspace() is never
        # called here, so the query below still runs in /Foo -- confirm.
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo/Bar")
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
    def testCreate_02_ObjectInWorkspaceContext_FooBar(self):
        # set new context so we can query object from there
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo/Bar")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        # updating in /Foo/Bar must not leak into the parent context
        bookstore = bookstores[0]
        bookstore.set_property("extra", self.extraFooBar)
        self.assertTrue(bookstore.update())
        # test updated object
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertEqual(bookstore.get_property("extra"), self.extraFooBar)
        # set default context and get object
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        # the /Foo copy still carries the original value
        self.assertEqual(bookstore.get_property("extra"), self.extraFoo)
    def testCreate_03_ObjectInWorkspaceContext_FooBarFooBar(self):
        # set new context so we can query object from there
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo/Bar/FooBar")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        bookstore.set_property("extra", self.extraFooBarFooBar)
        self.assertTrue(bookstore.update())
        # test updated object
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertEqual(bookstore.get_property("extra"), self.extraFooBarFooBar)
        # set default context and get object
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        # the /Foo copy is still untouched
        self.assertEqual(bookstore.get_property("extra"), self.extraFoo)
    def testDelete_04_ObjectInWorkspaceContext_FooBarFooBar(self):
        # set /Foo/Bar/FooBar context and get object
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo/Bar/FooBar")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertTrue(bookstore.purge(False))
        # after purging the deepest copy, the /Foo/Bar copy shines through
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertEqual(bookstore.get_property("extra"), self.extraFooBar)
    def testDelete_05_ObjectInWorkspaceContext_FooBar(self):
        # set /Foo/Bar context and get object
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo/Bar")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertTrue(bookstore.purge(False))
        # after purging the /Foo/Bar copy, the /Foo original remains
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertEqual(bookstore.get_property("extra"), self.extraFoo)
    def testDelete_06_ObjectInWorkspaceContext_FooBarFooBar(self):
        # set /Foo context and get object
        context = Midgard.WorkspaceContext()
        self.manager.get_workspace_by_path(context, "/Foo")
        self.mgd.set_workspace(context)
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 1)
        bookstore = bookstores[0]
        self.assertTrue(bookstore.purge(False))
        # the last copy is gone: no object left in any context
        bookstores = BookStoreQuery.findByName(self.mgd, self.bookstoreName)
        self.assertEqual(len(bookstores), 0)
| Python | 0 | |
59b1787f620e4665b17b0cc2283c2363aac92d18 | add test class for game | tests/game_test.py | tests/game_test.py | from unittest import TestCase
from model.game import Game
from model.player import Player
from model.card import Card
class GameWithThreePlayerTest(TestCase):
    def setUp(self):
        # fresh three-player game before every test
        self.game = Game([Player("P1"), Player("P2"), Player("P3")])
def test_createDeck_correctSize(self):
# then
self.assertEquals(len(self.game.card_deck), 32)
def test_createDeck_containsAllCards(self):
# then
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.SEVEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.EIGHT) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.NINE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.TEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.JACK) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.QUEEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.KING) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.BELLS, Card.Face.ACE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.SEVEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.EIGHT) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.NINE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.TEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.JACK) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.QUEEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.KING) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.HEARTS, Card.Face.ACE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.SEVEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.EIGHT) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.NINE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.TEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.JACK) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.QUEEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.KING) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.LEAVES, Card.Face.ACE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.SEVEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.EIGHT) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.NINE) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.TEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.JACK) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.QUEEN) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.KING) in self.game.card_deck)
self.assertTrue(Card(Card.Suit.ACORNS, Card.Face.ACE) in self.game.card_deck)
def test_createDeck_noDuplicateCards(self):
# given
card_counter = {}
for card in self.game.card_deck:
count = card_counter.get(card, 0)
card_counter[card] = count + 1
# then
for card, count in card_counter.items():
self.assertEquals(count, 1)
def test_clearCards(self):
# given
self.game.skat.append(Card(Card.Suit.BELLS, Card.Face.SEVEN))
[player.cards.append(Card(Card.Suit.BELLS, Card.Face.SEVEN)) for player in self.game.players]
# when
self.game.clear_cards()
# then
self.assertEquals(len(self.game.skat), 0)
for player in self.game.players:
self.assertEquals(len(player.cards), 0)
    def test_giveOutCards(self):
        # TODO: implement once dealing rules are pinned down
        pass
    def test_startNew(self):
        # TODO: implement; start_new() is currently only covered indirectly
        # by the seat/dealer rotation tests below
        pass
    def test_seatsAndDealerPositions_firstRound(self):
        """Round 1: players[0] deals (and sits third), seating starts at players[1]."""
        # when
        self.game.start_new()
        # then
        self.assertEquals(self.game.players[0], self.game.get_dealer())
        self.assertEquals(self.game.players[0], self.game.get_third_seat())
        self.assertEquals(self.game.players[1], self.game.get_first_seat())
        self.assertEquals(self.game.players[2], self.game.get_second_seat())
    def test_seatsAndDealerPositions_secondRound(self):
        """Round 2: the dealer role rotates one position to players[1]."""
        # when
        self.game.start_new()
        self.game.start_new()
        # then
        self.assertEquals(self.game.players[1], self.game.get_dealer())
        self.assertEquals(self.game.players[1], self.game.get_third_seat())
        self.assertEquals(self.game.players[2], self.game.get_first_seat())
        self.assertEquals(self.game.players[0], self.game.get_second_seat())
    def test_seatsAndDealerPositions_thirdRound(self):
        """Round 3: the dealer role rotates again to players[2]."""
        # when
        self.game.start_new()
        self.game.start_new()
        self.game.start_new()
        # then
        self.assertEquals(self.game.players[2], self.game.get_dealer())
        self.assertEquals(self.game.players[2], self.game.get_third_seat())
        self.assertEquals(self.game.players[0], self.game.get_first_seat())
        self.assertEquals(self.game.players[1], self.game.get_second_seat())
def test_seatsAndDealerPositions_fourthRound(self):
# when
self.game.start_new()
self.game.start_new()
self.game.start_new()
self.game.start_new()
# then
self.assertEquals(self.game.players[0], self.game.get_dealer())
self.assertEquals(self.game.players[0], self.game.get_third_seat())
self.assertEquals(self.game.players[1], self.game.get_first_seat())
self.assertEquals(self.game.players[2], self.game.get_second_seat()) | Python | 0 | |
209314b65ee960d73ee81baad6b9ced4102d6c0b | Introduce GenericSparseDB() class | lib/generic_sparse_db.py | lib/generic_sparse_db.py | #!/usr/bin/env python
# -*- encoding: utf-8
import gzip
import scipy.io as sio
from utils.utils import Utils
class GenericSparseDB(Utils):
    """Base class for sparse-matrix-backed databases loaded from disk.

    Subclasses are expected to provide ``_matrix_fn``, ``_factors_fn`` and
    ``_colnames_fn`` attributes before ``init()`` is called. ``_load_pickle``
    comes from the ``Utils`` base class.
    """

    def init(self):
        """Load the sparse matrix plus its factor/column metadata from disk."""
        self.data = sio.mmread(gzip.open(self._matrix_fn)).tolil()
        self.factors = self._load_pickle(self._factors_fn)
        self.fac_len = len(self.factors)
        # Column names = factor columns first, then the remaining columns.
        self.col_names = self.factors + self._load_pickle(self._colnames_fn)
        assert self.data.shape[1] == len(self.col_names),\
            'Mismatch between the number of columns: %s - %s.'\
            % (self.data.shape[1], len(self.col_names))

    def reset(self):
        """Reload everything from disk, discarding in-memory state.

        Bug fix: this previously called ``self._init()``, which does not
        exist (the loader above is named ``init``), so ``reset()`` always
        raised AttributeError.
        """
        self.init()
| Python | 0 | |
47dff2561be481ff067c22ed98d9ea6a9cf8ae10 | Add test to execute notebooks | test/test_notebook.py | test/test_notebook.py | import os
import glob
import contextlib
import subprocess
import pytest
notebooks = list(glob.glob("*.ipynb", recursive=True))
@contextlib.contextmanager
def cleanup(notebook):
    """Remove the ``<name>.html`` file nbconvert produces for *notebook*.

    Bug fix: in a ``@contextmanager`` generator, code after a bare ``yield``
    is skipped when the ``with`` body raises (the exception is thrown into
    the generator at the yield point). The removal must live in a
    ``finally`` block so failed notebook runs do not leave HTML behind.
    """
    name, __ = os.path.splitext(notebook)
    try:
        yield
    finally:
        fname = name + ".html"
        if os.path.isfile(fname):
            os.remove(fname)
@pytest.mark.parametrize("notebook", notebooks)
def test_notebook(notebook):
    """Execute each *.ipynb via nbconvert; exit status 0 means every cell ran."""
    with cleanup(notebook):
        # hack to execute the notebook from commandline
        assert 0 == subprocess.call(["jupyter", "nbconvert", "--to=html",
                                     "--ExecutePreprocessor.enabled=True",
                                     notebook])
| Python | 0.000001 | |
ec033203d8e82258347eb4f6a6a83ef67bc9171c | Add expr tests | tests/test_Expr.py | tests/test_Expr.py | #!/usr/bin/python3
import pytest
import numpy as np
def test_nans_in_same_place(testCOB):
    """Normalization must neither introduce nor remove NaNs vs. raw data."""
    norm_expr = testCOB.expr(raw=False)
    # .loc replaces the long-deprecated (and since removed) DataFrame.ix
    # indexer; both index and columns here are labels, so the selection is
    # unchanged.
    raw_expr = testCOB.expr(raw=True).loc[norm_expr.index, norm_expr.columns]
    assert all(np.isnan(norm_expr) == np.isnan(raw_expr))
def test_inplace_nansort(testCOB):
    """inplace_nansort must leave NaNs at exactly their original positions."""
    x = np.random.rand(50000)
    # scatter ~500 NaNs at random indices (duplicates possible, so the
    # actual count may be slightly lower)
    for i in np.random.randint(0,50000,500):
        x[i] = np.nan
    sorted_x = testCOB.inplace_nansort(x)
    assert all(np.isnan(x) == np.isnan(sorted_x))
| Python | 0.000001 | |
f3c4b7513c49189750ea15b36e561a4e5ed56214 | add linear classification back | soccer/gameplay/evaluation/linear_classification.py | soccer/gameplay/evaluation/linear_classification.py |
# Classifies a feature into any number of classes
# Linear classfication defined is
# y = f(x, w, b) where...
# x is a vector of input features of an object
# w is a vector of weights to apply to the features
# b is the bias of the feature-weight system
# f() is x dot w + b
# y is the final output score
# Classifies the object into two distinct class based on a cutoff value
# Anything less than the cutoff is of class false, greater than the cutoff is of class true
#
# @param input The vector of input features
# @param weights The vector of weights to apply to the input features
# @param bias The bias of the features-weight system
# @param cutoff The number which splits the output score of the object into two classes
# @param Returns tuple of the class (true or false) and the given score
def binary_classification(input, weights, bias, cutoff):
    """Return ``(is_class, score)`` for a linear binary classifier.

    NOTE(review): the comment block above states that scores *below* the
    cutoff are class ``False``, but this code returns ``True`` when
    ``score < cutoff``. One of the two is wrong -- confirm the intended
    polarity with the callers before changing either.
    """
    score = linear_classification(input, weights, bias)
    return (score < cutoff, score)
# Returns the raw output score of the linear classifier based on the dot product
#
# @param input The vector of input features
# @param weights The vector of weights to apply to the input features
# @param bias The bias of the features-weight system
def linear_classification(input, weights, bias):
    """Return the raw linear score: dot(input, weights) + bias.

    ``input`` and ``weights`` are expected to have the same length.
    """
    # zip + generator expression replaces the two-iterable
    # map(lambda x, w: x * w, ...) form: clearer, and identical on
    # Python 2 and 3. (This line also had unrelated dataset metadata
    # fused onto it; restored to plain code.)
    return sum(x * w for x, w in zip(input, weights)) + bias
14c20e35bcfc55cc3c12d94596079fc27a907f94 | Add unit tests | tests/test_unit.py | tests/test_unit.py | import unittest
from test_utilities import mapTestJsonFiles, mapJsonToYml, testYaml, getImmediateSubdirectories, unidiff_output
class TestUnit(unittest.TestCase):
    """Round-trip test: JSON fixtures -> yml mapping -> re-tested yml."""

    def test_input(self):
        """Every subdirectory of test_input must round-trip unchanged."""
        testDirectories = getImmediateSubdirectories('test_input')
        for directory in testDirectories:
            json = mapTestJsonFiles(directory)
            ymlInput = mapJsonToYml(json)['services']
            ymlOutput = testYaml(ymlInput, inputDirectoryName=directory)
            try:
                self.assertEqual(ymlInput, ymlOutput, msg='{}\n{}'.format(directory, unidiff_output(ymlOutput, ymlInput)))
            except AssertionError as e:
                # Bug fix: the original ``except Exception, e: print(e)``
                # used Python-2-only syntax AND swallowed AssertionError,
                # so this test passed unconditionally. Print the diff for
                # debugging, then re-raise so the failure is reported.
                print(e)
                raise
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 | |
c2b082ebe95acc24f86fde9cd6875d7de3a9ca40 | Set up test_user file | tests/test_user.py | tests/test_user.py | import unittest
import settings
import requests_mock
from util import register_uris
from pycanvas.user import User
from pycanvas.exceptions import ResourceDoesNotExist
from pycanvas import Canvas
class TestUser(unittest.TestCase):
    """
    Tests core Account functionality
    """

    @classmethod
    def setUpClass(cls):
        # Shared, mocked Canvas client for all tests in this class.
        # (First parameter renamed to the conventional ``cls``; it is the
        # class object in a classmethod. The last line also had unrelated
        # dataset metadata fused onto it; restored to plain code.)
        requires = {
        }
        adapter = requests_mock.Adapter()
        cls.canvas = Canvas(settings.BASE_URL, settings.API_KEY, adapter)
        register_uris(settings.BASE_URL, requires, adapter)
39589065b158061c280f68fa730f72bf595428be | Add Stata package (#10189) | var/spack/repos/builtin/packages/stata/package.py | var/spack/repos/builtin/packages/stata/package.py | # Copyright 2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
from datetime import datetime
class Stata(Package):
    """STATA is a general-purpose statistical software package developed
    by StataCorp."""

    # Known limitations of this installer:
    # * This really only installs the command line version of the program. To
    # install GUI support there are extra packages needed that I can't easily
    # test right now (should be installable via yum as a temp workaround):
    # libgtk-x11-2.0.so libgdk-x11-2.0.so libatk-1.0.so libgdk_pixbuf-2.0.so
    # Those libraries appear to be provided by: pango gdk-pixbuf2 gtk2
    #
    # * There are two popular environment variables that can be set, but vary from
    # place to place, so future enhancement maybe to support STATATMP and TMPDIR.
    #
    # * I haven't tested any installer version but 15.

    homepage = "https://www.stata.com/"
    # url      = "stata"

    version('15', '2486f4c7db1e7b453004c7bd3f8da40ba1e30be150613065c7b82b1915259016')

    # V15 depends on libpng v12 and fails with other versions of libpng
    depends_on('libpng@1.2.57')

    # STATA is downloaded from user/pass protected ftp as Stata15Linux64.tar.gz
    def url_for_version(self, version):
        """Expect the manually downloaded tarball in the current directory."""
        return "file://{0}/Stata{1}Linux64.tar.gz".format(os.getcwd(), version)

    # STATA is simple and needs really just the PATH set.
    def setup_environment(self, spack_env, run_env):
        """Expose the install prefix on PATH and libpng on LD_LIBRARY_PATH.

        Bug fix: the first line used a bare ``prefix`` name, which is
        undefined in this method's scope and raised NameError; the
        package's install prefix is ``self.prefix``.
        """
        run_env.prepend_path('PATH', self.prefix)
        run_env.prepend_path('LD_LIBRARY_PATH', self.spec['libpng'].prefix.lib)

    # Extracting the file provides the following:
    # ./unix/
    # ./unix/linux64/
    # ./unix/linux64/docs.taz
    # ./unix/linux64/setrwxp
    # ./unix/linux64/ado.taz
    # ./unix/linux64/inst2
    # ./unix/linux64/base.taz
    # ./unix/linux64/bins.taz
    # ./license.pdf
    # ./stata15.ico
    # ./install
    #
    # The installation scripts aren't really necessary:
    # ./install is a shell script that sets up the environment.
    # ./unix/linux64/setrwxp is a shell script that ensures permissions.
    # ./unix/linux64/inst2 is the actual installation script.
    #
    # 1. There is a markfile that is the version number. Stata uses this for
    # for doing version checks/updates.
    # echo $(date) > installed.150
    #
    # 2. Then it extracts the tar.gz files: ado.taz base.taz bins.taz docs.taz
    #
    # 3. It copies installer scripts to root directory
    # cp ./unix/linux64/setrwxp setrwxp
    # cp ./unix/linux64/inst2 inst2
    #
    # 4. Then it checks for proper permissions:
    # chmod 750 setrwxp inst2
    # ./setrwxp now
    #
    # 5. The last step has to be run manually since it is an interactive binary
    # for configuring the license key. Load the module and run:
    # $ stinit
    def install(self, spec, prefix):
        """Replicate Stata's inst2 installer steps 1-4, then copy into prefix."""
        bash = which('bash')
        tar = which('tar')

        # Step 1.
        x = datetime.now()
        with open("installed.150", "w") as fh:
            fh.write(x.strftime("%a %b %d %H:%M:%S %Z %Y"))

        # Step 2.
        instlist = ['ado.taz', 'base.taz', 'bins.taz', 'docs.taz']
        for instfile in instlist:
            tar('-x', '-z', '-f', 'unix/linux64/' + instfile)

        # Step 3.
        install('unix/linux64/setrwxp', 'setrwxp')
        install('unix/linux64/inst2', 'inst2')

        # Step 4. Since the install script calls out specific permissions and
        # could change in the future (or old versions) I thought it best to
        # just use it.
        bash("./setrwxp", "now")

        # Install should now be good to copy into the installation directory.
        install_tree('.', prefix)
| Python | 0 | |
c5a2d916fa907aa15a425dedc405ecc0ae2ba668 | Add script to compare taxa from before and after filter step | compare_taxa.py | compare_taxa.py | #!/usr/bin/env python
"""Module to compare filtered and unfiltered taxa deduced from their respective trees to assert that they match."""
from divergence import parse_options
import logging as log
import sys
def fail(unfiltered_a, unfiltered_b, filtered_a, filtered_b):
    """Report error back to the user and exit with error code 1."""
    # NOTE: this module is Python 2 (``print >>`` statements, dict.iteritems).
    def _log_error_and_print_stderr(msg, dictionary = None):
        """Both log an error and print it to sys.stderr"""
        log.error(msg)
        print >> sys.stderr, msg
        if dictionary:
            # Dump each genome-ID -> value pair on its own line.
            for key, value in dictionary.iteritems():
                log.error('{0}\t{1}'.format(key, value))
                print >> sys.stderr, '{0}\t{1}'.format(key, value)
    _log_error_and_print_stderr('Unfiltered & filtered tree clusterings do not match!')
    _log_error_and_print_stderr('Unfiltered taxon A:', unfiltered_a)
    _log_error_and_print_stderr('Unfiltered taxon B:', unfiltered_b)
    _log_error_and_print_stderr('Filtered taxon A:', filtered_a)
    _log_error_and_print_stderr('Filtered taxon B:', filtered_b)
    sys.exit(1)
def main(args):
    """Main function called when run from command line or as part of pipeline."""
    usage = """
Usage: compare_taxa.py
--unfiltered-taxon-a=FILE    genome IDs for taxon A as deduced from phylogenetic tree of unfiltered concatemers
--unfiltered-taxon-b=FILE    genome IDs for taxon B as deduced from phylogenetic tree of unfiltered concatemers
--filtered-taxon-a=FILE      genome IDs for taxon A as deduced from phylogenetic tree of filtered concatemers
--filtered-taxon-b=FILE      genome IDs for taxon B as deduced from phylogenetic tree of filtered concatemers
"""
    options = ['unfiltered-taxon-a', 'unfiltered-taxon-b', 'filtered-taxon-a', 'filtered-taxon-b']
    unfiltered_a_file, unfiltered_b_file, filtered_a_file, filtered_b_file = parse_options(usage, options, args)

    # Parse each file of tab-separated "RefSeq project ID <TAB> value" lines.
    # (De-duplicated: the original repeated this parsing block four times.)
    unfiltered_a = _read_taxon_file(unfiltered_a_file)
    unfiltered_b = _read_taxon_file(unfiltered_b_file)
    filtered_a = _read_taxon_file(filtered_a_file)
    filtered_b = _read_taxon_file(filtered_b_file)

    # The two tree labelings may be swapped (filtered A matching unfiltered B),
    # so probe one ID to pick the orientation, then require exact set matches.
    # next(iter(...)) replaces the Python-2-only dict.keys()[0].
    if next(iter(unfiltered_a)) in filtered_a:
        if not (set(unfiltered_a) == set(filtered_a)
                and set(unfiltered_b) == set(filtered_b)):
            fail(unfiltered_a, unfiltered_b, filtered_a, filtered_b)
    else:
        if not (set(unfiltered_a) == set(filtered_b)
                and set(unfiltered_b) == set(filtered_a)):
            fail(unfiltered_a, unfiltered_b, filtered_b, filtered_a)


def _read_taxon_file(path):
    """Read a file of ``ID<TAB>value`` lines into an {ID: value} dict."""
    with open(path) as read_handle:
        return dict((line.split('\t')[0], line.strip().split('\t')[1]) for line in read_handle)
if __name__ == '__main__':
main(sys.argv[1:])
| Python | 0 | |
5ba9feee6195a1f8b488888bf2065a8a5cba94b8 | Add countdown written in python | tools/countdown.py | tools/countdown.py | #!/bin/env python3
"""
Usage:
countdown [options] [<seconds>]
Options:
"""
"""
This example uses docopt with the built in cmd module to demonstrate an
interactive command application.
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-i | --interactive)
my_program (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
<arguements>, ARGUMENTS -> list
--options, words with dash(-) , or --input=FILE or -i FILE
commands, dont follow above
Pattern constructs:
[](brackets) optional elements
()(parentheses) required elements, everything not put in [] also required
|(pipe) mutually exclusive elements. Group with () or []
...(ellipsis) one or more elements, e.g. my_program.py FILE ..., one ore
more FILE s accepted, for zero or more use [FILE ...]
[options](case sensitive) shortcut for options, defined in options below
"[--]" used by convetntion to separate positional arguements
"[-]" by conevntion signify stdin is used instead of a file
[-v | -vv | -vv] countable flags, args["-v"] will be nr of occ
Options:
--verbose # GOOD
-o FILE # GOOD
Other: --bad # BAD, line does not start with dash "-"
-o FILE --output=FILE # without comma, with "=" sign
-i <file>, --input <file> # with comma, without "=" sing
Use two spaces to separate options with their informal description
--verbose More text. # BAD, will be treated as if verbose option had
# an argument "More", so use 2 spaces instead
-q Quit. # GOOD
-o FILE Output file. # GOOD
--stdout Use stdout. # GOOD, 2 spaces
If you want to set a default value for an option with an argument,
put it into the option-description, in form [default: <my-default-value>]:
--coefficient=K The K coefficient [default: 2.95]
--output=FILE Output file [default: test.txt]
--directory=DIR Some directory [default: ./]
for git like sub commands use, options_first parameter
args = docopt(__doc__,
version='git version 1.7.4.4',
options_first=True)
print('global arguments:')
print(args)
print('command arguments:')
argv = [args['<command>']] + args['<args>']
if args['<command>'] == 'add':
# In case subcommand is implemented as python module:
import git_add
print(docopt(git_add.__doc__, argv=argv))
"""
import sys
from docopt import docopt
import logging
import time
import pytest
# docopt(doc, argv=None, help=True, version=None, options_first=False))
class Countdown:
    """An hour/minute/second countdown backed by a canonical total-seconds value."""

    def __init__(self):
        self.hour = 0
        self.minute = 0
        self.second = 0
        self.seconds = 0
        self.finished = False

    def _normalize_from_seconds(self):
        # Derive the h/m/s fields from the canonical total-seconds value.
        total_minutes, self.second = divmod(self.seconds, 60)
        self.hour, self.minute = divmod(total_minutes, 60)

    def _normalize_to_seconds(self):
        # Derive the canonical total-seconds value from the h/m/s fields.
        self.seconds = (self.hour * 60 + self.minute) * 60 + self.second

    def sync(self):
        self._normalize_from_seconds()

    def countdown(self):
        """Tick one second; return True while time remains, False when done."""
        if self.seconds:
            self.seconds -= 1
        else:
            self.finished = True
        self.sync()
        # while-looping with True is the more accustomed habit
        return not self.finished

    @classmethod
    def from_seconds(cls, seconds):
        obj = cls()
        obj.seconds = seconds
        obj.sync()
        return obj

    @classmethod
    def from_minutes(cls, minutes):
        return cls.from_seconds(60 * minutes)

    @classmethod
    def from_hours(cls, hours):
        return cls.from_minutes(60 * hours)

    @classmethod
    def from_stamp(cls, hour, minute, second):
        obj = cls()
        obj.hour, obj.minute, obj.second = hour, minute, second
        obj._normalize_to_seconds()
        return obj

    def __eq__(self, other):
        return (self.hour, self.minute, self.second) == \
               (other.hour, other.minute, other.second)
def _get_second(seconds):
"return leftover second"
return seconds % 60
def _get_minute(minutes):
"return leftover minute"
return minutes % 60
def _get_minutes(seconds):
"return total minutes"
return seconds // 60
def _get_hours(minutes):
"return total hour"
return minutes // 60
def main():
    """Parse CLI arguments with docopt, echo them, then pause briefly."""
    opt = docopt(__doc__, sys.argv[1:])
    print(opt)
    time.sleep(2)
@pytest.mark.parametrize("seconds, expected", [
    (1, (0,0,1)),
    (59, (0,0,59)),
    (61, (0,1,1)),
    (121, (0,2,1)),
    (299, (0,4,59)),
    (300, (0,5,0)),
    (301, (0,5,1)),
    (3599, (0,59,59)),
    (3600, (1,0,0)),
    (3601, (1,0,1)),
    (3661, (1,1,1)),
])
def test_countdown_convert_from_seconds(seconds, expected):
    """from_seconds must agree with from_stamp on (hour, minute, second)."""
    cd = Countdown.from_seconds(seconds)
    cdexpected = Countdown.from_stamp(*expected)
    assert cd == cdexpected
if __name__ == "__main__":
main()
| Python | 0.00001 | |
0b8d5794d2c5a1ae46659e02b65d1c21ffe8881d | Implement tests for temperature endpoint | babyonboard/api/tests/test_views.py | babyonboard/api/tests/test_views.py | import json
from rest_framework import status
from django.test import TestCase, Client
from django.urls import reverse
from ..models import Temperature
from ..serializers import TemperatureSerializer
client = Client()
class GetCurrentTemperatureTest(TestCase):
    """ Test class for GET current temperature from API """
    def setUp(self):
        # One stored reading is enough: the endpoint serves the latest one.
        Temperature.objects.create(temperature=35)
    def test_get_current_temperature(self):
        """GET /temperature must return the most recent reading, serialized."""
        response = client.get(reverse('temperature'))
        # "most recent" = last row when ordered by date then time
        temperature = Temperature.objects.order_by('date', 'time').last()
        serializer = TemperatureSerializer(temperature)
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class CreateNewTemperatureTest(TestCase):
    """ Test class for saving a new temperature registry """
    def setUp(self):
        self.valid_payload = {
            'temperature': 27.2
        }
        # An empty string is rejected by the serializer's float field.
        self.invalid_payload = {
            'temperature': ''
        }
    def test_create_valid_temperature(self):
        """POSTing a valid reading must return 201 Created.

        (Method name typo fixed: was ``test_creat_valid_temperature``.)
        """
        response = client.post(
            reverse('temperature'),
            data=json.dumps(self.valid_payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_invalid_temperature(self):
        """POSTing an invalid reading must return 400 Bad Request."""
        response = client.post(
            reverse('temperature'),
            data=json.dumps(self.invalid_payload),
            content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| Python | 0.000002 | |
fa1057d8b9a44bdf2ee0f667184ff5854fd0e8e1 | add rds backup | base/docker/scripts/rds-snapshot.py | base/docker/scripts/rds-snapshot.py | #!/usr/bin/env python
"""
Backup Amazon RDS DBs.
Script is expected to be run on EC2 VM within the same Amazon account as RDS.
Script reads tags of EC2 and then searches for all matching RDSes.
Where matching RDS is the one that shares the same "Stack" tag value.
"""
import sys
import time
import argparse
import boto
import boto.utils
import boto.ec2
import boto.rds2
def main():
    """Snapshot every RDS instance sharing this EC2's Stack tag.

    Returns the number of snapshot requests that failed (0 on full success).
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--stack-tag', help="name of tag shared by EC2 and RDS", default="Stack")
    parser.add_argument('--dry-run', help="skip backup", action='store_true')
    args = parser.parse_args()

    # Discover where we are from the EC2 instance metadata service.
    # The availability zone minus its last letter is the region name.
    region = boto.utils.get_instance_metadata()['placement']['availability-zone'][:-1]
    print "region: {}".format(region)
    instance_id = boto.utils.get_instance_metadata()['instance-id']
    print "instance_id: {}".format(instance_id)
    # Field 5 of the instance-profile ARN is the AWS account id.
    account_id = boto.utils.get_instance_metadata()['iam']['info']['InstanceProfileArn'].split(':')[4]
    print "account_id: {}".format(account_id)

    conn_ec2 = boto.ec2.connect_to_region(region)
    conn_rds = boto.rds2.connect_to_region(region)

    # Read this VM's Stack tag; without it we cannot match any RDS.
    my_instance = conn_ec2.get_all_instances(instance_ids=[instance_id])[0].instances[0]
    if args.stack_tag not in my_instance.tags:
        print "Missing tag '{}' on this EC2".format(args.stack_tag)
        return 1
    my_stack = my_instance.tags[args.stack_tag]
    print "Tag {}:{}".format(args.stack_tag, my_stack)
    print

    db_descriptions = conn_rds.describe_db_instances()[u'DescribeDBInstancesResponse'][u'DescribeDBInstancesResult'][u'DBInstances']

    # One shared timestamp ("<secs>-<frac>") so all snapshots of this run
    # share a suffix; '.' is not valid in a snapshot identifier.
    ts_formatted = "-".join(str(time.time()).split('.'))

    error_count = 0
    for db_desc in db_descriptions:
        rds_id = db_desc['DBInstanceIdentifier']
        # For now AWS API does not support filtering filters={'tag:{}'.format(STACK_TAG):my_stack,}
        # see: http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBInstances.html
        # so we have to go through list_tags_for_resource(arn)
        rds_arn = 'arn:aws:rds:{region}:{account_id}:db:{rds_id}'.format(
            region=region,
            account_id=account_id,
            rds_id=rds_id
        )
        tag_list = conn_rds.list_tags_for_resource(rds_arn)['ListTagsForResourceResponse']['ListTagsForResourceResult']['TagList']
        tag_dict = dict(map(lambda x: (x['Key'], x['Value']), tag_list))
        if args.stack_tag not in tag_dict:
            print "Skipping {} as missing tag '{}'".format(rds_id, args.stack_tag)
        elif tag_dict[args.stack_tag] != my_stack:
            print "Skipping {} as tag '{}'!='{}'".format(rds_id, tag_dict[args.stack_tag], my_stack)
        else:
            snapshot_id = '{}-{}'.format(rds_id, ts_formatted)
            if args.dry_run:
                print "Backing up {} as {} - dry run".format(rds_id, snapshot_id)
            else:
                print "Backing up {} as {} - requested".format(rds_id, snapshot_id)
                try:
                    conn_rds.create_db_snapshot(snapshot_id, rds_id)
                except boto.rds2.exceptions.InvalidDBInstanceState as e:
                    # e.g. instance is rebooting/backing up; count and continue
                    error_count += 1
                    print "Failed - API response: {}".format(e.body)
    return error_count
if __name__ == "__main__":
error_count = main()
sys.exit(error_count)
| Python | 0 | |
a9d458c0995db80f164f6099b5264f23c1ceffbb | Create 02.py | 02/qu/02.py | 02/qu/02.py | # Define a procedure, sum3, that takes three
# inputs, and returns the sum of the three
# input numbers.
def sum3(aa, bb, cc):
    """Return the sum of the three input numbers."""
    return sum((aa, bb, cc))
#print sum3(1,2,3)
#>>> 6
#print sum3(93,53,70)
#>>> 216
| Python | 0 | |
9a705f58acbcfb2cc7292cb396544f1f8c9b89a1 | Add basic web test | tests/baseweb_test.py | tests/baseweb_test.py | from __future__ import with_statement
from ass2m.ass2m import Ass2m
from ass2m.server import Server
from unittest import TestCase
from webtest import TestApp
from tempfile import mkdtemp
import os.path
import shutil
class BaseWebTest(TestCase):
    """End-to-end test: a temporary ass2m root served through a WSGI TestApp."""
    def setUp(self):
        # Fresh ass2m working directory for every test.
        self.root = mkdtemp(prefix='ass2m_test_root')
        ass2m = Ass2m(self.root)
        ass2m.create(self.root)
        server = Server(self.root)
        self.app = TestApp(server.process)
    def tearDown(self):
        if self.root:
            shutil.rmtree(self.root)
    def test_listAndDownload(self):
        """Directory listing shows new files and their contents are served."""
        res = self.app.get("/")
        assert "<h1>Index of /</h1>" in res.body
        # ``file`` is the Python-2 builtin; this module targets Python 2.
        with file(os.path.join(self.root, "penguins_are_cute"), 'a') as f:
            f.write("HELLO")
        res = self.app.get("/")
        assert "penguins_are_cute" in res.body
        res = self.app.get("/penguins_are_cute")
        assert "HELLO" == res.body
| Python | 0 | |
56eba00d00e450b5dc8fae7ea8475d418b00e2db | Add problem69.py | euler_python/problem69.py | euler_python/problem69.py | """
problem69.py
Euler's Totient function, φ(n) [sometimes called the phi function], is used to
determine the number of numbers less than n which are relatively prime to n. For
example, as 1, 2, 4, 5, 7, and 8, are all less than nine and relatively prime to
nine, φ(9)=6.
It can be seen that n=6 produces a maximum n/φ(n) for n ≤ 10. Find the value of
n ≤ 1,000,000 for which n/φ(n) is a maximum.
"""
from itertools import takewhile
from toolset import get_primes
# def phi(n):
# ps = list(unique(prime_factors(n)))
# return n * reduce(operator.mul, (1 - Fraction(1, p) for p in ps))
# return max((n for n in range(2, 1000000+1)), key=lambda n: n/phi(n))
#
# The commented-out solution above is correct and true to the problem
# description, but slightly slower than 1 minute.
#
# So, note that the phi function multiplies n by (1 - (1/p)) for every p in
# its unique prime factors. Therefore, phi(n) will diminish as n has a
# greater number of small unique prime factors. Since we are seeking the
# largest value for n/phi(n), we want to minimize phi(n). We are therefore
# looking for the largest number < 1e6 which is the product of the smallest
# unique prime factors, i.e successive prime numbers starting from 2.
def candidates():
    """Yield the primorials: 2, 2*3, 2*3*5, ... (products of successive primes)."""
    primes = get_primes()
    product = next(primes)
    while True:
        yield product
        product *= next(primes)
def problem69():
    """Return the largest primorial below one million.

    The primorials from candidates() are strictly increasing, so the last
    one under the limit is also the maximum of that prefix.
    """
    largest = None
    for value in candidates():
        if not value < 1e6:
            break
        largest = value
    return largest
| Python | 0.000241 | |
e082435619399051321e7c9ae02540f54e436e5b | Create acr_routerauthenticator.py | Server/integrations/acr_router/acr_routerauthenticator.py | Server/integrations/acr_router/acr_routerauthenticator.py | from org.xdi.model.custom.script.type.auth import PersonAuthenticationType
from org.jboss.seam.contexts import Context, Contexts
from org.jboss.seam.security import Identity
from org.xdi.oxauth.service import UserService, AuthenticationService, SessionStateService, VeriCloudCompromise
from org.xdi.util import StringHelper
from org.xdi.util import ArrayHelper
from org.xdi.oxauth.client.fido.u2f import FidoU2fClientFactory
from org.xdi.oxauth.service.fido.u2f import DeviceRegistrationService
from org.xdi.oxauth.util import ServerUtil
from org.xdi.oxauth.model.config import Constants
from org.jboss.resteasy.client import ClientResponseFailure
from org.jboss.resteasy.client.exception import ResteasyClientException
from javax.ws.rs.core import Response
from java.util import Arrays
import sys
import java
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "acr_router Initialization"
return True
def destroy(self, configurationAttributes):
print "acr_router Destroy"
print "acr_router Destroyed successfully"
return True
def getApiVersion(self):
return 1
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
credentials = Identity.instance().getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
context = Contexts.getEventContext()
session_attributes = context.get("sessionAttributes")
remote_ip = session_attributes.get("remote_ip")
client_id = self.getClientID(session_attributes)
sessionStateService = SessionStateService.instance()
sessionState = sessionStateService.getSessionState()
print "SessionState id: %s" % sessionState.getId()
acr = sessionStateService.getAcr(sessionState)
print "Current ACR_VALUE: " + acr
if (step == 1):
print "acr_router Authenticate for step 1"
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
userService = UserService.instance()
veriCloudCompromise= VeriCloudCompromise.instance()
find_user_by_uid = userService.getUser(user_name)
status_attribute_value = userService.getCustomAttribute(find_user_by_uid, "mail")
if status_attribute_value != None:
user_mail = status_attribute_value.getValue()
#isCompromise = veriCloudCompromise.is_compromised("testuser123@gmail.com", "123456")
isCompromise = False
if(isCompromise ):
sessionAttributes = sessionState.getSessionAttributes()
sessionAttributes.put("acr_values", "otp")
sessionAttributes.put("acr", "otp")
sessionState.setSessionAttributes(sessionAttributes)
sessionStateService.reinitLogin(sessionState,True)
else:
logged_in = userService.authenticate(user_name, user_password)
if (not logged_in):
return False
return True
elif (step == 2):
print "acr_router Authenticate for step 2"
authenticationService = AuthenticationService.instance()
user = authenticationService.getAuthenticatedUser()
if (user == None):
print "acr_router Prepare for step 2. Failed to determine user name"
return False
if (auth_method == 'authenticate'):
print "Code here to check APIs"
return True
else:
print "acr_router. Prepare for step 2. Authenticatiod method is invalid"
return False
return False
else:
return False
def prepareForStep(self, configurationAttributes, requestParameters, step):
if (step == 1):
return True
elif (step == 2):
print "acr_router Prepare for step 2"
return True
else:
return False
    def getExtraParametersForStep(self, configurationAttributes, step):
        """No extra session parameters need to be preserved between steps."""
        return None
    def getCountAuthenticationSteps(self, configurationAttributes):
        # NOTE(review): authenticate() above contains a step-2 branch, yet this
        # advertises a single step.  Presumably step 2 is only reached via the
        # ACR re-routing / reinitLogin flow -- confirm against the oxAuth
        # custom-script contract before relying on it.
        return 1
    def getPageForStep(self, configurationAttributes, step):
        # NOTE(review): this hook normally returns a page path (or "" / None
        # for the default page); returning True here looks suspicious -- verify
        # against the oxAuth PersonAuthenticationType interface.
        return True
    def logout(self, configurationAttributes, requestParameters):
        """Nothing extra to clean up on logout."""
        return True
def getClientID(self, session_attributes):
if not session_attributes.containsKey("client_id"):
return None
return session_attributes.get("client_id")
| Python | 0.000004 | |
47bdc98a7fb8c030f5beb09ec9bb1b83c100dc9a | Add missing migration | src/users/migrations/0008_auto_20160222_0553.py | src/users/migrations/0008_auto_20160222_0553.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-22 05:53
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: refreshes the help_text and the regex
    # validators on the user's GitHub and Twitter handle fields.
    dependencies = [
        ('users', '0007_auto_20160122_1333'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='github_id',
            field=models.CharField(blank=True, help_text='Your GitHub account, without the "@" sign. This will be shown when we display your public information.', max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z_-]*$', 'Not a valid GitHub account')], verbose_name='GitHub'),
        ),
        migrations.AlterField(
            model_name='user',
            name='twitter_id',
            field=models.CharField(blank=True, help_text='Your Twitter handle, without the "@" sign. This will be shown when we display your public information.', max_length=100, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z_]*$', 'Not a valid Twitter handle')], verbose_name='Twitter'),
        ),
    ]
| Python | 0.0002 | |
f0cd785688ed04821f0338021e2360b98bd9dd58 | add very simple perf test | conform/perf.py | conform/perf.py | #!/usr/bin/env python
import sys
import barrister
trans = barrister.HttpTransport("http://localhost:9233/")
client = barrister.Client(trans, validate_request=False)
num = int(sys.argv[1])
s = "safasdfasdlfasjdflkasjdflaskjdflaskdjflasdjflaskdfjalsdkfjasldkfjasldkasdlkasjfasld"
for i in range(num):
client.B.echo(s)
| Python | 0.000001 | |
e88a6a634f600a5ef3ae269fc0d49bcd1e1d58e8 | Revert "More accurate info in examples." | examples/svm/plot_iris.py | examples/svm/plot_iris.py | """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on the iris dataset. It
will plot the decision surface and the support vectors.
"""
import numpy as np
import pylab as pl
from scikits.learn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h=.02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
svc = svm.SVC(kernel='linear').fit(X, Y)
rbf_svc = svm.SVC(kernel='poly').fit(X, Y)
nu_svc = svm.NuSVC(kernel='linear').fit(X,Y)
lin_svc = svm.LinearSVC().fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:,0].min()-1, X[:,0].max()+1
y_min, y_max = X[:,1].min()-1, X[:,1].max()+1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with polynomial (degree 3) kernel',
'NuSVC with linear kernel',
'LinearSVC (linear kernel)']
pl.set_cmap(pl.cm.Paired)
for i, clf in enumerate((svc, rbf_svc, nu_svc, lin_svc)):
# Plot the decision boundary. For that, we will asign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
pl.subplot(2, 2, i+1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
pl.contourf(xx, yy, Z)
pl.axis('tight')
# Plot also the training points
pl.scatter(X[:,0], X[:,1], c=Y)
pl.title(titles[i])
pl.axis('tight')
pl.show()
| """
==================================================
Plot different SVM classifiers in the iris dataset
==================================================
Comparison of different linear SVM classifiers on the iris dataset. It
will plot the decision surface for four different SVM classifiers.
"""
import numpy as np
import pylab as pl
from scikits.learn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h=.02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
svc = svm.SVC(kernel='linear').fit(X, Y)
rbf_svc = svm.SVC(kernel='poly').fit(X, Y)
nu_svc = svm.NuSVC(kernel='linear').fit(X,Y)
lin_svc = svm.LinearSVC().fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:,0].min()-1, X[:,0].max()+1
y_min, y_max = X[:,1].min()-1, X[:,1].max()+1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with polynomial (degree 3) kernel',
'NuSVC with linear kernel',
'LinearSVC (linear kernel)']
pl.set_cmap(pl.cm.Paired)
for i, clf in enumerate((svc, rbf_svc, nu_svc, lin_svc)):
# Plot the decision boundary. For that, we will asign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
pl.subplot(2, 2, i+1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.set_cmap(pl.cm.Paired)
pl.contourf(xx, yy, Z)
pl.axis('tight')
# Plot also the training points
pl.scatter(X[:,0], X[:,1], c=Y)
pl.title(titles[i])
pl.axis('tight')
pl.show()
| Python | 0 |
9ba3c840514e765acac2542ee3faf47671824918 | add missing source file | moban/buffered_writer.py | moban/buffered_writer.py | from moban import utils, file_system
import fs
import fs.path
class BufferedWriter(object):
    """Write rendered files either to plain disk or into zip archives.

    Zip targets (addressed as ``zip://<archive>.zip/<member>``) are kept open
    in ``self.fs_list`` so several members can be added to one archive; call
    :meth:`close` when done to flush and release the archives.
    """
    def __init__(self):
        # archive path -> open pyfilesystem2 ZIP filesystem
        self.fs_list = {}
    def write_file_out(self, filename, content):
        """Dispatch *content* to a zip member or a regular file on disk."""
        if "zip://" in filename:
            self.write_file_out_to_zip(filename, content)
        else:
            utils.write_file_out(filename, content)
    def write_file_out_to_zip(self, filename, content):
        """Write *content* as a member of a zip archive, creating it if needed."""
        # Split "zip://archive.zip/inner/path" into archive and member path.
        # NOTE(review): assumes ".zip/" occurs exactly once in the URL -- a
        # member path containing ".zip/" would make this unpack fail; confirm.
        zip_file, file_name = filename.split(".zip/")
        zip_file = zip_file + ".zip"
        if zip_file not in self.fs_list:
            self.fs_list[zip_file] = fs.open_fs(
                file_system.to_unicode(zip_file), create=True
            )
        # Ensure intermediate directories exist inside the archive.
        base_dirs = fs.path.dirname(file_name)
        if not self.fs_list[zip_file].exists(base_dirs):
            self.fs_list[zip_file].makedirs(base_dirs)
        # writebytes() expects bytes; callers must pass encoded content.
        self.fs_list[zip_file].writebytes(
            file_system.to_unicode(file_name), content
        )
    def close(self):
        """Close every archive opened by write_file_out_to_zip()."""
        for fsx in self.fs_list.values():
            fsx.close()
| Python | 0.000001 | |
b573daf86d2bcb5d8dc71e45a65b5f2ffc0866b1 | Correct module help | examples/create_events.py | examples/create_events.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import misp_url, misp_key
import argparse
# For python2 & 3 compat, a bit dirty, but it seems to be the least bad one
try:
input = raw_input
except NameError:
pass
def init(url, key):
    """Return a PyMISP client (SSL verification enabled, JSON output)."""
    return PyMISP(url, key, True, 'json')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create an event on MISP.')
parser.add_argument("-d", "--distrib", type=int, help="The distribution setting used for the attributes and for the newly created event, if relevant. [0-3].")
parser.add_argument("-i", "--info", help="Used to populate the event info field if no event ID supplied.")
parser.add_argument("-a", "--analysis", type=int, help="The analysis level of the newly created event, if applicatble. [0-2]")
parser.add_argument("-t", "--threat", type=int, help="The threat level ID of the newly created event, if applicatble. [1-4]")
args = parser.parse_args()
misp = init(misp_url, misp_key)
event = misp.new_event(args.distrib, args.threat, args.analysis, args.info)
print event
response = misp.add_mutex(event, 'booh')
print response
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import misp_url, misp_key
import argparse
# For python2 & 3 compat, a bit dirty, but it seems to be the least bad one
try:
input = raw_input
except NameError:
pass
def init(url, key):
return PyMISP(url, key, True, 'json')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create an event on MISP.')
parser.add_argument("-d", "--distrib", type=int, help="The distribution setting used for the attributes and for the newly created event, if relevant. [0-3].")
parser.add_argument("-i", "--info", help="Used to populate the event info field if no event ID supplied.")
parser.add_argument("-a", "--analysis", type=int, help="The analysis level of the newly created event, if applicatble. [0-2]")
parser.add_argument("-t", "--threat", type=int, help="The threat level ID of the newly created event, if applicatble. [0-3]")
args = parser.parse_args()
misp = init(misp_url, misp_key)
event = misp.new_event(args.distrib, args.threat, args.analysis, args.info)
print event
response = misp.add_mutex(event, 'booh')
print response
| Python | 0.000001 |
23ce1dcedbd8c1204439db7df3d99a5f90b363fa | Add NARR Cross Section Example | examples/cross_section.py | examples/cross_section.py | # Copyright (c) 2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
======================
Cross Section Analysis
======================
The MetPy function `metpy.interpolate.cross_section` can obtain a cross-sectional slice through
gridded data.
"""
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.interpolate import cross_section
##############################
# **Getting the data**
#
# This example uses NARR reanalysis data for 18 UTC 04 April 1987 from NCEI
# (https://www.ncdc.noaa.gov/data-access/model-data).
#
# We use MetPy's CF parsing to get the data ready for use, and squeeze down the size-one time
# dimension.
data = xr.open_dataset(get_test_data('narr_example.nc', False))
data = data.metpy.parse_cf().squeeze()
print(data)
##############################
# Define start and end points:
start = (37.0, -105.0)
end = (35.5, -65.0)
##############################
# Get the cross section, and convert lat/lon to supplementary coordinates:
cross = cross_section(data, start, end)
cross.set_coords(('lat', 'lon'), True)
print(cross)
##############################
# For this example, we will be plotting potential temperature, relative humidity, and
# tangential/normal winds. And so, we need to calculate those, and add them to the dataset:
temperature, pressure, specific_humidity = xr.broadcast(cross['Temperature'],
cross['isobaric'],
cross['Specific_humidity'])
theta = mpcalc.potential_temperature(pressure, temperature)
rh = mpcalc.relative_humidity_from_specific_humidity(specific_humidity, temperature, pressure)
# These calculations return unit arrays, so put those back into DataArrays in our Dataset
cross['Potential_temperature'] = xr.DataArray(theta,
coords=temperature.coords,
dims=temperature.dims,
attrs={'units': theta.units})
cross['Relative_humidity'] = xr.DataArray(rh,
coords=specific_humidity.coords,
dims=specific_humidity.dims,
attrs={'units': rh.units})
cross['u_wind'].metpy.convert_units('knots')
cross['v_wind'].metpy.convert_units('knots')
cross['t_wind'], cross['n_wind'] = mpcalc.cross_section_components(cross['u_wind'],
cross['v_wind'])
print(cross)
##############################
# Now, we can make the plot.
# Define the figure object and primary axes
fig = plt.figure(1, figsize=(16., 9.))
ax = plt.axes()
# Plot RH using contourf
rh_contour = ax.contourf(cross['lon'], cross['isobaric'], cross['Relative_humidity'],
levels=np.arange(0, 1.05, .05), cmap='YlGnBu')
rh_colorbar = fig.colorbar(rh_contour)
# Plot potential temperature using contour, with some custom labeling
theta_contour = ax.contour(cross['lon'], cross['isobaric'], cross['Potential_temperature'],
levels=np.arange(250, 450, 5), colors='k', linewidths=2)
plt.clabel(theta_contour, theta_contour.levels[1::2], fontsize=8, colors='k', inline=1,
inline_spacing=8, fmt='%i', rightside_up=True, use_clabeltext=True, alpha=0.6)
# Plot winds using the axes interface directly, with some custom indexing to make the barbs
# less crowded
wind_slc_vert = list(range(0, 19, 2)) + list(range(19, 29))
wind_slc_horz = slice(5, 100, 5)
ax.barbs(cross['lon'][wind_slc_horz], cross['isobaric'][wind_slc_vert],
cross['t_wind'][wind_slc_vert, wind_slc_horz],
cross['n_wind'][wind_slc_vert, wind_slc_horz], color='k')
# Adjust the y-axis to be logarithmic
ax.set_yscale('symlog')
ax.set_yticklabels(np.arange(1000, 50, -100))
ax.set_ylim(cross['isobaric'].max(), cross['isobaric'].min())
plt.yticks(np.arange(1000, 50, -100))
# Define the CRS and inset axes
data_crs = data['Geopotential_height'].metpy.cartopy_crs
ax_inset = fig.add_axes([0.125, 0.665, 0.25, 0.25], projection=data_crs)
# Plot geopotential height at 500 hPa using xarray's contour wrapper
ax_inset.contour(data['x'], data['y'], data['Geopotential_height'].sel(isobaric=500.),
levels=np.arange(5100, 6000, 60), cmap='inferno')
# Plot the path of the cross section
endpoints = data_crs.transform_points(ccrs.Geodetic(),
*np.vstack([start, end]).transpose()[::-1])
ax_inset.scatter(endpoints[:, 0], endpoints[:, 1], c='k', zorder=2)
ax_inset.plot(cross['x'], cross['y'], c='k', zorder=2)
# Add geographic features
ax_inset.coastlines()
ax_inset.add_feature(cfeature.STATES.with_scale('50m'), edgecolor='k', alpha=0.2, zorder=0)
# Set the titles and axes labels
ax_inset.set_title('')
ax.set_title('NARR Cross-Section \u2013 {} to {} \u2013 Valid: {}\n'
'Potential Temperature (K), Tangential/Normal Winds (knots), '
'Relative Humidity (dimensionless)\n'
'Inset: Cross-Section Path and 500 hPa Geopotential Height'.format(
start, end, cross['time'].dt.strftime('%Y-%m-%d %H:%MZ').item()))
ax.set_ylabel('Pressure (hPa)')
ax.set_xlabel('Longitude (degrees east)')
rh_colorbar.set_label('Relative Humidity (dimensionless)')
plt.show()
| Python | 0 | |
def bonus_time(salary, bonus):
    """Return *salary* as a dollar string, multiplied by 10 when *bonus* is truthy."""
    multiplier = 10 if bonus else 1
    return '$' + str(salary * multiplier)
| Python | 0.000001 | |
7c16172f9ebe65d6928a72001a086637bf4bd725 | Fix buggy annotations for stdout/stderr. | scripts/lib/node_cache.py | scripts/lib/node_cache.py | from __future__ import print_function
import os
import hashlib
from os.path import dirname, abspath
if False:
from typing import Optional, List, IO, Tuple
from scripts.lib.zulip_tools import subprocess_text_output, run
ZULIP_PATH = dirname(dirname(dirname(abspath(__file__))))
NPM_CACHE_PATH = "/srv/zulip-npm-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
NPM_CACHE_PATH = "/home/travis/zulip-npm-cache"
def setup_node_modules(npm_args=None, stdout=None, stderr=None, copy_modules=False):
    # type: (Optional[List[str]], Optional[IO], Optional[IO], bool) -> None
    """Populate ./node_modules from a content-addressed cache.

    The cache key is a SHA-1 over package.json plus the npm and node
    versions (and any extra npm arguments), so changing any of them
    installs into a fresh cache directory instead of reusing a stale one.
    ``node_modules`` ends up as a symlink into the cache.
    """
    sha1sum = hashlib.sha1()
    sha1sum.update(subprocess_text_output(['cat', 'package.json']).encode('utf8'))
    sha1sum.update(subprocess_text_output(['npm', '--version']).encode('utf8'))
    sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))
    if npm_args is not None:
        # Sort so argument order doesn't change the cache key.
        sha1sum.update(''.join(sorted(npm_args)).encode('utf8'))
    npm_cache = os.path.join(NPM_CACHE_PATH, sha1sum.hexdigest())
    cached_node_modules = os.path.join(npm_cache, 'node_modules')
    success_stamp = os.path.join(cached_node_modules, '.success-stamp')
    # Check if a cached version already exists
    if not os.path.exists(success_stamp):
        do_npm_install(npm_cache, npm_args or [], stdout, stderr, copy_modules)
    print("Using cached node modules from %s" % (cached_node_modules,))
    cmds = [
        ['rm', '-rf', 'node_modules'],
        ["ln", "-nsf", cached_node_modules, 'node_modules'],
        # Stamp only after the install succeeded and the link is in place.
        ['touch', success_stamp],
    ]
    for cmd in cmds:
        run(cmd, stdout=stdout, stderr=stderr)
def do_npm_install(target_path, npm_args, stdout=None, stderr=None, copy_modules=False):
    # type: (str, List[str], Optional[IO], Optional[IO], bool) -> None
    """(Re)create *target_path* and install (or copy) node modules into it."""
    cmds = [
        ["sudo", "rm", "-rf", target_path],
        ['sudo', 'mkdir', '-p', target_path],
        # Hand ownership to the current user so npm can write without sudo.
        ["sudo", "chown", "{}:{}".format(os.getuid(), os.getgid()), target_path],
        ['cp', 'package.json', target_path],
    ]
    if copy_modules:
        print("Cached version not found! Copying node modules.")
        # Reuse an existing local node_modules tree instead of reinstalling.
        cmds.append(["mv", "node_modules", target_path])
    else:
        print("Cached version not found! Installing node modules.")
        cmds.append(['npm', 'install'] + npm_args + ['--prefix', target_path])
    for cmd in cmds:
        run(cmd, stdout=stdout, stderr=stderr)
| from __future__ import print_function
import os
import hashlib
from os.path import dirname, abspath
if False:
from typing import Optional, List, Tuple
from scripts.lib.zulip_tools import subprocess_text_output, run
ZULIP_PATH = dirname(dirname(dirname(abspath(__file__))))
NPM_CACHE_PATH = "/srv/zulip-npm-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
NPM_CACHE_PATH = "/home/travis/zulip-npm-cache"
def setup_node_modules(npm_args=None, stdout=None, stderr=None, copy_modules=False):
# type: (Optional[List[str]], Optional[str], Optional[str], Optional[bool]) -> None
sha1sum = hashlib.sha1()
sha1sum.update(subprocess_text_output(['cat', 'package.json']).encode('utf8'))
sha1sum.update(subprocess_text_output(['npm', '--version']).encode('utf8'))
sha1sum.update(subprocess_text_output(['node', '--version']).encode('utf8'))
if npm_args is not None:
sha1sum.update(''.join(sorted(npm_args)).encode('utf8'))
npm_cache = os.path.join(NPM_CACHE_PATH, sha1sum.hexdigest())
cached_node_modules = os.path.join(npm_cache, 'node_modules')
success_stamp = os.path.join(cached_node_modules, '.success-stamp')
# Check if a cached version already exists
if not os.path.exists(success_stamp):
do_npm_install(npm_cache, npm_args or [], stdout, stderr, copy_modules)
print("Using cached node modules from %s" % (cached_node_modules,))
cmds = [
['rm', '-rf', 'node_modules'],
["ln", "-nsf", cached_node_modules, 'node_modules'],
['touch', success_stamp],
]
for cmd in cmds:
run(cmd, stdout=stdout, stderr=stderr)
def do_npm_install(target_path, npm_args, stdout=None, stderr=None, copy_modules=False):
# type: (str, List[str], Optional[str], Optional[str], Optional[bool]) -> None
cmds = [
["sudo", "rm", "-rf", target_path],
['sudo', 'mkdir', '-p', target_path],
["sudo", "chown", "{}:{}".format(os.getuid(), os.getgid()), target_path],
['cp', 'package.json', target_path],
]
if copy_modules:
print("Cached version not found! Copying node modules.")
cmds.append(["mv", "node_modules", target_path])
else:
print("Cached version not found! Installing node modules.")
cmds.append(['npm', 'install'] + npm_args + ['--prefix', target_path])
for cmd in cmds:
run(cmd, stdout=stdout, stderr=stderr)
| Python | 0 |
d36762fcf98774560fe82b32b2d64d2eba1ec72b | Improve logging to debug invalid "extra_specs" entries | cinder/scheduler/filters/capabilities_filter.py | cinder/scheduler/filters/capabilities_filter.py | # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from cinder.scheduler import filters
from cinder.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class CapabilitiesFilter(filters.BaseHostFilter):
    """HostFilter to work with resource (instance & volume) type records."""

    def _satisfies_extra_specs(self, capabilities, resource_type):
        """Check if capabilities satisfy resource type requirements.

        Check that the capabilities provided by the services satisfy
        the extra specs associated with the resource type.
        """
        extra_specs = resource_type.get('extra_specs', [])
        if not extra_specs:
            return True
        for key, req in six.iteritems(extra_specs):
            # Either not scope format, or in capabilities scope
            scope = key.split(':')
            if len(scope) > 1 and scope[0] != "capabilities":
                continue
            elif scope[0] == "capabilities":
                del scope[0]
            # Walk nested capability dicts following the scoped key parts.
            cap = capabilities
            for index in range(len(scope)):
                try:
                    cap = cap.get(scope[index])
                except AttributeError:
                    # Intermediate value is not dict-like; spec can't match.
                    return False
                if cap is None:
                    # Use lazy %-style args (not eager "%" interpolation) so
                    # formatting is skipped when DEBUG logging is off; this
                    # also matches the style of the debug call below.
                    LOG.debug("Host doesn't provide capability '%(cap)s' ",
                              {'cap': scope[index]})
                    return False
            if not extra_specs_ops.match(cap, req):
                LOG.debug("extra_spec requirement '%(req)s' "
                          "does not match '%(cap)s'",
                          {'req': req, 'cap': cap})
                return False
        return True

    def host_passes(self, host_state, filter_properties):
        """Return True when the host can create the requested resource_type."""
        # Note(zhiteng) Currently only Cinder and Nova are using
        # this filter, so the resource type is either instance or
        # volume.
        resource_type = filter_properties.get('resource_type')
        if not self._satisfies_extra_specs(host_state.capabilities,
                                           resource_type):
            LOG.debug("%(host_state)s fails resource_type extra_specs "
                      "requirements", {'host_state': host_state})
            return False
        return True
| # Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from cinder.scheduler import filters
from cinder.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class CapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter to work with resource (instance & volume) type records."""
def _satisfies_extra_specs(self, capabilities, resource_type):
"""Check if capabilities satisfy resource type requirements.
Check that the capabilities provided by the services satisfy
the extra specs associated with the resource type.
"""
extra_specs = resource_type.get('extra_specs', [])
if not extra_specs:
return True
for key, req in six.iteritems(extra_specs):
# Either not scope format, or in capabilities scope
scope = key.split(':')
if len(scope) > 1 and scope[0] != "capabilities":
continue
elif scope[0] == "capabilities":
del scope[0]
cap = capabilities
for index in range(len(scope)):
try:
cap = cap.get(scope[index])
except AttributeError:
return False
if cap is None:
return False
if not extra_specs_ops.match(cap, req):
LOG.debug("extra_spec requirement '%(req)s' "
"does not match '%(cap)s'",
{'req': req, 'cap': cap})
return False
return True
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create resource_type."""
# Note(zhiteng) Currently only Cinder and Nova are using
# this filter, so the resource type is either instance or
# volume.
resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities,
resource_type):
LOG.debug("%(host_state)s fails resource_type extra_specs "
"requirements", {'host_state': host_state})
return False
return True
| Python | 0.000016 |
d10ec57d6f58a4f96a2f648cac1bc94dc78efc32 | Implement identifying to accounts | txircd/modules/extra/services/account_identify.py | txircd/modules/extra/services/account_identify.py | from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
irc.ERR_SERVICES = "955" # Custom numeric; 955 <TYPE> <SUBTYPE> <ERROR>
class AccountIdentify(ModuleData):
    """Provides the IDENTIFY and ID user commands for logging into accounts."""
    implements(IPlugin, IModuleData)
    name = "AccountIdentify"
    def userCommands(self):
        # Both commands share this module's parse/execute logic; ID is an
        # alias that differs only in the command name reported on errors.
        return [ ("IDENTIFY", 1, IdentifyCommand(self)),
            ("ID", 1, IdCommand(self)) ]
    def parseParams(self, command, user, params, prefix, tags):
        """Accept "<password>" or "<accountname> <password>" parameter forms."""
        if not params:
            user.sendSingleError("IdentifyParams", irc.ERR_NEEDMOREPARAMS, command, "Not enough parameters")
            return None
        if len(params) == 1:
            return {
                "password": params[0]
            }
        return {
            "accountname": params[0],
            "password": params[1]
        }
    def execute(self, user, data):
        """Attempt authentication; the user's nick is the default account name."""
        resultValue = self.ircd.runActionUntilValue("accountauthenticate", user, data["accountname"] if "accountname" in data else user.nick, data["password"])
        if not resultValue:
            # No module answered the action: account support isn't configured.
            user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "IDENTIFY", "This server doesn't have accounts set up.")
            user.sendMessage("NOTICE", "This server doesn't have accounts set up.")
            return True
        if resultValue[0]:
            return True
        # Authentication failed; resultValue[1] carries the error text.
        user.sendMessage(irc.ERR_SERVICES, "ACCOUNT", "IDENTIFY", resultValue[1])
        user.sendMessage("NOTICE", resultValue[1])
        return True
class IdentifyCommand(Command):
    """User command handler for IDENTIFY; delegates to AccountIdentify."""
    implements(ICommand)
    def __init__(self, module):
        self.module = module
    def parseParams(self, user, params, prefix, tags):
        return self.module.parseParams("IDENTIFY", user, params, prefix, tags)
    def execute(self, user, data):
        return self.module.execute(user, data)
class IdCommand(Command):
    """User command handler for ID, the shorthand alias of IDENTIFY."""
    implements(ICommand)
    def __init__(self, module):
        self.module = module
    def parseParams(self, user, params, prefix, tags):
        return self.module.parseParams("ID", user, params, prefix, tags)
    def execute(self, user, data):
        # Propagate the module's result exactly like IdentifyCommand does;
        # the original dropped it, making this handler return None to the
        # command layer while its sibling returned True.
        return self.module.execute(user, data)
identifyCommand = AccountIdentify() | Python | 0.000009 | |
166854466771850bda3384b75d0f8d0656c259f6 | add predict | gen_predict_res_format.py | gen_predict_res_format.py | # -*- coding: utf-8 -*-
'''
Created on Jul 9, 2013
@author: Chunwei Yan @ pkusz
@mail: yanchunwei@outlook.com
'''
import sys
from utils import get_num_lines, args_check
class Gen(object):
    """Merge prediction labels with test rows into "<label>\\t<row>" lines.

    Prediction values '1'/'-1' map to labels 1/0 via ``formats``. trans()
    pairs each prediction line with the corresponding test line, and
    tofile() writes the merged lines to the output path.
    """
    formats = {
        '1': 1,
        '-1': 0,
    }
    def __init__(self, fph, test_ph, tph):
        """fph: predictions file, test_ph: test-set file, tph: output path."""
        self.fph, self.test_ph, self.tph = fph, test_ph, tph
    def __call__(self):
        self.trans()
        self.tofile()
    def trans(self):
        """Build self.lines by pairing prediction labels with test rows."""
        self.lines = []
        with open(self.fph) as resf, open(self.test_ph) as testf:
            # zip() pairs the two files line by line in a single pass, so
            # there is no need to pre-count lines (the original read the
            # test file twice via get_num_lines()).
            for res, tes in zip(resf, testf):
                label = self.formats.get(res.strip())
                if label is None:
                    # Fail loudly on unknown labels instead of crashing
                    # later with an obscure TypeError from "%d" formatting.
                    raise ValueError(
                        "unexpected prediction label: %r" % res.strip())
                self.lines.append("%d\t%s" % (label, tes.strip()))
    def tofile(self):
        """Write the merged lines to self.tph, one per line."""
        with open(self.tph, 'w') as f:
            f.write('\n'.join(self.lines))
if __name__ == "__main__":
    # Expect three CLI arguments: predictions file, test file, output file.
    fph, test_ph, tph = args_check(3, "")
    g = Gen(fph, test_ph, tph)
    g()
| Python | 0.999925 | |
aa6837e14e520f5917cf1c452bd0c9a8ce2a27dd | Add new module for plugin loading | module/others/plugins.py | module/others/plugins.py | from maya import cmds
class Commands(object):
    """ class name must be 'Commands' """
    # Maps command names to their icon resource file names.
    commandDict = {}
    def _loadObjPlugin(self):
        """Load Maya's objExport plugin once and register the sample command."""
        if not cmds.pluginInfo("objExport", q=True, loaded=True):
            cmds.loadPlugin("objExport")
            # Bug fix: the original referenced the bare name `commandDict`,
            # which raises NameError at runtime -- the class attribute must
            # be reached through `self` (or `Commands`).
            self.commandDict['sampleCommand'] = "sphere.png"
            # ^ Don't forget to add the command to the dictionary.
| Python | 0 | |
67350e9ac3f2dc0fceb1899c8692adcd9cdd4213 | Add a test case to validate `get_unseen_notes` | frappe/tests/test_boot.py | frappe/tests/test_boot.py | import unittest
import frappe
from frappe.boot import get_unseen_notes
from frappe.desk.doctype.note.note import mark_as_seen
class TestBootData(unittest.TestCase):
    """Boot-payload tests for the notes shown to users at login."""
    def test_get_unseen_notes(self):
        """A public notify-on-login Note appears until the user marks it seen."""
        # Start from a clean slate so only the fixture note exists.
        frappe.db.delete("Note")
        frappe.db.delete("Note Seen By")
        note = frappe.get_doc(
            {
                "doctype": "Note",
                "title": "Test Note",
                "notify_on_login": 1,
                "content": "Test Note 1",
                "public": 1,
            }
        )
        note.insert()
        # Switch to a non-admin user: unseen notes are tracked per user.
        frappe.set_user("test@example.com")
        unseen_notes = [d.title for d in get_unseen_notes()]
        self.assertListEqual(unseen_notes, ["Test Note"])
        # After marking as seen the note must no longer be reported.
        mark_as_seen(note.name)
        unseen_notes = [d.title for d in get_unseen_notes()]
        self.assertListEqual(unseen_notes, [])
| Python | 0 | |
15fd5f6ddd3aa79a26b28d5ef4b93eeb12e28956 | add an update_users management command | controller/management/commands/update_users.py | controller/management/commands/update_users.py | """
Ensure that the right users exist:
- read USERS dictionary from auth.json
- if they don't exist, create them.
- if they do, update the passwords to match
"""
import json
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.auth.models import User
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """Sync the users listed in auth.json's USERS mapping.

    Existing users get their password reset to the configured value;
    missing users are created active with a placeholder @dummy.edx.org
    email address.
    """
    help = "Create users that are specified in auth.json"
    def handle(self, *args, **options):
        log.info("root is : " + settings.ENV_ROOT)
        # NOTE(review): ENV_ROOT supports the "/" join operator here, so it
        # is presumably a path object (path.py / pathlib) rather than a plain
        # string -- confirm before changing how auth_path is built.
        auth_path = settings.ENV_ROOT / "auth.json"
        log.info(' [*] reading {0}'.format(auth_path))
        with open(auth_path) as auth_file:
            AUTH_TOKENS = json.load(auth_file)
        users = AUTH_TOKENS.get('USERS', {})
        for username, pwd in users.items():
            log.info(' [*] Creating/updating user {0}'.format(username))
            try:
                # Update path: the user exists, just rotate the password.
                user = User.objects.get(username=username)
                user.set_password(pwd)
                user.save()
            except User.DoesNotExist:
                log.info(' ... {0} does not exist. Creating'.format(username))
                user = User.objects.create(username=username,
                                           email=username + '@dummy.edx.org',
                                           is_active=True)
                user.set_password(pwd)
                user.save()
        log.info(' [*] All done!')
| Python | 0.000002 | |
3d0f6085bceffc5941e55678da20d8db4a7d5ce2 | Create question4.py | huangguolong/question4.py | huangguolong/question4.py |
def fib(nums):
    '''
    Return the Fibonacci number at index ``nums`` (fib(0)=0, fib(1)=1).

    :param nums: a non-negative integer index into the sequence
    :return: the Fibonacci value at that index
    :raises ValueError: if ``nums`` is negative (the original recursed
        without bound for negative input)
    '''
    if nums < 0:
        raise ValueError("nums must be non-negative")
    # Iterate instead of naive double recursion: O(n) time and O(1) space
    # versus the original O(2**n) call tree.
    prev, cur = 0, 1
    for _ in range(nums):
        prev, cur = cur, prev + cur
    return prev
def createFib(n):
    '''
    Print and return the first ``n`` Fibonacci numbers.

    :param n: how many leading numbers of the sequence to produce
    :return: the list of the first ``n`` Fibonacci numbers
    '''
    # Build the sequence iteratively in a single pass instead of calling
    # the exponential recursive fib() once per index, which made the
    # original O(n * 2**n) overall.
    series = []
    prev, cur = 0, 1
    for _ in range(n):
        series.append(prev)
        prev, cur = cur, prev + cur
    print(series)
    # The original docstring promised a list but returned None; return it.
    return series
# Call the Fibonacci-list generator, showing the first n (=20) numbers.
createFib(20)
| Python | 0.00002 | |
448ca2cfb8f7e167b1395e84a4f2b4b4cea57905 | add file | crawler/jb51.py | crawler/jb51.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from async_spider import AsySpider
class Jb51Spider(AsySpider):
    """Async crawler over jb51.net article pages; handler only logs the URL."""
    def handle_html(self, url, html):
        # Currently just prints the fetched URL; the commented-out snippet
        # below shows how a page could be persisted to disk instead.
        print(url)
        '''
        filename = url.rsplit('/', 1)[1]
        with open(filename, 'w+') as f:
            f.write(html)
        '''
if __name__ == '__main__':
    # Enumerate article ids 1..72999 and crawl them with the async spider.
    urls = []
    for page in range(1, 73000):
        urls.append('http://www.jb51.net/article/%s.htm' % page)
    s = Jb51Spider(urls)
    s.run()
| Python | 0.000001 | |
55b3269f9c2cd22ef75a2632f04e37a9f723e961 | add data migration | accelerator/migrations/0033_migrate_gender_data.py | accelerator/migrations/0033_migrate_gender_data.py | # Generated by Django 2.2.10 on 2021-01-22 12:13
import sys
from django.contrib.auth import get_user_model
from django.db import migrations
# gender identity
GENDER_MALE = "Male"
GENDER_FEMALE = "Female"
GENDER_PREFER_TO_SELF_DESCRIBE = "I Prefer To Self-describe"
GENDER_PREFER_NOT_TO_SAY = "I Prefer Not To Say"
# gender
MALE_CHOICE = 'm'
FEMALE_CHOICE = 'f'
OTHER_CHOICE = 'o'
PREFER_NOT_TO_STATE_CHOICE = 'p'
gender_map = {
MALE_CHOICE: GENDER_MALE,
FEMALE_CHOICE: GENDER_FEMALE,
OTHER_CHOICE: GENDER_PREFER_TO_SELF_DESCRIBE,
PREFER_NOT_TO_STATE_CHOICE: GENDER_PREFER_NOT_TO_SAY,
}
def get_gender_choice_obj_dict(apps):
    """Return a mapping of GenderChoices.name -> GenderChoices instance.

    Uses the historical model via ``apps`` so the migration stays valid
    even if the current model class has since changed.
    """
    GenderChoices = apps.get_model('accelerator', 'GenderChoices')
    return {
        gender_choice.name: gender_choice
        for gender_choice in GenderChoices.objects.all()
    }
def add_gender_identity(profile, gender_choice):
    """Attach *gender_choice* to *profile* unless it is already present."""
    if not profile.gender_identity.filter(pk=gender_choice.pk).exists():
        profile.gender_identity.add(gender_choice.pk)
def migrate_gender_data_to_gender_identity(apps, schema_editor):
    """Copy each profile's legacy one-letter ``gender`` code onto the new
    many-to-many ``gender_identity`` relation.

    Profiles with unrecognized codes are reported and skipped rather than
    aborting the whole migration run.
    """
    users = get_user_model().objects.all()
    gender_choices = get_gender_choice_obj_dict(apps)
    for user in users:
        profile = user.get_profile()
        gender = profile.gender
        if gender:
            try:
                # Legacy codes are single letters ('m', 'f', 'o', 'p');
                # lower() tolerates historical upper-case entries.
                gender_choice = gender_choices[gender_map[gender.lower()]]
                add_gender_identity(profile, gender_choice)
            except KeyError:
                print(f"Exception: {type(profile)} ID:{profile.id}"
                      f" has unexpected gender value '{gender}'")
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0032_add_ethno_racial_identity_data'),
]
operations = [
migrations.RunPython(
migrate_gender_data_to_gender_identity,
migrations.RunPython.noop,
)
]
| Python | 0 | |
3344bb0a967c4217f6fa1d701b2c4dfb89d578aa | add new package : alluxio (#14143) | var/spack/repos/builtin/packages/alluxio/package.py | var/spack/repos/builtin/packages/alluxio/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Alluxio(Package):
"""
Alluxio (formerly known as Tachyon) is a virtual distributed storage
system. It bridges the gap between computation frameworks and storage
systems, enabling computation applications to connect to numerous
storage systems through a common interface.
"""
homepage = "https://github.com/Alluxio/alluxio"
url = "https://github.com/Alluxio/alluxio/archive/v2.1.0.tar.gz"
version('2.1.0', sha256='c8b5b7848488e0ac10b093eea02ef05fa822250669d184291cc51b2f8aac253e')
def install(self, spec, prefix):
install_tree('.', prefix)
| Python | 0 | |
6dc035051d666707fdc09e63f510dbc4edf1724d | Migrate lab_members | lab_members/migrations/0001_initial.py | lab_members/migrations/0001_initial.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Position',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, unique=True, help_text='Please enter a title for this position', default='', verbose_name='title')),
],
options={
'verbose_name': 'Position',
'verbose_name_plural': 'Positions',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Scientist',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=64, unique=True, help_text='Please enter a full name for this scientist', default='', verbose_name='full name')),
('slug', models.SlugField(max_length=64, help_text='Please enter a unique slug for this scientist', default='', verbose_name='slug')),
('title', models.ForeignKey(blank=True, help_text='Please specify a title for this scientist', default=None, to='lab_members.Position', null=True)),
],
options={
'verbose_name': 'Scientist',
'verbose_name_plural': 'Scientists',
},
bases=(models.Model,),
),
]
| Python | 0.000001 | |
f7f25876d3398cacc822faf2b16cc156e88c7fd3 | Use this enough, might as well add it. | misc/jp2_kakadu_pillow.py | misc/jp2_kakadu_pillow.py | # This the basic flow for getting from a JP2 to a jpg w/ kdu_expand and Pillow
# Useful for debugging the scenario independent of the server.
from PIL import Image
from PIL.ImageFile import Parser
from os import makedirs, path, unlink
import subprocess
import sys
KDU_EXPAND='/usr/local/bin/kdu_expand'
LIB_KDU='/usr/local/lib/libkdu_v72R.so'
TMP='/tmp'
INPUT_JP2='/home/jstroop/Desktop/nanteuil.jp2'
OUT_JPG='/tmp/test.jpg'
REDUCE=0
### cmds, etc.
pipe_fp = '%s/mypipe.bmp' % (TMP,)
kdu_cmd = '%s -i %s -o %s -num_threads 4 -reduce %d' % (KDU_EXPAND, INPUT_JP2, pipe_fp, REDUCE)
mkfifo_cmd = '/usr/bin/mkfifo %s' % (pipe_fp,)
rmfifo_cmd = '/bin/rm %s' % (pipe_fp,)
# make a named pipe
mkfifo_resp = subprocess.check_call(mkfifo_cmd, shell=True)
if mkfifo_resp == 0:
print 'mkfifo OK'
# write kdu_expand's output to the named pipe
kdu_expand_proc = subprocess.Popen(kdu_cmd, shell=True,
bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
env={ 'LD_LIBRARY_PATH' : KDU_EXPAND })
# open the named pipe and parse the stream
with open(pipe_fp, 'rb') as f:
p = Parser()
while True:
s = f.read(1024)
if not s:
break
p.feed(s)
im = p.close()
# finish kdu
kdu_exit = kdu_expand_proc.wait()
if kdu_exit != 0:
map(sys.stderr.write, kdu_expand_proc.stderr)
else:
# if kdu was successful, save to a jpg
map(sys.stdout.write, kdu_expand_proc.stdout)
im = im.resize((719,900), resample=Image.ANTIALIAS)
im.save(OUT_JPG, quality=95)
# remove the named pipe
rmfifo_resp = subprocess.check_call(rmfifo_cmd, shell=True)
if rmfifo_resp == 0:
print 'rm fifo OK'
| Python | 0 | |
2f9324f4d073082f47ecd8279d4bd85eaa1cf258 | add splits-io api wrapper | modules/apis/splits_io.py | modules/apis/splits_io.py | #! /usr/bin/env python2.7
import modules.apis.api_base as api
class SplitsIOAPI(api.API):
def __init__(self, session = None):
super(SplitsIOAPI, self).__init__("https://splits.io/api/v3", session)
def get_user_splits(self, user, **kwargs):
endpoint = "/users/{0}/pbs".format(user)
success, response = self.get(endpoint, **kwargs)
return success, response
| Python | 0 | |
d5e67563f23acb11fe0e4641d48b67fe3509822f | Add test migration removing ref to old company image | apps/companyprofile/migrations/0002_auto_20151014_2132.py | apps/companyprofile/migrations/0002_auto_20151014_2132.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='company',
old_name='image',
new_name='old_image',
),
]
| Python | 0 | |
a1e2e51c2777107bbc8a20429078638917149b6a | Remove unused import | src/compas/rpc/services/default.py | src/compas/rpc/services/default.py | import os
import inspect
import json
import socket
import compas
from compas.rpc import Server
from compas.rpc import Service
class DefaultService(Service):
pass
if __name__ == '__main__':
import sys
try:
port = int(sys.argv[1])
except:
port = 1753
print('Starting default RPC service on port {0}...'.format(port))
server = Server(("localhost", port))
server.register_function(server.ping)
server.register_function(server.remote_shutdown)
server.register_instance(DefaultService())
print('Listening, press CTRL+C to abort...')
server.serve_forever()
| import os
import inspect
import json
import socket
import compas
from compas.rpc import Server
from compas.rpc import Service
class DefaultService(Service):
pass
if __name__ == '__main__':
import sys
import threading
try:
port = int(sys.argv[1])
except:
port = 1753
print('Starting default RPC service on port {0}...'.format(port))
server = Server(("localhost", port))
server.register_function(server.ping)
server.register_function(server.remote_shutdown)
server.register_instance(DefaultService())
print('Listening, press CTRL+C to abort...')
server.serve_forever()
| Python | 0.000001 |
f20f286a3c5c6e2b9adf7220ac4426ce783d96b5 | Create regressors.py | trendpy/regressors.py | trendpy/regressors.py | # regressors.py
# MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Lasso(Strategy):
pass
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.