commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
27899a91fc6cdf73dccc7f9c5c353b05d2433c42 | add example participant client inbound drop rule for blackholing | pclnt/blackholing_test.py | pclnt/blackholing_test.py | {
"inbound": [
{
"cookie": 3,
"match": {
"eth_src": "08:00:27:89:3b:9f"
},
"action": {
"drop": 0
}
}
]
} | Python | 0 | |
cbded2a70fd854d502653137beb8809004e5874b | Add example Deep Zoom static tiler | examples/deepzoom/deepzoom-tile.py | examples/deepzoom/deepzoom-tile.py | #!/bin/env python
#
# deepzoom-tile - Convert whole-slide images to Deep Zoom format
#
# Copyright (c) 2010-2011 Carnegie Mellon University
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of version 2.1 of the GNU Lesser General Public License
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""An example program to generate a Deep Zoom directory tree from a slide."""
from multiprocessing import Process, JoinableQueue
from openslide import open_slide
from openslide.deepzoom import DeepZoomGenerator
from optparse import OptionParser
import os
import sys
class TileWorker(Process):
def __init__(self, queue, slide, tile_size, overlap):
Process.__init__(self, name='TileWorker')
self._queue = queue
self._slide = slide
self._tile_size = tile_size
self._overlap = overlap
def run(self):
dz = DeepZoomGenerator(open_slide(self._slide), self._tile_size,
self._overlap)
while True:
data = self._queue.get()
if data is None:
self._queue.task_done()
break
level, address, outfile = data
tile = dz.get_tile(level, address)
tile.save(outfile, optimize=True, quality=90)
self._queue.task_done()
class DeepZoomStaticTiler(object):
def __init__(self, slide, basename, format, tile_size, overlap, workers):
self._basename = basename
self._format = format
self._processed = 0
self._queue = JoinableQueue(2 * workers)
self._workers = workers
for _i in range(workers):
TileWorker(self._queue, slide, tile_size, overlap).start()
self._dz = DeepZoomGenerator(open_slide(slide), tile_size, overlap)
def run(self):
self._write_tiles()
self._write_dzi()
def _write_tiles(self):
for level in xrange(self._dz.level_count):
tiledir = os.path.join("%s_files" % self._basename, str(level))
if not os.path.exists(tiledir):
os.makedirs(tiledir)
cols, rows = self._dz.level_tiles[level]
for row in xrange(rows):
for col in xrange(cols):
tilename = os.path.join(tiledir, '%d_%d.%s' % (
col, row, self._format))
if not os.path.exists(tilename):
self._queue.put((level, (col, row), tilename))
self._tile_done()
for _i in range(self._workers):
self._queue.put(None)
self._queue.join()
def _tile_done(self):
self._processed += 1
count, total = self._processed, self._dz.tile_count
if count % 100 == 0 or count == total:
print >> sys.stderr, "Wrote %d/%d tiles\r" % (count, total),
if count == total:
print
def _write_dzi(self):
with open('%s.dzi' % self._basename, 'w') as fh:
fh.write(self._dz.get_dzi(self._format))
if __name__ == '__main__':
parser = OptionParser(usage='Usage: %prog [options] <slide>')
parser.add_option('-e', '--overlap', metavar='PIXELS', dest='overlap',
type='int', default=1,
help='overlap of adjacent tiles [1]')
parser.add_option('-f', '--format', metavar='{jpeg|png}', dest='format',
default='jpeg',
help='image format for tiles [jpeg]')
parser.add_option('-j', '--jobs', metavar='COUNT', dest='workers',
type='int', default=4,
help='number of worker processes to start [4]')
parser.add_option('-o', '--output', metavar='NAME', dest='basename',
help='base name of output file')
parser.add_option('-s', '--size', metavar='PIXELS', dest='tile_size',
type='int', default=256,
help='tile size [256]')
(opts, args) = parser.parse_args()
try:
slidefile = args[0]
except IndexError:
parser.error('Missing slide argument')
if opts.basename is None:
opts.basename = os.path.splitext(os.path.basename(slidefile))[0]
DeepZoomStaticTiler(slidefile, opts.basename, opts.format,
opts.tile_size, opts.overlap, opts.workers).run()
| Python | 0 | |
cd910f95753a138e2df48a1370e666bee49ad1dd | Add py solution for 693. Binary Number with Alternating Bits | py/binary-number-with-alternating-bits.py | py/binary-number-with-alternating-bits.py | class Solution(object):
def hasAlternatingBits(self, n):
"""
:type n: int
:rtype: bool
"""
power_2 = (n ^ (n >> 1)) + 1
return (power_2 & -power_2) == power_2
| Python | 0.000085 | |
b34c0ec439a997705799136e56a926649bd93e52 | add new function to test whether an object is completely within the bounds of an image | plantcv/plantcv/within_frame.py | plantcv/plantcv/within_frame.py | import cv2 as cv2
import numpy as np
def within_frame(img, obj):
'''
This function tests whether the plant object is completely in the field of view
Input:
img - an image with the bounds you are interested in
obj - a single object, preferably after calling pcv.image_composition(), that is from within `img`
Returns:
in_bounds - a boolean (True or False) whether the object touches the edge of the image
:param img: numpy.ndarray
:param obj: str
:return in_bounds: boolean
'''
# Check if object is touching image boundaries (QC)
if len(np.shape(img)) == 3:
ix, iy, iz = np.shape(img)
else:
ix, iy = np.shape(img)
size1 = ix, iy
frame_background = np.zeros(size1, dtype=np.uint8)
frame = frame_background + 1
frame_contour, frame_hierarchy = cv2.findContours(frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]
ptest = []
vobj = np.vstack(obj)
for i, c in enumerate(vobj):
xy = tuple(c)
pptest = cv2.pointPolygonTest(frame_contour[0], xy, measureDist=False)
ptest.append(pptest)
in_bounds = all(c == 1 for c in ptest)
return(in_bounds)
| Python | 0.000054 | |
c4040803cb670f913bc8743ee68f5a5f0721d4f8 | Add game logic | backend/game.py | backend/game.py | # All game related code
import json
import random
class Game():
def __init__(self):
self.players = {}
self.turn = None
self.running = False
def add_player(self, conn, data):
player = Player(conn, data)
self.players[player.get_name()] = player
conn.send(json.dumps({'action': 'accepted', 'data': ''}))
return player
def wait_for_answer(self, player):
# Initial start of game
if not self.running() and len(self.players) == 3:
starter = self.start_game()
data = {'turn': starter.get_name(), 'cards': []}
return json.dumps({'action': 'start', 'data': data})
return self.handle_round(self, player)
def handle_round(self, player):
pass
def start_game(self):
self.turn = random.choice(self.players)
return self.turn
class Player():
def __init__(self, conn, data):
self.name = data['name']
self.connection = conn
self.cards = []
def get_name(self):
return self.name
class Card():
def __init__(self):
pass
| Python | 0.000059 | |
69e22c778a576f746784270fa9971a6399433f92 | Add docstring to UnivariateFilter. | examples/plot_feature_selection.py | examples/plot_feature_selection.py | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM attributes small weights to these features, but these
weight are non zero. Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn import datasets, svm
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.normal(size=(len(iris.data), 35))
# Add the noisy data to the informative features
x = np.hstack((iris.data, E))
y = iris.target
################################################################################
pl.figure(1)
pl.clf()
x_indices = np.arange(x.shape[-1])
################################################################################
# Univariate feature selection
from scikits.learn.feature_selection import univariate_selection as univ_selection
# As a scoring function, we use a F test for classification
# We use the default selection function: the 10% most significant
# features
selector = univ_selection.SelectFpr(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector._pvalues)
scores /= scores.max()
pl.bar(x_indices-.45, scores, width=.3,
label=r'Univariate score ($-\log(p\,values)$)',
color='g')
################################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(x, y)
svm_weights = (clf.support_**2).sum(axis=0)
svm_weights /= svm_weights.max()
pl.bar(x_indices-.15, svm_weights, width=.3, label='SVM weight',
color='r')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.axis('tight')
pl.legend()
pl.show()
| """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM attributes small weights to these features, but these
weight are non zero. Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
import numpy as np
import pylab as pl
################################################################################
# import some data to play with
# The IRIS dataset
from scikits.learn import datasets, svm
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.normal(size=(len(iris.data), 35))
# Add the noisy data to the informative features
x = np.hstack((iris.data, E))
y = iris.target
################################################################################
pl.figure(1)
pl.clf()
x_indices = np.arange(x.shape[-1])
################################################################################
# Univariate feature selection
from scikits.learn.feature_selection import univ_selection
# As a scoring function, we use a F test for classification
# We use the default selection function: the 10% most significant
# features
selector = univ_selection.UnivSelection(
score_func=univ_selection.f_classif)
selector.fit(x, y)
scores = -np.log(selector.p_values_)
scores /= scores.max()
pl.bar(x_indices-.45, scores, width=.3,
label=r'Univariate score ($-\log(p\,values)$)',
color='g')
################################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(x, y)
svm_weights = (clf.support_**2).sum(axis=0)
svm_weights /= svm_weights.max()
pl.bar(x_indices-.15, svm_weights, width=.3, label='SVM weight',
color='r')
################################################################################
# Now fit an SVM with added feature selection
selector = univ_selection.UnivSelection(
estimator=clf,
score_func=univ_selection.f_classif)
selector.fit(x, y)
svm_weights = (clf.support_**2).sum(axis=0)
svm_weights /= svm_weights.max()
full_svm_weights = np.zeros(selector.support_.shape)
full_svm_weights[selector.support_] = svm_weights
pl.bar(x_indices+.15, full_svm_weights, width=.3,
label='SVM weight after univariate selection',
color='b')
pl.title("Comparing feature selection")
pl.xlabel('Feature number')
pl.yticks(())
pl.axis('tight')
pl.legend()
pl.show()
| Python | 0 |
1beec05941a6a34452bea6e9f60a1673c0f0925f | add base test case file | keen/tests/base_test_case.py | keen/tests/base_test_case.py | __author__ = 'dkador'
| Python | 0.000001 | |
1fa849f1a0eadad9573b677d3904986d76f900eb | Create main.py | challenge_2/python/wost/main.py | challenge_2/python/wost/main.py | """
Python 3.6:
:: Counts all the instances of all the elements in a list.
:: Returns all the instances with a count of 1.
"""
def find_one_in_list(a_list):
a_dict = {}
for char in a_list:
if char not in a_dict.keys():
a_dict[char] = 1
else:
a_dict[char] += 1
for letter in a_dict.keys():
if a_dict[letter] == 1:
print(letter, end=" ")
print()
def main():
# Returns 6, 7.
find_one_in_list([5, 4, 3, 4, 5, 6, 1, 3, 1, 7, 8, 8])
# Returns b.
find_one_in_list(["a", "b", "c", "a", "c", "W", "W"])
# Returns A, 5, r.
find_one_in_list(["A", "b", "d", "r", 4, 5, 4, "b", "d"])
# Returns nothing.
find_one_in_list([])
if __name__ == "__main__":
main()
| Python | 0.000001 | |
ac4679b4dcbbc3b2a29230233afc138f98cf2c42 | Add the basics | anvil.py | anvil.py | import gzip
import io
import nbt.nbt
import pathlib
import re
import zlib
class Region:
def __init__(self, path):
if isinstance(path, str):
path = pathlib.Path(path)
with path.open('rb') as f:
data = f.read()
self.locations = data[:4096]
self.timestamps = data[4096:8192]
self.data = data[8192:]
match = re.search('r\.(-?[0-9]+)\.(-?[0-9]+)\.mca$', path.name)
if match:
self.x = int(match.group(1))
self.z = int(match.group(2))
else:
self.x = None
self.z = None
def chunk_column(self, x, z):
x_offset = x & 31
z_offset = z & 31
meta_offset = 4 * ((x_offset & 32) + (z_offset & 32) * 32)
chunk_location = self.locations[meta_offset:meta_offset + 4]
offset = chunk_location[0] * (256 ** 2) + chunk_location[1] * 256 + chunk_location[2]
if offset == 0:
return ChunkColumn(None, x=x, z=z)
else:
offset -= 2
sector_count = chunk_location[3]
return ChunkColumn(self.data[4096 * offset:4096 * (offset + sector_count)], x=x, z=z)
class ChunkColumn:
def __init__(self, data, *, x=None, z=None):
self.x = x
self.z = z
length = data[0] * (256 ** 3) + data[1] * (256 ** 2) + data[2] * 256 + data[3]
compression = data[4]
compressed_data = data[5:4 + length]
if compression == 1: # gzip
decompress = gzip.decompress
elif compression == 2: # zlib
decompress = zlib.decompress
else:
raise ValueError('Unknown compression method: {}'.format(compression))
self.data = nbt.nbt.NBTFile(buffer=io.BytesIO(decompress(compressed_data)))
| Python | 0.006448 | |
702abe6dc661fbcda04f743edc56d2938098cefa | Add checkJSON file function only for checking a JSON file against a specified schema | src/main/python/convertfiles/checkJSON.py | src/main/python/convertfiles/checkJSON.py | #!/nfs/projects/c/ci3_jwaldo/MONGO/bin/python
"""
This function will check an existing JSON newline delimited file
against a specified schema
Input is a newline delimited JSON file and schema file
Output is a summary printout of statistics
Usage:
python checkJSON [-options]
OPTIONS:
--input Name of input filename (required)
--output Name of output filename
--schema Specify JSON Schema (required)
--schema-name Specify JSON Schema name within json file, if it exists
@author: G.Lopez
"""
import convertCSVtoJSON as converter
from path import path
import json
from collections import OrderedDict
import argparse
import sys
# Maintain Stats
LINE_CNT = 0
LINE_CNT_1000 = 1000
def checkJSON(inputFile, schemaFile, schemaName=None):
global LINE_CNT
# Read specified schema file
checkFormat = converter.convertCSVtoJSON()
schema_dict = checkFormat.readSchema( path(schemaFile), schemaName )
# Read JSON file
fin = open(inputFile, 'r')
for line in fin:
try:
json_rec = json.loads(line, object_pairs_hook=OrderedDict)
checkFormat.cleanJSONline(json_rec, schema_dict, applySchema=False)
checkFormat.checkIllegalKeys(json_rec, fixkeys=False)
# Print procesing Counter
LINE_CNT = LINE_CNT + 1
if LINE_CNT % LINE_CNT_1000 == 0:
sys.stdout.write("[main]: %dk Lines processed\r" % ( LINE_CNT / LINE_CNT_1000 ) )
sys.stdout.flush()
except:
print "[checkJSON]: Error parsing JSON line at line %s" % LINE_CNT
pass
checkFormat.printOtherStats()
checkFormat.calculateSchemaStats()
checkFormat.printSchemaStats()
checkFormat.calculateOverallSummary()
checkFormat.printOverallSummary()
def main():
"""
Main Program to Check Specified JSON file against Schema
"""
# Setup Command Line Options
text_help = '''usage: %prog [-options] '''
text_description = ''' Check JSON schema script '''
parser = argparse.ArgumentParser( prog='PROG',
description=text_description)
parser.add_argument("--input", type=str, help="Name of input file", required=True)
parser.add_argument("--schema", type=str, help="Specify JSON Schema", required=True)
parser.add_argument("--schema-name", type=str, help="Specify JSON Schema Name")
args = vars(parser.parse_args())
print "[main]: arguments passed => %s" % args
# Read Input File
print "[main]: Reading JSON input file %s " % args['input']
checkJSON( args['input'], args['schema'], args['schema_name'] )
if __name__ == '__main__':
main()
| Python | 0 | |
7330f9f1423fe7ee169569957d537441b6d72c08 | Create 0106_us_city_synonyms.py | 2019/0106_us_city_synonyms.py | 2019/0106_us_city_synonyms.py | #%%
"""
NPR 2019-01-06
https://www.npr.org/2019/01/06/682575357/sunday-puzzle-stuck-in-the-middle
Name a major U.S. city in 10 letters. If you have the right one, you can rearrange its letters to get two 5-letter words that are synonyms. What are they?
"""
import sys
sys.path.append('..')
import nprcommontools as nct
from nltk.corpus import gazetteers
#%%
COMMON_WORDS = frozenset(x for x in nct.get_common_words() if len(x) == 5)
#%%
US_CITIES = set(nct.alpha_only(x.lower()) for x in gazetteers.words('uscities.txt') if len(nct.alpha_only(x)) == 10)
city_dict = nct.make_sorted_dict(US_CITIES)
#%%
for c1 in COMMON_WORDS:
my_synonyms = nct.get_synonyms(c1)
for c2 in my_synonyms:
sort_word = nct.sort_string(''.join(c1+c2))
if sort_word in city_dict:
print(c1,c2,city_dict[sort_word])
| Python | 0.00367 | |
2f08053dc04470c9a1e4802e0e90c198bb5eae63 | Update app/views/account/__init__.py | app/views/account/__init__.py | app/views/account/__init__.py | from flask import Blueprint
account = Blueprint(
'account',
__name__
)
from . import views
| Python | 0 | |
5470661c6f171f1e9da609c3bf67ece21cf6d6eb | Add example for response status code | examples/return_400.py | examples/return_400.py | import hug
from falcon import HTTP_400
@hug.get()
def only_positive(positive: int, response):
if positive < 0:
response.status = HTTP_400 | Python | 0.000001 | |
450f55f158bdec4b290851d68b8b79bd824d50f6 | Add the joystick test | bin/joy_test.py | bin/joy_test.py | #!/usr/bin/env python
from __future__ import print_function
import pygame
# Define some colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
# This is a simple class that will help us print to the screen
# It has nothing to do with the joysticks, just outputing the
# information.
class TextPrint:
def __init__(self):
self.reset()
self.font = pygame.font.Font(None, 20)
def print(self, screen, textString):
textBitmap = self.font.render(textString, True, BLACK)
screen.blit(textBitmap, [self.x, self.y])
self.y += self.line_height
def reset(self):
self.x = 10
self.y = 10
self.line_height = 15
def indent(self):
self.x += 10
def unindent(self):
self.x -= 10
pygame.init()
# Set the width and height of the screen [width,height]
size = [500, 700]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("My Game")
#Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# Initialize the joysticks
pygame.joystick.init()
# Get ready to print
textPrint = TextPrint()
# -------- Main Program Loop -----------
while done==False:
# EVENT PROCESSING STEP
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done=True # Flag that we are done so we exit this loop
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN:
print("Joystick button pressed.")
if event.type == pygame.JOYBUTTONUP:
print("Joystick button released.")
# DRAWING STEP
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
textPrint.reset()
# Get count of joysticks
joystick_count = pygame.joystick.get_count()
textPrint.print(screen, "Number of joysticks: {}".format(joystick_count) )
textPrint.indent()
# For each joystick:
for i in range(joystick_count):
joystick = pygame.joystick.Joystick(i)
joystick.init()
textPrint.print(screen, "Joystick {}".format(i) )
textPrint.indent()
# Get the name from the OS for the controller/joystick
name = joystick.get_name()
textPrint.print(screen, "Joystick name: {}".format(name) )
# Usually axis run in pairs, up/down for one, and left/right for
# the other.
axes = joystick.get_numaxes()
textPrint.print(screen, "Number of axes: {}".format(axes) )
textPrint.indent()
for i in range( axes ):
axis = joystick.get_axis( i )
textPrint.print(screen, "Axis {} value: {:>6.3f}".format(i, axis) )
textPrint.unindent()
buttons = joystick.get_numbuttons()
textPrint.print(screen, "Number of buttons: {}".format(buttons) )
textPrint.indent()
for i in range( buttons ):
button = joystick.get_button( i )
textPrint.print(screen, "Button {:>2} value: {}".format(i,button) )
textPrint.unindent()
# Hat switch. All or nothing for direction, not like joysticks.
# Value comes back in an array.
hats = joystick.get_numhats()
textPrint.print(screen, "Number of hats: {}".format(hats) )
textPrint.indent()
for i in range( hats ):
hat = joystick.get_hat( i )
textPrint.print(screen, "Hat {} value: {}".format(i, str(hat)) )
textPrint.unindent()
textPrint.unindent()
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Limit to 20 frames per second
clock.tick(20)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit ()
| Python | 0.000004 | |
34f44cd57baf9f0a548d728e90ca0c67f47b08a1 | Add tests for Resource | tests/test_resource.py | tests/test_resource.py | import unittest
import soccermetrics
from soccermetrics import __api_version__
from soccermetrics.rest import SoccermetricsRestClient
from soccermetrics.rest.resource import Resource
class ResourceTest(unittest.TestCase):
def setUp(self):
base_url = "http://api-summary.soccermetrics.net"
auth = dict(account="APP_ID",api_key="APP_KEY")
self.resource = Resource(base_url, auth)
def test_initialization(self):
self.assertEqual(self.resource.auth['account'],"APP_ID")
self.assertEqual(self.resource.auth['api_key'],"APP_KEY")
self.assertEqual(self.resource.endpoint,'/%s' % __api_version__) | Python | 0 | |
0b0d77ca77cf5359175836d68fc0bcce3829d731 | Create change_config.py | static/scripts/change_hostname/change_config.py | static/scripts/change_hostname/change_config.py | import os, sys
from change_gluu_host import Installer, FakeRemote, ChangeGluuHostname
name_changer = ChangeGluuHostname(
old_host='<current_hostname>',
new_host='<new_hostname>',
cert_city='<city>',
cert_mail='<email>',
cert_state='<state_or_region>',
cert_country='<country>',
server='<actual_hostname_of_server>',
ip_address='<ip_address_of_server>',
ldap_password="<ldap_password>",
os_type='<linux_distro>',
local= True
)
r = name_changer.startup()
if not r:
sys.exit(1)
name_changer.change_appliance_config()
name_changer.change_clients()
name_changer.change_uma()
name_changer.change_httpd_conf()
name_changer.create_new_certs()
name_changer.change_host_name()
name_changer.modify_etc_hosts()
| Python | 0.000002 | |
d3d077cb7aae7a5b125dcd7daa19f00e3e22a53b | Add user-desc module | cme/modules/user_description.py | cme/modules/user_description.py | from pathlib import Path
from datetime import datetime
from impacket.ldap import ldap, ldapasn1
from impacket.ldap.ldap import LDAPSearchError
class CMEModule:
'''
Get user descriptions stored in Active Directory.
Module by Tobias Neitzel (@qtc_de)
'''
name = 'user-desc'
description = 'Get user descriptions stored in Active Directory'
supported_protocols = ['ldap']
opsec_safe = True
multiple_hosts = True
def options(self, context, module_options):
'''
LDAP_FILTER Custom LDAP search filter (fully replaces the default search)
DESC_FILTER An additional seach filter for descriptions (supports wildcard *)
DESC_INVERT An additional seach filter for descriptions (shows non matching)
USER_FILTER An additional seach filter for usernames (supports wildcard *)
USER_INVERT An additional seach filter for usernames (shows non matching)
KEYWORDS Use a custom set of keywords (comma separated)
ADD_KEYWORDS Add additional keywords to the default set (comma separated)
'''
self.log_file = None
self.desc_count = 0
self.context = context
self.account_names = set()
self.keywords = {'pass', 'creds', 'creden', 'key', 'secret', 'default'}
if 'LDAP_FILTER' in module_options:
self.search_filter = module_options['LDAP_FILTER']
else:
self.search_filter = '(&(objectclass=user)'
if 'DESC_FILTER' in module_options:
self.search_filter += '(description={})'.format(module_options['DESC_FILTER'])
if 'DESC_INVERT' in module_options:
self.search_filter += '(!(description={}))'.format(module_options['DESC_INVERT'])
if 'USER_FILTER' in module_options:
self.search_filter += '(sAMAccountName={})'.format(module_options['USER_FILTER'])
if 'USER_INVERT' in module_options:
self.search_filter += '(!(sAMAccountName={}))'.format(module_options['USER_INVERT'])
self.search_filter += ')'
if 'KEYWORDS' in module_options:
self.keywords = set(module_options['KEYWORDS'].split(','))
elif 'ADD_KEYWORDS' in module_options:
add_keywords = set(module_options['ADD_KEYWORDS'].split(','))
self.keywords = self.keywords.union(add_keywords)
def __del__(self):
'''
Destructor - closes the log file.
'''
try:
self.log_file.close()
info = 'Saved {} user descriptions to {}'.format(self.desc_count, self.log_file.name)
self.context.log.highlight(info)
except AttributeError:
pass
def on_login(self, context, connection):
'''
On successful LDAP login we perform a search for all user objects that have a description.
Users can specify additional LDAP filters that are applied to the query.
'''
self.create_log_file(connection.conn.getRemoteHost(), datetime.now().strftime("%Y%m%d_%H%M%S"))
context.log.debug("Starting LDAP search with search filter '{}'".format(self.search_filter))
try:
sc = ldap.SimplePagedResultsControl()
connection.ldapConnection.search(searchFilter=self.search_filter,
attributes=['sAMAccountName', 'description'],
sizeLimit=0, searchControls=[sc],
perRecordCallback=self.process_record)
except LDAPSearchError as e:
context.log.error('Obtained unexpected exception: {}'.format(str(e)))
def create_log_file(self, host, time):
'''
Create a log file for dumping user descriptions.
'''
logfile = 'UserDesc-{}-{}.log'.format(host, time)
logfile = Path.home().joinpath('.cme').joinpath('logs').joinpath(logfile)
self.context.log.debug("Creating log file '{}'".format(logfile))
self.log_file = open(logfile, 'w')
self.append_to_log("User:", "Description:")
def append_to_log(self, user, description):
'''
Append a new entry to the log file. Helper function that is only used to have an
unified padding on the user field.
'''
print(user.ljust(25), description, file=self.log_file)
def process_record(self, item):
'''
Function that is called to process the items obtained by the LDAP search. All items are
written to the log file per default. Items that contain one of the keywords configured
within this module are also printed to stdout.
On large Active Directories there seems to be a problem with duplicate user entries. For
some reason the process_record function is called multiple times with the same user entry.
Not sure whether this is a fault by this module or by impacket. As a workaround, this
function adds each new account name to a set and skips accounts that have already been added.
'''
if not isinstance(item, ldapasn1.SearchResultEntry):
return
sAMAccountName = ''
description = ''
try:
for attribute in item['attributes']:
if str(attribute['type']) == 'sAMAccountName':
sAMAccountName = attribute['vals'][0].asOctets().decode('utf-8')
elif str(attribute['type']) == 'description':
description = attribute['vals'][0].asOctets().decode('utf-8')
except Exception as e:
entry = sAMAccountName or 'item'
self.context.error("Skipping {}, cannot process LDAP entry due to error: '{}'".format(entry, str(e)))
if description and sAMAccountName not in self.account_names:
self.desc_count += 1
self.append_to_log(sAMAccountName, description)
if self.highlight(description):
self.context.log.highlight('User: {} - Description: {}'.format(sAMAccountName, description))
self.account_names.add(sAMAccountName)
def highlight(self, description):
'''
Check for interesting entries. Just checks whether certain keywords are contained within the
user description. Keywords are configured at the top of this class within the options function.
It is tempting to implement more logic here (e.g. catch all strings that are longer than seven
characters and contain 3 different character classes). Such functionality is nice when playing
CTF in small AD environments. When facing a real AD, such functionality gets annoying, because
it generates to much output with 99% of it being false positives.
The recommended way when targeting user descriptions is to use the keyword filter to catch low
hanging fruites. More dedicated searches for sensitive information should be done using the logfile.
This allows you to refine your search query at any time without having to pull data from AD again.
'''
for keyword in self.keywords:
if keyword.lower() in description.lower():
return True
return False
| Python | 0 | |
3cb39bc8be7fdf857ebbdd2f78cbb617b2dda104 | Create PowofTwo_003.py | leetcode/231-Power-of-Two/PowofTwo_003.py | leetcode/231-Power-of-Two/PowofTwo_003.py | class Solution:
# @param {integer} n
# @return {boolean}
def isPowerOfTwo(self, n):
return n > 0 and (n & n - 1 is 0)
| Python | 0.000009 | |
90987fccd2f604a5224e5b1cf8f91073b173fdc8 | Splitting a sentence by ending characters | split_sentences.py | split_sentences.py | """ Splitting a sentence by ending characters """
import re
st1 = " Another example!! Let me contribute 0.50 cents here?? \
How about pointer '.' character inside the sentence? \
Uni Mechanical Pencil Kurutoga, Blue, 0.3mm (M310121P.33). \
Maybe there could be a multipoint delimeter?.. Just maybe... "
st2 = "One word"
def split_sentences(st):
st = st.strip() + '. '
sentences = re.split(r'[.?!][.?!\s]+', st)
return sentences[:-1]
print(split_sentences(st1))
print(split_sentences(st2))
| Python | 1 | |
98d8716192bfb6b4223d84855f647e2b698b5f19 | Add test for viewport inspection | tests/test_viewport.py | tests/test_viewport.py | from tests.base import IntegrationTest
from time import sleep
class TestViewportsTaskGeneration(IntegrationTest):
    """On save, tasks matching the viewport filter (+work) are generated
    into the buffer under the viewport header (checked against
    vimoutput by the base class -- see tests.base)."""
    viminput = """
    === Work tasks | +work ===
    """
    vimoutput = """
    === Work tasks | +work ===
    * [ ] tag work task #{uuid}
    """
    # Pre-existing taskwarrior tasks the harness loads before the test.
    tasks = [
        dict(description="tag work task", tags=['work']),
    ]
    def execute(self):
        # Saving the buffer triggers taskwiki's viewport processing.
        self.command("w", regex="written$", lines=1)
class TestViewportsTaskRemoval(IntegrationTest):
    """On save, a listed task that no longer matches the viewport filter
    (-work vs. the task's 'work' tag) disappears from the viewport."""
    viminput = """
    === Work tasks | -work ===
    * [ ] tag work task #{uuid}
    """
    vimoutput = """
    === Work tasks | -work ===
    """
    # Pre-existing taskwarrior tasks the harness loads before the test.
    tasks = [
        dict(description="tag work task", tags=['work']),
    ]
    def execute(self):
        self.command("w", regex="written$", lines=1)
class TestViewportDefaultsAssigment(IntegrationTest):
    """A task created under a viewport picks up the viewport's default
    attributes (here the 'work' tag) when the buffer is saved."""

    viminput = """
    === Work tasks | +work ===
    * [ ] tag work task
    """

    vimoutput = """
    === Work tasks | +work ===
    * [ ] tag work task #{uuid}
    """

    def execute(self):
        self.command("w", regex="written$", lines=1)

        pending = self.tw.tasks.pending()
        assert len(pending) == 1

        created = pending[0]
        assert created['description'] == 'tag work task'
        assert created['status'] == 'pending'
        assert created['tags'] == ['work']
class TestViewportDefaultsOverriding(IntegrationTest):
    """A later viewport modstring (project:Chores) overrides the default
    derived from the filter part (project:Home)."""

    viminput = """
    === Work tasks | project:Home +home | project:Chores ===
    * [ ] home task
    """

    vimoutput = """
    === Work tasks | project:Home +home | project:Chores ===
    """

    def execute(self):
        self.command("w", regex="written$", lines=1)

        pending = self.tw.tasks.pending()
        assert len(pending) == 1

        saved = pending[0]
        assert saved['description'] == 'home task'
        assert saved['status'] == 'pending'
        assert saved['project'] == 'Chores'
        assert saved['tags'] == ['home']
class TestViewportDefaultsRemoval(IntegrationTest):
    """Saving under a viewport that clears a default (bare ``project:``)
    must leave that attribute unset on the created task."""

    viminput = """
    === Work tasks | project:Home +home | project: ===
    * [ ] home task
    """

    vimoutput = """
    === Work tasks | project:Home +home | project: ===
    """

    def execute(self):
        self.command("w", regex="written$", lines=1)

        assert len(self.tw.tasks.pending()) == 1
        task = self.tw.tasks.pending()[0]
        assert task['description'] == 'home task'
        assert task['status'] == 'pending'
        # PEP 8: comparisons against None use identity, not equality.
        assert task['project'] is None
        assert task['tags'] == ['home']
class TestViewportInspection(IntegrationTest):
    """Pressing <CR> on a viewport header line opens the inspection
    buffer summarizing the viewport's filter, defaults and counts."""
    viminput = """
    === Work tasks | +work ===
    * [ ] tag work task #{uuid}
    """
    vimoutput = """
    ViewPort inspection:
    --------------------
    Name: Work tasks
    Filter used: -DELETED +work
    Defaults used: tags:['work']
    Matching taskwarrior tasks: 1
    Displayed tasks: 1
    Tasks to be added:
    Tasks to be deleted:
    """
    tasks = [
        dict(description="tag work task", tags=['work']),
    ]
    def execute(self):
        self.command("w", regex="written$", lines=1)
        # Move to the viewport header line and trigger inspection.
        self.client.feedkeys('1gg')
        self.client.feedkeys(r'\<CR>')
        # feedkeys is asynchronous; give vim time to open the buffer.
        sleep(0.5)
        # The current buffer must now be a taskwiki buffer.
        assert self.command(":py print vim.current.buffer", regex="<buffer taskwiki.")
| from tests.base import IntegrationTest
class TestViewportsTaskGeneration(IntegrationTest):
    """On save, tasks matching the viewport filter (+work) are generated
    into the buffer under the viewport header (checked against
    vimoutput by the base class -- see tests.base)."""
    viminput = """
    === Work tasks | +work ===
    """
    vimoutput = """
    === Work tasks | +work ===
    * [ ] tag work task #{uuid}
    """
    # Pre-existing taskwarrior tasks the harness loads before the test.
    tasks = [
        dict(description="tag work task", tags=['work']),
    ]
    def execute(self):
        # Saving the buffer triggers taskwiki's viewport processing.
        self.command("w", regex="written$", lines=1)
class TestViewportsTaskRemoval(IntegrationTest):
    """On save, a listed task that no longer matches the viewport filter
    (-work vs. the task's 'work' tag) disappears from the viewport."""
    viminput = """
    === Work tasks | -work ===
    * [ ] tag work task #{uuid}
    """
    vimoutput = """
    === Work tasks | -work ===
    """
    # Pre-existing taskwarrior tasks the harness loads before the test.
    tasks = [
        dict(description="tag work task", tags=['work']),
    ]
    def execute(self):
        self.command("w", regex="written$", lines=1)
class TestViewportDefaultsAssigment(IntegrationTest):
    """A task created under a viewport picks up the viewport's default
    attributes (here the 'work' tag) when the buffer is saved."""

    viminput = """
    === Work tasks | +work ===
    * [ ] tag work task
    """

    vimoutput = """
    === Work tasks | +work ===
    * [ ] tag work task #{uuid}
    """

    def execute(self):
        self.command("w", regex="written$", lines=1)

        pending = self.tw.tasks.pending()
        assert len(pending) == 1

        created = pending[0]
        assert created['description'] == 'tag work task'
        assert created['status'] == 'pending'
        assert created['tags'] == ['work']
class TestViewportDefaultsOverriding(IntegrationTest):
    """A later viewport modstring (project:Chores) overrides the default
    derived from the filter part (project:Home)."""

    viminput = """
    === Work tasks | project:Home +home | project:Chores ===
    * [ ] home task
    """

    vimoutput = """
    === Work tasks | project:Home +home | project:Chores ===
    """

    def execute(self):
        self.command("w", regex="written$", lines=1)

        pending = self.tw.tasks.pending()
        assert len(pending) == 1

        saved = pending[0]
        assert saved['description'] == 'home task'
        assert saved['status'] == 'pending'
        assert saved['project'] == 'Chores'
        assert saved['tags'] == ['home']
class TestViewportDefaultsRemoval(IntegrationTest):
    """Saving under a viewport that clears a default (bare ``project:``)
    must leave that attribute unset on the created task."""

    viminput = """
    === Work tasks | project:Home +home | project: ===
    * [ ] home task
    """

    vimoutput = """
    === Work tasks | project:Home +home | project: ===
    """

    def execute(self):
        self.command("w", regex="written$", lines=1)

        assert len(self.tw.tasks.pending()) == 1
        task = self.tw.tasks.pending()[0]
        assert task['description'] == 'home task'
        assert task['status'] == 'pending'
        # PEP 8: comparisons against None use identity, not equality.
        assert task['project'] is None
        assert task['tags'] == ['home']
| Python | 0.000001 |
5e2a14af770ca07cdf6f3674ef54668a0a433078 | hello py | helloworld.py | helloworld.py | print "Hello World"; | Python | 0.99991 | |
3ac33d336166a041caa4fbdd895f789d86081029 | add unit test | lib/new_xml_parsing/test_compatibility.py | lib/new_xml_parsing/test_compatibility.py | #!/usr/bin/env python
import unittest
import os
import sys
import re
from xml.sax import parseString
sys.path.append('..')
from patXML import *
from parsexml import Patent
# Directory of test files and logs
xml_files = [x for x in os.listdir('test_xml_files')
if re.match(r"2012_\d.xml", x) != None] # Match fixtures
print xml_files
parsed_xml_old = []
parsed_xml_new = []
for xf in xml_files:
text = open('test_xml_files/'+xf).read()
old = XMLPatentBase(text)
new = Patent()
parseString(text, new)
parsed_xml_old.append(old)
parsed_xml_new.append(new)
"""
Fields useful for legacy code testing: self.country, self.patent, self.kind,
self.date_grant, self.pat_type, self.date_app, self.country_app,
self.patent_app (each patent should have these)
self.code_app, self.clm_num, self.classes <-- can't easily test these,
vary differently across all general patents, still thinking of a solution
"""
class TestXMLPatent(unittest.TestCase):
    """Compare the legacy XMLPatentBase parser with the new SAX-based
    Patent handler: every extracted field must agree on all fixtures."""

    def setUp(self):
        # Sanity check: without fixture files every zip() below would be
        # empty and the comparisons would pass vacuously.
        self.assertTrue(xml_files)

    def _assert_field_equal(self, field):
        """Assert *field* matches on every (old, new) parse pair.

        assertEqual is used instead of the original assertTrue(a == b)
        so unittest reports both values automatically on failure.
        """
        for old, new in zip(parsed_xml_old, parsed_xml_new):
            self.assertEqual(getattr(old, field), getattr(new, field),
                             "field %r differs between parsers" % field)

    def test_country(self):
        self._assert_field_equal('country')

    def test_patent(self):
        self._assert_field_equal('patent')

    def test_kind(self):
        self._assert_field_equal('kind')

    def test_date_grant(self):
        self._assert_field_equal('date_grant')

    def test_pat_type(self):
        self._assert_field_equal('pat_type')

    def test_date_app(self):
        self._assert_field_equal('date_app')

    def test_country_app(self):
        self._assert_field_equal('country_app')

    def test_patent_app(self):
        self._assert_field_equal('patent_app')

    def test_classes(self):
        self._assert_field_equal('classes')

    def test_code_app(self):
        self._assert_field_equal('code_app')

    def test_clm_num(self):
        self._assert_field_equal('clm_num')

    def test_abstract(self):
        self._assert_field_equal('abstract')

    def test_invention_title(self):
        self._assert_field_equal('invention_title')

    def test_asg_list(self):
        self._assert_field_equal('asg_list')

    def test_cit_list(self):
        self._assert_field_equal('cit_list')

    def test_rel_list(self):
        self._assert_field_equal('rel_list')

    def test_inv_list(self):
        self._assert_field_equal('inv_list')

    def test_law_list(self):
        self._assert_field_equal('law_list')

    def tearDown(self):
        # Nothing to tear down yet; kept as an explicit extension point.
        pass
# Execute the comparison suite (runs on import as well as as a script).
unittest.main()
| Python | 0.000001 | |
edd28dc68b91af78da1a1d576fcb9dcb83ebd0c8 | Create lin_reg.py | lin_reg.py | lin_reg.py | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import square
#Mean Square error function
def costf(X, y, theta):
    """Mean squared error of the linear model X.theta against y.

    X is an (m, n) design matrix, theta an (n,) weight vector and y the
    (m,) target; np.mean is the (1/m)*sum of the original, spelled
    directly.  (Dead debug print removed.)
    """
    residual = np.dot(X, theta) - y
    return np.mean(np.power(residual, 2))
#Gradient of error function
def gradientf(X, y, theta):
    """Gradient of the mean-squared-error cost with respect to theta."""
    n_samples = y.shape[0]
    residual = np.dot(X, theta) - y
    # d/dtheta of mean((X.theta - y)^2) is (2/m) * X^T (X.theta - y).
    return (2.0 / n_samples) * np.dot(X.T, residual)
t = np.arange(0,10,0.01)
y = 2*square(t) + 0*np.random.random(t.shape)
X = np.array([[1, np.sin(x), np.sin(3*x), np.sin(5*x), np.sin(7*x)] for x in t])
th = np.zeros(5)
errors = []
thetas = []
#Optimizing using gradient descent algorithm
numiters = 1000
alpha = 0.02 #Learning rate
errors.append(costf(X,y,th))
for i in xrange(numiters):
#Gradient descent
grad = gradientf(X,y,th)
th = th - alpha*grad
errors.append(costf(X,y,th))
thetas.append(th)
if(i%10 == 0):
print "Iteration: "+str(i)
print "Costf: "+ str(costf(X,y,th))
print "Gradient: " + str(gradientf(X, t, th))
print "Theta: "+ str(th)
y_ = np.dot(X, th)
#Closed form solution
th_opt = np.dot(np.linalg.pinv(X), y)
y_opt = np.dot(X, th_opt)
#Plotting results
plt.plot(t, y, 'o')
plt.xlabel('x')
plt.ylabel('y')
plt.hold(True)
plt.plot(t, y_)
plt.plot(t, y_opt)
plt.figure()
plt.plot(errors)
plt.title("Error over time")
plt.ylabel("Error")
plt.xlabel("Number of iterations")
plt.show()
| Python | 0.00001 | |
dc854dc41929b027f393c7e341be51193b4ca7b9 | Create SearchinRSArr_001.py | leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py | leetcode/033-Search-in-Rotated-Sorted-Array/SearchinRSArr_001.py | class Solution:
# @param {integer[]} nums
# @param {integer} target
# @return {integer}
def search(self, nums, target):
l, r = 0, len(nums) - 1
while l <= r:
m = (l + r) / 2
if nums[m] == target:
return m
elif nums[m] > target:
if nums[m] > nums[r] and target < nums[l]:
l = m + 1
else:
r = m - 1
else:
if nums[m] < nums[r] and target > nums[r]:
r = m - 1
else:
l = m + 1
return -1
| Python | 0 | |
a10554b81d4def386b016698c1e7dd771cecd35b | fix automatic testing | python/qidoc/test/test_qidoc.py | python/qidoc/test/test_qidoc.py | ## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
import tempfile
import unittest
import qidoc.core
import qibuild
class TestQiDoc(unittest.TestCase):
def setUp(self):
self.tmp = tempfile.mkdtemp(prefix="tmp-qidoc")
self.in_dir = os.path.join(self.tmp, "in")
self.out_dir = os.path.join(self.tmp, "out")
this_dir = os.path.dirname(__file__)
qibuild.sh.install(os.path.join(this_dir, "in"),
self.in_dir, quiet=True)
self.qidoc_builder = qidoc.core.QiDocBuilder(self.in_dir, self.out_dir)
def tearDown(self):
qibuild.sh.rm(self.tmp)
def test_build(self):
opts = dict()
opts["version"] = 1.42
self.qidoc_builder.build(opts)
submodule_zip = os.path.join(self.out_dir,
"qibuild", "_downloads", "submodule.zip")
self.assertTrue(os.path.exists(submodule_zip))
def test_cfg_parse(self):
qibuild_sphinx = self.qidoc_builder.sphinxdocs["qibuild"]
self.assertEqual(qibuild_sphinx.name, "qibuild")
self.assertEqual(qibuild_sphinx.src ,
os.path.join(self.in_dir, "qibuild", "doc"))
doc_sphinx = self.qidoc_builder.sphinxdocs["doc"]
self.assertEqual(doc_sphinx.depends, ["qibuild"])
libalcommon = self.qidoc_builder.doxydocs["libalcommon"]
libalvision = self.qidoc_builder.doxydocs["libalvision"]
self.assertEqual(libalcommon.name, "libalcommon")
self.assertEqual(libalvision.name, "libalvision")
self.assertEqual(libalcommon.src ,
os.path.join(self.in_dir, "libnaoqi", "libalcommon"))
self.assertEqual(libalvision.src ,
os.path.join(self.in_dir, "libnaoqi", "libalvision"))
self.assertEqual(libalcommon.dest,
os.path.join(self.out_dir, "ref", "libalcommon"))
self.assertEqual(libalvision.dest,
os.path.join(self.out_dir, "ref", "libalvision"))
def test_sorting(self):
docs = self.qidoc_builder.sort_sphinx()
names = [d.name for d in docs]
self.assertEqual(names, ['qibuild', 'doc'])
docs = self.qidoc_builder.sort_doxygen()
names = [d.name for d in docs]
self.assertEqual(names, ['libqi', 'libalcommon', 'libalvision'])
def test_intersphinx_mapping(self):
mapping = self.qidoc_builder.get_intersphinx_mapping("doc")
self.assertEqual(mapping,
{"qibuild": (os.path.join(self.out_dir, "qibuild"),
None)}
)
def test_doxygen_mapping(self):
mapping = self.qidoc_builder.get_doxygen_mapping("libalvision")
expected = {
os.path.join(self.out_dir, "doxytags", "libalcommon.tag"):
"../libalcommon",
os.path.join(self.out_dir, "doxytags", "libqi.tag"):
"../libqi",
}
self.assertEqual(mapping, expected)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| ## Copyright (c) 2012 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
import os
import tempfile
import unittest
import qidoc.core
import qibuild
class TestQiDoc(unittest.TestCase):
    """End-to-end checks of QiDocBuilder against the fixture tree in
    tests/in, copied to a scratch directory for every test."""
    def setUp(self):
        # Copy the fixture project into a fresh temp dir so each test
        # starts from a pristine tree.
        self.tmp = tempfile.mkdtemp(prefix="tmp-qidoc")
        self.in_dir = os.path.join(self.tmp, "in")
        self.out_dir = os.path.join(self.tmp, "out")
        this_dir = os.path.dirname(__file__)
        qibuild.sh.install(os.path.join(this_dir, "in"),
            self.in_dir, quiet=True)
        self.qidoc_builder = qidoc.core.QiDocBuilder(self.in_dir, self.out_dir)
    def tearDown(self):
        qibuild.sh.rm(self.tmp)
    def test_build(self):
        # A full build must ship the downloadable submodule archive.
        opts = dict()
        opts["version"] = 1.42
        self.qidoc_builder.build(opts)
        submodule_zip = os.path.join(self.out_dir,
            "qibuild", "_downloads", "submodule.zip")
        self.assertTrue(os.path.exists(submodule_zip))
    def test_cfg_parse(self):
        # The parsed config must expose each sphinx/doxygen project with
        # the expected name, source and destination paths.
        qidoc_cfg = self.qidoc_builder.config
        qibuild_sphinx = self.qidoc_builder.sphinxdocs["qibuild"]
        self.assertEqual(qibuild_sphinx.name, "qibuild")
        self.assertEqual(qibuild_sphinx.src ,
            os.path.join(self.in_dir, "qibuild", "doc"))
        doc_sphinx = self.qidoc_builder.sphinxdocs["doc"]
        self.assertEqual(doc_sphinx.depends, ["qibuild"])
        libalcommon = self.qidoc_builder.doxydocs["libalcommon"]
        libalvision = self.qidoc_builder.doxydocs["libalvision"]
        self.assertEqual(libalcommon.name, "libalcommon")
        self.assertEqual(libalvision.name, "libalvision")
        self.assertEqual(libalcommon.src ,
            os.path.join(self.in_dir, "libnaoqi", "libalcommon"))
        self.assertEqual(libalvision.src ,
            os.path.join(self.in_dir, "libnaoqi", "libalvision"))
        self.assertEqual(libalcommon.dest,
            os.path.join(self.out_dir, "ref", "libalcommon"))
        self.assertEqual(libalvision.dest,
            os.path.join(self.out_dir, "ref", "libalvision"))
        self.assertEqual(qidoc_cfg.templates.repo, "aldeb-templates")
    def test_sorting(self):
        # Projects must come out in dependency order.
        docs = self.qidoc_builder.sort_sphinx()
        names = [d.name for d in docs]
        self.assertEqual(names, ['qibuild', 'doc'])
        docs = self.qidoc_builder.sort_doxygen()
        names = [d.name for d in docs]
        self.assertEqual(names, ['libqi', 'libalcommon', 'libalvision'])
    def test_intersphinx_mapping(self):
        mapping = self.qidoc_builder.get_intersphinx_mapping("doc")
        self.assertEqual(mapping,
            {"qibuild": (os.path.join(self.out_dir, "qibuild"),
                         None)}
        )
    def test_doxygen_mapping(self):
        # Doxygen tag files of the dependencies map to relative paths.
        mapping = self.qidoc_builder.get_doxygen_mapping("libalvision")
        expected = {
            os.path.join(self.out_dir, "doxytags", "libalcommon.tag"):
            "../libalcommon",
            os.path.join(self.out_dir, "doxytags", "libqi.tag"):
            "../libqi",
            }
        self.assertEqual(mapping, expected)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| Python | 0.000225 |
b57c24b23fa9566178455da895ea63baf6e16ff4 | Test cases to verify parsing of bitwise encoded PIDs | tests/scanner_tests.py | tests/scanner_tests.py | from shadetree.obd.scanner import decode_bitwise_pids
# Raw "supported PIDs" bitmask responses (space-separated hex byte pairs,
# trailing space included) captured from two reference vehicles; each bit
# marks whether the corresponding PID in the 01..20 block is supported.
DURANGO_SUPPORTED_PIDS_RESPONSE = 'BE 3E B8 10 '
JETTA_DIESEL_SUPPORTED_PIDS_RESPONSE = '98 3B 80 19 '
def test_decode_bitwise_pids_durango():
    """
    Verify we correctly parse information about supported PIDs on a 1999 Dodge Durango
    """
    decoded = decode_bitwise_pids(DURANGO_SUPPORTED_PIDS_RESPONSE)

    # PIDs the Durango reports as available; every other PID in the
    # 01..20 block must decode to False.
    available = {'01', '03', '04', '05', '06', '07',
                 '0B', '0C', '0D', '0E', '0F',
                 '11', '13', '14', '15', '1C'}

    expected = {}
    for number in range(0x01, 0x21):
        pid = '%02X' % number
        expected[pid] = pid in available

    assert decoded == expected
def test_decode_bitwise_pids_jetta_diesel():
    """
    Verify we correctly parse information about supported PIDs on a 2004 Jetta Diesel Wagon
    """
    decoded = decode_bitwise_pids(JETTA_DIESEL_SUPPORTED_PIDS_RESPONSE)

    # PIDs the Jetta reports as available; every other PID in the
    # 01..20 block must decode to False.
    available = {'01', '04', '05',
                 '0B', '0C', '0D', '0F', '10',
                 '11', '1C', '1D', '20'}

    expected = {}
    for number in range(0x01, 0x21):
        pid = '%02X' % number
        expected[pid] = pid in available

    assert decoded == expected
0eb28e89a5c5453a8337e031dd71a5019d828aab | Remove radmin credentials from create_heat_client | trove/common/remote.py | trove/common/remote.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import cfg
from trove.openstack.common.importutils import import_class
from cinderclient.v2 import client as CinderClient
from heatclient.v1 import client as HeatClient
from novaclient.v1_1.client import Client
from swiftclient.client import Connection
# Service endpoints and options, resolved once at import time from the
# trove configuration.
CONF = cfg.CONF
COMPUTE_URL = CONF.nova_compute_url
PROXY_AUTH_URL = CONF.trove_auth_url
VOLUME_URL = CONF.cinder_url
OBJECT_STORE_URL = CONF.swift_url
USE_SNET = CONF.backup_use_snet  # forwarded to swiftclient's snet flag
HEAT_URL = CONF.heat_url
def dns_client(context):
    """Return the DNS manager; the import is deferred so the dns package
    is only loaded when DNS support is actually used."""
    from trove.dns.manager import DnsManager
    manager = DnsManager()
    return manager
def guest_client(context, id):
    """Return a guest-agent API client bound to the given instance id."""
    from trove.guestagent.api import API
    api = API(context, id)
    return api
def nova_client(context):
    """Build a nova client for the caller's tenant, reusing the caller's
    token and pointing straight at the tenant-scoped compute endpoint."""
    nova = Client(context.user, context.auth_token,
                  project_id=context.tenant, auth_url=PROXY_AUTH_URL)
    nova.client.auth_token = context.auth_token
    nova.client.management_url = "%s/%s/" % (COMPUTE_URL, context.tenant)
    return nova
def create_admin_nova_client(context):
    """
    Creates client that uses trove admin credentials
    :return: a client for nova for the trove admin
    """
    admin_client = create_nova_client(context)
    # Clearing the token makes the client authenticate with the admin
    # credentials instead of the caller's token.
    admin_client.client.auth_token = None
    return admin_client
def cinder_client(context):
    """Build a cinder client for the caller's tenant, reusing the
    caller's token and the tenant-scoped volume endpoint."""
    cinder = CinderClient.Client(context.user, context.auth_token,
                                 project_id=context.tenant,
                                 auth_url=PROXY_AUTH_URL)
    cinder.client.auth_token = context.auth_token
    cinder.client.management_url = "%s/%s/" % (VOLUME_URL, context.tenant)
    return cinder
def heat_client(context):
    """Build a heat client that talks directly to the tenant endpoint,
    authenticating with the caller's token (no separate client auth)."""
    tenant_endpoint = "%s/%s/" % (HEAT_URL, context.tenant)
    return HeatClient.Client(token=context.auth_token,
                             os_no_client_auth=True,
                             endpoint=tenant_endpoint)
def swift_client(context):
    """Build a swift connection for the caller's tenant using the
    pre-authenticated token and object-store URL."""
    connection = Connection(preauthurl=OBJECT_STORE_URL + context.tenant,
                            preauthtoken=context.auth_token,
                            tenant_name=context.tenant,
                            snet=USE_SNET)
    return connection
# Indirection point: deployments can swap any client factory via the
# remote_*_client config options (which presumably default to the
# functions above -- confirm against the cfg option defaults).
create_dns_client = import_class(CONF.remote_dns_client)
create_guest_client = import_class(CONF.remote_guest_client)
create_nova_client = import_class(CONF.remote_nova_client)
create_swift_client = import_class(CONF.remote_swift_client)
create_cinder_client = import_class(CONF.remote_cinder_client)
create_heat_client = import_class(CONF.remote_heat_client)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import cfg
from trove.openstack.common.importutils import import_class
from cinderclient.v2 import client as CinderClient
from heatclient.v1 import client as HeatClient
from novaclient.v1_1.client import Client
from swiftclient.client import Connection
# Service endpoints and options, resolved once at import time from the
# trove configuration.
CONF = cfg.CONF
COMPUTE_URL = CONF.nova_compute_url
PROXY_AUTH_URL = CONF.trove_auth_url
VOLUME_URL = CONF.cinder_url
OBJECT_STORE_URL = CONF.swift_url
USE_SNET = CONF.backup_use_snet  # forwarded to swiftclient's snet flag
HEAT_URL = CONF.heat_url
def dns_client(context):
    """Return the DNS manager; the import is deferred so the dns package
    is only loaded when DNS support is actually used."""
    from trove.dns.manager import DnsManager
    manager = DnsManager()
    return manager
def guest_client(context, id):
    """Return a guest-agent API client bound to the given instance id."""
    from trove.guestagent.api import API
    api = API(context, id)
    return api
def nova_client(context):
    """Build a nova client for the caller's tenant, reusing the caller's
    token and pointing straight at the tenant-scoped compute endpoint."""
    nova = Client(context.user, context.auth_token,
                  project_id=context.tenant, auth_url=PROXY_AUTH_URL)
    nova.client.auth_token = context.auth_token
    nova.client.management_url = "%s/%s/" % (COMPUTE_URL, context.tenant)
    return nova
def create_admin_nova_client(context):
    """
    Creates client that uses trove admin credentials
    :return: a client for nova for the trove admin
    """
    admin_client = create_nova_client(context)
    # Clearing the token makes the client authenticate with the admin
    # credentials instead of the caller's token.
    admin_client.client.auth_token = None
    return admin_client
def cinder_client(context):
    """Build a cinder client for the caller's tenant, reusing the
    caller's token and the tenant-scoped volume endpoint."""
    cinder = CinderClient.Client(context.user, context.auth_token,
                                 project_id=context.tenant,
                                 auth_url=PROXY_AUTH_URL)
    cinder.client.auth_token = context.auth_token
    cinder.client.management_url = "%s/%s/" % (VOLUME_URL, context.tenant)
    return cinder
def heat_client(context):
    """Build a heat client that talks directly to the tenant endpoint.

    The hard-coded ``username``/``password="radmin"`` pair was removed:
    shipping a fixed credential in source is a security defect, and with
    os_no_client_auth=True the caller's token is what authenticates the
    requests.  (Matches the sibling implementation in this module.)
    """
    endpoint = "%s/%s/" % (HEAT_URL, context.tenant)
    client = HeatClient.Client(token=context.auth_token,
                               os_no_client_auth=True,
                               endpoint=endpoint)
    return client
def swift_client(context):
    """Build a swift connection for the caller's tenant using the
    pre-authenticated token and object-store URL."""
    connection = Connection(preauthurl=OBJECT_STORE_URL + context.tenant,
                            preauthtoken=context.auth_token,
                            tenant_name=context.tenant,
                            snet=USE_SNET)
    return connection
# Indirection point: deployments can swap any client factory via the
# remote_*_client config options (which presumably default to the
# functions above -- confirm against the cfg option defaults).
create_dns_client = import_class(CONF.remote_dns_client)
create_guest_client = import_class(CONF.remote_guest_client)
create_nova_client = import_class(CONF.remote_nova_client)
create_swift_client = import_class(CONF.remote_swift_client)
create_cinder_client = import_class(CONF.remote_cinder_client)
create_heat_client = import_class(CONF.remote_heat_client)
| Python | 0.000006 |
7a9bb7d412ccfa4921dc691232c1192bbb2789cd | Add rudimentary swarming service. | dashboard/dashboard/services/swarming_service.py | dashboard/dashboard/services/swarming_service.py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for interfacing with the Chromium Swarming Server.
The Swarming Server is a task distribution service. It can be used to kick off
a test run.
API explorer: https://goo.gl/uxPUZo
"""
# TODO(dtu): This module is very much a work in progress. It's not clear whether
# the parameters are the right ones to pass, whether it's the right way to pass
# the parameters (as opposed to having a data object, whether the functions
# should be encapsulated in the data object, or whether this is at the right
# abstraction level.
from apiclient import discovery
from dashboard import utils
# Discovery document template for the Swarming API; the {api} and
# {apiVersion} placeholders are filled in by apiclient's discovery.build.
_DISCOVERY_URL = ('https://chromium-swarm.appspot.com/_ah/api'
                  '/discovery/v1/apis/{api}/{apiVersion}/rest')
def New(name, user, bot_id, isolated_hash, extra_args=None):
  """Create a new Swarming task.

  Args:
    name: Display name of the task.
    user: User the task is attributed to.
    bot_id: Id of the bot that must run the task (pinned through the
        'id' dimension below).
    isolated_hash: Hash of the isolated input tree to execute.
    extra_args: Optional list of extra command-line arguments.

  Returns:
    The response of the tasks.new API call.
  """
  if not extra_args:
    extra_args = []
  swarming = _DiscoverService()
  # NOTE: priority/expiration/timeout values are strings per the API;
  # whether these are the right parameters is still open (see the TODO
  # at the top of this module).
  request = swarming.tasks().new(body={
      'name': name,
      'user': user,
      'priority': '100',
      'expiration_secs': '600',
      'properties': {
          'inputs_ref': {
              'isolated': isolated_hash,
          },
          'extra_args': extra_args,
          'dimensions': [
              {'key': 'id', 'value': bot_id},
              {'key': 'pool', 'value': 'Chrome-perf'},
          ],
          'execution_timeout_secs': '3600',
          'io_timeout_secs': '3600',
      },
      'tags': [
          'id:%s-b1' % bot_id,
          'pool:Chrome-perf',
      ],
  })
  return request.execute()
def Get(task_id):
  """Look up a Swarming task by id (not implemented yet)."""
  _ = task_id  # intentionally unused until this is implemented
  raise NotImplementedError()
def _DiscoverService():
  """Build the Resource object through which Swarming API calls are made."""
  service = discovery.build('swarming', 'v1', discoveryServiceUrl=_DISCOVERY_URL,
                            http=utils.ServiceAccountHttp())
  return service
| Python | 0.000001 | |
1a3839a083293200862ea21283c9c4d82a836846 | Add test for profiles. | tests/test_catalyst.py | tests/test_catalyst.py |
from vdm.catalyst import DisambiguationEngine
def pretty(raw):
"""
Pretty print xml.
"""
import xml.dom.minidom
xml = xml.dom.minidom.parseString(raw)
pretty = xml.toprettyxml()
return pretty
def test_profile():
#Basic info about a person.
p = [
'Josiah',
'Carberry',
None,
'jcarberry@brown.edu',
['null'],
['null']
]
disambig = DisambiguationEngine()
disambig.affiliation_strings = ['Sample University']
doc = disambig.build_doc(*p)
#Basic verification that XML contains what we expect.
assert('<First>Josiah</First>' in doc)
assert('<Last>Carberry</Last>' in doc)
assert('<email>jcarberry@brown.edu</email>' in doc)
assert('<Affiliation>%Sample University%</Affiliation>' in doc)
| Python | 0 | |
15b69945a209515c236d8ed788e824a895ef6859 | Create uvcontinuum.py | xmps/color_selection/uvcontinuum.py | xmps/color_selection/uvcontinuum.py | Python | 0.000006 | ||
ba60687fec047ed94bf7bb76dcf8bcf485c705ec | Add script to repair member relations between organizations and packages. | repair_organizations_members.py | repair_organizations_members.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Etalab-CKAN-Scripts -- Various scripts that handle Etalab datasets in CKAN repository
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Emmanuel Raviart
# http://github.com/etalab/etalab-ckan-scripts
#
# This file is part of Etalab-CKAN-Scripts.
#
# Etalab-CKAN-Scripts is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalab-CKAN-Scripts is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Repair members of organizations, to ensure that they match the owners of packages."""
import argparse
import logging
import os
import sys
from ckan import model, plugins
from ckan.config.environment import load_environment
from paste.deploy import appconfig
from paste.registry import Registry
import pylons
import sqlalchemy as sa
#import sqlalchemy.exc
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config', help = 'path of configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
# logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
logging.basicConfig(level = logging.INFO if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = appconfig('config:{}'.format(os.path.abspath(args.config)))
load_environment(site_conf.global_conf, site_conf.local_conf)
registry = Registry()
registry.prepare()
registry.register(pylons.translator, MockTranslator())
plugins.load('synchronous_search')
revision = model.repo.new_revision()
for package in model.Session.query(model.Package).filter(
model.Package.owner_org != None,
model.Package.state == 'active',
):
owner = model.Session.query(model.Group).get(package.owner_org)
assert owner is not None
assert owner.is_organization
assert owner.state != 'deleted'
member = model.Session.query(model.Member).filter(
model.Member.group_id == owner.id,
model.Member.state == 'active',
model.Member.table_id == package.id,
).first()
if member is None:
log.info(u'Repairing organization "{}" package "{}" membership'.format(owner.name, package.name))
member = model.Session.query(model.Member).filter(
model.Member.group_id == owner.id,
model.Member.table_id == package.id,
).first()
assert member is not None
if member.capacity != 'organization':
member.capacity = 'organization'
member.state = 'active'
assert member.table_name == 'package'
else:
if member.capacity != 'organization':
log.warning(u'Repairing capacity organization "{}" package "{}" membership'.format(owner, package))
member.capacity = 'organization'
assert member.table_name == 'package'
continue
model.repo.commit_and_remove()
return 0
# Propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())
| Python | 0 | |
baac9ff6b831a1b91271daa3eb02cce32774f2f5 | Create DMX Blackout routine | pyEnttecBlackout.py | pyEnttecBlackout.py | #!/usr/bin/env python
#import msvcrt
import serial
import sys
import time
class pydmx(object):
def __init__(self, port_number=2):
self.channels = [0 for i in range(512)]
self.port_number = port_number
# DMX_USB Interface variables
self.SOM_VALUE = 0x7E # SOM = Start of Message
self.EOM_VALUE = 0xE7 # EOM = End of Message
# Lables:
self.REPROGRAM_FIRMWARE_LABEL = 1
self.PROGRAM_FLASH_PAGE_LABEL = 2
self.GET_WIDGET_PARAMETERS_LABEL = 3
self.SET_WIDGET_PARAMETERS_LABEL = 4
self.RECEIVED_DMX_LABEL = 5
self.OUTPUT_ONLY_SEND_DMX_LABEL = 6
self.RDM_SEND_DMX_LABEL = 7
self.RECIEVE_DMX_ON_CHANGE_LABEL = 8
self.RECIEVED_DMX_CHANGE_OF_STATE_LABEL = 9
self.GET_WIDGET_SERIAL_NUMBER_LABEL = 10
self.SEND_RDM_DISCOVERY_LABEL = 11
self.INVALID_LABEL = 0xFF
# Initialize serial port
try:
# Open serial port with receive timeout
self.ser = serial.Serial(port="/dev/tty.usbserial-EN169205", baudrate=57600, timeout=1)
#self.ser = serial.Serial(port=port_number, baudrate=57600, timeout=1)
except:
print "dmx_usb.__init__: ERROR: Could not open COM%u" % (port_number+1)
#sys.exit(0)
else:
print "dmx_usb.__init__: Using %s" % (self.ser.portstr)
# Low level functions (for inside use only)
def transmit(self, label, data, data_size):
self.ser.write(chr(self.SOM_VALUE))
self.ser.write(chr(label))
self.ser.write(chr(data_size & 0xFF))
self.ser.write(chr((data_size >> 8) & 0xFF))
for j in range(data_size):
self.ser.write(data[j])
self.ser.write(chr(self.EOM_VALUE))
# Higher level functions:
def set_channel(self, channel, value=0):
# Channel = DMX Channel (1-512)
# Value = Strength (0-100%)
self.channels[channel] = value
self.update_channels()
def update_channels(self):
'''Send all 512 DMX Channels from channels[] to the hardware:
update_channels()'''
# This is where the magic happens
print "dmx_usb.update_channels: Updating....."
self.int_data = [0] + self.channels
self.msg_data = [chr(self.int_data[j]) for j in range(len(self.int_data))]
self.transmit(self.OUTPUT_ONLY_SEND_DMX_LABEL, self.msg_data, len(self.msg_data))
def close_serial(self):
self.ser.close()
def dmx_test(self, start=1, finish=512):
print "dmx_usb.dmx_test: Starting DMX test"
print "dmx_usb.dmx_test: Testing range " + str(start) + " to " + str(finish)
for i in range(start, finish):
print "dmx_usb.dmx_test: Test channel " + str(i)
self.set_channel(i, 100)
time.sleep(1)
self.set_channel(i, 0)
print "dmx_usb.dmx_test: Test Complete!"
print "dmx_usb.dmx_test: Tested " + str(finish-start+1) + " channels, from " +str(start)+ " to " + str(finish)
def blackout(self):
channels = [0 for i in range(512)]
self.update_channels()
#STROBE
#DmxBufferToSend[1] = 50
#RED-YELLOW
#DmxBufferToSend[2] = 50
#GREEN-PURPLE
#DmxBufferToSend[3] = 0x00
#BLUE-WHITE
#DmxBufferToSend[4] = 50
#MOTOR
#DmxBufferToSend[5] = 180
# Start: quick manual smoke test -- open the interface and blank all channels.
print "'dmx_usb' test:"
print " Create new object"
print " dmx = pydmx()"
dmx = pydmx()
print
print " Blackout:"
print " pydmx.blackout()"
dmx.blackout()
| Python | 0 | |
c6f09446076677e5a3af8fda8c7fbbb73885234f | Add Custom Filter Design demo | demo/custom_filter_design.py | demo/custom_filter_design.py | import yodel.analysis
import yodel.filter
import yodel.complex
import yodel.conversion
import matplotlib.pyplot as plt
def frequency_response(response):
    """Compute the spectrum of an impulse response with yodel's FFT.

    Returns a (real, imaginary) pair of lists, one bin per input sample.
    """
    num_bins = len(response)
    real_part = [0] * num_bins
    imag_part = [0] * num_bins
    yodel.analysis.FFT(num_bins).forward(response, real_part, imag_part)
    return real_part, imag_part
def amplitude_response(spec_real, spec_imag, db=True):
    """Magnitude of a complex spectrum, converted to decibels unless db=False."""
    magnitudes = [yodel.complex.modulus(re, im)
                  for re, im in zip(spec_real, spec_imag)]
    if db:
        magnitudes = [yodel.conversion.lin2db(m) for m in magnitudes]
    return magnitudes
def phase_response(spec_real, spec_imag, degrees=True):
    """Phase of a complex spectrum, in degrees by default (radians otherwise)."""
    # ``math`` is not imported at module level in this file, which made the
    # degrees branch raise NameError; import it locally.
    import math
    size = len(spec_real)
    pha = [0] * size
    for i in range(0, size):
        pha[i] = yodel.complex.phase(spec_real[i], spec_imag[i])
        if degrees:
            pha[i] = (pha[i] * 180.0 / math.pi)
    return pha
class CustomFilterDesigner:
    """Interactive matplotlib tool for drawing a desired frequency response.

    Click or drag on the plot to edit the target response; the actual
    response of the resulting yodel custom filter is redrawn after every
    change.
    """

    def __init__(self):
        self.samplerate = 48000
        self.framesize = 256
        # Number of usable spectrum bins (DC .. Nyquist inclusive).
        self.frsize = int((self.framesize/2)+1)
        self.custom_fr = [1] * self.frsize
        # X axis in Hz for each spectrum bin.
        self.hzscale = [(i*self.samplerate) / (2.0*self.frsize) for i in range(0, self.frsize)]
        self.flt = yodel.filter.Custom(self.samplerate, self.framesize)
        # Last mouse-down position; None while no button is held.
        self.pressed = None
        self.update_filter()
        self.create_plot()

    def update_filter(self):
        """Redesign the filter from the drawn response and cache its actual FR."""
        self.flt.design(self.custom_fr, False)
        fr_re, fr_im = frequency_response(self.flt.ir)
        self.fft_fr = amplitude_response(fr_re, fr_im, False)

    def create_plot(self):
        """Build the figure and wire up the mouse event handlers."""
        self.fig = plt.figure()
        self.cid = self.fig.canvas.mpl_connect('button_press_event', self.onpress)
        self.cid = self.fig.canvas.mpl_connect('button_release_event', self.onrelease)
        self.cid = self.fig.canvas.mpl_connect('motion_notify_event', self.onmotion)
        self.ax_custom_fr = self.fig.add_subplot(111)
        self.ax_custom_fr.set_title('Custom Filter Design')
        self.plot_custom_fr, = self.ax_custom_fr.plot(self.hzscale, self.custom_fr, 'r', label='Desired Frequency Response')
        self.plot_fft_fr, = self.ax_custom_fr.plot(self.hzscale, self.fft_fr[0:self.frsize], 'b', label='Actual Frequency Response')
        self.ax_custom_fr.legend()
        self.ax_custom_fr.grid()
        self.rescale_plot()

    def rescale_plot(self):
        """Fix the amplitude range and redraw."""
        self.ax_custom_fr.set_ylim(-1, 5)
        plt.draw()

    def _edit_response(self, event):
        """Write the event's (x, y) into the drawn response and refresh plots.

        Shared by onpress and onmotion, which previously duplicated this code.
        """
        xpos = int(event.xdata * 2.0 * self.frsize / self.samplerate)
        ypos = max(event.ydata, 0)
        if xpos >= 0 and xpos < self.frsize:
            self.custom_fr[xpos] = ypos
            self.update_filter()
            self.plot_custom_fr.set_ydata(self.custom_fr)
            self.plot_fft_fr.set_ydata(self.fft_fr[0:self.frsize])
            self.rescale_plot()

    def onpress(self, event):
        """Start a drag and apply the clicked point."""
        if event.inaxes != self.ax_custom_fr:
            return
        self.pressed = (event.xdata, event.ydata)
        self._edit_response(event)

    def onrelease(self, event):
        """End the current drag."""
        self.pressed = None

    def onmotion(self, event):
        """While dragging inside the axes, keep applying the hovered point."""
        if self.pressed != None and event.xdata != None and event.ydata != None:
            self._edit_response(event)
# Launch the interactive designer; plt.show() blocks until the window closes.
cfd = CustomFilterDesigner()
plt.show()
| Python | 0 | |
4826764c24fca8204322f88adfde75968b3985ee | add wrapper to start bucky from source tree | bucky.py | bucky.py | #!/usr/bin/env python
import bucky.main
if __name__ == '__main__':
bucky.main.main()
| Python | 0 | |
c757c6ad714afb393c65c1b82bca31de357332fc | Add test coverage for utility module | python/util_test.py | python/util_test.py | #
# (C) Copyright IBM Corp. 2017
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import unittest
import tempfile
import util
class TestUtils(unittest.TestCase):
    """Tests for util.debug_print's behaviour under the isDebugging flag."""

    def setUp(self):
        """Redirect the process stdout into a temporary file for capture."""
        self._capture = tempfile.TemporaryFile()
        os.dup2(self._capture.fileno(), sys.stdout.fileno())

    def tearDown(self):
        """Dispose of the capture file."""
        self._capture.close()

    def test_output_is_clean_when_debug_is_disabled(self):
        util.isDebugging = False
        util.debug_print('Debug Message')
        self.assertEqual(self._captured_text(), '',
                         'Should not write messages when debug is disabled')

    def test_output_has_content_when_debug_is_enabled(self):
        util.isDebugging = True
        util.debug_print('Debug Message')
        self.assertEqual(self._captured_text(), 'Debug Message',
                         'Should write messages when debug is enabled')

    def test_output_has_content_when_byte_array_message_is_passed(self):
        util.isDebugging = True
        util.debug_print(b'Binary Debug Message')
        self.assertEqual(self._captured_text(), 'Binary Debug Message',
                         'Should write messages when debug is enabled')

    def _captured_text(self):
        """Return everything captured on stdout, trailing whitespace stripped."""
        self._capture.seek(0)
        return self._capture.read().decode().rstrip()
# Run the tests when the module is invoked directly.
if __name__ == "__main__":
    unittest.main()
| Python | 0 | |
7492df7b83a99a3173726d4f253718a96ec13200 | add initial gyp file | deps/mpg123/mpg123.gyp | deps/mpg123/mpg123.gyp | # This file is used with the GYP meta build system.
# http://code.google.com/p/gyp
# To build try this:
# svn co http://gyp.googlecode.com/svn/trunk gyp
# ./gyp/gyp -f make --depth=. mpg123.gyp
# make
# ./out/Debug/test
{
'target_defaults': {
'default_configuration': 'Debug',
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
},
},
},
'Release': {
'defines': [ 'NDEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 0, # static release
},
},
}
},
'msvs_settings': {
'VCLinkerTool': {
'GenerateDebugInformation': 'true',
},
},
'conditions': [
['OS=="mac"', {
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {
'ARCHS': [ 'i386' ]
},
}],
['target_arch=="x64"', {
'xcode_settings': {
'ARCHS': [ 'x86_64' ]
},
}]
]
}]
]
},
'targets': [
{
'variables': {
'target_arch%': 'ia32'
},
'target_name': 'mpg123',
'product_prefix': 'lib',
'type': 'static_library',
'sources': [
'src/libmpg123/compat.c',
'src/libmpg123/parse.c',
'src/libmpg123/frame.c',
'src/libmpg123/format.c',
'src/libmpg123/dct64.c',
'src/libmpg123/equalizer.c',
'src/libmpg123/id3.c',
'src/libmpg123/optimize.c',
'src/libmpg123/readers.c',
'src/libmpg123/tabinit.c',
'src/libmpg123/libmpg123.c',
'src/libmpg123/index.c',
'src/libmpg123/stringbuf.c',
'src/libmpg123/icy.c',
'src/libmpg123/icy2utf8.c',
'src/libmpg123/ntom.c',
'src/libmpg123/synth.c',
'src/libmpg123/synth_8bit.c',
'src/libmpg123/layer1.c',
'src/libmpg123/layer2.c',
'src/libmpg123/layer3.c',
'src/libmpg123/synth_s32.c',
'src/libmpg123/synth_real.c',
'src/libmpg123/dither.c',
'src/libmpg123/feature.c',
'src/libmpg123/lfs_alias.c',
],
'include_dirs': [
'src/libmpg123',
# platform and arch-specific headers
'config/<(OS)/<(target_arch)',
],
'defines': [
'PIC',
'HAVE_CONFIG_H'
],
'direct_dependent_settings': {
'include_dirs': [
'src/libmpg123',
# platform and arch-specific headers
'config/<(OS)/<(target_arch)',
]
},
'conditions': [
['OS=="mac"', {
'conditions': [
['target_arch=="ia32"', {
}],
['target_arch=="x64"', {
'defines': [
'OPT_MULTI',
'OPT_X86_64',
'OPT_GENERIC',
'OPT_GENERIC_DITHER',
'REAL_IS_FLOAT',
'NOXFERMEM'
]
}]
]
}],
['target_arch=="x64"', {
'sources': [
'src/libmpg123/dct64_x86_64.S',
'src/libmpg123/dct64_x86_64_float.S',
'src/libmpg123/synth_x86_64_float.S',
'src/libmpg123/synth_x86_64_s32.S',
'src/libmpg123/synth_stereo_x86_64_float.S',
'src/libmpg123/synth_stereo_x86_64_s32.S',
'src/libmpg123/synth_x86_64.S',
'src/libmpg123/synth_stereo_x86_64.S',
]
}]
]
},
{
'target_name': 'test',
'type': 'executable',
'dependencies': [ 'mpg123' ],
'sources': [ 'test.c' ]
}
]
}
| Python | 0 | |
8ee2f2b4c3a0ac40c6b7582a2cf3724f30f41dae | Add data migration | workshops/migrations/0035_auto_20150107_1205.py | workshops/migrations/0035_auto_20150107_1205.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def copy_project_to_tags(apps, schema_editor):
    """Data migration: copy each event's old ``project`` FK into ``tags``."""
    Event = apps.get_model('workshops', 'Event')
    events_with_project = Event.objects.all().exclude(project=None)
    for event in events_with_project:
        print('add {} to {}'.format(event.project, event))
        event.tags.add(event.project)
        event.save()
class Migration(migrations.Migration):
    """Rename Project to Tag and migrate Event.project into a new Event.tags M2M."""

    dependencies = [
        ('workshops', '0034_auto_20150107_1200'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Project',
            new_name='Tag',
        ),
        # The M2M field must exist before the data migration can populate it.
        migrations.AddField(
            model_name='event',
            name='tags',
            field=models.ManyToManyField(to='workshops.Tag'),
            preserve_default=True,
        ),
        migrations.RunPython(copy_project_to_tags),
        # Only drop the old FK once its data has been copied into tags.
        migrations.RemoveField(
            model_name='event',
            name='project',
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename Project to Tag, drop Event.project and add the Event.tags M2M.

    NOTE(review): this earlier version removes the FK before adding the M2M
    and performs no data copy.
    """

    dependencies = [
        ('workshops', '0034_auto_20150107_1200'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Project',
            new_name='Tag',
        ),
        migrations.RemoveField(
            model_name='event',
            name='project',
        ),
        migrations.AddField(
            model_name='event',
            name='tags',
            field=models.ManyToManyField(to='workshops.Tag'),
            preserve_default=True,
        ),
    ]
| Python | 0.999708 |
660d04ac87ea05032a0c19b293fd237bda15fad9 | tempson main class | tempson/tempson.py | tempson/tempson.py | # -*- coding: utf-8 -*-
def render():
print "1234"
| Python | 0.999092 | |
3ba67bf461f2f35f549cc2ac5c85dd1bfb39cfa4 | Add a collection of tests around move_or_merge.py | src/MCPClient/tests/test_move_or_merge.py | src/MCPClient/tests/test_move_or_merge.py | # -*- encoding: utf-8
import pytest
from .move_or_merge import move_or_merge
def test_move_or_merge_when_dst_doesnt_exist(tmpdir):
    """A plain move: src disappears and dst receives its contents."""
    src_file = tmpdir.join("src.txt")
    dst_file = tmpdir.join("dst.txt")
    src_file.write("hello world")

    move_or_merge(src=src_file, dst=dst_file)

    assert dst_file.exists()
    assert not src_file.exists()
    assert dst_file.read() == "hello world"
def test_okay_if_dst_exists_and_is_same(tmpdir):
    """If dst already holds identical contents, src is simply removed."""
    src_file = tmpdir.join("src.txt")
    dst_file = tmpdir.join("dst.txt")
    for fixture in (src_file, dst_file):
        fixture.write("hello world")

    move_or_merge(src=src_file, dst=dst_file)

    assert not src_file.exists()
    assert dst_file.exists()
    assert dst_file.read() == "hello world"
def test_error_if_dst_exists_and_is_different(tmpdir):
    """Mismatched contents must raise and leave both files untouched."""
    src_file = tmpdir.join("src.txt")
    dst_file = tmpdir.join("dst.txt")
    src_file.write("hello world")
    dst_file.write("we come in peace")

    with pytest.raises(RuntimeError, match="dst exists and is different"):
        move_or_merge(src=src_file, dst=dst_file)

    # The original file must not have been deleted.
    assert src_file.exists()
    assert dst_file.exists()
def test_moves_contents_of_directory(tmpdir):
    """Moving a directory carries its files across."""
    src_dir = tmpdir.mkdir("src")
    dst_dir = tmpdir.mkdir("dst")
    src_file = src_dir.join("file.txt")
    src_file.write("hello world")

    move_or_merge(src=str(src_dir), dst=str(dst_dir))

    dst_file = dst_dir.join("file.txt")
    assert not src_file.exists()
    assert dst_file.exists()
    assert dst_file.read() == "hello world"
def test_moves_nested_directory(tmpdir):
    """A nested directory absent from dst is moved wholesale."""
    src_dir = tmpdir.mkdir("src")
    dst_dir = tmpdir.mkdir("dst")
    src_file = src_dir.mkdir("nested").join("file.txt")
    src_file.write("hello world")

    move_or_merge(src=str(src_dir), dst=str(dst_dir))

    dst_file = dst_dir.join("nested").join("file.txt")
    assert not src_file.exists()
    assert dst_file.exists()
    assert dst_file.read() == "hello world"
def test_merges_nested_directory(tmpdir):
    """Files merge into a nested directory that already exists (empty) in dst."""
    src_dir = tmpdir.mkdir("src")
    dst_dir = tmpdir.mkdir("dst")
    src_file = src_dir.mkdir("nested").join("file.txt")
    # Unlike the previous test, the destination's "nested" directory is
    # created upfront, but left unpopulated.
    dst_nested = dst_dir.mkdir("nested")
    src_file.write("hello world")

    move_or_merge(src=str(src_dir), dst=str(dst_dir))

    dst_file = dst_nested.join("file.txt")
    assert not src_file.exists()
    assert dst_file.exists()
    assert dst_file.read() == "hello world"
def test_merges_nested_directory_with_existing_file(tmpdir):
    """An identical file already present in the nested dst dir is accepted."""
    src_dir = tmpdir.mkdir("src")
    dst_dir = tmpdir.mkdir("dst")
    src_file = src_dir.mkdir("nested").join("file.txt")
    dst_file = dst_dir.mkdir("nested").join("file.txt")
    src_file.write("hello world")
    dst_file.write("hello world")

    move_or_merge(src=str(src_dir), dst=str(dst_dir))

    assert not src_file.exists()
    assert dst_file.exists()
    assert dst_file.read() == "hello world"
def test_merges_nested_directory_with_mismatched_existing_file(tmpdir):
    """A differing file in the nested dst directory aborts the merge."""
    src_dir = tmpdir.mkdir("src")
    dst_dir = tmpdir.mkdir("dst")
    src_file = src_dir.mkdir("nested").join("file.txt")
    dst_file = dst_dir.mkdir("nested").join("file.txt")
    src_file.write("hello world")
    dst_file.write("we come in peace")

    with pytest.raises(RuntimeError, match="dst exists and is different"):
        move_or_merge(src=str(src_dir), dst=str(dst_dir))
def test_ignores_existing_files_in_dst(tmpdir):
    """Unrelated files already in dst survive the merge untouched."""
    src_dir = tmpdir.mkdir("src")
    dst_dir = tmpdir.mkdir("dst")
    unrelated = dst_dir.join("philosophy.txt")
    unrelated.write("i think therefore i am")
    src_dir.join("file.txt").write("hello world")

    move_or_merge(src=str(src_dir), dst=str(dst_dir))

    assert unrelated.exists()
    assert unrelated.read() == "i think therefore i am"
| Python | 0.000001 | |
3c2c6002cf25dab301044f2dc4c2c3bbd99e121e | add script file | get-polymer-imports.py | get-polymer-imports.py | #!/usr/bin/env python
import os
import sys
#rootDir = "bower_components"
# Command-line handling runs at import time: this file is a script.
numArgs = len(sys.argv)
if numArgs <= 1:
    print 'usage: get_all_imports.py <bower_components directory> [prefix (default "..")]'
    exit(1)
rootDir = sys.argv[1]
# Only operate on a real bower directory to avoid walking arbitrary trees.
if not (rootDir == "bower_components" or rootDir == "components"):
    print 'Cowardly refusing to search non bower directory "' + rootDir + '"'
    exit(1)
# Prefix prepended to every emitted href (second CLI argument, default "..").
bowerPrefix = ".."
if numArgs >= 3:
    bowerPrefix = sys.argv[2]
def shouldInclude(f, path):
    """Return True if file *f* found under *path* is an importable .html component."""
    # Skip sources, demos, tests and the polymer/web-animations cores.
    for excluded in ('src', 'demo', 'test', 'polymer', 'web-animations'):
        if excluded in path:
            return False
    base, ext = os.path.splitext(f)
    return ext == ".html" and base != "index"
def getImports(dir):
    """Walk *dir* and collect prefixed paths of every importable component file."""
    found = []
    for root, dirs, files in os.walk(dir):
        prefix = os.path.join(bowerPrefix, root)
        found.extend(os.path.join(prefix, name)
                     for name in files
                     if shouldInclude(name, prefix))
    return found
def tagify(i):
    """Wrap an import path in an HTML <link rel="import"> tag."""
    return '<link rel="import" href="' + i + '">'
def htmlify(imports):
    """Convert a list of import paths into <link> tag strings."""
    return [tagify(path) for path in imports]
# polymer is special: its import must always be emitted first.
polymer = os.path.join(bowerPrefix, rootDir, "polymer/polymer.html")

def printHtml(html):
    """Print the polymer core import followed by every tag in *html*."""
    print tagify(polymer)
    for tag in html:
        print tag

# Script body: scan the bower directory and emit the full import list.
imports = getImports(rootDir)
html = htmlify(imports)
printHtml(html)
| Python | 0.000002 | |
30dcfef191666951a4084a4b9d9c135c9edb5de8 | Create check.py | check.py | check.py | # -*- coding: utf-8 -*-
__author__ = 'https://github.com/password123456/'
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
class bcolors:
    """ANSI colour/format escape codes for terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def DO_CHECK_CERTIFICATE(url):
try:
user_agent = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36', 'Connection':'keep-alive'}
r = requests.get(url, headers=user_agent, verify=True, timeout=05)
result = '%s %s %s' % ( url, r.status_code, r.headers['server'])
print '%s[-] OK::%s %s %s' % (bcolors.OKGREEN, bcolors.OKBLUE, result, bcolors.ENDC)
except Exception as e:
error = '%s' % e
if 'CERTIFICATE_VERIFY_FAILED' in error:
print '%s[-] ERROR::%s %s CERTIFICATE_VERIFY_FAILED %s' % (bcolors.WARNING, bcolors.FAIL, url, bcolors.ENDC)
else:
r.close()
def READ_URL():
    """Read hostnames from url.txt and certificate-check each as https://<host>."""
    # ``with`` guarantees the file is closed; the original left it open.
    with open('url.txt', 'r') as f:
        for line in f:
            line = line.strip()
            line = 'https://%s' % line
            DO_CHECK_CERTIFICATE(line)
def main():
    """Entry point: certificate-check every host listed in url.txt."""
    READ_URL()

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception, e:
        # Last-resort handler: print the failure in colour instead of a traceback.
        print '%s[-] Exception::%s%s' % (bcolors.WARNING, e, bcolors.ENDC)
| Python | 0.000001 | |
c5ae855af4c999ab2cbf6d4b5c77a0f04a84c13a | Update design-search-autocomplete-system.py | Python/design-search-autocomplete-system.py | Python/design-search-autocomplete-system.py | # Time: O(p^2), p is the length of the prefix
# Space: O(p * t + s), t is the number of nodes of trie
# , s is the size of the sentences
class TrieNode(object):
    """Trie node that keeps, per prefix, the three hottest sentences.

    ``infos`` holds ``[-times, sentence]`` pairs so that a plain ascending
    sort orders them by hotness first, then lexicographically; ``leaves``
    maps a character to the child node for the extended prefix.
    """

    def __init__(self):
        self.__TOP_COUNT = 3
        self.infos = []
        self.leaves = {}

    def insert(self, s, times):
        """Register *s* with hit count *times* on this node and every prefix node."""
        node = self
        node.add_info(s, times)
        for ch in s:
            node = node.leaves.setdefault(ch, TrieNode())
            node.add_info(s, times)

    def add_info(self, s, times):
        """Record or update *s* in this node's top list, trimming to the top 3."""
        entry = next((p for p in self.infos if p[1] == s), None)
        if entry is not None:
            entry[0] = -times
        else:
            self.infos.append([-times, s])
        self.infos.sort()
        if len(self.infos) > self.__TOP_COUNT:
            self.infos.pop()
class AutocompleteSystem(object):
    """Streaming autocomplete over historical sentences.

    Feed characters via :meth:`input`; '#' terminates and records the current
    sentence, any other character extends the search prefix and returns the
    current top-3 completions for it.
    """

    def __init__(self, sentences, times):
        """
        :type sentences: List[str]
        :type times: List[int]
        """
        # ``collections`` is not imported anywhere in this file; import it
        # locally so construction does not raise NameError.
        import collections
        self.__trie = TrieNode()
        self.__cur_node = self.__trie
        self.__search = []
        self.__sentence_to_count = collections.defaultdict(int)
        for sentence, count in zip(sentences, times):
            self.__sentence_to_count[sentence] = count
            self.__trie.insert(sentence, count)

    def input(self, c):
        """
        :type c: str
        :rtype: List[str]
        """
        result = []
        if c == '#':
            # Commit the finished sentence and reset the search state.
            self.__sentence_to_count["".join(self.__search)] += 1
            self.__trie.insert("".join(self.__search), self.__sentence_to_count["".join(self.__search)])
            self.__cur_node = self.__trie
            self.__search = []
        else:
            self.__search.append(c)
            if self.__cur_node:
                if c not in self.__cur_node.leaves:
                    # Dead prefix: no historical sentence matches from here on.
                    self.__cur_node = None
                    return []
                self.__cur_node = self.__cur_node.leaves[c]
                result = [p[1] for p in self.__cur_node.infos]
        return result
# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c)
| # Time: O(p^2), p is the length of the prefix
# Space: O(p * t + s), t is the number of nodes of trie
# , s is the size of the sentences
class TrieNode(object):
    """Trie node keeping, per prefix, the three hottest sentences.

    infos holds [-times, sentence] pairs so that an ascending sort orders
    them by hotness first, then lexicographically; leaves maps a character
    to the child node for the extended prefix.
    """
    def __init__(self):
        self.__TOP_COUNT = 3
        self.infos = []
        self.leaves = {}

    def insert(self, s, times):
        """Register *s* with hit count *times* on this node and every prefix node."""
        cur = self
        cur.add_info(s, times)
        for c in s:
            if c not in cur.leaves:
                cur.leaves[c] = TrieNode()
            cur = cur.leaves[c]
            cur.add_info(s, times)

    def add_info(self, s, times):
        """Record or update *s* in this node's top list, trimming to the top 3."""
        for p in self.infos:
            if p[1] == s:
                p[0] = -times
                break
        else:
            self.infos.append([-times, s])
        self.infos.sort()
        if len(self.infos) > self.__TOP_COUNT:
            self.infos.pop()
class AutocompleteSystem(object):
    """Streaming autocomplete over historical sentences.

    '#' commits the current sentence; any other character extends the
    search prefix and returns the current top-3 completions.

    NOTE(review): relies on ``collections`` being importable at call time,
    but no import is visible in this file -- verify against the full module.
    """
    def __init__(self, sentences, times):
        """
        :type sentences: List[str]
        :type times: List[int]
        """
        self.__trie = TrieNode()
        self.__cur_node = self.__trie
        self.__search = []
        self.__sentence_to_count = collections.defaultdict(int)
        for sentence, count in zip(sentences, times):
            self.__sentence_to_count[sentence] = count
            self.__trie.insert(sentence, count)

    def input(self, c):
        """
        :type c: str
        :rtype: List[str]
        """
        result = []
        if c == '#':
            # Commit the finished sentence and reset the search state.
            self.__sentence_to_count["".join(self.__search)] += 1
            self.__trie.insert("".join(self.__search), self.__sentence_to_count["".join(self.__search)])
            self.__cur_node = self.__trie
            self.__search = []
        else:
            self.__search.append(c)
            if self.__cur_node:
                if c not in self.__cur_node.leaves:
                    # Dead prefix: no historical sentence matches from here on.
                    self.__cur_node = None
                    return []
                self.__cur_node = self.__cur_node.leaves[c]
                result = [p[1] for p in self.__cur_node.infos]
        return result
# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c)
| Python | 0.000001 |
0b9810227b91b7ee7bb58cee2dccec992c752768 | add xmpp plugin | gozerlib/plugs/xmpp.py | gozerlib/plugs/xmpp.py | # gozerlib/plugs/xmpp.py
#
#
""" xmpp related commands. """
## gozerlib imports
from gozerlib.commands import cmnds
from gozerlib.examples import examples
from gozerlib.fleet import fleet
## commands
def handle_xmppinvite(bot, event):
    """ invite (subscribe to) a different user. """
    if not event.rest:
        event.missing("<list of jids>")
        return
    jabber_bot = fleet.getfirstjabber()
    if not jabber_bot:
        event.reply("can't find jabber bot in fleet")
        return
    for jid in event.args:
        jabber_bot.invite(jid)
    event.done()
# Register the command (OPER permission required) and a usage example.
cmnds.add("xmpp-invite", handle_xmppinvite, 'OPER')
examples.add("xmpp-invite", "invite a user.", "xmpp-invite jsoncloud@appspot.com")
| Python | 0 | |
b9986a12ac4370a9499e1ca3f006eb3a5050944f | Add spooler service module | cme/modules/spooler.py | cme/modules/spooler.py | # https://raw.githubusercontent.com/SecureAuthCorp/impacket/master/examples/rpcdump.py
from impacket.examples import logger
from impacket import uuid, version
from impacket.dcerpc.v5 import transport, epm
from impacket.dcerpc.v5.rpch import RPC_PROXY_INVALID_RPC_PORT_ERR, \
RPC_PROXY_CONN_A1_0X6BA_ERR, RPC_PROXY_CONN_A1_404_ERR, \
RPC_PROXY_RPC_OUT_DATA_404_ERR
KNOWN_PROTOCOLS = {
135: {'bindstr': r'ncacn_ip_tcp:%s[135]'},
445: {'bindstr': r'ncacn_np:%s[\pipe\epmapper]'},
}
class CMEModule:
    '''
    For printnightmare: detect if print spooler is enabled or not. Then use @cube0x0's project https://github.com/cube0x0/CVE-2021-1675 or Mimikatz from Benjamin Delpy

    Module by @mpgn_x64
    '''

    name = 'spooler'
    description = 'Detect if print spooler is enabled or not'
    supported_protocols = ['smb']
    opsec_safe = True
    multiple_hosts = True

    def options(self, context, module_options):
        '''
        PORT    Port to query the endpoint mapper on (default: 135)
        '''
        self.port = 135
        if 'PORT' in module_options:
            self.port = int(module_options['PORT'])

    def on_login(self, context, connection):
        """Dump the target's RPC endpoint map and flag an exposed MS-RPRN
        (print spooler) interface."""
        # ``logging`` is used throughout this method but never imported at
        # the top of this file (only impacket names are); import locally.
        import logging
        entries = []
        lmhash = getattr(connection, "lmhash", "")
        nthash = getattr(connection, "nthash", "")

        self.__stringbinding = KNOWN_PROTOCOLS[self.port]['bindstr'] % connection.host
        logging.debug('StringBinding %s' % self.__stringbinding)
        rpctransport = transport.DCERPCTransportFactory(self.__stringbinding)
        rpctransport.set_credentials(connection.username, connection.password, connection.domain, lmhash, nthash)
        rpctransport.setRemoteHost(connection.host)
        rpctransport.set_dport(self.port)

        try:
            entries = self.__fetchList(rpctransport)
        except Exception as e:
            error_text = 'Protocol failed: %s' % e
            logging.critical(error_text)

            if RPC_PROXY_INVALID_RPC_PORT_ERR in error_text or \
               RPC_PROXY_RPC_OUT_DATA_404_ERR in error_text or \
               RPC_PROXY_CONN_A1_404_ERR in error_text or \
               RPC_PROXY_CONN_A1_0X6BA_ERR in error_text:
                logging.critical("This usually means the target does not allow "
                                 "to connect to its epmapper using RpcProxy.")
                return

        # Group the returned endpoints by interface UUID.
        endpoints = {}
        for entry in entries:
            binding = epm.PrintStringBinding(entry['tower']['Floors'])
            tmpUUID = str(entry['tower']['Floors'][0])
            if tmpUUID not in endpoints:
                endpoints[tmpUUID] = {}
                endpoints[tmpUUID]['Bindings'] = list()
            if uuid.uuidtup_to_bin(uuid.string_to_uuidtup(tmpUUID))[:18] in epm.KNOWN_UUIDS:
                endpoints[tmpUUID]['EXE'] = epm.KNOWN_UUIDS[uuid.uuidtup_to_bin(uuid.string_to_uuidtup(tmpUUID))[:18]]
            else:
                endpoints[tmpUUID]['EXE'] = 'N/A'
            endpoints[tmpUUID]['annotation'] = entry['annotation'][:-1].decode('utf-8')
            endpoints[tmpUUID]['Bindings'].append(binding)

            if tmpUUID[:36] in epm.KNOWN_PROTOCOLS:
                endpoints[tmpUUID]['Protocol'] = epm.KNOWN_PROTOCOLS[tmpUUID[:36]]
            else:
                endpoints[tmpUUID]['Protocol'] = "N/A"

        # An MS-RPRN registration means the print spooler service is running.
        for endpoint in list(endpoints.keys()):
            if "MS-RPRN" in endpoints[endpoint]['Protocol']:
                logging.debug("Protocol: %s " % endpoints[endpoint]['Protocol'])
                logging.debug("Provider: %s " % endpoints[endpoint]['EXE'])
                logging.debug("UUID    : %s %s" % (endpoint, endpoints[endpoint]['annotation']))
                logging.debug("Bindings: ")
                for binding in endpoints[endpoint]['Bindings']:
                    logging.debug("  %s" % binding)
                logging.debug("")
                context.log.highlight('Spooler service enabled')
                break

        if entries:
            num = len(entries)
            if 1 == num:
                logging.info('Received one endpoint.')
            else:
                logging.info('Received %d endpoints.' % num)
        else:
            logging.info('No endpoints found.')

    def __fetchList(self, rpctransport):
        """Connect over *rpctransport* and return the endpoint-mapper entries."""
        dce = rpctransport.get_dce_rpc()
        dce.connect()
        resp = epm.hept_lookup(None, dce=dce)
        dce.disconnect()
        return resp
| Python | 0.000001 | |
e1fad0e5759908b3c1f6d3bafa2110cb4c26b7e1 | Add get_jpp_env command... | km3pipe/shell.py | km3pipe/shell.py | # coding=utf-8
# cython: profile=True
# Filename: shell.py
# cython: embedsignature=True
# pylint: disable=C0103
"""
Some shell helpers
"""
from __future__ import division, absolute_import, print_function
import os
from .logger import logging
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
log = logging.getLogger(__name__) # pylint: disable=C0103
def get_jpp_env(jpp_dir):
    """Return the environment dict of a loaded Jpp env.

    The returned env can be passed to `subprocess.Popen("J...", env=env)`
    to execute Jpp commands.
    """
    # Source the Jpp setup script in a subshell and capture the resulting
    # environment, one NAME=VALUE pair per line of `env` output.
    raw = os.popen("source {0}/setenv.sh {0} && env".format(jpp_dir)).read()
    env = {}
    for line in raw.split('\n'):
        if '=' not in line:
            continue
        # Split only on the first '=': values may legitimately contain '='
        # (the original ''.join(parts[1:]) silently dropped those signs).
        name, value = line.split('=', 1)
        env[name] = value
    return env
| Python | 0 | |
1360a7031d4389f2ecdef24ce3190a88e5f8f794 | add trivial pjit tests | tests/pjit_test.py | tests/pjit_test.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as np
from jax import test_util as jtu
from jax.api import pjit
from jax.interpreters.parallel import psum
from jax.config import config
config.parse_flags_with_absl()
class PmapTest(jtu.JaxTestCase):
  """Checks that pjit correctly maps a psum collective over axis 'i'."""

  @jtu.skip_on_devices("gpu")
  def testBasic(self):
    data = onp.arange(8., dtype=onp.float32).reshape(4, 2)
    mapped = pjit(lambda x: x - psum(x, 'i'),
                  axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)
    self.assertAllClose(mapped(data), data - data.sum(0), check_dtypes=False)

  @jtu.skip_on_devices("gpu")
  def testTupleOutput(self):
    data = onp.arange(8., dtype=onp.float32).reshape(4, 2)
    mapped = pjit(lambda x: (x - psum(x, 'i'),),
                  axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)
    self.assertAllClose(mapped(data), (data - data.sum(0),), check_dtypes=False)

  @jtu.skip_on_devices("gpu")
  def testTupleInput(self):
    data = onp.arange(8., dtype=onp.float32).reshape(4, 2)
    mapped = pjit(lambda x: x[0] - psum(x[0], 'i'),
                  axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)
    self.assertAllClose(mapped((data,)), data - data.sum(0), check_dtypes=False)
# Run the suite when executed directly.
if __name__ == '__main__':
  absltest.main()
| Python | 0.000144 | |
a8d359fd91cb6e92a034703d6203a2997b28c965 | Add utility to augment jsdoc with @method tag. | tools/yuimethod.py | tools/yuimethod.py | #!/usr/bin/env python
"""
Adds the @method tag to method comment blocks.
"""
import sys
import os
import re
import argparse
# Opening line of a doc block: '/**' (JS) or '###*' (CoffeeScript); captures the indent.
COMMENT_START_REGEX = re.compile('^(\s*)(/\*|###)\*\s*$')
# Closing line of a doc block: '*/' or '###'.
COMMENT_END_REGEX = re.compile('^\s*(\*/|###)\s*$')
# A '* @tag ...' line inside a doc block; captures the tag name.
COMMENT_TAG_REGEX = re.compile('^\s*\* @(\w+).*$')
# A method declaration line following a doc block; captures the method name.
METHOD_REGEX = re.compile('^\s*(\w+)(\(.*\)|: ).*$')
class FormatError(Exception):
    """Error reformatting the method comments (e.g. unusable destination)."""
def main(argv=sys.argv):
    """Filter each input file, writing augmented copies into the destination.

    Returns 0 on success; raises FormatError if the destination is unusable.
    NOTE(review): *argv* is accepted but unused; _parse_arguments reads
    sys.argv directly.
    """
    # Parse the command line arguments.
    in_files, opts = _parse_arguments()
    dest = opts.get('dest', 'out')
    if os.path.exists(dest):
        # The target location must be a directory.  (The old messages said
        # "download target/directory" -- a copy-paste from another tool;
        # this program writes filtered output files.)
        if not os.path.isdir(dest):
            raise FormatError("The output target is not a directory:"
                              " %s" % dest)
    else:
        # Make the output directory.
        os.makedirs(dest)
    # Filter each input file.
    for in_file in in_files:
        _, base_name = os.path.split(in_file)
        out_file = os.path.join(dest, base_name)
        with open(in_file) as ip:
            with open(out_file, 'w') as op:
                _filter(ip, op)
    return 0
# Adds the @method tag if necessary.
#
# @param ip the input stream
# @param op the output stream
def _filter(ip, op):
    """Copy *ip* to *op*, inserting a '* @method <name>' line into any doc
    block that precedes a method declaration and lacks one."""
    comment = []              # buffered lines of the current doc block
    in_block_comment = False  # True while inside a /** ... */ (or ###*) block
    has_method_tag = False    # True once '@method' was seen in the buffer
    indent = ''               # leading whitespace of the buffered block
    for line_nl in ip:
        line = line_nl.rstrip()
        if in_block_comment:
            comment.append(line)
            if COMMENT_END_REGEX.match(line):
                in_block_comment = False
            elif not has_method_tag:
                tag_match = COMMENT_TAG_REGEX.match(line)
                if tag_match and tag_match.group(1) == 'method':
                    has_method_tag = True
        else:
            if comment:
                # First line after a buffered block: if it declares a method,
                # emit '@method' just before the block's first tag line.
                method_match = METHOD_REGEX.match(line)
                method = method_match.group(1) if method_match else None
                for comment_line in comment:
                    if method and not has_method_tag and COMMENT_TAG_REGEX.match(comment_line):
                        method_tag = "%s * @method %s" % (indent, method)
                        print >>op, method_tag
                        has_method_tag = True
                    print >>op, comment_line
                has_method_tag = False
                del comment[:]
            comment_match = COMMENT_START_REGEX.match(line)
            if comment_match:
                in_block_comment = True
                indent = comment_match.group(1)
                comment.append(line)
            else:
                print >>op, line
    # Flush a comment block left open at end of input.
    for comment_line in comment:
        print >>op, comment_line
def _parse_arguments():
    """Parses the command line arguments.

    Returns a (input_paths, options_dict) pair; 'input' is removed from
    the options dict.
    """
    parser = argparse.ArgumentParser()
    # The input file path.
    parser.add_argument('input', nargs='+', metavar="PATH", help='the input files')
    # The output file path.
    parser.add_argument('-d', '--dest', help='the destination directory (default ./out)')
    args = vars(parser.parse_args())
    # Drop options the user didn't supply so callers can apply their own defaults.
    nonempty_args = dict((k, v) for k, v in args.iteritems() if v != None)
    return nonempty_args.pop('input'), nonempty_args
# Exit with main()'s status when run as a script.
if __name__ == '__main__':
    sys.exit(main())
| Python | 0 | |
7c1b0d4efd000fee8f065f2f5815075833811331 | Change file location and rename | scripts/reporting/svn_report.py | scripts/reporting/svn_report.py | '''
This file creates a .csv file containing the name of each laptop and its last changed date
'''
import argparse
import csv
from datetime import datetime, timezone
import os
import svn.local
import pandas as pd
'''
Constants -- paths for reports, default save names, SLA, columns, and sites
TO-DO: Change SLA_DAYS to a parser arg?
'''
REPORTS_DIR = '/fs/storage/laptops/ncanda'
DEFAULT_CSV = '/tmp/chris/import_reports/'
SLA_DAYS = 30
DATA_COLUMNS = ['laptop', 'date_updated', 'time_diff', 'sla', 'sla_percentage']
SITES = ['duke', 'sri', 'ohsu', 'upmc', 'ucsd']
def parse_args(arg_input=None):
    '''
    Build the CLI parser and parse *arg_input* (sys.argv when None).
    '''
    parser = argparse.ArgumentParser(
        description="Create a CSV file with all laptops and dates they were last modified")
    parser.add_argument("--file", action="store", default=DEFAULT_CSV,
                        help="Path of file name to save as")
    return parser.parse_args(arg_input)
def create_dataframe():
    '''
    Build a dataframe with one row per laptop directory: the laptop name,
    last SVN commit date, age, SLA window, and fraction of SLA consumed.
    '''
    # Grab all directories and set up SVN client
    directories = os.listdir(REPORTS_DIR)
    r = svn.local.LocalClient(REPORTS_DIR)
    df = pd.DataFrame(columns=DATA_COLUMNS)
    # Calculate time difference and appends to csv file
    for directory in directories:
        if (directory != ".svn"):
            # Get commit date, time difference from today, and percentage of SLA
            info = r.info(directory)
            mod_time = info['commit/date']
            time_diff = datetime.now(timezone.utc) - mod_time
            sla_percentage = time_diff.total_seconds() / (SLA_DAYS * 24 * 60 * 60)
            new_row = {
                'laptop': directory,
                'date_updated': mod_time,
                'time_diff': time_diff,
                'sla': SLA_DAYS,
                'sla_percentage': sla_percentage
            }
            # NOTE(review): DataFrame.append is deprecated in pandas >= 1.4;
            # collect rows in a list and build the frame once when upgrading.
            df = df.append(new_row, ignore_index=True)
    # Sort by descending SLA percentage
    df = df.sort_values(by=['sla_percentage'], ascending=False)
    return df
def write_to_csv(df, path=DEFAULT_CSV):
    '''
    Write the full report plus one CSV per site.

    df -- dataframe produced by create_dataframe()
    path -- directory prefix the CSV file names are appended to
            (fix: defaulting to None previously made ``path + '...'``
            raise TypeError; default to DEFAULT_CSV instead)
    '''
    # Full report across all sites.
    df.to_csv(path + 'reports.csv', index=False)
    # One file per site, matching the site name in the laptop column
    # case-insensitively.
    for site in SITES:
        site_df = df.loc[df['laptop'].str.contains(site, case=False)]
        site_df.to_csv(path + site + '.csv', index=False)
def main():
    '''
    Grabs necessary SVN data from folders and then calls to write to the csv
    '''
    args = parse_args()
    df = create_dataframe()
    write_to_csv(df, args.file)
# Script entry point.
if __name__ == "__main__":
    main()
| Python | 0.000001 | |
d04c5c777b1603e274a5ce722f21dbcdf083f416 | add rgw module | ceph_deploy/rgw.py | ceph_deploy/rgw.py | from cStringIO import StringIO
import errno
import logging
import os
from ceph_deploy import conf
from ceph_deploy import exc
from ceph_deploy import hosts
from ceph_deploy.util import system
from ceph_deploy.lib import remoto
from ceph_deploy.cliutil import priority
LOG = logging.getLogger(__name__)
def get_bootstrap_rgw_key(cluster):
    """
    Read the bootstrap-rgw key for `cluster`.

    Reads ``<cluster>.bootstrap-rgw.keyring`` from the current directory
    and returns its raw bytes.  Raises RuntimeError when the file is
    missing (user must run 'gatherkeys' first).
    """
    path = '{cluster}.bootstrap-rgw.keyring'.format(cluster=cluster)
    try:
        # open() instead of the file() builtin: file() was removed in
        # Python 3; open() behaves identically here on Python 2.
        with open(path, 'rb') as f:
            return f.read()
    except IOError:
        raise RuntimeError('bootstrap-rgw keyring not found; run \'gatherkeys\'')
def create_rgw(distro, name, cluster, init):
    """Create and start one RGW daemon on the remote host behind *distro*.

    distro -- connected host abstraction (provides .conn and .is_el)
    name -- daemon instance name, used in paths and the cephx entity name
    cluster -- cluster name, e.g. 'ceph'
    init -- init system marker to write and use: 'upstart' or 'sysvinit'

    Raises RuntimeError when the cephx key cannot be created.
    """
    conn = distro.conn
    # Data directory for this instance, e.g. /var/lib/ceph/rgw/ceph-gw1
    path = '/var/lib/ceph/rgw/{cluster}-{name}'.format(
        cluster=cluster,
        name=name
    )
    conn.remote_module.safe_mkdir(path)
    bootstrap_keyring = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format(
        cluster=cluster
    )
    keypath = os.path.join(path, 'keyring')
    # First attempt: create the daemon key with rwx caps using the
    # restricted bootstrap-rgw identity.
    stdout, stderr, returncode = remoto.process.check(
        conn,
        [
            'ceph',
            '--cluster', cluster,
            '--name', 'client.bootstrap-rgw',
            '--keyring', bootstrap_keyring,
            'auth', 'get-or-create', 'client.rgw.{name}'.format(name=name),
            'osd', 'allow rwx',
            'mon', 'allow rwx',
            '-o',
            os.path.join(keypath),
        ]
    )
    # EACCES is tolerated: the entity may already exist with different caps.
    if returncode > 0 and returncode != errno.EACCES:
        for line in stderr:
            conn.logger.error(line)
        for line in stdout:
            # yes stdout as err because this is an error
            conn.logger.error(line)
        conn.logger.error('exit code from command was: %s' % returncode)
        raise RuntimeError('could not create rgw')
    # NOTE(review): a second get-or-create with 'allow *' caps follows; for
    # an already-existing entity get-or-create does not change caps, so
    # confirm the intent of issuing both calls.
    remoto.process.check(
        conn,
        [
            'ceph',
            '--cluster', cluster,
            '--name', 'client.bootstrap-rgw',
            '--keyring', bootstrap_keyring,
            'auth', 'get-or-create', 'client.rgw.{name}'.format(name=name),
            'osd', 'allow *',
            'mon', 'allow *',
            '-o',
            os.path.join(keypath),
        ]
    )
    # Marker files: daemon is prepared, and which init system manages it.
    conn.remote_module.touch_file(os.path.join(path, 'done'))
    conn.remote_module.touch_file(os.path.join(path, init))
    if init == 'upstart':
        remoto.process.run(
            conn,
            [
                'initctl',
                'emit',
                'ceph-rgw',
                'cluster={cluster}'.format(cluster=cluster),
                'id={name}'.format(name=name),
            ],
            timeout=7
        )
    elif init == 'sysvinit':
        remoto.process.run(
            conn,
            [
                'service',
                'ceph',
                'start',
                'rgw.{name}'.format(name=name),
            ],
            timeout=7
        )
        if distro.is_el:
            # Enterprise Linux: also enable the service at boot.
            system.enable_service(distro.conn)
def rgw_create(args):
    """CLI handler: deploy one RGW daemon per ``(hostname, name)`` pair in
    ``args.rgw``.

    The cluster conf and bootstrap-rgw keyring are pushed to each host the
    first time it appears; failures are counted per host and reported as a
    single GenericError at the end.
    """
    cfg = conf.ceph.load(args)
    LOG.debug(
        'Deploying rgw, cluster %s hosts %s',
        args.cluster,
        ' '.join(':'.join(x or '' for x in t) for t in args.rgw),
    )
    if not args.rgw:
        raise exc.NeedHostError()
    # Local bootstrap key; raises RuntimeError if 'gatherkeys' was not run.
    key = get_bootstrap_rgw_key(cluster=args.cluster)
    bootstrapped = set()
    errors = 0
    for hostname, name in args.rgw:
        try:
            distro = hosts.get(hostname, username=args.username)
            rlogger = distro.conn.logger
            LOG.info(
                'Distro info: %s %s %s',
                distro.name,
                distro.release,
                distro.codename
            )
            LOG.debug('remote host will use %s', distro.init)
            if hostname not in bootstrapped:
                # One-time host setup: push ceph.conf and the keyring
                # before creating any daemon on this host.
                bootstrapped.add(hostname)
                LOG.debug('deploying rgw bootstrap to %s', hostname)
                conf_data = StringIO()
                cfg.write(conf_data)
                distro.conn.remote_module.write_conf(
                    args.cluster,
                    conf_data.getvalue(),
                    args.overwrite_conf,
                )
                path = '/var/lib/ceph/bootstrap-rgw/{cluster}.keyring'.format(
                    cluster=args.cluster,
                )
                if not distro.conn.remote_module.path_exists(path):
                    rlogger.warning('rgw keyring does not exist yet, creating one')
                    distro.conn.remote_module.write_keyring(path, key)
            create_rgw(distro, name, args.cluster, distro.init)
            distro.conn.exit()
        except RuntimeError as e:
            # Keep going on failure; report the total count afterwards.
            LOG.error(e)
            errors += 1
    if errors:
        raise exc.GenericError('Failed to create %d RGWs' % errors)
def rgw(args):
    """Dispatch the ``rgw`` CLI subcommand to its implementation."""
    if args.subcommand != 'create':
        # 'destroy' is accepted by the parser but has no implementation yet.
        LOG.error('subcommand %s not implemented', args.subcommand)
        return
    rgw_create(args)
def colon_separated(s):
    """Parse a ``HOST[:NAME]`` CLI value into a ``(host, name)`` tuple.

    With exactly one colon the two halves are split apart; otherwise the
    whole string is used for both components.
    """
    if s.count(':') == 1:
        host, name = s.split(':')
        return (host, name)
    return (s, s)
@priority(30)
def make(parser):
    """
    Deploy ceph RGW on remote hosts.
    """
    # 'destroy' is accepted here but rgw() only implements 'create'.
    parser.add_argument(
        'subcommand',
        metavar='SUBCOMMAND',
        choices=[
            'create',
            'destroy',
        ],
        help='create or destroy',
    )
    parser.add_argument(
        'rgw',
        metavar='HOST[:NAME]',
        nargs='*',
        type=colon_separated,
        help='host (and optionally the daemon name) to deploy on',
    )
    # Entry point invoked by the top-level CLI dispatcher.
    parser.set_defaults(
        func=rgw,
    )
| Python | 0 | |
f3f363e8911d3a635d68c7dbe767ee2585ed4f36 | Check for duplicates based on coordinates and select only one database (EU/NASA) | checkDuplicates.py | checkDuplicates.py | import pandas as pd
from astropy import coordinates as coord
from astropy import units as u
class Sweetcat:
    """Load SWEET-Cat database"""

    def __init__(self):
        # Hard-coded path of the combined EU/NASA SWEET-Cat dump (rdb format).
        self.fname_sc = 'WEBSITE_online_EU-NASA_full_database.rdb'
        # Loading the SweetCat database
        self.readSC()

    def readSC(self):
        # TODO: Use the ra and dec, and match with coordinates instead of name
        # stored in self.coordinates.
        # Read the current version of SWEET-Cat
        names_ = ['name', 'hd', 'ra', 'dec', 'V', 'Verr', 'p', 'perr',
                  'pflag', 'Teff', 'Tefferr', 'logg', 'logger',
                  'n1', 'n2', 'vt', 'vterr', 'feh', 'feherr', 'M', 'Merr',
                  'author', 'link', 'source', 'update', 'comment', 'database',
                  'n3']
        # SC = pd.read_csv('WEBSITE_online.rdb', delimiter='\t', names=names_)
        SC = pd.read_csv(self.fname_sc, delimiter='\t', names=names_)
        # Clean star names: lowercase, no spaces/dashes, for loose matching.
        self.sc_names = [x.lower().replace(' ', '').replace('-', '') for x in SC.name]
        self.sc_names = list(map(str.strip, self.sc_names))
        # Original star names
        self.sc_names_orig = [x.strip() for x in SC.name]
        # Coordinates of the stars in SWEET-Cat
        self.coordinates = SC.loc[:, ['ra', 'dec']]
        # SWEET-Cat (used to automatically update the database label)
        self.SC = SC
if __name__ == '__main__':
    # Loading SWEET Cat
    sc = Sweetcat()

    # Check for duplicates, subset of columns can be changed
    duplicates = sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)]
    print(duplicates)

    # Indexes of the duplicates
    indexes = duplicates.index

    # Remove a row
    # new_sc = sc.SC.drop([2728])
    # new_sc.to_csv('WEBSITE_online_EU-NASA_full_database_minusHD21749.rdb',
    #               sep='\t', index=False, header=False)

    # Select only the EU data.  Fix: the drop above is commented out, so
    # `new_sc` was undefined (NameError); filter the full table instead.
    sc_EU = sc.SC[sc.SC['database'].str.contains('EU')]

    # Drop the database column
    sc_like_old = sc_EU.drop(columns=['database'])
    # sc_like_old.to_csv('WEBSITE_online_EU-updated_04-03-2020.rdb',
    #                    sep='\t', index=False, header=False)
| Python | 0 | |
485bbe732dfb8539ffaf017f3a005896a7f3e503 | create subhash module | iscc_bench/imageid/subhash.py | iscc_bench/imageid/subhash.py | # -*- coding: utf-8 -*-
"""Test strategy with hashing mutiple shift invariant aligned patches
See: https://stackoverflow.com/a/20316789/51627
"""
def main():
    """Placeholder entry point; the patch-hashing experiment is not implemented yet."""
    return None


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
1742beec320d40e7859ea6f3b72e5fb3a7d1a51e | add flask hello world | hello.py | hello.py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
| Python | 0.999928 | |
f2b329d5ab98cfd1c1e9a9c28e373e1411a78967 | Convert text/plain to multipart/alternative | home/bin/parse_mail.py | home/bin/parse_mail.py | #!/usr/bin/python3
import email
from email import policy
import pypandoc
import fileinput
import subprocess
from email import charset
# use 8bit encoded utf-8 when applicable
charset.add_charset('utf-8', charset.SHORTEST, '8bit')

# read email from stdin and parse it with the SMTP policy
stdin_lines = []  # NOTE(review): unused
with fileinput.input(["-"]) as stdin:
    msg = email.message_from_string("".join(list(stdin)), policy=policy.SMTP)

# determine conversion:
# convert_simple -- a bare inline text/plain message
convert_simple = all([
    not msg.is_multipart(),
    msg.get_content_type() == "text/plain",
    msg.get_content_disposition() == "inline",
])
# convert_multi -- multipart/mixed with flat parts and exactly one inline
# text/plain body
convert_multi = all([
    msg.get_content_type() == "multipart/mixed",
    not any([part.is_multipart() for part in list(msg.walk())[1:]]),
    len([part for part in msg.walk() if part.get_content_disposition() == "inline" and part.get_content_type() == "text/plain"]) == 1,
])
convert = any([convert_simple, convert_multi])
if convert:
    # extract attachments and the single inline text/plain body
    attachments = []
    for part in msg.walk():
        if part.is_multipart():
            continue
        elif part.get_content_disposition() == "inline" and part.get_content_type() == "text/plain":
            inline = part.get_payload()
        else:
            attachments.append(part)
    # copy headers
    headers = [
        "Date",
        "From",
        "To",
        "CC",
        "Subject",
        "Message-ID",
    ]
    new_msg = email.message.EmailMessage(policy=policy.SMTP)
    for header in headers:
        if msg[header]:
            new_msg[header] = msg[header]
    new_msg.add_header("MIME-Version", "1.0")
    # make plain and html parts; the plain body is treated as Markdown and
    # rendered to HTML via pandoc
    text_plain = email.message.MIMEPart(policy=policy.SMTP)
    text_plain.set_content(inline)
    text_html = email.message.MIMEPart(policy=policy.SMTP)
    text_html.set_content(pypandoc.convert_text(inline, "html", format="md"), subtype="html")
    # attach attachments
    if convert_simple:
        # Plain message becomes multipart/alternative directly.
        new_msg.make_alternative()
        new_msg.attach(text_plain)
        new_msg.attach(text_html)
    elif convert_multi:
        # Mixed message: wrap the body in a nested multipart/alternative
        # and re-attach the original attachments afterwards.
        new_msg.make_mixed()
        alternative = email.message.EmailMessage(policy=policy.SMTP)
        alternative.add_header("MIME-Version", "1.0")
        alternative.make_alternative()
        alternative.add_header("Content-Disposition", "inline")
        alternative.attach(text_plain)
        alternative.attach(text_html)
        new_msg.attach(alternative)
    for part in attachments:
        new_msg.attach(part)
    out_msg = new_msg
else:
    out_msg = msg
# send via msmtp; recipients are taken from the message headers
subprocess.run(["/usr/bin/msmtp", "--read-recipients", "-a", "AAU"], input=out_msg.as_bytes())
#print(out_msg.as_string())
| Python | 0.999999 | |
3fabca45d6071c7fe333050264e8b92f23336c12 | fix type | kaczmarz.py | kaczmarz.py | import numpy as np
#import matplotlib.pyplot as plt
def kaczmarz_ART(A,b,maxIter=8,x0=None,lambdaRelax=1,stopmode=None,taudelta=0,nonneg=True,dbglvl=0):
    """Kaczmarz Algebraic Reconstruction Technique (ART) for A x = b.

    inputs:
    A: M x N 2-D projection matrix
    b: M element 1-D vector of observations
    maxIter: maximum number of ART sweeps over the rows of A
    x0: N element 1-D initial guess for x (zeros when None)
    lambdaRelax: relaxation parameter (see Herman Ch.11.2)
    stopmode: {None, 'MDP'} stop before maxIter if the solution is good
              enough (MDP is the Morozov Discrepancy Principle)
    taudelta: residual-norm threshold used by the MDP stop rule
    nonneg: enforce non-negativity of the solution after each row update
    dbglvl: >0 prints extra diagnostics

    outputs:
    x: the estimated solution of A x = b
    residual: the error b - A x
    number of sweeps performed

    References:
    Herman, G. "Fundamentals of Computerized Tomography", 2nd Ed., Springer, 2009
    Natterer, F. "The mathematics of computed tomography", SIAM, 2001

    TODO: add randomized ART, and other variants
    """
    #%% user parameters
    if dbglvl>0:
        print(('Lambda Relaxation: ' + str(lambdaRelax)))
    n = A.shape[1]
    if x0 is None:
        print('kaczmarz: using zeros to initialize x0')
        x0 = np.zeros(n)  # 1-D vector
    if stopmode is None:  # just use number of iterations
        sr = 0
    elif stopmode == 'MDP' or stopmode == 'DP':
        sr = 1
        if taudelta == 0:
            print('you used tauDelta=0, which effectively disables Morozov discrepancy principle')
    else:
        sr = 0
        print("didn't understand stopmode command, defaulted to maximum iterations")
    #%% skip rows without any positive entry
    # NOTE(review): rows whose only non-zero entries are negative are also
    # skipped by A>0; confirm this is intended (A != 0 would keep them).
    goodRows = np.where(np.any(A > 0, axis=1))[0]
    #%% speedup: compute squared row norms once, up front
    RowNormSq = np.linalg.norm(A, ord=2, axis=1)**2
    x = np.copy(x0)  # leave the caller's x0 alone
    iIter = 0
    stop = False  # FIXME: the loop always performs at least one sweep
    while not stop:  # one full sweep over the usable rows per iteration
        for iRow in goodRows:
            # scalar row update: x += lambda*(b_i - <a_i,x>)/||a_i||^2 * a_i
            num = (b[iRow] - A[iRow, :].dot(x))
            x = x + np.dot(lambdaRelax * num / RowNormSq[iRow], A[iRow, :])
            if nonneg:
                x[x < 0] = 0
        residual = b - A.dot(x)
        # Fix: always compute the residual norm.  The progress print below
        # references it even when no stop rule is active, which previously
        # raised NameError when stopmode was None and iIter hit 200.
        residualNorm = np.linalg.norm(residual, 2)
        iIter += 1
        # stop rule: iteration cap, optionally tightened by MDP
        stop = iIter > maxIter
        if sr == 1:
            stop |= (residualNorm <= taudelta)
        if iIter % 200 == 0:  # periodic progress report for user comfort
            print(('kaczmarz: Iteration ' + str(iIter) + ', ||residual|| = ' + str(residualNorm)))
    return x, residual, iIter - 1
| Python | 0.000003 | |
ef192ebd7679b96317cc6d878fb82c925787710d | Add Pattern based filterer. | source/bark/filterer/pattern.py | source/bark/filterer/pattern.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import re
from .base import Filterer
class Pattern(Filterer):
    '''Filter logs by matching a regular expression against one log key.'''

    INCLUDE, EXCLUDE = ('include', 'exclude')

    def __init__(self, pattern, key='name', mode=INCLUDE):
        '''Initialise filterer with *pattern* and *key* to test.

        A plain string *pattern* is compiled into a regular expression
        object; an already-compiled pattern is used as-is.

        *mode* selects the polarity: 'exclude' filters logs that match the
        pattern, while 'include' filters logs that do not match it.
        '''
        super(Pattern, self).__init__()
        if isinstance(pattern, basestring):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.key = key
        self.mode = mode

    def filter(self, log):
        '''Return True when *log* should be filtered out.

        A log lacking the configured key always passes; a log whose value
        for the key is not a string is always filtered.
        '''
        if self.key not in log:
            return False
        value = log[self.key]
        if not isinstance(value, basestring):
            # Pattern matching only makes sense on string values.
            return True
        is_match = self.pattern.search(value) is not None
        if self.mode == self.EXCLUDE:
            return is_match
        if self.mode == self.INCLUDE:
            return not is_match
        return False
| Python | 0 | |
bcd485f240a7eb6373f847d6cc9dd07ebd2c3ef2 | add test case for redeem of default coupon (user limit=1, not bound to user) | coupons/tests/test_use_cases.py | coupons/tests/test_use_cases.py | from datetime import datetime
from django.contrib.auth.models import User
from django.utils import timezone
from django.test import TestCase
from coupons.forms import CouponForm
from coupons.models import Coupon
class DefaultCouponTestCase(TestCase):
    """Redeem flow for a default coupon (user limit 1, not bound to a user)."""

    def setUp(self):
        self.user = User.objects.create(username="user1")
        self.coupon = Coupon.objects.create_coupon('monetary', 100)

    def test_redeem(self):
        self.coupon.redeem(self.user)
        # Coupon is marked redeemed and exactly one redemption is recorded
        # for the redeeming user with a timestamp.
        # (fix: assertEquals is a deprecated alias removed in Python 3.12)
        self.assertTrue(self.coupon.is_redeemed)
        self.assertEqual(self.coupon.users.count(), 1)
        self.assertIsInstance(self.coupon.users.first().redeemed_at, datetime)
        self.assertEqual(self.coupon.users.first().user, self.user)
| Python | 0 | |
424d7107944f3ecb8ebf78a62dc35428952b380b | add reindex script | contrib/reindex.py | contrib/reindex.py | #!/bin/python
# coding: utf-8
import signal
import argparse
from datetime import datetime
# CLI: choose the database backend and path, then reindex every object.
argparser = argparse.ArgumentParser()
argparser.add_argument("--database-type", "-T", choices=["nic", "ipam"],
                       default="nic")
argparser.add_argument("database")
args = argparser.parse_args()
# Backend modules are imported lazily so only the selected one is required.
if args.database_type == "nic":
    import lglass_sql.nic
    db = lglass_sql.nic.NicDatabase(args.database)
elif args.database_type == "ipam":
    import lipam.sql
    db = lipam.sql.IPAMDatabase(args.database)
n = 0
start = datetime.now()
def sigusr1(*args):
    # Progress report on SIGUSR1: objects processed and elapsed time.
    global n
    print("Processed {} objects in {}".format(n, datetime.now() - start))
signal.signal(signal.SIGUSR1, sigusr1)
# Reindex all objects in one session; commit once at the end.
with db.session() as sess:
    for obj in sess.find():
        n += 1
        sess.reindex(obj)
    sess.commit()
| Python | 0.000001 | |
65a1c06b6e5d7ec37ac232ab048b3cc541b75a45 | refactor Coupon | customermanage/models/Coupon.py | customermanage/models/Coupon.py | from django.db import models
from storemanage.models.Currency import Currency
from storemanage.models.Ticket import Ticket
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
# Create your models here.
class Coupon(models.Model):
    # Ticket this coupon belongs to; deleting the ticket cascades.
    ticket = models.ForeignKey(Ticket, on_delete=models.CASCADE)
    # Owning user; deleting the user cascades.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Nullable expiry/validity timestamp -- presumably "valid until";
    # TODO(review): confirm intended semantics.
    remaining_date = models.DateTimeField(null=True)
    active = models.BooleanField(default=True)
    # Fix: pass the callable `dict`, not `dict()`. A dict instance would be
    # evaluated once at import time and shared as the default across rows;
    # Django requires a callable for mutable field defaults.
    attribute = JSONField(default=dict)
| Python | 0.999424 | |
837dc69a430161f6b942b629793ec1d37db780d4 | Create virtool.db.settings | virtool/db/settings.py | virtool/db/settings.py | import logging
import pymongo.errors
logger = logging.getLogger(__name__)
async def initialize(db):
    """Insert the default settings document, ignoring an existing one."""
    defaults = {
        "_id": "settings",
        "enable_sentry": {"type": "boolean", "default": True},
        "sample_group": "none",
        "sample_group_read": True,
        "sample_group_write": False,
        "sample_all_read": True,
        "sample_all_write": False,
        "sample_unique_names": True,
        "hmm_slug": "virtool/virtool-hmm",
        "software_channel": "stable",
        "minimum_password_length": 8,
        "default_source_types": ["isolate", "strain"]
    }

    try:
        await db.settings.insert_one(defaults)
    except pymongo.errors.DuplicateKeyError:
        # The settings document already exists; nothing to do.
        logger.debug("Settings collection already initialized.")
async def update(db, updates):
    """Apply *updates* to the settings document via ``$set`` and return it."""
    document = await db.settings.find_one_and_update(
        {"_id": "settings"},
        {"$set": updates}
    )
    return document
| Python | 0.000001 | |
094020855126721827342da98992a8c057d1a135 | fix memory benchmark for reference builds. | tools/perf/perf_tools/memory_benchmark.py | tools/perf/perf_tools/memory_benchmark.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import multi_page_benchmark
MEMORY_HISTOGRAMS = [
{'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
{'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'},
{'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'}]
class MemoryBenchmark(multi_page_benchmark.MultiPageBenchmark):
  """Collects V8 memory histograms for pages tagged with 'stress_memory'."""

  def __init__(self):
    super(MemoryBenchmark, self).__init__('stress_memory')

  def CustomizeBrowserOptions(self, options):
    """Enable DOM automation and force generic (uncustomized) histograms."""
    options.AppendExtraBrowserArg('--dom-automation')

    # For a hard-coded set of Google pages (such as GMail), we produce custom
    # memory histograms (V8.Something_gmail) instead of the generic histograms
    # (V8.Something), if we detect that a renderer is only rendering this page
    # and no other pages. For this test, we need to disable histogram
    # customizing, so that we get the same generic histograms produced for all
    # pages.
    options.AppendExtraBrowserArg('--disable-histogram-customizer')

  def CanRunForPage(self, page):
    # Only pages explicitly tagged for memory stress are measured.
    return hasattr(page, 'stress_memory')

  def MeasurePage(self, page, tab, results):
    """Read each configured V8 histogram from the renderer and record it."""
    for histogram in MEMORY_HISTOGRAMS:
      name = histogram['name']
      # getHistogram may be absent (e.g. reference builds); fall back to ''.
      data = tab.runtime.Evaluate(
          'window.domAutomationController.getHistogram ? '
          'window.domAutomationController.getHistogram("%s") : ""' % name)
      if data:
        results.Add(name.replace('.', '_'), histogram['units'], data,
                    data_type='histogram')
| # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import multi_page_benchmark
MEMORY_HISTOGRAMS = [
{'name': 'V8.MemoryExternalFragmentationTotal', 'units': 'percent'},
{'name': 'V8.MemoryHeapSampleTotalCommitted', 'units': 'kb'},
{'name': 'V8.MemoryHeapSampleTotalUsed', 'units': 'kb'}]
class MemoryBenchmark(multi_page_benchmark.MultiPageBenchmark):
  """Collects V8 memory histograms for pages tagged with 'stress_memory'."""

  def __init__(self):
    super(MemoryBenchmark, self).__init__('stress_memory')

  def CustomizeBrowserOptions(self, options):
    """Enable DOM automation and force generic (uncustomized) histograms."""
    options.AppendExtraBrowserArg('--dom-automation')

    # For a hard-coded set of Google pages (such as GMail), we produce custom
    # memory histograms (V8.Something_gmail) instead of the generic histograms
    # (V8.Something), if we detect that a renderer is only rendering this page
    # and no other pages. For this test, we need to disable histogram
    # customizing, so that we get the same generic histograms produced for all
    # pages.
    options.AppendExtraBrowserArg('--disable-histogram-customizer')

  def CanRunForPage(self, page):
    # Only pages explicitly tagged for memory stress are measured.
    return hasattr(page, 'stress_memory')

  def MeasurePage(self, page, tab, results):
    """Read each configured V8 histogram from the renderer and record it."""
    for histogram in MEMORY_HISTOGRAMS:
      name = histogram['name']
      # Calls getHistogram unconditionally; assumes the automation hook
      # exists in the browser under test.
      data = tab.runtime.Evaluate(
          'window.domAutomationController.getHistogram("%s")' % name)
      results.Add(name.replace('.', '_'), histogram['units'], data,
                  data_type='histogram')
| Python | 0.000017 |
4e797dd9c8b43ab62f70b0515dee9e6b5c17d043 | Create secret.py | propalyzer_site/propalyzer_site/secret.py | propalyzer_site/propalyzer_site/secret.py | class Secret():
SECRET_KEY = ''
| Python | 0.000032 | |
16a78150d9b208597e73157155646b2d534c11a1 | add methods for parinx | libcloudcli/apps/provider.py | libcloudcli/apps/provider.py | # -*- coding:utf-8 -*-
import inspect
import re
from libcloud.utils.misc import get_driver
from parinx import ARGS_TO_XHEADERS_DICT, parse_args, \
parse_docstring, get_method_docstring
from libcloudcli.errors import ProviderNotSupportedError,\
MissingArguments, MissingHeadersError, MethodParsingException,\
NoSuchOperationError
from libcloud_rest.api.entries import Entry
from parinx.utils import json
class DriverMethod(object):
    """Wraps one method of a libcloud driver: parses its docstring into
    argument/return entries, exposes a JSON-friendly description, and can
    invoke the method from JSON-encoded argument data."""

    # Matches a prefixed-brace type name like "L{Node}"; substitution keeps
    # only the inner identifier.
    _type_name_pattern = r'.\{([_0-9a-zA-Z]+)\}'

    def __init__(self, driver_obj, method_name):
        """Parse *method_name* of *driver_obj* (an instance or a class).

        Raises NoSuchOperationError when the attribute is not a method and
        MethodParsingException when the docstring is empty or does not
        describe every positional argument.
        """
        if inspect.isclass(driver_obj):
            self.driver_cls = driver_obj
        else:
            self.driver_cls = driver_obj.__class__
        self.driver_obj = driver_obj
        self.method_name = method_name
        self.method = getattr(self.driver_obj, method_name, None)
        if not inspect.ismethod(self.method):
            raise NoSuchOperationError()
        method_doc = get_method_docstring(self.driver_cls, method_name)
        if not method_doc:
            raise MethodParsingException('Empty docstring')
        argspec_arg = parse_args(self.method)
        docstring_parse_result = parse_docstring(method_doc, self.driver_cls)
        self.description = docstring_parse_result['description']
        docstring_args = docstring_parse_result['arguments']
        #check vargs
        # Every argument present in the signature must also be documented;
        # an argument is required if either source marks it required.
        self.vargs_entries = []
        for name, arg_info in argspec_arg.iteritems():
            if name in docstring_args:
                docstring_arg = docstring_args[name]
                entry_kwargs = {
                    'name': name,
                    'description': docstring_arg['description'],
                    'type_name': docstring_arg['type_name'],
                    'required': (docstring_arg['required'] or
                                 arg_info['required']),
                }
                if not entry_kwargs['required'] and 'default' in arg_info:
                    entry_kwargs['default'] = arg_info['default']
                self.vargs_entries.append(Entry(**entry_kwargs))
            else:
                raise MethodParsingException(
                    '%s %s not described in docstring' % (method_name, name))
        #update kwargs
        # Arguments documented but absent from the signature become kwargs.
        kwargs = set(docstring_args).difference(argspec_arg)
        self.kwargs_entries = [Entry(arg_name, **docstring_args[arg_name])
                               for arg_name in kwargs]
        method_return = docstring_parse_result['return']
        self.result_entry = Entry('', method_return['type_name'],
                                  method_return['description'], True)

    @classmethod
    def _remove_type_name_brackets(cls, type_name):
        # "L{Node}" -> "Node" (the leading prefix character is consumed too).
        return re.sub(cls._type_name_pattern, r'\1', type_name)

    def get_description(self):
        """Return a dict describing the method: name, description, flattened
        argument list, and the return type/description."""
        result_arguments = []
        for entry in self.vargs_entries:
            result_arguments.extend(entry.get_arguments())
        for entry in self.kwargs_entries:
            result_arguments.extend(entry.get_arguments())
        result = {'name': self.method_name,
                  'description': self.description,
                  'arguments': result_arguments,
                  'return': {
                      'type': self._remove_type_name_brackets(
                          self.result_entry.type_name),
                      'description': self.result_entry.description}
                  }
        return result

    def invoke_result_to_json(self, value):
        """Serialize an invocation result according to the return entry."""
        return self.result_entry.to_json(value)

    def invoke(self, data):
        """Call the wrapped method with arguments decoded from JSON *data*.

        Missing optional kwargs are skipped; a missing required kwarg
        re-raises MissingArguments.  For '__init__' the driver class itself
        is instantiated instead of calling a bound method.
        """
        vargs = [e.from_json(data, self.driver_obj)
                 for e in self.vargs_entries]
        kwargs = {}
        for kw_entry in self.kwargs_entries:
            try:
                kwargs[kw_entry.name] = kw_entry.from_json(data,
                                                           self.driver_obj)
            except MissingArguments:
                if kw_entry.required:
                    raise
        if self.method_name == '__init__':
            return self.driver_cls(*vargs, **kwargs)
        return self.method(*vargs, **kwargs)
def get_providers_info(providers):
    """
    List of all supported providers.

    @param providers: object that contain supported providers.
    @type providers: L{libcloud.types.Provider}

    @return C{list} of C{dict} objects
    """
    drivers_by_id = get_providers_dict(providers.DRIVERS, providers.Provider)
    return [
        {
            'id': provider_id,
            'friendly_name': getattr(driver_cls, 'name', ''),
            'website': getattr(driver_cls, 'website', ''),
        }
        for provider_id, driver_cls in drivers_by_id.items()
    ]
def get_providers_dict(drivers, providers):
    """Map each supported provider name (uppercased) to its driver class.

    Private attributes of *providers* are skipped, as are providers with
    no registered driver.
    """
    supported = {}
    public_names = [attr for attr in providers.__dict__.keys()
                    if not attr.startswith('_')]
    for attr_name in public_names:
        normalized = attr_name.upper()
        try:
            supported[normalized] = get_driver_by_provider_name(
                drivers, providers, normalized)
        except ProviderNotSupportedError:
            pass
    return supported
def get_driver_by_provider_name(drivers, providers, provider_name):
    """
    Get a driver by provider name

    If the provider is unknown, will raise an exception.

    @param drivers: Dictionary containing valid providers.

    @param providers: object that contain supported providers.
    @type providers: L{libcloud.types.Provider}

    @param provider_name: String with a provider name (required)
    @type provider_name: str

    @return: L{NodeDriver} class
    """
    normalized = provider_name.upper()
    provider = getattr(providers, normalized, None)
    try:
        return get_driver(drivers, provider)
    except AttributeError:
        raise ProviderNotSupportedError(provider=normalized)
def get_driver_instance(Driver, **kwargs):
    """
    Instantiate *Driver* by routing **kwargs through the parsed
    '__init__' signature.

    @param Driver: driver class to instantiate
    @param kwargs: constructor arguments, validated against the docstring
    @return: the constructed driver instance

    Raises MissingHeadersError listing the X-header names of any missing
    required constructor arguments.
    """
    try:
        json_data = json.dumps(kwargs)
        driver_method = DriverMethod(Driver, '__init__')
        return driver_method.invoke(json_data)
    except MissingArguments, error:
        # Translate missing argument names to their HTTP header spelling.
        str_repr = ', '.join([ARGS_TO_XHEADERS_DICT.get(arg, arg)
                              for arg in error.arguments])
        raise MissingHeadersError(headers=str_repr)
3d8fe5cfc64c3667f938fa221353489846a9aeb0 | Add test of F.diagonal | tests/chainer_tests/functions_tests/array_tests/test_diagonal.py | tests/chainer_tests/functions_tests/array_tests/test_diagonal.py | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
    [
        {'shape': (2, 4, 6), 'args': (1, 2, 0)},
        {'shape': (2, 4, 6), 'args': (-1, 2, 0)},
        {'shape': (2, 4, 6), 'args': (0, -1, -2)},
        {'shape': (2, 4, 6), 'args': (0, -1, 1)},
    ],
    [
        {'dtype': numpy.float16},
        {'dtype': numpy.float32},
        {'dtype': numpy.float64},
    ],
))
class TestDiagonal(unittest.TestCase):
    """Forward/backward/double-backward tests for F.diagonal, parameterized
    over (offset, axis1, axis2) triples and floating dtypes."""

    def setUp(self):
        # Input, expected output (via numpy.ndarray.diagonal), and gradients
        # with matching shapes.
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.y_expected = self.x.diagonal(*self.args)
        self.y_shape = self.y_expected.shape
        self.gy = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
        self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)

        self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
        if self.dtype == numpy.float16:
            # float16 is too coarse for gradient checks; compute in float64.
            self.check_double_backward_options.update(dtype=numpy.float64)

    def check_forward(self, x_data):
        x = chainer.Variable(x_data)
        y = functions.diagonal(x, *self.args)
        testing.assert_allclose(y.data, self.y_expected)

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))

    def check_backward(self, x_data, y_grad):
        gradient_check.check_backward(
            lambda x: functions.diagonal(x, *self.args),
            x_data, y_grad, dtype=numpy.float64)

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    def check_double_backward(self, x_data, y_grad, x_grad_grad):
        def f(x):
            # Square the output so the second-order gradient is non-trivial.
            x = functions.diagonal(x, *self.args)
            return x * x

        gradient_check.check_double_backward(
            f, x_data, y_grad, x_grad_grad,
            **self.check_double_backward_options)

    def test_double_backward_cpu(self):
        self.check_double_backward(self.x, self.gy, self.ggx)

    @attr.gpu
    def test_double_backward_gpu(self):
        self.check_double_backward(
            cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))


testing.run_module(__name__, __file__)
| Python | 0.999693 | |
254564ceb905dc512693febed44e908c27f249ce | Add tests for cupyx.scipy.ndimage.label | tests/cupyx_tests/scipy_tests/ndimage_tests/test_measurements.py | tests/cupyx_tests/scipy_tests/ndimage_tests/test_measurements.py | import unittest
import numpy
from cupy import testing
import cupyx.scipy.ndimage # NOQA
try:
import scipy.ndimage # NOQA
except ImportError:
pass
def _generate_binary_structure(rank, connectivity):
if connectivity < 1:
connectivity = 1
if rank < 1:
return numpy.array(True, dtype=bool)
output = numpy.fabs(numpy.indices([3] * rank) - 1)
output = numpy.add.reduce(output, 0)
return output <= connectivity
@testing.parameterize(*testing.product({
    'ndim': [1, 2, 3, 4],
    'size': [50, 100],
    'density': [0.2, 0.3, 0.4],
    'connectivity': [None, 2, 3],
    'x_dtype': [bool, numpy.int8, numpy.int32, numpy.int64,
                numpy.float32, numpy.float64],
    'output': [None, numpy.int32, numpy.int64],
    'o_type': [None, 'ndarray']
}))
@testing.gpu
@testing.with_requires('scipy')
class TestLabel(unittest.TestCase):
    """Compare cupyx.scipy.ndimage.label with scipy.ndimage.label over random
    binary inputs, structures, dtypes, and output modes."""

    @testing.numpy_cupy_array_equal(scipy_name='scp')
    def test_label(self, xp, scp):
        # Random binary volume: total elements ~ self.size spread over
        # self.ndim axes of slightly increasing length.
        size = int(pow(self.size, 1 / self.ndim))
        x_shape = range(size, size + self.ndim)
        x = xp.zeros(x_shape, dtype=self.x_dtype)
        # x[numpy.where(testing.shaped_random(x_shape, xp) < self.density)] = 1
        x[testing.shaped_random(x_shape, xp) < self.density] = 1
        if self.connectivity is None:
            structure = None
        else:
            structure = _generate_binary_structure(self.ndim,
                                                   self.connectivity)
        if self.o_type == 'ndarray' and self.output is not None:
            # Pre-allocated output-array variant: labels are written into
            # `output` in place and only the feature count is returned.
            output = xp.empty(x_shape, dtype=self.output)
            num_features = scp.ndimage.label(x, structure=structure,
                                             output=output)
            return output
        labels, num_features = scp.ndimage.label(x, structure=structure,
                                                 output=self.output)
        return labels
| Python | 0 | |
2a6907ddf9c7b5df2e1b59c8feeb0fa4bd4b5752 | add rudimentary validation tests for azure | tests/validation/cattlevalidationtest/core/test_machine_azure.py | tests/validation/cattlevalidationtest/core/test_machine_azure.py | import logging
from common_fixtures import * # NOQA
DEFAULT_TIMEOUT = 900
subscription_id = os.environ.get('AZURE_SUBSCRIPTION_ID')
subscription_cert = os.environ.get('AZURE_SUBSCRIPTION_CERT')
# Use azure settings from environment variables , if set
i = 'b39f27a8b8c64d52b05eac6a62ebad85__'
i = i + 'Ubuntu-14_04_1-LTS-amd64-server-20140927-en-us-30GB'
image = os.environ.get('AZURE_IMAGE', i)
location = os.environ.get('AZURE_LOCATION', "West US")
username = os.environ.get('AZURE_USERNAME', "")
password = os.environ.get('AZURE_PASSWORD', "")
size = os.environ.get('AZURE_SIZE', "Small")
if_machine_azure = pytest.mark.skipif(
not os.environ.get('AZURE_SUBSCRIPTION_ID') or
not os.environ.get('AZURE_SUBSCRIPTION_CERT'),
reason='Azure SubscriptionId/SubscriptionCert/AuthToken is not set')
# Get logger
logger = logging.getLogger(__name__)
@pytest.fixture(scope='session', autouse=True)
def register_host(admin_client):
    """Point Cattle's "api.host" setting at the host portion of the test URL.

    Runs once per session before any test (autouse fixture).
    """
    test_url = cattle_url()
    # Strip the scheme ("http://" / "https://"), keeping host[:port]/path.
    start = test_url.index("//") + 2
    api_host = test_url[start:]
    admin_client.create_setting(name="api.host", value=api_host)
@if_machine_azure
def test_azure_machine_all_params(client):
    """Create an Azure docker-machine with every supported azureConfig field
    set and drive it through the full create/register/remove life cycle."""
    name = random_str()
    create_args = {"name": name,
                   "azureConfig": {"subscriptionId": subscription_id,
                                   "subscriptionCert": subscription_cert,
                                   "image": image,
                                   "location": location,
                                   "username": username,
                                   "password": password,
                                   "size": size}}
    # Expected azureConfig values; currently not verified inside
    # azure_machine_life_cycle -- kept for future field-by-field checks.
    expected_values = {"subscriptionId": subscription_id,
                       "subscriptionCert": subscription_cert,
                       "image": image,
                       "location": location,
                       "username": username,
                       "password": password,
                       "size": size}

    azure_machine_life_cycle(client, create_args, expected_values)
def azure_machine_life_cycle(client, configs, expected_values):
    """Create a machine, wait for its host to register, then remove both.

    ``expected_values`` is accepted but not currently verified against the
    created machine.
    """
    machine = client.create_machine(**configs)

    machine = client.wait_success(machine, timeout=DEFAULT_TIMEOUT)
    assert machine.state == 'active'

    # Wait until host shows up with some physicalHostId
    machine = wait_for_host(client, machine)
    host = machine.hosts()[0]
    assert host.state == 'active'
    assert machine.accountId == host.accountId

    # Remove the machine and make sure that the host
    # and the machine get removed
    machine = client.wait_success(machine.remove())
    assert machine.state == 'removed'

    host = client.reload(machine.hosts()[0])
    assert host.state == 'removed'
def wait_for_host(client, machine):
    """Block until exactly one host is associated with *machine* and that
    host reaches the 'active' state.

    Returns the machine object (not the host).
    """
    wait_for_condition(client,
                       machine,
                       lambda x: len(x.hosts()) == 1,
                       lambda x: 'Number of hosts associated with machine ' +
                                 str(len(x.hosts())),
                       DEFAULT_TIMEOUT)

    host = machine.hosts()[0]
    host = wait_for_condition(client,
                              host,
                              lambda x: x.state == 'active',
                              lambda x: 'Host state is ' + x.state
                              )
    return machine
| Python | 0 | |
e9091be4ae9ddf0cb83bd7535c4ced5bb2d691d2 | add config_edit.py | castiron/lib/castiron/actions/config_edit.py | castiron/lib/castiron/actions/config_edit.py | from castiron.tools import Action, register_actions
import os
import re
class G:
    # Process-wide registry of edit objects queued by edits(); consumed by
    # ConfigEditAction when the action runs.
    all_edits = []
def _file_contains_re(runner, path, contains_re):
    """Return True when any line of *path* matches the compiled regex
    *contains_re*.

    *path* may contain '~' and symlinks; it is expanded and resolved first.
    A missing file counts as "does not contain". *runner* is accepted for
    interface symmetry with the other helpers but is unused here.
    """
    resolved = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(resolved):
        return False
    with open(resolved) as stream:
        return any(contains_re.search(line.rstrip()) for line in stream)
def _append_text(runner, path, text):
    """Append *text* to *path*, preceded by a blank line and followed by a
    trailing newline when *text* lacks one.

    *path* may contain '~' and symlinks; it is expanded and resolved first.
    The file is created if it does not exist. *runner* is unused but kept
    for interface symmetry with the other helpers.
    """
    real_path = os.path.realpath(os.path.expanduser(path))
    # Mode 'a' already creates a missing file, so the previous
    # "'a' if exists else 'w'" switch was redundant.
    with open(real_path, 'a') as f:
        f.write('\n')
        f.write(text)
        if not text.endswith('\n'):
            f.write('\n')
class EditBase(object):
    """Common base class for configuration-file edits.

    Simply records the target file path; concrete subclasses implement
    check()/perform().
    """

    def __init__(self, path):
        # Path of the file this edit operates on (may contain '~').
        self.path = path
class Inject(EditBase):
    '''
    Append to existing file or create new file.
    '''

    def __init__(self, path, skip_if, text):
        '''
        path is the file to edit or create.
        text is the text to inject.
        skip_if skips the edit when a line matches a regex pattern.
        '''
        super(Inject, self).__init__(path)
        self.skip_if_re = re.compile(skip_if)
        self.text = text
        self.needed = False

    def check(self, runner):
        # The edit is needed only when the skip pattern is absent.
        # Fixed: the test was previously inverted, reporting "needed" for
        # files that already contained the marker.
        return not _file_contains_re(runner, self.path, self.skip_if_re)

    def perform(self, runner):
        # Fixed: inject only when the skip pattern is NOT present, matching
        # the documented skip_if contract (previously it re-appended the
        # text into files that already contained the marker).
        if not _file_contains_re(runner, self.path, self.skip_if_re):
            _append_text(runner, self.path, self.text)
def edits(*edits):
    # Registration hook for castiron configurations: queue one or more edit
    # objects to be applied later by ConfigEditAction.
    G.all_edits.extend(edits)
class ConfigEditAction(Action):
    """Castiron action that applies every edit registered via edits()."""

    description = 'edit configuration files'
    enabled = True

    def __init__(self):
        super(ConfigEditAction, self).__init__()
        class CheckedEdit(object):
            # Pairs an edit with the outcome of its check() pass.
            def __init__(self, edit):
                self.edit = edit
                self.needed = False
        self.checked_edits = [CheckedEdit(edit) for edit in G.all_edits]

    def check(self, runner):
        # True when at least one registered edit reports that it must run;
        # remembers the per-edit decision for perform().
        okay = False
        for checked_edit in self.checked_edits:
            if runner.call(checked_edit.edit.check):
                okay = checked_edit.needed = True
        return okay

    def perform(self, runner, needed):
        # ``needed`` is part of the Action interface; the per-edit decisions
        # were cached by check() above.
        for checked_edit in self.checked_edits:
            if checked_edit.needed:
                runner.call(checked_edit.edit.perform)
            else:
                print 'Configuration file was already changed: %s' % checked_edit.edit.path
register_actions(ConfigEditAction)
| Python | 0.000004 | |
99430e9f51eccb79f32af49bedfb28ba5f39cd09 | update : minor changes | ptop/plugins/system_sensor.py | ptop/plugins/system_sensor.py | '''
System sensor plugin
Generates the basic system info
'''
from ptop.core import Plugin
import psutil, socket, getpass
import datetime, time
class SystemSensor(Plugin):
    """Text-only sensor reporting the current user, host name and uptime."""

    def __init__(self, **kwargs):
        super(SystemSensor, self).__init__(**kwargs)

    def update(self):
        """Rebuild the 'text' payload of currentValue on every tick."""
        uptime = datetime.timedelta(
            seconds=int(time.time() - psutil.boot_time()))
        payload = {}
        payload['user'] = getpass.getuser()
        payload['host_name'] = socket.gethostname()
        payload['running_time'] = uptime
        self.currentValue['text'] = payload
system_sensor = SystemSensor(name='System',sensorType='text',interval=1)
| Python | 0.000001 | |
ded21520c1fde89336480b48387d383a2e449c2a | Write test for array | tests/chainer_tests/utils_tests/test_array.py | tests/chainer_tests/utils_tests/test_array.py | import unittest
import numpy
from chainer import cuda
from chainer.utils import array
from chainer.testing import attr
class TestFullLike(unittest.TestCase):
    """chainer.utils.array.full_like must fill with the given scalar and
    return an array of the same backend (numpy on CPU, cupy on GPU)."""

    def test_full_like_cpu(self):
        x = numpy.array([1, 2], numpy.float32)
        y = array.full_like(x, 3)
        self.assertIsInstance(y, numpy.ndarray)
        self.assertEqual(y.shape, (2,))
        self.assertEqual(y[0], 3)
        self.assertEqual(y[1], 3)

    @attr.gpu
    def test_full_like_gpu(self):
        x = cuda.cupy.array([1, 2], numpy.float32)
        y = array.full_like(x, 3)
        self.assertIsInstance(y, cuda.cupy.ndarray)
        # Move to host once; element-wise reads of device arrays are slow.
        y = cuda.to_cpu(y)
        self.assertEqual(y.shape, (2,))
        self.assertEqual(y[0], 3)
        self.assertEqual(y[1], 3)
| Python | 0.000234 | |
f4260ad3e652a09922395e64d29bcf8f96ee12bc | Add test_colormap.py | tests/test_colormap.py | tests/test_colormap.py | # -*- coding: utf-8 -*-
""""
Folium Colormap Module
----------------------
"""
import folium.colormap as cm
def test_simple_step():
    """Smoke-test StepColormap construction (explicit and default index)
    and its HTML representation."""
    step = cm.StepColormap(['green','yellow','red'], vmin=3., vmax=10., index=[3,4,8,10], caption='step')
    step = cm.StepColormap(['r','y','g','c','b','m'])
    step._repr_html_()
def test_simple_linear():
    """Smoke-test LinearColormap construction (uniform and explicit index)
    and its HTML representation."""
    linear = cm.LinearColormap(['green','yellow','red'], vmin=3., vmax=10.)
    linear = cm.LinearColormap(['red','orange', 'yellow','green'], index=[0,0.1,0.9,1.])
    linear._repr_html_()
def test_linear_to_step():
    """Exercise every to_step() signature: fixed n, explicit index, and
    data-driven binning with each method/quantile/rounding option."""
    some_list = [30.6, 50, 51, 52, 53, 54, 55, 60, 70, 100]
    lc = cm.linear.YlOrRd
    lc.to_step(n=12)
    lc.to_step(index=[0,2,4,6,8,10])
    lc.to_step(data=some_list, n=12)
    lc.to_step(data=some_list, n=12, method='linear')
    lc.to_step(data=some_list, n=12, method='log')
    lc.to_step(data=some_list, n=30, method='quantiles')
    lc.to_step(data=some_list, quantiles=[0,0.3,0.7,1])
    lc.to_step(data=some_list, quantiles=[0,0.3,0.7,1], round_method='int')
    lc.to_step(data=some_list, quantiles=[0,0.3,0.7,1], round_method='log10')
def test_step_to_linear():
    """A StepColormap must convert back to a LinearColormap."""
    step = cm.StepColormap(['green','yellow','red'], vmin=3., vmax=10., index=[3,4,8,10], caption='step')
    step.to_linear()
def test_linear_object():
    """Smoke-test the built-in `cm.linear` palette registry: rendering,
    stepping, rescaling and the registry's own HTML listing."""
    cm.linear.OrRd._repr_html_()
    cm.linear.PuBu.to_step(12)
    cm.linear.YlGn.scale(3,12)
    cm.linear._repr_html_()
| Python | 0.000003 | |
dd65fb84e41b11f8d97e3862d00137969589ab4b | integrate greenify | tests/test_greenify.py | tests/test_greenify.py | from __future__ import absolute_import
import sys
import time
import greenify
greenify.greenify()
import pylibmc
import random
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from gtornado import green
greenify.patch_lib("/usr/lib/x86_64-linux-gnu/libmemcached.so")
def call_mc(i):
    """Open a memcached connection, fetch server stats, and disconnect.

    ``i`` is only a task index; the result is discarded. This exists purely
    to exercise the greenified libmemcached under concurrency.
    """
    mc = pylibmc.Client(["localhost"])
    mc.get_stats()
    mc.disconnect_all()
@coroutine
def use_greenlet():
    """Spawn 1000 concurrent memcached calls on greenlets and print the
    wall-clock time the batch took."""
    s = time.time()
    yield [green.spawn(call_mc, i) for i in range(1000)]
    print(time.time() - s)
if __name__ == "__main__":
IOLoop.instance().run_sync(use_greenlet)
| Python | 0.000475 | |
7401d1ecd6b3323b266cf02eabd42a2c4e40d988 | Add initial tests for test module | tests/test_test.py | tests/test_test.py | """tests/test_test.py.
Test to ensure basic test functionality works as expected.
Copyright (C) 2019 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import pytest
import hug
api = hug.API(__name__)
def test_cli():
    """Test to ensure the CLI tester works as intended to allow testing CLI endpoints"""
    @hug.cli()
    def my_cli_function():
        return 'Hello'

    # Both lookup styles must resolve the endpoint: by callable and by name
    # within an explicit API.
    assert hug.test.cli(my_cli_function) == 'Hello'
    assert hug.test.cli('my_cli_function', api=api) == 'Hello'

    # Shouldn't be able to specify both api and module.
    with pytest.raises(ValueError):
        assert hug.test.cli('my_method', api=api, module=hug)
| Python | 0 | |
925da64adf0b74ba18eb78acd9127e3a6dc6f903 | Add test cases for reported issues | tests/test_reported.py | tests/test_reported.py | # -*- coding: utf-8 -*-
from pyrql import parse
CMP_OPS = ['eq', 'lt', 'le', 'gt', 'ge', 'ne']
class TestReportedErrors:
    """Regression tests for parser issues reported by users."""

    def test_like_with_string_parameter(self):
        # A glob pattern containing literal spaces must parse unchanged.
        parsed = parse('like(name,*new jack city*)')
        assert parsed == {'name': 'like', 'args': ['name', '*new jack city*']}

    def test_like_with_string_encoded_parameter(self):
        # Percent-encoded spaces (%20) must be decoded during parsing.
        parsed = parse('like(name,*new%20jack%20city*)')
        assert parsed == {'name': 'like', 'args': ['name', '*new jack city*']}
| Python | 0 | |
85b4c2a3966c09bdfebc788b7e3d31fbb8285b77 | Add tests for TaskWikiDelete | tests/test_selected.py | tests/test_selected.py | from tests.base import IntegrationTest
from time import sleep
class TestAnnotateAction(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1"),
dict(description="test task 2"),
]
def execute(self):
self.command(
"TaskWikiAnnotate This is annotation.",
regex="Task \"test task 1\" annotated.$",
lines=1)
self.tasks[0].refresh()
annotation = self.tasks[0]['annotations']
assert annotation != []
assert annotation[0]['description'] == "This is annotation."
class TestAnnotateActionMoved(IntegrationTest):
    """:TaskWikiAnnotate follows the cursor: annotating from line 2 must
    target the second task."""

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.client.type('2gg')  # Go to the second line
        self.command(
            "TaskWikiAnnotate This is annotation.",
            regex="Task \"test task 2\" annotated.$",
            lines=1)

        self.tasks[1].refresh()
        annotation = self.tasks[1]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."
class TestAnnotateActionRange(IntegrationTest):
    """:TaskWikiAnnotate over a visual range must annotate every selected
    task."""

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.client.type('V2gg')  # Go to the second line
        self.client.feedkeys(":TaskWikiAnnotate This is annotation.")
        self.client.type('<Enter>')
        # Give vim time to process the command for both tasks.
        sleep(2)

        for task in self.tasks:
            task.refresh()

        annotation = self.tasks[0]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."

        annotation = self.tasks[1]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."
class TestDeleteAction(IntegrationTest):
    """:TaskWikiDelete removes the task under the cursor from both the
    buffer and TaskWarrior."""

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    # Expected buffer after the delete: only the untouched second task.
    vimoutput = """
    * [ ] test task 2 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.command(
            "TaskWikiDelete",
            regex="Task \"test task 1\" deleted.$",
            lines=1)

        for task in self.tasks:
            task.refresh()

        assert self.tasks[0]['status'] == "deleted"
        assert self.tasks[1]['status'] == "pending"
class TestDeleteActionMoved(IntegrationTest):
    """:TaskWikiDelete follows the cursor: deleting from line 2 must remove
    the second task and leave the first pending."""

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    vimoutput = """
    * [ ] test task 1 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.client.type('2gg')
        self.command(
            "TaskWikiDelete",
            regex="Task \"test task 2\" deleted.$",
            lines=1)
        # Allow the buffer/TaskWarrior state to settle before asserting.
        sleep(1)

        for task in self.tasks:
            task.refresh()

        assert self.tasks[1]['status'] == "deleted"
        assert self.tasks[0]['status'] == "pending"
class TestDeleteActionRange(IntegrationTest):
    """:TaskWikiDelete over a whole-buffer visual range must delete every
    selected task, leaving an empty buffer."""

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    vimoutput = """
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        # Select everything (1gg then V G), then run the delete command.
        self.client.normal('1gg')
        sleep(1)
        self.client.normal('VG')
        sleep(1)
        self.client.feedkeys(":TaskWikiDelete")
        self.client.type('<Enter>')
        sleep(1)

        for task in self.tasks:
            task.refresh()

        assert self.tasks[1]['status'] == "deleted"
        assert self.tasks[0]['status'] == "deleted"
| from tests.base import IntegrationTest
from time import sleep
class TestAnnotateAction(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1"),
dict(description="test task 2"),
]
def execute(self):
self.command(
"TaskWikiAnnotate This is annotation.",
regex="Task \"test task 1\" annotated.$",
lines=1)
self.tasks[0].refresh()
annotation = self.tasks[0]['annotations']
assert annotation != []
assert annotation[0]['description'] == "This is annotation."
class TestAnnotateActionMoved(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1"),
dict(description="test task 2"),
]
def execute(self):
self.client.type('2gg') # Go to the second line
self.command(
"TaskWikiAnnotate This is annotation.",
regex="Task \"test task 2\" annotated.$",
lines=1)
self.tasks[1].refresh()
annotation = self.tasks[1]['annotations']
assert annotation != []
assert annotation[0]['description'] == "This is annotation."
class TestAnnotateActionRange(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1"),
dict(description="test task 2"),
]
def execute(self):
self.client.type('V2gg') # Go to the second line
self.client.feedkeys(":TaskWikiAnnotate This is annotation.")
self.client.type('<Enter>')
sleep(2)
for task in self.tasks:
task.refresh()
annotation = self.tasks[0]['annotations']
assert annotation != []
assert annotation[0]['description'] == "This is annotation."
annotation = self.tasks[1]['annotations']
assert annotation != []
assert annotation[0]['description'] == "This is annotation."
| Python | 0 |
e61c6eb5b5a9f6f70df036dcfedf552325a6e9bd | move unit test syn import to pytest fixture | tests/unit/conftest.py | tests/unit/conftest.py | import logging
import pytest
from synapseclient import Synapse
from synapseclient.core.logging_setup import SILENT_LOGGER_NAME
"""
pytest unit test session level fixtures
"""
@pytest.fixture(scope="session")
def syn():
    """
    Create a Synapse instance that can be shared by all tests in the session.
    """
    # skip_checks avoids version/endpoint validation network calls in unit tests.
    syn = Synapse(debug=False, skip_checks=True)
    # Route client logging to the silent logger so test output stays clean.
    syn.logger = logging.getLogger(SILENT_LOGGER_NAME)
    return syn
| Python | 0 | |
182762812cb1945dd2b50c21b34609be00b7bf45 | Create wordlist_add_digits.py | wordlist_add_digits.py | wordlist_add_digits.py | #!/usr/bin/env python
#Adds 4digits to the end of the common word lists
import os, sys
class Wordlist_Add_Digits():
    """Expand a wordlist by appending every 4-digit suffix to each word."""

    def add_digits(self, wordlist, outfile):
        """Write word+NNNN (NNNN = 0000..9999) for every word in *wordlist*
        to *outfile*, one candidate per line.

        :param wordlist: path of the input wordlist (one word per line)
        :param outfile: path of the output file (overwritten)
        """
        # Both files are managed by context managers; the previous version
        # leaked the output handle (it was opened but never closed).
        with open(wordlist) as src, open(outfile, 'w') as out:
            for word in src.read().splitlines():
                # Single counter replaces the old four nested digit loops;
                # %04d produces the identical 0000..9999 ordering.
                for n in range(10000):
                    out.write('%s%04d\n' % (word, n))
if __name__ == '__main__':
try:
wordlist = sys.argv[1]
outfile = sys.argv[2]
wordz = Wordlist_Add_Digits()
wordz.add_digits(wordlist, outfile)
except IndexError:
print('Usage: wordlist_add_digits.py wordlist.txt output.txt')
sys.exit(1)
| Python | 0.000575 | |
da3e9d5f7ffeae68ef7ae3b07247a9f6cb16d40d | Create get_user_statuses.py | src/Python/get_user_statuses.py | src/Python/get_user_statuses.py | import sys
import urllib2
import time
import re
from lxml import html
def get_user_statuses(userid):
    """Scrape the statuses ("sayings") a Douban user has posted.

    Walks the paginated statuses pages until a page holds fewer than 20
    items (the full-page size), keeping only statuses whose author link
    resolves back to *userid* -- reshared items carry the original author's
    link and are therefore skipped.
    """
    reached_end = False
    i = 1
    saying_list = []
    while not reached_end:
        page_url = "http://www.douban.com/people/%s/statuses?p=%d" % (userid, i)
        # TODO: User login. Results limited to the first 10 pages without login
        response = urllib2.urlopen(page_url)
        page_html = response.read()
        tree = html.fromstring(page_html)
        statuses_element_list = tree.xpath('//*[@class="status-item"]')
        # A full page holds 20 statuses; fewer means this was the last page.
        if len(statuses_element_list) < 20:
            reached_end = True
        print len(statuses_element_list)
        for s in range(len(statuses_element_list)):
            # First link in the item header is the author's profile URL.
            author_element = statuses_element_list[s].findall('.//*[@class="hd"]/*[@class="text"]/a')[0]
            author_link = author_element.get('href')
            author_id=None
            if re.search(r".*people/(.+?)/", author_link):
                author_id=re.search(r".*people/(.+?)/", author_link).group(1)
            if author_id == userid:
                blockquote_element_list = statuses_element_list[s].findall('.//*[@class="status-saying"]/blockquote')
                if blockquote_element_list:
                    # Join the <p> children to rebuild multi-line sayings.
                    content='\n'.join([p.text for p in blockquote_element_list[0].findall('p')])
                    saying_list.append(content)
        i += 1
        # Be polite to the server between page fetches.
        time.sleep(1)
    return saying_list
if __name__ == "__main__":
userid = sys.argv[1]
result_list = get_user_statuses( userid )
for i in result_list:
print i
| Python | 0.000003 | |
c0637f482a95dd7ec02bb7b85bc8d164c0a80585 | add missing check_headers tool | tools/check_headers.py | tools/check_headers.py | #!/usr/bin/env python2
import sys
from os import unlink
from os.path import exists
# HTTP headers that participate in the download signature: if any of these
# change between requests, the cached artifact is considered stale.
HEADERS = ('Content-Disposition', 'Content-Length', 'Content-Type',
           'ETag', 'Last-Modified')


def is_sig_header(header):
    """Return True when *header* (a raw "Name: value" line) names one of the
    signature-relevant HEADERS, case-insensitively.

    Previously the function fell off the end and returned None for
    non-matches; it now returns an explicit bool (same truthiness).
    """
    lowered = header.lower()
    return any(lowered.startswith(known.lower()) for known in HEADERS)
def do():
    """Refresh the header-signature file from the latest HTTP response.

    argv[1]: file of raw response headers (may contain several responses
    after redirects); argv[2]: signature file to maintain.
    Returns 0 when the existing signature already matches; otherwise
    rewrites the signature file and implicitly returns None (the caller
    treats any non-exception outcome as success).
    """
    headers_fn = sys.argv[1]
    signature_fn = sys.argv[2]

    # first, get all the headers from the latest request
    with open(headers_fn) as fd:
        headers = [line.strip() for line in fd.readlines()]
    # Keep only the final response: everything after the last status line.
    last_index = 0
    for index, header in enumerate(headers):
        if header.startswith('HTTP/1.'):
            last_index = index
    headers = headers[last_index:]

    # select few headers for the signature
    headers = [header for header in headers if is_sig_header(header)]
    signature = '\n'.join(headers)

    # read the original signature
    if exists(signature_fn):
        with open(signature_fn) as fd:
            original_signature = fd.read()
            if original_signature == signature:
                return 0
        # Signature changed: drop the stale file before rewriting.
        unlink(signature_fn)

    if signature:
        with open(signature_fn, 'w') as fd:
            fd.write(signature)
# Exit status: 0 on success (unchanged or rewritten signature), 1 on any
# failure. NOTE(review): the bare `except:` also swallows SystemExit and
# KeyboardInterrupt -- presumably a deliberate "never crash" guard for the
# build tooling; confirm before narrowing it.
try:
    ret = do()
except:
    ret = 1
sys.exit(ret)
| Python | 0.000001 | |
34fd994053b581d0dbb29a5f8f5cbda805c796cf | fix minor bug | xgbmagic/__init__.py | xgbmagic/__init__.py | from xgboost.sklearn import XGBClassifier, XGBRegressor
import xgboost as xgb
import pandas as pd
import operator
import seaborn as sns
import numpy as np
from sklearn import grid_search, metrics
class Xgb:
def __init__(self, df, target_column='', id_column='', target_type='binary', categorical_columns=[], num_training_rounds=500, verbose=1):
"""
input params:
- df (DataFrame): dataframe of training data
- target_column (string): name of target column
- id_column (string): name of id column
- target_type (string): 'linear' or 'binary'
- categorical_columns (list): list of column names of categorical data. Will perform one-hot encoding
- verbose (bool): verbosity of printouts
"""
if type(df) == pd.core.frame.DataFrame:
self.df = df
if target_column:
self.target_column = target_column
self.id_column = id_column
self.target_type = target_type
self.categorical_columns = categorical_columns
self.verbose = verbose
# init the classifier
if self.target_type == 'binary':
self.clf = XGBClassifier(
learning_rate =0.1,
n_estimators = num_training_rounds,
subsample = 0.8,
colsample_bytree = 0.8,
objective = 'binary:logistic',
scale_pos_weight = 1,
seed = 123)
elif self.target_type == 'linear':
self.clf = XGBRegressor()
else:
print 'please provide target column name'
else:
print 'please provide pandas dataframe'
def train(self):
print '#### preprocessing ####'
self.df = self.preprocess(self.df)
print '#### training ####'
self.predictors = [x for x in self.df.columns if x not in [self.target_column, self.id_column]]
xgb_param = self.clf.get_xgb_params()
if self.target_type == 'binary':
xgtrain = xgb.DMatrix(self.df[self.predictors], label=self.df[self.target_column], missing=np.nan)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=self.clf.get_params()['n_estimators'], nfold=5,
metrics=['auc'], early_stopping_rounds=5, show_progress=self.verbose)
self.clf.set_params(n_estimators=cvresult.shape[0])
self.clf.fit(self.df[self.predictors], self.df[self.target_column],eval_metric='auc')
#Predict training set:
train_df_predictions = self.clf.predict(self.df[self.predictors])
train_df_predprob = self.clf.predict_proba(self.df[self.predictors])[:,1]
print "Accuracy : %.4g" % metrics.accuracy_score(self.df[self.target_column].values, train_df_predictions)
print "AUC Score (Train): %f" % metrics.roc_auc_score(self.df[self.target_column], train_df_predprob)
elif self.target_type == 'linear':
model = grid_search.GridSearchCV(estimator = self.clf, param_grid=xgb_param, verbose=1,cv=4, scoring='mean_squared_error')
model.fit(self.df[self.predictors], self.df[self.target_column])
train_df_predictions = model.predict(self.df[self.predictors])
print "Accuracy : %.4g" % metrics.accuracy_score(self.df[target_column].values, train_df_predictions)
def predict(self, test_df):
test_df = self.preprocess(test_df)
return self.clf.predict(test_df[self.predictors])
def feature_importance(self):
feature_importance = sorted(self.clf.booster().get_fscore().items(), key = operator.itemgetter(1), reverse=True)
impt = pd.DataFrame(feature_importance)
impt.columns = ['feature', 'importance']
impt[:10].plot("feature", "importance", kind="barh", color=sns.color_palette("deep", 3))
def preprocess(self, df, train=True):
self.cols_to_remove = []
# one hot encoding of categorical variables
print '## one hot encoding of categorical variables'
for col in self.categorical_columns:
if self.verbose:
print 'one hot encoding: ', col
df = pd.concat([df, pd.get_dummies(df[col]).rename(columns=lambda x: col+'_'+str(x))], axis=1)
df = df.drop([col], axis=1)
if train:
# drop columns that are too sparse to be informative
print '## dropping columns below sparsity threshold'
for col in df.columns:
nan_cnt = 0
for x in df[col]:
try:
if np.isnan(x):
nan_cnt += 1
except:
pass
if nan_cnt/float(len(df[col])) > 0.6: # arbitrary cutoff, if more than 60% missing then drop
if self.verbose:
print 'will drop', col
self.cols_to_remove.append(col)
# drop columns that have no standard deviation (not informative)
print '## dropping columns with no variation'
for col in df.columns:
if df[col].dtype == 'int64' or df[col].dtype == 'float64':
if df[col].std() == 0:
print 'will drop', col
self.cols_to_remove.append(col)
if self.verbose and self.cols_to_remove:
print 'dropping the following columns:', self.cols_to_remove
df = df.drop(self.cols_to_remove, axis=1)
if self.verbose:
print '## DataFrame shape is now:', df.shape
# convert to numerical where possible
print '## converting numerical data to numeric dtype'
df = df.convert_objects(convert_numeric=True)
# drop all those that are object type
print '## dropping non-numerical columns'
for col in df.columns:
if df[col].dtype == 'int64' or df[col].dtype == 'float64' or df[col].dtype == 'bool':
pass
else:
if self.verbose:
print 'dropping because not int, float, or bool:', col
df = df.drop([col], axis=1)
return df
| Python | 0.000001 | |
3e5105218976549a0a782f179bb358edfd4e89c9 | Add load_tests / __init__.py to the azure/cli/tests module to allow for simpler unit test discovery | src/azure/cli/tests/__init__.py | src/azure/cli/tests/__init__.py | from .test_argparse import Test_argparse
from unittest import TestSuite
# Test classes exposed through the unittest load_tests protocol.
test_cases = [Test_argparse]


def load_tests(loader, tests, pattern):
    """unittest load_tests hook: build one suite from ``test_cases``.

    ``tests`` and ``pattern`` are part of the protocol signature and are
    intentionally unused.
    """
    suite = TestSuite()
    for case_class in test_cases:
        suite.addTests(loader.loadTestsFromTestCase(case_class))
    return suite
| Python | 0.000001 | |
94128fb2c9dfec51a1a012130734e72cd74bb98b | add new tools to help cleaning fastq files | sequana/scripts/substractor.py | sequana/scripts/substractor.py | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Substract genomes from the raw reads"""
import os
import sys
import argparse
from sequana.scripts.tools import SequanaOptions
from sequana.bamtools import SAM
from sequana import FastQ
from easydev.console import purple
from sequana import logger
logger.name = "sequana.substractor"
from subprocess import STDOUT
import subprocess
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
        argparse.RawDescriptionHelpFormatter):
    """argparse help formatter that both shows option defaults and preserves
    the raw line breaks of the epilog text."""
    pass
epilog = purple("""
----
AUTHORS: Thomas Cokelaer
Documentation: http://sequana.readthedocs.io
Issues: http://github.com/sequana/sequana
""")
class Options(argparse.ArgumentParser, SequanaOptions):
    """Command-line options for sequana_substractor (input/output FastQ,
    one or more reference FastA files, threads, verbosity)."""

    def __init__(self, prog="sequana_substractor"):
        usage = """%s reads (flag 256+4) saving the mapped reads in a file, and the unmapped in
another file\n""" % prog
        usage += """usage2: %s --input test.fastq --reference Phix174.fa\n""" % prog
        usage += """
    """
        super(Options, self).__init__(usage=usage, prog=prog,
                epilog=epilog,
                formatter_class=CustomFormatter)

        self.add_argument("--input", dest='input', type=str,
            required=True, help="input FastQ file")
        self.add_argument("--output", dest='outfile', type=str,
            default="reads.fastq", help="output FastQ filename")
        # --reference adds a single FastA; --references takes several and,
        # when given, replaces the single one (see main()).
        self.add_argument("--reference", dest="reference", type=str,
            default=None)
        self.add_argument("--references", dest="references", type=str,
            nargs="+", default=[])
        self.add_argument("--output-directory", dest='outdir', type=str,
            default="sequana_substractor",
            required=False, help="input fastq gzipped or not")
        # Shared options provided by SequanaOptions mixin:
        self.add_threads(self)
        self.add_version(self)
        self.add_level(self)
class Substractor(object):
    """Iteratively map reads against reference genomes and keep the rest.

    Each reference is mapped with minimap2 (map-pb preset, i.e. PacBio-style
    long reads); mapped reads are set aside and the unmapped remainder is fed
    to the next reference.
    """

    def __init__(self, infile, references, outdir, threads=4):
        """
        :param infile: input FastQ file with the raw reads
        :param references: FastA files (.fa/.fasta) to subtract, in order
        :param outdir: directory for intermediate and final files
        :param threads: thread count passed to minimap2
        """
        self.infile = infile
        self.references = references
        self.outdir = outdir
        self.threads = threads

        # Fixed: previously called os.mkdir(options.outdir) where `options`
        # is undefined in this scope; the bare except hid the NameError and
        # the output directory was never created.
        try:
            os.mkdir(self.outdir)
        except OSError:
            pass  # directory already exists

        # this may be used later on for other mapper or methodology
        self.mapper_cmd = "minimap2 -x map-pb -t {} {} {} -a > {}"

        f = FastQ(self.infile)
        self.L = len(f)
        logger.info("Found {} reads in input FastQ file\n\n".format(self.L))

    def run(self, output_filename):
        """Subtract every reference in turn; copy the surviving unmapped
        reads to *output_filename* and log mapped/unmapped totals."""
        MAPPED = 0
        for i, reference in enumerate(self.references):
            # First pass reads the raw input; later passes read the unmapped
            # output left by the previous reference.
            if i == 0:
                infile = self.infile
            else:
                infile = outfile

            # we only accept reference ending in .fa or .fasta
            assert reference.endswith(".fa") or reference.endswith(".fasta")
            outfile = reference.replace(".fa", "").replace(".fasta", "")
            tag = outfile[0:8]
            outfile = "{}/mapping_{}.sam".format(self.outdir, tag)

            cmd = self.mapper_cmd.format(self.threads, reference, infile, outfile)
            logger.info("Removing {}. Mapping starting".format(reference))
            logger.info(cmd)
            subprocess.call(cmd, shell=True, stderr=subprocess.PIPE)

            # Now we need to extract the fastq from the SAM file.
            results = self.splitter_mapped_unmapped(outfile, tag)

            # keep track of total mapped reads
            MAPPED += results["mapped"]
            outfile = "{}/{}.unmapped.fastq".format(self.outdir, tag)
            logger.info("{} mapped. {} reads remaining".format(
                results['mapped'], results["unmapped"]))
            print()

        # now we copy the last unmapped file into the requested output file
        cmd = "cp {} {}".format(outfile, output_filename)
        subprocess.call(cmd, shell=True)
        logger.info("Your final file: {} with {} reads".format(
            output_filename, results['unmapped']))
        logger.info("all mapped and unmapped files: {}. Input was {}".format(
            MAPPED + results['unmapped'], self.L))

    def splitter_mapped_unmapped(self, filename, prefix):
        """Split the SAM file *filename* into <prefix>.mapped.fastq and
        <prefix>.unmapped.fastq inside outdir.

        Secondary (256), supplementary (2048) and PCR-duplicate (1024)
        alignments are dropped so each read is written at most once.
        Returns a dict of 'mapped'/'unmapped'/'bad' counts plus the list of
        unhandled flags.
        """
        # helpful resources:
        # https://broadinstitute.github.io/picard/explain-flags.html
        logger.info("Creating 2 files (mapped and unmapped reads)")
        data = SAM(filename)

        results = {
            "flags": [],
            "mapped": 0,
            "unmapped": 0,
            "bad": 0
        }

        logger.info("Please wait while creating output files")
        with open("{}/{}.unmapped.fastq".format(self.outdir, prefix), "w") as fnosirv:
            with open("{}/{}.mapped.fastq".format(self.outdir, prefix), "w") as fsirv:
                for a in data:
                    if a.flag & 2048:    # supplementary alignment: drop
                        results['bad'] += 1
                    elif a.flag & 1024:  # PCR duplicate: drop
                        results['bad'] += 1
                    elif a.flag & 256:   # secondary alignment: drop
                        results["bad"] += 1
                    elif a.flag & 16:    # mapped (reverse strand)
                        read = "@{}\n{}\n+\n{}\n".format(a.qname, a.query_sequence, a.qual)
                        assert len(a.query_sequence) == len(a.qual)
                        fsirv.write(read)
                        results["mapped"] += 1
                    elif a.flag & 4:     # unmapped
                        read = "@{}\n{}\n+\n{}\n".format(a.qname, a.query_sequence, a.qual)
                        assert len(a.query_sequence) == len(a.qual)
                        fnosirv.write(read)
                        results["unmapped"] += 1
                    elif a.flag == 0:    # mapped (forward strand)
                        read = "@{}\n{}\n+\n{}\n".format(a.qname, a.query_sequence, a.qual)
                        assert len(a.query_sequence) == len(a.qual)
                        fsirv.write(read)
                        results["mapped"] += 1
                    else:
                        logger.warning("{} flag not handled".format(a.flag))
                        results["flags"].append(a.flag)
        return results
def main(args=None):
    """Command-line entry point: parse options, build the reference list and
    run the substractor."""
    if args is None:
        args = sys.argv[:]

    print(purple("Welcome to sequana_substractor"))
    print(purple("WARNING. TESTED ON LONG READS ONLY. EXPERIMENTAL"))

    user_options = Options(prog="sequana_substractor")

    # No arguments supplied: show the help screen rather than failing.
    if len(args) ==1:
        args.append("--help")

    if "--version" in sys.argv:
        import sequana
        print(sequana.version)
        sys.exit(0)

    options = user_options.parse_args(args[1:])
    logger.level = options.level

    # build the references list
    # NOTE(review): --references, when given, replaces rather than extends a
    # single --reference; and the log format string below takes only one
    # placeholder although two arguments are passed -- confirm intent.
    references = []
    if options.reference:
        references.append(options.reference)
    if options.references:
        references = options.references

    logger.info("{} references provided: ".format(
        len(references), ",".join(references)))

    # call the entire machinery here
    sub = Substractor(options.input, references, options.outdir, options.threads)
    sub.run(options.outfile)
if __name__ == "__main__":
import sys
main(sys.argv)
| Python | 0 | |
f91d666cc06f5db48bea43de29ca4153e58c473d | add test for os platform check | check.py | check.py | #!/bin/py
import os
import sys
def osCheck():
    """Exit the program unless running on a UNIX-like OS (Linux or macOS)."""
    # BUG FIX: the original condition
    #   not sys.platform.startswith('linux') or sys.platform.startswith('darwin')
    # binds `not` only to the first test, so it exited on macOS ('darwin')
    # even though the commented-out line shows darwin was meant to be
    # accepted. Negate the whole disjunction instead.
    if not (sys.platform.startswith('linux') or sys.platform.startswith('darwin')):
        print("This program was designed for UNIX-like systems. Exiting.")
        sys.exit()
osCheck()
| Python | 0 | |
cafb802c51e0c0b8ff58cb749fa30b99cd7182b4 | Fix versions script to accept versions without -ce suffix | scripts/versions.py | scripts/versions.py | import operator
import re
from collections import namedtuple
import requests
base_url = 'https://download.docker.com/linux/static/{0}/x86_64/'
categories = [
'edge',
'stable',
'test'
]
STAGES = ['tp', 'beta', 'rc']


class Version(namedtuple('_Version', 'major minor patch stage edition')):
    """A parsed docker version of the form [v]MAJOR.MINOR.PATCH[-EDITION][-STAGE]."""

    @classmethod
    def parse(cls, version):
        """Build a Version from its string form, e.g. ``v18.09.0-ce-rc1``."""
        edition = None
        remainder = version.lstrip('v')
        remainder, _, suffix = remainder.partition('-')
        if suffix:
            if not any(marker in suffix for marker in STAGES):
                # No pre-release marker: the whole suffix is an edition.
                edition, suffix = suffix, None
            elif '-' in suffix:
                # Edition and stage combined, e.g. 'ce-rc1'.
                edition, suffix = suffix.split('-')
        major, minor, patch = remainder.split('.', 3)
        return cls(major, minor, patch, suffix, edition)

    @property
    def major_minor(self):
        """The (major, minor) pair of the version, as strings."""
        return self.major, self.minor

    @property
    def order(self):
        """Return a representation that allows this object to be sorted
        correctly with the default comparator.
        """
        # Pre-release builds sort before the matching GA release, in the
        # order tp -> beta -> rc -> GA.
        if self.stage:
            for rank, marker in enumerate(STAGES):
                if marker in self.stage:
                    key = (rank, self.stage)
                    break
        else:
            key = (len(STAGES),)

        return (int(self.major), int(self.minor), int(self.patch)) + key

    def __str__(self):
        suffix = ''
        if self.edition:
            suffix += '-{}'.format(self.edition)
        if self.stage:
            suffix += '-{}'.format(self.stage)
        return '.'.join(map(str, self[:3])) + suffix
def main():
    """Print the newest docker version available in each release channel."""
    results = set()
    for url in [base_url.format(cat) for cat in categories]:
        res = requests.get(url)
        content = res.text
        versions = [
            Version.parse(name) for name in _extract_version_strings(content)
        ]
        sorted_versions = sorted(
            versions, reverse=True, key=operator.attrgetter('order')
        )
        latest = sorted_versions[0]
        results.add(str(latest))
    print(' '.join(results))


def _extract_version_strings(content):
    """Return the version part of every docker tarball name in *content*.

    BUG FIX: the previous chained ``lstrip``/``rstrip`` calls strip
    *character sets*, not prefixes/suffixes, so e.g. ``'18.09.6'`` lost its
    trailing ``6`` to ``rstrip('-x86_64')``. A regex capture group extracts
    the version string exactly instead.
    """
    return re.findall(
        r'"docker-([0-9]+\.[0-9]+\.[0-9]+[^"]*?)(?:-x86_64)?\.tgz"', content
    )
if __name__ == '__main__':
    # Script entry point.
    main()
| import operator
import re
from collections import namedtuple
import requests
base_url = 'https://download.docker.com/linux/static/{0}/x86_64/'
categories = [
'edge',
'stable',
'test'
]
STAGES = ['tp', 'beta', 'rc']
class Version(namedtuple('_Version', 'major minor patch stage edition')):
    # Represents a docker version string of the form
    # [v]MAJOR.MINOR.PATCH[-EDITION][-STAGE], e.g. "17.06.0-ce-rc2".

    @classmethod
    def parse(cls, version):
        """Split a version string into (major, minor, patch, stage, edition)."""
        edition = None
        version = version.lstrip('v')
        version, _, stage = version.partition('-')
        if stage:
            if not any(marker in stage for marker in STAGES):
                # No pre-release marker: the whole suffix is an edition.
                edition = stage
                stage = None
            elif '-' in stage:
                # Edition and stage combined, e.g. "ce-rc1".
                edition, stage = stage.split('-')
        major, minor, patch = version.split('.', 3)
        return cls(major, minor, patch, stage, edition)

    @property
    def major_minor(self):
        # The (major, minor) pair as strings.
        return self.major, self.minor

    @property
    def order(self):
        """Return a representation that allows this object to be sorted
        correctly with the default comparator.
        """
        # non-GA releases should appear before GA releases
        # Order: tp -> beta -> rc -> GA
        if self.stage:
            for st in STAGES:
                if st in self.stage:
                    stage = (STAGES.index(st), self.stage)
                    break
        else:
            stage = (len(STAGES),)

        return (int(self.major), int(self.minor), int(self.patch)) + stage

    def __str__(self):
        stage = '-{}'.format(self.stage) if self.stage else ''
        edition = '-{}'.format(self.edition) if self.edition else ''
        return '.'.join(map(str, self[:3])) + edition + stage
def main():
    """Print the newest docker version found in each release channel."""
    results = set()
    for url in [base_url.format(cat) for cat in categories]:
        res = requests.get(url)
        content = res.text
        versions = [
            Version.parse(
                v.strip('"').lstrip('docker-').rstrip('.tgz').rstrip('-x86_64')
            ) for v in re.findall(
                # BUG FIX: make the dash optional so plain versions such as
                # "docker-18.09.0.tgz" (no -ce suffix) are matched as well.
                r'"docker-[0-9]+\.[0-9]+\.[0-9]+-?.*tgz"', content
            )
        ]
        # NOTE(review): lstrip/rstrip strip *character sets*; versions ending
        # in 4/6/8 can be mangled by rstrip('-x86_64') -- verify.
        sorted_versions = sorted(
            versions, reverse=True, key=operator.attrgetter('order')
        )
        latest = sorted_versions[0]
        results.add(str(latest))
    print(' '.join(results))
if __name__ == '__main__':
    # Script entry point.
    main()
| Python | 0 |
ba82331fa694ec26c7f0108451abf3912b5a37ff | Reimplement deprecated (1.6) _is_ignorable_404 | opbeat/contrib/django/middleware/__init__.py | opbeat/contrib/django/middleware/__init__.py | """
opbeat.contrib.django.middleware
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2012 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.conf import settings
from opbeat.contrib.django.models import client
import threading
import logging
def _is_ignorable_404(uri):
    """
    Returns True if the given request *shouldn't* notify the site managers.

    BUG FIX: the original signature was ``(self, uri)`` even though this is a
    module-level function called with a single argument (see
    Opbeat404CatchMiddleware below), which raised a TypeError on every 404.
    The stray ``self`` parameter is removed.
    """
    return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
class Opbeat404CatchMiddleware(object):
    """Report HTTP 404 responses to Opbeat as 'Page Not Found' messages."""

    def process_response(self, request, response):
        # Skip anything that is not a 404 or that is explicitly ignorable
        # via settings.IGNORABLE_404_URLS.
        if response.status_code != 404 or _is_ignorable_404(request.get_full_path()):
            return response
        data = client.get_data_from_request(request)
        data.update({
            'level': logging.INFO,
            'logger': 'http404',
        })
        result = client.capture('Message', param_message={'message':'Page Not Found: %s','params':[request.build_absolute_uri()]}, data=data)
        # Expose the captured event's identifiers to later middleware
        # (e.g. OpbeatResponseErrorIdMiddleware).
        request.opbeat = {
            'app_id': data.get('app_id', client.app_id),
            'id': client.get_ident(result),
        }
        return response
class OpbeatResponseErrorIdMiddleware(object):
    """
    Appends the X-Opbeat-ID response header for referencing a message within
    the Opbeat datastore.
    """
    def process_response(self, request, response):
        # Only annotate responses for which an Opbeat event was captured
        # earlier in the middleware chain.
        opbeat_info = getattr(request, 'opbeat', None)
        if opbeat_info:
            response['X-Opbeat-ID'] = opbeat_info['id']
        return response
class OpbeatLogMiddleware(object):
    """Expose the current request to log handlers via a thread-local."""
    # Create a threadlocal variable to store the session in for logging
    thread = threading.local()

    def process_request(self, request):
        # Stash the request so handlers running on this thread can attach
        # request context to captured events.
        self.thread.request = request
| """
opbeat.contrib.django.middleware
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2012 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.middleware.common import _is_ignorable_404
from opbeat.contrib.django.models import client
import threading
import logging
class Opbeat404CatchMiddleware(object):
    """Report HTTP 404 responses to Opbeat as 'Page Not Found' messages.

    NOTE(review): this variant relies on ``_is_ignorable_404`` imported from
    ``django.middleware.common``, a private helper deprecated in Django 1.6
    -- confirm the supported Django versions.
    """
    def process_response(self, request, response):
        # Skip anything that is not a 404 or that is explicitly ignorable.
        if response.status_code != 404 or _is_ignorable_404(request.get_full_path()):
            return response
        data = client.get_data_from_request(request)
        data.update({
            'level': logging.INFO,
            'logger': 'http404',
        })
        result = client.capture('Message', param_message={'message':'Page Not Found: %s','params':[request.build_absolute_uri()]}, data=data)
        # Expose the captured event's identifiers to later middleware.
        request.opbeat = {
            'app_id': data.get('app_id', client.app_id),
            'id': client.get_ident(result),
        }
        return response
class OpbeatResponseErrorIdMiddleware(object):
    """
    Appends the X-Opbeat-ID response header for referencing a message within
    the Opbeat datastore.
    """
    def process_response(self, request, response):
        # Only annotate responses for which an Opbeat event was captured
        # earlier in the middleware chain.
        if not getattr(request, 'opbeat', None):
            return response
        response['X-Opbeat-ID'] = request.opbeat['id']
        return response
class OpbeatLogMiddleware(object):
    """Expose the current request to log handlers via a thread-local."""
    # Create a threadlocal variable to store the session in for logging
    thread = threading.local()

    def process_request(self, request):
        # Stash the request for the duration of this thread's handling.
        self.thread.request = request
| Python | 0.000001 |
92b572004264c69baed5cce721e20e1a830514f8 | add 'is_changed' filter | filter_plugins/is_changed.py | filter_plugins/is_changed.py | class FilterModule(object):
''' A comment '''
    def filters(self):
        """Return the mapping of filter names provided by this plugin."""
        return {
            'is_changed': self.is_changed,
        }
def is_changed(self, input_value, key, value):
if type(input_value) is not dict:
raise TypeError, u"{} must be dict (got {})".format(input_value, str(type(input_value)))
if input_value.has_key('results'):
res = input_value['results']
else:
res = [input_value]
for item in res:
if item.has_key(key) and item.has_key('changed'):
if item[key] == value and item['changed'] == True:
return True
return False
| Python | 0.000036 | |
82cab3f91df9b4bb9f60e553d6b9e4ef431cb6ae | Add __init__.py | eppconvert/__init__.py | eppconvert/__init__.py | #
# Copyright (c) 2017 Ralf Horstmann <ralf@ackstorm.de>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
__all__ = ['eppformat', 'eppread', 'gpx2epp']
| Python | 0.006636 | |
2f9699d5088266aaa76dad1742f2432d78da9d3b | add validator class | biothings_explorer/resolve_ids/validator.py | biothings_explorer/resolve_ids/validator.py | from collections import defaultdict
from ..config_new import ID_RESOLVING_APIS
from ..exceptions.id_resolver import InvalidIDResolverInputError
from ..utils.common import getPrefixFromCurie
class Validator:
    """Validate user input to the ID resolver and partition it into curies
    that can be resolved (``valid``) and those that cannot (``invalid``)."""

    def __init__(self, user_input):
        # Expected shape: {semantic_type: [curie, ...]}; checked in validate().
        self.__user_input = user_input
        self.__valid = defaultdict(list)
        self.__invalid = defaultdict(list)

    def get_valid_inputs(self):
        """Return the {semantic_type: [curie]} mapping that can be resolved."""
        return self.__valid

    def get_invalid_inputs(self):
        """Return the {semantic_type: [curie]} mapping that cannot be resolved."""
        return self.__invalid

    def _validate_if_input_is_dict(self, user_input):
        """Raise InvalidIDResolverInputError unless the input is a dict."""
        if not isinstance(user_input, dict):
            raise InvalidIDResolverInputError(
                user_input,
                message="Your Input to ID Resolver is Invalid. It should be a dictionary!",
            )

    def _validate_if_values_of_input_is_list(self, user_input):
        """Raise unless every value of the input dict is a list."""
        for k, v in user_input.items():
            if not isinstance(v, list):
                raise InvalidIDResolverInputError(
                    user_input,
                    message="Your Input to ID Resolver is Invalid. All values of your input dictionary should be a list!",
                )

    def _validate_if_each_item_in_input_values_is_curie(self, user_input):
        """Raise unless every item is a curie, i.e. a string containing ':'."""
        for k, v in user_input.items():
            for _v in v:
                if not isinstance(_v, str) or ":" not in _v:
                    raise InvalidIDResolverInputError(
                        user_input,
                        message="Your Input to ID Resolver is Invalid. Each item in the values of your input dictionary should be a curie. Spotted {} is not a curie".format(
                            _v
                        ),
                    )

    def _check_if_semantic_type_can_be_resolved(self, user_input):
        """Split out semantic types that no resolving API supports; return
        the subset of the input whose types are supported."""
        res = {}
        for k, v in user_input.items():
            if k not in ID_RESOLVING_APIS:
                self.__invalid[k] = v
            else:
                res[k] = v
        return res

    def _check_if_prefix_can_be_resolved(self, user_input):
        """Route each curie to valid/invalid based on its prefix mapping."""
        for k, v in user_input.items():
            for _v in v:
                if getPrefixFromCurie(_v) not in ID_RESOLVING_APIS[k]["mapping"]:
                    self.__invalid[k].append(_v)
                else:
                    self.__valid[k].append(_v)

    def validate(self):
        """Run all structural checks, then partition the input into the
        valid/invalid mappings exposed by the getters above."""
        self._validate_if_input_is_dict(self.__user_input)
        self._validate_if_values_of_input_is_list(self.__user_input)
        self._validate_if_each_item_in_input_values_is_curie(self.__user_input)
        tmp_valid_res = self._check_if_semantic_type_can_be_resolved(self.__user_input)
        self._check_if_prefix_can_be_resolved(tmp_valid_res)
| Python | 0.000001 | |
c69fdba07aa4228f3e708b49e7fef4d0143e7a13 | Add missing stats.py | vpr/tests/api_stats.py | vpr/tests/api_stats.py | from django.db import connection
SQL_COUNT = 'select count(id) from vpr_api_apirecord where %s=%s;'
def countValue(field, value, time_start=None, time_end=None):
    """Count API records where column *field* equals *value*.

    :param field: column name. It is interpolated into the SQL text, so it
        MUST come from trusted code, never from user input (SQL injection).
    :param time_start: currently unused; kept for API compatibility.
    :param time_end: currently unused; kept for API compatibility.
    :returns: a one-element row tuple ``(count,)``.
    """
    cur = connection.cursor()
    # SECURITY/BUG FIX: pass the value as a query parameter so it is quoted
    # correctly and cannot inject SQL; previously both field and value were
    # interpolated raw into the statement, which also broke string values.
    cur.execute(
        'select count(id) from vpr_api_apirecord where {0}=%s;'.format(field),
        [value])
    return cur.fetchone()
| Python | 0.000095 | |
13addaf6e5a0423b632efcc4d16e3e5d864fdac3 | Create validate_csv_wd.py | validate_csv_wd.py | validate_csv_wd.py | #!/usr/bin/env python3.5
import sys
import re
import os
import csv
def read_file(fname):
    """Print the column count of every row in a ``~``-delimited CSV file,
    followed by the total number of records.

    :param fname: path of the file to inspect.
    """
    # BUG FIX: use a context manager so the file is closed even if csv
    # parsing raises; the original leaked the handle on error.
    no_rows = 0
    with open(fname, 'r') as f:
        csv_reader = csv.reader(f, delimiter='~')
        for row in csv_reader:
            no_rows += 1
            no_cols = len(row)
            print("Row %d: columns = %d" % (no_rows, no_cols))
    print(".........")
    print("Number of records in csv file: %d" % no_rows)
if __name__ == '__main__':
    # Validate every file path passed on the command line.
    args = sys.argv[1:]
    for fl in args:
        print("File : %s" % fl)
        print("..................................")
        read_file(fl)
| Python | 0.001169 | |
6c4ef8298bbdf48f82d13fb25a0f3958237392f2 | Add nova client for retrieving instance information | novajoin/nova.py | novajoin/nova.py | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handle communication with Nova."""
from novaclient import client
from novaclient import exceptions
from oslo_config import cfg
from oslo_log import log as logging

from novajoin import keystone_client
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
NOVA_APIVERSION = 2.1
class NovaClient(object):
    """Wrapper around nova client."""

    def __init__(self):
        # Pin the compute API microversion used for every request.
        self.version = NOVA_APIVERSION
        self.client = self._nova_client()

    def _nova_client(self):
        """Instantiate a new novaclient.Client object."""
        # Authenticate through the shared keystone session.
        session = keystone_client.get_session()
        return client.Client(str(self.version), session=session)
def get_instance(instance_id):
    """Return the nova server for *instance_id*, or None if it does not exist.

    BUG FIX: the original caught ``novaclient.exceptions.NotFound`` where
    ``novaclient`` was the local NovaClient *instance* (which has no
    ``exceptions`` attribute), so a missing server raised AttributeError
    instead of returning None. The exception class now comes from the
    novaclient package itself (see the module imports).
    """
    novaclient = NovaClient()
    try:
        return novaclient.client.servers.get(instance_id)
    except exceptions.NotFound:
        return None
| Python | 0 | |
d53358a6a0a564a5b4982f7f3dfdfd1163d6a295 | Add test covering no RunStop for v2. | databroker/tests/test_v2/test_no_run_stop.py | databroker/tests/test_v2/test_no_run_stop.py | # This is a special test because we corrupt the generated data.
# That is why it does not reuse the standard fixures.
import tempfile
from suitcase.jsonl import Serializer
from bluesky import RunEngine
from bluesky.plans import count
from ophyd.sim import det
from databroker._drivers.jsonl import BlueskyJSONLCatalog
def test_no_stop_document(RE, tmpdir):
    """
    When a Run has no RunStop document, whether because it does not exist yet
    or because the Run was interrupted in a critical way and never completed,
    we expect the field for 'stop' to contain None.
    """
    directory = str(tmpdir)
    serializer = Serializer(directory)

    def insert_all_except_stop(name, doc):
        # Forward every document except RunStop to the serializer, producing
        # an intentionally incomplete run on disk.
        if name != 'stop':
            serializer(name, doc)

    RE(count([det]), insert_all_except_stop)
    serializer.close()
    catalog = BlueskyJSONLCatalog(f'{directory}/*.jsonl')
    # The start document is present; the missing stop must map to None.
    assert catalog[-1].metadata['start'] is not None
    assert catalog[-1].metadata['stop'] is None
| Python | 0 | |
ace782a3f4c616f9e22e1a1ce29f053b71391845 | Add missing migration for column description. | cms/migrations/0002_update_template_field.py | cms/migrations/0002_update_template_field.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Schema-noop migration: only updates the help text / template choices
    # on Page.template.

    dependencies = [
        ('cms', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='page',
            name='template',
            field=models.CharField(help_text='Templates are used to render the layout of a page.', max_length=100, verbose_name='template', choices=[(b'test-template.html', 'Test Template')]),
            preserve_default=True,
        ),
    ]
| Python | 0 | |
750fd86c3b5d490003c68adf78b49a74d0a17862 | Add cluster.conf shared mapper | falafel/mappers/cluster_conf.py | falafel/mappers/cluster_conf.py | from falafel.core.plugins import mapper
import xml.etree.ElementTree as ET
@mapper('cluster.conf')
def get_cluster_conf(context):
    """
    Return node, fence, fencedevices and resources information. The json
    structure matches the xml structure.
    Here is example:
    <cluster name="mycluster" config_version="3">
       <clusternodes>
         <clusternode name="node-01.example.com" nodeid="1">
             <fence>
                <method name="APC">
                  <device name="apc" port="1"/>
                 </method>
                <method name="SAN">
                  <device name="sanswitch1" port="12" action="on"/>
                  <device name="sanswitch2" port="12" action="on"/>
                 </method>
             </fence>
         </clusternode>
         <clusternode name="node-02.example.com" nodeid="2">
             <fence>
                <method name="APC">
                  <device name="apc" port="2"/>
                 </method>
                <method name="SAN">
                  <device name="sanswitch1" port="12"/>
                 </method>
             </fence>
         </clusternode>
       </clusternodes>
       <cman expected_votes="3"/>
       <fencedevices>
          <fencedevice agent="fence_imm" ipaddr="139.223.41.219" login="opmgr" name="fence1" passwd="***"/>
          <fencedevice agent="fence_imm" ipaddr="139.223.41.229" login="opmgr" name="fence2" passwd="***"/>
       </fencedevices>
       <rm>
         <resources>
             <lvm name="lvm" vg_name="shared_vg" lv_name="ha-lv"/>
             <fs name="FS" device="/dev/shared_vg/ha-lv" force_fsck="0" force_unmount="1" fsid="64050" fstype="ext4" mountpoint="/mnt" options="" self_fence="0"/>
         </resources>
       </rm>
    </cluster>

    OUTPUT like this:
    {
        "fencedevices": [
            {
                "passwd": "***",
                "login": "opmgr",
                "ipaddr": "139.223.41.219",
                "name": "fence1",
                "agent": "fence_imm"
            },
            {
                "passwd": "***",
                "login": "opmgr",
                "ipaddr": "139.223.41.229",
                "name": "fence2",
                "agent": "fence_imm"
            }
        ],
        "nodes": [
            {
                "fences": [
                    {
                        "device": [
                            {
                                "name": "apc",
                                "port": "1"
                            }
                        ],
                        "meth_name": "APC"
                    },
                    {
                        "device": [
                            {
                                "action": "on",
                                "name": "sanswitch1",
                                "port": "12"
                            },
                            {
                                "action": "on",
                                "name": "sanswitch2",
                                "port": "12"
                            }
                        ],
                        "meth_name": "SAN"
                    }
                ],
                "name": "node-01.example.com",
                "nodeid": "1"
            },
            {
                "fences": [
                    {
                        "device": [
                            {
                                "name": "apc",
                                "port": "2"
                            }
                        ],
                        "meth_name": "APC"
                    },
                    {
                        "device": [
                            {
                                "name": "sanswitch1",
                                "port": "12"
                            }
                        ],
                        "meth_name": "SAN"
                    }
                ],
                "name": "node-02.example.com",
                "nodeid": "2"
            }
        ],
        "resources": {
            "lvm": {
                "name": "lvm",
                "vg_name": "shared_vg",
                "lv_name": "ha-lv"
            },
            "fs": {
                "self_fence": "0",
                "name": "FS",
                "force_unmount": "1",
                "fstype": "ext4",
                "device": "/dev/shared_vg/ha-lv",
                "mountpoint": "/mnt",
                "options": "",
                "fsid": "64050",
                "force_fsck": "0"
            }
        }
    }
    """
    cluster_xml = ET.fromstringlist(context.content)
    result = {"nodes": []}
    for node in cluster_xml.iter('clusternode'):
        # Note: this extends the element's own attrib dict in place, so the
        # node's XML attributes (name, nodeid, ...) are carried along.
        attr = node.attrib
        attr["fences"] = []
        for fence in node.iter('fence'):
            # There are only one fence in one node part
            for method in fence.iter("method"):
                attr["fences"].append({
                    "meth_name": method.attrib["name"],
                    "device": [device.attrib for device in method.iter("device")]
                })
        result["nodes"].append(attr)
    # IDIOM: build the lists/dicts directly instead of initializing an empty
    # container and concatenating/updating into it (behaviour unchanged).
    result["fencedevices"] = [
        fencedevice.attrib
        for fencedevice in cluster_xml.findall(".//fencedevices//")
    ]
    result["resources"] = {
        sub.tag: sub.attrib for sub in cluster_xml.findall("./rm/resources//")
    }
    return result
| Python | 0.000001 | |
c1e76dbdf07e67d98814d6f357a70c692af3a31d | Add first pass at db router | osf/db/router.py | osf/db/router.py | from django.conf import settings
import psycopg2
CACHED_MASTER = None
class PostgreSQLFailoverRouter(object):
    """
    1. CHECK MASTER_SERVER_DSN @ THREAD LOCAL
    2. THERE?, GOTO 9
    3. GET RANDOM_SERVER FROM `settings.DATABASES`
    4. CONNECT TO RANDOM_SERVER
    5. IS MASTER SERVER?
    6. YES? GOTO 8
    7. NO?, `exit()`
    8. STOR MASTER_SERVER_DSN @ THREAD_LOCAL
    9. PROFIT
    Number of servers can be assumed to be > 1 but shouldn't assume 2 max.
    Might be nice to keep track of the servers that have been tried from settings.DATABASES so we don't get into a loop.
    """
    DSNS = dict()

    def __init__(self):
        self._get_dsns()
        global CACHED_MASTER
        if not CACHED_MASTER:
            CACHED_MASTER = self._get_master()

    def _get_master(self):
        """Return the name of the first configured DSN that is a master
        (i.e. not in recovery), or None if none is found."""
        # BUG FIX: dict.iteritems() is Python-2 only; items() works on both.
        for name, dsn in self.DSNS.items():
            conn = self._get_conn(dsn)
            cur = conn.cursor()
            # pg_is_in_recovery() returns true on standby/replica servers.
            cur.execute('SELECT pg_is_in_recovery();')
            row = cur.fetchone()
            if not row[0]:
                cur.close()
                conn.close()
                return name
            cur.close()
            conn.close()
        return None

    def _get_dsns(self):
        """Populate DSNS with one postgres:// DSN per configured database."""
        template = '{protocol}://{USER}:{PASSWORD}@{HOST}:{PORT}/{NAME}'
        # BUG FIX: dict.iteritems() is Python-2 only; items() works on both.
        for name, db in settings.DATABASES.items():
            if 'postgresql' in db['ENGINE']:
                db['protocol'] = 'postgres'
                # db.setdefault('protocol', 'postgres')
            else:
                raise Exception('PostgreSQLFailoverRouter only works with PostgreSQL... ... ...')
            self.DSNS[name] = template.format(**db)

    def _get_conn(self, dsn):
        """Open a new psycopg2 connection for *dsn*."""
        return psycopg2.connect(dsn)

    def db_for_read(self, model, **hints):
        # NOTE(review): terminating the whole process when no master is
        # known is drastic -- confirm this is the intended failover policy.
        if not CACHED_MASTER:
            exit()
        return CACHED_MASTER

    def db_for_write(self, model, **hints):
        if not CACHED_MASTER:
            exit()
        return CACHED_MASTER

    def allow_relation(self, obj1, obj2, **hints):
        # None defers the decision to the next router / Django's default.
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        return None
| Python | 0 | |
aca6b8b4cd221efca6d3a5f59f96b73d70e65714 | test integration against scipy | gary/integrate/tests/test_1d.py | gary/integrate/tests/test_1d.py | # coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os
import time
import logging
# Third-party
import numpy as np
from astropy import log as logger
from scipy.integrate import simps
# Project
from ..simpsgauss import simpson
logger.setLevel(logging.DEBUG)

# Ensure the plot output directory exists before any test runs.
plot_path = "plots/tests/TODO"
if not os.path.exists(plot_path):
    os.makedirs(plot_path)
def test_simpson():
    """Compare the project's simpson() against scipy.integrate.simps, for
    both an odd and an even number of samples, and report the timings."""
    ncalls = 10
    func = lambda x: np.sin(x - 0.2414)*x + 2.
    # Odd number of samples.
    x = np.linspace(0, 10, 250001)
    y = func(x)

    t0 = time.time()
    for i in range(ncalls):
        s1 = simpson(y, dx=x[1]-x[0])
    print("cython (odd): {0} sec for {1} calls".format(time.time() - t0,ncalls))

    t0 = time.time()
    for i in range(ncalls):
        s2 = simps(y, x=x)
    print("python (odd): {0} sec for {1} calls".format(time.time() - t0,ncalls))
    np.testing.assert_allclose(s1, s2)

    # -----------------------------------------------------
    print()
    # Even number of samples.
    x = np.linspace(0, 10, 250000)
    y = func(x)

    t0 = time.time()
    for i in range(ncalls):
        s1 = simpson(y, dx=x[1]-x[0])
    print("cython (even): {0} sec for {1} calls".format(time.time() - t0,ncalls))

    t0 = time.time()
    for i in range(ncalls):
        s2 = simps(y, x=x)
    print("python (even): {0} sec for {1} calls".format(time.time() - t0,ncalls))
    np.testing.assert_allclose(s1, s2)
| Python | 0.000001 | |
003cb0478a7fcbc7fb9b3521c174397a9dd9a318 | Use sources.list instead of sources.all. | zilencer/lib/stripe.py | zilencer/lib/stripe.py | from functools import wraps
import logging
import os
from typing import Any, Callable, TypeVar
from django.conf import settings
from django.utils.translation import ugettext as _
import stripe
from zerver.lib.exceptions import JsonableError
from zerver.lib.logging_util import log_to_file
from zerver.models import Realm, UserProfile
from zilencer.models import Customer
from zproject.settings import get_secret
STRIPE_SECRET_KEY = get_secret('stripe_secret_key')
STRIPE_PUBLISHABLE_KEY = get_secret('stripe_publishable_key')
stripe.api_key = STRIPE_SECRET_KEY
BILLING_LOG_PATH = os.path.join('/var/log/zulip'
if not settings.DEVELOPMENT
else settings.DEVELOPMENT_LOG_DIRECTORY,
'billing.log')
billing_logger = logging.getLogger('zilencer.stripe')
log_to_file(billing_logger, BILLING_LOG_PATH)
log_to_file(logging.getLogger('stripe'), BILLING_LOG_PATH)
CallableT = TypeVar('CallableT', bound=Callable[..., Any])
class StripeError(JsonableError):
    """JSON-serializable error surfaced to the client for Stripe failures."""
    pass
def catch_stripe_errors(func: CallableT) -> CallableT:
    """Decorator translating Stripe exceptions into StripeError.

    Card errors pass Stripe's own user-facing message through; every other
    Stripe error is logged and replaced by a generic, translated message.
    """
    @wraps(func)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        if STRIPE_PUBLISHABLE_KEY is None:
            # Dev-only message; no translation needed.
            raise StripeError(
                "Missing Stripe config. In dev, add to zproject/dev-secrets.conf .")
        try:
            return func(*args, **kwargs)
        except stripe.error.StripeError as e:
            billing_logger.error("Stripe error: %d %s",
                                 e.http_status, e.__class__.__name__)
            if isinstance(e, stripe.error.CardError):
                # e.g. card declined: Stripe's message is user-appropriate.
                raise StripeError(e.json_body.get('error', {}).get('message'))
            else:
                raise StripeError(
                    _("Something went wrong. Please try again or email us at %s.")
                    % (settings.ZULIP_ADMINISTRATOR,))
        except Exception as e:
            billing_logger.exception("Uncaught error in Stripe integration")
            raise
    return wrapped  # type: ignore # https://github.com/python/mypy/issues/1927
@catch_stripe_errors
def count_stripe_cards(realm: Realm) -> int:
    """Return the number of cards on file for *realm* (0 if no Customer)."""
    try:
        customer_obj = Customer.objects.get(realm=realm)
        # CONSISTENCY FIX: use sources.list(...), as save_stripe_token does
        # in this module; the .all(...) alias is deprecated in stripe-python.
        cards = stripe.Customer.retrieve(customer_obj.stripe_customer_id).sources.list(object="card")
        return len(cards["data"])
    except Customer.DoesNotExist:
        return 0
@catch_stripe_errors
def save_stripe_token(user: UserProfile, token: str) -> int:
    """Returns total number of cards."""
    # The card metadata doesn't show up in Dashboard but can be accessed
    # using the API.
    card_metadata = {"added_user_id": user.id, "added_user_email": user.email}
    try:
        customer_obj = Customer.objects.get(realm=user.realm)
        customer = stripe.Customer.retrieve(customer_obj.stripe_customer_id)
        billing_logger.info("Adding card on customer %s: source=%r, metadata=%r",
                            customer_obj.stripe_customer_id, token, card_metadata)
        card = customer.sources.create(source=token, metadata=card_metadata)
        # Make the newly added card the default payment source.
        customer.default_source = card.id
        customer.save()
        return len(customer.sources.list(object="card")["data"])
    except Customer.DoesNotExist:
        # First card for this realm: create the Stripe customer as well.
        customer_metadata = {"string_id": user.realm.string_id}
        # Description makes it easier to identify customers in Stripe dashboard
        description = "{} ({})".format(user.realm.name, user.realm.string_id)
        billing_logger.info("Creating customer: source=%r, description=%r, metadata=%r",
                            token, description, customer_metadata)
        customer = stripe.Customer.create(source=token,
                                          description=description,
                                          metadata=customer_metadata)
        card = customer.sources.list(object="card")["data"][0]
        card.metadata = card_metadata
        card.save()
        Customer.objects.create(realm=user.realm, stripe_customer_id=customer.id)
        return 1
| from functools import wraps
import logging
import os
from typing import Any, Callable, TypeVar
from django.conf import settings
from django.utils.translation import ugettext as _
import stripe
from zerver.lib.exceptions import JsonableError
from zerver.lib.logging_util import log_to_file
from zerver.models import Realm, UserProfile
from zilencer.models import Customer
from zproject.settings import get_secret
STRIPE_SECRET_KEY = get_secret('stripe_secret_key')
STRIPE_PUBLISHABLE_KEY = get_secret('stripe_publishable_key')
stripe.api_key = STRIPE_SECRET_KEY
BILLING_LOG_PATH = os.path.join('/var/log/zulip'
if not settings.DEVELOPMENT
else settings.DEVELOPMENT_LOG_DIRECTORY,
'billing.log')
billing_logger = logging.getLogger('zilencer.stripe')
log_to_file(billing_logger, BILLING_LOG_PATH)
log_to_file(logging.getLogger('stripe'), BILLING_LOG_PATH)
CallableT = TypeVar('CallableT', bound=Callable[..., Any])
class StripeError(JsonableError):
    """JSON-serializable error surfaced to the client for Stripe failures."""
    pass
def catch_stripe_errors(func: CallableT) -> CallableT:
    """Decorator translating Stripe exceptions into StripeError."""
    @wraps(func)
    def wrapped(*args: Any, **kwargs: Any) -> Any:
        if STRIPE_PUBLISHABLE_KEY is None:
            # Dev-only message; no translation needed.
            raise StripeError(
                "Missing Stripe config. In dev, add to zproject/dev-secrets.conf .")
        try:
            return func(*args, **kwargs)
        except stripe.error.StripeError as e:
            billing_logger.error("Stripe error: %d %s",
                                 e.http_status, e.__class__.__name__)
            if isinstance(e, stripe.error.CardError):
                # e.g. card declined: Stripe's message is user-appropriate.
                raise StripeError(e.json_body.get('error', {}).get('message'))
            else:
                raise StripeError(
                    _("Something went wrong. Please try again or email us at %s.")
                    % (settings.ZULIP_ADMINISTRATOR,))
        except Exception as e:
            billing_logger.exception("Uncaught error in Stripe integration")
            raise
    return wrapped  # type: ignore # https://github.com/python/mypy/issues/1927
@catch_stripe_errors
def count_stripe_cards(realm: Realm) -> int:
    """Return the number of cards on file for *realm* (0 if no Customer)."""
    try:
        customer_obj = Customer.objects.get(realm=realm)
        cards = stripe.Customer.retrieve(customer_obj.stripe_customer_id).sources.all(object="card")
        return len(cards["data"])
    except Customer.DoesNotExist:
        return 0
@catch_stripe_errors
def save_stripe_token(user: UserProfile, token: str) -> int:
    """Attach the card *token* to the realm's Stripe customer (creating the
    customer if needed). Returns total number of cards."""
    # The card metadata doesn't show up in Dashboard but can be accessed
    # using the API.
    card_metadata = {"added_user_id": user.id, "added_user_email": user.email}
    try:
        customer_obj = Customer.objects.get(realm=user.realm)
        customer = stripe.Customer.retrieve(customer_obj.stripe_customer_id)
        billing_logger.info("Adding card on customer %s: source=%r, metadata=%r",
                            customer_obj.stripe_customer_id, token, card_metadata)
        card = customer.sources.create(source=token, metadata=card_metadata)
        customer.default_source = card.id
        customer.save()
        # FIX: sources.list(...) replaces the deprecated sources.all(...)
        # alias removed in newer stripe-python releases.
        return len(customer.sources.list(object="card")["data"])
    except Customer.DoesNotExist:
        customer_metadata = {"string_id": user.realm.string_id}
        # Description makes it easier to identify customers in Stripe dashboard
        description = "{} ({})".format(user.realm.name, user.realm.string_id)
        billing_logger.info("Creating customer: source=%r, description=%r, metadata=%r",
                            token, description, customer_metadata)
        customer = stripe.Customer.create(source=token,
                                          description=description,
                                          metadata=customer_metadata)
        card = customer.sources.list(object="card")["data"][0]
        card.metadata = card_metadata
        card.save()
        Customer.objects.create(realm=user.realm, stripe_customer_id=customer.id)
        return 1
| Python | 0.000001 |
ca002a18b7e392bbdca9d7e0ed8c39739dc5b4a3 | Add code to get 99th percentile absolute pointing for POG | pog_absolute_pointing.py | pog_absolute_pointing.py | import numpy as np
from Chandra.Time import DateTime
import plot_aimpoint
# Get 99th percential absolute pointing radius
plot_aimpoint.opt = plot_aimpoint.get_opt()
asols = plot_aimpoint.get_asol()
# Last six months of data
asols = asols[asols['time'] > DateTime(-183).secs]
# center of box of range of data
mid_dy = (np.max(asols['dy']) + np.min(asols['dy'])) / 2.
mid_dz = (np.max(asols['dz']) + np.min(asols['dz'])) / 2.
# radius of each delta in mm (asol dy dz in mm)
dr = np.sqrt((asols['dy'] - mid_dy) ** 2 + (asols['dz'] - mid_dz) ** 2)
dr_99 = np.percentile(dr, 99)
dr_99_arcsec = dr_99 * 20
print "99th percentile radius of 6m data is {} arcsec".format(dr_99_arcsec)
| Python | 0 | |
874da8664a6ee62937fb859665e17c035a66324b | add utils | custom/enikshay/management/commands/utils.py | custom/enikshay/management/commands/utils.py | from __future__ import print_function
import csv
import datetime
from django.core.management import BaseCommand
from casexml.apps.case.mock import CaseFactory
from casexml.apps.case.xform import get_case_updates
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.case_utils import (
CASE_TYPE_OCCURRENCE,
CASE_TYPE_PERSON,
get_first_parent_of_case,
)
from custom.enikshay.exceptions import ENikshayCaseNotFound
def get_result_recorded_form(test):
    """get last form that set result_recorded to yes"""
    # Walk the case actions newest-first so the *last* matching form wins.
    for action in reversed(test.actions):
        if action.form is not None:
            for update in get_case_updates(action.form):
                if (
                    update.id == test.case_id
                    and update.get_update_action()
                    and update.get_update_action().dynamic_properties.get('result_recorded') == 'yes'
                ):
                    return action.form.form_data
    # Implicitly returns None when no such form exists.
def is_person_public(domain, test):
    """Return True when the person case that owns *test* is not enrolled in
    the private sector; a missing ancestor case is treated as not public."""
    try:
        # Walk the hierarchy: test -> occurrence -> person.
        occurrence_case = get_first_parent_of_case(domain, test.case_id, CASE_TYPE_OCCURRENCE)
        person_case = get_first_parent_of_case(domain, occurrence_case.case_id, CASE_TYPE_PERSON)
    except ENikshayCaseNotFound:
        return False
    return person_case.get_case_property('enrolled_in_private') != 'true'
class BaseEnikshayCaseMigration(BaseCommand):
    """Template command for one-off eNikshay case-property migrations.

    Subclasses define the case type, the properties to rewrite and the
    per-case update logic; this base class handles iteration, CSV logging
    and (optionally, with --commit) saving the updates.
    """

    def add_arguments(self, parser):
        parser.add_argument('domain')
        parser.add_argument('log_file_name')
        parser.add_argument('case_ids', nargs='*')
        parser.add_argument('--commit', action='store_true')

    def handle(self, domain, log_file_name, case_ids, **options):
        # Dry-run by default; only --commit actually saves case updates.
        commit = options['commit']
        print("Starting {} migration on {} at {}".format(
            "real" if commit else "fake", domain, datetime.datetime.utcnow()
        ))
        with open(log_file_name, "w") as log_file:
            writer = csv.writer(log_file)
            writer.writerow(
                ['case_id']
                + ['current_' + case_prop for case_prop in self.case_properties_to_update]
                + self.case_properties_to_update
                + [self.datamigration_case_property]
            )
            for case in self.get_cases(domain, self.case_type, case_ids):
                updated_case_properties = self.get_case_property_updates(case, domain)
                needs_update = bool(updated_case_properties)
                # Mark on the case whether this migration touched it.
                updated_case_properties[self.datamigration_case_property] = 'yes' if needs_update else 'no'
                writer.writerow(
                    [case.case_id]
                    + [case.get_case_property(case_prop) or '' for case_prop in self.case_properties_to_update]
                    + [updated_case_properties.get(case_prop, '') for case_prop in (
                        self.case_properties_to_update + [self.datamigration_case_property])]
                )
                if needs_update and commit:
                    self.commit_updates(domain, case.case_id, updated_case_properties)

    @staticmethod
    def get_cases(domain, case_type, case_ids):
        # With no explicit ids, migrate every case of the given type.
        accessor = CaseAccessors(domain)
        case_ids = case_ids or accessor.get_case_ids_in_domain(type=case_type)
        return accessor.iter_cases(case_ids)

    @staticmethod
    def commit_updates(domain, case_id, updated_case_properties):
        CaseFactory(domain).update_case(case_id, update=updated_case_properties)

    @property
    def case_type(self):
        # Subclasses must return the case type to migrate.
        raise NotImplementedError

    @property
    def case_properties_to_update(self):
        # Subclasses must return the list of property names being rewritten.
        raise NotImplementedError

    @property
    def datamigration_case_property(self):
        # Subclasses must return the marker property set to 'yes'/'no'.
        raise NotImplementedError

    @staticmethod
    def get_case_property_updates(case, domain):
        # Subclasses must return a dict of property updates (empty = no-op).
        raise NotImplementedError
| Python | 0.000004 | |
fa28e80dc7aeed1eb4fb0a18126a2f8105d5a5d2 | Create Cleverbot.py | Cleverbot.py | Cleverbot.py | import re
import cleverbot
import traceback
# Trigger keywords and pattern consumed by the surrounding voice-assistant
# framework (the handle(text, mic, profile)/isValid(text) shape suggests a
# Jasper-style module — TODO confirm against the host project).
WORDS = ["CLEVERBOT", "BOT"]
# Regex used by isValid() below to decide whether this module handles the text.
PATTERN = r"\b(cleverbot|bot)\b"
def handle(text, mic, profile):
    """
    Responds to user-input, typically speech text, starting a conversation with cleverbot

    Arguments:
    text -- user-input, typically transcribed speech
    mic -- used to interact with the user (for both input and output)
    profile -- contains information related to the user (e.g., phone number)
    """
    mic.say('Starting clever bot')
    bot = cleverbot.Cleverbot()
    errors = 0
    # Loop until the user asks to stop or the error budget is exhausted.
    # (The original kept an `exit` flag that shadowed the builtin and was
    # never set to True — the loop only ever left via `break`.)
    while True:
        try:
            question = mic.activeListen()
            if is_exit(question):
                break
            answer = bot.ask(question)
            mic.say(answer)
        except Exception:
            mic.say('Oops')
            # print as a function so this line runs on both Python 2 and 3
            print(traceback.format_exc())
            errors += 1
            # original threshold preserved: gives up after the sixth error
            if errors > 5:
                break
    mic.say('Stopping clever bot')
def is_exit(text):
    """Return True when *text* contains a stop word (exit/quit/stop), case-insensitively."""
    match = re.search(r"(exit|quit|stop)", text, re.IGNORECASE)
    return match is not None
def isValid(text):
    """Return True when *text* matches this module's trigger PATTERN (case-insensitive)."""
    return re.search(PATTERN, text, re.IGNORECASE) is not None
| Python | 0 | |
405dfc9a0a814001961e4090be83a3da4a4d4369 | Copy in constants file from master | cea/technologies/constants.py | cea/technologies/constants.py | """
Constants used throughout the cea.technologies package.
History lesson: This is a first step at removing the `cea.globalvars.GlobalVariables` object.
"""
# Heat Exchangers: overall heat-transfer coefficients and design pinch temperature differences
U_cool = 2500.0  # W/m2K - overall heat-transfer coefficient, cooling side
U_heat = 2500.0  # W/m2K - overall heat-transfer coefficient, heating side
dT_heat = 5.0  # K - pinch delta at design conditions (heating)
dT_cool = 2.0  # K - pinch delta at design conditions (cooling)
# Specific heat / fluid properties of water
rho_W = 998.0  # [kg/m^3] density of Water
cp = 4185.0  # [J/kg K] specific heat capacity (value matches liquid water)
# Substation data
roughness = 0.02 / 1000  # roughness coefficient for heating network pipe in m (for a steel pipe, from Li & — citation truncated in source, TODO confirm reference)
NetworkDepth = 1  # m — presumably the burial depth of the network piping; verify against usage
# Initial Diameter guess
REDUCED_TIME_STEPS = 50  # number of time steps of maximum demand which are evaluated as an initial guess of the edge diameters
656cf2955510151675dfb4acae4e92e21021a6b5 | Add the Course of LiaoXueFeng | LiaoXueFeng/function.py | LiaoXueFeng/function.py | def fact(n):
if n==1:
return 1
return n * fact(n - 1)
print fact(10)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.