commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
198dc11cadc1a20f95dccd5bb4897fa2947ff810 | Add Affichage.py | SUPINFOLaboDev/TheSnake | Affichage.py | Affichage.py | class Affichage:
def affichage_jeux(self):
return 0
| mit | Python | |
abe6ead4f93f98406fe197b6884e51015c200ca1 | Add a test for query_result_to_dict | jeffweeksio/sir | test/test_searchentities.py | test/test_searchentities.py | import unittest
from . import models
from sir.schema.searchentities import SearchEntity as E, SearchField as F
class QueryResultToDictTest(unittest.TestCase):
def setUp(self):
self.entity = E(models.B, [
F("id", "id"),
F("c_bar", "c.bar"),
F("c_bar_trans", "c.bar", transformfunc=lambda v:
v.union(set(["yay"])))
],
1.1
)
self.expected = {
"id": 1,
"c_bar": "foo",
"c_bar_trans": set(["foo", "yay"]),
}
c = models.C(id=2, bar="foo")
self.val = models.B(id=1, c=c)
def test_fields(self):
res = self.entity.query_result_to_dict(self.val)
self.assertDictEqual(self.expected, res)
| mit | Python | |
c1bbbd7ac51a25919512722d633f0d8c2d1009e2 | Create unit tests directory | open2c/cooltools | tests/test_lazy_toeplitz.py | tests/test_lazy_toeplitz.py | from scipy.linalg import toeplitz
import numpy as np
from cooltools.snipping import LazyToeplitz
n = 100
m = 150
c = np.arange(1, n+1)
r = np.r_[1,np.arange(-2, -m, -1)]
L = LazyToeplitz(c, r)
T = toeplitz(c, r)
def test_symmetric():
for si in [
slice(10, 20),
slice(0, 150),
slice(0, 0),
slice(150, 150),
slice(10, 10)
]:
assert np.allclose(L[si, si], T[si, si])
def test_triu_no_overlap():
for si, sj in [
(slice(10, 20), slice(30, 40)),
(slice(10, 15), slice(30, 40)),
(slice(10, 20), slice(30, 45)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_tril_no_overlap():
for si, sj in [
(slice(30, 40), slice(10, 20)),
(slice(30, 40), slice(10, 15)),
(slice(30, 45), slice(10, 20)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_triu_with_overlap():
for si, sj in [
(slice(10, 20), slice(15, 25)),
(slice(13, 22), slice(15, 25)),
(slice(10, 20), slice(18, 22)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_tril_with_overlap():
for si, sj in [
(slice(15, 25), slice(10, 20)),
(slice(15, 22), slice(10, 20)),
(slice(15, 25), slice(10, 18)),
]:
assert np.allclose(L[si, sj], T[si, sj])
def test_nested():
for si, sj in [
(slice(10, 40), slice(20, 30)),
(slice(10, 35), slice(20, 30)),
(slice(10, 40), slice(20, 25)),
(slice(20, 30), slice(10, 40)),
]:
assert np.allclose(L[si, sj], T[si, sj])
| mit | Python | |
c3026f4c6e5edff30347f544746781c7214c2c2e | Add root test file. | levilucio/SyVOLT,levilucio/SyVOLT | test_SM2SM.py | test_SM2SM.py | '''
Created on 2015-01-20
@author: levi
'''
'''
Created on 2015-01-19
@author: levi
'''
import unittest
from patterns.HSM2SM_matchLHS import HSM2SM_matchLHS
from patterns.HSM2SM_rewriter import HSM2SM_rewriter
from PyRamify import PyRamify
from t_core.messages import Packet
from t_core.iterator import Iterator
from t_core.matcher import Matcher
from t_core.rewriter import Rewriter
from himesis_utils import graph_to_dot
from tests.TestModules.HSM2SM import HSM2SM
class Test(unittest.TestCase):
def testName(self):
i = Iterator()
p = Packet()
pyramify = PyRamify()
[self.rules, self.ruleTraceCheckers, backwardPatterns2Rules, backwardPatternsComplete, self.matchRulePatterns, self.ruleCombinators] = \
pyramify.ramify_directory("dir_for_pyramify/", True)
HSM2SM_py = self.matchRulePatterns["HSM2SM"]
print(HSM2SM_py)
matcher = HSM2SM_py[0]
rewriter = HSM2SM_py[1]
p.graph = matcher.condition
graph_to_dot("test_before_SM2SM", HSM2SM())
graph_to_dot("test_SM2SM", p.graph)
s2s_match = matcher
s2s_rewrite = rewriter
graph_to_dot("test_SM2SM_matcher", matcher.condition)
graph_to_dot("test_SM2SM_rewriter", rewriter.condition)
s2s_match.packet_in(p)
if s2s_match.is_success:
print "Yes!"
else:
print "no"
p = i.packet_in(p)
p = s2s_rewrite.packet_in(p)
if s2s_rewrite.is_success:
print "Yes!"
else:
print "no"
graph_to_dot("after_SM2SM", p.graph)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | mit | Python | |
3c7c81fa65206ea70cbff8394efe35749dc9dddd | add bitquant.py driver | joequant/bitquant,joequant/bitquant,linkmax91/bitquant,joequant/bitquant,linkmax91/bitquant,linkmax91/bitquant,linkmax91/bitquant,linkmax91/bitquant,joequant/bitquant,linkmax91/bitquant,joequant/bitquant | web/bitquant.py | web/bitquant.py | from flask import Flask, request
app = Flask(__name__, static_url_path='', static_folder='bitquant')
@app.route("/")
def root():
return app.send_static_file('index.html')
if __name__ == "__main__":
app.run()
| bsd-2-clause | Python | |
58d3df14b1b60da772f59933345a2dfdf2cadec2 | Add python solution for day 17 | Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015 | day17/solution.py | day17/solution.py | import itertools
data = open("data", "r").read()
containers = map(int, data.split("\n"))
part1 = []
minLength = None
for length in range(len(containers)):
combinations = itertools.combinations(containers, length)
combinations = filter(lambda containers: sum(containers) == 150, combinations)
part1 += combinations
if minLength is None and len(combinations) > 0:
minLength = length
print "Combinations (Part 1):", len(part1)
part2 = filter(lambda containers: len(containers) == minLength, part1)
print "Minimum Length Combinations (Part 2):", len(part2)
| mit | Python | |
88cfd7529c6c08e24b20576c1e40f41f3156a47e | add tandem sam scores script | BenLangmead/qtip-experiments,BenLangmead/qtip-experiments | bin/tandem_sam_scores.py | bin/tandem_sam_scores.py | """
tandem_sam_scores.py
For each alignment, compare the "target" simulated alignment score to the
actual score obtained by the aligner. When the read is simulated, we borrow
the target score and the pattern of mismatches and gaps from an input
alignment. But because the new read's sequence and point of origin are
different, and because the aligner's heuristics might act differently on the
tandem read than on the input read, the aligned score might be different.
Could be either higher or lower. Here we compare and make a table showing
how scores change before and after.
"""
from __future__ import print_function
import sys
from collections import defaultdict
scores = defaultdict(int)
for ln in sys.stdin:
if ln[0] == '@':
continue
toks = ln.rstrip().split('\t')
assert toks[0][:6] == '!!ts!!'
ref_id, fw, ref_off, expected_score, typ = toks[0][16:].split('!!ts-sep!!')
actual_score = None
for ef in toks[11:]:
if ef.startswith('AS:i:'):
actual_score = int(ef[5:])
break
scores[(expected_score, actual_score)] += 1
for k, v in sorted(scores.items()):
expected_score, actual_score = k
actual_score = 'NA' if actual_score is None else str(actual_score)
print("%s,%s,%d" % (expected_score, actual_score, v))
| mit | Python | |
b4c21650cfd92d722a0ac20ea51d90f15adca44e | add permissions classes for Group API | amschaal/bioshare,amschaal/bioshare,amschaal/bioshare,amschaal/bioshare,amschaal/bioshare | bioshareX/permissions.py | bioshareX/permissions.py | from django.http.response import Http404
from rest_framework.permissions import DjangoModelPermissions, SAFE_METHODS
from django.contrib.auth.models import Group
class ViewObjectPermissions(DjangoModelPermissions):
def has_object_permission(self, request, view, obj):
if hasattr(view, 'get_queryset'):
queryset = view.get_queryset()
else:
queryset = getattr(view, 'queryset', None)
assert queryset is not None, (
'Cannot apply DjangoObjectPermissions on a view that '
'does not set `.queryset` or have a `.get_queryset()` method.'
)
model_cls = queryset.model
user = request.user
# raise Exception(','.join(self.perms))
if not user.has_perms(self.perms, obj):
# If the user does not have permissions we need to determine if
# they have read permissions to see 403, or not, and simply see
# a 404 response.
if request.method in SAFE_METHODS:
# Read permissions already checked and failed, no need
# to make another lookup.
raise Http404
read_perms = self.get_required_object_permissions('GET', model_cls,view)
if not user.has_perms(read_perms, obj):
raise Http404
# Has read permissions.
return False
return True
class ManageGroupPermission(ViewObjectPermissions):
perms = ['manage_group']
# def has_object_permission(self, request, view, obj):
# if not request.user.groups.filter(id=obj.id).exists():
# return False
# return super(ManageGroupPermission, self).has_object_permission(request,view,obj) | mit | Python | |
43fe12c4dc2778e6c7a4b65dae587004a0ec0155 | rename plugin -> calico_rkt | alexhersh/calico-rkt-intial | calico_rkt/calico_rkt.py | calico_rkt/calico_rkt.py | #!/usr/bin/env python
from __future__ import print_function
import socket
from netaddr import IPAddress
from pycalico import datastore, netns
import functools
import json
import os
import sys
from subprocess import check_output, CalledProcessError
from pycalico.datastore_datatypes import Rules
from pycalico.netns import NamedNamespace as Namespace
print_stderr = functools.partial(print, file=sys.stderr)
# Append to existing env, to avoid losing PATH etc.
# TODO-PAT: This shouldn't be hardcoded
# env = os.environ.copy()
# env['ETCD_AUTHORITY'] = 'localhost:2379'
# ETCD_AUTHORITY_ENV = "ETCD_AUTHORITY"
# PROFILE_LABEL = 'CALICO_PROFILE'
# ETCD_PROFILE_PATH = '/calico/'
RKT_ORCHESTRATOR = 'rkt'
INTERFACE_NAME = 'eth0'
def main():
print_stderr('Args: ', sys.argv)
print_stderr('Env: ', os.environ)
input_ = ''.join(sys.stdin.readlines()).replace('\n', '')
print_stderr('Input: ', input_)
input_json = json.loads(input_)
mode = os.environ['CNI_COMMAND']
if mode == 'init':
print_stderr('No initialization work to perform')
elif mode == 'ADD':
print_stderr('Executing Calico pod-creation plugin')
add(
pod_id=os.environ['CNI_PODID'],
netns_path=os.environ['CNI_NETNS'],
ip='192.168.0.111',
)
elif mode == 'teardown':
print_stderr('No pod-deletion work to perform')
def add(pod_id, netns_path, ip):
""""Handle rkt pod-add event."""
client = datastore.DatastoreClient()
print_stderr('Configuring pod %s' % pod_id, file=sys.stderr)
try:
endpoint = _create_calico_endpoint(pod_id, ip, netns_path,
client=client)
_create_profile(endpoint=endpoint, profile_name=pod_id, client=client)
except CalledProcessError as e:
print_stderr('Error code %d creating pod networking: %s\n%s' % (
e.returncode, e.output, e))
sys.exit(1)
def _create_calico_endpoint(pod_id, ip, netns_path, client):
"""Configure the Calico interface for a pod."""
print_stderr('Configuring Calico networking.', file=sys.stderr)
endpoint = client.create_endpoint(socket.gethostname(), RKT_ORCHESTRATOR,
pod_id, [IPAddress(ip)])
endpoint.provision_veth(Namespace(netns_path), INTERFACE_NAME)
client.set_endpoint(endpoint)
print_stderr('Finished configuring network interface', file=sys.stderr)
return endpoint
def _create_profile(endpoint, profile_name, client):
"""
Configure the calico profile for a pod.
Currently assumes one pod with each name.
"""
print_stderr('Configuring Pod Profile: %s' % profile_name)
if client.profile_exists(profile_name):
print_stderr("Error: Profile with name %s already exists, exiting." % profile_name)
sys.exit(1)
rules = _create_rules(profile_name)
client.create_profile(profile_name, rules)
# Also set the profile for the workload.
print_stderr('Setting profile %s on endpoint %s' %
(profile_name, endpoint.endpoint_id))
client.set_profiles_on_endpoint(
profile_name, endpoint_id=endpoint.endpoint_id
)
print_stderr('Finished configuring profile.')
print(json.dumps(
{
'ip4': {
'ip': '192.168.0.111/24'
}
}))
def _create_rules(id_):
rules_dict = {
'id': id_,
'inbound_rules': [
{
'action': 'allow',
},
],
'outbound_rules': [
{
'action': 'allow',
},
],
}
rules_json = json.dumps(rules_dict, indent=2)
rules = Rules.from_json(rules_json)
return rules
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
9557fc7696b182dd25f15bee85d522c22910bd90 | Add test for siingle camera. | microy/PyStereoVisionToolkit,microy/VisionToolkit,microy/VisionToolkit,microy/PyStereoVisionToolkit,microy/StereoVision,microy/StereoVision | camera-capture-1.py | camera-capture-1.py | #! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Application to capture images from two AVT Manta cameras with the Vimba SDK
#
#
# External dependencies
#
import ctypes
import os
import cv2
import numpy
import time
#
# Vimba frame structure
#
class VmbFrame( ctypes.Structure ) :
# VmbFrame structure fields
_fields_ = [('buffer', ctypes.POINTER(ctypes.c_char)),
('bufferSize', ctypes.c_uint32),
('context', ctypes.c_void_p * 4),
('receiveStatus', ctypes.c_int32),
('receiveFlags', ctypes.c_uint32),
('imageSize', ctypes.c_uint32),
('ancillarySize', ctypes.c_uint32),
('pixelFormat', ctypes.c_uint32),
('width', ctypes.c_uint32),
('height', ctypes.c_uint32),
('offsetX', ctypes.c_uint32),
('offsetY', ctypes.c_uint32),
('frameID', ctypes.c_uint64),
('timestamp', ctypes.c_uint64)]
# Initialize the image buffer
def __init__( self, frame_size ) :
self.buffer = ctypes.create_string_buffer( frame_size )
self.bufferSize = ctypes.c_uint32( frame_size )
#
# Initialization
#
# Default image parameters from our cameras (AVT Manta G504B)
width = 2452
height = 2056
payloadsize = 5041312
# Camera handles
camera_1 = ctypes.c_void_p()
# Image frames
frame_1 = VmbFrame( payloadsize )
# Number of images saved
image_count = 0
# Frame per second counter
frame_counter = 0
fps_counter = 0
#
# Vimba initialization
#
print( 'Vimba initialization...' )
# Get Vimba installation directory
vimba_path = "/" + "/".join(os.environ.get("GENICAM_GENTL64_PATH").split("/")[1:-3]) + "/VimbaC/DynamicLib/x86_64bit/libVimbaC.so"
# Load Vimba library
vimba = ctypes.cdll.LoadLibrary( vimba_path )
# Initialize the library
vimba.VmbStartup()
# Send discovery packet to GigE cameras
vimba.VmbFeatureCommandRun( ctypes.c_void_p(1), "GeVDiscoveryAllOnce" )
#
# Camera connection
#
print( 'Camera connection...' )
# Connect the cameras via their serial number
vimba.VmbCameraOpen( '50-0503323406', 1, ctypes.byref(camera_1) )
# Adjust packet size automatically on each camera
vimba.VmbFeatureCommandRun( camera_1, "GVSPAdjustPacketSize" )
# Configure frame software trigger
vimba.VmbFeatureEnumSet( camera_1, "TriggerSource", "Software" )
#
# Start image acquisition
#
print( 'Start acquisition...' )
# Announce the frames
vimba.VmbFrameAnnounce( camera_1, ctypes.byref(frame_1), ctypes.sizeof(frame_1) )
# Start capture engine
vimba.VmbCaptureStart( camera_1 )
# Start acquisition
vimba.VmbFeatureCommandRun( camera_1, "AcquisitionStart" )
# Initialize the clock for counting the number of frames per second
time_start = time.clock()
# Live display
while True :
# Queue frames
vimba.VmbCaptureFrameQueue( camera_1, ctypes.byref(frame_1), None )
# Send software trigger
vimba.VmbFeatureCommandRun( camera_1, "TriggerSoftware" )
# Get frames back
vimba.VmbCaptureFrameWait( camera_1, ctypes.byref(frame_1), 1000 )
# Check frame validity
if frame_1.receiveStatus :
continue
# Convert frames to numpy arrays
image_1 = numpy.fromstring( frame_1.buffer[ 0 : payloadsize ], dtype=numpy.uint8 ).reshape( height, width )
# Resize image for display
image_final = cv2.resize( image_1, None, fx=0.5, fy=0.5 )
# Frames per second counter
frame_counter += 1
time_elapsed = time.clock() - time_start
if time_elapsed > 0.5 :
fps_counter = frame_counter / time_elapsed
frame_counter = 0
time_start = time.clock()
# Write FPS counter on the displayed image
cv2.putText( image_final, '{:.2f} FPS'.format( fps_counter ), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255) )
# Display the image (scaled down)
cv2.imshow( "Stereo Cameras", image_final )
# Keyboard interruption
key = cv2.waitKey(1) & 0xFF
# Escape key
if key == 27 :
# Exit live display
break
# Space key
elif key == 32 :
# Save images to disk
image_count += 1
print( 'Save image {} to disk...'.format( image_count ) )
cv2.imwrite( 'camera1-{:0>2}.png'.format(image_count), image_1 )
# Cleanup OpenCV
cv2.destroyAllWindows()
#
# Stop image acquisition
#
print( 'Stop acquisition...' )
# Stop acquisition
vimba.VmbFeatureCommandRun( camera_1, "AcquisitionStop" )
# Stop capture engine
vimba.VmbCaptureEnd( camera_1 )
# Flush the frame queue
vimba.VmbCaptureQueueFlush( camera_1 )
# Revoke frames
vimba.VmbFrameRevokeAll( camera_1 )
#
# Camera disconnection
#
print( 'Camera disconnection...' )
# Close the cameras
vimba.VmbCameraClose( camera_1 )
#
# Vimba shutdown
#
print( 'Vimba shutdown...' )
# Release the library
vimba.VmbShutdown()
| mit | Python | |
6e1d1da7983da2ca43a1185adc2ddb2e2e1b7333 | Add basic cycles exercices | MindCookin/python-exercises | chapter02/cicles.py | chapter02/cicles.py | #!/usr/bin/env python
print "Escribir un ciclo definido para imprimir por pantalla todos los numeros entre 10 y 20."
print [x for x in range(10, 20)]
print "Escribir un ciclo definido que salude por pantalla a sus cinco mejores amigos/as."
print [amigo for amigo in ['Lola', 'Dolores', 'Quique', 'Manuel', 'Manolo']]
print "Escribir un programa que use un ciclo definido con rango numerico, que pregunte los nombres de sus cinco mejores amigos/as, y los salude."
for i in range(5):
amigo = raw_input('Escribe el nombre de tu {0} amigo: '.format(i + 1))
print 'Hola {0}'.format(amigo)
print "Escribir un programa que use un ciclo definido con rango numerico, que pregunte los nombres de sus seis mejores amigos/as, y los salude."
for i in range(6):
amigo = raw_input('Escribe el nombre de tu {0} amigo: '.format(i + 1))
print 'Hola {0}'.format(amigo)
print "Escribir un programa que use un ciclo definido con rango numerico, que averigue a cuantos amigos quieren saludar, les pregunte los nombres de esos amigos/as, y los salude."
num = input("A cuantos amigos quieres saludar?")
for i in range(num):
amigo = raw_input('Escribe el nombre de tu {0} amigo: '.format(i + 1))
print 'Hola {0}'.format(amigo)
| apache-2.0 | Python | |
08c189a643f0b76ad28f9c0e0bc376a0ae202343 | Create nesting.py | py-in-the-sky/challenges,py-in-the-sky/challenges,py-in-the-sky/challenges | codility/nesting.py | codility/nesting.py | """
https://codility.com/programmers/task/nesting/
"""
def solution(S):
balance = 0
for char in S:
balance += (1 if char == '(' else -1)
if balance < 0:
return 0
return int(balance == 0)
| mit | Python | |
7055485e8c29c1002a0b3d9cb45cffef1bb5dc46 | Add script | sulir/simsycam | SimSyCam.py | SimSyCam.py | # SimSyCam - Simple Symbian Camera
import appuifw
appuifw.app.orientation='landscape' # must be called before importing camera
appuifw.app.screen='full'
from key_codes import *
import e32, time, camera, globalui, graphics
# variables used for mode change messages
info = u""
start_time = 0
# supportet modes
flash_modes = camera.flash_modes()
exposure_modes = camera.exposure_modes()
white_modes = camera.white_balance_modes()
# default mode values
if 'forced' in flash_modes:
flash_mode = flash_modes.index('forced')
else:
flash_mode = 0
exposure_mode = 0
white_mode = 0
# applicaton lock
app_lock = e32.Ao_lock()
# exit function
def quit():
camera.stop_finder()
camera.release()
app_lock.signal()
appuifw.app.exit_key_handler = quit
# display message on screen
def message(img):
img.rectangle((5, 3, 195, 28), fill=0)
img.text((10, 20), info, 0xff0000, (None, None, graphics.FONT_BOLD))
# displaying viewfinder
def viewfinder(img):
if time.time() <= start_time + 5:
message(img)
appuifw.app.body.blit(img)
# start viewfinder function
def start_view():
camera.start_finder(viewfinder, backlight_on = 1, size=(320, 240))
# take dummy photo - viewfinder will start to show image in current exposure
# and white balance mode
def new_view():
camera.stop_finder()
message(appuifw.app.body)
camera.take_photo(
mode="JPEG_Exif",
size=camera.image_sizes()[-1], # should be the smallest available
exposure=exposure_modes[exposure_mode],
white_balance=white_modes[white_mode])
start_view()
# event callback
def callback(event):
global flash_mode, exposure_mode, white_mode
if event['type'] == appuifw.EEventKey:
ev = event['keycode']
if ev == EKeySelect:
photo()
elif ev == EKey5:
flash_mode = new(flash_modes, flash_mode, "Flash")
elif ev == EKey4:
exposure_mode = new(exposure_modes, exposure_mode, "Exposure")
new_view()
elif ev == EKey6:
white_mode = new(white_modes, white_mode, "White balance")
new_view()
# take photo
def photo():
global info
info = u"Taking photo..."
start_time = time.time()
camera.stop_finder()
message(appuifw.app.body)
p = camera.take_photo(
mode="JPEG_Exif",
size=(1600, 1200),
flash=flash_modes[flash_mode],
exposure=exposure_modes[exposure_mode],
white_balance=white_modes[white_mode])
filename = time.strftime("E:\\Images\\%d%m%y-%H%M%S.jpg")
f = open(filename, "w")
f.write(p)
f.close()
appuifw.app.body.blit(graphics.Image.open(filename), scale=1)
time.sleep(2)
start_time = 0
start_view()
# returns the new mode of given property
def new(modes, mode, string):
global info, start_time
mode += 1
mode %= len(modes)
info = unicode(string) + ": " + modes[mode]
start_time = time.time()
return mode
# start
appuifw.app.body = appuifw.Canvas(event_callback=callback)
start_view()
app_lock.wait() | mit | Python | |
02b5ba55c854e5157ef5f65d3faa9bce960eced2 | Add pygments support. | rescrv/firmant | firmant/pygments.py | firmant/pygments.py | # Copyright (c) 2011, Robert Escriva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Firmant nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
# COPIED CODE
# This code was borrowed from Pygments
# Version: 1.3.1
# Source: http://pypi.python.org/pypi/Pygments/1.3.1
# Copyright: 2006-2010 respective authors
# License: 2-clause BSD
# Copyright (c) 2006-2010 by the respective authors (see AUTHORS file).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
INLINESTYLES = True
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('sourcecode', Pygments)
# End borrowed code
| bsd-3-clause | Python | |
631aa503d1457f823cacd0642a1554ce8f31c1f9 | add jm server | Zex/juicemachine,Zex/juicemachine,Zex/juicemachine | python/jm_server.py | python/jm_server.py | #!/usr/bin/python
#
# jm_server.py
#
# Author: Zex <top_zlynch@yahoo.com>
#
import dbus
import dbus.service
from basic import *
class JuiceMachine(dbus.service.FallbackObject):
"""
JuiceMachine server
"""
def __init__(self):
connection = dbus.SessionBus()
connection_name = dbus.service.BusName(
JM_SERVICE_NAME, bus = connection)
dbus.service.Object.__init__(self, connection_name,
JM_1_PATH)
@dbus.service.method(JM_1_IFACE,
in_signature = '', out_signature = 's',
path_keyword = 'path')
def list(self, path = JM_1_PATH):
return 'Service unique name: ['+self.connection.get_unique_name()+']'
def start_server():
"""
Stadrt juicemachine server
"""
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
global loop
obj = JuiceMachine()
loop = gobject.MainLoop()
connection = dbus.StarterBus()
loop.run()
| mit | Python | |
59a108840f0fb07f60f20bc9ff59a0d194cb0ee3 | enable import as module | sao-eht/lmtscripts,sao-eht/lmtscripts,sao-eht/lmtscripts,sao-eht/lmtscripts | __init__.py | __init__.py | """
.. module:: lmtscripts
:platform: Unix
:synopsis: useful scripts for EHT observations at LMT
.. moduleauthor:: Lindy Blackburn <lindylam@gmail.com>
.. moduleauthor:: Katie Bouman <klbouman@gmail.com>
"""
| mit | Python | |
88b6549b74dd767733cd823de410e00067a79756 | add test auto updater | yurydelendik/binaryen,yurydelendik/binaryen,WebAssembly/binaryen,ddcc/binaryen,WebAssembly/binaryen,WebAssembly/binaryen,yurydelendik/binaryen,ddcc/binaryen,ddcc/binaryen,yurydelendik/binaryen,WebAssembly/binaryen,WebAssembly/binaryen,ddcc/binaryen,yurydelendik/binaryen,ddcc/binaryen | auto_update_tests.py | auto_update_tests.py | #!/usr/bin/env python
import os, sys, subprocess, difflib
print '[ processing and updating testcases... ]\n'
for asm in sorted(os.listdir('test')):
if asm.endswith('.asm.js'):
print '..', asm
wasm = asm.replace('.asm.js', '.wast')
actual, err = subprocess.Popen([os.path.join('bin', 'asm2wasm'), os.path.join('test', asm)], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
assert err == '', 'bad err:' + err
# verify output
if not os.path.exists(os.path.join('test', wasm)):
print actual
raise Exception('output .wast file does not exist')
open(os.path.join('test', wasm), 'w').write(actual)
print '\n[ success! ]'
| apache-2.0 | Python | |
322dd59f362a1862c739c5c63cd180bce8655a6d | Test to add data | elixirhub/events-portal-scraping-scripts | AddDataTest.py | AddDataTest.py | __author__ = 'chuqiao'
import script
script.addDataToSolrFromUrl("http://www.elixir-europe.org:8080/events", "http://www.elixir-europe.org:8080/events");
script.addDataToSolrFromUrl("http://localhost/ep/events?state=published&field_type_tid=All", "http://localhost/ep/events");
| mit | Python | |
be9cf41600b2a00494ca34e3b828e7a43d8ae457 | Create testing.py | a378ec99/bcn | bcn/utils/testing.py | bcn/utils/testing.py | """Utility functions for unittests.
Notes
-----
Defines a function that compares the hash of outputs with the expected output, given a particular seed.
"""
from __future__ import division, absolute_import
import hashlib
def assert_consistency(X, true_md5):
'''
Asserts the consistency between two function outputs based on a hash.
Parameters
----------
X : ndarray
Array to be hashed.
true_md5 : str
Expected hash.
'''
m = hashlib.md5()
m.update(X)
current_md5 = m.hexdigest()
print
print current_md5, true_md5
print
assert current_md5 == true_md5
| mit | Python | |
41904abd0778719a1586b404c1ca56eb3205f998 | Include undg/myip to this repo bin. | und3rdg/zsh,und3rdg/zsh | bin/myip.py | bin/myip.py | #!/usr/bin/python3
def extIp(site): # GETING PUBLIC IP
import urllib.request
from re import findall
ipMask = '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
if site == 'dyndns':
url = 'http://checkip.dyndns.org'
regexp = '<body>Current IP Address: ('+ipMask+')</body>'
if site == 'google':
url = 'https://www.google.co.uk/search?q=my+ip'
regexp = '<w-answer-desktop><div class="...... ...... .... ...... ......" style="-webkit-line-clamp:2">('+ipMask+')</div>'
req = urllib.request.Request( url, data=None, headers={
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0'
}
)
try:
# opener = urllib.request.urlopen(url,timeout=10)
opener = urllib.request.urlopen(req,timeout=10)
except urllib.error.URLError as e:
print(e.reason)
html = opener.read()
ip = findall(regexp,str(html))[0]
return ip
def localIp(): # GETING LOCAL IP FROM SYSTEM
import socket
get = socket.gethostname()
ip = socket.gethostbyname(get)
return ip
def flags(): # CLI ARGUMENTS
import argparse
import sys
parser = argparse.ArgumentParser()
# FLAGS
parser.add_argument('-l', '--local', help='show local ip', action='store_true')
parser.add_argument('-p', '--public', help='show public ip', action='store_true')
parser.add_argument('-g', '--google', help='use google to check ip, faster but may block ip', action='store_true')
parser.add_argument('-v', '--verbose', help='Make output verbose', action='store_true')
args = parser.parse_args()
# LOCAL IP
if args.local:
if args.verbose:
print('system => Your local IP:', localIp())
else:
print(localIp())
# PUBLIC IP
if args.google:
site = 'google'
args.public = True
else:
site = 'dyndns'
if args.public:
if args.verbose:
print(site, '=> Your public IP:', extIp(site))
else:
print(extIp(site))
# IF NO ARGS
if len(sys.argv) < 2:
parser.print_help()
print('\nsystem => Your local IP:', localIp())
print(site, '=> Your public IP:', extIp(site))
flags()
| mit | Python | |
c5a7e6dc9a98f056a31552e7ace4d150b13b998f | Create markdown.py | Vengeanceplays/CsGo,Vengeanceplays/CsGo | markdown.py | markdown.py | import os
import sys
import markdown
from cactus.utils import fileList
template = """
%s
{%% extends "%s" %%}
{%% block %s %%}
%s
{%% endblock %%}
"""
title_template = """
{%% block title %%}%s{%% endblock %%}
"""
CLEANUP = []
def preBuild(site):
    """Cactus hook: convert every Markdown page into a templated HTML page.

    For each ``*.md`` file under the site's pages directory, render it with
    python-markdown (meta extension), splice the HTML and its metadata into
    ``template``, and write the result alongside the source as ``*.html``.
    Generated paths are recorded in CLEANUP so postBuild() can remove them.
    """
    for path in fileList(site.paths['pages']):
        # Only Markdown sources are converted; everything else is untouched.
        if not path.endswith('.md'):
            continue
        md = markdown.Markdown(extensions=['meta'])
        with open(path, 'r') as f:
            html = md.convert(f.read())
        # Re-emit the page's metadata lines verbatim into the output header.
        metadata = []
        for k, v in md.Meta.items():  # .items(): py2-only iteritems() is gone in py3
            if k == 'title':
                pass
            metadata.append('%s: %s' % (k, v[0]))
        outPath = path.replace('.md', '.html')
        with open(outPath, 'w') as f:
            data = template % (
                '\n'.join(metadata),
                md.Meta['extends'][0],
                md.Meta['block'][0],
                html
            )
            # 'title' is optional; add the title block only when present.
            try:
                data += title_template % md.Meta['title'][0]
            except KeyError:
                pass
            f.write(data)
        CLEANUP.append(outPath)
def postBuild(site):
    """Cactus hook: delete the HTML files generated by preBuild and reset
    the CLEANUP registry."""
    global CLEANUP
    for path in CLEANUP:
        print(path)  # py3-compatible print (was a py2 print statement)
        os.remove(path)
    CLEANUP = []
| mit | Python | |
5492e1b318ff0af3f1e2b1ed0217ed2744b50b68 | Add first structure for issue 107 (automatic configuration doc generation) | weblabdeusto/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto | server/src/configuration_doc.py | server/src/configuration_doc.py | from collections import namedtuple
NO_DEFAULT = object()
ANY_TYPE = object()
_Argument = namedtuple('Argument', 'category type default message')
_sorted_variables = []
######################################
#
# CORE
#
CORE = 'core'
WEBLAB_CORE_SERVER_SESSION_TYPE = 'core_session_type'
WEBLAB_CORE_SERVER_SESSION_POOL_ID = 'core_session_pool_id'
_sorted_variables.extend([
(WEBLAB_CORE_SERVER_SESSION_TYPE, _Argument(CORE, str, 'Memory', """What type of session manager the Core Server will use: Memory or MySQL.""")),
(WEBLAB_CORE_SERVER_SESSION_POOL_ID, _Argument(CORE, str, 'UserProcessingServer', """ A unique identifier of the type of sessions, in order to manage them. For instance, if there are four servers (A, B, C and D), the load of users can be splitted in two groups: those being sent to A and B, and those being sent to C and D. A and B can share those sessions to provide fault tolerance (if A falls down, B can keep working from the same point A was) using a MySQL session manager, and the same may apply to C and D. The problem is that if A and B want to delete all the sessions -at the beginning, for example-, but they don't want to delete sessions of C and D, then they need a unique identifier shared for A and B, and another for C and D. In this case, "!UserProcessing_A_B" and "!UserProcessing_C_D" would be enough.""")),
])
#####################################
#
# The rest
#
variables = dict(_sorted_variables)
if __name__ == '__main__':
    # Emit a wiki-style table of every configuration variable, one table
    # header per category. (Prints converted to py3-compatible calls; the
    # original used py2 print statements.)
    categories = {variable.category for variable in variables.values()}
    variables_by_category = {}
    for category in categories:
        variables_by_category[category] = [variable for variable in variables if variables[variable].category == category]
    for category in categories:
        print("|| *Property* || *Type* || *Default value* || *Description* ||")
        # Iterate _sorted_variables so rows keep their declaration order.
        for variable, argument in _sorted_variables:
            if variable in variables_by_category[category]:
                print("|| %(variable)s || %(type)s || %(default)s || %(doc)s ||" % {
                    'variable' : variable,
                    'type' : variables[variable].type.__name__,
                    'default' : variables[variable].default,
                    'doc' : variables[variable].message
                })
| bsd-2-clause | Python | |
411f855daa9f06868aa597f84c0b739429d705f4 | Create bot_read.py | shantnu/RedditBot,shantnu/RedditBot | bot_read.py | bot_read.py | #!/usr/bin/python
import praw
user_agent = ("PyFor Eng bot 0.1")
r = praw.Reddit(user_agent=user_agent)
subreddit = r.get_subreddit('python')
for submission in subreddit.get_hot(limit=5):
print submission.title
print submission.selftext
print submission.score
subreddit = r.get_subreddit('learnpython')
for submission in subreddit.get_hot(limit=5):
print submission.title
print submission.selftext
print submission.score
| mit | Python | |
608120909f05096b51f43d99da4ea3bb86d02472 | add slim vgg19 model | kozistr/Awesome-GANs | SRGAN/vgg19.py | SRGAN/vgg19.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from collections import OrderedDict
tf.set_random_seed(777) # reproducibility
def get_tensor_aliases(tensor):
"""Get a list with the aliases of the input tensor.
If the tensor does not have any alias, it would default to its its op.name or
its name.
Args:
tensor: A `Tensor`.
Returns:
A list of strings with the aliases of the tensor.
"""
if hasattr(tensor, 'aliases'):
aliases = tensor.aliases
else:
if tensor.name[-2:] == ':0':
# Use op.name for tensor ending in :0
aliases = [tensor.op.name]
else:
aliases = [tensor.name]
return aliases
def convert_collection_to_dict(collection, clear_collection=False):
from tensorflow.python.framework import ops
"""Returns an OrderedDict of Tensors with their aliases as keys.
Args:
collection: A collection.
clear_collection: When True, it clears the collection after converting to
OrderedDict.
Returns:
An OrderedDict of {alias: tensor}
"""
output = OrderedDict((alias, tensor)
for tensor in ops.get_collection(collection)
for alias in get_tensor_aliases(tensor))
if clear_collection:
ops.get_default_graph().clear_collection(collection)
return output
def vgg_19(inputs, num_classes=1000, is_training=False, dropout_keep_prob=0.5,
           spatial_squeeze=True, scope='vgg_19', reuse=False, fc_conv_padding='VALID'):
    """Oxford Net VGG 19-Layers version E Example.
    Note: All the fully_connected layers have been transformed to conv2d layers.
    To use in classification mode, resize input to 224x224.
    Args:
      inputs: a tensor of size [batch_size, height, width, channels].
      num_classes: number of predicted classes.
      is_training: whether or not the model is being trained.
      dropout_keep_prob: the probability that activations are kept in the dropout
        layers during training.
      spatial_squeeze: whether or not should squeeze the spatial dimensions of the
        outputs. Useful to remove unnecessary dimensions for classification.
      scope: Optional scope for the variables.
      reuse: whether to reuse variables in the enclosing variable scope and in
        every conv layer (for weight sharing, e.g. perceptual-loss networks).
      fc_conv_padding: the type of padding to use for the fully connected layer
        that is implemented as a convolutional layer. Use 'SAME' padding if you
        are applying the network in a fully convolutional manner and want to
        get a prediction map downsampled by a factor of 32 as an output. Otherwise,
        the output prediction map will be (input / 32) - 6 in case of 'VALID' padding.
    Returns:
      the last op containing the log predictions and end_points dict.

    NOTE(review): num_classes, is_training, dropout_keep_prob, spatial_squeeze
    and fc_conv_padding are currently unused -- the FC head after pool5 is not
    built, so only conv features are returned. Confirm this is intentional.
    """
    with tf.variable_scope(scope, 'vgg_19', [inputs], reuse=reuse) as sc:
        end_points_collection = sc.name + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                            outputs_collections=end_points_collection):
            # Five VGG-E conv stages (2-2-4-4-4 conv layers, 3x3 kernels,
            # 64/128/256/512/512 filters), each ending in a 2x2 max-pool.
            net = slim.repeat(inputs, 2, slim.conv2d, 64, 3, scope='conv1', reuse=reuse)
            net = slim.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.repeat(net, 2, slim.conv2d, 128, 3, scope='conv2', reuse=reuse)
            net = slim.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.repeat(net, 4, slim.conv2d, 256, 3, scope='conv3', reuse=reuse)
            net = slim.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.repeat(net, 4, slim.conv2d, 512, 3, scope='conv4', reuse=reuse)
            net = slim.max_pool2d(net, [2, 2], scope='pool4')
            net = slim.repeat(net, 4, slim.conv2d, 512, 3, scope='conv5', reuse=reuse)
            net = slim.max_pool2d(net, [2, 2], scope='pool5')
            # Use conv2d instead of fully_connected layers.
            # Convert end_points_collection into a end_point dict.
            end_points = convert_collection_to_dict(end_points_collection)
            return net, end_points
| mit | Python | |
b62b37db1141221ae735b531bdb46264aadbe2e7 | add make_requests client in python | pauldardeau/python-concurrent-disk-io,pauldardeau/python-concurrent-disk-io,pauldardeau/python-concurrent-disk-io,pauldardeau/python-concurrent-disk-io,pauldardeau/python-concurrent-disk-io,pauldardeau/python-concurrent-disk-io,pauldardeau/python-concurrent-disk-io | make_requests.py | make_requests.py | import os
import sys
import time
def main(timeout_secs, server_port, iteration_count, file_name):
for i in range(iteration_count):
start_time_secs = time.time()
cmd = 'nc localhost %d < file_list_with_time.txt' % server_port
rc = os.system(cmd)
if rc != 0:
sys.exit(1)
else:
end_time_secs = time.time()
total_time_secs = end_time_secs - start_time_secs
if total_time_secs > timeout_secs:
print('***client timeout %d' % total_time_secs)
if __name__=='__main__':
main(5, 7000, 1000, 'some_file')
| bsd-3-clause | Python | |
5794a2d8d2b59a6a37b5af4e8c1adba276c325c4 | Create TagAnalysis.py | Nik0l/UTemPro,Nik0l/UTemPro | TagAnalysis.py | TagAnalysis.py | # Analysis of question tags
| mit | Python | |
a6a9bb5a365aef9798091335c81b1b793578ed1f | Initialize car classifier | shawpan/vehicle-detector | car_classifier.py | car_classifier.py | class CarClassifier(object):
""" Classifier for car object
Attributes:
car_img_dir: path to car images
not_car_img_dir: path to not car images
sample_size: number of images to be used to train classifier
"""
def __init__(self, car_img_dir, not_car_img_dir, sample_size):
""" Initialize class members
Attr:
car_img_dir: path to car images
not_car_img_dir: path to not car images
sample_size: number of images to be used to train classifier
"""
self.car_img_dir = car_img_dir
self.not_car_img_dir = not_car_img_dir
self.sample_size = sample_size
def get_features(self):
""" Extract feature vector from images
"""
pass
| mit | Python | |
1732fe53dc228da64f3536ce2c76b420d8b100dc | Create the animation.py module. | aclogreco/InventGamesWP | ch17/animation.py | ch17/animation.py | # animation.py
# Animation
"""
This is an example of animation using pygame.
An example from Chapter 17 of
'Invent Your Own Games With Python' by Al Sweigart
A.C. LoGreco
"""
| bsd-2-clause | Python | |
248a756cd6ff44eca6e08b3e976bc2ae027accd4 | Add memory ok check | cloudnull/rpc-openstack,andymcc/rpc-openstack,prometheanfire/rpc-openstack,rcbops/rpc-openstack,darrenchan/rpc-openstack,stevelle/rpc-openstack,stevelle/rpc-openstack,hughsaunders/rpc-openstack,galstrom21/rpc-openstack,briancurtin/rpc-maas,sigmavirus24/rpc-openstack,mattt416/rpc-openstack,busterswt/rpc-openstack,cloudnull/rpc-maas,xeregin/rpc-openstack,byronmccollum/rpc-openstack,cloudnull/rpc-maas,byronmccollum/rpc-openstack,busterswt/rpc-openstack,cfarquhar/rpc-maas,jpmontez/rpc-openstack,nrb/rpc-openstack,cfarquhar/rpc-maas,galstrom21/rpc-openstack,jpmontez/rpc-openstack,jacobwagner/rpc-openstack,hughsaunders/rpc-openstack,byronmccollum/rpc-openstack,nrb/rpc-openstack,briancurtin/rpc-maas,cloudnull/rpc-openstack,mancdaz/rpc-openstack,sigmavirus24/rpc-openstack,major/rpc-openstack,briancurtin/rpc-maas,robb-romans/rpc-openstack,miguelgrinberg/rpc-openstack,jacobwagner/rpc-openstack,xeregin/rpc-openstack,BjoernT/rpc-openstack,mancdaz/rpc-openstack,nrb/rpc-openstack,BjoernT/rpc-openstack,darrenchan/rpc-openstack,xeregin/rpc-openstack,npawelek/rpc-maas,sigmavirus24/rpc-openstack,git-harry/rpc-openstack,npawelek/rpc-maas,git-harry/rpc-openstack,cfarquhar/rpc-openstack,darrenchan/rpc-openstack,shannonmitchell/rpc-openstack,major/rpc-openstack,claco/rpc-openstack,rcbops/rpc-openstack,andymcc/rpc-openstack,xeregin/rpc-openstack,darrenchan/rpc-openstack,npawelek/rpc-maas,stevelle/rpc-openstack,busterswt/rpc-openstack,prometheanfire/rpc-openstack,cfarquhar/rpc-openstack,robb-romans/rpc-openstack,miguelgrinberg/rpc-openstack,claco/rpc-openstack,shannonmitchell/rpc-openstack,miguelgrinberg/rpc-openstack,andymcc/rpc-openstack,cfarquhar/rpc-maas,claco/rpc-openstack,mattt416/rpc-openstack,mattt416/rpc-openstack,sigmavirus24/rpc-openstack,jpmontez/rpc-openstack,cloudnull/rpc-maas | chassis_memory.py | chassis_memory.py | import re
import subprocess
from maas_common import status_err, status_ok, metric_bool
OKAY = re.compile('(?:Health|Status)\s+:\s+(\w+)')
def chassis_memory_report():
"""Return the report as a string."""
return subprocess.check_output(['omreport', 'chassis', 'memory'])
def memory_okay(report):
"""Determine if the installed memory array is okay.
:returns: True if all "Ok", False otherwise
:rtype: bool
"""
return all(v.lower() == 'ok' for v in OKAY.findall(report))
def main():
try:
report = chassis_memory_report()
except OSError as e:
status_err(str(e))
status_ok()
metric_bool('memory_okay', memory_okay(report))
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
260e2b6d4820ce008d751bc21289ece997247d05 | add source | rishubil/sqlalchemy-fulltext-search | sqlalchemy_fulltext/__init__.py | sqlalchemy_fulltext/__init__.py | # -*- coding: utf-8 -*-s
import re
from sqlalchemy import event
from sqlalchemy.schema import DDL
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.sql.expression import ClauseElement
MYSQL = "mysql"
MYSQL_BUILD_INDEX_QUERY = """
ALTER TABLE {0.__tablename__}
ADD FULLTEXT ({1})
"""
MYSQL_MATCH_AGAINST = """
MATCH ({0})
AGAINST ("{1}")
"""
def escape_quote(string):
    """Strip every single- and double-quote character from *string* so it can
    be embedded safely inside a quoted SQL fragment."""
    return string.replace('"', '').replace("'", '')
class FullTextSearch(ClauseElement):
    """
    Search FullText
    :param against: the search query
    :param model: the mapped class to search (must mix in FullText)
    FullText support with in query, i.e.
    >>> from sqlalchemy_fulltext import FullTextSearch
    >>> session.query(Foo).filter(FullTextSearch('adfadf', Foo))
    """
    def __init__(self, against, model):
        self.model = model
        # Quotes are stripped so the term can sit inside AGAINST("...")
        # without breaking the generated SQL string.
        self.against = escape_quote(against)
@compiles(FullTextSearch, MYSQL)
def __mysql_fulltext_search(element, compiler, **kw):
    # Compile FullTextSearch into MySQL's MATCH(cols) AGAINST("term") clause
    # over the model's declared fulltext columns.
    assert issubclass(element.model, FullText), "{0} not FullTextable".format(element.model)
    return MYSQL_MATCH_AGAINST.format(",".join(
                                      element.model.__fulltext_columns__),
                                      element.against)
class FullText(object):
    """
    FullText Mixin object for SQLAlchemy models
    """
    # Column names covered by the FULLTEXT index; subclasses must override.
    __fulltext_columns__ = tuple()

    @classmethod
    def build_fulltext(cls):
        """
        build up fulltext index after table is created
        """
        # Only direct FullText subclasses register DDL (skips the mixin
        # itself and deeper descendants that already inherited an index).
        if FullText not in cls.__bases__:
            return
        assert cls.__fulltext_columns__, "Model:{0.__name__} No FullText columns defined".format(cls)
        # Emit ALTER TABLE ... ADD FULLTEXT right after the table is created.
        event.listen(cls.__table__,
                     'after_create',
                     DDL(MYSQL_BUILD_INDEX_QUERY.format(cls,
                         ", ".join((escape_quote(c)
                                    for c in cls.__fulltext_columns__)))
                         )
                     )

    @declared_attr
    def __contains__(*arg):
        # NOTE(review): always-True membership hook with a py2 debug print;
        # looks like leftover scaffolding -- confirm before relying on it.
        print arg
        return True
def __build_fulltext_index(mapper, class_):
    # Mapper instrumentation hook: wire up the FULLTEXT DDL for any mapped
    # class that mixes in FullText.
    if issubclass(class_, FullText):
        class_.build_fulltext()
# Run the hook whenever SQLAlchemy instruments a mapped class.
event.listen(Mapper, 'instrument_class', __build_fulltext_index)
| mit | Python | |
257a328745b9622713afa218940d2cd820987e93 | Add a super simple color correction client example | PimentNoir/fadecandy,fragmede/fadecandy,fragmede/fadecandy,Jorgen-VikingGod/fadecandy,PimentNoir/fadecandy,poe/fadecandy,pixelmatix/fadecandy,pixelmatix/fadecandy,adam-back/fadecandy,jsestrich/fadecandy,Protoneer/fadecandy,Protoneer/fadecandy,hakan42/fadecandy,adam-back/fadecandy,Protoneer/fadecandy,piers7/fadecandy,piers7/fadecandy,adam-back/fadecandy,hakan42/fadecandy,pixelmatix/fadecandy,poe/fadecandy,adam-back/fadecandy,nomis52/fadecandy,fragmede/fadecandy,pixelmatix/fadecandy,Jorgen-VikingGod/fadecandy,fragmede/fadecandy,jsestrich/fadecandy,nomis52/fadecandy,scanlime/fadecandy,Jorgen-VikingGod/fadecandy,nomis52/fadecandy,nomis52/fadecandy,lincomatic/fadecandy,lincomatic/fadecandy,fragmede/fadecandy,poe/fadecandy,nomis52/fadecandy,Protoneer/fadecandy,poe/fadecandy,hakan42/fadecandy,piers7/fadecandy,lincomatic/fadecandy,PimentNoir/fadecandy,fragmede/fadecandy,poe/fadecandy,nomis52/fadecandy,adam-back/fadecandy,fragmede/fadecandy,pixelmatix/fadecandy,PimentNoir/fadecandy,Jorgen-VikingGod/fadecandy,poe/fadecandy,PimentNoir/fadecandy,scanlime/fadecandy,hakan42/fadecandy,piers7/fadecandy,lincomatic/fadecandy,poe/fadecandy,scanlime/fadecandy,poe/fadecandy,PimentNoir/fadecandy,Jorgen-VikingGod/fadecandy,nomis52/fadecandy,scanlime/fadecandy,jsestrich/fadecandy,lincomatic/fadecandy,scanlime/fadecandy,Protoneer/fadecandy,jsestrich/fadecandy,nomis52/fadecandy,jsestrich/fadecandy,lincomatic/fadecandy,piers7/fadecandy,lincomatic/fadecandy,hakan42/fadecandy,fragmede/fadecandy,scanlime/fadecandy,lincomatic/fadecandy | examples/color-correction-ui.py | examples/color-correction-ui.py | #!/usr/bin/env python
#
# Simple example color correction UI.
# Talks to an fcserver running on localhost.
#
# Micah Elizabeth Scott
# This example code is released into the public domain.
#
import Tkinter as tk
import socket
import json
import struct
s = socket.socket()
s.connect(('localhost', 7890))
print "Connected to OPC server"
def setGlobalColorCorrection(**obj):
    """Send a JSON color-correction blob to fcserver over the open socket."""
    msg = json.dumps(obj)
    # OPC header: channel 0, command 0xF0 (system exclusive), big-endian length.
    s.send(struct.pack(">BBH", 0, 0xF0, len(msg)) + msg)
def update(_):
    """Tk slider callback: push the current gamma/whitepoint to the server."""
    setGlobalColorCorrection(
        gamma = gamma.get(),
        whitepoint = [
            red.get(),
            green.get(),
            blue.get(),
        ])
def slider(name, from_, to):
    """Create, pack and return a horizontal Tk scale wired to update()."""
    s = tk.Scale(root, label=name, from_=from_, to=to, resolution=0.01,
        showvalue='yes', orient='horizontal', length=400, command=update)
    s.set(1.0)
    s.pack()
    return s
root = tk.Tk()
root.title("Fadecandy Color Correction Example")
gamma = slider("Gamma", 0.2, 3.0)
red = slider("Red", 0.0, 1.5)
green = slider("Green", 0.0, 1.5)
blue = slider("Blue", 0.0, 1.5)
root.mainloop()
| mit | Python | |
2359d7f6140b7b8292c3d9043064a9ee195ecebb | add module for storing repeated constants | Salman-H/mars-search-robot | code/constants.py | code/constants.py | """Module for constants and conversion factors."""
__author__ = 'Salman Hashmi, Ryan Keenan'
__license__ = 'BSD License'
TO_DEG = 180./np.pi
TO_RAD = np.pi/180.
| bsd-2-clause | Python | |
e2744bef45b62b6af2882aa881c494b9367a7d2a | Add 2D hybridization demo with file-write | thomasgibson/tabula-rasa | experiments/hybridization_2D.py | experiments/hybridization_2D.py | """Solve a mixed Helmholtz problem
sigma + grad(u) = 0,
u + div(sigma) = f,
using hybridisation with SLATE performing the forward elimination and
backwards reconstructions. The corresponding finite element variational
problem is:
dot(sigma, tau)*dx - u*div(tau)*dx + lambdar*dot(tau, n)*dS = 0
div(sigma)*v*dx + u*v*dx = f*v*dx
gammar*dot(sigma, n)*dS = 0
for all tau, v, and gammar.
This is solved using broken Raviart-Thomas elements of degree k for
(sigma, tau), discontinuous Galerkin elements of degree k - 1
for (u, v), and HDiv-Trace elements of degree k - 1 for (lambdar, gammar).
The forcing function is chosen as:
(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2),
which reproduces the known analytical solution:
sin(x[0]*pi*2)*sin(x[1]*pi*2)
"""
from __future__ import absolute_import, print_function, division
from firedrake import *
def test_slate_hybridization(degree, resolution, quads=False):
# Create a mesh
mesh = UnitSquareMesh(2 ** resolution, 2 ** resolution,
quadrilateral=quads)
# Create mesh normal
n = FacetNormal(mesh)
# Define relevant function spaces
if quads:
RT = FiniteElement("RTCF", quadrilateral, degree + 1)
else:
RT = FiniteElement("RT", triangle, degree + 1)
BRT = FunctionSpace(mesh, BrokenElement(RT))
DG = FunctionSpace(mesh, "DG", degree)
T = FunctionSpace(mesh, "HDiv Trace", degree)
W = BRT * DG
# Define the trial and test functions
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
gammar = TestFunction(T)
# Define the source function
f = Function(DG)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*sin(x*pi*2)*sin(y*pi*2))
# Define finite element variational forms
Mass_v = dot(sigma, tau) * dx
Mass_p = u * v * dx
Div = div(sigma) * v * dx
Div_adj = div(tau) * u * dx
local_trace = gammar('+') * dot(sigma, n) * dS
L = f * v * dx
# Trace variables are 0 on the boundary of the domain
# so we remove their contribution on all exterior edges
bcs = DirichletBC(T, Constant(0.0), (1, 2, 3, 4))
# Perform the Schur-complement with SLATE expressions
A = Tensor(Mass_v + Mass_p + Div - Div_adj)
K = Tensor(local_trace)
Schur = -K * A.inv * K.T
F = Tensor(L)
RHS = -K * A.inv * F
S = assemble(Schur, bcs=bcs)
E = assemble(RHS)
# Solve the reduced system for the Lagrange multipliers
lambda_sol = Function(T)
solve(S, lambda_sol, E, solver_parameters={'pc_type': 'lu',
'ksp_type': 'cg'})
# Currently, SLATE can only assemble one expression at a time.
# However, we may still write out the pressure and velocity
# reconstructions in SLATE and obtain our solutions by assembling
# the SLATE tensor expressions.
# NOTE: SLATE cannot assemble expressions that result in a tensor
# with arguments in a mixed function space (yet). Therefore we have
# to separate the arguments from the mixed space:
sigma = TrialFunction(BRT)
tau = TestFunction(BRT)
u = TrialFunction(DG)
v = TestFunction(DG)
A_v = Tensor(dot(sigma, tau) * dx)
A_p = Tensor(u * v * dx)
B = Tensor(div(sigma) * v * dx)
K = Tensor(dot(sigma, n) * gammar('+') * dS)
F = Tensor(f * v * dx)
# SLATE expression for pressure recovery:
u_sol = (B * A_v.inv * B.T + A_p).inv * (F + B * A_v.inv * K.T * lambda_sol)
u_h = assemble(u_sol)
# SLATE expression for velocity recovery
sigma_sol = A_v.inv * (B.T * u_h - K.T * lambda_sol)
sigma_h = assemble(sigma_sol)
new_sigma_h = project(sigma_h, FunctionSpace(mesh, RT))
File("hybrid-2d.pvd").write(new_sigma_h, u_h)
test_slate_hybridization(degree=0, resolution=6, quads=True)
| mit | Python | |
07da1b8a2d0a8c8e28db3c9bed9de1d9f9a7ad6f | Add base solver class | Cosiek/KombiVojager | base_solver.py | base_solver.py | #!/usr/bin/env python
# encoding: utf-8
from datetime import datetime
class BaseSolver(object):
    """Abstract base class for route-search solvers.

    Subclasses implement run_search(); run() then records the result and
    the wall-clock duration of the search.
    """

    task = None                    # problem instance handed in at construction
    best_solution = None           # best route found by run_search()
    best_distance = float('inf')   # length of best_solution
    search_time = None             # wall-clock duration of the last run()

    def __init__(self, task):
        self.task = task

    def run(self):
        """Execute run_search(), capturing its result and elapsed time."""
        start_time = datetime.now()
        self.best_solution, self.best_distance = self.run_search()
        finish_time = datetime.now()
        self.search_time = finish_time - start_time

    def run_search(self):
        """Return a (solution, distance) tuple; implemented by subclasses.

        Raising here is clearer than the original silent ``pass``, which made
        run() fail later with an opaque TypeError when unpacking None.
        """
        raise NotImplementedError('Subclasses must implement run_search()')
| mit | Python | |
ff76d47f210e97f3ac4ba58a2c3eecb045b28cde | Create RateLimit.py | Mercurial/CorpBot.py,Mercurial/CorpBot.py | Cogs/RateLimit.py | Cogs/RateLimit.py | import asyncio
import discord
import os
from datetime import datetime
from discord.ext import commands
# This is the RateLimit module. It keeps users from being able to spam commands
class RateLimit:
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.commandCooldown = 5 # 5 seconds between commands
def canRun( firstTime, threshold ):
# Check if enough time has passed since the last command to run another
currentTime = int(time.time())
if currentTime > (int(firstTime) + int(threshold)):
return True
else:
return False
async def message(self, message):
# Check the message and see if we should allow it - always yes.
# This module doesn't need to cancel messages - but may need to ignore
ignore = False
# Check if we can run commands
lastTime = int(self.settings.getUserStat(message.author, message.server, "LastCommand"))
if not self.canRun( lastTime, self.commandCooldown ):
# We can't run commands yet - ignore
ignore = True
return { 'Ignore' : ignore, 'Delete' : False }
async def oncommand(self, command, ctx):
# Let's grab the user who had a completed command - and set the timestamp
self.settings.setUserStat(ctx.message.author, ctx.message.server, "LastCommand", int(time.time()))
| mit | Python | |
d2b7f191519835a3a8f0e8a32fb52c7b354b0e33 | Add Slurp command | Heufneutje/PyMoronBot,MatthewCox/PyMoronBot,DesertBot/DesertBot | Commands/Slurp.py | Commands/Slurp.py | # -*- coding: utf-8 -*-
"""
Created on Aug 31, 2015
@author: Tyranic-Moron
"""
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from Utils import WebUtils
from bs4 import BeautifulSoup
class Slurp(CommandInterface):
    # IRC command that scrapes one attribute from a CSS-selected tag on a page.
    triggers = ['slurp']
    help = "slurp <attribute> <url> <css selector> - scrapes the given attribute from the tag selected at the given url"

    def execute(self, message):
        """
        @type message: IRCMessage
        @return: IRCResponse saying the scraped value or an error message
        """
        # Need at least: attribute, url, and one selector token.
        if len(message.ParameterList) < 3:
            return IRCResponse(ResponseType.Say, u"Not enough parameters, usage: {}".format(self.help), message.ReplyTo)
        # Everything after the url is joined into a single CSS selector.
        prop, url, selector = (message.ParameterList[0], message.ParameterList[1], u" ".join(message.ParameterList[2:]))
        page = WebUtils.fetchURL(url)
        if page is None:
            return IRCResponse(ResponseType.Say, u"Problem fetching {}".format(url), message.ReplyTo)
        # page.body is presumably the raw HTML of the response -- TODO confirm.
        soup = BeautifulSoup(page.body)
        tag = soup.select_one(selector)
        if tag is None:
            return IRCResponse(ResponseType.Say,
                               u"'{}' does not select a tag at {}".format(selector, url),
                               message.ReplyTo)
        # 'name' and 'text' are virtual attributes resolved from the tag
        # object itself rather than from its HTML attributes.
        specials = {
            'name': tag.name,
            'text': tag.text
        }
        if prop in specials:
            value = specials[prop]
        elif prop in tag:
            value = tag[prop]
        else:
            return IRCResponse(ResponseType.Say,
                               u"The tag selected by '{}' ({}) does not have attribute '{}'".format(selector,
                                                                                                    tag.name,
                                                                                                    prop),
                               message.ReplyTo)
        # Multi-valued attributes (e.g. class) come back as lists; flatten.
        if not isinstance(value, basestring):
            value = u" ".join(value)
        return IRCResponse(ResponseType.Say, value, message.ReplyTo)
| mit | Python | |
8b6020384e20305411d2bbb587a2504ef302a17c | Create calculatepi.py | eliwoloshin/Calculate-Pi,anoushkaalavilli/Calculate-Pi,CriticalD20/Calculate-Pi,glenpassow/Calculate-Pi,RDanilek/Calculate-Pi,tesssny/Calculate-Pi,ChubbyPotato/Calculate-Pi,sarahdunbar/Calculate-Pi,Jamin2345/Calculate-Pi,glenpassow/Calculate-Pi,haydenhatfield/Calculate-Pi,sawyerhanlon/Calculate-Pi,davidwilson826/Calculate-Pi,voidJeff/Calculate-Pi,danielwilson2017/Calculate-Pi,SSupattapone/Calculate-Pi,HHS-IntroProgramming/Calculate-Pi,willcampbel/Calculate-Pi,morganmeliment/Calculate-Pi,mcfey/Calculate-Pi,mcfey/Calculate-Pi,davidwilson826/Calculate-Pi,anoushkaalavilli/Calculate-Pi,jasminelou/Calculate-Pi,HHS-IntroProgramming/Calculate-Pi,APikielny/Calculate-Pi,willcampbel/Calculate-Pi,voidJeff/Calculate-Pi,ChubbyPotato/Calculate-Pi,Jamin2345/Calculate-Pi,averywallis/Fractionspatterns,APikielny/Calculate-Pi,averywallis/Fractionspatterns,CriticalD20/Calculate-Pi,ryankynor/Calculate-Pi,dina-hertog/Calculate-Pi,CANDYISLIFE/Calculate-Pi,DRBarnum/Calculate-Pi,HaginCodes/Calculate-Pi,jasminelou/Calculate-Pi,nilskingston/Calculate-Pi,morganmeliment/Calculate-Pi,haydenhatfield/Calculate-Pi,CANDYISLIFE/Calculate-Pi,tesssny/Calculate-Pi,sarahdunbar/Calculate-Pi,HaginCodes/Calculate-Pi,SSupattapone/Calculate-Pi,DRBarnum/Calculate-Pi,RDanilek/Calculate-Pi,sawyerhanlon/Calculate-Pi,nilskingston/Calculate-Pi,ryankynor/Calculate-Pi,dina-hertog/Calculate-Pi,HHStudent/Calculate-Pi,eliwoloshin/Calculate-Pi,danielwilson2017/Calculate-Pi,HHStudent/Calculate-Pi | calculatepi.py | calculatepi.py | """
calculatepi.py
Author: <your name here>
Credit: <list sources used, if any>
Assignment:
| mit | Python | |
d3a320bf9387a0a36419c5166a4052e87d6c059e | Check Tabular Widths script added | kateliev/TypeDrawers | K11.07-CheckTabularWidths.py | K11.07-CheckTabularWidths.py | #FLM: Check Tabular Widths 1.2
# ------------------------
# (C) Vassil Kateliev, 2017 (http://www.kateliev.com)
# * Based on TypeDrawers Thread
# http://typedrawers.com/discussion/1918/simple-script-test-in-batch-if-all-tabular-and-fixed-width-values-are-correct
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependancies
from FL import *
# - Functions
def checkWidths(font, query, width, reportMode='s', mark=255):
'''
Checks if <<query>> matches given (width)(INT) and returns a list of divergent glyph ID's for [font] and/or reports them.
Where:
- <<query>> may be glyph suffix in STR format (EX: '.tonum') or a LIST of glyph names (EX:. ['One.tonum', ... 'Nine.tonum'])
- 'reportMode' is a STR witch may contain: 'r'('R') = REPORT; 'm'('M') = MARK using (mark)(RGBINT); 's'('S') = silently return value (EX: Combination 'rm' will mark and report)
'''
# - Core functionality
if isinstance(query, str):
divergent = zip(*[(gID, font[gID].name) for gID in range(len(font)) if query in font[gID].name and font[gID].width != width])
elif isinstance(query, list):
divergent = zip(*[(gID, font[gID].name) for gID in range(len(font)) if font[gID].name in query and font[gID].width != width])
# - Report (remove/comment if not needed!)
if len(reportMode) and len(divergent):
print '\n-----\nFONT <%s> has %s glyphs that do not match %s width citeria!\n-----' %(font.full_name, len(divergent[0]), width)
if 'r' in reportMode.lower():
print 'Divergent glyphs: %s\n-----' %(' '.join(divergent[1]))
if 'm' in reportMode.lower():
for gID in divergent[0]:
font[gID].mark = mark
print 'Divergent glyphs: Marked!\n-----'
return list(divergent[0])
# - Run --------------------------
# - Examples follow based on TypeDrawes Thread, uncomment to test
'''
# --- Example A: Hardcoded Static (Simple)
# --- Init
numbers = ['zero.tosf','one.tosf','two.tosf','three.tosf','four.tosf','five.tosf','six.tosf','seven.tosf','eight.tosf','nine.tosf','zero.tf','one.tf','two.tf','three.tf','four.tf','five.tf','six.tf','seven.tf','eight.tf','nine.tf']
operators = ['plus','minus','equal','multiply','divide','plusminus','approxequal','logicalnot','notequal','lessequal','greaterequal','less','greater','asciicircum']
currencies = ['Euro.tf','dollar.tf','cent.tf','sterling.tf','yen.tf','florin.tf']
diacritics = ['gravecomb','acutecomb','uni0302','tildecomb','uni0304','uni0306','uni0307','uni0308','uni030A','uni030B','uni030C','uni0312','uni0313','uni0326','uni0327','uni0328','gravecomb.case','acutecomb.case','uni0302.case','tildecomb.case','uni0304.case','uni0306.case','uni0307.case','uni0308.case','uni030A.case','uni030B.case','uni030C.case','uni0326.case','uni0327.case','uni0328.case','gravecomb.sc','acutecomb.sc','uni0302.sc','tildecomb.sc','uni0304.sc','uni0306.sc','uni0307.sc','uni0308.sc','uni030A.sc','uni030B.sc','uni030C.sc','uni0326.sc','uni0327.sc','uni0328.sc']
tabWidth = 698
operatorsWidth = 500
diacriticsWidth = 1
#--- Process All open fonts
for fID in range(len(fl)):
currentFont = fl[fID]
# - Enter manually below every glyph type to be checked
checkWidths(currentFont, numbers, tabWidth, 'rm')
checkWidths(currentFont, operators, operatorsWidth, 'rm')
checkWidths(currentFont, currencies, tabWidth, 'rm')
checkWidths(currentFont, diacritics, diacriticsWidth, 'rm')
print 'Done.'
'''
# --- Example B: Dynamic (Versatile)
# --- Init
# - Enter below all types to be checked in format: list[tuple(Parameter_01 !!STR/LIST!!, Width_01 !!INT!!),..(Parameter_NN, Width_NN)]
widths2check = [('.tosf', 698),
('.tf', 698),
(['plus','minus','equal','multiply','divide','plusminus','approxequal','logicalnot','notequal','lessequal','greaterequal','less','greater','asciicircum'], 500),
(['gravecomb','acutecomb','uni0302','tildecomb','uni0304','uni0306','uni0307','uni0308','uni030A','uni030B','uni030C','uni0312','uni0313','uni0326','uni0327','uni0328','gravecomb.case','acutecomb.case','uni0302.case','tildecomb.case','uni0304.case','uni0306.case','uni0307.case','uni0308.case','uni030A.case','uni030B.case','uni030C.case','uni0326.case','uni0327.case','uni0328.case','gravecomb.sc','acutecomb.sc','uni0302.sc','tildecomb.sc','uni0304.sc','uni0306.sc','uni0307.sc','uni0308.sc','uni030A.sc','uni030B.sc','uni030C.sc','uni0326.sc','uni0327.sc','uni0328.sc'], 1)
]
#--- Process All open fonts
for fID in range(len(fl)):
    currentFont = fl[fID]
    # Check every (query, width) pair, reporting + marking divergent glyphs.
    for item in widths2check:
        checkWidths(currentFont, item[0], item[1], 'rm')
print 'Done.'
0bc5b307d5121a3cacac159fa27ab42f97e208aa | Add database module | jcollado/rabbithole | rabbithole/db.py | rabbithole/db.py | # -*- coding: utf-8 -*-
import logging
from sqlalchemy import (
create_engine,
text,
)
logger = logging.getLogger(__name__)
class Database(object):
    """Write batches of rows to a database with one fixed INSERT statement.

    :param url: Database connection string
    :type url: str
    :param insert_query: Parameterized INSERT statement run for each batch
    :type insert_query: str
    """

    def __init__(self, url, insert_query):
        """Open the connection and pre-parse the insert statement."""
        self.connection = create_engine(url).connect()
        logger.debug('Connected to: %r', url)
        self.insert_query = text(insert_query)

    def insert(self, rows):
        """Execute the prepared insert statement for every row in *rows*."""
        self.connection.execute(self.insert_query, rows)
        logger.debug('Inserted %d rows', len(rows))
| mit | Python | |
b62ed7a60349536457b03a407e99bae3e3ff56e8 | install issue | gsnbng/erpnext,gsnbng/erpnext,njmube/erpnext,indictranstech/erpnext,njmube/erpnext,njmube/erpnext,geekroot/erpnext,geekroot/erpnext,indictranstech/erpnext,njmube/erpnext,indictranstech/erpnext,gsnbng/erpnext,gsnbng/erpnext,indictranstech/erpnext,geekroot/erpnext,geekroot/erpnext,Aptitudetech/ERPNext | erpnext/setup/install.py | erpnext/setup/install.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
	"""Bootstrap site data right after the ERPNext app is installed."""
	# ERPNext ships an extra "Analytics" role on top of core Frappe roles.
	analytics_role = frappe.get_doc({'doctype': "Role", "role_name": "Analytics"})
	analytics_role.insert()
	set_single_defaults()
	create_compact_item_print_custom_field()

	from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
	add_all_roles_to("Administrator")
	frappe.db.commit()
def check_setup_wizard_not_completed():
	# Pre-install guard: ERPNext may only be installed on a fresh site,
	# i.e. one where the Frappe setup wizard has not been completed yet
	# (a completed wizard sets the default home page to 'desktop').
	# Returns False when installation must abort; implicitly returns None
	# otherwise, which callers treat as "OK to proceed".
	if frappe.db.get_default('desktop:home_page') == 'desktop':
		print
		print "ERPNext can only be installed on a fresh site where the setup wizard is not completed"
		print "You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall"
		print
		return False
def set_single_defaults():
	"""Copy the DocField-level defaults of selected single DocTypes onto
	the single documents themselves, then set the site date format."""
	singles = ('Accounts Settings', 'Print Settings', 'HR Settings',
		'Buying Settings', 'Selling Settings', 'Stock Settings')
	for doctype in singles:
		default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
			where parent=%s""", doctype)
		if not default_values:
			continue
		try:
			doc = frappe.get_doc(doctype, doctype)
			for fieldname, value in default_values:
				doc.set(fieldname, value)
			doc.save()
		except (frappe.MandatoryError, frappe.ValidationError):
			# Best effort: singles that cannot be saved are skipped.
			pass

	frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
	"""Add the "Compact Item Print" checkbox (enabled by default) to the
	Print Settings single DocType."""
	from frappe.custom.doctype.custom_field.custom_field import create_custom_field

	field_definition = {
		'label': _('Compact Item Print'),
		'fieldname': 'compact_item_print',
		'fieldtype': 'Check',
		'default': 1,
		'insert_after': 'with_letterhead'
	}
	create_custom_field('Print Settings', field_definition)
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
	# Post-install hook: create the "Analytics" role, seed single-DocType
	# defaults, add the "Compact Item Print" print setting, and grant
	# every role to Administrator before committing.
	frappe.get_doc({'doctype': "Role", "role_name": "Analytics"}).insert()
	set_single_defaults()
	create_compact_item_print_custom_field()
	from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
	add_all_roles_to("Administrator")
	frappe.db.commit()
def check_setup_wizard_not_completed():
	# Pre-install guard: abort (return False) when the Frappe setup wizard
	# has already been completed on this site; otherwise falls through and
	# implicitly returns None, which callers treat as "OK to proceed".
	if frappe.db.get_default('desktop:home_page') == 'desktop':
		print
		print "ERPNext can only be installed on a fresh site where the setup wizard is not completed"
		print "You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall"
		print
		return False
def set_single_defaults():
	# For every single DocType, copy its DocField-level defaults onto the
	# single document itself, then set the site-wide date format.
	for dt in frappe.db.sql_list("""select name from `tabDocType` where issingle=1"""):
		default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
			where parent=%s""", dt)
		if default_values:
			try:
				b = frappe.get_doc(dt, dt)
				for fieldname, value in default_values:
					b.set(fieldname, value)
				b.save()
			except frappe.MandatoryError:
				# Best effort: skip singles that cannot be saved without
				# additional mandatory data.
				pass
	frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
	# Add a "Compact Item Print" checkbox (default on) to Print Settings.
	from frappe.custom.doctype.custom_field.custom_field import create_custom_field
	create_custom_field('Print Settings', {
		'label': _('Compact Item Print'),
		'fieldname': 'compact_item_print',
		'fieldtype': 'Check',
		'default': 1,
		'insert_after': 'with_letterhead'
	}) | agpl-3.0 | Python | 
43e823ad9ea7c44b49c883e8633dc488dff0d2ca | Add end_time for indexing. | City-of-Helsinki/linkedevents,kooditiimi/linkedevents,kooditiimi/linkedevents,aapris/linkedevents,aapris/linkedevents,kooditiimi/linkedevents,aapris/linkedevents,tuomas777/linkedevents,City-of-Helsinki/linkedevents,tuomas777/linkedevents,kooditiimi/linkedevents,tuomas777/linkedevents,City-of-Helsinki/linkedevents | events/search_indexes.py | events/search_indexes.py | from haystack import indexes
from .models import Event
from django.utils.translation import get_language
from django.utils.html import strip_tags
class EventIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Event objects."""
    # Main document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the event name power autosuggest/typeahead queries.
    autosuggest = indexes.EdgeNgramField(model_attr='name')
    # Indexed so results can be filtered/sorted by when the event ends.
    end_time = indexes.DateField(model_attr='end_time')
    def get_updated_field(self):
        # Field consulted for incremental (updated-since) reindexing.
        return 'origin_last_modified_time'
    def get_model(self):
        return Event
    def prepare(self, obj):
        #obj.lang_keywords = obj.keywords.filter(language=get_language())
        # Index plain text only: strip HTML markup from the description.
        obj.description = strip_tags(obj.description)
        return super(EventIndex, self).prepare(obj)
| from haystack import indexes
from .models import Event
from django.utils.translation import get_language
from django.utils.html import strip_tags
class EventIndex(indexes.SearchIndex, indexes.Indexable):
    """Haystack search index for Event objects."""
    # Main document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    # Edge n-grams of the event name power autosuggest/typeahead queries.
    autosuggest = indexes.EdgeNgramField(model_attr='name')
    def get_updated_field(self):
        # Field consulted for incremental (updated-since) reindexing.
        return 'origin_last_modified_time'
    def get_model(self):
        return Event
    def prepare(self, obj):
        #obj.lang_keywords = obj.keywords.filter(language=get_language())
        # Index plain text only: strip HTML markup from the description.
        obj.description = strip_tags(obj.description)
        return super(EventIndex, self).prepare(obj)
| mit | Python |
a85e444e9411f9f768db7c3e1b589b737c01b0a0 | add mnist examples | deercoder/PhD,DeercoderResearch/PhD,DeercoderResearch/PhD,deercoder/PhD,DeercoderResearch/PhD,deercoder/PhD,DeercoderResearch/PhD,deercoder/PhD,DeercoderResearch/PhD,deercoder/PhD,deercoder/0-PhD,deercoder/0-PhD,deercoder/0-PhD,deercoder/PhD,deercoder/PhD,DeercoderResearch/PhD,deercoder/0-PhD,deercoder/0-PhD,deercoder/0-PhD,DeercoderResearch/PhD,deercoder/0-PhD | TensorFlow/ex3/test_mnist.py | TensorFlow/ex3/test_mnist.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tempfile
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/tensorflow/mnist/input_data", one_hot=True)

# Softmax regression over the 10 digit classes: y = softmax(x.W + b).
x = tf.placeholder("float", [None, 784])
w = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
# BUG FIX: the bias variable `b` was created but never used; include it in
# the logits so the model matches the intended y = softmax(xW + b).
y = tf.nn.softmax(tf.matmul(x, w) + b)
y_ = tf.placeholder("float", [None, 10])
# Cross-entropy loss (summed over the batch, not averaged).
cross_entropy = - tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Stochastic training on mini-batches of 100 examples.
for _ in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    print(sess.run(cross_entropy, feed_dict={x: batch_xs, y_: batch_ys}))
# Evaluate classification accuracy on the held-out test set.
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
| mit | Python | |
523ef51278c964718da68bb789e78e6c8f5f8766 | Add the init method to the notification model. | yiyangyi/cc98-tornado | model/notification.py | model/notification.py | def NotificationModel(Query):
def __init__(self, db):
self.db = db
self.table_name = "notification"
super(NotificationModel, self).__init__() | mit | Python | |
c68c5bf488cb7224d675bec333c6b7a4992574ed | Add a simple APL exception class | NewForester/apl-py,NewForester/apl-py | apl_exception.py | apl_exception.py | """
A simple APL exception class
"""
class APL_Exception (BaseException):
    """
    APL Exception Class

    Carries a human-readable error message and, optionally, the source
    line on which the error was detected (None when unknown).
    """
    def __init__ (self,message,line=None):
        # Forward the message to BaseException so str(exc) and the default
        # traceback formatting display it (previously str(exc) was empty).
        super(APL_Exception, self).__init__(message)
        self.message = message
        self.line = line
# EOF
| apache-2.0 | Python | |
e68590e9e05ab54b91ad3d03e372fbf8b341c3b9 | Use a logger thread to prevent stdout races. | ehlemur/gtest-parallel,bbannier/gtest-parallel,kwiberg/gtest-parallel,pbos/gtest-parallel,greggomann/gtest-parallel,google/gtest-parallel | gtest-parallel.py | gtest-parallel.py | #!/usr/bin/env python2
import Queue
import optparse
import subprocess
import sys
import threading
# Command line: worker-pool size plus pass-through gtest filter options.
parser = optparse.OptionParser(
    usage = 'usage: %prog [options] executable [executable ...]')
parser.add_option('-w', '--workers', type='int', default=16,
                  help='number of workers to spawn')
parser.add_option('--gtest_filter', type='string', default='',
                  help='test filter')
parser.add_option('--gtest_also_run_disabled_tests', action='store_true',
                  default=False, help='run disabled tests too')
(options, binaries) = parser.parse_args()
if binaries == []:
  parser.print_usage()
  sys.exit(1)
# Cross-thread queues: `log` collects output lines for the printer thread;
# `tests` holds (command, job_id, test) work items for the worker pool.
log = Queue.Queue()
tests = Queue.Queue()
# Find tests.
# Each binary enumerates its tests via --gtest_list_tests; the listing has
# unindented "TestCase." group lines followed by indented test names.
job_id = 0
for test_binary in binaries:
  command = [test_binary]
  if options.gtest_filter != '':
    command += ['--gtest_filter=' + options.gtest_filter]
  if options.gtest_also_run_disabled_tests:
    command += ['--gtest_also_run_disabled_tests']
  test_list = subprocess.Popen(command + ['--gtest_list_tests'],
                               stdout=subprocess.PIPE).communicate()[0]
  test_group = ''
  for line in test_list.split('\n'):
    if not line.strip():
      continue
    if line[0] != " ":
      test_group = line.strip()
      continue
    line = line.strip()
    # Skip disabled tests unless they should be run
    if not options.gtest_also_run_disabled_tests and 'DISABLED' in line:
      continue
    test = test_group + line
    tests.put((command, job_id, test))
    print str(job_id) + ': TEST ' + test_binary + ' ' + test
    job_id += 1
def run_job((command, job_id, test)):
sub = subprocess.Popen(command + ['--gtest_filter=' + test],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
while True:
line = sub.stdout.readline()
if line == '':
break
log.put(str(job_id) + '> ' + line.rstrip())
code = sub.wait()
log.put(str(job_id) + ': EXIT ' + str(code))
def worker():
  """Drain the shared test queue, running jobs until it is empty."""
  while True:
    try:
      job = tests.get_nowait()
    except Queue.Empty:
      return
    run_job(job)
    tests.task_done()
def logger():
while True:
line = log.get()
if line == "":
return
print line
# Spawn the daemon worker pool plus one printer thread that serializes all
# output (avoiding interleaved prints); an empty string pushed onto `log`
# tells the printer thread to stop after the workers finish.
threads = []
for i in range(options.workers):
  t = threading.Thread(target=worker)
  t.daemon = True
  threads.append(t)
[t.start() for t in threads]
printer = threading.Thread(target=logger)
printer.start()
[t.join() for t in threads]
log.put("")
printer.join()
| #!/usr/bin/env python2
import Queue
import optparse
import subprocess
import sys
import threading
# Command line: worker-pool size plus pass-through gtest filter options.
parser = optparse.OptionParser(
    usage = 'usage: %prog [options] executable [executable ...]')
parser.add_option('-w', '--workers', type='int', default=16,
                  help='number of workers to spawn')
parser.add_option('--gtest_filter', type='string', default='',
                  help='test filter')
parser.add_option('--gtest_also_run_disabled_tests', action='store_true',
                  default=False, help='run disabled tests too')
(options, binaries) = parser.parse_args()
if binaries == []:
  parser.print_usage()
  sys.exit(1)
# Work queue of (command, job_id, test) items consumed by the workers.
tests = Queue.Queue()
# Find tests.
# Each binary enumerates its tests via --gtest_list_tests; the listing has
# unindented "TestCase." group lines followed by indented test names.
job_id = 0
for test_binary in binaries:
  command = [test_binary]
  if options.gtest_filter != '':
    command += ['--gtest_filter=' + options.gtest_filter]
  if options.gtest_also_run_disabled_tests:
    command += ['--gtest_also_run_disabled_tests']
  test_list = subprocess.Popen(command + ['--gtest_list_tests'],
                               stdout=subprocess.PIPE).communicate()[0]
  test_group = ''
  for line in test_list.split('\n'):
    if not line.strip():
      continue
    if line[0] != " ":
      test_group = line.strip()
      continue
    line = line.strip()
    # Skip disabled tests unless they should be run
    if not options.gtest_also_run_disabled_tests and 'DISABLED' in line:
      continue
    test = test_group + line
    tests.put((command, job_id, test))
    print str(job_id) + ': TEST ' + test_binary + ' ' + test
    job_id += 1
def run_job((command, job_id, test)):
sub = subprocess.Popen(command + ['--gtest_filter=' + test],
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT)
while True:
line = sub.stdout.readline()
if line == '':
break
print str(job_id) + '> ' + line.rstrip()
code = sub.wait()
print str(job_id) + ': EXIT ' + str(code)
def worker():
  """Drain the shared test queue, running jobs until it is empty."""
  while True:
    try:
      job = tests.get_nowait()
    except Queue.Empty:
      return
    run_job(job)
    tests.task_done()
# Spawn the daemon worker pool and wait for all workers to finish.
threads = []
for i in range(options.workers):
  t = threading.Thread(target=worker)
  t.daemon = True
  threads.append(t)
[t.start() for t in threads]
[t.join() for t in threads]
| apache-2.0 | Python |
e093ce0730fa3071484fed251535fea62e0430d6 | add logger view | RoboCupULaval/UI-Debug | View/LoggerView.py | View/LoggerView.py | # Under MIT License, see LICENSE.txt
from PyQt4.QtGui import QWidget
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QListWidget
from PyQt4.QtGui import QHBoxLayout
from PyQt4.QtGui import QVBoxLayout
from PyQt4.QtGui import QPushButton
from Model.DataInModel import DataInModel
__author__ = 'RoboCupULaval'
class LoggerView(QWidget):
    """Widget that polls a DataInModel for new log messages and shows them
    in a scrolling list, with a toggleable Pause button."""
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self._parent = parent
        self._model = None
        # Number of log messages already displayed; used as the offset
        # when asking the model for newer entries.
        self._count = 0
        self.pause = False
        self.init_ui()
    def init_ui(self):
        # Build the layout: message list on the left, buttons on the right.
        self.log_queue = QListWidget(self)
        layout = QHBoxLayout()
        layout.addWidget(self.log_queue)
        layout_btn = QVBoxLayout()
        self.btn_pause = QPushButton('Pause')
        self.btn_pause.setCheckable(True)
        self.btn_pause.setChecked(self.pause)
        self.btn_pause.clicked.connect(self.pauseEvent)
        layout_btn.addWidget(self.btn_pause)
        layout.addLayout(layout_btn)
        self.setLayout(layout)
        self.hide()
        # Poll the model for new messages four times per second.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update_logger)
        self.timer.start(250)
    def pauseEvent(self):
        # Toggle paused state and keep the button's checked look in sync.
        self.pause = not self.pause
        self.btn_pause.setChecked(self.pause)
    def set_model(self, model):
        # Only DataInModel instances are accepted as message sources.
        if isinstance(model, DataInModel):
            self._model = model
        else:
            raise TypeError('Logger should get data in model argument.')
    def update_logger(self):
        # Timer slot: append any messages newer than self._count, unless
        # the view is paused or no model has been set yet.
        if not self.pause:
            if self._model is not None:
                messages = self._model.get_last_log(self._count)
                if messages is not None:
                    self._count += len(messages)
                    for msg in messages:
                        self.log_queue.addItem(str(msg))
                    self.log_queue.scrollToBottom()
    def get_count(self):
        # Total number of messages displayed so far.
        return self._count
    def show_hide(self):
        # Toggle visibility; the parent window resizes to fit when shown.
        if self.isVisible():
            self.hide()
        else:
            self.show()
            self._parent.resize_window()
| mit | Python | |
b2f07c815c66be310ee1c126ba743bb786d79a08 | Create problem2.py | Amapolita/MITx--6.00.1x- | W2/PS2/problem2.py | W2/PS2/problem2.py | '''
PROBLEM 2: PAYING DEBT OFF IN A YEAR (15.0/15.0 points)
Now write a program that calculates the minimum fixed monthly payment needed in order pay off a credit card balance within 12 months. By a fixed monthly payment, we mean a single number which does not change each month, but instead is a constant amount that will be paid each month.
In this problem, we will not be dealing with a minimum monthly payment rate.
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
The program should print out one line: the lowest monthly payment that will pay off all debt in under 1 year, for example:
Lowest Payment: 180
'''
balance = 3329
annualInterestRate = 0.2


def lowest_payment(balance, annualInterestRate, increment=10):
    """Return the smallest fixed monthly payment (a multiple of *increment*)
    that pays off *balance* within 12 months.

    Each month the payment is deducted first, then monthly interest
    (annualInterestRate / 12) is compounded on the remainder.
    For a non-positive starting balance this returns 0 (the original
    inline loop printed -10 in that case).
    """
    payment = 0
    while True:
        remaining = balance
        for _ in range(12):
            remaining -= payment
            remaining += remaining * (annualInterestRate / 12)
        if remaining <= 0:
            return payment
        payment += increment


print('Lowest Payment: ' + str(lowest_payment(balance, annualInterestRate)))
| unlicense | Python | |
61b7ee073efcd698329bec69a9eb682a1bc032d3 | Add py_trace_event to DEPS. | catapult-project/catapult,catapult-project/catapult-csm,sahiljain/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,benschmaus/catapult,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,sahiljain/catapult,catapult-project/catapult,sahiljain/catapult,SummerLW/Perf-Insight-Report | telemetry/telemetry/util/trace.py | telemetry/telemetry/util/trace.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core import util
# Make the vendored py_trace_event package importable, then re-export its
# public tracing API from this module.
util.AddDirToPythonPath(util.GetChromiumSrcDir(),
                        'third_party', 'py_trace_event', 'src')
from trace_event import *  # pylint: disable=F0401
| bsd-3-clause | Python | |
c3789b5f8a8c90902693194cf257b6c9e4ac7783 | Add solution to 119. | bsamseth/project-euler,bsamseth/project-euler | 119/119.py | 119/119.py | """
The number 512 is interesting because it is equal to the sum of its digits
raised to some power: 5 + 1 + 2 = 8, and 8^3 = 512. Another example of a number
with this property is 614656 = 28^4.
We shall define a_n to be the nth term of this sequence and insist that a number
must contain at least two digits to have a sum.
You are given that a_2 = 512 and a_10 = 614656.
Find a_30.
Solution comment: Well, this number is on oeis.org. So kinda cheating, but...
Other solution is simple brute force. The search space was just manually set quite a
bit larger than what is set now, then just reduced a bit afterwards. Terms not generated
in order, so need to generate some more terms than just 30.
"""
# from urllib.request import urlopen
# data = urlopen('https://oeis.org/A023106/b023106.txt').read().splitlines()
# answer = int(data[30 + 9].split()[-1]) # Skip first 10 single digit terms.
# print('Answer:', answer)
from time import time
def digit_sum(n):
    """Return the sum of the decimal digits of a non-negative integer."""
    total = 0
    remaining = n
    # Peel off the least-significant digit until nothing remains.
    while remaining:
        remaining, digit = divmod(remaining, 10)
        total += digit
    return total
t0 = time()
terms = []
# Brute force: for every candidate digit sum b, walk the powers b**2..b**29
# and keep those whose decimal digit sum equals b.  Terms are not produced
# in ascending order, so more than 30 are collected before sorting.
for b in range(2, 100):
    x = b
    for _ in range(2, 30):
        x *= b
        if digit_sum(x) == b:
            terms.append(x)
    if len(terms) >= 40:
        break
# a_30 is the 30th term of the sorted sequence (index 29).
print('Answer:', sorted(terms)[29])
print('Execution time: {:.3f} ms'.format((time() - t0) * 1e3))
| mit | Python | |
2c155d4fe286f685bca696c60730bd2fca2151f1 | Add new package: sysbench (#18310) | iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack | var/spack/repos/builtin/packages/sysbench/package.py | var/spack/repos/builtin/packages/sysbench/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Sysbench(AutotoolsPackage):
    """Scriptable database and system performance benchmark."""

    homepage = "https://github.com/akopytov/sysbench"
    url      = "https://github.com/akopytov/sysbench/archive/1.0.20.tar.gz"

    version('1.0.20', sha256='e8ee79b1f399b2d167e6a90de52ccc90e52408f7ade1b9b7135727efe181347f')
    version('1.0.19', sha256='39cde56b58754d97b2fe6a1688ffc0e888d80c262cf66daee19acfb2997f9bdd')
    version('1.0.18', sha256='c679b285e633c819d637bdafaeacc1bec13f37da5b3357c7e17d97a71bf28cb1')

    # Git archives ship no pre-generated configure script, so the whole
    # autotools chain is needed at build time.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
    # NOTE(review): presumably for the default MySQL driver -- confirm
    # whether a variant to disable it would be useful.
    depends_on('mysql-client')
| lgpl-2.1 | Python | |
34a9969495f1b1c9452bff54cb03148e68fde303 | Create Insertion_sort_with_binary_search.py | gzc/CLRS,gzc/CLRS,gzc/CLRS | C02-Getting-Started/exercise_code/Insertion_sort_with_binary_search.py | C02-Getting-Started/exercise_code/Insertion_sort_with_binary_search.py | # Exercise 2.3-6 in book
# Standalone Python version 2.7 code
import os
import re
import math
import time
from random import randint
def insertion_sort(array):
    """Sort *array* in place, ascending, using plain insertion sort."""
    for j in range(len(array)):
        key = array[j]
        i = j - 1
        # Shift every larger element of the sorted prefix one slot right,
        # then drop the key into the gap.
        while i >= 0 and array[i] > key:
            array[i + 1] = array[i]
            i -= 1
        array[i + 1] = key
def insertion_sort_v2(array):
    """Insertion-sort variant: find each key's slot in the sorted prefix
    via binary_search, then shift the intervening elements right."""
    for j in range(1, len(array)):
        key = array[j]
        target = binary_search(array, key, j)
        for i in range(j, target, -1):
            array[i] = array[i - 1]
        array[target] = key
def binary_search(array, searchingelement, arraypart):
    # Binary search over the already-sorted prefix array[:arraypart];
    # returns the index at which `searchingelement` should be inserted
    # (consumed by insertion_sort_v2).
    array = list(array[:arraypart])
    last = array.__len__()
    mid = int(last/2)
    # NOTE(review): `min` shadows the builtin within this function.
    min = 0
    # The interval halves each iteration, so log2(len)+1 steps bound the
    # loop instead of a while over the interval width.
    for i in range(int(math.log(last)/math.log(2)) + 1):
        if array[mid] == searchingelement:
            return mid
        elif array[mid] < searchingelement:
            min = mid
            mid = int((last + mid) / 2)
        else:
            last = mid
            mid = int((mid + min) / 2)
    # Loop exhausted without an exact hit: pick the insertion slot
    # relative to the final probe position.
    if array[mid] < searchingelement:
        return mid+1
    elif array[mid] > searchingelement:
        if mid-1 > -1:
            return mid-1
        else:
            return mid
    else:
        return mid
if __name__ == '__main__':
    # Benchmark both sorts on identical copies of 10000 random integers.
    # (Python 2 only: `print` statements and time.clock().)
    array1 = []
    for i in range(10000):
        array1.append(randint(0, 1000))
    array = list(array1)
    t0 = time.clock()
    insertion_sort(array)
    t1 = time.clock()
    print "insertion_sort: " + str(t1-t0)
    array = list(array1)
    t0 = time.clock()
    insertion_sort_v2(array)
    t1 = time.clock()
    print "insertion_sort_v2: " + str(t1-t0)
# Test results show that the worst case of the improved insertion sort is O(n * (n/2) * lg(n))
# Better than plain insertion sort but still quite slow
# Tested for 1000 random elements
# insertion_sort:----0.0390096090178
# insertion_sort_v2:-0.0287921815039
# Tested for 10000 random elements
# insertion_sort:----3.76619711492
# insertion_sort_v2:-2.25984142782
# End of 2.3-6 in book
| mit | Python | |
274e7a93bac93461f07dd43f3f84f1f00e229ffd | Add migration script hr_family -> hr_employee_relative | OCA/hr,OCA/hr,OCA/hr | hr_employee_relative/migrations/12.0.1.0.0/post-migration.py | hr_employee_relative/migrations/12.0.1.0.0/post-migration.py | # Copyright 2019 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
    """Copy legacy hr_family data (spouse/parent columns on hr_employee and
    the hr_employee_children table) into hr.employee.relative records."""
    cr = env.cr
    legacy_columns = 'fam_spouse, fam_spouse_employer, fam_spouse_tel, fam_father,' \
        ' fam_father_date_of_birth, fam_mother, fam_mother_date_of_birth'
    cr.execute('SELECT id, %s FROM hr_employee' % legacy_columns)
    relation_spouse = env.ref('hr_employee_relative.relation_spouse').id
    relation_parent = env.ref('hr_employee_relative.relation_parent').id
    relation_child = env.ref('hr_employee_relative.relation_child').id
    relative_model = env['hr.employee.relative']
    for (emp_id, spouse, spouse_employer, spouse_tel,
            father, father_birth, mother, mother_birth) in cr.fetchall():
        # A relative record is created whenever any of its legacy columns
        # carried data; missing names fall back to a generic label.
        if spouse or spouse_employer or spouse_tel:
            relative_model.create({
                'employee_id': emp_id,
                'name': spouse or 'Spouse',
                'relation_id': relation_spouse,
            })
        if father or father_birth:
            relative_model.create({
                'employee_id': emp_id,
                'name': father or 'Father',
                'date_of_birth': father_birth or False,
                'relation_id': relation_parent,
            })
        if mother or mother_birth:
            relative_model.create({
                'employee_id': emp_id,
                'name': mother or 'Mother',
                'date_of_birth': mother_birth or False,
                'relation_id': relation_parent,
            })
    cr.execute(
        'SELECT name, date_of_birth, employee_id, gender'
        ' FROM hr_employee_children'
    )
    for child_name, child_birth, emp_id, gender in cr.fetchall():
        relative_model.create({
            'name': child_name or 'Child',
            'date_of_birth': child_birth or False,
            'employee_id': emp_id,
            'gender': gender or False,
            'relation_id': relation_child,
        })
| agpl-3.0 | Python | |
5f9bb1a027664a0107a213b5dfa82c22d75c1196 | handle relative paths | ahihi/pls-files | pls-files.py | pls-files.py | #!/usr/bin/env python
from ConfigParser import SafeConfigParser
from contextlib import closing
from os.path import basename, dirname, join, normpath, realpath
import sys
from urllib2 import urlopen
def generic_open(arg):
    # Open `arg` as a URL when possible, otherwise as a local file.
    # Returns (handle, directory): directory is None for URLs and the
    # resolved parent directory for local files, which is later used to
    # absolutize relative playlist entries.
    try:
        return urlopen(arg), None
    except ValueError:
        # Not a URL: resolve symlinks and relative components first.
        arg = normpath(realpath(arg))
        return open(arg, "r"), dirname(arg)
def playlist_files(config):
    # Yield the File1..FileN entries of a parsed .pls [playlist] section
    # in numeric order.
    n = config.getint("playlist", "NumberOfEntries")
    for i in xrange(1, n+1):
        yield config.get("playlist", "File%d" % i)
if len(sys.argv) > 1:
    config = SafeConfigParser()
    for arg in sys.argv[1:]:
        raw_handle, directory = generic_open(arg)
        with closing(raw_handle) as handle:
            try:
                config.readfp(handle)
                # Local playlists get their entries rewritten as absolute
                # file:// URLs; entries from URL playlists print verbatim.
                for raw_fn in playlist_files(config):
                    fn = "file://" + join(directory, raw_fn) if directory != None else raw_fn
                    print fn
            except Exception, e:
                print >> sys.stderr, "%s\n  [%s] %s" % (arg, type(e).__name__, e)
else:
    print >> sys.stderr, "Usage: %s file-or-url [file-or-url ...]" % basename(sys.argv[0])
    sys.exit(1) | #!/usr/bin/env python
from ConfigParser import SafeConfigParser
from contextlib import closing
from os.path import basename, dirname, join
import sys
from urllib2 import urlopen
def generic_open(arg):
    # Open `arg` as a URL when possible, otherwise as a local file.
    # Returns (handle, directory); directory is None for URLs.
    try:
        return urlopen(arg), None
    except ValueError:
        return open(arg, "r"), dirname(arg)
def playlist_files(config):
    # Yield the File1..FileN entries of a parsed .pls [playlist] section
    # in numeric order.
    n = config.getint("playlist", "NumberOfEntries")
    for i in xrange(1, n+1):
        yield config.get("playlist", "File%d" % i)
if len(sys.argv) > 1:
    config = SafeConfigParser()
    for arg in sys.argv[1:]:
        raw_handle, directory = generic_open(arg)
        with closing(raw_handle) as handle:
            try:
                config.readfp(handle)
                # Local playlists get their entries rewritten as file://
                # URLs; entries from URL playlists print verbatim.
                for raw_fn in playlist_files(config):
                    fn = "file://" + join(directory, raw_fn) if directory != None else raw_fn
                    print fn
            except Exception, e:
                print >> sys.stderr, "%s\n  [%s] %s" % (arg, type(e).__name__, e)
else:
    print >> sys.stderr, "Usage: %s file-or-url [file-or-url ...]" % basename(sys.argv[0])
    sys.exit(1) | cc0-1.0 | Python | 
061ba14918eb6598031c9ad8a1c3f8e9c0f0a34b | Create LeetCode-LowestCommonAncestor2.py | lingcheng99/Algorithm | LeetCode-LowestCommonAncestor2.py | LeetCode-LowestCommonAncestor2.py | """
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
Notice it is binary tree, not BST
"""
class TreeNode(object):
    """A binary-tree node: a value plus left/right child links."""
    def __init__(self, x):
        # Children start detached; callers wire them up explicitly.
        self.val = x
        self.left = self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        Lowest common ancestor of p and q in a plain binary tree (not a BST).

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        if not root:
            return None
        elif root.val==p.val or root.val==q.val:
            # The LCA of a node and its own descendant is the node itself.
            return root
        else:
            left = self.lowestCommonAncestor(root.left, p, q)
            right = self.lowestCommonAncestor(root.right, p, q)
            # BUG FIX: these branches were inverted in the original (it
            # returned root when NEITHER side found a target, and fell
            # through to None when both sides did).
            if left and right:
                # p and q live in different subtrees: root is their LCA.
                return root
            # Both targets (if present) are on one side; propagate it up.
            return left or right
| mit | Python | |
63208828762d01122054d122c8d305fa8930f9bd | Make service postage nullable | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0258_service_postage_nullable.py | migrations/versions/0258_service_postage_nullable.py | """
Revision ID: 0258_service_postage_nullable
Revises: 0257_letter_branding_migration
Create Date: 2019-02-12 11:52:53.139383
"""
from alembic import op
import sqlalchemy as sa
revision = '0258_service_postage_nullable'
down_revision = '0257_letter_branding_migration'
def upgrade():
    # Allow NULL in services.postage and its history shadow table.
    # NOTE(review): existing_type is declared BOOLEAN for a column named
    # "postage" -- confirm this matches the model definition.
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('services_history', 'postage', existing_type=sa.BOOLEAN(), nullable=True)
    op.alter_column('services', 'postage', existing_type=sa.BOOLEAN(), nullable=True)
    # ### end Alembic commands ###
def downgrade():
    # Reinstate NOT NULL on services.postage and its history shadow table.
    # (Will fail if NULL values were written after the upgrade.)
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('services_history', 'postage', existing_type=sa.BOOLEAN(), nullable=False)
    op.alter_column('services', 'postage', existing_type=sa.BOOLEAN(), nullable=False)
    # ### end Alembic commands ###
| mit | Python | |
bc1fe15c77b8eedb40993e5ea24fa4d7340ff646 | Fix bug 17 (#4254) | PaddlePaddle/models,PaddlePaddle/models,PaddlePaddle/models | PaddleRec/multi-task/MMoE/args.py | PaddleRec/multi-task/MMoE/args.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import distutils.util
def _str2bool(value):
    """Parse a textual boolean flag value ('True'/'False', '1'/'0', ...).

    argparse's ``type=bool`` is a well-known trap: ``bool('False')`` is
    True because any non-empty string is truthy, so ``--use_gpu False``
    would still enable the GPU.  This helper interprets the text instead.
    """
    return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')


def parse_args(argv=None):
    """Build and parse the MMoE command line.

    :param argv: optional argument list; defaults to ``sys.argv[1:]``.
        The extra parameter keeps the old zero-argument call working
        while making the parser unit-testable.
    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--base_lr", type=float, default=0.01, help="learning_rate")
    parser.add_argument("--batch_size", type=int, default=5, help="batch_size")
    parser.add_argument("--dict_dim", type=int, default=64, help="dict dim")
    parser.add_argument(
        "--emb_dim", type=int, default=100, help="embedding_dim")
    # BUG FIX: was ``type=bool``, which treats any non-empty string as True.
    parser.add_argument(
        '--use_gpu', type=_str2bool, default=False, help='whether using gpu')
    parser.add_argument('--ce', action='store_true', help="run ce")
    args = parser.parse_args(argv)
    return args
| apache-2.0 | Python | |
0179d4d84987da76c517de4e01100f0e1d2049ea | Add unit tests for pacman list packages | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/unit/modules/pacman_test.py | tests/unit/modules/pacman_test.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Eric Vz <eric@base10.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import pacman
from salt.exceptions import CommandExecutionError
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PacmanTestCase(TestCase):
    '''
    Test cases for salt.modules.pacman
    '''
    def setUp(self):
        # Give the loader dunders empty defaults so patch.dict can overlay
        # per-test mocks onto them.
        pacman.__salt__ = {}
        pacman.__context__ = {}
    def test_list_pkgs(self):
        '''
        Test if it list the packages currently installed in a dict
        '''
        cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
        sortmock = MagicMock()
        stringifymock = MagicMock()
        with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
            self.assertDictEqual(pacman.list_pkgs(), {'A': ['1.0'], 'B': ['2.0']})
        # NOTE(review): on mock < 2.0, Mock.assert_called_once() is not a
        # real assertion and silently passes -- confirm the mock version.
        sortmock.assert_called_once()
        stringifymock.assert_called_once()
    def test_list_pkgs_as_list(self):
        '''
        Test if it list the packages currently installed in a dict
        '''
        cmdmock = MagicMock(return_value='A 1.0\nB 2.0')
        sortmock = MagicMock()
        stringifymock = MagicMock()
        with patch.dict(pacman.__salt__, {'cmd.run': cmdmock, 'pkg_resource.add_pkg': self._add_pkg, 'pkg_resource.sort_pkglist': sortmock, 'pkg_resource.stringify': stringifymock}):
            # as_list=True: stringify must NOT collapse the version lists.
            self.assertDictEqual(pacman.list_pkgs(True), {'A': ['1.0'], 'B': ['2.0']})
        sortmock.assert_called_once()
        stringifymock.assert_not_called()
    '''
    Helper methods for test cases
    '''
    def _add_pkg(self, pkgs, name, version):
        # Mimics pkg_resource.add_pkg: group versions per package name.
        pkgs.setdefault(name, []).append(version)
if __name__ == '__main__':
    # Allow running this module directly through Salt's test runner; no
    # daemon is needed because every external call is mocked.
    from integration import run_tests
    run_tests(PacmanTestCase, needs_daemon=False)
| apache-2.0 | Python | |
f2d4ddba7c594ec93f0ede0be1fc515b0c7c2d7b | Remove HInput and Isolate joystick related code because son path isues with pygame | hikaruAi/HPanda | HJoystick.py | HJoystick.py | #from direct.showbase import DirectObject
import pygame #pygame must be in the Main.py directory
# THIS FILE MUST BE IN THE SAME DIRECTORY AS MAIN.PY BECAUSE OF SOME PATH ISSUES WITH PYGAME
class HJoystickSensor():
def __init__(self,joystickId=0):
#print os.getcwd()
pygame.init()
pygame.joystick.init()
c=pygame.joystick.get_count()
if c>0:
self.id=joystickId
self.object=pygame.joystick.Joystick(self.id)
self.numButtons=self.object.get_numbuttons()
self.numAxes=self.object.get_numaxes()
base.taskMgr.add(self._task,"taskForJoystick_"+self.id)
else:
print "No Joystick"
def _task(self,t):
pygame.event.pump()
for b in range(self.numButtons):
if self.object.get_button(b):
messenger.send("Joystick_Button_"+str(b))
for a in range(self.numAxes):
axis=self.object.get_axis(a)
if axis!=0:
messenger.send("Joystick_Axis_"+str(a),sentArgs[a])
return t.cont
## Hats and other features are unused for now (TODO).
# Manual smoke test entry point:
if __name__=="__main__":
a=HJoystickSensor() | bsd-2-clause | Python | |
fe63d6e1e822f7cb60d1c0bdaa08eb53d3849783 | Add script to extract artist names from MusicBrainz database | xhochy/libfuzzymatch,xhochy/libfuzzymatch | benchmark/datasets/musicbrainz/extract-from-dbdump.py | benchmark/datasets/musicbrainz/extract-from-dbdump.py | #!/usr/bin/env python
"""
Script to extract the artist names from a MusicBrainz database dump.
Usage:
./extract-from-dbdump.py <dump_dir>/artist <outfile>
"""
import pandas as pd
import sys
__author__ = "Uwe L. Korn"
__license__ = "MIT"
# Paths come from the command line: the dump's `artist` table file and the
# destination CSV (see the module docstring).
input_file = sys.argv[1]
output_file = sys.argv[2]

# The MusicBrainz dump is tab-separated with no header row; artist names
# are in the third column.
df = pd.read_csv(input_file, sep='\t', header=None)
# Fix: the original wrote to the undefined name `outfile` (NameError) and
# used the long-removed DataFrame.ix indexer; use output_file and .iloc.
df.iloc[:, 2].to_csv(output_file, index=False)
| mit | Python | |
842092122b14343c9b1c2e2a4e0dd67dd8bdf767 | build SlideEvaluation objects from existing data | crs4/ProMort,lucalianas/ProMort,crs4/ProMort,lucalianas/ProMort,lucalianas/ProMort,crs4/ProMort | promort/slides_manager/migrations/0014_auto_20171201_1119.py | promort/slides_manager/migrations/0014_auto_20171201_1119.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-01 11:19
from __future__ import unicode_literals
from django.db import migrations
def populate_slide_evaluations(apps, schema_editor):
    """Forward step: mirror every SlideQualityControl row into the new
    SlideEvaluation model, copying the staining over from the slide."""
    SlideEvaluation = apps.get_model('slides_manager', 'SlideEvaluation')
    SlideQualityControl = apps.get_model('slides_manager', 'SlideQualityControl')
    for qc in SlideQualityControl.objects.all():
        SlideEvaluation(
            slide=qc.slide, rois_annotation_step=qc.rois_annotation_step,
            staining=qc.slide.staining, adequate_slide=qc.adequate_slide,
            not_adequacy_reason=qc.not_adequacy_reason, notes=qc.notes,
            reviewer=qc.reviewer, acquisition_date=qc.acquisition_date
        ).save()
def clear_slide_evaluations(apps, schema_editor):
    """Reverse step: delete every SlideEvaluation row."""
    evaluation_model = apps.get_model('slides_manager', 'SlideEvaluation')
    for evaluation in evaluation_model.objects.all():
        evaluation.delete()
class Migration(migrations.Migration):
    # Data migration: builds SlideEvaluation objects from the existing
    # SlideQualityControl data introduced in 0013.
    dependencies = [
        ('slides_manager', '0013_slideevaluation'),
    ]

    operations = [
        migrations.RunPython(populate_slide_evaluations, clear_slide_evaluations)
    ]
| mit | Python | |
116f41481062e6d9f15c7a81c2e5268aa1b706c7 | add sources script | TobyRoseman/PS4M,TobyRoseman/PS4M,TobyRoseman/PS4M | admin/scripts/addListOfSources.py | admin/scripts/addListOfSources.py | from collections import defaultdict
import re
import sys
import time
sys.path.append('../..')
from crawler.crawler import crawl, itemFactory
from engine.data.database.databaseConnection import commit, rollback
from engine.data.database.sourceTable import addSource, sourceExists, urlToLookupId
from engine.data.database.sourceGroupAssignmentTable import addSourceGroupAssignment
from engine.data.database.itemTable import getSourceUrlsForItemUrl
from engine.data.url import Url
def handleLine(line):
    """Process one input line of the form "<url>[ <source group>]".

    New sources are crawled and their short link printed; sources that look
    like duplicates of an existing feed are refused.  Existing sources are
    only (re)assigned to the optional source group.
    """
    # Parse line
    m = lineParser.match(line.rstrip())
    assert(m.lastindex == 1 or m.lastindex == 2)
    url = Url(m.group(1))
    sourceGroupName = None
    if(m.lastindex == 2):
        sourceGroupName = m.group(2)

    # Add source
    if not sourceExists(url):
        print("Adding " + url.value)
        webFeed = itemFactory(url)
        # Refuse feeds whose items mostly already belong to another source.
        if not hasSimilarSource(webFeed):
            addSource(url, webFeed.name)
            crawl(webFeed)
            sourceId = urlToLookupId(url.value)
            print "https://ps4m.com/s/%d" % (sourceId)
        else:
            print "NOT ADDING!"
            return
    else:
        print (url.value + " already exists")

    # If necessary, assign source to group
    if(sourceGroupName is not None):
        print "\tAdding to %s" % (sourceGroupName)
        addSourceGroupAssignment(url, sourceGroupName)
    return
def usage():
    """Print command-line help for this script."""
    message = """%s
NAME
addListOfSources - adds a file of source urls
SYNOPSIS
addListOfSources SOURCE_FILE
SOURCE_FILE -
Contains one url per line. Also optionally, a space then a source group.
""" % sys.argv[0]
    print message

# Matches "<url>[ <source group name>]"; group 2 (the group) is optional.
lineParser = re.compile("^(\S+)\s?(.+)?$")

def hasSimilarSource(webfeed):
    """Return True if another source already carries most of this feed's items."""
    duplicateUrlCounter = defaultdict(lambda:0)
    for i in webfeed.items:
        # i[1] is assumed to be the item's URL — TODO confirm against itemFactory.
        for sourceUrl in getSourceUrlsForItemUrl(i[1]):
            duplicateUrlCounter[sourceUrl] += 1
    # Print a warning, if any other webfeed has more than half of this webfeed
    result = False
    for c in duplicateUrlCounter.keys():
        if (duplicateUrlCounter[c] > len(webfeed.items)/2):
            print "Possible duplicate feed. New feed %s. Old feed: %s" % (webfeed.url, c)
            result = True
    return result
# Require exactly one argument: the file of source urls.
if(len(sys.argv) != 2):
    usage()
    exit(1)

sourceFilePath = sys.argv[1]
sourceFile = open(sourceFilePath, 'r')

# Failing lines are collected and reported at the end instead of aborting
# the whole batch.
problemLine = set()
for line in sourceFile:
    try:
        handleLine(line)
    except Exception, e:
        # Undo any partial database work done for this line.
        rollback()
        print "fail %s: %s" % (line, e)
        problemLine.add(line)
        continue
    print # Add a blank line between sources
    commit()
    # Be polite to the crawled servers.
    time.sleep(1)
sourceFile.close()

# Report errors
print 'Could Not Add the Following Line:'
for i in problemLine:
    print i
| mit | Python | |
76ff934621268a52bf4502449ea6a3843036c849 | add missing test | nathanbjenx/cairis,failys/CAIRIS,failys/CAIRIS,nathanbjenx/cairis,failys/CAIRIS,nathanbjenx/cairis,nathanbjenx/cairis | cairis/cairis/test/test_Persona.py | cairis/cairis/test/test_Persona.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import json
import BorgFactory
from Borg import Borg
from RoleParameters import RoleParameters
from EnvironmentParameters import EnvironmentParameters
from PersonaParameters import PersonaParameters
from PersonaEnvironmentProperties import PersonaEnvironmentProperties
class PersonaTest(unittest.TestCase):
    """Round-trip test for persona persistence through the CAIRIS db proxy."""

    def setUp(self):
        """Load fixture data and create the environment and role the persona needs."""
        BorgFactory.initialise()
        f = open(os.environ['CAIRIS_SRC'] + '/test/personas.json')
        d = json.load(f)
        f.close()
        self.iEnvironments = d['environments']
        iep1 = EnvironmentParameters(self.iEnvironments[0]["theName"],self.iEnvironments[0]["theShortCode"],self.iEnvironments[0]["theDescription"])
        b = Borg()
        b.dbProxy.addEnvironment(iep1)
        self.theEnvironments = b.dbProxy.getEnvironments()
        self.iRoles = d['roles']
        irp = RoleParameters(self.iRoles[0]["theName"], self.iRoles[0]["theType"], self.iRoles[0]["theShortCode"], self.iRoles[0]["theDescription"],[])
        b.dbProxy.addRole(irp)
        self.theRoles = b.dbProxy.getRoles()
        self.iPersonas = d['personas']
        self.iExternalDocuments = d['external_documents']
        self.iDocumentReferences = d['document_references']
        self.iPersonaCharacteristics = d['persona_characteristics']

    def testPersona(self):
        """Add a persona, read it back, compare every field, then delete it."""
        ipp = PersonaParameters(self.iPersonas[0]["theName"],self.iPersonas[0]["theActivities"],self.iPersonas[0]["theAttitudes"],self.iPersonas[0]["theAptitudes"],self.iPersonas[0]["theMotivations"],self.iPersonas[0]["theSkills"],self.iPersonas[0]["theIntrinsic"],self.iPersonas[0]["theContextual"],"","0",self.iPersonas[0]["thePersonaType"],[],[PersonaEnvironmentProperties(self.iPersonas[0]["theEnvironmentProperties"][0]["theName"],self.iPersonas[0]["theEnvironmentProperties"][0]["theDirectFlag"],self.iPersonas[0]["theEnvironmentProperties"][0]["theNarrative"],self.iPersonas[0]["theEnvironmentProperties"][0]["theRole"])],[])
        b = Borg()
        b.dbProxy.addPersona(ipp)
        thePersonas = b.dbProxy.getPersonas()
        op = thePersonas[self.iPersonas[0]["theName"]]
        self.assertEqual(ipp.name(),op.name())
        self.assertEqual(ipp.activities(),op.activities())
        self.assertEqual(ipp.attitudes(),op.attitudes())
        self.assertEqual(ipp.aptitudes(),op.aptitudes())
        self.assertEqual(ipp.motivations(),op.motivations())
        self.assertEqual(ipp.skills(),op.skills())
        self.assertEqual(ipp.intrinsic(),op.intrinsic())
        self.assertEqual(ipp.contextual(),op.contextual())
        # Clean up the persona so tearDown can delete the role/environment.
        b.dbProxy.deletePersona(op.id())

    def tearDown(self):
        """Remove the role and environment created in setUp."""
        b = Borg()
        b.dbProxy.deleteRole(self.theRoles[self.iRoles[0]["theName"]].id())
        b.dbProxy.deleteEnvironment(self.theEnvironments[self.iEnvironments[0]["theName"]].id())
        b.dbProxy.close()

if __name__ == '__main__':
    unittest.main()
| apache-2.0 | Python | |
3a235e25ac3f5d76eb4030e01afbe7b716ec6d91 | Add py solution for 331. Verify Preorder Serialization of a Binary Tree | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/verify-preorder-serialization-of-a-binary-tree.py | py/verify-preorder-serialization-of-a-binary-tree.py | class Solution(object):
def isValidSerialization(self, preorder):
    """
    :type preorder: str
    :rtype: bool

    Validate a comma-separated preorder serialization ('#' marks null) by
    counting open child slots: every node consumes one slot and every
    non-null node opens two more.  This runs in O(n) and, unlike the
    original recursive walk, cannot raise RecursionError on a deeply
    left-leaning tree.
    """
    slots = 1  # the root needs one open slot
    for node in preorder.split(','):
        slots -= 1
        # More nodes than available slots: serialization is overfull.
        if slots < 0:
            return False
        if node != '#':
            slots += 2
    # Valid iff every slot is filled and none are left over.
    return slots == 0
| apache-2.0 | Python | |
f6dce9177421f61c7a773e1bbe53588eb54defc9 | Create score.py | Azure/azure-stream-analytics | Samples/AzureML/score.py | Samples/AzureML/score.py | #example: scikit-learn and Swagger
import json
import numpy as np
import pandas as pd
import azureml.train.automl
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from azureml.core.model import Model
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
def init():
    """Load the registered model once at web-service start-up."""
    global model
    # 'nyc.pkl' is the name the model was registered under in the workspace;
    # get_model_path resolves it inside the deployed container.
    model_path = Model.get_model_path('nyc.pkl')
    # deserialize the model file back into a sklearn model
    model = joblib.load(model_path)

# Example payload/response used by inference-schema to generate Swagger.
input_sample = np.array([[1,"Thursday",16,1,3.48]])
output_sample = np.array([13.66304196])

@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
    """Score one request; returns rounded predictions, or an error string."""
    try:
        result = np.round(model.predict(data),2)
        # you can return any datatype as long as it is JSON-serializable
        return result.tolist()
    except Exception as e:
        error = str(e)
        return error
| mit | Python | |
f1599a7b3f342a86cf7eb7201593b8515d5f13ad | Add views for handling 400 & 500 errors | wylee/django-arcutils,wylee/django-arcutils,PSU-OIT-ARC/django-arcutils,PSU-OIT-ARC/django-arcutils | arcutils/views.py | arcutils/views.py | import logging
from django.http import HttpResponseBadRequest, HttpResponseServerError
from django.template import loader
from django.views.decorators.csrf import requires_csrf_token
log = logging.getLogger(__name__)
@requires_csrf_token
def bad_request(request, exception=None, template_name='400.html'):
"""Override default Django bad_request view so context is passed.
Otherwise, static files won't be loaded and default context vars
won't be available (&c).
If loading or rendering the template causes an error, a bare 400
response will be returned.
To use this in a project, import it into the project's root
urls.py and add a 400.html template.
.. note:: The ``exception`` arg was added in Django 1.9.
"""
try:
template = loader.get_template(template_name)
body, content_type = template.render({'request': request}, request), None
except Exception:
log.exception('Exception encountered while rendering 400 error')
body, content_type = '<h1>Bad Request (400)</h1>', 'text/html'
return HttpResponseBadRequest(body, content_type=content_type)
@requires_csrf_token
def server_error(request, template_name='500.html'):
"""Override default Django server_error view so context is passed.
Otherwise, static files won't be loaded and default context vars
won't be available (&c).
If loading or rendering the template causes an error, a bare 500
response will be returned.
"""
try:
template = loader.get_template(template_name)
body, content_type = template.render({'request': request}, request), None
except Exception:
log.exception('Exception encountered while rendering 500 error')
body, content_type = '<h1>Server Error (500)</h1>', 'text/html'
return HttpResponseServerError(body, content_type=content_type)
| mit | Python | |
7aee25badd2085d63012c83f6be8082d93427754 | Add files via upload | Vikramank/Deep-Learning-,Vikramank/Deep-Learning- | polyregreesion.py | polyregreesion.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 31 10:03:15 2016
@author:Viky
Code for polynomial regression
"""
#importing necessary packages
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# input data:
# 1000 evenly spaced samples in [0, 3]; each basis column is max-normalised.
x_input=np.linspace(0,3,1000)
x1=x_input/np.max(x_input)
x2=np.power(x_input,2)/np.max(np.power(x_input,2))
# Ground-truth target y = 5*x1 - 3*x2 (noise-free), shaped as a column vector.
y_input=5*x1-3*x2
y_input= y_input.reshape((y_input.size, 1))

# model parameters
# order of polynomial
n=2
W = tf.Variable(tf.random_normal([n,1]), name='weight')
# bias
b = tf.Variable(tf.random_normal([1]), name='bias')
#X=tf.placeholder(tf.float32,shape=(None,2))
X=tf.placeholder(tf.float32,shape=[None,n])
Y=tf.placeholder(tf.float32,shape=[None, 1])
# preparing the data
def modify_input(x,x_size,n_value):
    """Build the design matrix: column i holds x**(i+1), max-normalised.

    Fix: the loop now ranges over the n_value parameter; the original
    iterated over the global `n`, silently ignoring its own argument.
    """
    x_new=np.zeros([x_size,n_value])
    for i in range(n_value):
        x_new[:,i]=np.power(x,(i+1))
        x_new[:,i]=x_new[:,i]/np.max(x_new[:,i])
    return x_new
# model
x_modified=modify_input(x_input,x_input.size,n)
# Linear model over the polynomial basis: Y_pred = X.W + b
Y_pred=tf.add(tf.matmul(X,W),b)

# algorithm: mean-squared-error loss
loss = tf.reduce_mean(tf.square(Y_pred -Y ))
# training algorithm
optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)
# initializing the variables
init = tf.initialize_all_variables()
# starting the session
sess = tf.Session()
sess.run(init)

epoch=12000
for step in xrange(epoch):
    _, c=sess.run([optimizer, loss], feed_dict={X: x_modified, Y: y_input})
    if step%1000==0 :
        # Log the loss every 1000 steps.
        print c

print "Model paramters:"
print sess.run(W)
print "bias:%f" %sess.run(b)

# comparing our model: predictions vs. the ground truth curve
y_test=sess.run(Y_pred, feed_dict={X:x_modified})
plt.plot(x_input,y_input,x_input, y_test)
plt.show() | mit | Python | |
6f4d5917abdbae1fe731e7a1786d8589d2b31ac0 | Fix #160 -- Add missing migration | ellmetha/django-machina,ellmetha/django-machina,ellmetha/django-machina | machina/apps/forum/migrations/0011_auto_20190627_2132.py | machina/apps/forum/migrations/0011_auto_20190627_2132.py | # Generated by Django 2.2.2 on 2019-06-28 02:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Redefines the MPTT bookkeeping columns (level/lft/rght) as plain
    # editable=False PositiveIntegerFields.
    # NOTE(review): presumably syncs the columns with an updated
    # django-mptt field definition — previous field options not visible here.
    dependencies = [
        ('forum', '0010_auto_20181103_1401'),
    ]

    operations = [
        migrations.AlterField(
            model_name='forum',
            name='level',
            field=models.PositiveIntegerField(editable=False),
        ),
        migrations.AlterField(
            model_name='forum',
            name='lft',
            field=models.PositiveIntegerField(editable=False),
        ),
        migrations.AlterField(
            model_name='forum',
            name='rght',
            field=models.PositiveIntegerField(editable=False),
        ),
    ]
| bsd-3-clause | Python | |
4c601ce9b91a0bef7082e3d8a5c1b95dc512d829 | add csl_util | lifei96/Medium_Crawler,lifei96/Medium-crawler-with-data-analyzer,lifei96/Medium-crawler-with-data-analyzer,lifei96/Medium-crawler-with-data-analyzer,lifei96/Medium-crawler-with-data-analyzer,lifei96/Medium_Crawler | User_Crawler/util_csl.py | User_Crawler/util_csl.py | # -*- coding: utf-8 -*-
from types import *
import pandas as pd
# Per-user attribute CSVs; each has a `username` column plus attribute columns.
USER_ATTR_LIST = ['./data/cross-site-linking/user_type.csv',
                  './data/graph/CC.csv',
                  './data/graph/degree.csv',
                  './data/graph/pagerank.csv'
                  ]
def dict_merge(dict_1, dict_2):
    """Inner-join two {username: attrs} dicts.

    Only users present in BOTH inputs are kept; their attribute dicts are
    combined, with dict_2 winning on clashing attribute names.
    """
    merged = {}
    for user, attrs in dict_1.items():
        if user not in dict_2:
            continue
        combined = dict(attrs)
        combined.update(dict_2[user])
        merged[user] = combined
    return merged
def load_user_attr_to_dict(file_path):
    """Read one per-user attribute CSV into {username: {attr: value}}."""
    frame = pd.read_csv(file_path)
    attr_names = [col for col in frame.columns if col != 'username']
    result = {}
    for _, row in frame.iterrows():
        result[row['username']] = {attr: row[attr] for attr in attr_names}
    return result
def load_all_attr_to_dict(file_list=USER_ATTR_LIST):
    """Inner-join every attribute file into one {username: attrs} dict.

    Fixes two bugs in the original: it called the undefined name
    `load_user_attr` (NameError), and it seeded the merge with an empty
    dict, so dict_merge's inner-join always produced {}.  Seeding with the
    first file mirrors load_all_attr_to_df.
    """
    res = {}
    for idx, file_path in enumerate(file_list):
        user_attr_dict = load_user_attr_to_dict(file_path)
        if idx == 0:
            res = user_attr_dict
        else:
            res = dict_merge(res, user_attr_dict)
    return res
def load_user_attr_to_df(file_path):
    """Read one per-user attribute CSV into a DataFrame."""
    return pd.read_csv(file_path)
def load_all_attr_to_df(file_list=USER_ATTR_LIST):
    """Inner-join all attribute CSVs on `username` into one DataFrame."""
    frames = [load_user_attr_to_df(path) for path in file_list]
    merged = frames[0]
    for frame in frames[1:]:
        merged = pd.merge(merged, frame, on='username')
    return merged
def split_df(df, by='user_type'):
    """Write one CSV per distinct value of `by`, named user_attr_<value>.csv."""
    by_value_list = sorted(df[by].drop_duplicates().values.tolist())
    for by_value in by_value_list:
        df[df[by] == by_value].to_csv('./data/cross-site-linking/user_attr_' + str(by_value) + '.csv', index=False, encoding='utf-8')
| mit | Python | |
27b10f95e12c1fc1492be61643a057a9934ad535 | Add SSA. | divergentdave/inspectors-general,lukerosiak/inspectors-general | inspectors/ssa.py | inspectors/ssa.py | #!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# http://oig.ssa.gov/
# Oldest report: 1996
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
#
AUDIT_REPORTS_URL = "http://oig.ssa.gov/audits-and-investigations/audit-reports/{year}-01--{year}-12?page={page}"
INVESTIGATIONS_REPORT_URL = "http://oig.ssa.gov/audits-and-investigations/investigations?page={page}"
SEMIANNUAL_REPORTS_URL = "http://oig.ssa.gov/newsroom/semiannual-reports?page={page}"
CONGRESSIONAL_TESTIMONY_URL = "http://oig.ssa.gov/newsroom/congressional-testimony?page={page}"
PERFORMANCE_REPORTS_URL = "http://oig.ssa.gov/newsroom/performance-reports?page={page}"

# Paginated listings scraped the same way (no per-year filter).
OTHER_REPORT_URLS = [
    PERFORMANCE_REPORTS_URL,
    CONGRESSIONAL_TESTIMONY_URL,
    SEMIANNUAL_REPORTS_URL,
    INVESTIGATIONS_REPORT_URL,
]

BASE_REPORT_URL = "http://oig.ssa.gov/"

def run(options):
    """Scrape all SSA OIG report listings within the requested year range."""
    year_range = inspector.year_range(options)

    # Pull the audit reports, paging until an empty result page.
    for year in year_range:
        for page in range(0, 999):
            url = AUDIT_REPORTS_URL.format(year=year, page=page)
            doc = BeautifulSoup(utils.download(url))
            results = doc.select("td.views-field")
            if not results:
                break
            for result in results:
                report = report_from(result, year_range)
                if report:
                    inspector.save_report(report)

    # Pull the other reports
    for report_format in OTHER_REPORT_URLS:
        for page in range(0, 999):
            url = report_format.format(page=page)
            doc = BeautifulSoup(utils.download(url))
            results = doc.select("td.views-field")
            if not results:
                # Some listings use a div-based layout instead of a table.
                results = doc.select("div.views-row")
            if not results:
                break
            for result in results:
                if not result.text.strip():
                    # Skip empty rows
                    continue
                report = report_from(result, year_range)
                if report:
                    inspector.save_report(report)
def report_from(result, year_range):
    """Build a report dict from one listing row.

    Returns None when the report's publication year falls outside
    year_range.  "Limited Distribution" reports are marked unreleased and
    have no file URL.
    """
    landing_page_link = result.find("a")
    title = landing_page_link.text.strip()
    landing_url = urljoin(BASE_REPORT_URL, landing_page_link.get('href'))

    # Fix: report_url must be initialized unconditionally; the original only
    # set it in the "Limited Distribution" branch, so the debug log below
    # raised NameError for ordinary out-of-range reports.
    report_url = None
    unreleased = False
    if "Limited Distribution" in title:
        unreleased = True

    published_on_text = result.select("span.date-display-single")[0].text.strip()
    published_on = datetime.datetime.strptime(published_on_text, '%A, %B %d, %Y')
    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % landing_url)
        return

    try:
        report_id = result.select("span.field-data")[0].text.strip()
    except IndexError:
        # No explicit id in the listing; fall back to the landing URL slug.
        report_id = landing_url.split("/")[-1]

    try:
        report_url = result.select("span.file a")[0].get('href')
    except IndexError:
        if not unreleased:
            # No direct file link in the listing; look on the landing page.
            landing_page = BeautifulSoup(utils.download(landing_url))
            try:
                report_url = landing_page.find("a", attrs={"type": 'application/octet-stream;'}).get('href')
            except AttributeError:
                report_url = landing_url

    # Extension-less URLs are HTML pages rather than downloadable files.
    file_type = None
    if report_url:
        _, extension = os.path.splitext(report_url)
        if not extension:
            file_type = 'html'

    report = {
        'inspector': "ssa",
        'inspector_url': "http://oig.ssa.gov",
        'agency': "ssa",
        'agency_name': "Social Security Administration",
        'landing_url': landing_url,
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    if unreleased:
        report['unreleased'] = unreleased
    if file_type:
        report['file_type'] = file_type
    return report
# Standard inspectors-general entry point: run only when executed directly.
utils.run(run) if (__name__ == "__main__") else None
| cc0-1.0 | Python | |
bcee6173027c48bfb25a65d3e97660f2e2a0852b | Add a python script to generate test methods | y-uti/php-bsxfun,y-uti/php-bsxfun | gentest.py | gentest.py | from itertools import product
import json
import numpy
# 2x2x2 cube holding 1..8; pcube enumerates axis-slice combinations
# (scalar, vectors, planes, full cube) used as broadcast operands.
cube = numpy.array(range(1, 9)).reshape(2, 2, 2)
pcube = [
    cube[0 ,0 ,0 ],
    cube[0 ,0 ,0:2],
    cube[0 ,0:2,0:1],
    cube[0 ,0:2,0:2],
    cube[0:2,0:1,0:1],
    cube[0:2,0:1,0:2],
    cube[0:2,0:2,0:1],
    cube[0:2,0:2,0:2],
]

# Emit one PHPUnit test method per ordered operand pair; NumPy broadcasting
# supplies the expected element-wise-times (bsxfun) result.
for (i, (a, b)) in enumerate(product(pcube, repeat=2), start=1):
    print 'public function testBsxfun{0:0>2d}()'.format(i)
    print '{'
    print '$a = {0};'.format(json.dumps(a.tolist()))
    print '$b = {0};'.format(json.dumps(b.tolist()))
    print '$expected = {0};'.format(json.dumps((a * b).tolist()))
    print '$actual = Bsxfun::bsxfun($this->times, $a, $b);'
    print '$this->assertEquals($expected, $actual);'
    print '}'
    print
| mit | Python | |
052392da7980c4f4e2e86cd8eb65da5b91d3547b | Solve Code Fights different symbols naive problem | HKuz/Test_Code | CodeFights/differentSymbolsNaive.py | CodeFights/differentSymbolsNaive.py | #!/usr/local/bin/python
# Code Fights Different Symbols Naive Problem
from collections import Counter
def differentSymbolsNaive(s):
    # Number of distinct characters == size of the character set.
    return len(set(s))
def main():
    """Exercise differentSymbolsNaive against the sample cases."""
    tests = [
        ["cabca", 3],
        ["aba", 2]
    ]
    for t in tests:
        res = differentSymbolsNaive(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: differentSymbolsNaive({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: differentSymbolsNaive({}) returned {},"
                   "answer: {}").format(t[0], res, ans))


if __name__ == '__main__':
    main()
| mit | Python | |
226f9430f81c4833a7541c2093dca07ef3645744 | Add build script | headupinclouds/polly,headupinclouds/polly,headupinclouds/polly,ruslo/polly,ruslo/polly,idscan/polly,idscan/polly | bin/build.py | bin/build.py | #!/usr/bin/env python3
# Copyright (c) 2014, Ruslan Baratov
# All rights reserved.
import argparse
import os
import re
import shutil
import subprocess
import sys
# Command line: --toolchain/--type select the CMake configuration; the
# flags toggle running ctest, opening Xcode, and verbose output.
parser = argparse.ArgumentParser(description="Script for building")
parser.add_argument(
    '--toolchain',
    choices=[
        'libcxx',
        'xcode',
        'clang_libstdcxx',
        'gcc48',
        'gcc',
        'vs2013x64',
        'vs2013'
    ],
    help="CMake generator/toolchain",
)
parser.add_argument(
    '--type',
    required=True,
    help="CMake build type",
)
parser.add_argument('--test', action='store_true')
parser.add_argument('--open', action='store_true')
parser.add_argument('--verbose', action='store_true')

args = parser.parse_args()

# Map the chosen toolchain onto a polly toolchain file, an optional CMake
# generator, and the tag used to name the build directory.
toolchain = ''
generator = ''
tag = "{}-{}".format(args.toolchain, args.type)

if args.toolchain == 'libcxx':
    toolchain = 'libcxx'
elif args.toolchain == 'xcode':
    toolchain = 'xcode'
    generator = '-GXcode'
    tag = 'xcode'
elif args.toolchain == 'clang_libstdcxx':
    toolchain = 'clang_libstdcxx'
elif args.toolchain == 'gcc48':
    toolchain = 'gcc48'
elif args.toolchain == 'gcc':
    toolchain = 'gcc'
elif args.toolchain == 'vs2013x64':
    generator = '-GVisual Studio 12 2013 Win64'
    tag = 'vs2013x64'
elif args.toolchain == 'vs2013':
    generator = '-GVisual Studio 12 2013'
    tag = 'vs2013'
else:
    # argparse choices should make this unreachable.
    assert(False)

cdir = os.getcwd()
def call(args):
    """Echo and run a command; exit the script with status 1 on failure."""
    print('Execute command: [')
    for cmd_part in args:
        print(' `{}`'.format(cmd_part))
    print(']')
    try:
        subprocess.check_call(
            args,
            stderr=subprocess.STDOUT,
            universal_newlines=True
        )
    except subprocess.CalledProcessError as error:
        print(error)
        print(error.output)
        sys.exit(1)
    except FileNotFoundError as error:
        print(error)
        sys.exit(1)
call(['cmake', '--version'])

# polly toolchain files live under $POLLY_ROOT.
polly_root = os.getenv("POLLY_ROOT")
if not polly_root:
    sys.exit("Environment variable `POLLY_ROOT` is empty")

toolchain_option = ''
if toolchain:
    toolchain_path = os.path.join(polly_root, "{}.cmake".format(toolchain))
    toolchain_option = "-DCMAKE_TOOLCHAIN_FILE={}".format(toolchain_path)

# Out-of-source build directory, one per toolchain/type tag; always start clean.
build_dir = os.path.join(cdir, '_builds', tag)
build_dir_option = "-B{}".format(build_dir)
build_type_for_generate_step = "-DCMAKE_BUILD_TYPE={}".format(args.type)
shutil.rmtree(build_dir, ignore_errors=True)

generate_command = [
    'cmake',
    '-H.',
    build_dir_option,
    build_type_for_generate_step
]

if generator:
    generate_command.append(generator)
if toolchain_option:
    generate_command.append(toolchain_option)
if args.verbose:
    generate_command.append('-DCMAKE_VERBOSE_MAKEFILE=ON')

build_command = [
    'cmake',
    '--build',
    build_dir,
    '--config',
    args.type
]

call(generate_command)
call(build_command)

# --open: open the generated Xcode project in the IDE.
if (toolchain == 'xcode') and args.open:
    for file in os.listdir(build_dir):
        if file.endswith(".xcodeproj"):
            call(['open', os.path.join(build_dir, file)])

# --test: run ctest inside the build directory.
if args.test:
    os.chdir(build_dir)
    test_command = ['ctest', '--config', args.type]
    if args.verbose:
        test_command.append('-VV')
    call(test_command)
| bsd-2-clause | Python | |
f625f46e89c8e95677492cfb03ee113a3f6c7bb3 | Add utils.py | OParl/validator,OParl/validator | src/utils.py | src/utils.py | """
The MIT License (MIT)
Copyright (c) 2017 Stefan Graupner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import hashlib
from gi.repository import OParl
class OParlType(object):
    """
    Simple wrapper class around OParl type urls
    """
    entity = 'Unknown'
    version = '1.0'

    def __init__(self, object):
        # A type url ends in ".../<version>/<Entity>"; keep those two parts.
        url_parts = object.get_oparl_type().split('/')
        self.version = url_parts[-2]
        self.entity = url_parts[-1]
def sha1_hexdigest(string):
    """Return the SHA-1 hex digest of *string* (bytes or str).

    hashlib requires bytes on Python 3, so str input is UTF-8 encoded
    first; bytes input is hashed as before.
    """
    if isinstance(string, str):
        string = string.encode('utf-8')
    return hashlib.sha1(string).hexdigest()
f156cde55596ab7d954d41454f951227a719f6d5 | Create keys repository script | AlexanderRyzhko/0install-TUF,AlexanderRyzhko/0install-TUF,AlexanderRyzhko/0install-TUF | metadata_repo/scripts/create_keys_repo.py | metadata_repo/scripts/create_keys_repo.py | '''
Script for creating the RSA keys and creating a new repository
'''
from tuf.libtuf import *
# Generate and write the first of two root keys for the TUF repository.
# The following function creates an RSA key pair, where the private key is saved to
# "path/to/root_key" and the public key to "path/to/root_key.pub".
generate_and_write_rsa_keypair("path/to/root_key", bits=2048, password="password")
# If the key length is unspecified, it defaults to 3072 bits. A length of less
# than 2048 bits raises an exception. A password may be supplied as an
# argument, otherwise a user prompt is presented.
generate_and_write_rsa_keypair("path/to/root_key2")
# Import an existing public key.
public_root_key = import_rsa_publickey_from_file("path/to/root_key.pub")
# Import an existing private key. Importing a private key requires a password, whereas
# importing a public key does not.
private_root_key = import_rsa_privatekey_from_file("path/to/root_key")
# Create a new Repository object that holds the file path to the repository and the four
# top-level role objects (Root, Targets, Release, Timestamp). Metadata files are created when
# repository.write() is called. The repository directory is created if it does not exist.
repository = create_new_repository("path/to/repository/")
# The Repository instance, 'repository', initially contains top-level Metadata objects.
# Add one of the public keys, created in the previous section, to the root role. Metadata is
# considered valid if it is signed by the public key's corresponding private key.
repository.root.add_key(public_root_key)
# Add a second public key to the root role. Although previously generated and saved to a file,
# the second public key must be imported before it can added to a role.
public_root_key2 = import_rsa_publickey_from_file("path/to/root_key2.pub")
repository.root.add_key(public_root_key2)
# Threshold of each role defaults to 1. Users may change the threshold value, but libtuf.py
# validates thresholds and warns users. Set the threshold of the root role to 2,
# which means the root metadata file is considered valid if it contains at least two valid
# signatures.
repository.root.threshold = 2
private_root_key2 = import_rsa_privatekey_from_file("path/to/root_key2", password="password")
# Load the root signing keys to the repository, which write() uses to sign the root metadata.
# The load_signing_key() method SHOULD warn when the key is NOT explicitly allowed to
# sign for it.
repository.root.load_signing_key(private_root_key)
repository.root.load_signing_key(private_root_key2)
# Print the number of valid signatures and public/private keys of the repository's metadata.
repository.status()
try:
repository.write()
# An exception is raised here by write() because the other top-level roles (targets, release,
# and timestamp) have not been configured with keys.
except tuf.Error, e:
print e
# Import an existing public key.
public_root_key = import_rsa_publickey_from_file("path/to/root_key.pub")
# Import an existing private key. Importing a private key requires a password, whereas
# importing a public key does not.
private_root_key = import_rsa_privatekey_from_file("path/to/root_key")
# Create a new Repository object that holds the file path to the repository and the four
# top-level role objects (Root, Targets, Release, Timestamp). Metadata files are created when
# repository.write() is called. The repository directory is created if it does not exist.
repository = create_new_repository("path/to/repository/")
# The Repository instance, 'repository', initially contains top-level Metadata objects.
# Add one of the public keys, created in the previous section, to the root role. Metadata is
# considered valid if it is signed by the public key's corresponding private key.
repository.root.add_key(public_root_key)
# Add a second public key to the root role. Although previously generated and saved to a file,
# the second public key must be imported before it can added to a role.
public_root_key2 = import_rsa_publickey_from_file("path/to/root_key2.pub")
repository.root.add_key(public_root_key2)
# Threshold of each role defaults to 1. Users may change the threshold value, but libtuf.py
# validates thresholds and warns users. Set the threshold of the root role to 2,
# which means the root metadata file is considered valid if it contains at least two valid
# signatures.
repository.root.threshold = 2
private_root_key2 = import_rsa_privatekey_from_file("path/to/root_key2", password="password")
# Load the root signing keys to the repository, which write() uses to sign the root metadata.
# The load_signing_key() method SHOULD warn when the key is NOT explicitly allowed to
# sign for it.
repository.root.load_signing_key(private_root_key)
repository.root.load_signing_key(private_root_key2)
# Print the number of valid signatures and public/private keys of the repository's metadata.
repository.status()
try:
repository.write()
# An exception is raised here by write() because the other top-level roles (targets, release,
# and timestamp) have not been configured with keys.
except tuf.Error, e:
print e
generate_and_write_rsa_keypair("path/to/targets_key", password="password")
generate_and_write_rsa_keypair("path/to/release_key", password="password")
generate_and_write_rsa_keypair("path/to/timestamp_key", password="password")
# Add the public keys of the remaining top-level roles.
repository.targets.add_key(import_rsa_publickey_from_file("path/to/targets_key.pub"))
repository.release.add_key(import_rsa_publickey_from_file("path/to/release_key.pub"))
repository.timestamp.add_key(import_rsa_publickey_from_file("path/to/timestamp_key.pub"))
# Import the signing keys of the remaining top-level roles. Prompt for passwords.
private_targets_key = import_rsa_privatekey_from_file("path/to/targets_key")
private_release_key = import_rsa_privatekey_from_file("path/to/release_key")
private_timestamp_key = import_rsa_privatekey_from_file("path/to/timestamp_key")
# Load the signing keys of the remaining roles so that valid signatures are generated when
# repository.write() is called.
repository.targets.load_signing_key(private_targets_key)
repository.release.load_signing_key(private_release_key)
repository.timestamp.load_signing_key(private_timestamp_key)
# Optionally set the expiration date of the timestamp role. By default, roles are set to expire
# as follows: root(1 year), targets(3 months), release(1 week), timestamp(1 day).
repository.timestamp.expiration = "2014-10-28 12:08:00"
# Write all metadata to "path/to/repository/metadata.staged/". The common case is to crawl the
# filesystem for all delegated roles in "path/to/repository/metadata.staged/targets/".
repository.write()
| lgpl-2.1 | Python | |
05c588866cc66bff33cb77fe35434f850ddd07f0 | Handle values larger than 2**63-1 in numeric crash address conversion (#119) | cihatix/FuzzManager,MozillaSecurity/FuzzManager,MozillaSecurity/FuzzManager,MozillaSecurity/FuzzManager,cihatix/FuzzManager,MozillaSecurity/FuzzManager,cihatix/FuzzManager,cihatix/FuzzManager | server/crashmanager/migrations/0009_copy_crashaddress.py | server/crashmanager/migrations/0009_copy_crashaddress.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import sys
from django.db import models, migrations
from django.conf import settings
def create_migration_tool(apps, schema_editor):
CrashEntry = apps.get_model("crashmanager", "CrashEntry")
for entry in CrashEntry.objects.filter(crashAddressNumeric = None):
if entry.crashAddress == None or len(entry.crashAddress) == 0:
entry.crashAddressNumeric = None
entry.save()
else:
try:
entry.crashAddressNumeric = long(entry.crashAddress, 16)
# Crash addresses are typically unsigned, but our database
# can only store signed 64 bit integers. Convert to signed
# if the value exceeds maximum value for signed 64 bit.
if (entry.crashAddressNumeric > (2**63-1)):
entry.crashAddressNumeric -= 2**64
entry.save()
except ValueError as e:
print("Failed to convert crash address value: %s" % entry.crashAddress, file=sys.stderr)
class Migration(migrations.Migration):
dependencies = [
('crashmanager', '0008_crashentry_crashaddressnumeric'),
]
operations = [
migrations.RunPython(
create_migration_tool,
),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import sys
from django.db import models, migrations
from django.conf import settings
def create_migration_tool(apps, schema_editor):
CrashEntry = apps.get_model("crashmanager", "CrashEntry")
for entry in CrashEntry.objects.filter(crashAddressNumeric = None):
if entry.crashAddress == None or len(entry.crashAddress) == 0:
entry.crashAddressNumeric = None
entry.save()
else:
try:
entry.crashAddressNumeric = long(entry.crashAddress, 16)
entry.save()
except ValueError as e:
print("Failed to convert crash address value: %s" % entry.crashAddress, file=sys.stderr)
class Migration(migrations.Migration):
dependencies = [
('crashmanager', '0008_crashentry_crashaddressnumeric'),
]
operations = [
migrations.RunPython(
create_migration_tool,
),
]
| mpl-2.0 | Python |
4c381da905d81bde6ed28407f8e4cd3bcbd6d8be | Add cart forms | samitnuk/online_shop,samitnuk/online_shop,samitnuk/online_shop | apps/cart/forms.py | apps/cart/forms.py | from django import forms
PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]
class CartAddProductForm(forms.Form):
quantity = forms.TypedChoiceField(choices=PRODUCT_QUANTITY_CHOICES,
coerce=int)
update = forms.BooleanField(required=False, initial=False,
widget=forms.HiddenInput)
| mit | Python | |
46f25a4e0a43ea1ea8e1aaddbcdf18f6f20badba | Add package for open source Shiny Server (#3688) | EmreAtes/spack,matthiasdiener/spack,skosukhin/spack,tmerrick1/spack,LLNL/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack,EmreAtes/spack,tmerrick1/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,skosukhin/spack,tmerrick1/spack,krafczyk/spack,krafczyk/spack,TheTimmy/spack,lgarren/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,iulian787/spack,lgarren/spack,EmreAtes/spack,skosukhin/spack,EmreAtes/spack,TheTimmy/spack,lgarren/spack,mfherbst/spack,mfherbst/spack,skosukhin/spack,iulian787/spack,iulian787/spack,skosukhin/spack,mfherbst/spack,lgarren/spack,LLNL/spack,TheTimmy/spack,TheTimmy/spack,TheTimmy/spack,matthiasdiener/spack,lgarren/spack,mfherbst/spack,LLNL/spack,krafczyk/spack,matthiasdiener/spack,matthiasdiener/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/shiny-server/package.py | var/spack/repos/builtin/packages/shiny-server/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class ShinyServer(CMakePackage):
"""Shiny server lets you put shiny web applications and interactive
documents online. Take your shiny apps and share them with your
organization or the world."""
#
# HEADS UP:
# 1. The shiny server installation step will download various node
# and npm bits from the net. They seem to have them well
# constrained ("npm shrinkwrap"?), but this package is not
# "air gappable".
# 2. Docs say that it requires 'gcc'. depends_on() won't do the
# right thing, it's Up To You.
#
homepage = "https://www.rstudio.com/products/shiny/shiny-server/"
url = "https://github.com/rstudio/shiny-server/archive/v1.5.3.838.tar.gz"
version('1.5.3.838', '96f20fdcdd94c9e9bb851baccb82b97f')
depends_on('python@:2.9.99') # docs say: "Really. 3.x will not work"
depends_on('cmake@2.8.10:')
depends_on('git')
depends_on('r+X')
depends_on('openssl')
def cmake_args(self):
spec = self.spec
options = []
options.extend([
"-DPYTHON=%s" % join_path(spec['python'].prefix.bin, 'python'),
])
return options
# Recompile the npm modules included in the project
@run_after('build')
def build_node(self):
bash = which('bash')
mkdirp('build')
bash('-c', 'bin/npm --python="$PYTHON" install')
bash('-c', 'bin/node ./ext/node/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js --python="$PYTHON" rebuild') # noqa: E501
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PATH',
join_path(self.prefix, 'shiny-server', 'bin'))
# shiny comes with its own pandoc; hook it up...
run_env.prepend_path('PATH',
join_path(self.prefix, 'shiny-server',
'ext', 'pandoc', 'static'))
| lgpl-2.1 | Python | |
1bb2a9213dad8bde8a05da63438dbcdd0d8d09c6 | add example for asynchronous execution, little simpler than multiprocessing, uses a decorator to simplify it further | isidroamv/netmiko,nitzmahone/netmiko,jinesh-patel/netmiko,fooelisa/netmiko,fooelisa/netmiko,mileswdavis/netmiko,rdezavalia/netmiko,brutus333/netmiko,enzzzy/netmiko,isidroamv/netmiko,enzzzy/netmiko,jumpojoy/netmiko,mzbenami/netmiko,ivandgreat/netmiko,rdezavalia/netmiko,shamanu4/netmiko,ktbyers/netmiko,MikeOfNoTrades/netmiko,MikeOfNoTrades/netmiko,isponline/netmiko,jumpojoy/netmiko,mileswdavis/netmiko,shsingh/netmiko,shsingh/netmiko,rumo/netmiko,nitzmahone/netmiko,mzbenami/netmiko,jinesh-patel/netmiko,isponline/netmiko,ivandgreat/netmiko,shamanu4/netmiko,ktbyers/netmiko,brutus333/netmiko,rumo/netmiko | examples/async.py | examples/async.py | #!/usr/bin/env python2.7
"""Example of asynchronously running "show version".
async(): decorator to make further functions asynchronous
command_runner(): creates a connection and runs an arbitrary command
main(): entry point, runs the command_runner
"""
import netmiko
from inspect import getmodule
from multiprocessing import Pool
def async(decorated):
"""Wraps a top-level function around an asynchronous dispatcher.
When the decorated function is called, a task is submitted to a process
pool, and a future object is returned, providing access to an eventual
return value.
The future object has a blocking get() method to access the task result:
it will return immediately if the job is already done, or block until it
completes.
See http://stackoverflow.com/questions/1239035/asynchronous-method-call-in-python
"""
# Keeps the original function visible from the module global namespace,
# under a name consistent to its __name__ attribute. This is necessary for
# the multiprocessing pickling machinery to work properly.
module = getmodule(decorated)
decorated.__name__ += '_original'
setattr(module, decorated.__name__, decorated)
def send(*args, **opts):
"""Returns asynchronously."""
return async.pool.apply_async(decorated, args, opts)
return send
@async
def command_runner(dispatcher, cmd):
"""Run show version on many devices."""
# Prepare the dispatcher
dsp = netmiko.ssh_dispatcher(dispatcher["device_type"])
# Run the dispatcher and get the device ready
dev = dsp(**dispatcher)
# returns the output of the variable `cmd` that was passed
return dev.send_command(cmd)
def main():
"""Program entry point."""
async.pool = Pool(10)
devices = ["10.10.10.1", "10.10.10.2", "10.10.10.3", "10.10.10.4", "10.10.10.5",
"10.10.10.6", "10.10.10.7", "10.10.10.8", "10.10.10.9", "10.10.10.10"]
cmd = "show version"
results = []
for device in devices:
# Assumes all devices are Juniper devices
dispatcher = {"device_type": "juniper",
"ip": device,
"username": "user",
"password": "pass"}
result = command_runner(dispatcher, cmd)
results.append(result)
# Must use the `get()` method or you will just get a list of pool objects
results = [i.get() for i in results]
print results
if __name__ == "__main__":
main()
| mit | Python | |
d159b32d51339915ef633f3c6d33ce5eeafa78d6 | Add py solution for 396. Rotate Function | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/rotate-function.py | py/rotate-function.py | class Solution(object):
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
lA = len(A)
if not lA:
return 0
subsum = 0
F = 0
for i in xrange(1, lA):
subsum += A[-i]
F += subsum
subsum += A[0]
m = F
for i in xrange(1, lA):
F += subsum
F -= lA * A[-i]
m = max(m, F)
return m
| apache-2.0 | Python | |
70ff0faa7da6066bb75ddb871f67aa749f5bdc4e | Add custom field rendering tests | jmagnusson/django-admin-bootstrapped,andrewyager/django-admin-bootstrapped,kevingu1003/django-admin-bootstrapped,kevingu1003/django-admin-bootstrapped,Corner1024/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin,mynksngh/django-admin-bootstrapped,avara1986/django-admin-bootstrapped,sn0wolf/django-admin-bootstrapped,django-admin-bootstrapped/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin,squallcs12/django-admin-bootstrapped,bformet/django-admin-bootstrapped,xrmx/django-admin-bootstrapped,merlian/django-admin-bootstrapped,sn0wolf/django-admin-bootstrapped,IMAmuseum/django-admin-bootstrapped,benthomasson/django-admin-bootstrapped,merlian/django-admin-bootstrapped,benthomasson/django-admin-bootstrapped,IMAmuseum/django-admin-bootstrapped,xrmx/django-admin-bootstrapped,squallcs12/django-admin-bootstrapped,benthomasson/django-admin-bootstrapped,avara1986/django-admin-bootstrapped,IMAmuseum/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin,pombredanne/django-admin-bootstrapped,django-admin-bootstrapped/django-admin-bootstrapped,Corner1024/django-admin-bootstrapped,andrewyager/django-admin-bootstrapped,merlian/django-admin-bootstrapped,avara1986/django-admin-bootstrapped,mynksngh/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin-new,xrmx/django-admin-bootstrapped,pombredanne/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin-new,squallcs12/django-admin-bootstrapped,pombredanne/django-admin-bootstrapped,bformet/django-admin-bootstrapped,sn0wolf/django-admin-bootstrapped,jmagnusson/django-admin-bootstrapped,kevingu1003/django-admin-bootstrapped,jmagnusson/django-admin-bootstrapped,askinteractive/mezzanine-advanced-admin-new,Corner1024/django-admin-bootstrapped,mynksngh/django-admin-bootstrapped,andrewyager/django-admin-bootstrapped,django-admin-bootstrapped/django-admin-bootstrapped,bformet/django-admin-bootstrapped | 
django_admin_bootstrapped/tests.py | django_admin_bootstrapped/tests.py | from __future__ import absolute_import
from django.test import TestCase
from django.contrib.admin.widgets import AdminDateWidget
from django.template import Template, Context
from django import forms
try:
from bootstrap3 import renderers
except ImportError:
# nothing to test if we don't have django-bootstrap3 installed
pass
else:
from .renderers import BootstrapFieldRenderer
class RendererTestCase(TestCase):
def setUp(self):
class TestForm(forms.Form):
char = forms.CharField(max_length=255)
hidden = forms.CharField(max_length=255, widget=forms.HiddenInput())
date = forms.DateField(widget=AdminDateWidget())
self.form = TestForm({
'char': 'hi there',
'hidden': 'hidden text',
'date': '20140111',
})
def render_template(self, field):
context = { 'field': field }
template = Template('{% load bootstrapped_goodies_tags %} {% dab_field_rendering field %}')
return template.render(Context(context))
def test_basic_functionality(self):
field = self.form['char']
html = self.render_template(field)
# we prepend this class
self.assertIn('class="form-control', html)
def test_hidden_input(self):
field = self.form['hidden']
html = self.render_template(field)
self.assertIn('type="hidden"', html)
def test_control_inline(self):
field = self.form['date']
html = self.render_template(field)
# we prepend these classes
self.assertIn('class="form-control form-control-inline', html)
| apache-2.0 | Python | |
84e14782f353ef1d0dec20ed1da31cfb1da413a4 | Add diary example. | johndlong/walrus,coleifer/walrus | examples/diary.py | examples/diary.py | #!/usr/bin/env python
from collections import OrderedDict
import datetime
import sys
from walrus import *
database = Database(host='localhost', port=6379, db=0)
class Entry(Model):
database = database
namespace = 'diary'
content = TextField(fts=True)
timestamp = DateTimeField(default=datetime.datetime.now, index=True)
def menu_loop():
choice = None
while choice != 'q':
for key, value in menu.items():
print('%s) %s' % (key, value.__doc__))
choice = raw_input('Action: ').lower().strip()
if choice in menu:
menu[choice]()
def add_entry():
"""Add entry"""
print('Enter your entry. Press ctrl+d when finished.')
data = sys.stdin.read().strip()
if data and raw_input('Save entry? [Yn] ') != 'n':
Entry.create(content=data)
print('Saved successfully.')
def view_entries(search_query=None):
"""View previous entries"""
if search_query:
expr = Entry.content.match(search_query)
else:
expr = None
query = Entry.query(expr, order_by=Entry.timestamp.desc())
for entry in query:
timestamp = entry.timestamp.strftime('%A %B %d, %Y %I:%M%p')
print(timestamp)
print('=' * len(timestamp))
print(entry.content)
print('n) next entry')
print('q) return to main menu')
if raw_input('Choice? (Nq) ') == 'q':
break
def search_entries():
"""Search entries"""
view_entries(raw_input('Search query: '))
menu = OrderedDict([
('a', add_entry),
('v', view_entries),
('s', search_entries),
])
if __name__ == '__main__':
menu_loop()
| mit | Python | |
eca8accb984c252f36289cd7bbab8ab23c198317 | Create problem3.py | Amapolita/MITx--6.00.1x- | W2/L4/problem3.py | W2/L4/problem3.py | #L4 PROBLEM 3
def square(x):
'''
x: int or float.
'''
return x ** 2
| unlicense | Python | |
3332370d70ad30856c9517e51eedc454500f8bf8 | Add forwarding script for build-bisect.py. | yitian134/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,adobe/chromium,ropik/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,ropik/chromium,adobe/chromium,ropik/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,ropik/chromium,yitian134/chromium,ropik/chromium,gavinp/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,gavinp/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,adobe/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,ropik/chromium,Crystalnix/house-of-life-chromium | build/build-bisect.py | build/build-bisect.py | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
print "This script has been moved to tools/bisect-builds.py."
print "Please update any docs you're working from!"
sys.exit(1)
| bsd-3-clause | Python | |
35e720cf7b9cbae4e077d0699dd321f741180787 | Create cached_test.py | Coal0/Utilities | cached/cached_test.py | cached/cached_test.py | from cached import cached
class Double:
def __init__(self, x):
self._x = x
@cached("_double_x")
def value(self):
return self._x * 2
| mit | Python | |
ee7c257b62bff832b899f54fd7bf39ae47db05b7 | Add tool to get new url | makyo/polycul.es,makyo/polycul.es,makyo/polycul.es | get_new_url.py | get_new_url.py | import sys
import polycules
if len(sys.argv) != 2:
print('Expected ID, got too little or too much')
old_id = sys.argv[1]
db = polycules.connect_db()
result = db.execute('select hash from polycules where id = ?', [
old_id,
]).fetchone()
if result is None:
print("Couldn't find the polycule with that ID")
print('New url: https://polycul.es/{}'.format(result[0][:7]))
| mit | Python | |
a994df7e8961e0d82a37ed268dba55c021c7ccd1 | Move order - here till i get the order_desc stuff working. | thousandparsec/libtpproto-py,thousandparsec/libtpproto-py | objects/OrderExtra/Move.py | objects/OrderExtra/Move.py |
from xstruct import pack
from objects import Order
class Move(Order):
"""\
Move to a place in space.
"""
subtype = 1
substruct = "qqq"
def __init__(self, sequence, \
id, type, slot, turns, resources, \
x, y, z):
Order.__init__(self, sequence, \
id, type, slot, turns, resources,
x, y, z)
self.length += 3*8
self.pos = (x, y, z)
def __repr__(self):
output = Order.__repr__(self)
output += pack(self.substruct, self.pos[0], self.pos[1], self.pos[2])
return output
| lgpl-2.1 | Python | |
b8764629331caeeb37a4845480ed884841719525 | scale phage counts to percent so match bacteria counts | linsalrob/PhageHosts,linsalrob/PhageHosts,linsalrob/PhageHosts | code/percent_phage_counts.py | code/percent_phage_counts.py | """
The phage counts per metagenome are normalized based on the number of
reads that hit. I want to scale that to a percent, so that it matches
the bacterial data. If there was a single phage present it would get
100% of the reads
"""
import os
import sys
try:
inf = sys.argv[1]
ouf = sys.argv[2]
except:
sys.exit(sys.argv[0] + " <phage abundance file> (probably normalized_phage_mg_counts.tsv) <output file>")
header = None
data = []
total = []
with open(inf, 'r') as fin:
header = fin.readline()
h = header.split("\t")
for i in range(len(h)):
total.append(0)
for l in fin:
p = l.strip().split("\t")
for i in range(1, len(p)-1):
total[i] += float(p[i])
data.append(p)
with open(ouf, 'w') as out:
out.write(header)
for l in data:
out.write(l[0])
for i in range(1, len(l)-1):
if total[i] == 0:
# this metagenome has no phage hits!
out.write("\t0")
else:
out.write("\t" + str(1.0 * float(l[i])/total[i] * 100))
out.write("\t" + l[-1] + "\n")
| mit | Python | |
d2a84fb3a8165c9526aa5c96f308dda3b92a2c2c | add new decision module | Salman-H/mars-search-robot | code/decision.py | code/decision.py | """
Module for rover decision-handling.
Used to build a decision tree for determining throttle, brake and
steer commands based on the output of the perception_step() function
in the perception module.
"""
__author__ = 'Salman Hashmi'
__license__ = 'BSD License'
import time
import numpy as np
import states
import events
class DecisionHandler():
"""Handle events and switch between states."""
def __init__(self):
"""Initialize a DecisionHandler instance."""
def execute(self, Rover):
"""Select and execute the current state action."""
| bsd-2-clause | Python | |
3896ddcf660e168afaa80a0be9d7b40b6dd15967 | Add script to clean source code of compiled files. | lewisodriscoll/sasview,SasView/sasview,SasView/sasview,SasView/sasview,lewisodriscoll/sasview,SasView/sasview,SasView/sasview,SasView/sasview,lewisodriscoll/sasview,lewisodriscoll/sasview,lewisodriscoll/sasview | sansview/clean.py | sansview/clean.py | """
Remove all compiled code.
"""
import os
filedirs = ['.', 'perspectives', 'perspectives/fitting']
for d in filedirs:
files = os.listdir(d)
for f in files:
if f.find('.pyc')>0:
print "Removed", f
os.remove(os.path.join(d,f)) | bsd-3-clause | Python | |
ec736876e11a5aa4f52c63a91b05fc342e298051 | Add config.sample.py. | huxuan/CAPUHome-API,CAPU-ENG/CAPUHome-API | config.sample.py | config.sample.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: config.sample.py
Author: huxuan <i@huxuan.org>
Description: Configuration file for app.
"""
# Debug or not
DEBUG = True
# Make jsonfiy encode in utf-8.
JSON_AS_ASCII = False
# Secret key.
SECRET_KEY = 'CAPUHOME_Secret_Key'
# Database & sqlalchemy.
DB_USERNAME = 'username'
DB_PASSWORD = 'password'
DB_SERVER = 'localhost'
DB_NAME = 'dbname'
SQLALCHEMY_DATABASE_URI = 'mysql://{}:{}@{}/{}'.format(
DB_USERNAME, DB_PASSWORD, DB_SERVER, DB_NAME)
| mit | Python | |
ec9d97f7017939651fc78605fc81a2f030f88b5f | Add exceptions file | chrisgilmerproj/brewday,chrisgilmerproj/brewday | brew/exceptions.py | brew/exceptions.py | # -*- coding: utf-8 -*-
__all__ = [
u'BrewdayException',
u'DataLoaderException',
u'GrainException',
u'HopException',
u'StyleException',
u'YeastException',
]
class BrewdayException(Exception):
pass
class DataLoaderException(BrewdayException):
pass
class GrainException(BrewdayException):
pass
class HopException(BrewdayException):
pass
class StyleException(BrewdayException):
pass
class YeastException(BrewdayException):
pass
| mit | Python | |
2dd55385c3c8209217bde19c5a8d30ad929ce084 | Create employee.py | Chippers255/scheduler | scheduler/employee.py | scheduler/employee.py | # -*- coding: utf-8 -*-
# employee.py
#
# Created by Thomas Nelson <tn90ca@gmail.com>
#
# Created..........2015-03-12
# Modified.........2015-03-12
class Employee (object):
"""This class will represent an employee and there available time slots
for each work day that the provided store is open.
"""
def __init__(self, store, name):
"""
"""
self.date = []
self.name = name
self.date.append([True] * len(store.date[0]))
self.date.append([True] * len(store.date[1]))
self.date.append([True] * len(store.date[2]))
self.date.append([True] * len(store.date[3]))
self.date.append([True] * len(store.date[4]))
self.date.append([True] * len(store.date[5]))
# end def __init__
def add_hours(self, date, t_start, t_end, store):
"""
"""
for t in xrange(len(store.date[date])):
if store.date[date][t] == t_start:
start_time = t
if store.date[date][t] == t_end:
end_time = t
for time in xrange(start_time, end_time+1):
self.date[date][time] = False
# end def add_hours
# end class Employee
| mit | Python | |
70116d7181f48c16d614063df4de54dff172e8c6 | Add internal note | conda/conda-env,isaac-kit/conda-env,mikecroucher/conda-env,phobson/conda-env,dan-blanchard/conda-env,asmeurer/conda-env,phobson/conda-env,nicoddemus/conda-env,ESSS/conda-env,nicoddemus/conda-env,mikecroucher/conda-env,asmeurer/conda-env,ESSS/conda-env,dan-blanchard/conda-env,isaac-kit/conda-env,conda/conda-env | conda_env/cli/main_export.py | conda_env/cli/main_export.py | from argparse import RawDescriptionHelpFormatter
from copy import copy
import os
import sys
import textwrap
import yaml
from conda.cli import common
from conda.cli import main_list
from conda import config
from conda import install
description = """
Export a given environment
"""
example = """
examples:
conda env export
conda env export --file SOME_FILE
"""
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
'export',
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
p.add_argument(
'-n', '--name',
action='store',
help='name of environment (in %s)' % os.pathsep.join(config.envs_dirs),
default=None,
)
p.add_argument(
'-f', '--file',
default=None,
required=False
)
p.set_defaults(func=execute)
# TODO Make this aware of channels that were used to install packages
def execute(args, parser):
if not args.name:
# Note, this is a hack fofr get_prefix that assumes argparse results
# TODO Refactor common.get_prefix
name = os.environ.get('CONDA_DEFAULT_ENV', False)
if not name:
msg = "Unable to determine environment\n\n"
msg += textwrap.dedent("""
Please re-run this command with one of the following options:
* Provide an environment name via --name or -n
* Re-run this command inside an activated conda environment.""").lstrip()
# TODO Add json support
common.error_and_exit(msg, json=False)
args.name = name
prefix = common.get_prefix(args)
installed = install.linked(prefix)
conda_pkgs = copy(installed)
# json=True hides the output, data is added to installed
main_list.add_pip_installed(prefix, installed, json=True)
pip_pkgs = sorted(installed - conda_pkgs)
dependencies = ['='.join(a.rsplit('-', 2)) for a in sorted(conda_pkgs)]
if len(pip_pkgs) > 0:
dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})
data = {
'name': args.name,
'dependencies': dependencies,
}
if args.file is None:
fp = sys.stdout
else:
fp = open(args.file, 'wb')
yaml.dump(data, default_flow_style=False, stream=fp)
| from argparse import RawDescriptionHelpFormatter
from copy import copy
import os
import sys
import textwrap
import yaml
from conda.cli import common
from conda.cli import main_list
from conda import config
from conda import install
description = """
Export a given environment
"""
example = """
examples:
conda env export
conda env export --file SOME_FILE
"""
def configure_parser(sub_parsers):
p = sub_parsers.add_parser(
'export',
formatter_class=RawDescriptionHelpFormatter,
description=description,
help=description,
epilog=example,
)
p.add_argument(
'-n', '--name',
action='store',
help='name of environment (in %s)' % os.pathsep.join(config.envs_dirs),
default=None,
)
p.add_argument(
'-f', '--file',
default=None,
required=False
)
p.set_defaults(func=execute)
def execute(args, parser):
if not args.name:
# Note, this is a hack fofr get_prefix that assumes argparse results
# TODO Refactor common.get_prefix
name = os.environ.get('CONDA_DEFAULT_ENV', False)
if not name:
msg = "Unable to determine environment\n\n"
msg += textwrap.dedent("""
Please re-run this command with one of the following options:
* Provide an environment name via --name or -n
* Re-run this command inside an activated conda environment.""").lstrip()
# TODO Add json support
common.error_and_exit(msg, json=False)
args.name = name
prefix = common.get_prefix(args)
installed = install.linked(prefix)
conda_pkgs = copy(installed)
# json=True hides the output, data is added to installed
main_list.add_pip_installed(prefix, installed, json=True)
pip_pkgs = sorted(installed - conda_pkgs)
dependencies = ['='.join(a.rsplit('-', 2)) for a in sorted(conda_pkgs)]
if len(pip_pkgs) > 0:
dependencies.append({'pip': ['=='.join(a.rsplit('-', 2)[:2]) for a in pip_pkgs]})
data = {
'name': args.name,
'dependencies': dependencies,
}
if args.file is None:
fp = sys.stdout
else:
fp = open(args.file, 'wb')
yaml.dump(data, default_flow_style=False, stream=fp)
| bsd-3-clause | Python |
1a23860bfcc4fc5259bdcb0f208e38909e7055cc | add peak-merging script | brentp/combined-pvalues,brentp/combined-pvalues | cpv/peaks.py | cpv/peaks.py | """
find peaks or troughs in bed files
for a bedgraph file with pvalues in the 4th column. usage would be:
$ python peaks.py --dist 100 --seed 0.01 some.bed > some.regions.bed
where regions.bed contains the start and end of the region and (currently) the
lowest p-value in that region.
"""
from itertools import groupby
import operator
from toolshed import reader
import argparse
import sys
def bediter(fname, col_num):
"""
iterate over a bed file. turn col_num into a float
and the start, stop column into an int and yield a dict
for each row.
"""
for l in reader(fname, header=False):
if l[0][0] == "#": continue
yield {"chrom": l[0], "start": int(l[1]), "end": int(l[2]),
"p": float(l[col_num])} # "stuff": l[3:][:]}
# use class to keep track of written peaks.
class _write_peaks(object):
def __init__(self):
self.peak_count = 0
def __call__(self, peaks, seed, out, scmp):
# could have a list with only those passing the threshold.
if not any(scmp(p["p"], seed) for p in peaks): return None
if len(peaks) == 0: return None
# peak_count unused...
self.peak_count += 1
peak_start = peaks[0]["start"]
peak_end = peaks[-1]["end"]
peak_count = len(peaks)
# TODO: something better than keep best p-value ?
pbest = peaks[0]["p"]
for p in peaks:
if scmp(p["p"], pbest): pbest = p["p"]
out.write("%s\t%i\t%i\t%.4g\t%i\n" % (
peaks[0]["chrom"], peak_start, peak_end, pbest, peak_count))
write_peaks = _write_peaks()
def walk(chromiter, thresh, seed, dist, out=None, scmp=operator.le):
assert(scmp(seed, thresh))
for key, bedlist in groupby(chromiter, lambda c: c["chrom"]):
last_start = -1
peaks = []
for b in bedlist:
assert last_start <= b["start"], ("enforce sorted")
last_start = b["start"]
# this comparison gets both thresh and seed.
if scmp(b["p"], thresh):
# we have to add this to peaks.
# first check distance.
# if distance is too great, we create a new peak
if peaks != [] and b["end"] - peaks[-1]["start"] > dist:
if out is None:
if any(scmp(p, seed) for p in peaks):
for p in peaks: yield p
else:
write_peaks(peaks, seed, out, scmp)
peaks = []
#add new peak regardless
peaks.append(b)
if out is None:
if any(scmp(p, seed) for p in peaks):
for p in peaks: yield p
else:
write_peaks(peaks, seed, out, scmp)
if __name__ == "__main__":
p = argparse.ArgumentParser(__doc__)
p.add_argument("--dist", dest="dist", help="Maximum dist to skip before "
" finding a seed/thresh value. If this number is exceeded, the"
" region is ended.", type=int)
p.add_argument("--seed", dest="seed", help="A value must be at least this"
" large/small in order to seed a region.", type=float)
p.add_argument("--threshold", dest="threshold", help="After seeding, a value"
" of at least this number can extend a region. ",
type=float, default=3.0)
p.add_argument("--invert", dest="invert", action="store_true",
help="by default, the test is for a value less-than seed or"
" thresh--e.g. for p-values. If this flag is specified, the test"
" is for greater-than--e.g. for scores or -log10(p-values)")
p.add_argument("-c", type=int, help="column number containing the value "
"for which to find peaks.", default=4)
p.add_argument("bed_file")
try:
args = p.parse_args()
except TypeError:
sys.exit(not p.print_help())
if args.threshold is None:
args.threshold = args.seed
print >>sys.stderr, "setting threshold == seed"
chromiter = bediter(args.bed_file, args.c - 1)
scmp = operator.ge if args.invert else operator.le
# call list because the walk function is an iterator.
list(walk(chromiter, args.threshold, args.seed, args.dist, sys.stdout))
| mit | Python | |
f66038d1599843913dbe88eb02fa80b79e0d6e57 | add script for bitwise operation | haozai309/hello_python | codecademy/bitwise.py | codecademy/bitwise.py |
print 5 >> 4 # Right Shift
print 5 << 1 # Left Shift
print 8 & 5 # Bitwise AND
print 9 | 4 # Bitwise OR
print 12 ^ 42 # Bitwise XOR
print ~88 # Bitwise NOT
print "the base 2 number system"
print 0b1, #1
print 0b10, #2
print 0b11, #3
print 0b100, #4
print 0b101, #5
print 0b110, #6
print 0b111 #7
print "******"
print 0b1 + 0b11
print 0b11 * 0b11
# binary nubmer 1~12
one = 0b1
two = 0b10
three = 0b11
four = 0b100
five = 0b101
six = 0b110
seven = 0b111
eight =0b1000
nine = 0b1001
ten = 0b1010
eleven = 0b1011
twelve = 0b1100
print bin(1)
print int(bin(5),2)
# Print out the decimal equivalent of the binary 11001001.
print int("11001001", 2)
# Slide to the Left! Slide to the Right!
shift_right = 0b1100
shift_left = 0b1
# Your code here!
shift_right >>= 2
shift_left <<= 2
print bin(shift_right)
print bin(shift_left)
print bin(0b1110 & 0b101)
print bin(0b1110 | 0b101)
def check_bit4(input):
    """Return "on" if bit 4 (value 0b1000) of *input* is set, else "off"."""
    return "on" if input & 0b1000 else "off"
# Masking examples: set bit 3 with OR, invert all eight bits with XOR.
a = 0b10111011
mask = 0b100
print bin(a | mask)
a = 0b11101110
mask = 0b11111111
print bin(a ^ mask)
print a, mask, a^mask
def flip_bit(number, n):
    """Flip the n-th bit of *number* (n is 1-indexed from the least
    significant bit) and return the result as a binary string."""
    return bin(number ^ (1 << (n - 1)))
| apache-2.0 | Python | |
059ab529b05d0640e7099e307878db58d6f2ffc9 | update board test | duguyue100/minesweeper | scripts/test-board.py | scripts/test-board.py | """Test script for the game board.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
from minesweeper.msgame import MSGame
# Drive an interactive session against the minesweeper board.
game = MSGame(10, 10, 20)  # 10x10 board -- third arg presumably the mine count; confirm in MSGame
game.print_board()
try:
    input = raw_input  # Python 2: use raw_input so moves are read as strings
except NameError:
    pass  # Python 3: builtin input already returns a string
# NOTE(review): game_status == 2 appears to mean "in progress" -- confirm in MSGame
while game.game_status == 2:
    # play move
    move = input("Move: ")
    game.play_move_msg(move)
| mit | Python | |
65d2202bc686019ebdaf292693c79ace326ef798 | Create MyoThalmic.py | MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,sstocker46/pyrobotlab | service/MyoThalmic.py | service/MyoThalmic.py |
from com.thalmic.myo import Pose
myo = Runtime.start("python", "Python")
myo = Runtime.start("myo", "MyoThalmic")
myo.connect()
myo.addPoseListener(python)
onPose(pose):
print(pose.getType())
| apache-2.0 | Python | |
045f711f59c89559746ffddafecd92302c0a9ec6 | add fct_collapse and fct_lump funcs | machow/siuba | siuba/dply/forcats.py | siuba/dply/forcats.py | import pandas as pd
import numpy as np
from ..siu import create_sym_call, Symbolic
from functools import singledispatch
# TODO: move into siu
def register_symbolic(f):
@f.register(Symbolic)
def _dispatch_symbol(__data, *args, **kwargs):
return create_sym_call(f, __data.source, *args, **kwargs)
return f
@register_symbolic
@singledispatch
def fct_reorder(fct, x, func = np.median):
s = pd.Series(x.values, index = fct)
# for each cat, calc agg func, make values of ordered the codes
ordered = s.groupby(level = 0).agg(func).sort_values()
ordered[:] = np.arange(len(ordered))
codes = ordered[s.index.values]
return pd.Categorical.from_codes(codes, list(ordered.index))
@register_symbolic
@singledispatch
def fct_recode(fct, **kwargs):
if not isinstance(fct, pd.Categorical):
fct = pd.Categorical(fct)
rev_kwargs = {v:k for k,v in kwargs.items()}
fct.rename_categories(rev_kwargs)
@register_symbolic
@singledispatch
def fct_collapse(fct, recat, group_other = None):
if not isinstance(fct, pd.Categorical):
fct = pd.Categorical(fct)
# each existing cat will map to a new one ----
# need to know existing to new cat
# need to know new cat to new code
cat_to_new = {k: None for k in fct.categories}
new_cat_set = {k: True for k in fct.categories}
for new_name, v in recat.items():
v = [v] if not np.ndim(v) else v
for old_name in v:
if cat_to_new[old_name] is not None:
raise Exception("category %s was already re-assigned"%old_name)
cat_to_new[old_name] = new_name
del new_cat_set[old_name]
new_cat_set[new_name] = True # add new cat
# collapse all unspecified cats to group_other if specified ----
for k, v in cat_to_new.items():
if v is None:
if group_other is not None:
new_cat_set[group_other] = True
cat_to_new[k] = group_other
del new_cat_set[k]
else:
cat_to_new[k] = k
# map from old cat to new code ----
# calculate new codes
new_cat_set = {k: ii for ii, k in enumerate(new_cat_set)}
# map old cats to them
remap_code = {old: new_cat_set[new] for old, new in cat_to_new.items()}
new_codes = fct.map(remap_code)
new_cats = list(new_cat_set.keys())
return pd.Categorical.from_codes(new_codes, new_cats)
@register_symbolic
@singledispatch
def fct_lump(fct, n = None, prop = None, w = None, other_level = "Other", ties = None):
if ties is not None:
raise NotImplementedError("ties is not implemented")
if n is None and prop is None:
raise NotImplementedError("Either n or prop must be specified")
if prop is not None:
raise NotImplementedError("prop arg is not implemented")
keep_cats = _fct_lump_n_cats(fct, n, w, other_level, ties)
return fct_collapse(fct, {k:k for k in keep_cats}, group_other = other_level)
def _fct_lump_n_cats(fct, n, w, other_level, ties):
# TODO: currently always selects n, even if ties
ascending = n < 0
arr = w if w is not None else 1
ser = pd.Series(arr, index = fct)
sorted_arr = ser.groupby(level = 0).sum().sort_values(ascending = ascending)
return sorted_arr.iloc[:abs(n)].index.values
| mit | Python | |
c1b27a617c9050799bb11f4c161f925f153da5bc | add test_gst_rtsp_server.py | tamaggo/gstreamer-examples | test_gst_rtsp_server.py | test_gst_rtsp_server.py | #!/usr/bin/env python
# -*- coding:utf-8 vi:ts=4:noexpandtab
# Simple RTSP server. Run as-is or with a command-line to replace the default pipeline
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GstRtspServer, GObject
loop = GObject.MainLoop()
GObject.threads_init()
Gst.init(None)
class MyFactory(GstRtspServer.RTSPMediaFactory):
def __init__(self):
GstRtspServer.RTSPMediaFactory.__init__(self)
def do_create_element(self, url):
s_src = "v4l2src ! video/x-raw,rate=30,width=320,height=240 ! videoconvert ! video/x-raw,format=I420"
s_h264 = "videoconvert ! vaapiencode_h264 bitrate=1000"
s_src = "videotestsrc ! video/x-raw,rate=30,width=320,height=240,format=I420"
s_h264 = "x264enc tune=zerolatency"
pipeline_str = "( {s_src} ! queue max-size-buffers=1 name=q_enc ! {s_h264} ! rtph264pay name=pay0 pt=96 )".format(**locals())
if len(sys.argv) > 1:
pipeline_str = " ".join(sys.argv[1:])
print(pipeline_str)
return Gst.parse_launch(pipeline_str)
class GstServer():
def __init__(self):
self.server = GstRtspServer.RTSPServer()
f = MyFactory()
f.set_shared(True)
m = self.server.get_mount_points()
m.add_factory("/test", f)
self.server.attach(None)
if __name__ == '__main__':
s = GstServer()
loop.run()
| mit | Python | |
20b3616c810e5f1c4891c5b877b924925c8b28a8 | Add basic tests for redirect/callback/auth flow | adamcik/oauthclientbridge | tests/authorize_test.py | tests/authorize_test.py | import json
import urlparse
import pytest
from oauthclientbridge import app, crypto, db
@pytest.fixture
def client():
app.config.update({
'TESTING': True,
'SECRET_KEY': 's3cret',
'OAUTH_DATABASE': ':memory:',
'OAUTH_CLIENT_ID': 'client',
'OAUTH_CLIENT_SECRET': 's3cret',
'OAUTH_AUTHORIZATION_URI': 'https://example.com/auth',
'OAUTH_TOKEN_URI': 'https://example.com/token',
'OAUTH_REDIRECT_URI': 'https://example.com/callback',
})
client = app.test_client()
with app.app_context():
db.initialize()
yield client
@pytest.fixture
def state(client):
with client.session_transaction() as session:
session['state'] = 'abcdef'
return 'abcdef'
def test_authorize_redirects(client):
resp = client.get('/')
location = urlparse.urlparse(resp.location)
params = urlparse.parse_qs(location.query)
assert resp.status_code == 302
assert location.netloc == 'example.com'
assert location.path == '/auth'
with client.session_transaction() as session:
assert 'state' in session
@pytest.mark.parametrize('query,expected_error', [
('', 'invalid_state'),
('?code', 'invalid_state'),
('?code=1234', 'invalid_state'),
('?state={state}', 'invalid_request'),
('?state={state}&code', 'invalid_request'),
('?state={state}&error=invalid_request', 'invalid_request'),
('?state={state}&error=unauthorized_client', 'unauthorized_client'),
('?state={state}&error=access_denied', 'access_denied'),
('?state={state}&error=unsupported_response_type',
'unsupported_response_type'),
('?state={state}&error=invalid_scope', 'invalid_scope'),
('?state={state}&error=server_error', 'server_error'),
('?state={state}&error=temporarily_unavailable',
'temporarily_unavailable'),
('?state={state}&error=badErrorCode', 'invalid_error'),
])
def test_callback_error_handling(query, expected_error, client, state):
app.config['OAUTH_CALLBACK_TEMPLATE'] = '{{error}}'
resp = client.get('/callback' + query.format(state=state))
assert resp.status_code == 400
assert resp.data == expected_error
@pytest.mark.parametrize('data,expected_error', [
({}, 'server_error'),
({'token_type': 'foobar'}, 'server_error'),
({'access_token': 'foobar'}, 'server_error'),
({'access_token': '', 'token_type': ''}, 'server_error'),
({'access_token': 'foobar', 'token_type': ''}, 'server_error'),
({'access_token': '', 'token_type': 'foobar'}, 'server_error'),
({'error': 'invalid_request'}, 'invalid_request'),
({'error': 'invalid_client'}, 'invalid_client'),
({'error': 'invalid_grant'}, 'invalid_grant'),
({'error': 'unauthorized_client'}, 'unauthorized_client'),
({'error': 'unsupported_grant_type'}, 'unsupported_grant_type'),
({'error': 'invalid_scope'}, 'invalid_scope'),
({'error': 'server_error'}, 'server_error'),
({'error': 'badErrorCode'}, 'invalid_error'),
])
def test_callback_authorization_code_error_handling(
data, expected_error, client, state, requests_mock):
app.config['OAUTH_CALLBACK_TEMPLATE'] = '{{error}}'
requests_mock.post(app.config['OAUTH_TOKEN_URI'], json=data)
resp = client.get('/callback?code=1234&state=' + state)
assert resp.status_code == 400
assert resp.data == expected_error
# TODO: Test with more status codes from callback...
def test_callback_authorization_code_invalid_response(
client, state, requests_mock):
app.config['OAUTH_CALLBACK_TEMPLATE'] = '{{error}}'
requests_mock.post(app.config['OAUTH_TOKEN_URI'], text='Not a JSON value')
resp = client.get('/callback?code=1234&state=' + state)
assert resp.status_code == 400
assert resp.data == 'server_error'
def test_callback_authorization_code_store_token(client, state, requests_mock):
app.config['OAUTH_CALLBACK_TEMPLATE'] = '{{client_id}}:{{client_secret}}'
data = {'token_type': 'bearer', 'access_token': '1234567890'}
requests_mock.post(app.config['OAUTH_TOKEN_URI'], json=data)
resp = client.get('/callback?code=1234&state=' + state)
client_id, client_secret = resp.data.split(':')
# Peek inside internals to check that our token got stored.
assert data == crypto.loads(client_secret, db.lookup(client_id))
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.