commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
78cc6be2243e1e41000f4ff214fe8edea391780c | Create mixcolumns.py | UCSBarchlab/PyRTL,nvandervoort/PyRTL,deekshadangwal/PyRTL,UCSBarchlab/PyRTL,nvandervoort/PyRTL,deekshadangwal/PyRTL | research/aes/mixcolumns.py | research/aes/mixcolumns.py | # mixcolumns.py
import sys
sys.path.append("../..")
import pyrtl
from pyrtl import *
def MixColumns(in_vector):
""" MixColumns round of AES.
Input: A single wirevector of bitwidth 128.
Output: A single wirevector of bitwidth 128.
"""
a0 = in_vector[120:128]
a1 = in_vector[112:120]
a2 = in_vector[104:112]
a3 = in_vector[96:104]
a4 = in_vector[88:96]
a5 = in_vector[80:88]
a6 = in_vector[72:80]
a7 = in_vector[64:72]
a8 = in_vector[56:64]
a9 = in_vector[48:56]
a10 = in_vector[40:48]
a11 = in_vector[32:40]
a12 = in_vector[24:32]
a13 = in_vector[16:24]
a14 = in_vector[8:16]
a15 = in_vector[0:8]
b0 = pyrtl.WireVector(bitwidth=8, name='b0')
b1 = pyrtl.WireVector(bitwidth=8, name='b1')
b2 = pyrtl.WireVector(bitwidth=8, name='b2')
b3 = pyrtl.WireVector(bitwidth=8, name='b3')
b4 = pyrtl.WireVector(bitwidth=8, name='b4')
b5 = pyrtl.WireVector(bitwidth=8, name='b5')
b6 = pyrtl.WireVector(bitwidth=8, name='b6')
b7 = pyrtl.WireVector(bitwidth=8, name='b7')
b8 = pyrtl.WireVector(bitwidth=8, name='b8')
b9 = pyrtl.WireVector(bitwidth=8, name='b9')
b10 = pyrtl.WireVector(bitwidth=8, name='b10')
b11 = pyrtl.WireVector(bitwidth=8, name='b11')
b12 = pyrtl.WireVector(bitwidth=8, name='b12')
b13 = pyrtl.WireVector(bitwidth=8, name='b13')
b14 = pyrtl.WireVector(bitwidth=8, name='b14')
b15 = pyrtl.WireVector(bitwidth=8, name='b15')
b0 <<= 2*a0 ^ 3*a1 ^ a2 ^ a3
b1 <<= 2*a1 ^ 3*a2 ^ a3 ^ a0
b2 <<= 2*a2 ^ 3*a3 ^ a0 ^ a1
b3 <<= 2*a3 ^ 3*a0 ^ a1 ^ a2
b4 <<= 2*a4 ^ 3*a5 ^ a6 ^ a7
b5 <<= 2*a5 ^ 3*a6 ^ a7 ^ a4
b6 <<= 2*a6 ^ 3*a7 ^ a4 ^ a5
b7 <<= 2*a7 ^ 3*a4 ^ a5 ^ a6
b8 <<= 2*a8 ^ 3*a9 ^ a10 ^ a11
b9 <<= 2*a9 ^ 3*a10 ^ a11 ^ a8
b10 <<= 2*a10 ^ 3*a11 ^ a8 ^ a9
b11 <<= 2*a11 ^ 3*a8 ^ a9 ^ a10
b12 <<= 2*a12 ^ 3*a13 ^ a14 ^ a15
b13 <<= 2*a13 ^ 3*a14 ^ a15 ^ a12
b14 <<= 2*a14 ^ 3*a15 ^ a12 ^ a13
b15 <<= 2*a15 ^ 3*a12 ^ a13 ^ a14
out_vector = pyrtl.WireVector(bitwidth=128, name='out_vector')
out_vector <<= pyrtl.concat(b0, b1, b2, b3,
b4, b5, b6, b7,
b8, b9, b10, b11,
b12, b13, b14, b15)
return out_vector
# Hardware build.
aes_input = pyrtl.Input(bitwidth=128, name='aes_input')
aes_output = pyrtl.Output(bitwidth=128, name='aes_output')
aes_output <<= MixColumns(aes_input)
print pyrtl.working_block()
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(1):
sim.step({aes_input: 0xdb135345f20a225c01010101c6c6c6c6})
sim_trace.render_trace(symbol_len=40, segment_size=1)
| bsd-3-clause | Python | |
cec17c0aaf794cdd108713642e0662c7eac7a020 | Create cesarEncription.py | ferreiro/Python,ferreiro/Python-Introduction-SQL,ferreiro/Python,ferreiro/Python,ferreiro/Python-Introduction-SQL,ferreiro/Python-Introduction-SQL | assigment1/cesarEncription.py | assigment1/cesarEncription.py |
# Chechinput: returns false is the inputed text
# has any numeric character
def checkInput(userInput):
return userInput.isalpha()
# CheckNumber: returns false is the inputed text
# has any numeric character
def checkNumber(userInput):
return userInput.isdigit()
def readMessage():
valid = False;
message = '';
while(not valid):
# Iterate until input message has not have any numeric element (number)
message = raw_input('Introduce your message: ');
valid = checkInput(message);
return message;
# Now, let the user inputs the desired shift number
# iterates until inputed number is correct.
def readNumber():
valid = False;
number = -1;
while(not valid):
number = raw_input('Introduce your shift Number: ');
valid = checkNumber(number);
if (not valid):
print 'Come on! this is not a number :P';
return number;
# Returns and object with a message and a number.
def userInput():
message = readMessage();
number = readNumber();
# Compose and object and returns to the user
userInput = {'message': message, 'number': number};
return userInput;
def simpleEncription(message, shiftNumber):
if (type(shiftNumber) is not int):
shiftNumber = int(shiftNumber); # Int casting of shiftNumber when typed is not expressed as integer. Sometimes may ocurr the program interprets a string...
auxChar = '';
encriptedMsg = list();
shiftNumber = shiftNumber % 26; # English alfhabet has 26 elements. when user shifs more than 26, then make the modulus! if not, an error will crash lines below.
# Convert each character of the array into a
# number, then sum the shif and finally convert this
# number into the alfabhet letter corresponding.
for char in message:
if (char == 'z'):
auxChar = ord('a') - 1;
elif (char == 'Z'):
auxChar = ord('A') - 1;
else:
auxChar = ord(char);
auxChar += shiftNumber;
encriptedChar = chr(auxChar);
encriptedMsg.append(encriptedChar);
return encriptedMsg;
userInput = userInput(); # Returns a valid message and shif number from user in a list.
message = userInput['message']; # Local variable for the message inputed by the user
number = userInput['number']; # Local variable for the number inputed by the user
encriptedMsgList = simpleEncription(message, number);
print encriptedMsgList
outputMsg = '';
for index, char in enumerate(encriptedMsgList):
outputMsg += char;
print outputMsg
| apache-2.0 | Python | |
fc35e902e4a41176e1860b5a42fb5ce51c3042f7 | Add scaleioutil | swevm/scaleio-py | scaleiopy/scaleioutil.py | scaleiopy/scaleioutil.py | import logging
class ScaleIOLogger:
instance = None
@classmethod
def get(cls):
if cls.instance is None:
cls.instance = cls()
return cls.instance
def __init__(self):
# How to use:
# loggerInstance = ScaleIOLogger.get()
# logger = loggerInstance.getLogger('DEBUG')
logging.basicConfig(format='%(asctime)s: %(levelname)s %(module)s:%(funcName)s | %(message)s',level=self._get_log_level(debugLevel))
self.logger = logging.getLogger(__name__)
self.logger.debug("Logger initialized!")
def getLogger(self, loglevel):
return _get_log_level(loglevel)
@staticmethod
def _get_log_level(level):
"""
small static method to get logging level
:param str level: string of the level e.g. "INFO"
:returns logging.<LEVEL>: appropriate debug level
"""
# default to DEBUG
if level is None or level == "DEBUG":
return logging.DEBUG
level = level.upper()
# Make debugging configurable
if level == "INFO":
return logging.INFO
elif level == "WARNING":
return logging.WARNING
elif level == "CRITICAL":
return logging.CRITICAL
elif level == "ERROR":
return logging.ERROR
elif level == "FATAL":
return logging.FATAL
else:
raise Exception("UnknownLogLevelException: enter a valid log level")
| apache-2.0 | Python | |
606b00e7f09e45e57cf6872c67db2d2af8aeff27 | Add augeas module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/modules/augeas_cfg.py | salt/modules/augeas_cfg.py | '''
Manages configuration files via augeas
'''
def __virtual__():
''' Only run this module if the augeas python module is installed '''
try:
from augeas import Augeas
_ = Augeas
except ImportError:
return False
else:
return "augeas"
def _recurmatch(path, aug):
'''
recursive generator providing the infrastructure for
augtools print behaviour.
This function is based on test_augeas.py from
Harald Hoyer <harald@redhat.com> in the python-augeas
repository
'''
if path:
clean_path = path.rstrip('/*')
yield (clean_path, aug.get(path))
for i in aug.match(clean_path + "/*"):
i = i.replace('!', '\!') # escape some dirs
for x in _recurmatch(i, aug):
yield x
def lstrip_word(string, prefix):
'''
Return a copy of the string after the specified prefix was removed
from the beginning of the string
'''
if string.startswith(prefix):
return string[len(prefix):]
return string
def get(path, value=''):
'''
Get a value for a specific augeas path
CLI Example::
salt '*' augeas.get /files/etc/hosts/1/ ipaddr
'''
from augeas import Augeas
aug = Augeas()
ret = {}
path = path.rstrip('/')
if value:
path += "/{0}".format(value.strip('/'))
try:
_match = aug.match(path)
except RuntimeError as err:
return {'error': str(err)}
if _match:
ret[path] = aug.get(path)
else:
ret[path] = '' # node does not exist
return ret
def setvalue(path, value):
'''
Set a value for a specific augeas path
CLI Example::
salt '*' augeas.setvalue /files/etc/hosts/1/canonical localhost
'''
from augeas import Augeas
aug = Augeas()
ret = {'retval': False}
try:
aug.set(path, unicode(value))
aug.save()
ret['retval'] = True
except ValueError as err:
ret['error'] = "Multiple values: " + str(err)
except IOError as err:
ret['error'] = str(err)
return ret
def match(path, value=''):
'''
Get matches for path expression
CLI Example::
salt '*' augeas.match /files/etc/services/service-name ssh
'''
from augeas import Augeas
aug = Augeas()
ret = {}
try:
matches = aug.match(path)
except RuntimeError:
return ret
for _match in matches:
if value and aug.get(_match) == value:
ret[_match] = value
elif not value:
ret[_match] = aug.get(_match)
return ret
def remove(path):
'''
Get matches for path expression
CLI Example::
salt '*' augeas.remove /files/etc/sysctl.conf/net.ipv4.conf.all.log_martians
'''
from augeas import Augeas
aug = Augeas()
ret = {'retval': False}
try:
count = aug.remove(path)
aug.save()
if count == -1:
ret['error'] = 'Invalid node'
else:
ret['retval'] = True
except (RuntimeError, IOError) as err:
ret['error'] = str(err)
ret['count'] = count
return ret
def ls(path):
'''
List the direct children of a node
CLI Example::
salt '*' augeas.ls /files/etc/passwd
'''
def _match(path):
''' Internal match function '''
try:
matches = aug.match(path)
except RuntimeError:
return {}
ret = {}
for _ma in matches:
ret[_ma] = aug.get(_ma)
return ret
from augeas import Augeas
aug = Augeas()
path = path.rstrip('/') + '/'
match_path = path + '*'
matches = _match(match_path)
ret = {}
for key, value in matches.iteritems():
name = lstrip_word(key, path)
if _match(key + '/*'):
ret[name + '/'] = value # has sub nodes, e.g. directory
else:
ret[name] = value
return ret
def tree(path):
'''
Returns recursively the complete tree of a node
CLI Example::
salt '*' augeas.tree /files/etc/
'''
from augeas import Augeas
aug = Augeas()
path = path.rstrip('/') + '/'
match_path = path
return dict([i for i in _recurmatch(match_path, aug)])
| apache-2.0 | Python | |
156d635653e1ec93fbaff7ee7c872a5f6035f9a8 | Add new-command.py to standard_commands | mnieber/dodo_commands | extra/standard_commands/new-command.py | extra/standard_commands/new-command.py | """Finds a directory or file inside the current project."""
from . import DodoCommand
from dodo_commands.framework.config import CommandPath
import os
import sys
script_src = """# noqa
from dodo_commands.default_commands.standard_commands import DodoCommand
class Command(DodoCommand): # noqa
help = ""
decorators = []
def add_arguments_imp(self, parser): # noqa
parser.add_argument('foo')
parser.add_argument(
'--bar',
required=True,
help=''
)
def handle_imp(self, foo, bar, **kwargs): # noqa
pass
"""
class Command(DodoCommand): # noqa
help = "Creates a new Dodo command."
def add_arguments_imp(self, parser): # noqa
"""
Entry point for subclassed commands to add custom arguments.
"""
parser.add_argument('name')
parser.add_argument(
'--next-to',
required=True,
help='Create the new command at the location of this command'
)
def handle_imp(self, name, next_to, **kwargs): # noqa
dest_path = None
command_path = CommandPath(self.get_config("/ROOT/project_dir"))
for item in command_path.items:
script_path = os.path.join(
item.full_path, next_to + ".py"
)
if os.path.exists(script_path):
dest_path = os.path.join(
item.full_path, name + ".py"
)
if not dest_path:
sys.stderr.write("Script not found: %s\n" % next_to)
return
with open(dest_path, "w") as f:
f.write(script_src)
print(dest_path)
| mit | Python | |
421bd355cb3d471ac61d608c9e39cc821b06089f | Create analyzefiles.py | suzannerohrback/somaticCNVpipeline,suzannerohrback/somaticCNVpipeline | bin/interpret/analyzefiles.py | bin/interpret/analyzefiles.py | #!/usr/bin/python
| mit | Python | |
4ec3cb1ddb08e14ba3c2ba169b4c5c47c779740a | Add new package: unuran (#8397) | mfherbst/spack,mfherbst/spack,LLNL/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,tmerrick1/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,krafczyk/spack,tmerrick1/spack,matthiasdiener/spack,matthiasdiener/spack,matthiasdiener/spack,krafczyk/spack,mfherbst/spack,matthiasdiener/spack,matthiasdiener/spack,krafczyk/spack,mfherbst/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack | var/spack/repos/builtin/packages/unuran/package.py | var/spack/repos/builtin/packages/unuran/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Unuran(AutotoolsPackage):
"""Universal Non-Uniform Random number generator."""
homepage = "http://statmath.wu.ac.at/unuran"
url = "http://statmath.wu.ac.at/unuran/unuran-1.8.1.tar.gz"
version('1.8.1', 'a5885baab53a2608c1d85517bf5d06a5')
variant('shared', default=True,
description="Enable the build of shared libraries")
variant('rngstreams', default=True,
description="Use RNGSTREAM library for uniform random generation")
variant('gsl', default=False,
description="Use random number generators from GNU Scientific Library")
depends_on('gsl', when="+gsl")
depends_on('rngstreams', when="+rngstreams")
def configure_args(self):
spec = self.spec
args = [
'--%s-shared' % ('enable' if '+shared' in spec else 'disable'),
'--with-urgn-default=%s' % (
'rngstream' if '+rngstreams' in spec else 'builtin'),
'--%s-urng-gsl' % (
'with' if '+gsl' in spec else 'without'),
'--%s-urng-rngstreams' % (
'with' if '+rngstreams' in spec else 'without')
]
return args
| lgpl-2.1 | Python | |
f111f106722afd3929073f78d9c52fd4e1d804e1 | add androcov_report script | nastya/droidbot,honeynet/droidbot,nastya/droidbot,lynnlyc/droidbot,honeynet/droidbot,nastya/droidbot,honeynet/droidbot,honeynet/droidbot,nastya/droidbot,lynnlyc/droidbot | evaluation_scripts/androcov_report.py | evaluation_scripts/androcov_report.py | # analyze androcov result
# giving the instrumentation.json generated by androcov and the logcat generated at runtime
__author__ = 'yuanchun'
import os
import re
import json
from datetime import datetime
# logcat regex, which will match the log message generated by `adb logcat -v threadtime`
LOGCAT_THREADTIME_RE = re.compile('^(?P<date>\S+)\s+(?P<time>\S+)\s+(?P<pid>[0-9]+)\s+(?P<tid>[0-9]+)\s+'
'(?P<level>[VDIWEFS])\s+(?P<tag>[^:]*):\s+(?P<content>.*)$')
class Androcov(object):
def __init__(self, androcov_dir):
self.androcov_dir = androcov_dir
self.all_methods = self._parse_all_methods()
def _parse_all_methods(self):
instrumentation_file_path = os.path.join(self.androcov_dir, "instrumentation.json")
instrumentation_detail = json.load(open(instrumentation_file_path))
return set(instrumentation_detail['allMethods'])
def gen_androcov_report(self, logcat_path):
"""
generate a coverage report
:param logcat_path:
:return:
"""
reached_methods, reached_timestamps = Androcov._parse_reached_methods(logcat_path)
unreached_methods = self.all_methods - reached_methods
report = {}
report['reached_methods_count'] = len(reached_methods)
report['unreached_methods_count'] = len(unreached_methods)
report['all_methods_count'] = len(self.all_methods)
report['coverage'] = "%.0f%%" % (100.0 * len(reached_methods) / len(self.all_methods))
report['uncoverage'] = "%.0f%%" % (100.0 * len(unreached_methods) / len(self.all_methods))
time_scale = reached_timestamps[-1] - reached_timestamps[0]
timestamp_count = {}
for timestamp in range(0, time_scale.total_seconds()+1):
timestamp_count[timestamp] = 0
for timestamp in reached_timestamps:
timestamp_count[int(timestamp)] += 1
for timestamp in range(1, time_scale.total_seconds()+1):
timestamp_count[timestamp] += timestamp_count[timestamp-1]
report['timestamp_count'] = timestamp_count
return report
@staticmethod
def _parse_reached_methods(logcat_path):
reached_methods = set()
reached_timestamps = []
log_msgs = open(logcat_path).readlines()
androcov_log_re = re.compile('^\[androcov\] reach \d+: (<.+>)$')
for log_msg in log_msgs:
log_data = Androcov.parse_log(log_msg)
log_content = log_data['content']
m = re.match(androcov_log_re, log_content)
if not m:
continue
reached_method = m.group(1)
if reached_method in reached_methods:
continue
reached_methods.add(reached_method)
reached_timestamps.append(log_data['datetime'])
return reached_methods, reached_timestamps
@staticmethod
def parse_log(log_msg):
"""
parse a logcat message
the log should be in threadtime format
@param log_msg:
@return:
"""
m = LOGCAT_THREADTIME_RE.match(log_msg)
if not m:
return None
log_dict = {}
date = m.group('date')
time = m.group('time')
log_dict['pid'] = m.group('pid')
log_dict['tid'] = m.group('tid')
log_dict['level'] = m.group('level')
log_dict['tag'] = m.group('tag')
log_dict['content'] = m.group('content')
datetime_str = "%s-%s %s" % (datetime.today().year, date, time)
log_dict['datetime'] = datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S.%f")
return log_dict
| mit | Python | |
ef62d0cca3f9f28cef6891b87da7b3d9e0ade953 | Add stub file | robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions,robjwells/adventofcode-solutions | 2018/python/2018_02.py | 2018/python/2018_02.py | """Advent of Code 2018 Day 2: Inventory Management System"""
import aoc_common
import pytest
DAY = 2
if __name__ == '__main__':
puzzle_input = aoc_common.load_puzzle_input(DAY)
| mit | Python | |
d5b1dd851b87542ff215bc74f74e3b2e76fc5894 | create traditional box score | jaebradley/nba_data | nba_data/data/traditional_box_score.py | nba_data/data/traditional_box_score.py | class TraditionalBoxScore:
def __init__(self, seconds_played, field_goals_made, field_goal_attempted,
three_point_field_goals_made, three_point_field_goal_attempted,
free_throws_made, free_throws_attempted, offensive_rebounds, defensive_rebounds, assists,
steals, blocks, turnovers, personal_fouls):
self.seconds_played = seconds_played
self.field_goals_made = field_goals_made
self.field_goal_attempted = field_goal_attempted
self.three_point_field_goals_made = three_point_field_goals_made
self.three_point_field_goal_attempted = three_point_field_goal_attempted
self.free_throws_made = free_throws_made
self.free_throws_attempted = free_throws_attempted
self.offensive_rebounds = offensive_rebounds
self.defensive_rebounds = defensive_rebounds
self.assists = assists
self.steals = steals
self.blocks = blocks
self.turnovers = turnovers
self.personal_fouls = personal_fouls
| mit | Python | |
c87f42579826cf236953bc955d15a9cc98c67d05 | Add Migration File this time. | ropable/statdev,xzzy/statdev,xzzy/statdev,brendanc-dpaw/statdev,parksandwildlife/statdev,ropable/statdev,parksandwildlife/statdev,xzzy/statdev,parksandwildlife/statdev,brendanc-dpaw/statdev | applications/migrations/0029_application_proposed_development_description.py | applications/migrations/0029_application_proposed_development_description.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0028_auto_20170329_1445'),
]
operations = [
migrations.AddField(
model_name='application',
name='proposed_development_description',
field=models.TextField(blank=True, null=True),
),
]
| apache-2.0 | Python | |
609e143589d89b5be167914b70de99658776745f | add redirection test | persiaAziz/trafficserver,duke8253/trafficserver,reveller/trafficserver,reveller/trafficserver,SolidWallOfCode/trafficserver,clearswift/trafficserver,chenglongwei/trafficserver,pbchou/trafficserver,taoyunxing/trafficserver,rahmalik/trafficserver,rpufky/trafficserver,PSUdaemon/trafficserver,bryancall/trafficserver,duke8253/trafficserver,PSUdaemon/trafficserver,persiaAziz/trafficserver,clearswift/trafficserver,SolidWallOfCode/trafficserver,PSUdaemon/trafficserver,dyrock/trafficserver,dyrock/trafficserver,reveller/trafficserver,chenglongwei/trafficserver,taoyunxing/trafficserver,chitianhao/trafficserver,reveller/trafficserver,duke8253/trafficserver,rahmalik/trafficserver,persiaAziz/trafficserver,chenglongwei/trafficserver,PSUdaemon/trafficserver,taoyunxing/trafficserver,bryancall/trafficserver,pbchou/trafficserver,SolidWallOfCode/trafficserver,PSUdaemon/trafficserver,rpufky/trafficserver,rahmalik/trafficserver,PSUdaemon/trafficserver,reveller/trafficserver,duke8253/trafficserver,clearswift/trafficserver,rahmalik/trafficserver,rpufky/trafficserver,rahmalik/trafficserver,PSUdaemon/trafficserver,vmamidi/trafficserver,dyrock/trafficserver,dyrock/trafficserver,reveller/trafficserver,rpufky/trafficserver,vmamidi/trafficserver,chitianhao/trafficserver,bryancall/trafficserver,persiaAziz/trafficserver,clearswift/trafficserver,PSUdaemon/trafficserver,taoyunxing/trafficserver,rpufky/trafficserver,bryancall/trafficserver,taoyunxing/trafficserver,vmamidi/trafficserver,duke8253/trafficserver,duke8253/trafficserver,davidbz/trafficserver,persiaAziz/trafficserver,SolidWallOfCode/trafficserver,chenglongwei/trafficserver,chenglongwei/trafficserver,SolidWallOfCode/trafficserver,rpufky/trafficserver,davidbz/trafficserver,pbchou/trafficserver,pbchou/trafficserver,chitianhao/trafficserver,davidbz/trafficserver,vmamidi/trafficserver,pbchou/trafficserver,dyrock/trafficserver,dyrock/trafficserver,chitianhao/trafficserver,davidbz/t
rafficserver,reveller/trafficserver,reveller/trafficserver,chenglongwei/trafficserver,rahmalik/trafficserver,chitianhao/trafficserver,duke8253/trafficserver,clearswift/trafficserver,chenglongwei/trafficserver,chenglongwei/trafficserver,persiaAziz/trafficserver,clearswift/trafficserver,reveller/trafficserver,clearswift/trafficserver,chitianhao/trafficserver,chitianhao/trafficserver,PSUdaemon/trafficserver,taoyunxing/trafficserver,pbchou/trafficserver,rahmalik/trafficserver,davidbz/trafficserver,bryancall/trafficserver,taoyunxing/trafficserver,taoyunxing/trafficserver,rpufky/trafficserver,bryancall/trafficserver,SolidWallOfCode/trafficserver,vmamidi/trafficserver,taoyunxing/trafficserver,davidbz/trafficserver,rpufky/trafficserver,rpufky/trafficserver,persiaAziz/trafficserver,clearswift/trafficserver,persiaAziz/trafficserver,dyrock/trafficserver,clearswift/trafficserver,SolidWallOfCode/trafficserver,rahmalik/trafficserver,vmamidi/trafficserver,rahmalik/trafficserver | ci/new_tsqa/tests/test_redirection.py | ci/new_tsqa/tests/test_redirection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import logging
import helpers
import tsqa.test_cases
import tsqa.utils
import tsqa.endpoint
log = logging.getLogger(__name__)
class TestRedirection(helpers.EnvironmentCase):
@classmethod
def setUpEnv(cls, env):
cls.configs['records.config']['CONFIG'].update({
'proxy.config.http.redirection_enabled': 1,
'proxy.config.http.number_of_redirections': 10
})
cls.configs['remap.config'].add_line('map / http://httpbin.org');
def test_redirection(self):
server_ports = self.configs['records.config']['CONFIG']['proxy.config.http.server_ports']
# By default Requests will perform location redirection
# Disable redirection handling with the allow_redirects parameter
r = requests.get('http://127.0.0.1:{0}/redirect/9'.format(server_ports), allow_redirects=False)
self.assertEqual(r.status_code, 200)
r = requests.get('http://127.0.0.1:{0}/redirect/10'.format(server_ports), allow_redirects=False)
self.assertEqual(r.status_code, 302)
| apache-2.0 | Python | |
2d8b0266dc1bf0eadc2737d35844f5fb45bedd12 | Add functional tests for loading by name | little-dude/nose2,ezigman/nose2,ojengwa/nose2,ptthiem/nose2,ptthiem/nose2,leth/nose2,leth/nose2,little-dude/nose2,ojengwa/nose2,ezigman/nose2 | nose2/tests/functional/test_loading.py | nose2/tests/functional/test_loading.py | """
pkg1
pkg1.test
pkg1.test.test_things
pkg1.test.test_things.test_func
pkg1.test.test_things.test_gen
pkg1.test.test_things.test_gen:3
pkg1.test.test_things.SomeTests
pkg1.test.test_things.SomeTests.test_ok
# generator method
# generator method index
# param func
# param func index
# param method
# param method index
"""
from nose2.tests._common import FunctionalTestCase
class TestLoadTestsFromNames(FunctionalTestCase):
def test_module_name(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'Ran 16 tests' in stderr, stderr
def test_function_name(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things.test_func')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'test_func' in stderr
assert 'Ran 1 test' in stderr
assert 'OK' in stderr
def test_generator_function_name(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things.test_gen')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'test_gen' in stderr
assert 'Ran 5 tests' in stderr
def test_generator_function_index(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things.test_gen:3')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'test_gen' in stderr
assert 'Ran 1 test' in stderr
def test_generator_function_index_1_based(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things.test_gen:1')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'test_gen' in stderr
assert 'Ran 1 test' in stderr
assert 'OK' in stderr
def test_testcase_name(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things.SomeTests')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'SomeTests' in stderr, stderr
assert 'Ran 4 tests' in stderr, stderr
def test_testcase_method(self):
proc = self.runIn(
'scenario/tests_in_package',
'-v',
'pkg1.test.test_things.SomeTests.test_ok')
stdout, stderr = proc.communicate()
self.assertEqual(proc.poll(), 0, stderr)
assert 'SomeTests' in stderr, stderr
assert 'Ran 1 test' in stderr, stderr
assert 'OK' in stderr, stderr
| bsd-2-clause | Python | |
0a610a44f0d20170ba9c3e6f9ec4eafaac937be1 | Add unit test for Pattern filterer. | 4degrees/mill,4degrees/sawmill | test/unit/filterer/test_pattern.py | test/unit/filterer/test_pattern.py | # :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
from bark.filterer.pattern import Pattern
def test_missing_key_passes():
'''Test log record with missing key passes.'''
log = Log()
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is False
def test_non_string_key_fails():
'''Test log record with non-string key fails.'''
log = Log(name=None)
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is True
def test_include_mode():
'''Test only logs with matching value pass when mode is INCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.INCLUDE)
assert filterer.filter(log) is False
log = Log(name='bark.other.one')
assert filterer.filter(log) is True
def test_exclude_mode():
'''Test only logs with matching value fail when mode is EXCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
log = Log(name='bark.other.one')
assert filterer.filter(log) is False
def test_different_key():
'''Test using key other than name.'''
log = Log()
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='A message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='Another message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is True
log = Log(message='A message')
filterer = Pattern('A message', key='message', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
| apache-2.0 | Python | |
ef65c5eefdcbb21b83504710e3131affbeb88c88 | Create map-reduce-advanced-count-number-of-friends.py | fermin-silva/hackerrank,fermin-silva/hackerrank | databases/nosql_xml_mapreduce/map-reduce-advanced-count-number-of-friends.py | databases/nosql_xml_mapreduce/map-reduce-advanced-count-number-of-friends.py | import sys
from collections import OrderedDict
class MapReduce:
def __init__(self):
self.intermediate = OrderedDict()
self.result = []
def emitIntermediate(self, key, value):
self.intermediate.setdefault(key, [])
self.intermediate[key].append(value)
def emit(self, value):
self.result.append(value)
def execute(self, data, mapper, reducer):
for record in data:
mapper(record)
for key in self.intermediate:
reducer(key, self.intermediate[key])
self.result.sort()
for item in self.result:
print "{\"key\":\""+item[0]+"\",\"value\":\"" + str(item[1]) + "\"}"
mapReducer = MapReduce()

def mapper(record):
    # Each input line is an edge "a b": the friendship counts for both ends.
    fields = record.split()
    mapReducer.emitIntermediate(fields[0], fields[1])
    mapReducer.emitIntermediate(fields[1], fields[0])

def reducer(key, list_of_values):
    # A person's friend count is simply the size of their bucket.
    mapReducer.emit((key, len(list_of_values)))

if __name__ == '__main__':
    inputData = [line for line in sys.stdin]
    mapReducer.execute(inputData, mapper, reducer)
| mit | Python | |
517cb7c66f28e977bf44b7013846f50af8f673fb | Create QiClient.py | osisoft/Qi-Samples,osisoft/Qi-Samples,osisoft/Qi-Samples,osisoft/Qi-Samples,osisoft/Qi-Samples,osisoft/Qi-Samples | Basic/Python/QiPy/Python2/QiClient.py | Basic/Python/QiPy/Python2/QiClient.py | apache-2.0 | Python | ||
# International Morse code table: uppercase letter or digit -> dot/dash string.
CODE = {'A': '.-', 'B': '-...', 'C': '-.-.',
        'D': '-..', 'E': '.', 'F': '..-.',
        'G': '--.', 'H': '....', 'I': '..',
        'J': '.---', 'K': '-.-', 'L': '.-..',
        'M': '--', 'N': '-.', 'O': '---',
        'P': '.--.', 'Q': '--.-', 'R': '.-.',
        'S': '...', 'T': '-', 'U': '..-',
        'V': '...-', 'W': '.--', 'X': '-..-',
        'Y': '-.--', 'Z': '--..',
        '0': '-----', '1': '.----', '2': '..---',
        '3': '...--', '4': '....-', '5': '.....',
        '6': '-....', '7': '--...', '8': '---..',
        '9': '----.'
        }
def main():
msg = raw_input('MESSAGE: ')
for char in msg:
if char == ' ' :
print ' '
else :
print CODE[char.upper()],
raw_input("When done, press [ENTER]")
if __name__ == "__main__":
main()
| mit | Python | |
4a7b60d47a20084867015490cc52f3c5967b979f | add test file | yeatmanlab/pyAFQ,yeatmanlab/pyAFQ,arokem/pyAFQ,arokem/pyAFQ | AFQ/utils/tests/test_conversions.py | AFQ/utils/tests/test_conversions.py | import numpy as np
import numpy.testing as npt
import AFQ.data as afd
from AFQ.utils.conversion import matlab_tractography, matlab_mori_groups
import os
def test_matlab_tractography():
    """The known two-streamline MATLAB fixture loads as two streamlines."""
    template = afd.read_mni_template()
    sft = matlab_tractography("AFQ/tests/data/WholeBrainFG_test.mat", template)
    npt.assert_equal(len(sft.streamlines), 2)
def test_matlab_mori_groups():
    """The Mori-groups fixture yields 20 groups; CST_R has two streamlines."""
    template = afd.read_mni_template()
    groups = matlab_mori_groups("AFQ/tests/data/MoriGroups_Test.mat", template)
    npt.assert_equal(len(groups.keys()), 20)
    npt.assert_equal(len(groups['CST_R'].streamlines), 2)
| bsd-2-clause | Python | |
c64a687f738cefd6f9461f487e76a3920d0f652c | Add new example | eandersson/amqpstorm,eandersson/amqpstorm,eandersson/amqp-storm | examples/consume_queue_until_empty.py | examples/consume_queue_until_empty.py | import logging
from amqpstorm import Connection
logging.basicConfig(level=logging.DEBUG)


def consume_until_queue_is_empty():
    """Drain 'simple_queue' with basic.get, acking each message, until the
    broker returns nothing."""
    with Connection('127.0.0.1', 'guest', 'guest') as connection:
        with connection.channel() as channel:
            while True:
                message = channel.basic.get('simple_queue')
                if message:
                    print(message.body)
                    message.ack()
                else:
                    print('Queue is empty')
                    break


if __name__ == '__main__':
    consume_until_queue_is_empty()
| mit | Python | |
f09ee3772d6e15a104af284ed6864005cf8450ef | Add example from listing 11.4 | oysstu/pyopencl-in-action | ch11/radix_sort8.py | ch11/radix_sort8.py | """
Listing 11.4: An eight-element radix sort
"""
from io import open
import numpy as np
import pyopencl as cl
import utility
NUM_SHORTS = 8

# OpenCL kernel (Listing 11.4): three-pass binary radix sort of a single
# ushort8 vector, using shuffle2 to build each pass's permutation.
kernel_src = '''
__kernel void radix_sort8(__global ushort8 *global_data) {

   typedef union {
      ushort8 vec;
      ushort array[8];
   } vec_array;

   uint one_count, zero_count;
   uint cmp_value = 1;
   vec_array mask, ones, data;

   data.vec = global_data[0];

   /* Rearrange elements according to bits */
   for(int i=0; i<3; i++) {
      zero_count = 0;
      one_count = 0;

      /* Iterate through each element in the input vector */
      for(int j = 0; j < 8; j++) {
         if(data.array[j] & cmp_value)
            /* Place element in ones vector */
            ones.array[one_count++] = data.array[j];
         else {
            /* Increment number of elements with zero */
            mask.array[zero_count++] = j;
         }
      }

      /* Create sorted vector */
      for(int j = zero_count; j < 8; j++)
         mask.array[j] = 8 - zero_count + j;
      data.vec = shuffle2(data.vec, ones.vec, mask.vec);

      cmp_value <<= 1;
   }

   global_data[0] = data.vec;
}
'''

# Get device and context, create command queue and program.
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)

# Build program in the specified context using the kernel source code.
prog = cl.Program(context, kernel_src)
try:
    prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except Exception:
    # Fix: narrowed from a bare ``except:``; the build log is still dumped
    # and the original exception re-raised so the failure stays visible.
    print('Build log:')
    print(prog.get_build_info(dev, cl.program_build_info.LOG))
    raise

# Host data: the shorts 0..NUM_SHORTS-1 in random order.
data = np.arange(start=0, stop=NUM_SHORTS, dtype=np.uint16)
np.random.shuffle(data)
print('Input: ' + str(data))

mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)

# Execute kernel: radix_sort8(__global ushort8 *global_data)
kernel = prog.radix_sort8
kernel.set_arg(0, data_buffer)
cl.enqueue_task(queue, kernel)

# Blocking copy back to the host, then show the sorted vector.
cl.enqueue_copy(queue, dest=data, src=data_buffer, is_blocking=True)
print('Output: ' + str(data))
| mit | Python | |
75a0cbbd5af597c6683b6644659780c3076b835e | Disable test_nonlocal_symbol unit test | tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,arborh/tensorflow,sarvex/tensorflow,annarev/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,gunan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,arborh/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,Intel-Corporation/tensorflow,renyi533/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,cxxgtxy/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,davidzchen/tensorflow,Intel-tensorflow/tensorflow,gunan/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,gunan/tensorflow,sarvex/tensorflow,annarev/tensorflow,tensorflow/tensorflow,jhseu/tensorflow,aam-at/tensorflow,aam-at/tensorflow,arborh/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,ppwwyyxx/tensorflow,frreiss/tensorflow-fred,chemelnucfin/tensorflow,frreiss/tensorflow-fred,petewarden/tensorflow,Intel-Corporation/tensorflow,ppwwyyxx/tensorflow,renyi533/tensorflow,Intel-Corporation/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,petewarden/tensorflow,gunan/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,chemelnucfin/tensorflow,petewarden/tensorflow,xzturn/tensorflow,yongtang/tensorflow,DavidNorman/tensorflow,ppwwyyxx/tensorflow,arborh/tensorflow,ppwwyyxx/tensorflow,sarvex/tensorflow,xzturn/tensorflow,petewarden/tensorflow,davidzchen/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,renyi533/tensorflow,davidzchen/tensorflow,cxxgtxy/tensorflow,ppwwyyxx/tensorflow,karllessard/tensorflow,jhse
u/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,renyi533/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,karllessard/tensorflow,aldian/tensorflow,petewarden/tensorflow,petewarden/tensorflow,ppwwyyxx/tensorflow,xzturn/tensorflow,sarvex/tensorflow,jhseu/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,ppwwyyxx/tensorflow,adit-chandra/tensorflow,sarvex/tensorflow,karllessard/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,adit-chandra/tensorflow,annarev/tensorflow,arborh/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,renyi533/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,freedomtan/tensorflow,aldian/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,adit-chandra/tensorflow,ppwwyyxx/tensorflow,cxxgtxy/tensorflow,xzturn/tensorflow,yongtang/tensorflow,xzturn/tensorflow,Intel-Corporation/tensorflow,davidzchen/tensorflow,paolodedios/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,karllessard/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,chemelnucfin/tensorflow,freedomtan/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gunan/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,annarev/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,DavidNorman/tensorflow,davidzchen/tensorflow,arborh/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,renyi533/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-pywrap_saved_model,adit-chandra/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jhseu/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,gunan/tensorflow,petewarden/tensorflow,che
melnucfin/tensorflow,cxxgtxy/tensorflow,paolodedios/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_saved_model,DavidNorman/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow,gunan/tensorflow,adit-chandra/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,renyi533/tensorflow,Intel-tensorflow/tensorflow,aldian/tensorflow,DavidNorman/tensorflow,aldian/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,chemelnucfin/tensorflow,aam-at/tensorflow,renyi533/tensorflow,adit-chandra/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,adit-chandra/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,annarev/tensorflow,tensorflow/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,renyi533/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,annarev/tensorflow,paolodedios/tensorflow,DavidNorman/tensorflow,davidzchen/tensorflow,sarvex/tensorflow,ppwwyyxx/tensorflow,adit-chandra/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,gunan/tensorflow,xzturn/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,adit-chandra/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,jhseu/tensorflow,DavidNorman/tensorflow,jhseu/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,petewarden/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,renyi533/tensorflow,arborh/tensorflow,aam-at/tensorflow,cxxgtxy/tensorflow,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,frreiss/tensorflow-fred,DavidNorman/tensorflow,annarev/tensorflow,arborh/tensorflow,tensor
flow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,freedomtan/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,adit-chandra/tensorflow,gunan/tensorflow,davidzchen/tensorflow,DavidNorman/tensorflow,annarev/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,chemelnucfin/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,aldian/tensorflow,Intel-Corporation/tensorflow,adit-chandra/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,xzturn/tensorflow,frreiss/tensorflow-fred,arborh/tensorflow,DavidNorman/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,arborh/tensorflow,gunan/tensorflow,jhseu/tensorflow,ppwwyyxx/tensorflow,xzturn/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,jhseu/tensorflow,aam-at/tensorflow,gautam1858/tensorflow,arborh/tensorflow,aldian/tensorflow,frreiss/tensorflow-fred,xzturn/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,xzturn/tensorflow,aam-at/tensorflow,jhseu/tensorflow,karllessard/tensorflow,petewarden/tensorflow,jhseu/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/python/autograph/pyct/static_analysis/activity_py3_test.py | tensorflow/python/autograph/pyct/static_analysis/activity_py3_test.py | # python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct.static_analysis import activity_test
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
NodeAnno = annos.NodeAnno
class ActivityAnalyzerTest(activity_test.ActivityAnalyzerTestBase):
  """Tests which can only run in Python 3.6 or later versions."""

  def test_nonlocal_symbol(self):
    # TODO(b/137761188): Remove this skipTest once fixed.
    self.skipTest('Annotation syntax is not recognized by Python 3.5')
    nonlocal_a = 3
    nonlocal_b = 13

    def test_fn(c):
      nonlocal nonlocal_a
      nonlocal nonlocal_b
      nonlocal_a = nonlocal_b + c

    node, _ = self._parse_and_analyze(test_fn)
    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    # nonlocal_b and c are read; nonlocal_a is written.
    self.assertScopeIs(body_scope, ('nonlocal_b', 'c'), ('nonlocal_a',))

  def test_annotated_assign(self):
    b = int

    def test_fn(c):
      a: b = c
      return a

    node, _ = self._parse_and_analyze(test_fn)
    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('b', 'c', 'a'), ('a',))
    # The annotated assignment itself reads the annotation and the value.
    ann_assign_scope = anno.getanno(node.body[0], anno.Static.SCOPE)
    self.assertScopeIs(ann_assign_scope, ('b', 'c'), ('a',))


if __name__ == '__main__':
  test.main()
| # python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for activity module, that only run in Python 3."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct.static_analysis import activity_test
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.platform import test
NodeAnno = annos.NodeAnno
class ActivityAnalyzerTest(activity_test.ActivityAnalyzerTestBase):
  """Tests which can only run in Python 3."""

  def test_nonlocal_symbol(self):
    nonlocal_a = 3
    nonlocal_b = 13

    def test_fn(c):
      nonlocal nonlocal_a
      nonlocal nonlocal_b
      nonlocal_a = nonlocal_b + c

    node, _ = self._parse_and_analyze(test_fn)
    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    # nonlocal_b and c are read; nonlocal_a is written.
    self.assertScopeIs(body_scope, ('nonlocal_b', 'c'), ('nonlocal_a',))

  def test_annotated_assign(self):
    b = int

    def test_fn(c):
      a: b = c
      return a

    node, _ = self._parse_and_analyze(test_fn)
    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    self.assertScopeIs(body_scope, ('b', 'c', 'a'), ('a',))
    # The annotated assignment itself reads the annotation and the value.
    ann_assign_scope = anno.getanno(node.body[0], anno.Static.SCOPE)
    self.assertScopeIs(ann_assign_scope, ('b', 'c'), ('a',))


if __name__ == '__main__':
  test.main()
| apache-2.0 | Python |
f5271083eb9f90fba51bea91126d5eb7005f7c51 | add script to delete webhooks | Ziggeo/ZiggeoPythonSdk,Ziggeo/ZiggeoPythonSdk | demos/webhooks_delete.py | demos/webhooks_delete.py | import sys
from Ziggeo import Ziggeo
# Require API token, private key and webhook URL on the command line.
if len(sys.argv) < 4:
    print ("Error\n")
    print ("Usage: $>python webhooks_delete.py YOUR_API_TOKEN YOUR_PRIVATE_KEY WEBHOOK_URL \n")
    print ("Example: $>python webhooks_delete.py 1234567890abcdef 1234567890abcdef http://yoursite.com \n")
    sys.exit()

api_token = sys.argv[1]
private_key = sys.argv[2]
target_url = sys.argv[3]

# Delete the webhook registered for the given URL.
ziggeo = Ziggeo(api_token, private_key)
arguments = {'target_url': target_url}
ziggeo.webhooks().delete(arguments)
{
  'targets': [{
    'target_name': 'pty',
    'type': 'loadable_module',
    'product_extension': 'node',
    'product_prefix': '',
    'include_dirs': [
      './src'
    ],
    'sources': [
      'src/pty.cc'
    ],
    'libraries': [
      '-lutil'
    ]
  }]
}
| mit | Python | |
d98db37dc70a1126de371bf64e89cc4f20e03511 | Create repr.py | akshaynagpal/python_snippets,akshaynagpal/python_web_crawler | repr.py | repr.py | """
One useful class method to override is the built-in __repr__() method, which is short for representation; by providing a return
value in this method, we can tell Python how to represent an object of our class
"""
class Point3D(object):
    """A 3-D point whose repr/print form is ``(x, y, z)``."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def __repr__(self):
        # Overriding __repr__ controls how Python represents the object,
        # e.g. when it is printed.
        return "(%d, %d, %d)" % (self.x, self.y, self.z)
my_point = Point3D(1,2,3)
print my_point
| mit | Python | |
e7faa99d9816745338ada38d1a7d974bf3a739ae | Create pretty table of tie averages + function for pretty averages | alexmilesyounger/ds_basics | s5v3.py | s5v3.py | from s5v2 import *
from prettytable import PrettyTable
def my_table():
    """Print a two-column PrettyTable of average price per tie style."""
    # (label, tie list) pairs in the original display order.
    styles = [
        ('Print', print_ties),
        ('Solid', solid_ties),
        ('Paisley', paisley_ties),
        ('Striped', striped_ties),
        ('Gucci', gucci_ties),
    ]
    table = PrettyTable(['Style', 'Average Price'])
    for label, ties in styles:
        table.add_row([label, pretty_average(ties)])
    print(table)
def pretty_average(my_number):
    """Return find_average(my_number) formatted as a dollar string, e.g. '$12.34'."""
    # Format spec kept verbatim: fixed-point with two decimal places.
    return "${:03.2f}".format(find_average(my_number))
# my_table() # run the function | mit | Python | |
dd9c96c7b12221029b7ea1a4f9748106520bd7a6 | add syntax checker/linter/gotype support | dlclark/GoSublime,alexmullins/GoSublime,Mistobaan/GoSublime,FWennerdahl/GoSublime,cdht/GoSublime,DisposaBoy/GoSublime-next,anacrolix/GoSublime,justinfx/GoSublime,FWennerdahl/GoSublime,allgeek/GoSublime,Mistobaan/GoSublime,DisposaBoy/GoSublime-next,simman/GoSublime,alexmullins/GoSublime,allgeek/GoSublime,cdht/GoSublime,justinfx/GoSublime,FWennerdahl/GoSublime,Mistobaan/GoSublime,DisposaBoy/GoSublime-next,nathany/GoSublime,cdht/GoSublime,DisposaBoy/GoSublime,nathany/GoSublime,simman/GoSublime,DisposaBoy/GoSublime,allgeek/GoSublime,simman/GoSublime,anacrolix/GoSublime,anacrolix/GoSublime,nathany/GoSublime,alexmullins/GoSublime,dlclark/GoSublime | gslint.py | gslint.py | import sublime, sublime_plugin
import gscommon as gs
import re, threading
LINE_PAT = re.compile(r':(\d+):(\d+):\s+(.+)\s*$', re.MULTILINE)
class GsLint(sublime_plugin.EventListener):
    """Background Go linter: debounces buffer edits, runs the configured
    lint command (default ``gotype``) and outlines reported error lines.

    Fix: ``lint`` assigned ``flags`` three times in a row, so the first two
    values were dead; only the final ``DRAW_OUTLINED`` value was ever used.
    The dead assignments are removed (behavior unchanged).
    """

    # Pending-callback counter: lint() only runs when the latest timeout fires.
    rc = 0
    # view.id() -> {row: error message} for the status-bar display.
    errors = {}

    def on_selection_modified(self, view):
        """Show the caret line's recorded error (if any) in the status bar."""
        sel = view.sel()[0].begin()
        if view.score_selector(sel, 'source.go') > 0:
            line = view.rowcol(sel)[0]
            msg = self.errors.get(view.id(), {}).get(line, '')
            view.set_status('GsLint', ('GsLint: ' + msg) if msg else '')

    def on_modified(self, view):
        """Schedule a debounced lint pass after each modification."""
        self.rc += 1
        pos = view.sel()[0].begin()
        scopes = view.scope_name(pos).split()
        if 'source.go' in scopes:
            # Skip linting while typing inside strings or comments.
            should_run = (
                'string.quoted.double.go' not in scopes and
                'string.quoted.single.go' not in scopes and
                'string.quoted.raw.go' not in scopes and
                'comment.line.double-slash.go' not in scopes and
                'comment.block.go' not in scopes
            )
            def cb():
                self.lint(view)
            if should_run:
                sublime.set_timeout(cb, int(gs.setting('gslint_timeout', 500)))
            else:
                # we want to cleanup if e.g. settings changed or we caused an
                # error entering an excluded scope
                sublime.set_timeout(cb, 1000)

    def on_load(self, view):
        self.on_modified(view)

    def lint(self, view):
        """Run the lint command on the whole buffer and mark error regions."""
        self.rc -= 1
        if self.rc == 0:
            cmd = gs.setting('gslint_cmd', 'gotype')
            if cmd:
                _, err = gs.runcmd([cmd], view.substr(sublime.Region(0, view.size())))
            else:
                err = ''
            lines = LINE_PAT.findall(err)
            regions = []
            view_id = view.id()
            self.errors[view_id] = {}
            if lines:
                for m in lines:
                    line, start, err = int(m[0])-1, int(m[1])-1, m[2]
                    self.errors[view_id][line] = err
                    lr = view.line(view.text_point(line, start))
                    regions.append(sublime.Region(lr.begin() + start, lr.end()))
            if regions:
                flags = sublime.DRAW_OUTLINED
                view.add_regions('GsLint-errors', regions, 'invalid.illegal', 'cross', flags)
            else:
                view.erase_regions('GsLint-errors')
            self.on_selection_modified(view)
| mit | Python | |
0e681d5b5e0d23517a6f050d168e1e86de9eb074 | Add unit test for utils.termcolor (closes #4) | RootForum/magrathea,RootForum/magrathea | test/t_utils/test_termcolor.py | test/t_utils/test_termcolor.py | # -*- coding: utf-8 -*-
"""
test.t_utils.test_termcolor
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2014 by the RootForum.org team, see AUTHORS.
:license: MIT License, see LICENSE for details.
"""
from unittest import TestCase
from magrathea.utils.termcolor import supports_color
class TestMagratheaUtilsTermColor(TestCase):
    """
    Unit tests for :py:mod:`magrathea.utils.termcolor`
    """

    def test_01(self):
        """
        Test Case 01:
        Test return value of :py:func:`~magrathea.utils.termcolor.supports_color`.
        Test is passed if return value is of type bool.
        """
        self.assertIsInstance(supports_color(), bool)
| mit | Python | |
9b2166c62b312baf7769279ca56b02eca3ea0078 | check host smallRNA end point | shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl | lib/SmallRNA/smallRNAEndPosition.py | lib/SmallRNA/smallRNAEndPosition.py | import sys
import gzip
import os
import logging
import argparse
import re
from Bio import SeqIO
from Bio.Seq import Seq
from CountXmlUtils import readCountXmlFeatures
from Feature import FeatureItem, FeatureGroup
from audioop import reverse
# NOTE(review): DEBUG is committed as 1, so the hard-coded cluster paths
# below are used and the command-line arguments are ignored -- confirm
# this is intentional before running in production.
DEBUG = 1

if DEBUG:
    #inputFile="/scratch/cqs/shengq2/vickers/20170628_smallRNA_3018-KCV-77_78_79_mouse_v3/temp/temp.list"
    inputFile="/scratch/cqs/shengq2/vickers/20170628_smallRNA_3018-KCV-77_78_79_mouse_v3/temp/smallRNA_1mm_KCV_3018_77_78_79.filelist"
    outputFile="/scratch/cqs/shengq2/vickers/20170628_smallRNA_3018-KCV-77_78_79_mouse_v3/temp/temp.endpoint.txt"
else:
    parser = argparse.ArgumentParser(description="Generate smallRNA NTA read for Fastq file.",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', '--input', action='store', nargs='?', help='Input xml file list')
    parser.add_argument('-o', '--output', action='store', nargs='?', help="Output endpoint file")
    args = parser.parse_args()
    print(args)
    inputFile = args.input
    outputFile = args.output

logger = logging.getLogger('smallRNAEndPosition')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
with open(inputFile, "r") as ir:
files = [line.rstrip().split('\t') for line in ir]
logger.info("Write result ...")
fileFeatures = []
for file in files:
logger.info("Reading feature-query in %s ..." % file[1])
fileFeatures.append([file[0], readCountXmlFeatures(file[1])])
groupNames = set()
for features in fileFeatures:
for mf in features[1]:
for f in mf.Features:
groupNames.add(f.Name.split(':')[0])
groupNames=sorted(groupNames)
groupFeatureMap = {}
for gname in groupNames:
gmap = {};
groupFeatureMap[gname] = gmap
for features in fileFeatures:
mappedFeatures = features[1]
gFeatures = [fg for fg in mappedFeatures if fg.Features[0].Name.startswith(gname)]
for idx in range(0, min(10, len(gFeatures))):
gf = gFeatures[idx].Features[0]
gfCount = sum(ep[1] for ep in gf.EndPoints)
if gf.Name in gmap:
gmap[gf.Name] = gmap[gf.Name] + gfCount
else:
gmap[gf.Name] = gfCount
with open(outputFile, "w") as sw:
sw.write("Sample\tGroup\tFeature\tSampleRank\tOverallRank\tTotalCount\tEndpointCount\tEndpoint\tRelativeEndpoint\tPercentage\n")
for groupName in groupNames:
gmap = groupFeatureMap[groupName]
featureRankMap = {}
overallrank = 0
for fname, fcount in sorted(gmap.iteritems(), key=lambda (k,v): (v,k), reverse=True):
overallrank = overallrank + 1
featureRankMap[fname] = overallrank
for features in fileFeatures:
sampleName = features[0]
mappedFeatures = features[1]
logger.info("output %s in %s ..." % (groupName, sampleName))
gFeatures = [fg for fg in mappedFeatures if fg.Features[0].Name.startswith(groupName)]
for idx, gfeature in enumerate(gFeatures):
gf = gfeature.Features[0]
featureName = gf.Name
if featureName not in featureRankMap:
break
groupRank = featureRankMap[featureName]
endpoints = gf.EndPoints
positions = sorted(set(ep[0] for ep in endpoints))
totalCount = sum(ep[1] for ep in endpoints)
maxCount=max(ep[1] for ep in endpoints)
maxPosition = [ep[0] for ep in endpoints if ep[1] == maxCount][0]
for position in positions:
positionCount = sum(ep[1] for ep in endpoints if ep[0] == position)
sw.write("%s\t%s\t%s\t%d\t%d\t%d\t%d\t%d\t%d\t%.2f\n" %(sampleName, groupName, featureName, (idx+1), groupRank, totalCount, positionCount, position, position - maxPosition, (positionCount * 1.0) / totalCount))
logger.info("Result has been saved to %s" % outputFile)
| apache-2.0 | Python | |
6aa2621afd8253036834f48da6562c25deed54ec | Add tests | jasontbradshaw/gapbuffer | test_pe.py | test_pe.py | #!/usr/bin/env python
import unittest
import pe
class TestGapBuffer(unittest.TestCase):
def setUp(self):
# correct content for each typecode
self.valid_content = {
"c": str("abc"),
"b": [0, 1, 2],
"B": [0, 1, 2],
"u": unicode("abc"),
"h": [0, 1, 2],
"H": [0, 1, 2],
"i": [0, 1, 2],
"I": [0L, 1L, 2L],
"l": [0L, 1L, 2L],
"L": [0L, 1L, 2L],
"f": [0.0, 1.0, 2.0],
"d": [0.0, 1.0, 2.0]
}
def test_init_empty(self):
"""Can we init for every typecode without exceptions?"""
for typecode in self.valid_content:
pe.GapBuffer(typecode)
def test_init_content(self):
"""Can we init for every typecode with valid initial content?"""
for typecode in self.valid_content:
pe.GapBuffer(typecode, self.valid_content[typecode])
def test_init_content_generator(self):
"""Can we init for every typecode with valid initial content generator?
"""
for typecode in self.valid_content:
pe.GapBuffer(typecode, (i for i in self.valid_content[typecode]))
def test_init_content_empty(self):
"""Can we init for every typecode with zero-length initial content?"""
for typecode in self.valid_content:
b = pe.GapBuffer(typecode, [])
self.assertEqual(len(b), 0)
def test_init_content_empty_generator(self):
"""Can we init for every typecode with an empty initial content
generator?
"""
for typecode in self.valid_content:
pe.GapBuffer(typecode, (i for i in []))
def test_init_char_content_wrong_type(self):
"""Does giving 'c' typecode buffers incorrect types raise the correct
exceptions?
"""
# all types but str are invalid for 'c'
for typecode in self.valid_content:
if typecode != "c":
with self.assertRaises(TypeError):
pe.GapBuffer("c", self.valid_content[typecode])
def test_init_unicode_content_wrong_type(self):
"""Does giving 'u' typecode buffers incorrect types raise the correct
exceptions?
"""
# all types but unicode are invalid for 'u'
for typecode in self.valid_content:
if typecode != "u":
with self.assertRaises(TypeError):
pe.GapBuffer("u", self.valid_content[typecode])
def test_eq(self):
"""Test all typecodes for equality to their respective initial content.
"""
for typecode in self.valid_content:
b = pe.GapBuffer(typecode, self.valid_content[typecode])
self.assertEqual(self.valid_content[typecode], b)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestGapBuffer)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit | Python | |
a48c52ccb6e89edbc186e4e916b8151ff0fa232f | Add an admin | kronok/django-sendgrid-events,rorito/django-sendgrid-events,digital-eskimo/django-sendgrid-events,eldarion/django-sendgrid-events | sendgrid_events/admin.py | sendgrid_events/admin.py | from django.contrib import admin
from .models import Event
# Register Event with inline ModelAdmin options (list columns, filters, search).
admin.site.register(
    Event,
    list_display=["kind", "email", "created_at"],
    list_filter=["created_at", "kind"],
    search_fields=["email", "data"],
)
| bsd-3-clause | Python | |
ad59ee2bce3e0de3e6fce9647a0ae7faa33e6135 | Create testGame.py | joaofanti/TrabRedesIIFinal | Testes/testGame.py | Testes/testGame.py | import sys
# Adicionar o caminho do MAP FACTORY
sys.path.insert(0, '../Modelos/')
sys.path.insert(0, '../Modelos/Mapa/')
from Game import *
from Map import *
from MapFactory import *
import json
"""
Metodo principal para rodar o cliente
"""
if __name__ == "__main__":
fct = MapFactory()
with open('../Recursos/Mapa.txt', 'r') as data_file:
jsonFormatted = json.load(data_file)
with open('../Recursos/MapaDesign.txt', 'r') as mapDesign:
generatedMap = fct.GenerateMap(jsonFormatted, mapDesign.read())
#Teste 1 - CriaJogador
print "----- CriaJogador -----"
game = Game(generatedMap)
print game.CriaJogador("Joao", "127.0.0.1") + "\n"
#Teste 2 - Examina
print "----- Examina -----"
game = Game(generatedMap)
game.CriaJogador("Joao", "127.0.0.1")
print game.Examina("Joao") + "\n"
#Teste 3 - Move
print "----- Move -----"
game = Game(generatedMap)
game.CriaJogador("Joao", "127.0.0.1")
print game.Move("Joao", "S")
print game.Examina("Joao") + "\n"
#Teste 4 - Inventario
print "----- Inventario -----"
game = Game(generatedMap)
game.CriaJogador("Joao", "127.0.0.1")
print game.Inventario("Joao") + "\n"
#Teste 5 - Pegar
print "----- Pegar -----"
game = Game(generatedMap)
game.CriaJogador("Joao", "127.0.0.1")
print "Objetos no inventario: "+game.Inventario("Joao")
print game.Pegar("Joao", "Nota1")
print "Objetos no inventario: "+game.Inventario("Joao") + "\n"
#Teste 6 - Largar
print "----- Largar -----"
game = Game(generatedMap)
game.CriaJogador("Joao", "127.0.0.1")
print game.Examina("Joao")
print "Inventario do player: " + game.Inventario("Joao")
print game.Largar("Joao", "Mapa")
print game.Examina("Joao")
print "Inventario do player: " + game.Inventario("Joao") + "\n"
#Teste 7 - getPlayersInRoom
print "----- Players na Sala -----"
game = Game(generatedMap)
game.CriaJogador("Joao", "127.0.0.1")
game.CriaJogador("Bruno", "127.0.0.1")
print game.getPlayersInRoom(1)
print "Adiciona mais dois players"
game.CriaJogador("Santos", "127.0.0.1")
game.CriaJogador("Gui", "127.0.0.1")
print game.getPlayersInRoom(1)
| mit | Python | |
3a7459f0f9e171954eb1f86a9e320ef889b9d1a5 | Tidy up and en-repo my little monitor | Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed | watch_emotes.py | watch_emotes.py | import sys
import time
import pprint
import subprocess
import requests
sys.path.append("../mustard-mine")
import config
emote_count = None
while emote_count is None or emote_count == len(emotes):
req = requests.get("https://api.twitch.tv/kraken/chat/emoticon_images?emotesets=317117,317121,317122", headers={
"Client-ID": config.CLIENT_ID,
"Accept": "application/vnd.twitchtv.v5+json",
})
resp = req.json()
emotes = {e["code"]: e["id"] for s in resp["emoticon_sets"].values() for e in s}
resp = requests.get("https://api.betterttv.net/2/channels/rosuav").json()
emotes.update({e["code"]: e["id"] for e in resp["emotes"]})
pprint.pprint(emotes)
# Whatever number there are on the first run, that's considered "current".
# If it then changes, report it.
if emote_count is None: emote_count = len(emotes)
time.sleep(60)
subprocess.check_call(["vlc", "/video/Clips/Let It Go/Turkish - Aldirma.mkv"])
| mit | Python | |
5e5e74e606d9be3c60fb810ed215dfa109a6ad9f | fix #5 wavファイルを生成する処理を行うクラスを定義 | 000ubird/HighResolution | libs/audio/create.py | libs/audio/create.py | import wave
import struct
from pylab import *
"""
振幅amp、基本周波数frq、サンプリング周波数 fs、
ビット深度bit_depthbit、長さlength秒の正弦波を作成して返す
"""
def createSineWave(amp, frq, fs, bit_depth, length) :
data = []
clip_hi = 1.0
clip_lo = -1.0
#正規化した波を整数値に変換する時の倍率
mult_bit = 0
if bit_depth == 16 : mult_bit = 32767.0
elif bit_depth == 24 : mult_bit = 16777216.0
else : exit() #16,24bit深度の時以外は終了
for n in arange(length * fs) :
s = amp * np.sin(2*np.pi * frq * n / fs)
#クリッピング処理
if s > clip_hi : s = clip_hi
if s < clip_lo : s = clip_lo
#書き込み
data.append(s)
#nBit深度の音源に変換
data = [int(x * mult_bit) for x in data]
#バイナリに変換
data = struct.pack("h" * len(data), *data)
return data
if __name__ == "__main__" :
#freqList = [262, 294, 330, 349, 392, 440, 494, 523] # ドレミファソラシド
freqList = [440]
for f in freqList:
data = createSineWave(1.0, f, 44100, 16, 5.0)
#print(data)
save(data,44100,2,"test.wav")
| mit | Python | |
acda4ae5deff6b45f56b84cdecb867a09586af4a | Add lc295_find_median_from_data_stream.py | bowen0701/algorithms_data_structures | lc295_find_median_from_data_stream.py | lc295_find_median_from_data_stream.py | """Leetcode 295. Find Median from Data Stream
Hard
URL: https://leetcode.com/problems/find-median-from-data-stream/
Median is the middle value in an ordered integer list.
If the size of the list is even, there is no middle value.
So the median is the mean of the two middle value.
For example,
[2,3,4], the median is 3
[2,3], the median is (2 + 3) / 2 = 2.5
Design a data structure that supports the following two operations:
- void addNum(int num) - Add a integer number from the data stream to
the data structure.
- double findMedian() - Return the median of all elements so far.
Example:
addNum(1)
addNum(2)
findMedian() -> 1.5
addNum(3)
findMedian() -> 2
Follow up:
- If all integer numbers from the stream are between 0 and 100,
how would you optimize it?
- If 99% of all integer numbers from the stream are between 0 and 100,
how would you optimize it?
"""
class MedianFinder(object):
def __init__(self):
"""
initialize your data structure here.
"""
pass
def addNum(self, num):
"""
:type num: int
:rtype: None
"""
pass
def findMedian(self):
"""
:rtype: float
"""
pass
def main():
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
e2a47d1ef44aa6b0ccc294aae68babd8ca54eb22 | Create search_log_files.py | joshisumit/pythonic_scripts | search_log_files.py | search_log_files.py | #!/usr/bin/env python
import os.path
#Author: Sumit Joshi
#date:5/12/2015
# Log File Search Script
# This Script is useful for searching specific keyword in log files. For specific keyword it returns matching lines.
print "*******************************************************"
print ""
print ""
print "Welcome to the Python Log Search Program"
print "Search log files with specific keywords..."
print ""
print ""
print "*******************************************************"
print ""
log_file=raw_input("Enter the path of the log file that you want to read ")
print log_file
if os.path.isfile(log_file):
keyword=raw_input('Enter the keywords that you want to search in log file with space ').split(" ")
#for storing final log lines
final=[]
fd=open(log_file,"r")
for line in fd.readlines():
for item in keyword:
if item in line:
final.append(line)
final.append('')
if len(final)==0:
print "No matching lines found in", log_file
else:
print 'Found matching lines in ', log_file
for log_line in final:
print log_line
else:
print 'Please enter valid log file name'
| mit | Python | |
ae90cf26caa471f85d7e5e20ef2e349b78183f41 | make python2 wokr | ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt,ccxt/ccxt | python/static_dependencies/__init__.py | python/static_dependencies/__init__.py | __all__ = ['ecdsa']
| mit | Python | |
54250baa4e2a34146506693b2da683ded2ee7cdb | add fake client ability that performs all required API calls | bboozzoo/mender-backend-cli | mender/cli/client.py | mender/cli/client.py | # The MIT License (MIT)
#
# Copyright (c) 2016 Gregorio Di Stefano
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import time
import threading
import os
import random
from mender.cli import device
import mender.cli.utils
import tempfile
import copy
import sys
def add_args(sub):
pisub = sub.add_subparsers(help='Commands for client')
sub.set_defaults(clientcommand='')
sub.add_argument('-n', '--number', help="Number of clients", type=int, required=True)
sub.add_argument('-i', '--inventory', help="Inventory items", action='append', default=["device_type:fake-device", "image_type:fake-image"])
sub.add_argument('--inventory-update-freq', type=int, default=60)
sub.add_argument('-w', '--wait', help="Maximum wait before changing update steps", type=int, default=30)
sub.add_argument('-f', '--fail', help="Fail update with specific messsage", type=str, default="")
sub.add_argument('-c', '--updates', help="Number of updates to perform before exiting", type=int, default=1)
threads = []
def do_main(opts):
client_options = []
for i in range(opts.number):
new_opts = copy.deepcopy(opts)
new_opts.seq_no = 1
new_opts.store = True
new_opts.verify = False
new_opts.attrs_set = opts.inventory
new_opts.mac_address = ":".join(["%02x" % random.randint(0x00, 0xFF) for i in range(6)])
new_opts.device_key = tempfile.NamedTemporaryFile().name
new_opts.tenant_token = tempfile.NamedTemporaryFile().name
new_opts.device_token = tempfile.NamedTemporaryFile().name
threads.append(threading.Thread(target=run_client, args=(new_opts,)))
for t in threads:
t.start()
def run_client(opts):
block_until_authorized(opts)
threading.Thread(target=send_inventory_data, args=(opts,)).start()
if opts.updates > 0:
for i in range(opts.updates):
block_until_update(opts)
else:
while True:
block_until_update(opts)
def block_until_authorized(opts):
logging.info("performing bootstrap")
device.do_key(opts)
count = 1
while True:
opts.seq_no = count
if device.do_authorize(opts):
logging.info("successfully bootstrapped client")
return
else:
logging.info("device not authorized yet..")
count += 1
time.sleep(5)
def send_inventory_data(opts):
while True:
device.do_inventory(opts)
time.sleep(opts.inventory_update_freq)
def block_until_update(opts):
device.do_fake_update(opts)
| mit | Python | |
5dc22c0a30b2e326665f67df1d85cd2ae0243c00 | Increase the sleep timeout to 4 seconds to prevent the main thread from quitting | komakino/plugin.video.pulsar,likeitneverwentaway/plugin.video.quasar,steeve/plugin.video.pulsar,peer23peer/plugin.video.quasar,peer23peer/plugin.video.quasar,pmphxs/plugin.video.pulsar,likeitneverwentaway/plugin.video.quasar,Zopieux/plugin.video.pulsar,johnnyslt/plugin.video.quasar,elrosti/plugin.video.pulsar,johnnyslt/plugin.video.quasar | resources/site-packages/pulsar/main.py | resources/site-packages/pulsar/main.py | import threading
import xbmc
from pulsar.rpc import server_thread
from pulsar.monitor import PulsarMonitor
from pulsar.daemon import pulsard_thread
def main():
# Make sure the XBMC jsonrpc server is started.
xbmc.startServer(xbmc.SERVER_JSONRPCSERVER, True)
# Start the JSONRPC thread
threading.Thread(target=server_thread).start()
# Start the Pulsard thread
threading.Thread(target=pulsard_thread).start()
# Get the monitor
monitor = PulsarMonitor()
# XBMC loop
while not xbmc.abortRequested:
xbmc.sleep(4000)
xbmc.log("pulsar: exiting pulsard")
| import threading
import xbmc
from pulsar.rpc import server_thread
from pulsar.monitor import PulsarMonitor
from pulsar.daemon import pulsard_thread
def main():
# Make sure the XBMC jsonrpc server is started.
xbmc.startServer(xbmc.SERVER_JSONRPCSERVER, True)
# Start the JSONRPC thread
threading.Thread(target=server_thread).start()
# Start the Pulsard thread
threading.Thread(target=pulsard_thread).start()
# Get the monitor
monitor = PulsarMonitor()
# XBMC loop
while not xbmc.abortRequested:
xbmc.sleep(1000)
xbmc.log("pulsar: exiting pulsard")
| bsd-3-clause | Python |
fe5100f5d13ed7461619c8beff791d40306f83ff | Remove annoying SQL view that prevents some operations | blaggacao/OpenUpgrade,kirca/OpenUpgrade,csrocha/OpenUpgrade,bwrsandman/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,bwrsandman/OpenUpgrade,pedrobaeza/OpenUpgrade,sebalix/OpenUpgrade,mvaled/OpenUpgrade,pedrobaeza/OpenUpgrade,OpenUpgrade/OpenUpgrade,sebalix/OpenUpgrade,OpenUpgrade/OpenUpgrade,mvaled/OpenUpgrade,blaggacao/OpenUpgrade,mvaled/OpenUpgrade,sebalix/OpenUpgrade,kirca/OpenUpgrade,grap/OpenUpgrade,csrocha/OpenUpgrade,blaggacao/OpenUpgrade,csrocha/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,hifly/OpenUpgrade,Endika/OpenUpgrade,OpenUpgrade/OpenUpgrade,grap/OpenUpgrade,blaggacao/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,pedrobaeza/OpenUpgrade,csrocha/OpenUpgrade,pedrobaeza/OpenUpgrade,kirca/OpenUpgrade,damdam-s/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,damdam-s/OpenUpgrade,sebalix/OpenUpgrade,0k/OpenUpgrade,grap/OpenUpgrade,grap/OpenUpgrade,0k/OpenUpgrade,damdam-s/OpenUpgrade,kirca/OpenUpgrade,pedrobaeza/OpenUpgrade,pedrobaeza/OpenUpgrade,damdam-s/OpenUpgrade,blaggacao/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade/OpenUpgrade,hifly/OpenUpgrade,0k/OpenUpgrade,OpenUpgrade/OpenUpgrade,csrocha/OpenUpgrade,sebalix/OpenUpgrade,csrocha/OpenUpgrade,pedrobaeza/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,Endika/OpenUpgrade,bwrsandman/OpenUpgrade,csrocha/OpenUpgrade,sebalix/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,blaggacao/OpenUpgrade,mvaled/OpenUpgrade,sebalix/OpenUpgrade,Endika/OpenUpgrade,blaggacao/OpenUpgrade,damdam-s/OpenUpgrade,damdam-s/OpenUpgrade,hifly/OpenUpgrade,0k/OpenUpgrade,Endika/OpenUpgrade,kirca/OpenUpgrade,0k/OpenUpgrade,bwrsandman/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,0k/OpenUpgrade,damdam-s/OpenUpgrade,mvaled/OpenUpgrade,mvaled/OpenUpgrade,hifly/OpenUpgrade,bwrsandman/OpenUpgrade,kirca/OpenUpgrade,mvaled/OpenUpgrad
e,bwrsandman/OpenUpgrade,bwrsandman/OpenUpgrade,kirca/OpenUpgrade | addons/document/migrations/8.0.2.1/pre-migration.py | addons/document/migrations/8.0.2.1/pre-migration.py | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# Drop view that inhibits changing field types. It will be recreated BTW
cr.execute('drop view if exists report_document_user cascade')
| agpl-3.0 | Python | |
935115215259ce011f3f0b46781655119413e720 | Add a directives file for pygments support. | btnpushnmunky/pelican,koobs/pelican,garbas/pelican,JeremyMorgan/pelican,jo-tham/pelican,lazycoder-ru/pelican,crmackay/pelican,iurisilvio/pelican,btnpushnmunky/pelican,TC01/pelican,douglaskastle/pelican,Summonee/pelican,51itclub/pelican,number5/pelican,koobs/pelican,levanhien8/pelican,florianjacob/pelican,zackw/pelican,abrahamvarricatt/pelican,farseerfc/pelican,garbas/pelican,Rogdham/pelican,avaris/pelican,lucasplus/pelican,sunzhongwei/pelican,catdog2/pelican,Polyconseil/pelican,ls2uper/pelican,51itclub/pelican,levanhien8/pelican,goerz/pelican,lucasplus/pelican,Scheirle/pelican,UdeskDeveloper/pelican,deanishe/pelican,sunzhongwei/pelican,joetboole/pelican,Summonee/pelican,janaurka/git-debug-presentiation,kennethlyn/pelican,florianjacob/pelican,kennethlyn/pelican,janaurka/git-debug-presentiation,Scheirle/pelican,crmackay/pelican,janaurka/git-debug-presentiation,gymglish/pelican,HyperGroups/pelican,justinmayer/pelican,HyperGroups/pelican,gymglish/pelican,rbarraud/pelican,deanishe/pelican,eevee/pelican,liyonghelpme/myBlog,treyhunner/pelican,eevee/pelican,goerz/pelican,Polyconseil/pelican,zackw/pelican,rbarraud/pelican,alexras/pelican,catdog2/pelican,GiovanniMoretti/pelican,11craft/pelican,jvehent/pelican,simonjj/pelican,catdog2/pelican,TC01/pelican,ls2uper/pelican,number5/pelican,treyhunner/pelican,getpelican/pelican,joetboole/pelican,jvehent/pelican,iurisilvio/pelican,alexras/pelican,liyonghelpme/myBlog,karlcow/pelican,douglaskastle/pelican,deved69/pelican-1,jimperio/pelican,TC01/pelican,koobs/pelican,51itclub/pelican,douglaskastle/pelican,joetboole/pelican,lazycoder-ru/pelican,0xMF/pelican,liyonghelpme/myBlog,UdeskDeveloper/pelican,karlcow/pelican,JeremyMorgan/pelican,simonjj/pelican,iurisilvio/pelican,iKevinY/pelican,fbs/pelican,11craft/pelican,Rogdham/pelican,jo-tham/pelican,alexras/pelican,ls2uper/pelican,Summonee/pelican,11craft/pelican,GiovanniMoretti/pelican,rbarraud/pelican,
kernc/pelican,HyperGroups/pelican,JeremyMorgan/pelican,gymglish/pelican,simonjj/pelican,Scheirle/pelican,goerz/pelican,talha131/pelican,ehashman/pelican,karlcow/pelican,btnpushnmunky/pelican,deved69/pelican-1,ionelmc/pelican,eevee/pelican,crmackay/pelican,liyonghelpme/myBlog,kernc/pelican,farseerfc/pelican,number5/pelican,UdeskDeveloper/pelican,avaris/pelican,treyhunner/pelican,levanhien8/pelican,getpelican/pelican,florianjacob/pelican,ingwinlu/pelican,ehashman/pelican,kennethlyn/pelican,liyonghelpme/myBlog,iKevinY/pelican,talha131/pelican,ehashman/pelican,lazycoder-ru/pelican,ingwinlu/pelican,Rogdham/pelican,abrahamvarricatt/pelican,garbas/pelican,arty-name/pelican,jvehent/pelican,kernc/pelican,abrahamvarricatt/pelican,GiovanniMoretti/pelican,lucasplus/pelican,zackw/pelican,jimperio/pelican,Natim/pelican,deved69/pelican-1,deanishe/pelican,sunzhongwei/pelican,jimperio/pelican,sunzhongwei/pelican | pelican/rstdirectives.py | pelican/rstdirectives.py | # -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)
| agpl-3.0 | Python | |
1b22ec95ec7a659031e59b5a14d018028b29485b | Create hcf.py | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | math/highest_common_factor/cpp/hcf.py | math/highest_common_factor/cpp/hcf.py |
#A python program to determine the highest common factor of positive integers only.
def hcf(x, y):
#set the highest common factor to 1
hcf = 1
#To check if one of the number is divisible by the other
if x % y == 0:
return y
#iterate within half the range of one of the numbers in a descending order
#then we assign it to hcf if it can divide both x & y without remainder
for k in range(int(y / 2), 0, -1):
if x % k == 0 and y % k == 0:
hcf = k
break
#when there is no common factor, return 1
return hcf
#save the program as hcf.py to run the unit test
#------------------------------------------------------
#To test the program
import hcf
import unittest
class TestHCFFunction(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testsimple(self):
a = hcf.hcf(40,16)
self.assertEqual(a,8)
if __name__ == '__main__':
unittest.main()
| cc0-1.0 | Python | |
3a6f724e7d31c1f42bfd426623dcaec3cdd642a8 | add utils for multi gpus | stereoboy/object_detection,stereoboy/object_detection | multi_gpus.py | multi_gpus.py | import tensorflow as tf
import numpy as np
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
| mit | Python | |
a2ec630cde4e36cc391ac5eee560c8097c8ceab3 | Copy in constants file from master | architecture-building-systems/CEAforArcGIS,architecture-building-systems/CEAforArcGIS | cea/technologies/constants.py | cea/technologies/constants.py | """
Constants used throughout the cea.technologies package.
History lesson: This is a first step at removing the `cea.globalvars.GlobalVariables` object.
"""
# Heat Exchangers
U_cool = 2500.0 # W/m2K
U_heat = 2500.0 # W/m2K
dT_heat = 5.0 # K - pinch delta at design conditions
dT_cool = 2.0 # K - pinch delta at design conditions
# Specific heat
rho_W = 998.0 # [kg/m^3] density of Water
cp = 4185.0 # [J/kg K]
# Substation data
roughness = 0.02 / 1000 # roughness coefficient for heating network pipe in m (for a steel pipe, from Li &
NetworkDepth = 1 # m
# Initial Diameter guess
REDUCED_TIME_STEPS = 50 # number of time steps of maximum demand which are evaluated as an initial guess of the edge diameters | mit | Python | |
77b9961cbf6b4ce6d453bbc974b0c695308fdd83 | add import script for Broxbourne (closes #1167) | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_broxbourne.py | polling_stations/apps/data_collection/management/commands/import_broxbourne.py | from data_collection.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = 'E07000095'
addresses_name = 'local.2018-05-03/Version 1/Democracy Club - Polling Districts-2018- Broxbourne.csv'
stations_name = 'local.2018-05-03/Version 1/Democracy Club - Polling Stations-2018 - Broxbourne 2.csv'
elections = ['local.2018-05-03']
| bsd-3-clause | Python | |
e8e8c20422248fa4c394efbe1b8f04adbc006674 | Add lc0111_minimum_depth_of_binary_tree.py | bowen0701/algorithms_data_structures | lc0111_minimum_depth_of_binary_tree.py | lc0111_minimum_depth_of_binary_tree.py | """Leetcode 111. Minimum Depth of Binary Tree
Easy
URL: https://leetcode.com/problems/minimum-depth-of-binary-tree/
Given a binary tree, find its minimum depth.
The minimum depth is the number of nodes along the shortest path from the
root node down to the nearest leaf node.
Note: A leaf is a node with no children.
Example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its minimum depth = 2.
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def minDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python | |
312bbdefa256413b4891cd0e13e6ccf3c614541f | Add datetime header to Crank | jad-b/Crank | util.py | util.py | """
util
===
Common utilities across the Crank system.
"""
from datetime import datetime
DATETIME_FORMAT = '%Y %b %d @ %H%M'
def get_timestamp_header():
return datetime.now().strftime(DATETIME_FORMAT)
| mit | Python | |
776efb9c76f29fa7e15066fa38d3fbb06f959e61 | update 040.py | liuyang1/euler,liuyang1/euler,liuyang1/euler | 040_1.py | 040_1.py | #! /usr/bin/python
# 得到数字n的第bit位(从左起)
def getNbit(n, bit):
return int(str(n)[bit])
def cham(n):
t = thresh()
while 1:
low, high, bit = t.next()
if n <= high: #若t<high,则为此区间内
offset = n - low # 本区间内的偏移量
num = offset / bit + pow(10, bit - 1) # 本区间偏移到具体哪个数字
seq = offset % bit # 偏移到该数字的第几位
ret = getNbit(num, seq)
return ret
# 得到各个区间的阈值
# 1 9 1
# 10 189 2
# 190 2889 3
# 意思为长度为3的数据,占据的范围为从第190位到第2889位.
def thresh():
low = 0
bit = 1
while 1:
high = low + 9 * pow(10, bit - 1) * bit
yield (low + 1, high, bit)
bit += 1
low = high
v = 1
for i in xrange(7):
v *= cham(pow(10, i))
print v
| mit | Python | |
a737acb0310c11618802ed8610cf8b7d3bafd543 | Add Python EC2 example change_instance_security_groups.py | awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples | python/example_code/ec2/change_instance_security_groups.py | python/example_code/ec2/change_instance_security_groups.py | # snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[create_instance.py demonstrates how to create an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon Elastic Compute Cloud (Amazon EC2)]
# snippet-keyword:[Python]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-15]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def change_instance_security_groups(instance_id, security_group_ids):
"""Change the security groups assigned to an EC2 instance
This method assigns the security groups to each elastic network interface
attached to the EC2 instance.
:param instance_id: EC2 instance ID
:param security_group_ids: list of security group IDs
:return True if the security groups were assigned to each network interface
in the EC2 instance. Otherwise, False.
"""
# Retrieve the IDs of the network interfaces attached to the EC2 instance
ec2_client = boto3.client('ec2')
try:
response = ec2_client.describe_instances(InstanceIds=[instance_id])
except ClientError as e:
logging.error(e)
return False
instance_info = response['Reservations'][0]['Instances'][0]
# Assign the security groups to each network interface
for network_interface in instance_info['NetworkInterfaces']:
try:
ec2_client.modify_network_interface_attribute(
NetworkInterfaceId=network_interface['NetworkInterfaceId'],
Groups=security_group_ids)
except ClientError as e:
logging.error(e)
return False
return True
def main():
"""Exercise change_instance_security_groups()"""
# Assign these values before running the program
ec2_instance_id = 'INSTANCE_ID'
security_group_ids = [
'SECURITY_GROUP_ID_1',
'SECURITY_GROUP_ID_2',
]
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Assign the security groups to the EC2 instance
if change_instance_security_groups(ec2_instance_id, security_group_ids):
logging.info(f'Changed EC2 Instance {ec2_instance_id} Security Groups to:')
for security_group in security_group_ids:
logging.info(f' ID: {security_group}')
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
c94de10cc97a4e948e2cf35f7b29cdf20c11f2ab | Add poisson sphere benchmark | thomasgibson/firedrake-hybridization | convergence_tests/sphere_poisson.py | convergence_tests/sphere_poisson.py | from __future__ import absolute_import, division
from firedrake import *
import numpy as np
def poisson_sphere(MeshClass, refinement, hdiv_space):
"""Test hybridizing lowest order mixed methods on a sphere."""
mesh = MeshClass(refinement_level=refinement)
mesh.init_cell_orientations(Expression(("x[0]", "x[1]", "x[2]")))
x, y, z = SpatialCoordinate(mesh)
V = FunctionSpace(mesh, hdiv_space, 1)
U = FunctionSpace(mesh, "DG", 0)
W = U * V
f = Function(U)
f.interpolate(x*y*z)
u_exact = Function(U).interpolate(x*y*z/12.0)
u, sigma = TrialFunctions(W)
v, tau = TestFunctions(W)
a = (dot(sigma, tau) - div(tau)*u + v*div(sigma))*dx
L = f*v*dx
w = Function(W)
nullsp = MixedVectorSpaceBasis(W, [VectorSpaceBasis(constant=True), W[1]])
params = {'mat_type': 'matfree',
'ksp_type': 'preonly',
'pc_type': 'python',
'pc_python_type': 'firedrake.HybridizationPC',
'hybridization': {'ksp_type': 'preonly',
'pc_type': 'lu',
'pc_factor_mat_solver_package': 'mumps',
'hdiv_residual': {'ksp_type': 'cg',
'ksp_rtol': 1e-14,
'pc_type': 'bjacobi',
'sub_pc_type': 'ilu'},
'use_reconstructor': True}}
solve(a == L, w, nullspace=nullsp, solver_parameters=params)
u_h, _ = w.split()
error = errornorm(u_exact, u_h)
return w, error
MeshClass = UnitCubedSphereMesh
refinement = 4
hdiv_family = "RTCF"
w, err = poisson_sphere(MeshClass, refinement, hdiv_family)
print err
File("poisson_sphere.pvd").write(w.split()[0], w.split()[1])
| mit | Python | |
e36e809699d387368d5c7edb478961c04692787f | Create BlockSelection.py | HeyIamJames/PyGames,HeyIamJames/PyGames | Documents/GitHub/BlockSelection.py | Documents/GitHub/BlockSelection.py | import pygame
import random
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
class Block(pygame.sprite.Sprite):
def __init__(self, color, width, height):
super(Block, self).__init__()
self.image = pygame.Surface([width, height])
self.image.fill(color)
self.rect = self.image.get_rect()
pygame.init()
screen_width = 700
screen_height = 400
screen = pygame.display.set_mode([screen_width, screen_height])
block_list = pygame.sprite.Group()
all_sprites_list = pygame.sprite.Group()
for i in range(20):
block = Block(BLACK, 20, 15)
block.rect.x = random.randrange(screen_width)
block.rect.y = random.randrange(screen_height)
block_list.add(block)
all_sprites_list.add(block)
player = Block(RED, 20, 15)
all_sprites_list.add(player)
done = False
clock = pygame.time.Clock()
score = 0
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
screen.fill(WHITE)
pos = pygame.mouse.get_pos()
player.rect.x = pos[0]
player.rect.y = pos[1]
blocks_hit_list = pygame.sprite.spritecollide(player, block_list, True)
for block in blocks_hit_list:
score += 1
print(score)
all_sprites_list.draw(screen)
pygame.display.flip()
clock.tick(60)
pygame.quit()
| mit | Python | |
10c83fbc01dee9d95290466338f262abffc12a3e | Create a folder in a datacenter if not exists | vmware/pyvmomi-community-samples,pfitzer/pyvmomi-community-samples,jm66/pyvmomi-community-samples,ddcrjlalumiere/pyvmomi-community-samples,prziborowski/pyvmomi-community-samples,pathcl/pyvmomi-community-samples | samples/create_folder_in_datacenter.py | samples/create_folder_in_datacenter.py | #!/usr/bin/env python
"""
Written by Chinmaya Bharadwaj
Github: https://github.com/chinmayb/
Email: acbharadwaj@gmail.com
Create a folder in a datacenter
"""
from __future__ import print_function
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import argparse
import atexit
import getpass
def GetArgs():
    """Parse and return the command-line arguments for this script.

    Required: -s/--host, -u/--user, -d/--datacenter, -f/--folder.
    Optional: -o/--port (default 443), -p/--password (prompted for if absent).
    """
    parser = argparse.ArgumentParser(
        description='Process args for retrieving all the Virtual Machines')
    parser.add_argument('-s', '--host', required=True, action='store',
                        help='Remote host to connect to')
    parser.add_argument('-o', '--port', type=int, default=443, action='store',
                        help='Port to connect on')
    parser.add_argument('-u', '--user', required=True, action='store',
                        help='User name to use when connecting to host')
    parser.add_argument('-p', '--password', required=False, action='store',
                        help='Password to use when connecting to host')
    # FIX: removed a stray trailing comma after this call; it created a
    # useless discarded one-element tuple expression.
    parser.add_argument('-d', '--datacenter', required=True,
                        help='name of the datacenter')
    parser.add_argument('-f', '--folder', required=True,
                        help='name of the folder')
    args = parser.parse_args()
    return args
def get_obj(content, vimtype, name):
    """Return the first managed object of `vimtype` named `name`, or None.

    Walks a recursive container view rooted at the service content's
    root folder and compares each candidate's name.
    """
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)
    match = None
    for candidate in view.view:
        if candidate.name == name:
            match = candidate
            break
    return match
def create_folder(content, host_folder, folder_name):
    """Create a child folder named `folder_name` under `host_folder`.

    `content` is unused; it is kept for call-signature symmetry with get_obj.
    """
    host_folder.CreateFolder(folder_name)
def main():
    """
    Simple command-line program that creates a folder in a datacenter
    unless a folder with that name already exists.
    """
    args = GetArgs()
    # Prompt interactively when no password was supplied on the command line.
    if args.password:
        password = args.password
    else:
        password = getpass.getpass(prompt='Enter password for host %s and '
                                   'user %s: ' % (args.host, args.user))
    si = SmartConnect(host=args.host,
                      user=args.user,
                      pwd=password,
                      port=int(args.port))
    if not si:
        print("Could not connect to the specified host using specified "
              "username and password")
        return -1
    # Ensure the session is closed on interpreter exit.
    atexit.register(Disconnect, si)
    content = si.RetrieveContent()
    dc = get_obj(content, [vim.Datacenter], args.datacenter)
    # NOTE(review): this searches folders across the whole inventory, not
    # only inside the requested datacenter — a same-named folder elsewhere
    # also short-circuits creation.
    if (get_obj(content, [vim.Folder], args.folder)):
        print("Folder '%s' already exists" % args.folder)
        return 0
    create_folder(content, dc.hostFolder, args.folder)
    print("Successfully created the folder '%s'" % args.folder)
    return 0
# Start program
if __name__ == "__main__":
main()
| apache-2.0 | Python | |
7a781a7cb9da6ab585f9b2ea72df991b4def19dd | Write entry/Preparations list test | iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api | whats_fresh/whats_fresh_api/tests/views/entry/test_list_preparations.py | whats_fresh/whats_fresh_api/tests/views/entry/test_list_preparations.py | from django.test import TestCase
from django.core.urlresolvers import reverse
from whats_fresh_api.models import *
from django.contrib.gis.db import models
import json
class ListPreparationTestCase(TestCase):
    """Tests for the entry/preparations list view."""
    fixtures = ['test_fixtures']
    def test_url_endpoint(self):
        # The named route must resolve to the expected URL.
        url = reverse('entry-list-preparations')
        self.assertEqual(url, '/entry/preparations')
    def test_list_items(self):
        """
        Tests to see if the list of preparations
        contains the proper preparations
        """
        response = self.client.get(reverse('entry-list-preparations'))
        items = response.context['item_list']
        # NOTE(review): indexing with preparation.id-1 assumes fixture ids
        # are contiguous starting at 1 and the list is id-ordered — verify
        # against the fixtures if this test starts failing.
        for preparation in Preparation.objects.all():
            self.assertEqual(
                items[preparation.id-1]['description'], preparation.description)
            self.assertEqual(
                items[preparation.id-1]['name'], preparation.name)
            self.assertEqual(
                items[preparation.id-1]['link'],
                reverse('edit-preparation', kwargs={'id': preparation.id}))
| apache-2.0 | Python | |
e0344625a6a2b9640481622a12c4dd810f5ca4c5 | merge overlapping pairs (set BQ to 0) | brentp/bwa-meth,brentp/bwa-meth,brentp/bwa-meth,dariober/bwa-meth,dariober/bwa-meth,dariober/bwa-meth,PeteHaitch/bwa-parclip,PeteHaitch/bwa-parclip,PeteHaitch/bwa-parclip | bam-merge-pairs.py | bam-merge-pairs.py | """
In order to avoid double counting of bases in overlapping paired end reads,
this script accept mapped reads in SAM format, use the position to determine
overlap, and set the lower base-quality of pair overlap to 0.
It is assumed that the reads are name-sorted as when they come from the
aligner.
Usage:
bwa mem reference.fa R1.fq R2.fq \
| bam-merge-pairs.py \
| samtools view -bS - > merged.bam
"""
from __future__ import print_function
from __future__ import division
import itertools as it
from operator import itemgetter
from bwameth import Bam
import sys
line_iter = (x.rstrip("\r\n").split("\t") for x in sys.stdin)
# Group SAM records by read name; name-sorted input guarantees mates are
# adjacent, so each group is either a single read or a proper pair.
for g, pair in it.groupby(line_iter, itemgetter(0)):
    if g.startswith("@"):
        # Header lines: emit unchanged.
        print("\n".join("\t".join(line) for line in pair))
        continue
    pair = list(pair)
    if len(pair) == 1:
        # Unpaired read: pass through as-is.
        # BUG FIX: this was a Python 2 print *statement*, which is a
        # SyntaxError under `from __future__ import print_function`.
        print("\t".join(pair[0]))
        continue
    assert len(pair) == 2, pair
    left, right = [Bam(b) for b in pair]
    # TODO: use left_shift(), right_shift()
    if left.pos + left.tlen < right.pos:
        # Mates do not overlap; emit both unchanged.
        print(str(left))
        print(str(right))
        continue

    # there is overlap
    # L ------------------>
    # R         <------------------
    ovl_bases = right.pos, left.pos + left.tlen
| mit | Python | |
6f9f87ec8fe3d19de7b995288fa799e36a5c50d5 | add mont test | BarrelfishOS/barrelfish,kishoredbn/barrelfish,BarrelfishOS/barrelfish,BarrelfishOS/barrelfish,BarrelfishOS/barrelfish,BarrelfishOS/barrelfish,kishoredbn/barrelfish,kishoredbn/barrelfish,BarrelfishOS/barrelfish,BarrelfishOS/barrelfish,kishoredbn/barrelfish,kishoredbn/barrelfish,BarrelfishOS/barrelfish,kishoredbn/barrelfish,BarrelfishOS/barrelfish | tools/harness/tests/tommath.py | tools/harness/tests/tommath.py | ##########################################################################
# Copyright (c) 2016, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Universitaetsstrasse 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import tests
from common import TestCommon
from results import PassFailResult
import random
class TomMathAbstract(TestCommon):
    """Base class for libtommath harness tests."""
    def get_finish_string(self):
        # The test binary prints this line when the final digit-size pass
        # completes; the harness uses it to detect test completion.
        return "DIGITS == 255...PASSED"
@tests.add_test
class TomMathMont(TomMathAbstract):
    '''runs the libtommath Montgomery multiplication test'''
    name = "tommath-mont"
    def get_modules(self, build, machine):
        modules = super(TomMathMont, self).get_modules(build, machine)
        # Seed the mont test with a random integer argument.
        modules.add_module("$BUILD/tommath/mont", [ int(random.random() * 100000) ])
        return modules
    def process_data(self, testdir, rawiter):
        # the test passed if no error occurred
        # NOTE(review): `passed` starts at 3 and must reach exactly 255 —
        # i.e. 252 "DIGITS == " lines are expected; confirm against the
        # mont binary's output before changing.
        passed = 3
        for line in rawiter:
            if "DIGITS == " in line:
                passed += 1
        return PassFailResult(passed == 255)
| mit | Python | |
a26ba96db8ac3149e457f2eaab0ef55e2b68d4cb | add 114 | ufjfeng/leetcode-jf-soln,ufjfeng/leetcode-jf-soln | python/114_flatten_binary_tree_to_linked_list.py | python/114_flatten_binary_tree_to_linked_list.py | """
Given a binary tree, flatten it to a linked list in-place.
For example,
Given
1
/ \
2 5
/ \ \
3 4 6
The flattened tree should look like:
1
\
2
\
3
\
4
\
5
\
6
Hints:
If you notice carefully in the flattened tree, each node's right child points
to the next node of a pre-order traversal.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def flatten(self, root):
        """Flatten the tree rooted at `root` into a right-leaning list, in place.

        The resulting chain of `right` pointers follows pre-order
        traversal; every `left` pointer ends up None.

        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.
        """
        if root is None:
            return
        # Flatten both subtrees first (flatten(None) is a no-op).
        self.flatten(root.left)
        self.flatten(root.right)
        if root.left is not None:
            # Splice the flattened left chain between root and the old
            # right chain.
            tail = self.findLeaf(root.left)
            detached_right = root.right
            root.right = root.left
            root.left = None
            tail.right = detached_right

    def findLeaf(self, root):
        """Return the right-most node reachable from `root` (None for None)."""
        if root is None:
            return None
        node = root
        while node.right:
            node = node.right
        return node
| mit | Python | |
85f67084f44419bf9d44439133314e6ffdadfea0 | Introduce OvnNbctlDaemonContext | openvswitch/ovn-scale-test,openvswitch/ovn-scale-test | rally_ovs/plugins/ovs/context/ovnnbctl_daemon.py | rally_ovs/plugins/ovs/context/ovnnbctl_daemon.py | # Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from rally.common.i18n import _
from rally.common import logging
from rally import consts
from rally.task import context
from rally_ovs.plugins.ovs import ovnclients
LOG = logging.getLogger(__name__)
@context.configure(name="ovn-nbctld", order=112)
class OvnNbctlDaemonContext(ovnclients.OvnClientMixin, context.Context):
    """Rally context that manages the ovn-nbctl daemon on the target.

    With daemon_mode enabled (the default) the daemon is (re)started and
    its control socket is published in the context as "daemon_socket";
    otherwise any running daemon is stopped.
    """
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "daemon_mode": {"type": "boolean"},
        },
        "additionalProperties": True
    }
    DEFAULT_CONFIG = {
        "daemon_mode": True,
    }
    @logging.log_task_wrapper(LOG.info, _("Enter context: `ovn-nbctld`"))
    def setup(self):
        super(OvnNbctlDaemonContext, self).setup()
        if self.config["daemon_mode"]:
            self.context["daemon_socket"] = self._restart_daemon()
        else:
            self._stop_daemon()
    @logging.log_task_wrapper(LOG.info, _("Exit context: `ovn-nbctld`"))
    def cleanup(self):
        # Nothing to tear down: the daemon is left in whatever state
        # setup() put it in.
        pass
| apache-2.0 | Python | |
ce44189d905f783b3963ac71057fe201f52faf64 | Add new sample | msysyamamoto/opencv-samples | detect_face/detect_face_video_swap.py | detect_face/detect_face_video_swap.py | # coding:utf-8
import cv2
# Open the default webcam.
cap = cv2.VideoCapture(0)
cascade_path = "../opencv/data/haarcascades/haarcascade_frontalface_default.xml"
cascade = cv2.CascadeClassifier(cascade_path)
# Read one frame up front to derive the minimum face size (8% of each
# frame dimension) used by the detector.
_response, frame = cap.read()
minsize = (int(frame.shape[0] * 0.08), int(frame.shape[1] * 0.08))
# Main loop: detect faces and swap each face with its mirror-ordered
# partner (first with last, second with second-to-last, ...).
# NOTE(review): the loop never terminates and never releases the capture;
# press-to-quit handling could use the cv2.waitKey return value.
while True:
    _response, frame = cap.read()
    facerect = cascade.detectMultiScale(frame, scaleFactor=1.11, minNeighbors=3, minSize=minsize)
    rect_count = len(facerect)
    if rect_count >= 2:
        # Pair the first half of detections with the reversed list so
        # each face is swapped exactly once.
        halv_count = int(rect_count/2)
        rfacerect = facerect[::-1]
        facerect = facerect[:halv_count]
        rfacerect = rfacerect[:halv_count]
        for (rect1, rect2) in zip(facerect, rfacerect):
            x1, y1, w1, h1 = rect1
            x2, y2, w2, h2 = rect2
            face1 = frame[y1:y1+h1, x1:x1+w1]
            face2 = frame[y2:y2+h2, x2:x2+w2]
            # Resize each face to fit the other's rectangle before pasting.
            face1 = cv2.resize(face1, (w2, h2))
            face2 = cv2.resize(face2, (w1, h1))
            frame[y1:y1+h1, x1:x1+w1] = face2
            frame[y2:y2+h2, x2:x2+w2] = face1
    cv2.waitKey(1)
    cv2.imshow("face camera", frame)
| apache-2.0 | Python | |
bbd732720fc25f5c47147c6f4c2c05ad1c180b35 | Add api_subscribe_request.py | devicehive/devicehive-python | devicehive/api_subscribe_request.py | devicehive/api_subscribe_request.py | class ApiSubscribeRequest(object):
"""Api request class."""
def __init__(self, transport):
self._transport = transport
self._action = None
self._request = {}
self._params = {'method': 'GET',
'url': None,
'request_delete_keys': [],
'params': {},
'headers': {},
'response_key': None}
def action(self, action):
self._action = action
def set(self, key, value):
if not value:
return
self._request[key] = value
def method(self, method):
self._params['method'] = method
def url(self, url, **args):
for key in args:
value = args[key]
url = url.replace('{%s}' % key, str(value))
self._params['url'] = url
def param(self, key, value):
if not value:
return
self._params['params'][key] = value
def header(self, name, value):
self._params['headers'][name] = value
def response_key(self, response_key):
self._params['response_key'] = response_key
def extract(self):
return self._action, self._request, self._params
| apache-2.0 | Python | |
f78a485000ef8dacb584db1f03b7157b79bd5fe7 | Add module for mocking node.get() with Responses | DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python | d1_libclient_python/src/d1_client/tests/mock_get.py | d1_libclient_python/src/d1_client/tests/mock_get.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock MNRead.get() → OctetStream
GET /object/{id}
Will always return the same bytes for a given PID.
"""
# Stdlib
import datetime
import hashlib
import random
import re
import urlparse
# 3rd party
import responses # pip install responses
import requests
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
# App
import d1_common.const
import d1_common.url
# Config
NUM_SCIOBJ_BYTES = 1024
GET_ENDPOINT_RX = r'v([123])/object/(.*)'
def init(base_url):
    """Register the mock GET /object/{id} endpoint with `responses`."""
    endpoint_rx_str = r'^' + d1_common.url.joinPathElements(base_url, GET_ENDPOINT_RX)
    endpoint_rx = re.compile(endpoint_rx_str)
    responses.add_callback(
        responses.GET,
        endpoint_rx,
        callback=_request_callback,
        content_type=d1_common.const.CONTENT_TYPE_OCTETSTREAM,
    )

def _request_callback(request):
    """Answer a mocked GET: numeric PIDs echo that status code, other
    PIDs return deterministic pseudo-random object bytes."""
    major_version, pid = _parse_url(request.url)
    try:
        status_int = int(pid)
    except ValueError:
        body_str = _generate_sciobj_bytes(pid, NUM_SCIOBJ_BYTES)
        return 200, {}, body_str
    else:
        body_str = 'Return code: {}'.format(status_int)
        return status_int, {}, body_str

def _parse_url(url):
    """Return (api major version, pid) extracted from the request URL."""
    url_obj = urlparse.urlparse(url)
    # Strip the query string before matching the endpoint pattern.
    url = url_obj._replace(query=None).geturl()
    m = re.search(GET_ENDPOINT_RX, url)
    assert m, 'Should always match since we\'re using the same regex as in add_callback()'
    return m.group(1), m.group(2)

def _generate_sciobj_bytes(pid, n_count):
    """Return n_count pseudo-random bytes, deterministic per pid.

    Seeds the RNG from the PID's MD5 so repeated fetches of the same PID
    always yield identical bytes.  NOTE: Python-2-only (xrange, md5 of str).
    """
    pid_hash_int = int(hashlib.md5(pid).hexdigest(), 16)
    random.seed(pid_hash_int)
    return bytearray(random.getrandbits(8) for _ in xrange(n_count))
| apache-2.0 | Python | |
ae05372f0a2f4b152fc700593e9a2b99612c9d69 | create tag models manager tags via slug | avelino/django-tags | tags/models.py | tags/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
class Tag(models.Model):
    """A tag identified by a unique name, with an auto-generated slug."""
    name = models.CharField(_(u'Name'), max_length=150, unique=True,
                            db_index=True)
    # Slug derived from `name` in save() when not supplied explicitly.
    slug = models.SlugField(_(u"Slug"),max_length=255)
    date_insert = models.DateTimeField(_(u"Date insert"), auto_now_add=True)
    date_update = models.DateTimeField(_(u"Date update"), auto_now=True)
    def save(self, *args, **kwargs):
        """Populate an empty slug from the name before saving."""
        if not self.slug:
            self.slug = slugify(self.name)
        super(Tag, self).save(*args, **kwargs)
    __unicode__ = lambda self: self.name
    class Meta:
        verbose_name = _(u'Tag')
        verbose_name_plural = _(u'Tags')
        # (slug, name) pairs must be unique together.
        unique_together = ['slug', 'name']
| mit | Python | |
6e1d2ce723fbdf4f37147f1f1cb692c4fb705c12 | handle cache file on disk | rlowrance/re-avm | Cache.py | Cache.py | '''maintain a pickled cache file on disk'''
import cPickle as pickle
import os
import pdb
import time
import unittest
if False:
# example
class Cache(object):
pass
def read_data(dictionary):
'return the data; it will be pickled and written to the file at path_to_cache'
return None
c = Cache(verbose=True)
path_to_cache = os.path.join('a', 'b', 'c')
dictionary = {'arg1': 123}
returned_value_from_read_data = c.read(read_data, path_to_cache, dictionary)
class Cache(object):
def __init__(self, verbose=False):
self.verbose = verbose
def read(self, read_data_function, path_to_cache, dictionary):
'return whatever read_data_function(**kwds) returns'
start_time = time.time()
if os.path.exists(path_to_cache):
with open(path_to_cache, 'r') as f:
cache = pickle.load(f)
if self.verbose:
print 'read cache; elapsed wall clock time', time.time() - start_time
else:
cache = read_data_function(dictionary)
if self.verbose:
print 'read underlying data; elapsed wall clock time', time.time() - start_time
start_time = time.time()
with open(path_to_cache, 'w') as f:
pickle.dump(cache, f)
if self.verbose:
print 'write cache: elapsed wall clock time', time.time() - start_time
return cache
class CacheTest(unittest.TestCase):
    """Exercise Cache.read() through a miss, a hit, and cleanup."""
    def test_1(self):
        read_data_result = 'my data'
        dictionary = {'abc': 123}
        # Reader counts how often the underlying read actually runs.
        class Reader(object):
            def __init__(self):
                self.invocations = 0
            def read(self):
                self.invocations += 1
                return read_data_result
        reader = Reader()
        def read_data(dictionary):
            self.assertEqual(dictionary['abc'], 123)
            return reader.read()
        verbose = False
        dir_temp = os.getenv('temp')  # for now, just support Windows
        path_to_cache = os.path.join(dir_temp, 'Cache-test.pickle')
        # Start from a clean slate: no cache file on disk.
        if os.path.isfile(path_to_cache):
            os.remove(path_to_cache)
        self.assertFalse(os.path.isfile(path_to_cache))
        c = Cache(verbose=verbose)
        # First read is a miss: the underlying reader runs once and the
        # cache file is created.
        cached_data_1 = c.read(read_data, path_to_cache, dictionary)
        self.assertEqual(read_data_result, cached_data_1)
        self.assertTrue(os.path.isfile(path_to_cache))
        self.assertEqual(reader.invocations, 1)
        # Second read is a hit: the reader must NOT run again.
        cached_data_2 = c.read(read_data, path_to_cache, dictionary)
        self.assertEqual(read_data_result, cached_data_2)
        self.assertTrue(os.path.isfile(path_to_cache))
        self.assertEqual(reader.invocations, 1)
        self.assertEqual(cached_data_1, cached_data_2)
        # remove cache file
        os.remove(path_to_cache)
        self.assertFalse(os.path.isfile(path_to_cache))
if __name__ == '__main__':
unittest.main()
if False:
# avoid linter warnings about imports not used
pdb
| bsd-3-clause | Python | |
0287d807b8b03e5adf30e95c53504378f0bb2f8d | Add basic implementation | boppreh/derivative | derivative.py | derivative.py | from __future__ import division
class Dual(object):
    """A dual number a + b*e where e**2 == 0.

    Arithmetic on Dual numbers propagates derivatives automatically:
    evaluating f(Dual(x, 1)) yields Dual(f(x), f'(x)).
    """

    def __init__(self, a=0.0, b=0.0):
        self.a = a  # real part
        self.b = b  # infinitesimal (derivative) part

    def __add__(self, other):
        if isinstance(other, Dual):
            return Dual(self.a + other.a, self.b + other.b)
        else:
            return Dual(self.a + other, self.b)

    def __radd__(self, other):
        # Addition is commutative.
        return self.__add__(other)

    def __sub__(self, other):
        if isinstance(other, Dual):
            return Dual(self.a - other.a, self.b - other.b)
        else:
            return Dual(self.a - other, self.b)

    def __rsub__(self, other):
        if isinstance(other, Dual):
            return Dual(other.a - self.a, other.b - self.b)
        else:
            # BUG FIX: other - (a + b*e) == (other - a) - b*e, so the
            # dual part must be negated (was returning self.b unchanged).
            return Dual(other - self.a, -self.b)

    def __mul__(self, other):
        if isinstance(other, Dual):
            # (a1 + b1 e)(a2 + b2 e) = a1 a2 + (b1 a2 + b2 a1) e
            return Dual(self.a * other.a, self.b * other.a + other.b * self.a)
        else:
            return Dual(self.a * other, self.b * other)

    def __rmul__(self, other):
        # Multiplication is commutative.
        return self.__mul__(other)

    def __truediv__(self, other):
        # BUG FIX: this method was a verbatim copy of __mul__ and
        # multiplied instead of dividing.  Quotient rule:
        # (a1 + b1 e)/(a2 + b2 e) = a1/a2 + ((b1 a2 - a1 b2)/a2**2) e
        if isinstance(other, Dual):
            return Dual(self.a / other.a,
                        (self.b * other.a - self.a * other.b)
                        / (other.a * other.a))
        else:
            return Dual(self.a / other, self.b / other)

    def __rtruediv__(self, other):
        # Only reached for non-Dual `other`; Dual/Dual dispatches to
        # __truediv__.  c/(a + b*e) = c/a - (c*b/a**2) e
        return Dual(other / self.a, -other * self.b / (self.a * self.a))

    def __str__(self):
        if self.b == 0:
            return str(self.a)
        elif self.b > 0:
            return '{} + {}e'.format(self.a, self.b)
        elif self.b < 0:
            return '{} - {}e'.format(self.a, -self.b)
x = Dual(1, 1)
y = Dual(3, -5)
print(x * y) | mit | Python | |
8223e9ffa61a2772a7a6f52244c5f1bbde4956b8 | Add py solution for 409. Longest Palindrome | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/longest-palindrome.py | py/longest-palindrome.py | from collections import Counter
class Solution(object):
    def longestPalindrome(self, s):
        """Return the length of the longest palindrome buildable from s.

        Each character count contributes its largest even part; if any
        count is odd, one extra character can occupy the middle.

        :type s: str
        :rtype: int
        """
        counter = Counter(s)
        odd = 0
        ans = 0
        # FIX: use items() instead of the Python-2-only iteritems() so the
        # solution also runs on Python 3.
        for char, cnt in counter.items():
            if cnt % 2 == 0:
                ans += cnt
            else:
                odd = 1
                ans += cnt - 1
        return ans + odd
| apache-2.0 | Python | |
6b06a383a6a306991a601f2a862e8b2bdfba1615 | Add a test to check that advanced filters data is fetched | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/tests/deployed_code_tests.py | akvo/rsr/tests/deployed_code_tests.py | # -*- coding: utf-8 -*-
# Akvo RSR is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the
# Akvo RSR module. For additional details on the GNU license please see <
# http://www.gnu.org/licenses/agpl.html >.
"""Script to run tests using Browserstack
Currently, the script is meant to be run locally by the developers, but it can
be easily extended to be run on Travis, etc.
RSR_ENV and BROWSERSTACK_URL need to be set as environment variables for the
script to run.
When logged in to Browserstack,
https://www.browserstack.com/automate/python has the url in the examples.
RSR_ENV should be set to "uat" if these tests are being run before a
deployment, "live" after a deployment and "dev" to test a PR on the dev server.
"""
# *FIXME*: The file, even though in the tests directory, has been named such
# *that it doesn't automatically run on Travis. This script currently needs to
# *be run manually by developers.
import os
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
# FIXME: Make the URL storable on Travis, etc.
BROWSERSTACK_URL = os.environ.get('BROWSERSTACK_URL')
ENV_URLS = {
'dev': 'https://rsr-dev.test.akvo.org',
'test': 'https://rsr.test.akvo.org',
'uat': 'https://rsr.uat.akvo.org',
'live': 'https://rsr.akvo.org',
}
rsr_env = os.environ.get('RSR_ENV', 'dev')
BASE_URL = ENV_URLS.get(rsr_env, ENV_URLS['dev'])
# FIXME: make it easier to use other browsers
desired_cap = {
'browser': 'IE',
'browser_version': '11.0',
'os': 'Windows',
'os_version': '10',
'resolution': '1024x768'
}
class DeployedCodeTestCase(unittest.TestCase):
    """Browser-based smoke tests run against a deployed RSR environment."""

    def setUp(self):
        # FIXME: Make it easy to test using local chrome/firefox
        # self.driver = webdriver.Chrome()
        self.driver = webdriver.Remote(
            command_executor=BROWSERSTACK_URL,
            desired_capabilities=desired_cap,
        )

    def test_advanced_search_data_fetched(self):
        """Opening the advanced-search filters should fetch project data."""
        status_selector = 'li#advanced-filter-status'
        driver = self.driver
        driver.get(BASE_URL)
        self.close_password_modal()
        search = driver.find_element_by_css_selector('#search .showFilters')
        search_view = driver.find_element_by_id('search-view')
        # Scroll the search widget into view before clicking it.
        driver.execute_script(
            "arguments[0].scrollIntoView(true);", search_view
        )
        search.click()
        try:
            WebDriverWait(self.driver, 10).until(
                EC.text_to_be_present_in_element(
                    (By.CSS_SELECTOR, status_selector), 'Projects'
                )
            )
        except Exception:
            # FIX: was a bare `except:`, which also swallows
            # KeyboardInterrupt and SystemExit; only real wait failures
            # should be converted into an assertion error.
            status = driver.find_element_by_css_selector(status_selector)
            raise AssertionError(
                'Element Text: {}. Failed to get projects'.format(status.text)
            )

    def tearDown(self):
        self.driver.quit()

    def close_password_modal(self):
        """Dismiss the password modal shown by protected test environments."""
        element = WebDriverWait(self.driver, 10).until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, ".modal-content input[type=password]")
            )
        )
        element.send_keys('TesTing!')
        self.driver.find_element_by_css_selector(
            '.modal-footer > button'
        ).click()
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | Python | |
8223d62c22d4c4f7a66e1e468de53556796a03a9 | Write a function that print something n times including relatives spaces | let42/python-course | src/functions/exercise7.py | src/functions/exercise7.py | """Module docstring.
This serves as a long usage message.
"""
import sys
import getopt
def main():
    """Parse options and dispatch each positional argument to process().

    NOTE: Python-2-only syntax (`except getopt.error, msg` and print
    statements); will not parse under Python 3.
    """
    # parse command line options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
    except getopt.error, msg:
        print msg
        print "for help use --help"
        sys.exit(2)
    # process options
    for o, a in opts:
        if o in ("-h", "--help"):
            print __doc__
            sys.exit(0)
    # process arguments
    # NOTE(review): process() is not defined in this file; it must be
    # supplied elsewhere or the call raises NameError.
    for arg in args:
        process(arg) # process() is defined elsewhere
if __name__ == "__main__":
main() | mit | Python | |
98c07739702fbf3951ccd0359d04be80a303d9ce | Add a fontname to TachyFont Jar file mapper. | bstell/TachyFont,bstell/TachyFont,moyogo/tachyfont,bstell/TachyFont,moyogo/tachyfont,bstell/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,bstell/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,moyogo/tachyfont,googlei18n/TachyFont | run_time/src/gae_server/font_mapper.py | run_time/src/gae_server/font_mapper.py | """
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle
tachyfont_major_version = 1
tachyfont_minor_version = 0

BASE_DIR = path.dirname(__file__)


def fontname_to_zipfile(fontname):
    """Map a font name to the path of its TachyFont jar file.

    Fonts in the NotoSansJP family live in a family subdirectory; all
    other fonts sit directly under fonts/.
    """
    if fontname.startswith('NotoSansJP'):
        family_dir = 'NotoSansJP/'
    else:
        family_dir = ''
    return BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
| apache-2.0 | Python | |
4074c4fae998ac1bb6f49bb47b34f4890dc90532 | Add integration tests for pylast.py | knockoutMice/pylast,hugovk/pylast,pylast/pylast,yanggao1119/pylast,knockoutMice/pylast,yanggao1119/pylast | test_pylast.py | test_pylast.py | #!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import datetime
import time
import unittest
import pylast
class TestSequenceFunctions(unittest.TestCase):
    """Integration tests that hit the real Last.fm API via pylast."""
    def setUp(self):
        # Credentials are placeholders; fill in before running.
        self.username = "TODO"
        password_hash = "TODO"
        API_KEY = "TODO"
        API_SECRET = "TODO"
        self.network = pylast.LastFMNetwork(api_key = API_KEY, api_secret =
            API_SECRET, username = self.username, password_hash = password_hash)
    def test_scrobble(self):
        # Arrange
        artist = "Test Artist"
        title = "Test Title"
        timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
        lastfm_user = self.network.get_user(self.username)
        # Act
        self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
        # Assert
        # The scrobble just sent must be the user's most recent track.
        last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
        self.assertEqual(str(last_scrobble.track.artist), str(artist))
        self.assertEqual(str(last_scrobble.track.title), str(title))
        self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
    def test_unscrobble(self):
        # Arrange
        artist = "Test Artist 2"
        title = "Test Title 2"
        timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
        library = pylast.Library(user = self.username, network = self.network)
        self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
        lastfm_user = self.network.get_user(self.username)
        # Act
        library.remove_scrobble(artist = artist, title = title, timestamp = timestamp)
        # Assert
        # After removal the latest track must not carry the removed timestamp.
        last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
        self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
    def test_add_album(self):
        # Arrange
        library = pylast.Library(user = self.username, network = self.network)
        album = self.network.get_album("Test Artist", "Test Album")
        # Act
        library.add_album(album)
        # Assert
        # Nothing here, just that no exception occurred
    def test_get_venue(self):
        # Arrange
        # NOTE(review): "United Kingom" looks like a typo for "United
        # Kingdom", but it is a live search term — confirm against the
        # API before changing it.
        venue_name = "Last.fm Office"
        country_name = "United Kingom"
        # Act
        venue_search = self.network.search_for_venue(venue_name, country_name)
        venue = venue_search.get_next_page()[0]
        # Assert
        self.assertEqual(str(venue.id), "8778225")
if __name__ == '__main__':
unittest.main()
# End of file
| apache-2.0 | Python | |
52cf67258e5f1e6378fec2794cc3abb1d5b394e9 | copia de serialCom sem a comunicacao serial | ocarneiro/arduino-cidadania | testeLogica.py | testeLogica.py | import serial
import time
############################
def num(s):
    """Parse `s` as an integer clamped to [0, 100]; non-integers map to 0."""
    try:
        value = int(s)
    except ValueError:
        return 0
    return max(0, min(100, value))
############################
# Serial port settings (the real connection is commented out; the script
# currently mocks the device's reply).
porta = "COM3"
velocidade = 9600
debug = True
#with serial.Serial(porta, velocidade) as conexao:
estado = 0
time.sleep(1)
# Loop while the state stays in the valid 0..100 range; 'q' quits.
while estado >= 0 and estado <= 100 :
    if debug: print 'Enviando ', estado
    #conexao.write(estado)
    time.sleep(1) # wait for the send and the reply
    if debug: print 'Recebendo resposta'
    leitura_serial = 'Mock ' + str(estado) #leitura_serial = conexao.readline()
    if debug: print "Resposta = ", leitura_serial, "\n"
    entrada = raw_input("Entre um valor entre 0 e 100. Para sair, entre q\n")
    if entrada == 'q' : break
    # num() clamps to [0, 100] and maps bad input to 0.
    estado = num(entrada)
print "Saindo"
#conexao.close()
| mit | Python | |
fb5ef2e7bf3b3315ab6491ec46dd97114162b7c6 | Add tests directory and dummy objects for use in developing and running unit tests. | magul/pywikibot-core,xZise/pywikibot-core,happy5214/pywikibot-core,darthbhyrava/pywikibot-local,smalyshev/pywikibot-core,wikimedia/pywikibot-core,jayvdb/pywikibot-core,hasteur/g13bot_tools_new,npdoty/pywikibot,hasteur/g13bot_tools_new,h4ck3rm1k3/pywikibot-core,valhallasw/pywikibot-core,happy5214/pywikibot-core,jayvdb/pywikibot-core,trishnaguha/pywikibot-core,Darkdadaah/pywikibot-core,icyflame/batman,VcamX/pywikibot-core,PersianWikipedia/pywikibot-core,TridevGuha/pywikibot-core,h4ck3rm1k3/pywikibot-core,npdoty/pywikibot,magul/pywikibot-core,Darkdadaah/pywikibot-core,wikimedia/pywikibot-core,hasteur/g13bot_tools_new,emijrp/pywikibot-core | pywikibot/tests/dummy.py | pywikibot/tests/dummy.py | # -*- coding: utf-8 -*-
"""Dummy objects for use in unit tests."""
#
# (C) Pywikipedia bot team, 2007
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id: $'
# add in any other attributes or methods that are needed for testing
class TestSite(object):
    """Mimic a Site object."""

    def __init__(self, hostname, protocol="http"):
        self._hostname = hostname
        self._protocol = protocol

    def protocol(self):
        """Return the URL scheme this fake site uses."""
        return self._protocol

    def hostname(self):
        """Return the host name this fake site answers for."""
        return self._hostname

    def cookies(self, sysop=False):
        """Return stored login cookies, or u"" when none were set."""
        return getattr(self, "_cookies", u"")
class TestPage(object):
    """Mimic a Page object."""
    def __init__(self, site, title):
        self._site = site
        self._title = title
    def site(self):
        """Return the (fake) site this page belongs to."""
        return self._site
    def title(self):
        """Return the page title."""
        return self._title
| mit | Python | |
862016c070155fa44ed082a47d969ecacbba8aae | add TagAlias class (broken) | jakeogh/anormbookmarker,jakeogh/anormbookmarker | anormbookmarker/TagAlias.py | anormbookmarker/TagAlias.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
from sqlalchemy import Column
#from sqlalchemy import ForeignKey
#from sqlalchemy import UniqueConstraint
#from sqlalchemy import CheckConstraint
from sqlalchemy import Integer
#from sqlalchemy import Unicode
from sqlalchemy.orm import relationship
#from sqlalchemy.orm import backref
#from sqlalchemy.orm.exc import NoResultFound
#from sqlalchemy.ext.declarative import declarative_base
#from sqlalchemy.ext.declarative import declared_attr
#from sqlalchemy.ext.associationproxy import association_proxy
#from sqlalchemy.ext.hybrid import hybrid_property
#from get_one_or_create import get_one_or_create
#from BaseMixin import BASE
from Config import CONFIG
from Word import Word
#from Word import WordMisSpelling
#from tag_relationship import tag_relationship
#from TagWord import TagWord
from find_tag import find_tag
# NOTE(review): this class is known-broken (see commit subject): the BASE
# declarative base import is commented out above, and construct() returns
# Tag/TagWord objects that are not imported in this module.
class TagAlias(BASE):
    '''
    List of TagWord instances that point to a Tag
    '''
    id = Column(Integer, primary_key=True)
    words = relationship("TagWord", backref='tag') # a list of TagWord instances
    def __init__(self, session, tag):
        """Split `tag` on spaces and persist it as a linked list of TagWords."""
        print("Tag.__init__() tag:", tag)
        assert isinstance(tag, str)
        # Refuse to create an alias for a tag that already exists.
        assert not find_tag(session=session, tag=tag)
        for index, word in enumerate(tag.split(' ')):
            # First word has no predecessor (position -1 becomes None).
            previous_position = index - 1
            if previous_position == -1:
                previous_position = None
            tagword = TagWord(position=index, previous_position=previous_position)
            tagword.word = Word.construct(session=session, word=word)
            self.words.append(tagword)
        session.add(self)
        session.flush(objects=[self])
    @classmethod
    def construct(cls, session, tag):
        '''
        prevents creation of duplicate tag aliases or conflicting tag aliases and tags
        '''
        #print("Tag.construct() tag:", tag)
        assert tag
        existing_tag = find_tag(session=session, tag=tag)
        if existing_tag:
            #print("Tag.construct() existing_tag:", existing_tag)
            return existing_tag
        else:
            # NOTE(review): returns a Tag, not a TagAlias, and Tag is not
            # imported here — this path raises NameError as written.
            new_tag = Tag(tag=tag, session=session)
            #print("Tag.construct() new_tag:", new_tag)
            return new_tag
#    # not sure if sorting is necessary
#    @property
#    def tag_with_checks(self):
#        tagwords_objects = sorted([word for word in self.words], key=lambda x: x.position)
#        sorted_tag = " ".join([str(word.word) for word in tagwords_objects])
#
#        unsorted_tag = " ".join([word.word for word in self.words])
#        if sorted_tag != unsorted_tag:
#            print("TAGS DO NOT MATCH")
#            print("sorted_tag:", sorted_tag)
#            print("unsorted_tag:", unsorted_tag)
#            quit(1)
#        return unsorted_tag
    @property
    def tag(self): # appears to always return the same result as tag_with_checks()
        tag = " ".join([str(word.word) for word in self.words])
        return tag
    def __repr__(self):
        return str(self.tag)
| mit | Python | |
0dd2662d3aefe49a6a706461ca9b7a9fd1b380e8 | add rotary_encoder.py from http://abyz.co.uk/rpi/pigpio/examples.html#Python code | philipbeber/karaoke,philipbeber/karaoke,philipbeber/karaoke,philipbeber/karaoke | pi/rotary_encoder.py | pi/rotary_encoder.py | #!/usr/bin/env python
import pigpio
class decoder:

   """Class to decode mechanical rotary encoder pulses."""

   def __init__(self, pi, gpioA, gpioB, callback):

      """
      Instantiate the class with the pi and gpios connected to
      rotary encoder contacts A and B.  The common contact
      should be connected to ground.  The callback is
      called when the rotary encoder is turned.  It takes
      one parameter which is +1 for clockwise and -1 for
      counterclockwise.

      EXAMPLE

      import time
      import pigpio

      import rotary_encoder

      pos = 0

      def callback(way):
         global pos
         pos += way
         print("pos={}".format(pos))

      pi = pigpio.pi()

      decoder = rotary_encoder.decoder(pi, 7, 8, callback)

      time.sleep(300)

      decoder.cancel()

      pi.stop()
      """

      self.pi = pi
      self.gpioA = gpioA
      self.gpioB = gpioB
      self.callback = callback

      # Last observed level on each contact, maintained by _pulse.
      self.levA = 0
      self.levB = 0

      # The gpio that changed most recently; used to debounce repeats of
      # the same contact.
      self.lastGpio = None

      self.pi.set_mode(gpioA, pigpio.INPUT)
      self.pi.set_mode(gpioB, pigpio.INPUT)

      # The encoder contacts short to ground, so the idle level must be
      # pulled high.
      self.pi.set_pull_up_down(gpioA, pigpio.PUD_UP)
      self.pi.set_pull_up_down(gpioB, pigpio.PUD_UP)

      # Watch both edges of both contacts.
      self.cbA = self.pi.callback(gpioA, pigpio.EITHER_EDGE, self._pulse)
      self.cbB = self.pi.callback(gpioB, pigpio.EITHER_EDGE, self._pulse)

   def _pulse(self, gpio, level, tick):

      """
      Decode the rotary encoder pulse.

                   +---------+         +---------+      0
                   |         |         |         |
         A         |         |         |         |
                   |         |         |         |
         +---------+         +---------+         +----- 1

             +---------+         +---------+            0
             |         |         |         |
         B   |         |         |         |
             |         |         |         |
         ----+         +---------+         +---------+  1
      """

      if gpio == self.gpioA:
         self.levA = level
      else:
         self.levB = level;

      if gpio != self.lastGpio: # debounce
         self.lastGpio = gpio

         # A rising edge on one contact marks a detent; the level of the
         # other contact gives the direction of rotation.
         if   gpio == self.gpioA and level == 1:
            if self.levB == 1:
               self.callback(1)
         elif gpio == self.gpioB and level == 1:
            if self.levA == 1:
               self.callback(-1)

   def cancel(self):

      """
      Cancel the rotary encoder decoder.
      """

      self.cbA.cancel()
      self.cbB.cancel()
if __name__ == "__main__":

   # Demo: decode an encoder wired to GPIO 7 (A) and 8 (B) for five
   # minutes, printing the accumulated position after every detent.
   import time
   import pigpio

   import rotary_encoder

   pos = 0

   def callback(way):
      global pos
      pos += way
      print("pos={}".format(pos))

   pi = pigpio.pi()

   decoder = rotary_encoder.decoder(pi, 7, 8, callback)

   time.sleep(300)

   decoder.cancel()

   pi.stop()
| mit | Python | |
871a49b8d1e18c675cb7b5e6730cf8ebd4c56e36 | Create q1.py | matthewelse/british-informatics-olympiad,matthewelse/british-informatics-olympiad,matthewelse/british-informatics-olympiad | 2013/q1.py | 2013/q1.py | # A solution to the British Informatics Olympiad 2012 Question 1
from time import strftime, gmtime

# Displayed time of the first clock, in minutes past midnight.
# (-1 is a sentinel so the initial a != b test cannot terminate the loop.)
a = -1
# Displayed time of the second clock, in minutes past midnight.
b = 0

# Input: two integers, how many extra minutes each clock gains per real
# hour.  NOTE(review): semantics inferred from the arithmetic below -
# confirm against the BIO problem statement.
i = input()
fast = [int(x) for x in i.split()]

actual = 0
while a != b:
    if a == -1:
        a = 0
    # Advance one real hour; clock faces wrap modulo 24*60 minutes.
    a = (a + 60 + fast[0]) % 1440
    b = (b + 60 + fast[1]) % 1440
    actual = (actual + 60) % 1440

# Print the common displayed time once the two clocks agree again.
print(strftime("%H:%M", gmtime(a*60)))
| mit | Python | |
4785a42d218e75a49f9e5c270f159a17c7e22bcd | add count_clonotypes.py | imminfo/mrd-paper | count_clonotypes.py | count_clonotypes.py | import sys
def count_clonotypes_in_files(link_path):
    """Count, per clonotype sequence, how many samples contain it.

    *link_path* is a manifest file: lines starting with '#' open a new
    sample, and each following non-empty line is a path to one of that
    sample's clonotype tables.  Returns {sequence: number of samples it
    was seen in}; duplicates within one sample count once.
    """

    def get_sequence(words):
        # Column layout varies: when column 2 holds a number, the sequence
        # sits in column 3; otherwise column 2 is the sequence itself.
        try:
            float(words[2])
            res = words[3]
        except (ValueError, IndexError):
            res = words[2]
        if not res:
            print("Empty string in NUC!!!")
        return res

    global_sequences = {}
    local_sequences = set()  # distinct sequences of the current sample

    with open(link_path) as infile:
        for line in infile:
            line = line.strip()
            if line.startswith('#'):
                # New sample header: fold the previous sample's sequences
                # into the per-sample totals.  (The original iterated the
                # dict with a 2-tuple unpack, which raises ValueError.)
                for seq in local_sequences:
                    global_sequences[seq] = global_sequences.get(seq, 0) + 1
                local_sequences = set()
            elif line:
                print("Searching in", line)
                # BUG FIX: the original iterated over the path *string*
                # instead of opening the file that the path names.
                with open(line) as target_file:
                    for target_line in target_file:
                        words = target_line.strip().split()
                        if words:
                            local_sequences.add(get_sequence(words))

    # BUG FIX: the last sample was never flushed into the totals.
    for seq in local_sequences:
        global_sequences[seq] = global_sequences.get(seq, 0) + 1

    return global_sequences
if __name__ == '__main__':
    seq_dict = count_clonotypes_in_files(sys.argv[1])
    # A clonotype is "unique" when seen in exactly one sample and "public"
    # when shared by several.  BUG FIX: the original's last line was missing
    # its closing parenthesis (SyntaxError), and the two lines counted in
    # two different styles; both are explicit counts now.
    print("# Unique clonotypes: ", sum(1 for v in seq_dict.values() if v == 1))
    print("# Public clonotypes: ", sum(1 for v in seq_dict.values() if v > 1))
3def1498495e0abf230a3deb3873f6c502f3c6ad | Add management command for removing content rotation's settings on sections | praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo | molo/core/management/commands/remove_content_rotation_settings_from_sections.py | molo/core/management/commands/remove_content_rotation_settings_from_sections.py | from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import SectionPage
class Command(BaseCommand):
    """One-off cleanup: blank every content-rotation setting on all sections."""

    def handle(self, **options):
        # Single bulk UPDATE over every SectionPage row.
        # NOTE(review): queryset.update() bypasses model save() hooks and
        # signals - presumably fine for a maintenance command; confirm
        # nothing depends on per-instance side effects.
        SectionPage.objects.all().update(
            content_rotation_start_date=None,
            content_rotation_end_date=None,
            monday_rotation=False,
            tuesday_rotation=False,
            wednesday_rotation=False,
            thursday_rotation=False,
            friday_rotation=False,
            saturday_rotation=False,
            sunday_rotation=False,
            time=None)
| bsd-2-clause | Python | |
ea67a84c83fb11237383102c4c447f70c4b83e64 | Add solution to 114. | bsamseth/project-euler,bsamseth/project-euler | 114/114.py | 114/114.py | """
A row measuring seven units in length has red blocks with a minimum length of
three units placed on it, such that any two red blocks (which are allowed to be
different lengths) are separated by at least one black square. There are
exactly seventeen ways of doing this.
How many ways can a row measuring fifty units in length be filled?
NOTE: Although the example above does not lend itself to the possibility, in
general it is permitted to mix block sizes. For example, on a row measuring
eight units in length you could use red (3), black (1), and red (4).
Solution comment: Fast for Python, ~4 ms. This somehow worked on first
try. Idea was to describe the number of ways to place the blocks with N units,
and use this to build larger solutions. A baseline fact is that there is only
one way to do it if N < 3 (i.e. the trivial solution). Then we can place a
block of ever increasing size (until no more room), and then add the number of
ways to place blocks on the remaining units. We can place the block either at
the start, or at some offset. Trying all blocksizes and all offsets we generate
the solution.
The memoization is essential for building the solution recursively like this.
Could be translated to DP with a simple array, but that would take some more
accurate indexing. The simplicity of this approach is the most appealing part.
And somehow I got the ±1 parts right on the first go.
"""
from time import time
from functools import lru_cache
@lru_cache()
def ways(N):
    """Count tilings of a row of N units with red blocks of length >= 3,
    any two blocks separated by at least one black unit.  The all-black
    row always counts as one arrangement, so ways(N) == 1 for N < 3."""
    if N < 3:
        return 1

    total = 1  # the all-black row
    for start in range(N - 2):  # left edge of the first red block
        remaining = N - start
        # Place every feasible block size, then tile whatever is left
        # after the block plus its mandatory one-unit black separator.
        total += sum(ways(remaining - size - 1)
                     for size in range(3, remaining + 1))
    return total
if __name__ == "__main__":
    started = time()
    answer = ways(50)
    print('Answer: {}\nExecution time: {:.3f} ms'.format(answer, (time() - started) * 1e3))
| mit | Python | |
d563e6aed8061d4e439bdad9ece1a9383d00cff2 | add parse_wadl.py, a first attempt to parse this wadl | annegentle/wadl2rst,annegentle/wadl2rst,annegentle/wadl2rst | parse_wadl.py | parse_wadl.py | #!/usr/bin/env python
import os
import xml.parsers.expat
# WADL document to parse; path is relative to the working directory.
filename = 'samples/cloud-images/wadl/image-2.0.wadl'


# Element-specific start handlers: for now they just dump the attributes.
def start_resource(name,attrs):
    print('start resource: ', attrs)

def start_resources(name,attrs):
    print('start resources ', attrs)

# Dispatch table mapping an element name to its start-tag handler.
start_dispatch = {
    'resource': start_resource,
    'resources': start_resources,
    }

# handler functions
def start_element(name, attrs):
    # print('Start element:', name, attrs)
    # Forward only elements we care about; everything else is ignored.
    if name in start_dispatch:
        start_dispatch[name](name, attrs)

def end_element(name):
    #print('End element:', name)
    pass

def char_data(data):
    #print('Character data:', repr(data))
    pass

def entity_data(entityName, is_parameter_entity, value, base, systemId, publicId, notationName):
    #print('Entity data:',entityName)
    pass

def default(data):
    #print('Default:', data)
    pass

# Wire the expat stream parser up to the handlers above.
p = xml.parsers.expat.ParserCreate()
p.StartElementHandler = start_element
p.EndElementHandler = end_element
p.CharacterDataHandler = char_data
p.EntityDeclHandler = entity_data
p.DefaultHandler = default

def main():
    # ParseFile expects a binary file object, hence mode 'rb'.
    with open(filename,'rb') as file:
        p.ParseFile(file)

main()
| apache-2.0 | Python | |
656d65a484cc4319ba67e4e4bb4cee8adb37136a | copy from hadoop | Vaishaal/ckm,Vaishaal/ckm,Vaishaal/ckm,Vaishaal/ckm,Vaishaal/ckm | copy_from_hadoop.py | copy_from_hadoop.py | import subprocess
import argparse
CMD_BASE = "hadoop fs -copyToLocal"
''' Script to export output files from hdfs to dst '''
def copy_from_hadoop(src, dst):
    """Run `hadoop fs -copyToLocal src dst`, raising on a non-zero exit."""
    command = " ".join([CMD_BASE, src, dst])
    proc = subprocess.Popen(command, shell=True, executable='/bin/bash')
    if proc.wait() != 0:
        raise Exception("invocation terminated with non-zero exit status")
def copy_train_test_from_hadoop(expid, root, dst):
    """Fetch both feature files of an experiment: <expid>_train_features
    first, then <expid>_test_features."""
    for suffix in ("_train_features", "_test_features"):
        copy_from_hadoop(root + expid + suffix, dst)
def main():
    """CLI entry point: required expid plus optional --root/--dst paths."""
    parser = argparse.ArgumentParser(
        description='Copy train and test files from hadoop')
    parser.add_argument('expid', help='expid to copy')
    parser.add_argument('--root', dest='root', default='/', help='root hdfs dir')
    parser.add_argument('--dst', dest='dst', default='.', help='place to copy to')
    options = parser.parse_args()
    copy_train_test_from_hadoop(options.expid, options.root, options.dst)


if __name__ == "__main__":
    main()
| apache-2.0 | Python | |
fa60d9857b4412edf5f8f59fa5b65914ee38a279 | add d18 (not working, too tired) | f-koehler/adventofcode | d18.py | d18.py | #!/bin/env python3
with open("d18.txt") as f:
state1 = f.read().splitlines()
for i in range(0, len(state1)):
state1[i] = list(state1[i])
new_state = 2
dim_x = len(state1)
dim_y = len(state1[0])
state2 = [["."]*(dim_y)]*(dim_x)
def light_active(x, y):
if new_state == 1:
return state2[x][y] == "#"
return state1[x][y] == "#"
def turn_on(x, y):
if new_state == 1:
state1[x][y] = "#"
else:
state2[x][y] = "#"
def turn_off(x, y):
if new_state == 1:
state1[x][y] = "."
else:
state2[x][y] = "."
def active_neighbors(x, y):
num = 0
if x > 0:
if light_active(x-1, y):
num += 1
if y > 0:
if light_active(x-1, y-1):
num += 1
if y < dim_y-1:
if light_active(x-1, y+1):
num += 1
if y > 0:
if light_active(x, y-1):
num += 1
if x < dim_x-1:
if light_active(x+1, y):
num += 1
if y > 0:
if light_active(x+1, y-1):
num += 1
if y < dim_y-1:
if light_active(x+1, y+1):
num += 1
if y < dim_y-1:
if light_active(x, y+1):
num += 1
return num
def update_light(x, y):
num = active_neighbors(x, y)
if light_active(x, y):
if num < 2 or num > 3:
turn_off(x, y)
else:
turn_on(x, y)
else:
if num == 3:
turn_on(x, y)
else:
turn_off(x, y)
def update():
global new_state
for x in range(0, dim_x):
for y in range(0, dim_y):
update_light(x, y)
if new_state == 1:
new_state = 2
else:
new_state = 1
def count_lights():
num = 0
for x in range(0, dim_x):
for y in range(0, dim_y):
if light_active(x, y):
num += 1
return num
def print_state(state_num):
if state_num == 1:
for l in state1:
print("".join(l))
else:
for l in state2:
print("".join(l))
print()
print_state(1)
print_state(2)
update()
print_state(1)
print_state(2)
| mit | Python | |
a610154749d081e613b8bf58acf62af55958de9c | Automate the update of the version number in setup.py. | jeffreydwalter/arlo | rev.py | rev.py | import fileinput
import re
import sys
# BUG FIX: use a raw string - "\s" inside a plain literal is an invalid
# escape sequence in modern Python (previously only a DeprecationWarning).
pattern = re.compile(r"\s*version='([0-9.]+)',")

# Bump the patch component of the version='X.Y.Z' line in setup.py,
# rewriting the file in place via fileinput.  The original's dead
# module-level placeholders (line/maj/min/ver) were dropped; 'min'
# shadowed the builtin and 'rev' shadowed this module's own name.
for line in fileinput.FileInput("setup.py", inplace=1):
    m = pattern.match(line)
    if m:
        version = m.groups()[0]
        major, minor, patch = version.split('.')
        line = line.replace(version, "{0}.{1}.{2}".format(major, minor, int(patch) + 1))
    sys.stdout.write(line)
| apache-2.0 | Python | |
47a575a69b6a55cc188aa7ccecf9d15719efe405 | Include base test class | jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint | tests/check.py | tests/check.py | from unittest import TestCase
import os
class Check(TestCase):
    """Base class for per-check test cases; concrete subclasses enable
    themselves and supply the proselint check module under test."""

    # Keep the abstract base itself out of test discovery.
    __test__ = False

    @property
    def this_check(self):
        # Subclasses must override this to return the check module.
        raise NotImplementedError

    def check(self, lst):
        """Return True when the check flags no errors for the given text(s)."""
        # NOTE(review): 'basestring' is Python 2 only - under Python 3 this
        # raises NameError.  Also only errors[0] is inspected, so with more
        # than one text the later results are ignored; confirm the intent.
        if isinstance(lst, basestring):
            lst = [lst]

        errors = []
        for text in lst:
            errors.append(self.this_check.check(text))

        return len(errors[0]) == 0

    def test_wpe(self):
        """Require at least min_wpe words per flagged error on the corpus."""
        min_wpe = 50
        examples_dir = os.path.join(os.getcwd(), "tests", "samples")
        examples = os.listdir(examples_dir)
        for example in examples:
            example_path = os.path.join(examples_dir, example)

            # Compute the number of words per (wpe) error.
            with open(example_path, 'r') as f:
                text = f.read()

            num_errors = len(self.this_check.check(text))
            num_words = len(text.split(' '))
            try:
                wpe = 1.0 * num_words / num_errors
            except ZeroDivisionError:
                wpe = float('Inf')

            # Make sure the check is not too noisy on the sample prose.
            assert wpe > min_wpe, \
                "{} has only {} wpe.".format(example, round(wpe, 2))
| bsd-3-clause | Python | |
11c4fedc55a32f27b4aebd44cc03ba1f5d1e6357 | add board_build_example | cfelton/rhea,NickShaffner/rhea,NickShaffner/rhea,cfelton/rhea | examples/build/board_build_example.py | examples/build/board_build_example.py |
"""
This file contains a general function to run the build for
various boards.
>> python example_build_boards.py --board=<board name>
"""
import argparse
import rhea.build as build
from rhea.build.boards import get_board
from rhea.build.boards import get_all_board_names
#from blink import blinky
# some boards don't have LEDs but they have IO that
# can drive LEDs. The following maps the led port
# to a set of pins for boards without an led port.
led_port_pin_map = {
    'xula': dict(name='led', pins=(36, 37, 39, 50)),
    'xula2': dict(name='led', pins=('R7', 'R15', 'R16', 'M15',)),
    'pone': dict(name='led', pins=(18, 23, 26, 33)),
}


def print_board_info(args):
    # Dump one summary line per known board: whether it exposes an LED
    # port ('led' or 'leds'), the port's name, and the pins behind it.
    boards = get_all_board_names()
    for bn in boards:
        brd = get_board(bn)
        has_led = False
        led = brd.get_port('led')
        if led is None:
            led = brd.get_port('leds')
        if led is not None:
            has_led = True
        numled = 0 if led is None else len(led.pins)
        ledpins = ' ' if led is None else led.pins
        ledname = ' ' if led is None else led.name
        # print some information
        print("{:12}: has led {:5}: {:5}, # led {}, led pins {} ".format(
            bn, str(has_led), str(ledname), numled, str(ledpins), ))
    return


def build_board(args):
    """Build the selected board(s), patching in an LED port when missing.

    NOTE(review): this cannot run as written:
      - 'board_table' is not defined anywhere in this module;
      - 'brd' is referenced in the assert before it is assigned;
      - led_port_pin_map is keyed by board-name strings, yet it is probed
        with the board object (looks like 'bn' was intended, not 'brd').
    """
    boards = board_table.keys() if args.board == 'all' else [args.board]
    board_names = get_all_board_names()
    for bn in boards:
        assert brd in board_table and brd in board_names
        brd = get_board(bn)
        if brd in led_port_pin_map:
            brd.add_port(**led_port_pin_map[brd])
        # check the board definition has led port, if not add it from
        # the board_table
        ledport = brd.get_port('led')
    return


def parseargs():
    """Command line: --board <name>|all selects what to build; --dump
    lists information about every known board instead."""
    boards = get_all_board_names() + ['all']
    parser = argparse.ArgumentParser()
    parser.add_argument('--board', default='xula', choices=boards,
                        help="select the board to build")
    parser.add_argument('--dump', default=False, action='store_true',
                        help="print information on all boards")
    args = parser.parse_args()
    return args


def main():
    args = parseargs()
    print(vars(args))
    if args.dump:
        print_board_info(args)
    else:
        build_board(args)
    return
if __name__ == '__main__':
main() | mit | Python | |
6138012dc3ce06250684edc641eb26d1beb3fc99 | Add controller test code | joel-wright/DDRPi,fraz3alpha/led-disco-dancefloor,fraz3alpha/DDRPi | experiments/python/controller_test.py | experiments/python/controller_test.py | # For getting an unbuffered write
import sys
# For getting the time
import time
# For threading
import threading
class ControllerInputListener():
    """Callback interface for consumers of controller events."""

    def new_controller_input(self, controller_input, channel, value):
        # controller_input is the emitting ControllerInput; channel is the
        # packed (type << 8 | number) id; value is the raw event value.
        raise NotImplementedError("new_controller_input(channel, value) not implements")
class ControllerInput(threading.Thread):
    """Reader thread for /dev/input/js0: parses 8-byte joystick event
    records and forwards (channel id, value) pairs to the listeners."""

    # NOTE(review): these are *class* attributes shared by every instance;
    # harmless while only one ControllerInput exists, but worth confirming.
    keep_running = 1
    listeners = set()

    def __init__(self):
        self.keep_running = 1
        threading.Thread.__init__(self)
        # The thread starts reading the device immediately on construction.
        self.start()

    def addControllerInputListener(self, listener):
        self.listeners.add(listener)

    def removeControllerInputListener(self, listener):
        if (listener in self.listeners):
            self.listeners.remove(listener)

    def notifyListeners(self, channel, value):
        # Fan a decoded event out to every registered listener.
        for listener in self.listeners:
            listener.new_controller_input(self, channel, value)

    def get_nice_name(self, channel):
        # Map packed (type << 8 | number) ids to readable control names.
        if (channel == 0x0100):
            return "X"
        if (channel == 0x0101):
            return "A"
        if (channel == 0x0102):
            return "B"
        if (channel == 0x0103):
            return "Y"
        if (channel == 0x0104):
            return "LB"
        if (channel == 0x0105):
            return "RB"
        if (channel == 0x0108):
            return "Select"
        if (channel == 0x0109):
            return "Start"
        if (channel == 0x0200):
            return "LeftRight"
        if (channel == 0x0201):
            return "UpDown"

    def run(self):
        # Each event record is 8 bytes:
        #   bytes 0-3  little-endian timestamp,
        #   bytes 4-5  little-endian signed value,
        #   byte  6    event type (top bit marks an init/synthetic packet),
        #   byte  7    channel number.
        # NOTE(review): layout inferred from the unpacking below; it matches
        # the Linux joystick API's struct js_event - confirm on the target.
        pipe = open('/dev/input/js0', 'r')
        data_buffer = []
        while (self.keep_running == 1):
            for char in pipe.read(1):
                data_buffer.append(ord(char))
            if (len(data_buffer) == 8):
                time_system = int(round(time.time() * 1000))
                time_controller = data_buffer[0] + (data_buffer[1] << 8) + (data_buffer[2] << 16) + (data_buffer[3] << 24)
                # print "Controller time : %d" % (time_controller)
                input_data = (data_buffer[5] << 8) + (data_buffer[4])
                # Get the value (1 = pressed, 0 = released, on an axis it goes -1/+1 to give direction and 0 for released)
                input_value = input_data & 0x7FFF
                # If it is negative, convert it to a signed number
                # NOTE(review): this sign-extends from bit 14 after dropping
                # bit 15 - verify against full-range axis values.
                if (input_value & 0x4000):
                    input_value -= 0x8000
                input_state = (input_data >> 15) & 0x1
                data_type = (data_buffer[6] >> 7) & 0x1
                input_type = data_buffer[6] & 0x7F
                input_channel = data_buffer[7]
                input_id = (input_type << 8) + input_channel
                if (data_type == 1):
                    # Init/synthetic packet: drop it without notifying.
                    #print "Data packet - type: %d channel: %d" % (input_type, input_channel)
                    data_buffer = []
                    continue
                #print "InputID: 0x%04X , value %d" % (input_id, input_value)
                self.notifyListeners(input_id, input_value)
                sys.stdout.flush()
                data_buffer = []
        print "Shutting down Controller input"
        pipe.close()

    def stop_running(self):
        # Ask the reader loop to exit after its current read completes.
        print "Requesting shutdown"
        self.keep_running=0
class DummyProgram(ControllerInputListener):
plugins = []
current_plugin = -1
def __init__(self):
self.plugins.append("Tetris")
self.plugins.append("Pong")
self.plugins.append("Pacman")
self.current_plugin = 0
def list_current_plugin(self):
if (self.current_plugin == -1):
print "No plugin selected"
else:
print "Current plugin is %s" % (self.plugins[self.current_plugin])
def new_controller_input(self, controller_input, channel, value):
print "Channel: %s , value %d" % (controller_input.get_nice_name(channel), value)
# Iterate through the list when the Select button is pressed down
if (controller_input.get_nice_name(channel) == "Select" and value == 1):
self.current_plugin += 1
if (self.current_plugin >= len(self.plugins)):
if (len(self.plugins) > 0):
self.current_plugin = 0
else:
self.current_plugin = -1
self.list_current_plugin()
# Demo: build two listeners but register only the first, let the reader
# thread run for ten seconds, then request shutdown and wait for it.
d = DummyProgram()
d2 = DummyProgram()

c = ControllerInput()
c.addControllerInputListener(d)
#c.addControllerInputListener(d2)

time.sleep(10)
c.stop_running()
time.sleep(5)
| mit | Python | |
4d579e832bb5c6b300ccd77859ea1fc6644f20c7 | Create asda.py | kuchaale/work | asda.py | asda.py | sasd=2
| mit | Python | |
0984e3d09e4520870e655b731fea92975884cf30 | Add a script to run test.py in the build directory | opencog/link-grammar,linas/link-grammar,opencog/link-grammar,ampli/link-grammar,linas/link-grammar,opencog/link-grammar,ampli/link-grammar,ampli/link-grammar,opencog/link-grammar,linas/link-grammar,ampli/link-grammar,ampli/link-grammar,opencog/link-grammar,opencog/link-grammar,linas/link-grammar,linas/link-grammar,ampli/link-grammar,linas/link-grammar,linas/link-grammar,opencog/link-grammar,opencog/link-grammar,linas/link-grammar,ampli/link-grammar,ampli/link-grammar,linas/link-grammar,opencog/link-grammar,ampli/link-grammar | msvc14/make-check.py | msvc14/make-check.py | """
Run Link Grammar Python scripts using the build target locations.
This program sets PYTHONPATH and PATH, and uses Python2 or Python3
to run the script, as needed.
This program is designed to run from this directory
It reads Local.props in order to find Python's EXE location.
It also generates relative PYTHONPATH and PATH.
In case it is desired to move it to another directory, there is a need to
change the related variables.
The default script to run is tests.py from binding\python-examples.
in order to run the file example.py there, the following can be used:
> .\make-check.py x64\Debug\Python2 ..\bindings\python-examples\example.py
"""
from __future__ import print_function
import os
import sys
import re
local_prop_file = 'Local.props' # In this directory
pyscript = r'..\bindings\python-examples\tests.py'
os.environ["LINK_GRAMMAR_DATA"] = r'..' # "data" in the parent directory
def error(msg):
if msg:
print(msg)
prog = os.path.basename(sys.argv[0])
print("Usage: ", prog, '[python_args] PYTHON_OUTDIR [script.py] [script_args]')
print('\tOUTDIR is in the format "x64/Debug/Python2"')
sys.exit(1)
def get_prop(vsfile, prop):
    """
    Get a macro definition from a .props file.

    Returns the text of the first <prop>...</prop> element found in
    *vsfile*, or None when the property is absent.  The file is re-read
    on every call, which is fine at this scale.
    """
    var_re = re.compile('<' + prop + '>([^<]*)<')
    # BUG FIX: use a context manager so the handle is closed
    # deterministically (the original leaked it until GC).
    with open(vsfile, 'r') as vs_f:
        for line in vs_f:
            m = re.search(var_re, line)
            if m is not None:
                return m.group(1)
    return None
#---
# Resolve Local.props next to this script, independent of the CWD.
local_prop_file = \
    os.path.dirname(os.path.realpath(sys.argv[0])) + '\\' + local_prop_file

#print('Running by:', sys.executable)
if len(sys.argv) < 2:
    error('Missing argument')

# An optional leading '-...' argument is passed through to the Python
# interpreter itself rather than to the script.
pyargs = ''
if len(sys.argv[1]) > 0 and sys.argv[1][0] == '-':
    pyargs = sys.argv.pop(1)
    if len(sys.argv) < 2:
        error('Missing argument')

outdir = sys.argv.pop(1)
if not os.path.isdir(outdir):
    error('Directory "{}" doesn\'t exist'.format(outdir))

# OUTDIR looks like r'x64\Debug\Python2': configuration, then the Python
# flavor whose location Local.props defines.
m = re.search(r'(.*)\\(.*)$', outdir)
if not m or len(m.groups()) != 2:
    error('Invalid output directory "{}"'.format(outdir))
config = m.group(1)
pydir = m.group(2).upper()

pyloc = get_prop(local_prop_file, pydir)
if pyloc is None:
    # BUG FIX: report the property name that was searched (pydir); the
    # original formatted pyloc, which is always None on this path.
    error('Python definition "{}" not found in {}' . \
        format(pydir, local_prop_file))
pyexe = get_prop(local_prop_file, pydir + '_EXE')
if pyexe is None:
    error('Python executable definition "{}" not found in {}' . \
        format(pydir + '_EXE', local_prop_file))
pyexe = pyexe.replace('$(' + pydir + ')', pyloc)

# A single remaining argument replaces the default test script.
if len(sys.argv) == 2:
    pyscript = sys.argv[1]

args = ''
if len(sys.argv) >= 3:
    args = ' '.join(sys.argv[2:])

os.environ["PATH"] = ('{};' + os.environ["PATH"]).format(config)
# For linkgrammar.py, clinkgrammar.py and _clinkgrammar.pyd
os.environ["PYTHONPATH"] = r'..\bindings\python;{}'.format(outdir)
print("PYTHONPATH=" + os.environ["PYTHONPATH"])
#print("Searching modules in:\n" + '\n'.join(sys.path))

cmd = ' '.join((pyexe, pyargs, pyscript, args))
print('Issuing command:', cmd)
os.system(cmd)
| lgpl-2.1 | Python | |
4e2172b8bd0953fd706f6c11029f9e4cfeb55407 | Add tests for the registry's root object's Component interface | GNOME/at-spi2-core,GNOME/at-spi2-core,GNOME/at-spi2-core | tests/registryd/test_root_component.py | tests/registryd/test_root_component.py | import pytest
import dbus
# AT-SPI2 Component interface name plus the constants the assertions use.
COMPONENT_IFACE = 'org.a11y.atspi.Component'

COORD_TYPE_WINDOW = 1  # coordinates interpreted relative to the window
LAYER_WIDGET = 3       # expected stacking layer of the registry root


# Each test calls one Component method on the registry's root accessible
# over D-Bus and pins its reply.  NOTE(review): the geometry values
# (0, 0, 1024, 768) presumably mirror the registry's stub screen - confirm
# against the registryd implementation.

def test_contains(registry_root, session_manager):
    assert registry_root.Contains(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == False


def test_get_accessible_at_point(registry_root, session_manager):
    # The registry reports no child at any point: the null accessible path.
    (name, path) = registry_root.GetAccessibleAtPoint(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE)
    assert path == '/org/a11y/atspi/null'


def test_get_extents(registry_root, session_manager):
    assert registry_root.GetExtents(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0, 1024, 768)


def test_get_position(registry_root, session_manager):
    assert registry_root.GetPosition(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0)


def test_get_size(registry_root, session_manager):
    assert registry_root.GetSize(dbus_interface=COMPONENT_IFACE) == (1024, 768)


def test_get_layer(registry_root, session_manager):
    assert registry_root.GetLayer(dbus_interface=COMPONENT_IFACE) == LAYER_WIDGET


def test_get_mdi_z_order(registry_root, session_manager):
    assert registry_root.GetMDIZOrder(dbus_interface=COMPONENT_IFACE) == 0


def test_grab_focus(registry_root, session_manager):
    assert registry_root.GrabFocus(dbus_interface=COMPONENT_IFACE) == False


def test_get_alpha(registry_root, session_manager):
    assert registry_root.GetAlpha(dbus_interface=COMPONENT_IFACE) == 1.0
| lgpl-2.1 | Python | |
f7a1595e39eeb754290c62e9194868d98d9755f4 | Add test for symbol selection | ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang | tests/symbols/test_symbol_selection.py | tests/symbols/test_symbol_selection.py | import pytest
from tests.symbols import get_symbols
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.compiler.references import Reference
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.named_access import NamedAccess
from thinglang.symbols.argument_selector import ArgumentSelector
# NOTE(review): indentation inside this thinglang snippet is significant to
# its parser; it was reconstructed here as one 4-space level per member -
# confirm against the original file.
SOURCE_OVERLOADING = '''
thing Container

thing Container1 extends Container

thing Container2 extends Container
    as Container1

thing Container3 extends Container
    as Container1

thing Container4 extends Container
    as Container1
    as Container2

thing Container1Child extends Container1

thing Container1Child2 extends Container1
    as Container2

thing Container2Child extends Container2

thing A
    does overloaded with Container1 container
    does overloaded with Container2 container
    does overloaded with Container2Child container
    does overloaded with Container1 c1, Container2 c2
    does overloaded with Container1 c1, Container2Child c2
'''

# TODO: verify no cast to base type!

# Shared fixtures: a symbol map for the snippet and a reference to the
# overloaded method group on thing A.
SYMBOLS = get_symbols(SOURCE_OVERLOADING)
BASE = SYMBOLS.resolve(NamedAccess.auto('A.overloaded'))


def get_selection(*target_types):
    """Run overload resolution for the given argument-type names."""
    selector = BASE.element.selector(SYMBOLS)

    for target_type in target_types:
        selector.constraint(Reference(Identifier(target_type)))

    return selector.disambiguate(None)


def verify_selection(target_type, expected_index, expected_match):
    """Assert which overload index was chosen and via which match kind."""
    target = get_selection(*target_type)
    assert target.symbol.index == expected_index
    assert target.match == expected_match


def test_exact_match():
    verify_selection(['Container1'], 1, ArgumentSelector.EXACT)
    verify_selection(['Container2'], 2, ArgumentSelector.EXACT)  # Matches exactly, despite being castable
    verify_selection(['Container2Child'], 3, ArgumentSelector.EXACT)  # Matches exactly, despite being in an inheritance chain
    verify_selection(['Container1', 'Container2'], 4, ArgumentSelector.EXACT)
    verify_selection(['Container1', 'Container2Child'], 5, ArgumentSelector.EXACT)


def test_inheritance_match():
    verify_selection(['Container1Child'], 1, ArgumentSelector.INHERITANCE)
    verify_selection(['Container1Child2'], 1, ArgumentSelector.INHERITANCE)  # Matches in an inheritance chain, despite being castable


def test_casted_match():
    verify_selection(['Container3'], 1, ArgumentSelector.CAST)


def test_inheritance_directionality():  # Verify that a parent is not accepted in place of a child
    with pytest.raises(NoMatchingOverload) as exc:
        get_selection('Container')

    assert not exc.value.exact_matches and not exc.value.inheritance_matches and not exc.value.cast_matches


def test_cast_ambiguity():  # Verify cast ambiguity
    with pytest.raises(NoMatchingOverload) as exc:
        get_selection('Container4')

    assert not exc.value.exact_matches and not exc.value.inheritance_matches and len(exc.value.cast_matches) == 2
| mit | Python | |
1ee1068d0203b591a734837d72f2f8a38c40399a | Add new project String -> MethodAndOperator.py | CaptainMich/Python_Project | StartWithPython/StartWithPython/String/MethodAndOperator.py | StartWithPython/StartWithPython/String/MethodAndOperator.py | #STRING(2)
print('\n\t\tSTRING(2)\t\t\n')

user = 'Tuna'
print(user[0])   # index into a string to get one specific character
print(user[1])
print(user[2])
print(user[3])
print('')
print(user[-1])  # negative indices count from the end of the string
print(user[-2])
print(user[-3])
print(user[-4])
print('')
print(user[1:3]) # slice a string: [start:stop], the stop index excluded
print(user[0:4])
print(user[1:])  # omitted bounds default to the ends of the string
print(user[:])
print('')
print(len(user)) # length of a string (N.B: blank spaces count as characters)
print(len('Tuna'))

# STRING (3)
print('\n\t\tSTRING(3)\t\t\n')

user = 'tuna'
print(user.find('un'))             # offset of the substring in 'user' (-1 when absent)
print(user.replace('una', 'omb'))  # replace occurrences of a substring with another
print(user)                        # note: strings are immutable - the original is unchanged
print(user.upper())                # uppercased copy (lower() for the converse)
print(user.isalpha())              # True when non-empty and every character is alphabetic

line = 'aaa,bbb,cccccc,dd\n'
print(line.split(','))             # split on a specific delimiter into a list of substrings
print(line.rstrip())               # remove whitespace characters on the right side
print(line.rstrip().split())       # the two operations combined

print('%s, eggs, and %s' % ('spam', 'SPAM!'))      # formatting expression
print('{}, eggs, and {}'.format('spam', 'SPAM!'))  # formatting method
e9b2131da038c7e65dde33c04ee7c5888063827c | Add a test suite for json_marshal.bzl. | bazelbuild/rules_typescript,bazelbuild/rules_typescript,bazelbuild/rules_typescript | internal/common/json_marshal_test.bzl | internal/common/json_marshal_test.bzl | """Unit tests for json marshaling.
Note, this cannot live next to the file it tests, because that file is in
third_party bazel rules, and bazel doesn't support skylark testing yet.
"""
load("//third_party/bazel_skylib/lib:unittest.bzl", "asserts", "unittest")
load("//third_party/bazel_rules/rules_typescript/internal:common/json_marshal.bzl", "json_marshal")
def _test_impl(ctx):
    """Checks json_marshal output for scalars, labels and containers."""
    env = unittest.begin(ctx)

    # Scalars and labels.
    asserts.equals(env, "\"abc\"", json_marshal("abc"))
    asserts.equals(env, "123", json_marshal(123))
    asserts.equals(env, "true", json_marshal(True))
    asserts.equals(env, "false", json_marshal(False))
    asserts.equals(env, "\"//a:b\"", json_marshal(Label("//a:b")))

    # Containers, including nesting.
    asserts.equals(env, "[]", json_marshal([]))
    asserts.equals(env, "{}", json_marshal({}))
    asserts.equals(env, """[1, 2, 3]""", json_marshal([1, 2, 3]))
    asserts.equals(env, """{"a": "b"}""", json_marshal({"a": "b"}))

    # NOTE(review): None marshals to false - looks deliberate; confirm
    # against json_marshal.bzl.
    asserts.equals(env, """{"none": false}""", json_marshal({"none": None}))

    asserts.equals(
        env,
        """{"a": {"d": 1, "e": true, "f": ["f1", "f2"]}, "b": "val", "c": [{"g": false}]}""",
        json_marshal({"a": {"d": 1, "e": True, "f": ["f1", "f2"]}, "b": "val", "c": [{"g": False}]}),
    )
    return unittest.end(env)

_test = unittest.make(_test_impl)

def json_marshal_test_suite():
    """Registers the json_marshal unit tests under one suite name."""
    unittest.suite("json_marshal_tests", _test)
| apache-2.0 | Python | |
698d838ed225f42c7acee40c564e0f15aefa16ef | Add code to support optional debugging | ConPaaS-team/conpaas,ConPaaS-team/conpaas,ConPaaS-team/conpaas,ConPaaS-team/conpaas,ConPaaS-team/conpaas,ConPaaS-team/conpaas,ConPaaS-team/conpaas | conpaas-director/cpsdirector/debug.py | conpaas-director/cpsdirector/debug.py | import sys
from common import log
class Debug:
def __init__(self):
self.level = 5;
def set_level(self, l):
self.level = l
def get_level(self):
return(self.level)
def debug(self, level, txt):
if self.get_level() >= level:
try:
raise Exception
except:
theFrame = sys.exc_info()[2].tb_frame.f_back.f_back
line = theFrame.f_lineno
file = theFrame.f_code.co_filename
name = theFrame.f_code.co_name
log("%s: (%s) %d: %s" % (file, name, line, txt))
def d1(self, txt):
self.debug(1, txt);
def d2(self, txt):
self.debug(2, txt);
def d3(self, txt):
self.debug(3, txt);
def d4(self, txt):
self.debug(4, txt);
def d5(self, txt):
self.debug(5, txt);
def testing():
di = Debug();
for k in range(0,6,1):
di.set_level(k)
print "-- testing debug level %d --" % k
di.d1("debugging level %d, d1" % k)
di.d2("debugging level %d, d2" % k)
di.d3("debugging level %d, d3" % k)
di.d4("debugging level %d, d4" % k)
di.d5("debugging level %d, d5" % k)
print
if __name__ == '__main__':
testing()
| bsd-3-clause | Python | |
5a8c8afee5e50a04f38d91745563842fc548cef8 | add favorites to the admin | crateio/crate.io | crate_project/apps/favorites/admin.py | crate_project/apps/favorites/admin.py | from django.contrib import admin
from favorites.models import Favorite
class FavoriteAdmin(admin.ModelAdmin):
list_display = ["user", "package", "created", "modified"]
list_filter = ["created", "modified"]
search_fields = ["user__username", "package__name"]
raw_id_fields = ["user", "package"]
admin.site.register(Favorite, FavoriteAdmin)
| bsd-2-clause | Python | |
cae06ce3f07cd241431baf792e5e3b9dbec04e6b | Create __init__.py | hatchery/Genepool2,hatchery/genepool | genes/docker/__init__.py | genes/docker/__init__.py | #Add
| mit | Python | |
e5238b616f1a9be3fe5863e0aa9e9c118343df0f | add error wrapper to echo to stderr | amaxwell/datatank_py | DTError.py | DTError.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This software is under a BSD license. See LICENSE.txt for details.
import sys
_errors = []
def DTErrorMessage(fcn, msg):
err_msg = "%s: %s" % (fcn, msg)
_errors.append(err_msg)
sys.stderr.write(err_msg + "\n")
def DTSaveError(datafile, name):
if len(_errors):
datafile[name] = _errors
| bsd-3-clause | Python | |
0a6de323b3b878ca6e68c66c3a6b97d78aac65c5 | Create LoginTest.py | skeldi/animated-robot | LoginTest.py | LoginTest.py | #!/usr/bin/env python
import urllib, urllib2
from datetime import datetime
from dateutil.relativedelta import relativedelta
from BeautifulSoup import BeautifulSoup
#useragent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome_Scrape/33.0.1750.154'
accountwww = 'https://account.tfl.gov.uk'
oysterwww = 'https://oyster.tfl.gov.uk'
cardID = '' # oystercard number
username = '' # oystercard website user name - URLencoded
password = '' # oystercard website password
bodyData = "ReturnUrl=https%3A%2F%2Foyster.tfl.gov.uk%2Foyster%2Fsecurity_check&AppId=8ead5cf4-4624-4389-b90c-b1fd1937bf1f&UserName="+username+"&Password="+password+"&Sign+in=Sign+in"
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
urllib2.install_opener(opener)
# log in
#bodyData = urllib.urlencode({'UserName': username, 'Password': password})
#headers = {'User-Agent': useragent}
request = urllib2.Request(accountwww + '/Oyster/', bodyData) #, headers
response = urllib2.urlopen(request)
welcomepage = BeautifulSoup(response)
JourneyHistoryTag = welcomepage.find(lambda tag: tag.string == 'Journey history')
if JourneyHistoryTag is None:
# get card form
cardform = welcomepage.find('form', id='selectCardForm')
#print cardform['action']
cardurl = oysterwww + cardform['action']
q = urllib.urlencode({'cardId': cardID, 'method': 'input'})
# form (sometimes?) has stupid text input hidden by css
cardhidden = cardform.find('input', type='hidden')
if cardhidden:
q += urllib.urlencode({cardhidden['name']: cardhidden['value']})
creq = urllib2.Request(cardurl, q) #,headers
g = urllib2.urlopen(creq)
cardpage = BeautifulSoup(g)
#print cardpage
JourneyHistoryTag = cardpage.find(lambda tag: tag.string == 'Journey history')
if JourneyHistoryTag is None:
raise Exception, 'Failed to find journey history'
# now ready to get journey history
todate = datetime.now()
fromdate = todate - relativedelta(weeks=8)
jhdict = {'dateRange': 'custom date range',
'customDateRangeSel': 'false', 'isJSEnabledForPagenation': 'false',
'offset': '0', 'rows': '0',
'csDateTo': todate.strftime("%d/%m/%Y"),
'csDateFrom': fromdate.strftime("%d/%m/%Y")}
jhurl = oysterwww + JourneyHistoryTag['href']
jhreq = urllib2.Request(jhurl, urllib.urlencode(jhdict))
jhresponse = urllib2.urlopen(jhreq)
#jhpage = jh.read()
#response = urllib2.urlopen(jhreq)
jhpage = BeautifulSoup(jhresponse)
#print jhpage
#Download CSV format
csvform = jhpage.find('form', id='jhDownloadForm')
csvformA = csvform.a['onclick']
#this is the URL we now want to download and save as CSV file
print csvformA.split('"')[1]
# Retrieve the webpage as a string
CSVurl = oysterwww + csvformA.split('"')[1]
CSVreq = urllib2.Request(CSVurl)
CSVresponse = urllib2.urlopen(CSVreq)
#print CSVresponse.read()
# Save the string to a file
csvstr = CSVresponse.read()
lines = csvstr.split("\\n")
file = open("journey.csv", "w")
for line in lines:
file.write(line + "\n")
file.close()
| mit | Python | |
bd761bda5c01384870083ad750e59859c4a5b04a | add collector for http://hosts-file.net/ | spantons/attacks-pages-collector | collectors/hosts-file.py | collectors/hosts-file.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
from time import gmtime, strftime
import requests
import ipwhois
from pprint import pprint
def get_url(url):
try:
res = requests.get(url)
except requests.exceptions.ConnectionError:
raise requests.exceptions.ConnectionError("DNS lookup failures")
else:
if res.status_code != 200:
raise requests.exceptions.ConnectionError(
"the {}, answer with {} error".format(url, res.status_code))
return res
def get_ip(name):
attempts = 5
ip = "undefined"
while attempts:
try:
data = socket.gethostbyname_ex(name)
ip = data[2][0]
break
except (socket.herror, socket.gaierror):
attempts -= 1
return ip
def get_who_is_and_country(ip):
try:
ip_obj = ipwhois.IPWhois(ip)
who_is = ip_obj.lookup(retry_count=5)
return str(who_is), who_is['asn_country_code']
except ipwhois.exceptions.IPDefinedError:
return "Private-Use Networks", "undefined"
except ipwhois.exceptions.WhoisLookupError:
return "undefined", "undefined"
def gather():
base_url = "http://hosts-file.net/"
classifications = [
("ad_servers.txt", "ATS"),
("emd.txt", "EMD"),
("exp.txt", "EXP"),
("fsa.txt", "FSA"),
("grm.txt", "GRM"),
("hfs.txt", "HFS"),
("hjk.txt", "HJK"),
("mmt.txt", "MMT"),
("pha.txt", "PHA"),
("psh.txt", "PSH"),
("wrz.txt", "WRZ"),
]
for classification in classifications:
attack_type = classification[1]
url = base_url + "{}".format(classification[0])
res = get_url(url)
for line in res.iter_lines():
line = line.split("\t")
if len(line) != 2:
continue
site_url = line[1]
ip_address = get_ip(site_url)
who_is, country = get_who_is_and_country(ip_address)
doc = {
'IP': ip_address,
'SourceInfo': url,
'Type': attack_type,
'DateTime': strftime("%Y-%m-%d", gmtime()),
'Country': country,
'Domain': site_url,
'URL': site_url,
'WhoIsInfo': who_is,
}
pprint(doc)
if __name__ == '__main__':
gather()
| mit | Python | |
233090aac7d71a1a8e0caf348b3d785bca15f69c | Create initial image.py file | joshua-stone/DerPyBooru | derpibooru/image.py | derpibooru/image.py | # this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Image(object):
def __init(self, data):
self.data = data
@property
def id(self):
@property
def tags(self):
@property
def is_optimized(self):
@property
def sha512_hash(self):
@property
def upvotes(self):
@property
def aspect_ratio(self):
@property
def original_format(self):
@property
def mime_type(self):
@property
def height(self):
@property
def updated_at(self):
@property
def width(self):
@property
def comment_count(self):
@property
def tag_ids(self):
@property
def created_at(self):
@property
def file_name(self):
@property
def uploader(self):
@property
def description(self):
@property
def orig_sha512_hash(self):
@property
def id_number(self):
@property
def license(self):
@property
def representations(self):
@property
def image(self):
@property
def score(self):
@property
def downvotes(self):
@property
def duplicate_reports(self):
@property
def faves(self):
@property
def source_url(self):
@property
def is_rendered(self):
@property
def data(self):
return(self.__data)
@data.setter
def data(self, data):
self.__data = data
| bsd-2-clause | Python | |
1eb9d5aff303ff920b4bacbeea0d1de6bab419d9 | add bonuses solution SRM 145 DIV 1 | cjwfuller/topcoder | Bonuses.py | Bonuses.py | from __future__ import division
class Bonuses:
def getDivision(self, points):
bonuses = []
# calculate bonuses
total = sum(points)
extra = 0
for p in points:
percent = p / total * 100
extra += percent % 1
bonuses.append(int(percent))
# award any extra points
extra = round(extra)
count = 1
awarded = []
while count <= extra:
maximum = max(y for x, y in enumerate(points) if x not in awarded)
maximums = [i for i, j in enumerate(points) if j == maximum]
for m in maximums:
bonuses[m] += 1
awarded.append(m)
count += 1
if(count > extra):
break
return bonuses | mit | Python | |
34f2e6066bb44697636f72bb416e200145878a42 | Add script for generating TestRun index (#74) | GoogleChrome/wptdashboard,GoogleChrome/wptdashboard,GoogleChrome/wptdashboard | util/generate_testrun_index.py | util/generate_testrun_index.py | #!/usr/bin/python3
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from google.cloud import storage
"""
Scans all WPT results directories then generates and uploads an index.
You must be logged into gcloud and a member of the wptdashboard project
in order for this script to work.
"""
GCP_PROJECT = 'wptdashboard'
RESULTS_BUCKET = 'wptd'
def main():
storage_client = storage.Client(project=GCP_PROJECT)
bucket = storage_client.get_bucket(RESULTS_BUCKET)
# by_sha is an object where:
# Key: a WPT commit SHA
# Value: list of platform IDs the SHA was tested against
by_sha = {}
# by_platform is an object where:
# Key: a platform ID
# Value: list of WPT commit SHAs the platform was tested against
by_platform = {}
sha_directories = list_directory(bucket)
for sha_directory in sha_directories:
sha = sha_directory.replace('/', '')
directories = list_directory(bucket, sha_directory)
platform_directories = [
prefix[len(sha_directory):].replace('/', '')
for prefix in directories
]
for platform in platform_directories:
by_sha.setdefault(sha, [])
by_sha[sha].append(platform)
by_platform.setdefault(platform, [])
by_platform[platform].append(sha)
print('by_sha', by_sha)
print('by_platform', by_platform)
index = {
'by_sha': by_sha,
'by_platform': by_platform
}
filename = 'testruns-index.json'
blob = bucket.blob(filename)
blob.upload_from_string(json.dumps(index), content_type='application/json')
print('Uploaded!')
print('https://storage.googleapis.com/wptd/%s' % filename)
def list_directory(bucket, prefix=None):
iterator = bucket.list_blobs(delimiter='/', prefix=prefix)
response = iterator._get_next_page_response()
return response['prefixes']
if __name__ == '__main__':
main()
| apache-2.0 | Python | |
c15bf8a018265417400b9948e4b36eb48d40582b | Introduce a very simple vim modeline parser. | spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments,spencerlyon2/pygments | pygments/modeline.py | pygments/modeline.py | # -*- coding: utf-8 -*-
"""
pygments.modeline
~~~~~~~~~~~~~~~~~
A simple modeline parser (based on pymodeline).
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
__all__ = ['get_filetype_from_buffer']
modeline_re = re.compile(r'''
(?: vi | vim | ex ) (?: [<=>]? \d* )? :
.* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
''', re.VERBOSE)
def get_filetype_from_line(l):
m = modeline_re.search(l)
if m:
return m.group(1)
def get_filetype_from_buffer(buf, max_lines = 5):
"""
Scan the buffer for modelines and return filetype if one is found.
"""
lines = buf.splitlines()
for l in lines[-1:-max_lines-1:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
for l in lines[max_lines:0:-1]:
ret = get_filetype_from_line(l)
if ret:
return ret
return None
| bsd-2-clause | Python | |
9e5fa749cbf797e8646e72a6b54047a9644c8711 | add controller file | ihfazhillah/qaamus-python | qaamus/controller.py | qaamus/controller.py | import view
import parsers
def idar_controller(soup_object, make_soup):
data = parsers.IndAraParser(soup_object).get_idar(make_soup)
rendered = view.View().render(data)
return rendered
def angka_controller(soup_object):
data = parsers.AngkaParser(soup_object).get_arti_master()
rendered = view.View().render(data)
return rendered
def pegon_controller(soup_object):
data = parsers.AngkaParser(soup_object).get_arti_master()
rendered = view.View().render(data)
return rendered
def angka_instruksi_controller(soup_object):
data = parsers.AngkaParser(soup_object).get_instruction()
rendered = view.View().render(data, layanan="Angka")
return rendered
def pegon_instruksi_controller(soup_object):
data = parsers.AngkaParser(soup_object).get_instruction()
rendered = view.View().render(data, layanan="Pegon")
return rendered
import unittest
from test_parsers import soupping, get_abs_path
idar_soup = soupping(get_abs_path("html/rumah+sakit"))
angka_soup = soupping(get_abs_path("html/angka123"))
pegon_soup = soupping(get_abs_path("html/pegon_suratman"))
class ControllerTestCase(unittest.TestCase):
def test_idar_controller(self):
result = idar_controller(idar_soup, soupping)
self.assertTrue(isinstance(result, str))
self.assertTrue(result is not None)
def test_angka_controller(self):
result = angka_controller(angka_soup)
self.assertTrue(isinstance(result, str))
self.assertTrue(result is not None)
def test_pegon_controller(self):
result = pegon_controller(pegon_soup)
self.assertTrue(isinstance(result, str))
self.assertTrue(result is not None)
def test_angka_instruksi_controller(self):
result = angka_instruksi_controller(angka_soup)
self.assertIn("Instruksi Layanan Angka", result)
def test_pegon_instruksi_controller(self):
result = pegon_instruksi_controller(angka_soup)
self.assertIn("Instruksi Layanan Pegon", result)
if __name__ == "__main__":
unittest.main()
| mit | Python | |
2e60e02e9fd7cb788e15e61329f36e675f14160f | Create PedidoEditar.py | AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb,AEDA-Solutions/matweb | backend/Models/Sala/PedidoEditar.py | backend/Models/Sala/PedidoEditar.py | from Framework.Pedido import Pedido
from Framework.ErroNoHTTP import ErroNoHTTP
class PedidoEditar(Pedido):
def __init__(self,variaveis_do_ambiente):
super(PedidoEditar, self).__init__(variaveis_do_ambiente)
try:
self.id = self.corpo['id']
self.codigo = self.corpo['codigo']
except:
raise ErroNoHTTP(400)
def getId(self):
return self.id
def setCodigo(self,codigo):
self.nome = nome
def getCodigo(self):
return self.codigo
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.