commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
30bd54621ce649e90f4a1717d6709652a2c77351 | Add missing migration | whitesmith/hawkpost,whitesmith/hawkpost,whitesmith/hawkpost | humans/migrations/0013_auto_20201204_1807.py | humans/migrations/0013_auto_20201204_1807.py | # Generated by Django 2.2.13 on 2020-12-04 18:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('humans', '0012_remove_user_server_signed'),
]
operations = [
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(blank=True, max_length=150, verbose_name='last name'),
),
]
| mit | Python | |
a8bf127b1e17b4ce9c2a5c4e6d2bbbc19faa0141 | Create snapper_chain.py | py-in-the-sky/challenges,py-in-the-sky/challenges,py-in-the-sky/challenges | google-code-jam/snapper_chain.py | google-code-jam/snapper_chain.py | """
https://code.google.com/codejam/contest/433101/dashboard
"""
def light_on(n, k):
bits = bin(k)[2:]
if len(bits) < n:
return False
return all(b == '1' for b in list(reversed(bits))[:n])
def main():
T = int(raw_input())
for t in xrange(1, T+1):
n, k = map(int, raw_input().strip().split())
print 'Case #{}: {}'.format(t, 'ON' if light_on(n, k) else 'OFF')
if __name__ == '__main__':
main()
| mit | Python | |
76fe998ad769e97b3424f2a3b8a5cccf2496816f | add very rudimentary/prototype range splitter program, without robust input checking | bpcox/rangesplitter | rangesplitter.py | rangesplitter.py | #! /usr/bin/env python3.4
import ipaddress
import math
toSplit=False
while not toSplit:
inputRange = input('Input the IP range you would like to split into subranges: ')
try:
toSplit =ipaddress.ip_network(inputRange)
except:
ValueError
rangeSize = False
default = False
while (not rangeSize and not default):
rawSize = input ('Input the size of the CIDR range you would calculate. Must be a larger or equal number compared to the suffix of the input range ('+str(toSplit.prefixlen)+') :')
if not rawSize:
default=True
if int(rawSize)<toSplit.prefixlen:
print('Invalid input')
continue
else:
rangeSize = int(rawSize)
if not default:
print(list(toSplit.subnets(new_prefix=rangeSize)))
if default:
if toSplit.version==4:
print(list(toSplit.subnets(new_prefix=16)))
if toSplit.version==6:
print(list(toSplit.subnets(new_prefix=48)))
print('default')
| mit | Python | |
b2661e8156f9a4e96cce3cc720563b1589037ad5 | Add frequency_estimator.py | shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE,shnizzedy/SM_openSMILE | mhealthx/extractors/frequency_estimator.py | mhealthx/extractors/frequency_estimator.py | #!/usr/bin/env python
"""
This program implements some of the frequency estimation functions from:
https://gist.github.com/endolith/255291 and
https://github.com/endolith/waveform-analyzer
"""
def freq_from_autocorr(signal, fs):
"""
Estimate frequency using autocorrelation.
Pros: Best method for finding the true fundamental of any repeating wave,
even with strong harmonics or completely missing fundamental
Cons: Not as accurate, doesn't work for inharmonic things like musical
instruments, this implementation has trouble with finding the true peak
From: https://gist.github.com/endolith/255291 and
https://github.com/endolith/waveform-analyzer
Parameters
----------
signal : list or array
time series data
fs : integer
sample rate
Returns
-------
frequency : float
frequency (Hz)
"""
import numpy as np
from scipy.signal import fftconvolve
from matplotlib.mlab import find
from mhealthx.signals import parabolic
# Calculate autocorrelation (same thing as convolution, but with one input
# reversed in time), and throw away the negative lags:
signal -= np.mean(signal) # Remove DC offset
corr = fftconvolve(signal, signal[::-1], mode='full')
corr = corr[len(corr)/2:]
# Find the first low point:
d = np.diff(corr)
start = find(d > 0)[0]
# Find the next peak after the low point (other than 0 lag). This bit is
# not reliable for long signals, due to the desired peak occurring between
# samples, and other peaks appearing higher.
i_peak = np.argmax(corr[start:]) + start
i_interp = parabolic(corr, i_peak)[0]
frequency = fs / i_interp
return frequency
def freq_from_hps(signal, fs):
"""
Estimate frequency using harmonic product spectrum.
Note: Low frequency noise piles up and overwhelms the desired peaks.
From: https://gist.github.com/endolith/255291 and
https://github.com/endolith/waveform-analyzer
Parameters
----------
signal : list or array
time series data
fs : integer
sample rate
Returns
-------
frequency : float
frequency (Hz)
"""
import numpy as np
from scipy.signal import blackmanharris, decimate
from mhealthx.signals import parabolic
N = len(signal)
signal -= np.mean(signal) # Remove DC offset
# Compute Fourier transform of windowed signal:
windowed = signal * blackmanharris(len(signal))
# Get spectrum:
X = np.log(abs(np.fft.rfft(windowed)))
# Downsample sum logs of spectra instead of multiplying:
hps = np.copy(X)
for h in np.arange(2, 9): # TODO: choose a smarter upper limit
dec = decimate(X, h)
hps[:len(dec)] += dec
# Find the peak and interpolate to get a more accurate peak:
i_peak = np.argmax(hps[:len(dec)])
i_interp = parabolic(hps, i_peak)[0]
# Convert to equivalent frequency:
frequency = fs * i_interp / N # Hz
return frequency
| apache-2.0 | Python | |
d2a80a76fdf28625ad36b2fd71af56938b9b9506 | Add needed track known class. | stober/lspi | src/trackknown.py | src/trackknown.py | #!/usr/bin/env python
'''
@author jstober
Simple class to track knowledge of states and actions. Based on
L. Li, M. L. Littman, and C. R. Mansley, “Online exploration in least-squares policy iteration” AAMAS, 2009.
'''
import numpy as np
import pdb
class TrackKnown:
"""
Track knowledge of states and actions.
TODO: Generalize by adding epsilon and kd tree or approximation methods.
"""
def __init__(self, nstates, nactions, mcount):
self.nstates = nstates
self.nactions = nactions
self.mcount = mcount
self.counts = np.zeros(nstates, nactions)
def init(self, samples):
for (s,a,r,ns,na) in samples:
self.counts[s,a] += 1
def known_pair(self,s,a):
if self.counts[s,a] > self.mcount:
return True
else:
return False
def known_state(self,s):
if np.greater(self.counts[s,:],self.mcount).all():
return True
else:
return False
def unknown(self,s):
# indices of actions with low counts.
return np.where(self.counts[s,:] < self.mcount)[0] | bsd-2-clause | Python | |
5dfa4397a282ddbafb57d990bc7d630fb6f927de | Add helper method for execute a commands | alexandrucoman/bcbio-dev-conda,alexandrucoman/bcbio-dev-conda | build.py | build.py | """Update conda packages on binstars with latest versions"""
import os
import six
import subprocess
import time
ATTEMPTS = 3
RETRY_INTERVAL = 0.1
def execute(command, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
:param attempts: How many times to retry running the command.
:param binary: On Python 3, return stdout and stderr as bytes if
binary is True, as Unicode otherwise.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`CalledProcessError` unless
program exits with one of these code.
:param command: The command passed to the subprocess.Popen.
:param cwd: Set the current working directory
:param env_variables: Environment variables and their values that
will be set for the process.
:param retry_interval: Interval between execute attempts, in seconds
:param shell: whether or not there should be a shell used to
execute this command.
:raises: :class:`subprocess.CalledProcessError`
"""
# pylint: disable=too-many-locals
attempts = kwargs.pop("attempts", ATTEMPTS)
binary = kwargs.pop('binary', False)
check_exit_code = kwargs.pop('check_exit_code', [0])
cwd = kwargs.pop('cwd', None)
env_variables = kwargs.pop("env_variables", None)
retry_interval = kwargs.pop("retry_interval", RETRY_INTERVAL)
shell = kwargs.pop("shell", False)
command = [str(argument) for argument in command]
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
while attempts > 0:
attempts = attempts - 1
try:
process = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=shell,
cwd=cwd, env=env_variables)
result = process.communicate()
return_code = process.returncode
if six.PY3 and not binary and result is not None:
# pylint: disable=no-member
# Decode from the locale using using the surrogate escape error
# handler (decoding cannot fail)
(stdout, stderr) = result
stdout = os.fsdecode(stdout)
stderr = os.fsdecode(stderr)
else:
stdout, stderr = result
if not ignore_exit_code and return_code not in check_exit_code:
raise subprocess.CalledProcessError(returncode=return_code,
cmd=command,
output=(stdout, stderr))
else:
return (stdout, stderr)
except subprocess.CalledProcessError:
if attempts:
time.sleep(retry_interval)
else:
raise
raise RuntimeError("The maximum number of attempts has been exceeded.")
| mit | Python | |
387758ebcc2a0fa29e9e7744eacc6c753ae5284e | add example for FIFOQueue and coordinate application | iViolinSolo/DeepLearning-GetStarted,iViolinSolo/DeepLearning-GetStarted | TF-Demo/QueueRunnerDemo/queue_runner_demo.py | TF-Demo/QueueRunnerDemo/queue_runner_demo.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: violinsolo
# Created on 12/12/2017
import tensorflow as tf
# define FIFO queue
q = tf.FIFOQueue(capacity=1000, dtypes='float32')
# define ops
counter = tf.Variable(initial_value=0, dtype='float32')
counter_increment_op = tf.assign_add(counter, 1.)
queue_enqueue_op = q.enqueue([counter])
coordinator = tf.train.Coordinator()
qr = tf.train.QueueRunner(queue=q, enqueue_ops=[counter_increment_op, queue_enqueue_op])
# begin session
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
enqueue_threads = qr.create_threads(sess=sess, start=True, coord=coordinator)
for i in range(10):
print sess.run(q.dequeue())
# coordinator.join(enqueue_threads)
coordinator.request_stop()
print sess.run(q.size())
coordinator.join(enqueue_threads)
for i in range(100):
print "-%d-" % i
print sess.run(q.size())
| apache-2.0 | Python | |
20b2e70fe732b6f0cc049d18da9cac717cd7e967 | Remove groups from admin | polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon | polyaxon/db/admin/groups.py | polyaxon/db/admin/groups.py | from django.contrib import admin
from django.contrib.auth.models import Group
admin.site.unregister(Group)
| apache-2.0 | Python | |
4a7fc9efce33bba3aa9ea818d09f6e9b621ab152 | add script to pull out contacts csv | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | portality/migrate/emails.py | portality/migrate/emails.py | from portality.models import Account
import csv
OUT = "emails.csv"
f = open(OUT, "wb")
writer = csv.writer(f)
writer.writerow(["ID", "Name", "Journal Count", "Email"])
for a in Account.iterall():
id = a.id
name = a.name
count = len(a.journal) if a.journal is not None else 0
email = a.email
if name is not None:
name = name.encode("ascii", "ignore")
if name is None or name == "":
name = "no name available"
if email is not None and email != "":
email = email.encode("ascii", "ignore")
writer.writerow([id, name, count, email])
f.close()
| apache-2.0 | Python | |
9639ab62ed0f6e0c5229be9820a9b902e5870a67 | update readme and make command line script | randlet/dianonymous,randlet/dianonymous | scripts/dianon.py | scripts/dianon.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import dicom
import sys
if __name__ == "__main__": #pragma nocover
from dianonymous.dianonymous import anonymize
parser = argparse.ArgumentParser(description="Anonymize DICOM files")
parser.add_argument(
'-r', '--recurse',
default=False,
action="store_true",
help="If input is a directory all DICOM files including"
"subdirectories will be anonymized"
)
parser.add_argument(
'-o', '--output',
default="./anonymized/",
help="Directory to place output files. If it doesn't exist it will be created."
)
parser.add_argument(
'-p', '--patient-id',
default=None,
help="Anonymous patient id to use"
)
parser.add_argument(
'-n', '--patient-name',
default=None,
help="Anonymous patient name to use"
)
parser.add_argument( 'files', nargs="+", help="Input files and directories")
args = parser.parse_args()
anonymize(args.files, output=args.output, anon_id=args.patient_id, anon_name=args.patient_name, recurse=args.recurse, log=sys.stdout)
| bsd-3-clause | Python | |
bd371ecbd2ac163e44f104a775390b2ca2b88d35 | Add migration for index on departement | openmaraude/APITaxi,openmaraude/APITaxi | migrations/versions/75704b2e975e_add_index_on_departement_for_numero.py | migrations/versions/75704b2e975e_add_index_on_departement_for_numero.py | """Add index on Departement for numero
Revision ID: 75704b2e975e
Revises: 34c2049aaee2
Create Date: 2019-10-22 17:27:10.925104
"""
# revision identifiers, used by Alembic.
revision = '75704b2e975e'
down_revision = '34c2049aaee2'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_index('departement_numero_index', 'departement', ['numero'], unique=False)
def downgrade():
op.drop_index('departement_numero_index', table_name='departement')
| agpl-3.0 | Python | |
70428a920ae9e02820e63e7dba98fc16faab6f10 | add benchmark for linalg.logm | zerothi/scipy,vigna/scipy,scipy/scipy,Eric89GXL/scipy,mdhaber/scipy,matthew-brett/scipy,ilayn/scipy,rgommers/scipy,endolith/scipy,mdhaber/scipy,Eric89GXL/scipy,grlee77/scipy,grlee77/scipy,rgommers/scipy,mdhaber/scipy,tylerjereddy/scipy,anntzer/scipy,endolith/scipy,andyfaff/scipy,scipy/scipy,perimosocordiae/scipy,tylerjereddy/scipy,zerothi/scipy,anntzer/scipy,nmayorov/scipy,Eric89GXL/scipy,WarrenWeckesser/scipy,WarrenWeckesser/scipy,scipy/scipy,ilayn/scipy,grlee77/scipy,nmayorov/scipy,Stefan-Endres/scipy,mdhaber/scipy,tylerjereddy/scipy,andyfaff/scipy,endolith/scipy,matthew-brett/scipy,WarrenWeckesser/scipy,scipy/scipy,grlee77/scipy,endolith/scipy,Eric89GXL/scipy,vigna/scipy,rgommers/scipy,rgommers/scipy,andyfaff/scipy,WarrenWeckesser/scipy,matthew-brett/scipy,ilayn/scipy,ilayn/scipy,tylerjereddy/scipy,Stefan-Endres/scipy,zerothi/scipy,Stefan-Endres/scipy,perimosocordiae/scipy,nmayorov/scipy,perimosocordiae/scipy,ilayn/scipy,anntzer/scipy,Stefan-Endres/scipy,nmayorov/scipy,e-q/scipy,ilayn/scipy,matthew-brett/scipy,scipy/scipy,perimosocordiae/scipy,Eric89GXL/scipy,zerothi/scipy,e-q/scipy,mdhaber/scipy,Eric89GXL/scipy,tylerjereddy/scipy,perimosocordiae/scipy,Stefan-Endres/scipy,e-q/scipy,andyfaff/scipy,andyfaff/scipy,rgommers/scipy,WarrenWeckesser/scipy,e-q/scipy,anntzer/scipy,endolith/scipy,perimosocordiae/scipy,vigna/scipy,anntzer/scipy,scipy/scipy,vigna/scipy,zerothi/scipy,mdhaber/scipy,grlee77/scipy,e-q/scipy,Stefan-Endres/scipy,andyfaff/scipy,zerothi/scipy,anntzer/scipy,nmayorov/scipy,matthew-brett/scipy,vigna/scipy,endolith/scipy,WarrenWeckesser/scipy | benchmarks/benchmarks/linalg_logm.py | benchmarks/benchmarks/linalg_logm.py | """ Benchmark linalg.logm for various blocksizes.
"""
import numpy as np
try:
import scipy.linalg
except ImportError:
pass
from .common import Benchmark
class Logm(Benchmark):
params = [
['float64', 'complex128'],
[64, 256],
['gen', 'her', 'pos']
]
param_names = ['dtype', 'n', 'structure']
def setup(self, dtype, n, structure):
n = int(n)
dtype = np.dtype(dtype)
A = np.random.rand(n, n)
if dtype == np.complex128:
A = A + 1j*np.random.rand(n, n)
if structure == 'pos':
A = A @ A.T.conj()
elif structure == 'her':
A = A + A.T.conj()
self.A = A
def time_logm(self, dtype, n, structure):
scipy.linalg.logm(self.A, disp=False)
| bsd-3-clause | Python | |
4be38d1f523696a48333797cbdb4a99a874a9cd5 | Create albumCoverFinder.py | btran29/yet-another-album-cover-finder | albumCoverFinder.py | albumCoverFinder.py | # albumCoverFinder - Brian Tran, btran29@gmail.com
# This program scans a tree of directories containing mp3 files. For
# each directory, it attempts to download the cover image from the
# Apple iTunes service. Subdirectories must be named <Artist>/<Album>
# contain .mp3 files to be considered. The cover will be saved to
# "cover.jpg" in each directory.
# If the cover file already exists, the directory is skipped.
# Usage example:
# albumCoverFinder.py <music directory>
import sys
import os
import shutil
import re
import urllib.request
import json
import tempfile
# For testing + possible future expansion with classes
defaults = {
"artist": 'Jack Johnson',
"album": 'In Between Dreams',
"country": 'US',
"media": 'music',
"attribute": 'albumTerm',
"base": 'https://itunes.apple.com/search?'
}
# Clean up album names via dictionary below
cleanup_table = {
' ': '+'
}
# Clean up album folder names for input
def clean_input(term):
print("\n" + "Search Term: " + "\"" + term + "\"")
pattern = re.compile('|'.join(cleanup_table.keys()))
term = pattern.sub(lambda x: cleanup_table[x.group()], term)
return term
# Generate url for apple api search
def gen_url(term):
url = defaults["base"] + \
'term=' + term + '&' + \
'attribute=' + defaults["attribute"] + '&' +\
'media=' + defaults["media"]
print("URL Used: " + url)
return url
# Connect to website and collect response
def collect_data(url):
response = urllib.request.urlopen(url)
# Convert to http response to utf-8
string = response.read().decode('utf-8')
data = json.loads(string) # returns dictionary object
return data
# Parse data to get album cover url
def parse_data(data, artist):
data = data['results']
# Initialize key vars
found = False
album_art_url = 'stringThing'
# Loop over results to find matching artist given album
for result in data:
if result['artistName'] == artist:
found = True
album_art_url = result['artworkUrl100']
print("Album Art URL: " + album_art_url)
break
if found is False:
print("No album/artist combination found.")
return album_art_url
# Download album art
def download(album_art_url):
img = urllib.request.urlopen(album_art_url)
output = tempfile.mktemp(".jpg")
o = open(output, "wb")
o.write(img.read())
o.close()
return output
# Simplified method
def get_art(directory):
# Get path values, artist, album
final_path = directory + os.sep + "cover.jpg"
values = directory.split(os.sep)
artist = values[-2]
album = values[-1]
# Run through procedure
url = gen_url(clean_input(album))
data = collect_data(url)
parsed_url = parse_data(data, artist)
dl_art = download(parsed_url)
if dl_art is not None:
# Copy file to location
shutil.copyfile(dl_art, final_path)
os.remove(dl_art)
print("Saved: " + final_path)
# Define usage
def usage(argv):
print("Usage" + argv[1] + "<music root directory>")
sys.exit(1)
# Main method
def main(argv):
if len(argv) < 2:
usage(argv)
source_directory = argv[1]
print("Searching within: " + source_directory)
# Obtain list of directories
directories = [source_directory]
for directory in directories:
files = os.listdir(directory)
for file in files:
if os.path.isdir(os.path.join(directory, file)):
directories.append(os.path.join(directory, file))
# Travel through directories
for directory in directories:
files = os.listdir(directory)
found = False
for file in files:
# Mark directories with mp3 files
if file.endswith('.mp3'):
found = True
break
if not found:
continue
# Get album art for this directory
get_art(directory)
# Limits this python file to script functionality (vs a module)
if __name__ == "__main__":
main(sys.argv)
| mit | Python | |
e7e37e9b1fd56d18711299065d6f421c1cb28bac | Add some Feed test cases | pombredanne/moksha,lmacken/moksha,pombredanne/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,pombredanne/moksha,ralphbean/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha | moksha/tests/test_feed.py | moksha/tests/test_feed.py | from tw.api import Widget
from moksha.feed import Feed
class TestFeed(object):
def test_feed_subclassing(self):
class MyFeed(Feed):
url = 'http://lewk.org/rss'
feed = MyFeed()
assert feed.url == 'http://lewk.org/rss'
assert feed.num_entries() > 0
for entry in feed.iterentries():
pass
for entry in feed.entries():
pass
def test_widget_children(self):
class MyWidget(Widget):
myfeedurl = 'http://lewk.org/rss'
children = [Feed('myfeed', url=myfeedurl)]
template = "mako:${c.myfeed()}"
widget = MyWidget()
assert widget.c.myfeed
def test_feed_generator(self):
feed = Feed(url='http://lewk.org/rss')
iter = feed.iterentries()
data = iter.next()
assert iter.next()
| apache-2.0 | Python | |
d308695c79face90ba7f908230edb5e2e2437cbd | Decrypt file using XOR | paulherman/mu | tools/xor_decryptor.py | tools/xor_decryptor.py | #! /usr/bin/env python3
import sys
import os
from ctypes import c_ubyte
keys = [0xd1, 0x73, 0x52, 0xf6, 0xd2, 0x9a, 0xcb, 0x27, 0x3e, 0xaf, 0x59, 0x31, 0x37, 0xb3, 0xe7, 0xa2]
initial_key = 0x5e
delta_key = 0x3d
if __name__ == '__main__':
for path in sys.argv[1:]:
if os.path.isfile(path):
with open(path, 'rb') as input_file:
data = input_file.read()
dec_data = []
position = 0
key = initial_key
for b in data:
dec_data.append(c_ubyte(int(b) ^ keys[position % len(keys)] - key))
key = int(b) + delta_key
dec_data = bytes(map(lambda b: b.value, dec_data))
output_path, extension = os.path.splitext(path)
output_path = output_path + '_dec' + extension
with open(output_path, 'wb') as output_file:
output_file.write(data)
else:
print('File {} does not exist.'.format(path))
| mit | Python | |
7a02f383986f347d208f69ba59526d9ce7df59bf | Add access grant functions | gryffon/SusumuTakuan,gryffon/SusumuTakuan | access.py | access.py | #
# access.py
#
# functions for dealing with access to Discord bot commands
#
def grant_user_access(user, commandclass):
new_grant = CommandClassAccess(user_id = user.id, command_class_id = commandclass.id)
session.add(new_grant)
session.commit()
def grant_role_access(role, commandclass):
new_grant = CommandClassAccess(role_id = role.id, command_class_id = commandclass.id)
session.add(new_grant)
session.commit()
| mit | Python | |
c4001a95dee88bc98eda5ce67a2f14485f4e85a5 | Add configurations | 317070/kaggle-heart | configurations/initial.py | configurations/initial.py | #TODO: add code
| mit | Python | |
226cf36e4b4d069a920785b492804b78eebc34a5 | Make non-commtrack location types administrative | dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq | corehq/apps/locations/management/commands/migrate_admin_status.py | corehq/apps/locations/management/commands/migrate_admin_status.py | # One-off migration from 2016-04-04
from optparse import make_option
from time import sleep
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import LocationType, SQLLocation
from corehq.apps.es import DomainES
from corehq.util.log import with_progress_bar
def get_affected_location_types():
commtrack_domains = (DomainES()
.commtrack_domains()
.values_list('name', flat=True))
return (LocationType.objects
.exclude(domain__in=commtrack_domains)
.filter(administrative=False))
def show_info():
location_types = get_affected_location_types()
num_locations = SQLLocation.objects.filter(location_type__in=location_types).count()
print ("There are {domains} domains, {loc_types} loc types, and "
"{locations} locations affected").format(
domains=location_types.distinct('domain').count(),
loc_types=location_types.count(),
locations=num_locations,
)
def run_migration():
for location_type in with_progress_bar(get_affected_location_types()):
if not location_type.administrative:
location_type.administrative = True
location_type.save()
sleep(1)
class Command(BaseCommand):
help = ('There are a bunch of LocationTypes on non-commtrack domains which'
'incorrectly are marked as administrative=False')
option_list = BaseCommand.option_list + (
make_option('--run', action='store_true', default=False),
)
def handle(self, *args, **options):
if options.get('run', False):
run_migration()
else:
show_info()
print "pass `--run` to run the migration"
| bsd-3-clause | Python | |
be2ac14fbb228e5a5addd393867b9b3c7267ba89 | Add and define string_permu_check problem. | nguyentu1602/pyexp | pyexp/string_permu_check.py | pyexp/string_permu_check.py | '''Module to solve the algoritm question:
Given a string S, how to count how many permutations
of S is in a longer string L, assuming, of course, that
permutations of S must be in contagious blocks in L.
I will solve it in O(len(L)) time.
'''
| mit | Python | |
a59682d4b8bd4f594dce72b0f86f2ed4096c4178 | Add missing migration file | akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr,akvo/akvo-rsr | akvo/rsr/migrations/0127_auto_20180529_0955.py | akvo/rsr/migrations/0127_auto_20180529_0955.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import akvo.rsr.fields
class Migration(migrations.Migration):
dependencies = [
('rsr', '0126_auto_20180320_1252'),
]
operations = [
migrations.AlterField(
model_name='report',
name='url',
field=akvo.rsr.fields.ValidXMLCharField(help_text='Enter the parametrized path for downloading the report. NOTE: one line only even if the input field allows for more!', max_length=200, verbose_name='url'),
preserve_default=True,
),
]
| agpl-3.0 | Python | |
4322ca998fadbd0e380626b895415bf75c4f7214 | change ordering on ability levels | numbas/editor,numbas/editor,numbas/editor | editor/migrations/0043_auto_20160303_1138.py | editor/migrations/0043_auto_20160303_1138.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editor', '0042_remove_comment_date'),
]
operations = [
migrations.AlterModelOptions(
name='abilitylevel',
options={'ordering': ('framework', 'start')},
),
]
| apache-2.0 | Python | |
774b0b3d11aaf3fd529f95233eb13e87829802f7 | create catalog script written | gnowledge/gstudio,makfire/gstudio,gnowledge/gstudio,gnowledge/gstudio,makfire/gstudio,makfire/gstudio,AvadootNachankar/gstudio,AvadootNachankar/gstudio,makfire/gstudio,AvadootNachankar/gstudio,AvadootNachankar/gstudio,gnowledge/gstudio,gnowledge/gstudio | gnowsys-ndf/gnowsys_ndf/ndf/management/commands/create_catalog.py | gnowsys-ndf/gnowsys_ndf/ndf/management/commands/create_catalog.py | import subprocess
from django.core.management.base import BaseCommand, CommandError
from gnowsys_ndf.factory_type import *
from gnowsys_ndf.ndf.models import *
class Command(BaseCommand):
def handle(self,*args,**options):
#print factory_attribute_types
GSystemTypeList = [i['name'] for i in factory_gsystem_types]
RelationTypeList = [ i.keys()[0] for i in factory_relation_types ]
AttributeTypeList = [ i.keys()[0] for i in factory_attribute_types ]
get_factory_data(factory_data)
get_gsystems(GSystemTypeList)
get_relationtypes(RelationTypeList)
get_attributetypes(AttributeTypeList)
def get_gsystems(GSystemTypeList):
var = '{"_type": "GSystemType","name":{"$in":%s}}' % GSystemTypeList
var = var.replace("'",'"')
cmd = "mongoexport --db studio-dev --collection Nodes -q '" + '%s' % var + "' --out Schema/GSystemType.json"
subprocess.Popen(cmd,stderr=subprocess.STDOUT,shell=True)
def get_relationtypes(RelationTypeList):
var = '{"_type": "RelationType","name":{"$in":%s}}' % RelationTypeList
var = var.replace("'",'"')
cmd = "mongoexport --db studio-dev --collection Nodes -q '" + '%s' % var + "' --out Schema/RelationType.json"
subprocess.Popen(cmd,stderr=subprocess.STDOUT,shell=True)
def get_attributetypes(AttributeTypeList):
var = '{"_type": "AttributeType","name":{"$in":%s}}' % AttributeTypeList
var = var.replace("'",'"')
cmd = "mongoexport --db studio-dev --collection Nodes -q '" + '%s' % var + "' --out Schema/AttributeType.json"
subprocess.Popen(cmd,stderr=subprocess.STDOUT,shell=True)
def get_factory_data(Factory_data):
final_list = []
for i in factory_data:
var = str(i)
var = var.replace("'",'"')
node = node_collection.find(i)
final_list.append(node[0]._id)
# take the rcs of the data
cmd = "mongoexport --db studio-dev --collection Nodes -q '" + '%s' % var + "' --out Schema/" + str(i["name"]) + "." +"json" + ""
subprocess.Popen(cmd,stderr=subprocess.STDOUT,shell=True)
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.pardir))
rcs_path = os.path.join(PROJECT_ROOT)
path_val = os.path.exists(rcs_path)
if path_val == False:
os.makedirs(rcs_path)
for i in final_list:
#get rcs files path and copy them to the current dir:
hr = HistoryManager()
if type(i)!= int:
try:
a = node_collection.find_one({"_id":ObjectId(i)})
if a:
path = hr.get_file_path(a)
path = path + ",v"
print path,rcs_path
cp = "cp -u " + path + " " +" --parents " + rcs_path + "/"
subprocess.Popen(cp,stderr=subprocess.STDOUT,shell=True)
except:
pass
| agpl-3.0 | Python | |
65d7e81510980d85af5b52504e6d98e45943cc36 | Create getdata.py | wikkii/raspluonto,wikkii/raspluonto,wikkii/raspluonto,wikkii/raspluonto,wikkii/raspluonto | python_flask/public_html/nuotiovahti/nuotiovahti/getdata.py | python_flask/public_html/nuotiovahti/nuotiovahti/getdata.py | import paho.mqtt.client as mqtt
import mysql.connector
from flask import Flask, jsonify, json, request
app = Flask(__name__)
app.route("/")
with app.app_context():
def fetchfrombase():
try:
cnx = mysql.connector.connect(option_files='/home/mint/connectors.cnf')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
print("connected to database")
cursor = cnx.cursor()
print("selecting")
cursor.execute("select (select count(f.detection_time) from Flame as f where detection_time >now()- INTERVAL 300 SECOND) as flame, (select count(p.detection_time) from Pir as p where detection_time >now()- INTERVAL 300 SECOND) as pir;")
datarows = jsonify(cursor.fetchall)
print (datarows)
cnx.close()
fetchfrombase()
if __name__ == "__name__":
app.run()
| mit | Python | |
26fcd91313b15ee2426aec36817a3f29734f4b93 | add diagonal gaussian demo | mattjj/pybasicbayes,michaelpacer/pybasicbayes,fivejjs/pybasicbayes | examples/demo-diaggaussian.py | examples/demo-diaggaussian.py | from __future__ import division
import numpy as np
np.seterr(invalid='raise')
from matplotlib import pyplot as plt
import copy
from pybasicbayes import models, distributions
from pybasicbayes.util.text import progprint_xrange
alpha_0=5.0
obs_hypparams=dict(
mu_0=np.zeros(2),
alphas_0=2*np.ones(2),
betas_0=np.ones(2),
nus_0=0.1*np.ones(2))
priormodel = models.Mixture(alpha_0=alpha_0,
components=[distributions.DiagonalGaussian(**obs_hypparams) for itr in range(30)])
data, _ = priormodel.generate(500)
plt.figure()
priormodel.plot()
plt.title('true model')
del priormodel
plt.figure()
plt.plot(data[:,0],data[:,1],'kx')
plt.title('data')
posteriormodel = models.Mixture(alpha_0=alpha_0,
components=[distributions.DiagonalGaussian(**obs_hypparams) for itr in range(30)])
posteriormodel.add_data(data)
allscores = []
allmodels = []
for superitr in range(5):
# Gibbs sampling to wander around the posterior
print 'Gibbs Sampling'
for itr in progprint_xrange(100):
posteriormodel.resample_model()
# mean field to lock onto a mode
print 'Mean Field'
scores = [posteriormodel.meanfield_coordinate_descent_step()
for itr in progprint_xrange(100)]
allscores.append(scores)
allmodels.append(copy.deepcopy(posteriormodel))
plt.figure()
for scores in allscores:
plt.plot(scores)
plt.title('model vlb scores vs iteration')
import operator
models_and_scores = sorted([(m,s[-1]) for m,s
in zip(allmodels,allscores)],key=operator.itemgetter(1),reverse=True)
plt.figure()
models_and_scores[0][0].plot()
plt.title('best model')
plt.show()
| mit | Python | |
f48535102b6f71ba802e9b656c73cdd3ec746a3b | Add the test_repeat_layer.py | tensor-tang/Paddle,luotao1/Paddle,lcy-seso/Paddle,chengduoZH/Paddle,chengduoZH/Paddle,QiJune/Paddle,lispc/Paddle,putcn/Paddle,lispc/Paddle,pkuyym/Paddle,luotao1/Paddle,jacquesqiao/Paddle,hedaoyuan/Paddle,lcy-seso/Paddle,chengduoZH/Paddle,putcn/Paddle,PaddlePaddle/Paddle,PaddlePaddle/Paddle,reyoung/Paddle,pengli09/Paddle,luotao1/Paddle,yu239/Paddle,yu239/Paddle,putcn/Paddle,putcn/Paddle,Canpio/Paddle,pengli09/Paddle,jacquesqiao/Paddle,reyoung/Paddle,pkuyym/Paddle,lispc/Paddle,yu239/Paddle,baidu/Paddle,luotao1/Paddle,jacquesqiao/Paddle,luotao1/Paddle,pkuyym/Paddle,jacquesqiao/Paddle,Canpio/Paddle,putcn/Paddle,tensor-tang/Paddle,lispc/Paddle,pengli09/Paddle,lispc/Paddle,hedaoyuan/Paddle,tensor-tang/Paddle,yu239/Paddle,pengli09/Paddle,lispc/Paddle,luotao1/Paddle,baidu/Paddle,reyoung/Paddle,hedaoyuan/Paddle,Canpio/Paddle,pengli09/Paddle,jacquesqiao/Paddle,luotao1/Paddle,lcy-seso/Paddle,lcy-seso/Paddle,Canpio/Paddle,pengli09/Paddle,PaddlePaddle/Paddle,Canpio/Paddle,pkuyym/Paddle,lcy-seso/Paddle,reyoung/Paddle,yu239/Paddle,QiJune/Paddle,PaddlePaddle/Paddle,baidu/Paddle,hedaoyuan/Paddle,baidu/Paddle,QiJune/Paddle,hedaoyuan/Paddle,Canpio/Paddle,yu239/Paddle,baidu/Paddle,hedaoyuan/Paddle,PaddlePaddle/Paddle,Canpio/Paddle,reyoung/Paddle,lispc/Paddle,PaddlePaddle/Paddle,pengli09/Paddle,pkuyym/Paddle,chengduoZH/Paddle,Canpio/Paddle,QiJune/Paddle,tensor-tang/Paddle,putcn/Paddle,pkuyym/Paddle,yu239/Paddle,QiJune/Paddle,reyoung/Paddle,lcy-seso/Paddle,lispc/Paddle,hedaoyuan/Paddle,pengli09/Paddle,QiJune/Paddle,tensor-tang/Paddle,chengduoZH/Paddle,PaddlePaddle/Paddle,hedaoyuan/Paddle,yu239/Paddle,jacquesqiao/Paddle | python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py | python/paddle/trainer_config_helpers/tests/configs/test_repeat_layer.py | from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
din = data_layer(name='data', size=30)
outputs(
repeat_layer(
input=din, num_repeats=10, as_row_vector=True),
repeat_layer(
input=din, num_repeats=10, act=TanhActivation(), as_row_vector=False))
| apache-2.0 | Python | |
aa5c8164b26c388b6a3a1efe8ea402a63a1c7ae8 | add migrations file | djangothon/django-db-meter,djangothon/django-db-meter,djangothon/django-db-meter | django_db_meter/migrations/0003_testmodel.py | django_db_meter/migrations/0003_testmodel.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('django_db_meter', '0002_appwiseaggregatedmetric_dbwiseaggregatedmetric_tablewiseaggregatedmetric'),
]
operations = [
migrations.CreateModel(
name='TestModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('field1', models.CharField(max_length=10)),
('filed2', models.BooleanField(default=False)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| apache-2.0 | Python | |
4db578f728a1eeda337f642513c57814fa9ec855 | create module to save script to s3 bucket | ryninho/session2s3 | session2s3.py | session2s3.py | """
Save session to S3 bucket. Ex: ses2s3.workspace_to_s3('my-project-script')
"""
from datetime import datetime
import re
import boto3
import dill
def session_to_s3(prefix, bucket_name, timestamp=True):
"""Save session to S3 bucket. Login via ~/.aws/credentials as per boto3."""
if timestamp:
now_str = str(datetime.now())
date_time_str = re.sub('[^0-9a-zA-Z]+', '_', now_str)
filename = prefix + "_" + date_time_str + ".pkl"
else:
filename = prefix + ".pkl"
dill.dump_session(filename)
s3 = boto3.resource('s3')
s3.meta.client.upload_file(filename, bucket_name, filename)
return filename
| mit | Python | |
a6ac0949b32e8e02d26fe0eff159fd057c11c8e2 | rename test_shore.py in test_shore_odf.py | FrancoisRheaultUS/dipy,Messaoud-Boudjada/dipy,JohnGriffiths/dipy,nilgoyyou/dipy,Messaoud-Boudjada/dipy,rfdougherty/dipy,beni55/dipy,demianw/dipy,sinkpoint/dipy,rfdougherty/dipy,StongeEtienne/dipy,demianw/dipy,JohnGriffiths/dipy,mdesco/dipy,jyeatman/dipy,matthieudumont/dipy,villalonreina/dipy,samuelstjean/dipy,oesteban/dipy,villalonreina/dipy,samuelstjean/dipy,samuelstjean/dipy,nilgoyyou/dipy,mdesco/dipy,matthieudumont/dipy,StongeEtienne/dipy,sinkpoint/dipy,FrancoisRheaultUS/dipy,oesteban/dipy,beni55/dipy,jyeatman/dipy | dipy/reconst/tests/test_shore_odf.py | dipy/reconst/tests/test_shore_odf.py | import numpy as np
from dipy.data import get_data, two_shells_voxels, three_shells_voxels, get_sphere
from dipy.data.fetcher import (fetch_isbi2013_2shell, read_isbi2013_2shell,
fetch_sherbrooke_3shell, read_sherbrooke_3shell)
from dipy.reconst.shore import ShoreModel
from dipy.reconst.shm import QballModel, sh_to_sf
from dipy.reconst.peaks import gfa, peak_directions
from dipy.core.gradients import gradient_table
from numpy.testing import (assert_equal,
assert_almost_equal,
run_module_suite,
assert_array_equal,
assert_raises)
from dipy.sims.voxel import SticksAndBall, multi_tensor
from dipy.core.subdivide_octahedron import create_unit_sphere
from dipy.core.sphere_stats import angular_similarity
from dipy.reconst.tests.test_dsi import sticks_and_ball_dummies
import nibabel as nib
def test_shore_odf():
    """SHORE ODF recovers a synthetic 90-degree two-fiber crossing."""
    fetch_isbi2013_2shell()
    img, gtab=read_isbi2013_2shell()

    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')

    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    # Noise-free two-stick voxel with a known 90-degree crossing.
    data, golden_directions = SticksAndBall(gtab, d=0.0015,
        S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    asm = ShoreModel(gtab,radial_order=6, zeta=700, lambdaN=1e-8, lambdaL=1e-8)
    # symmetric724
    asmfit = asm.fit(data)
    odf = asmfit.odf(sphere)
    odf_sh = asmfit.odf_sh()
    # The direct ODF and its spherical-harmonic round trip must agree.
    odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)

    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)

    # 5 subdivisions
    odf = asmfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions), 2, 1)

    # Degenerate configurations: either the peak count matches the ground
    # truth, or the ODF is essentially isotropic (low GFA).
    sb_dummies = sticks_and_ball_dummies(gtab)
    for sbd in sb_dummies:
        data, golden_directions = sb_dummies[sbd]
        asmfit = asm.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_multivox_shore():
    """Fitting a 3-D patch keeps the spatial shape and yields real coefficients."""
    fetch_sherbrooke_3shell()
    img, gtab=read_sherbrooke_3shell()
    test = img.get_data()
    # Small 20x30x1 sub-volume to keep the fit fast.
    data = test[45:65, 35:65, 33:34]

    radial_order = 4
    zeta = 700
    asm = ShoreModel(gtab, radial_order=radial_order, zeta=zeta, lambdaN=1e-8, lambdaL=1e-8)
    asmfit = asm.fit(data)
    c_shore=asmfit.shore_coeff
    assert_equal(c_shore.shape[0:3], data.shape[0:3])
    assert_equal(np.alltrue(np.isreal(c_shore)), True)
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause | Python | |
76ac913fc0862421b7e4ef1f32994c8084a21f86 | Add influx component | shaftoe/home-assistant,mKeRix/home-assistant,kyvinh/home-assistant,miniconfig/home-assistant,alexmogavero/home-assistant,ewandor/home-assistant,PetePriority/home-assistant,Duoxilian/home-assistant,titilambert/home-assistant,open-homeautomation/home-assistant,emilhetty/home-assistant,philipbl/home-assistant,Theb-1/home-assistant,deisi/home-assistant,bdfoster/blumate,oandrew/home-assistant,nevercast/home-assistant,instantchow/home-assistant,jamespcole/home-assistant,leoc/home-assistant,MartinHjelmare/home-assistant,Julian/home-assistant,nugget/home-assistant,luxus/home-assistant,Julian/home-assistant,Duoxilian/home-assistant,JshWright/home-assistant,postlund/home-assistant,sffjunkie/home-assistant,balloob/home-assistant,hexxter/home-assistant,fbradyirl/home-assistant,robbiet480/home-assistant,Smart-Torvy/torvy-home-assistant,HydrelioxGitHub/home-assistant,partofthething/home-assistant,hexxter/home-assistant,qedi-r/home-assistant,deisi/home-assistant,shaftoe/home-assistant,devdelay/home-assistant,tboyce1/home-assistant,tboyce021/home-assistant,betrisey/home-assistant,sffjunkie/home-assistant,coteyr/home-assistant,tboyce021/home-assistant,jaharkes/home-assistant,open-homeautomation/home-assistant,ct-23/home-assistant,toddeye/home-assistant,leppa/home-assistant,ct-23/home-assistant,HydrelioxGitHub/home-assistant,philipbl/home-assistant,srcLurker/home-assistant,Julian/home-assistant,adrienbrault/home-assistant,LinuxChristian/home-assistant,JshWright/home-assistant,tinloaf/home-assistant,tchellomello/home-assistant,Theb-1/home-assistant,sffjunkie/home-assistant,DavidLP/home-assistant,tboyce1/home-assistant,alexmogavero/home-assistant,MungoRae/home-assistant,florianholzapfel/home-assistant,hmronline/home-assistant,soldag/home-assistant,adrienbrault/home-assistant,lukas-hetzenecker/home-assistant,devdelay/home-assistant,stefan-jonasson/home-assistant,xifle/home-assistant,auduny/home-assistant,morphis/home-assi
stant,kennedyshead/home-assistant,robjohnson189/home-assistant,jawilson/home-assistant,MungoRae/home-assistant,fbradyirl/home-assistant,shaftoe/home-assistant,PetePriority/home-assistant,instantchow/home-assistant,shaftoe/home-assistant,balloob/home-assistant,alexmogavero/home-assistant,rohitranjan1991/home-assistant,ma314smith/home-assistant,jnewland/home-assistant,balloob/home-assistant,LinuxChristian/home-assistant,rohitranjan1991/home-assistant,dmeulen/home-assistant,joopert/home-assistant,bdfoster/blumate,fbradyirl/home-assistant,hmronline/home-assistant,oandrew/home-assistant,ct-23/home-assistant,happyleavesaoc/home-assistant,tinloaf/home-assistant,miniconfig/home-assistant,instantchow/home-assistant,keerts/home-assistant,sfam/home-assistant,jnewland/home-assistant,justyns/home-assistant,MungoRae/home-assistant,GenericStudent/home-assistant,bdfoster/blumate,Zyell/home-assistant,oandrew/home-assistant,aequitas/home-assistant,keerts/home-assistant,sander76/home-assistant,mezz64/home-assistant,Duoxilian/home-assistant,lukas-hetzenecker/home-assistant,dmeulen/home-assistant,JshWright/home-assistant,miniconfig/home-assistant,open-homeautomation/home-assistant,w1ll1am23/home-assistant,dmeulen/home-assistant,coteyr/home-assistant,auduny/home-assistant,philipbl/home-assistant,sfam/home-assistant,caiuspb/home-assistant,ct-23/home-assistant,jawilson/home-assistant,emilhetty/home-assistant,xifle/home-assistant,happyleavesaoc/home-assistant,jaharkes/home-assistant,eagleamon/home-assistant,Zac-HD/home-assistant,hexxter/home-assistant,betrisey/home-assistant,keerts/home-assistant,persandstrom/home-assistant,pschmitt/home-assistant,HydrelioxGitHub/home-assistant,caiuspb/home-assistant,ma314smith/home-assistant,GenericStudent/home-assistant,Theb-1/home-assistant,ewandor/home-assistant,srcLurker/home-assistant,mikaelboman/home-assistant,Zyell/home-assistant,varunr047/homefile,justyns/home-assistant,caiuspb/home-assistant,kyvinh/home-assistant,Zyell/home-assistant,MungoRae/home
-assistant,kennedyshead/home-assistant,robjohnson189/home-assistant,sander76/home-assistant,Zac-HD/home-assistant,nnic/home-assistant,xifle/home-assistant,DavidLP/home-assistant,joopert/home-assistant,sffjunkie/home-assistant,varunr047/homefile,jnewland/home-assistant,deisi/home-assistant,nevercast/home-assistant,jabesq/home-assistant,kyvinh/home-assistant,happyleavesaoc/home-assistant,luxus/home-assistant,betrisey/home-assistant,emilhetty/home-assistant,devdelay/home-assistant,FreekingDean/home-assistant,ma314smith/home-assistant,bdfoster/blumate,tinloaf/home-assistant,toddeye/home-assistant,oandrew/home-assistant,Danielhiversen/home-assistant,mikaelboman/home-assistant,turbokongen/home-assistant,jabesq/home-assistant,MartinHjelmare/home-assistant,emilhetty/home-assistant,robjohnson189/home-assistant,Teagan42/home-assistant,w1ll1am23/home-assistant,varunr047/homefile,miniconfig/home-assistant,FreekingDean/home-assistant,leoc/home-assistant,bdfoster/blumate,florianholzapfel/home-assistant,home-assistant/home-assistant,persandstrom/home-assistant,mKeRix/home-assistant,aequitas/home-assistant,morphis/home-assistant,nugget/home-assistant,srcLurker/home-assistant,leppa/home-assistant,eagleamon/home-assistant,tboyce1/home-assistant,hexxter/home-assistant,morphis/home-assistant,Danielhiversen/home-assistant,nkgilley/home-assistant,jaharkes/home-assistant,nnic/home-assistant,eagleamon/home-assistant,jaharkes/home-assistant,jamespcole/home-assistant,stefan-jonasson/home-assistant,luxus/home-assistant,florianholzapfel/home-assistant,happyleavesaoc/home-assistant,PetePriority/home-assistant,Teagan42/home-assistant,Zac-HD/home-assistant,JshWright/home-assistant,robjohnson189/home-assistant,Smart-Torvy/torvy-home-assistant,hmronline/home-assistant,postlund/home-assistant,molobrakos/home-assistant,stefan-jonasson/home-assistant,emilhetty/home-assistant,nevercast/home-assistant,keerts/home-assistant,MartinHjelmare/home-assistant,stefan-jonasson/home-assistant,mikaelboman/home-ass
istant,coteyr/home-assistant,mikaelboman/home-assistant,soldag/home-assistant,pschmitt/home-assistant,LinuxChristian/home-assistant,ct-23/home-assistant,robbiet480/home-assistant,aronsky/home-assistant,tboyce1/home-assistant,tchellomello/home-assistant,nnic/home-assistant,Smart-Torvy/torvy-home-assistant,mezz64/home-assistant,betrisey/home-assistant,turbokongen/home-assistant,srcLurker/home-assistant,morphis/home-assistant,ma314smith/home-assistant,dmeulen/home-assistant,sfam/home-assistant,mKeRix/home-assistant,deisi/home-assistant,Smart-Torvy/torvy-home-assistant,qedi-r/home-assistant,aoakeson/home-assistant,Duoxilian/home-assistant,philipbl/home-assistant,sffjunkie/home-assistant,aequitas/home-assistant,titilambert/home-assistant,mKeRix/home-assistant,persandstrom/home-assistant,florianholzapfel/home-assistant,rohitranjan1991/home-assistant,aoakeson/home-assistant,LinuxChristian/home-assistant,deisi/home-assistant,alexmogavero/home-assistant,hmronline/home-assistant,aronsky/home-assistant,Zac-HD/home-assistant,LinuxChristian/home-assistant,molobrakos/home-assistant,nkgilley/home-assistant,aoakeson/home-assistant,mikaelboman/home-assistant,xifle/home-assistant,justyns/home-assistant,home-assistant/home-assistant,auduny/home-assistant,kyvinh/home-assistant,sdague/home-assistant,MungoRae/home-assistant,varunr047/homefile,devdelay/home-assistant,sdague/home-assistant,Cinntax/home-assistant,jabesq/home-assistant,hmronline/home-assistant,open-homeautomation/home-assistant,partofthething/home-assistant,leoc/home-assistant,nugget/home-assistant,leoc/home-assistant,Cinntax/home-assistant,DavidLP/home-assistant,eagleamon/home-assistant,molobrakos/home-assistant,ewandor/home-assistant,varunr047/homefile,Julian/home-assistant,jamespcole/home-assistant | homeassistant/components/influx.py | homeassistant/components/influx.py | """
homeassistant.components.influx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
InfluxDB component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influx/
Configuration:
influx:
host: localhost
port: 8086
dbname: home_assistant
dbuser: DB_USER
dbuser_password: DB_USER_PASSWORD
"""
import logging
import requests
import socket
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (MATCH_ALL)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "influx"
DEPENDENCIES = ['recorder']

# Module-level handle to the Influx wrapper, assigned by setup().
INFLUX_CLIENT = None

DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'

REQUIREMENTS = ['influxdb==2.10.0']

# Configuration keys accepted under the 'influx:' section.
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
def setup(hass, config):
    """Set up the Influx component.

    Validates the configuration, connects to the InfluxDB server, ensures
    the target database exists, and registers a bus listener that forwards
    every state-changing event to Influx.  Returns False on connection or
    configuration failure so Home Assistant skips the component.
    """
    from influxdb import exceptions

    if not validate_config(config, {DOMAIN: ['host']}, _LOGGER):
        return False

    conf = config[DOMAIN]

    host = conf[CONF_HOST]
    port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
    dbname = util.convert(conf.get(CONF_DB_NAME), str, DEFAULT_DATABASE)
    username = util.convert(conf.get(CONF_USERNAME), str)
    password = util.convert(conf.get(CONF_PASSWORD), str)

    global INFLUX_CLIENT
    try:
        INFLUX_CLIENT = Influx(host, port, username, password, dbname)
    except (socket.gaierror, requests.exceptions.ConnectionError):
        _LOGGER.error("Database is not accessible. "
                      "Please check your entries in the configuration file.")
        return False

    # Creating an already-existing database raises; treat as "already set up".
    try:
        INFLUX_CLIENT.create_database(dbname)
    except exceptions.InfluxDBClientError:
        _LOGGER.info("Database '%s' already exists", dbname)

    INFLUX_CLIENT.switch_user(username, password)
    INFLUX_CLIENT.switch_database(dbname)

    def event_listener(event):
        """Listen for new messages on the bus and send them to Influx."""
        event_data = event.as_dict()
        json_body = []

        # BUG FIX: the original used `is not 'time_changed'`, an identity
        # comparison against a string literal whose result depends on
        # interning; value inequality is what is meant here.
        if event_data['event_type'] != 'time_changed':
            try:
                entity_id = event_data['data']['entity_id']
                new_state = event_data['data']['new_state']
                # entity_id is "<domain>.<object_id>"; the object id becomes
                # the measurement name and the domain a tag.
                json_body = [
                    {
                        "measurement": entity_id.split('.')[1],
                        "tags": {
                            "type": entity_id.split('.')[0],
                        },
                        "time": event_data['time_fired'],
                        "fields": {
                            "value": new_state.state
                        }
                    }
                ]
            # Events without entity data or a new state are simply skipped.
            except KeyError:
                pass

        if json_body:
            INFLUX_CLIENT.write_data(json_body)

    hass.bus.listen(MATCH_ALL, event_listener)

    return True
# pylint: disable=too-many-arguments
class Influx(object):
    """Thin wrapper around an InfluxDBClient connection."""

    def __init__(self, host, port, username, password, dbname):
        """Open a client connection with the given credentials and database."""
        from influxdb import InfluxDBClient

        self._host = host
        self._port = port
        self._username = username
        self._password = password
        self._dbname = dbname
        self.client = InfluxDBClient(self._host, self._port, self._username,
                                     self._password, self._dbname)

    def switch_user(self, username, password):
        """ Switch the user to the given one. """
        self.client.switch_user(username, password)

    def create_database(self, dbname):
        """ Creates a new Influx database. """
        self.client.create_database(dbname)

    def switch_database(self, dbname):
        """ Switch to the given database. """
        return self.client.switch_database(dbname)

    def write_data(self, data):
        """ Writes data points to the Influx database. """
        self.client.write_points(data)
| mit | Python | |
730c8bf23dbd687b3070eae58378ebcccf523736 | add 'split' filter | serge-name/myansible,serge-name/myansible,serge-name/myansible | filter_plugins/split.py | filter_plugins/split.py | class FilterModule(object):
''' A comment '''
def filters(self):
return {
'split': self.split,
}
def split(self, input_value, separator):
return input_value.split(separator)
| mit | Python | |
938a6fabbc67feb409f6874966b30cb5c3e927a6 | Create myotpsecrets.py | mortn/docker-py3bottle | app/myotpsecrets.py | app/myotpsecrets.py | ttp_user = 'admin'
http_pass = 'admin'
codes = {
'account1': 'pefjehegNusherewSunaumIcwoafIfyi',
'account2': 'memJarrIfomWeykvajLyutIkJeafcoyt',
'account3': 'rieshjaynEgDoipEjkecPopHiWighath',
}
| mit | Python | |
eb396c12cccbda03a46381b5a54ff55d8f876152 | Fix NameError | untitaker/vdirsyncer,untitaker/vdirsyncer,untitaker/vdirsyncer,hobarrera/vdirsyncer,hobarrera/vdirsyncer | vdirsyncer/__init__.py | vdirsyncer/__init__.py | # -*- coding: utf-8 -*-
'''
vdirsyncer is a synchronization tool for vdir. See the README for more details.
'''
from __future__ import print_function
PROJECT_HOME = 'https://github.com/untitaker/vdirsyncer'
DOCS_HOME = 'https://vdirsyncer.readthedocs.org/en/stable'
try:
from .version import version as __version__ # noqa
except ImportError: # pragma: no cover
raise ImportError(
'Failed to find (autogenerated) version.py. '
'This might be because you are installing from GitHub\'s tarballs, '
'use the PyPI ones.'
)
def _detect_faulty_requests():  # pragma: no cover
    """Exit with a helpful message if a Debian-patched requests is broken."""
    import requests

    # 'dist-packages' in the module path indicates a Debian/Ubuntu system
    # package; only those installs are affected by the bug probed below.
    if 'dist-packages' not in requests.__file__:
        return

    text = (
        '{e}\n\n'
        'This most likely means you are running into a bug specific to '
        'Debian-based distributions.\n\n'
        'Consult {d}/problems.html#requests-related-importerrors-on-debian'
        '-based-distributions on how to deal with this, or use a different '
        'operating system.'
    )

    # Importing GuessAuth is the probe: it fails on the broken packaging.
    try:
        from requests_toolbelt.auth.guess import GuessAuth  # noqa
    except ImportError as e:
        import sys
        print(text.format(e=str(e), d=DOCS_HOME), file=sys.stderr)
        sys.exit(1)


# Run the check at import time, after DOCS_HOME has been defined above.
_detect_faulty_requests()
| # -*- coding: utf-8 -*-
'''
vdirsyncer is a synchronization tool for vdir. See the README for more details.
'''
from __future__ import print_function
try:
from .version import version as __version__ # noqa
except ImportError: # pragma: no cover
raise ImportError(
'Failed to find (autogenerated) version.py. '
'This might be because you are installing from GitHub\'s tarballs, '
'use the PyPI ones.'
)
def _detect_faulty_requests(): # pragma: no cover
import requests
if 'dist-packages' not in requests.__file__:
return
text = (
'{e}\n\n'
'This most likely means you are running into a bug specific to '
'Debian-based distributions.\n\n'
'Consult {d}/problems.html#requests-related-importerrors-on-debian'
'-based-distributions on how to deal with this, or use a different '
'operating system.'
)
try:
from requests_toolbelt.auth.guess import GuessAuth # noqa
except ImportError as e:
import sys
print(text.format(e=str(e), d=DOCS_HOME), file=sys.stderr)
sys.exit(1)
_detect_faulty_requests()
PROJECT_HOME = 'https://github.com/untitaker/vdirsyncer'
DOCS_HOME = 'https://vdirsyncer.readthedocs.org/en/stable'
| mit | Python |
0e3711000bcf7d59f75baa68f357f49f5246f812 | Add video capturing functionality | vladimiroff/humble-media,vladimiroff/humble-media | humblemedia/resources/utils/video_capture.py | humblemedia/resources/utils/video_capture.py | import subprocess
import re
def get_video_duration(filename):
    """Return the duration of the video *filename* in whole seconds.

    Parses the "Duration: HH:MM:SS" line from ffmpeg's banner output.
    Raises ValueError when no parsable duration is reported.
    """
    # SECURITY NOTE: *filename* is interpolated into a shell command
    # (shell=True); do not pass untrusted file names to this function.
    command = 'ffmpeg -i %s 2>&1 | grep "Duration"' % filename
    result = subprocess.Popen(
        command,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    stdout_lines = result.stdout.readlines()
    # BUG FIX: the original executed ``raise 'Invalid video file'`` (raising
    # a string is a TypeError at runtime) and indexed stdout_lines[0]
    # unconditionally, which crashed with IndexError when grep found nothing.
    if not stdout_lines:
        raise ValueError('Invalid video file')
    duration_line = stdout_lines[0].decode()
    match = re.match(
        r'\s*Duration:\s*(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d+)',
        duration_line)
    if not match:
        raise ValueError('Invalid video file')
    groups = match.groupdict()
    hours = int(groups.get('hours'))
    minutes = int(groups.get('minutes'))
    seconds = int(groups.get('seconds'))
    return hours * 3600 + (minutes * 60) + seconds
def get_video_capture(filename, at_second, output_file):
    """Write one frame of *filename*, taken at *at_second*, to *output_file*.

    Fire-and-forget: the ffmpeg process is started but never waited on, so
    the output file may not exist yet when this function returns.
    """
    # NOTE(review): filename/output_file are interpolated into a shell
    # command (shell=True) -- do not pass untrusted paths.
    command = 'ffmpeg -ss %s -i %s -vframes 1 %s' % (at_second, filename, output_file)
    subprocess.Popen(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True)
def get_random_video_captures(filename, count, output_files):
    """Capture up to *count* frames, evenly spaced through the video.

    Despite the name, capture times are deterministic: they start at second
    5 and advance by (duration - 5) / count seconds.  One name from
    *output_files* is consumed per capture; extra names are ignored.
    """
    INITIAL_CAPTURE_SECOND = 5
    duration = get_video_duration(filename)
    capture_window = (duration - INITIAL_CAPTURE_SECOND) / count
    # BUG FIX: for short videos or large counts, int(capture_window) could
    # be 0 (or negative), and range() raises ValueError on a zero step.
    # Clamp the step to at least one second.
    step = max(1, int(capture_window))
    capture_tuples = zip(range(INITIAL_CAPTURE_SECOND, duration, step),
                         output_files)
    for (at_second, output_file) in capture_tuples:
        get_video_capture(filename, at_second, output_file)
| mit | Python | |
ff63bb34aaf01cd9cd7eff89c0c94135f896640f | Create mqtt_easydriver_stepper.py | pumanzor/iot-redlibre,pumanzor/iot-redlibre | linkit/easydriver/mqtt_easydriver_stepper.py | linkit/easydriver/mqtt_easydriver_stepper.py | import paho.mqtt.client as mqtt
import json, time
import mraa
# Hardware handles: PWM pin 19 supplies the step pulses; GPIO pin 0 is
# configured as an output (written below when handling motor commands).
pin19 = mraa.Pwm(19)
pin0 = mraa.Gpio(0)
pin0.dir(mraa.DIR_OUT)

# ----- CHANGE THESE FOR YOUR SETUP -----
MQTT_HOST = "190.97.168.236"
MQTT_PORT = 1883
def on_connect(client, userdata, rc):
    """Paho callback: (re)subscribe once the broker connection is up."""
    print("\nConnected with result code " + str(rc) + "\n")

    # Subscribing in on_connect() means that if we lose the connection and
    # reconnect then subscriptions will be renewed.
    #client.subscribe("/pyxo/xyusers/{USERNAME}/{APIKEY}/iot/control/".format(**vars()), 2) # Connect to everything in /mcu topic
    client.subscribe("/pryxo/yxusers/motor/control/")
    print("Subscribed to homecontrol")
def on_message_iotrl(client, userdata, msg):
    """Handle motor commands: "m1"/"m0" pulse the stepper in opposite
    directions for ~2 s, "m2" stops it.

    Pin roles inferred from usage (confirm against the wiring): pin0 looks
    like the driver enable input, pin1 the direction line, and PWM pin 19
    the step-pulse source.
    """
    print("\n\t* Linkit UPDATED ("+msg.topic+"): " + str(msg.payload))

    if msg.payload == "m1":
        # Enable driver, direction low, emit a 300 us-period pulse train.
        pin0.write(0)
        pin1 = mraa.Gpio(1)
        pin1.dir(mraa.DIR_OUT)
        pin1.write(0)
        pin19.period_us(300)
        pin19.enable(True)
        pin19.write(0.1)
        time.sleep(2)
        client.publish("/pryxo/yxusers/iot/status/", "derecha", 2)

    if msg.payload == "m0":
        # Same pulse train with the direction pin high (opposite rotation).
        pin1 = mraa.Gpio(1)
        pin1.dir(mraa.DIR_OUT)
        pin1.write(1)
        pin19.period_us(300)
        pin19.enable(True)
        pin19.write(0.1)
        time.sleep(2)
        client.publish("/pryxo/yxusers/iot/status/", "izquierda", 2)

    if msg.payload == "m2":
        # Drive pin0 high to stop/disable the driver and report it.
        pin0.write(1)
        client.publish("/pryxo/yxusers/iot/status/", "STOP", 2)
def command_error():
    """Report an unrecognized command (currently unused by the handlers)."""
    print("Error: Unknown command")
client = mqtt.Client(client_id="linkit7688-patio")

# Callback declarations (functions run based on certain messages)
client.on_connect = on_connect
client.message_callback_add("/pryxo/yxusers/motor/control/", on_message_iotrl)

# This is where the MQTT service connects and starts listening for messages
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start()  # Background thread to call loop() automatically

# Main program loop: keep the main thread alive while the MQTT network
# loop runs on the background thread started above.
while True:
    time.sleep(10)
| mit | Python | |
2ae6f4183b2096287f8155d7db7e2ed0444618c4 | Add first version of Day One entry splitter | rdocking/bits_and_bobs | day_one_entry_splitter.py | day_one_entry_splitter.py | #!/usr/bin/env python
# encoding: utf-8
"""
day_one_entry_splitter.py
Created by Rod Docking on 2017-01-01.
All rights reserved.
"""
import sys
def main():
    """Split entries from Day One export into separate files.

    NOTE(review): only the detection half is implemented so far -- entry
    header lines are printed, but no per-entry files are written yet.
    """
    # Entry headers look like:
    # "Date:	February 14, 2005 at 9:00 AM"
    # Need to:
    # Loop through all the lines in the input file
    # When we hit a new date, open a new file with appropriate name
    with open(sys.argv[1]) as in_handle:
        for line in in_handle:
            if "Date:" in line:
                print line


if __name__ == '__main__':
    main()
| mit | Python | |
d73070f268e240439c71ffd193a18c477403dd2e | Add project model class | JrGoodle/clowder,JrGoodle/clowder,JrGoodle/clowder | clowder/project.py | clowder/project.py | import argparse
import sys
class Project(object):
def __init__(self, name, path, url):
self.name = name
self.path = path
self.url = url
| mit | Python | |
dd708956ed19a38be09597cae94172e0b9863623 | Add signing thanks @jmcarp | kwierman/waterbutler,TomBaxter/waterbutler,Ghalko/waterbutler,rafaeldelucena/waterbutler,icereval/waterbutler,CenterForOpenScience/waterbutler,rdhyee/waterbutler,RCOSDP/waterbutler,hmoco/waterbutler,chrisseto/waterbutler,cosenal/waterbutler,Johnetordoff/waterbutler,felliott/waterbutler | waterbutler/signing.py | waterbutler/signing.py | # encoding: utf-8
import hmac
import json
import base64
import collections
from waterbutler import settings
# Written by @jmcarp originally
def order_recursive(data):
    """Return a copy of *data* with every nested dict's keys sorted.

    Guarantees a consistent ordering of JSON payloads, so equal payloads
    always serialize to identical byte strings before signing.
    """
    if isinstance(data, dict):
        return collections.OrderedDict(
            (key, order_recursive(data[key]))
            for key in sorted(data)
        )
    if isinstance(data, list):
        return [order_recursive(item) for item in data]
    return data
def serialize_payload(payload):
    """Serialize *payload* to base64-encoded JSON with deterministic key order."""
    ordered = order_recursive(payload)
    return base64.b64encode(json.dumps(ordered).encode('UTF-8'))


def unserialize_payload(message):
    """Inverse of serialize_payload: decode base64 JSON back to ordered data."""
    payload = json.loads(base64.b64decode(message))
    return order_recursive(payload)
class Signer(object):
    """HMAC signer/verifier for raw messages and JSON-serializable payloads."""

    def __init__(self, secret, digest):
        """
        :param secret: shared HMAC key (bytes)
        :param digest: hash constructor, e.g. hashlib.sha256
        """
        assert callable(digest)
        self.secret = secret
        self.digest = digest

    def sign_message(self, message):
        """Return the hex HMAC digest of *message* (bytes)."""
        return hmac.new(
            key=self.secret,
            digestmod=self.digest,
            msg=message,
        ).hexdigest()

    def sign_payload(self, payload):
        """Serialize *payload* deterministically and sign it.

        Returns a (message, signature) pair.
        """
        message = serialize_payload(payload)
        signature = self.sign_message(message)
        return message, signature

    def verify_message(self, signature, message):
        """Check *signature* against *message* in constant time."""
        expected = self.sign_message(message)
        # SECURITY FIX: use a constant-time comparison so signature checks
        # do not leak timing information (the original used plain ==).
        return hmac.compare_digest(signature, expected)

    def verify_payload(self, signature, payload):
        """Check *signature* against the canonical serialization of *payload*."""
        _, expected = self.sign_payload(payload)
        return hmac.compare_digest(signature, expected)
# default_signer = Signer(settings.DEFAULT_HMAC_KEY, settings.DEFAULT_HMAC_ALGORITHM)
# Module-level signer configured from the OSF HMAC settings.
osf_signer = Signer(settings.OSF_HMAC_SECRET, settings.OSF_HMAC_ALGORITHM)
| apache-2.0 | Python | |
553624fcd4d7e8a4c561b182967291a1cc44ade9 | Add algorithm for Casimir Effect (#7141) | TheAlgorithms/Python | physics/casimir_effect.py | physics/casimir_effect.py | """
Title : Finding the value of magnitude of either the Casimir force, the surface area
of one of the plates or distance between the plates provided that the other
two parameters are given.
Description : In quantum field theory, the Casimir effect is a physical force
acting on the macroscopic boundaries of a confined space which arises from the
quantum fluctuations of the field. It is a physical force exerted between separate
objects, which is due to neither charge, gravity, nor the exchange of particles,
but instead is due to resonance of all-pervasive energy fields in the intervening
space between the objects. Since the strength of the force falls off rapidly with
distance it is only measurable when the distance between the objects is extremely
small. On a submicron scale, this force becomes so strong that it becomes the
dominant force between uncharged conductors.
Dutch physicist Hendrik B. G. Casimir first proposed the existence of the force,
and he formulated an experiment to detect it in 1948 while participating in research
at Philips Research Labs. The classic form of his experiment used a pair of uncharged
parallel metal plates in a vacuum, and successfully demonstrated the force to within
15% of the value he had predicted according to his theory.
The Casimir force F for idealized, perfectly conducting plates of surface area
A square meter and placed at a distance of a meter apart with vacuum between
them is expressed as -
F = - ((Reduced Planck Constant ℏ) * c * Pi^2 * A) / (240 * a^4)
Here, the negative sign indicates the force is attractive in nature. For the ease
of calculation, only the magnitude of the force is considered.
Source :
- https://en.wikipedia.org/wiki/Casimir_effect
- https://www.cs.mcgill.ca/~rwest/wikispeedia/wpcd/wp/c/Casimir_effect.htm
- Casimir, H. B. ; Polder, D. (1948) "The Influence of Retardation on the
London-van der Waals Forces", Physical Review, vol. 73, Issue 4, pp. 360-372
"""
from __future__ import annotations
from math import pi
# Physical constants used by the Casimir relation below.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of hbar : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the idealized Casimir relation for whichever argument is zero.

    Uses the magnitude-only form F = (hbar * c * pi^2 * A) / (240 * a^4).
    Exactly one of *force*, *area*, *distance* must be 0; its value is
    computed from the other two and returned as a single-entry dict.

    >>> casimir_force(force = 0, area = 4, distance = 0.03)
    {'force': 6.4248189174864216e-21}
    >>> casimir_force(force = 2635e-13, area = 0.0023, distance = 0)
    {'distance': 1.0323056015031114e-05}
    >>> casimir_force(force = 2737e-21, area = 0, distance = 0.0023746)
    {'area': 0.06688838837354052}
    >>> casimir_force(force = 3457e-12, area = 0, distance = 0)
    Traceback (most recent call last):
    ...
    ValueError: One and only one argument must be 0
    >>> casimir_force(force = 3457e-12, area = 0, distance = -0.00344)
    Traceback (most recent call last):
    ...
    ValueError: Distance can not be negative
    >>> casimir_force(force = -912e-12, area = 0, distance = 0.09374)
    Traceback (most recent call last):
    ...
    ValueError: Magnitude of force can not be negative
    """
    # Validation, in the same order as the original so multi-fault inputs
    # raise the same error.
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")

    # hbar * c * pi^2 -- the constant part of the Casimir relation.
    hc_pi_sq = REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2

    if force == 0:
        return {"force": hc_pi_sq * area / (240 * distance**4)}
    if area == 0:
        return {"area": 240 * force * distance**4 / hc_pi_sq}
    # Exactly one argument is zero, so it must be the distance here.
    return {"distance": (hc_pi_sq * area / (240 * force)) ** (1 / 4)}
# Run doctest
if __name__ == "__main__":
    import doctest
    doctest.testmod()  # verify the examples in casimir_force's docstring
| mit | Python | |
cad438214ec55684bfc7d5f1d5383109934f29ff | add weboob.tools.application.prompt.PromptApplication | Konubinix/weboob,nojhan/weboob-devel,nojhan/weboob-devel,laurent-george/weboob,sputnick-dev/weboob,laurent-george/weboob,sputnick-dev/weboob,frankrousseau/weboob,frankrousseau/weboob,Boussadia/weboob,laurent-george/weboob,Konubinix/weboob,Boussadia/weboob,willprice/weboob,nojhan/weboob-devel,eirmag/weboob,franek/weboob,Konubinix/weboob,Boussadia/weboob,willprice/weboob,yannrouillard/weboob,franek/weboob,frankrousseau/weboob,yannrouillard/weboob,eirmag/weboob,franek/weboob,RouxRC/weboob,willprice/weboob,eirmag/weboob,yannrouillard/weboob,sputnick-dev/weboob,RouxRC/weboob,Boussadia/weboob,RouxRC/weboob | weboob/tools/application/prompt.py | weboob/tools/application/prompt.py | # -*- coding: utf-8 -*-
"""
Copyright(C) 2010 Romain Bignon
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
import sched
import time
import select
import sys
from weboob import Weboob
from weboob.scheduler import Scheduler
from .console import ConsoleApplication
class PromptScheduler(Scheduler):
    """Scheduler that, instead of sleeping idly between scheduled events,
    renders a prompt and waits for a line on stdin (up to the sleep
    deadline) so the console application stays interactive."""
    def __init__(self, prompt_cb, read_cb):
        # prompt_cb: callable that renders the prompt before blocking.
        # read_cb: callable invoked with each stripped input line.
        self.scheduler = sched.scheduler(time.time, self.sleep)
        self.read_cb = read_cb
        self.prompt_cb = prompt_cb
    def sleep(self, d):
        # Delay function handed to sched.scheduler: block for at most `d`
        # seconds, waking early if the user types something.
        self.prompt_cb()
        try:
            # A select timeout of None (when d is 0/None) blocks indefinitely.
            read, write, excepts = select.select([sys.stdin], [], [], d or None)
            if read:
                line = sys.stdin.readline()
                if not line:
                    # EOF on stdin -- want_stop is presumably provided by
                    # the Scheduler base class; confirm.
                    self.want_stop()
                else:
                    self.read_cb(line.strip())
        except KeyboardInterrupt:
            # Keep the terminal tidy after Ctrl-C.
            sys.stdout.write('\n')
class PromptApplication(ConsoleApplication):
    """Console application driven by an interactive '> ' prompt; input
    lines are split on whitespace and dispatched as commands while the
    weboob event loop keeps running."""
    def create_weboob(self):
        return Weboob(self.APPNAME, scheduler=PromptScheduler(self.prompt, self.read_cb))
    def prompt(self):
        # Write without a newline and flush so the prompt shows immediately.
        sys.stdout.write('> ')
        sys.stdout.flush()
    def loop(self):
        self.weboob.loop()
    def read_cb(self, line):
        # First token is the command name, the rest are its arguments;
        # process_command is presumably provided by ConsoleApplication.
        line = line.split()
        self.process_command(*line)
| agpl-3.0 | Python | |
1b3d7078a4ca91ef07f90d1645f26761d1f7abac | Add example of using lower-level plotting methods directly | joferkington/mplstereonet | examples/scatter.py | examples/scatter.py | """
Example of how `ax.scatter` can be used to plot linear data on a stereonet
varying color and/or size by other variables.
This also serves as a general example of how to convert orientation data into
the coordinate system that the stereonet plot uses so that generic matplotlib
plotting methods may be used.
"""
import numpy as np
import matplotlib.pyplot as plt
import mplstereonet
np.random.seed(1)  # fixed seed so the example renders reproducibly
strikes = np.arange(0, 360, 15)
dips = 45 * np.ones(strikes.size)
magnitude = np.random.random(strikes.size)
# Convert our strikes and dips to stereonet coordinates
lons, lats = mplstereonet.pole(strikes, dips)
# Now we'll plot our data and color by magnitude
fig, ax = mplstereonet.subplots()
# `sm` is the returned mappable; keep it to attach a colorbar later.
sm = ax.scatter(lons, lats, c=magnitude, s=50, cmap='gist_earth')
ax.grid()
plt.show()
| mit | Python | |
7383cc2a4b6ad21c747794dbb3d33338d8eea528 | Add another example. | nihilifer/txsocksx,locusf/txsocksx,habnabit/txsocksx | examples/tor-irc.py | examples/tor-irc.py | # Copyright (c) Aaron Gallagher <_@habnab.it>
# See COPYING for details.
from twisted.internet.defer import Deferred
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import ClientFactory
from twisted.internet.task import react
from twisted.words.protocols.irc import IRCClient
from twisted.protocols.policies import SpewingFactory
from txsocksx.client import SOCKS5ClientEndpoint
class CouldNotIRCError(Exception):
    """Raised (via errback) when SASL is unavailable or authentication fails."""
    pass
class TorIRC(IRCClient):
    """Minimal IRC client that authenticates with SASL PLAIN.

    ``self.deferred`` is errbacked when SASL fails or the connection
    ends; the success path simply quits after signing on.
    """
    nickname = 'txsocksx-tor-irc'
    nickservPassword = ''
    def connectionMade(self):
        # Request the SASL capability before registration completes.
        self.sendLine('CAP REQ :sasl')
        self.deferred = Deferred()
        IRCClient.connectionMade(self)
    def irc_CAP(self, prefix, params):
        if params[1] != 'ACK' or params[2].split() != ['sasl']:
            self.deferred.errback(CouldNotIRCError('sasl not available'))
            # Bug fix: without this return we would fall through and try
            # to authenticate even though the server refused SASL.
            return
        sasl = ('{0}\0{0}\0{1}'.format(self.nickname, self.nickservPassword)).encode('base64').strip()
        self.sendLine('AUTHENTICATE PLAIN')
        self.sendLine('AUTHENTICATE ' + sasl)
    def irc_903(self, prefix, params):
        # 903 = SASL authentication successful: finish capability negotiation.
        self.sendLine('CAP END')
    def irc_904(self, prefix, params):
        # 904/905 = SASL authentication failed.
        self.deferred.errback(CouldNotIRCError('sasl auth failed', params))
    irc_905 = irc_904
    def connectionLost(self, reason):
        # NOTE(review): this errbacks even after the deliberate quit() in
        # signedOn -- callers treat any disconnect as termination.
        self.deferred.errback(reason)
    def signedOn(self):
        print('signed on successfully')
        self.quit('')
class TorIRCFactory(ClientFactory):
    """Builds TorIRC protocol instances for the endpoint connection."""
    protocol = TorIRC
def main(reactor):
    """Connect to freenode's Tor hidden service through the local Tor
    SOCKS5 proxy (default port 9050); returns a Deferred that fires when
    the IRC session ends."""
    torEndpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', 9050)
    # freenode's tor endpoint
    ircEndpoint = SOCKS5ClientEndpoint('lgttsalmpw3qo4no.onion', 6667, torEndpoint)
    # SpewingFactory logs all traffic; the real protocol is the wrapped one.
    d = ircEndpoint.connect(SpewingFactory(TorIRCFactory()))
    d.addCallback(lambda proto: proto.wrappedProtocol.deferred)
    return d
react(main, [])
| isc | Python | |
7a3e85231efeb5c03cab944f6da346d138f6fcb1 | Add tests for pips | wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build | test/test_pips.py | test/test_pips.py | import pytest
@pytest.mark.parametrize("name", [
    ("awscli"),
    ("boto3"),
    ("docker-py"),
    ("GitPython"),
    ("mkdocs"),
    ("pep8"),
    ("virtualenv"),
    ("virtualenvwrapper"),
])
def test_pips(host, name):
    """Each expected tool must appear among the host's installed pip packages."""
    assert name in host.pip_package.get_packages()
8b4d27851889bccc87392b14557ce63d3f95e426 | add build.py | greggman/hft-unity3d,greggman/hft-unity3d,greggman/hft-unity3d,greggman/hft-unity3d | build.py | build.py | #!/usr/bin/python
import glob
import gzip
import os
import platform
import re
import sh
import shutil
import subprocess
import sys
import time
from optparse import OptionParser
log = lambda *a: None
def VerbosePrint(*args):
    """Logger swapped in for the no-op `log` when --verbose is given."""
    # Print each argument separately so caller doesn't need to
    # stuff everything to be printed into a single string
    for arg in args:
        print arg,
    print
class ShError(Exception):
    """Wraps a failed `sh` command result (non-zero exit code)."""
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return repr(self.value)
def BuildUnityPlugin():
    """Build HappyFunTimes.sln with mdtool (MonoDevelop's CLI).

    On OS X the Unity-bundled MonoDevelop binary is used; elsewhere
    `mdtool` must be on PATH.  Raises ShError on a non-zero exit.
    """
    p = platform.system().lower()
    mdtool_path = "mdtool" if not p == 'darwin' else "/Applications/Unity/MonoDevelop.app/Contents/MacOS/mdtool"
    cmd = sh.Command(mdtool_path)
    result = cmd.build("HappyFunTimes.sln")
    if result.exit_code:
        raise ShError(result)
    log(result)
def CopyFiles(files):
    """Copy each {"src": ..., "dst": ...} file pair to its destination.

    Creates missing destination directories first.  (The original only
    logged the intended copies and never actually copied anything, so
    the freshly built assemblies were never installed.)
    """
    for file in files:
        src = file["src"]
        dst = file["dst"]
        log("copy", src, "->", dst)
        dst_dir = os.path.dirname(dst)
        if dst_dir and not os.path.isdir(dst_dir):
            os.makedirs(dst_dir)
        shutil.copy2(src, dst)
def main(argv):
    """Build the HappyFunTimes Unity plugin and copy the resulting
    assemblies into the HFTPlugin asset folders.

    argv -- command line arguments excluding the program name.
    """
    parser = OptionParser()
    parser.add_option(
        "-v", "--verbose", action="store_true",
        help="verbose")
    (options, args) = parser.parse_args(args=argv)
    if options.verbose:
        # Swap the module-level no-op logger for a printing one.
        global log
        log = VerbosePrint
    files = [
        { "src": 'HappyFunTimes/bin/Release/DeJson.dll',
          "dst": 'HFTPlugin/Assets/Plugins/DeJson.dll',
        },
        { "src": 'HappyFunTimes/bin/Release/HappyFunTimes.dll',
          "dst": 'HFTPlugin/Assets/Plugins/HappyFunTimes.dll',
        },
        { "src": 'HappyFunTimes/bin/Release/websocket-sharp.dll',
          "dst": 'HFTPlugin/Assets/Plugins/websocket-sharp.dll',
        },
        # NOTE(review): a .cs source copied to a .dll path -- confirm this
        # src/dst pair is intended.
        { "src": 'Extra/HFTRunner.cs',
          "dst": 'HFTPlugin/Assets/Plugins/HFTExtra.dll',
        },
        { "src": 'HappyFunTimeEditor/bin/Release/HappyFunTimesEditor.dll',
          "dst": 'HFTPlugin/Assets/Plugins/Editor/HappyFunTimesEditor.dll',
        }
    ]
    BuildUnityPlugin()
    CopyFiles(files)
if __name__=="__main__":
    main(sys.argv[1:])
| bsd-3-clause | Python | |
4a97d5b9f9998a5b8ca8509547dabf8d757e70d9 | Add build script. | ryansturmer/gitmake | build.py | build.py | import version
print "Reading gitmake.py..."
with open('gitmake.py') as fp:
    lines = fp.readlines()
print "Rewriting gitmake.py..."
# Rewrite the file, replacing the `version_info = ...` line with the
# current values from the version module; other lines are copied verbatim.
with open('gitmake.py', 'w') as fp:
    for line in lines:
        if line.startswith('version_info ='):
            fp.write('version_info = (%d,%d,%d,\'%s\')\n' % (version.major, version.minor, version.patch, version.branch))
        else:
            fp.write(line)
print "Done!"
| mit | Python | |
6dabd92990df570d81a621e51d7119345671d4c0 | Create Neopixel_Serial.py (#43) | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/moz4r/Neopixel_Serial.py | home/moz4r/Neopixel_Serial.py | #Just a poc maybe there is a best method
#Flash Neopixel_MRL.ino
import time
# MyRobotLab (Jython) script: createAndStart registers named services;
# the "mouth" service presumably becomes available as a global -- confirm
# against the MRL runtime.
serial = Runtime.createAndStart("serial","Serial")
Runtime.createAndStart("mouth", "AcapelaSpeech")
serial.connect("COM7", 9600, 8, 1, 0)
sleep(5)
mouth.speak("Hi everybody this is neo pixel ring controled by my robot lab")
sleep(3)
mouth.speak("Fire, It burn a lot")
# Each write(n) presumably selects animation n in the Arduino sketch
# (2 ~ fire, 3 ~ "jarvis", 1 ~ cylon, 9 ~ off, judging by the speech cues).
serial.write(2)
sleep(6)
mouth.speak("Hello jarvis")
serial.write(3)
sleep(6)
mouth.speak("I am a cylon")
serial.write(1)
sleep(6)
serial.write(9)
| apache-2.0 | Python | |
86ae30203475a2ac718cf3839e38522e8e1aa203 | Add tests package #5 | 7pairs/kac6vote | tests/__init__.py | tests/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Jun-ya HASEBA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| apache-2.0 | Python | |
0fec255426bc48e7674cc1391bdb3e1f64386be6 | Add disk_variability script, used to generate box plot for paper | jcarreira/ramcloud,SMatsushi/RAMCloud,rstutsman/RAMCloud,behnamm/cs244b_project,rstutsman/RAMCloud,jcarreira/ramcloud,jcarreira/ramcloud,SMatsushi/RAMCloud,jcarreira/ramcloud,alexandermerritt/ramcloud,mrdiegoa/ramcloud,QingkaiLu/RAMCloud,QingkaiLu/RAMCloud,taschik/ramcloud,DavidLi2010/ramcloud,jcarreira/ramcloud,taschik/ramcloud-load-manager,jblomer/ramcloud,IMCG/RamCloud,DavidLi2010/ramcloud,Frank-Wu/RamCloud,alexandermerritt/ramcloud,IMCG/RamCloud,behnamm/cs244b_project,IMCG/RamCloud,jblomer/ramcloud,rstutsman/RAMCloud,y-higuchi/ramcloud,SMatsushi/RAMCloud,Frank-Wu/RamCloud,SMatsushi/RAMCloud,utah-scs/RAMCloud,y-higuchi/ramcloud,matrix207/RAMCloud,Frank-Wu/RamCloud,alexandermerritt/ramcloud,anirajk/RAMCloud,mrdiegoa/ramcloud,QingkaiLu/RAMCloud,rstutsman/RAMCloud,utah-scs/RAMCloud,matrix207/RAMCloud,y-higuchi/ramcloud,y-higuchi/ramcloud,jblomer/ramcloud,jblomer/ramcloud,matrix207/RAMCloud,taschik/ramcloud-load-manager,jblomer/ramcloud,utah-scs/RAMCloud,taschik/ramcloud-load-manager,y-higuchi/ramcloud,QingkaiLu/RAMCloud,matrix207/RAMCloud,alexandermerritt/ramcloud,jcarreira/ramcloud,SMatsushi/RAMCloud,mrdiegoa/ramcloud,y-higuchi/ramcloud,utah-scs/RAMCloud,taschik/ramcloud,DavidLi2010/ramcloud,anirajk/RAMCloud,taschik/ramcloud,QingkaiLu/RAMCloud,taschik/ramcloud,anirajk/RAMCloud,utah-scs/RAMCloud,anirajk/RAMCloud,DavidLi2010/ramcloud,QingkaiLu/RAMCloud,rstutsman/RAMCloud,Frank-Wu/RamCloud,IMCG/RamCloud,jblomer/ramcloud,IMCG/RamCloud,mrdiegoa/ramcloud,matrix207/RAMCloud,behnamm/cs244b_project,mrdiegoa/ramcloud,matrix207/RAMCloud,SMatsushi/RAMCloud,anirajk/RAMCloud,Frank-Wu/RamCloud,taschik/ramcloud-load-manager,anirajk/RAMCloud,DavidLi2010/ramcloud,taschik/ramcloud-load-manager,taschik/ramcloud,alexandermerritt/ramcloud,utah-scs/RAMCloud,alexandermerritt/ramcloud,Frank-Wu/RamCloud,behnamm/cs244b_project,rstutsman/RAMCloud,behnamm/cs244b_project,IMCG/RamCloud,m
rdiegoa/ramcloud,behnamm/cs244b_project,DavidLi2010/ramcloud | scripts/disk_variability.py | scripts/disk_variability.py | #!/usr/bin/env python
# Copyright (c) 2011 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generates data for a recovery performance graph."""
from __future__ import division, print_function
from common import *
from glob import glob
import math
import metrics
import recovery
import re
import sys
def median(l):
    """Return the statistical median of a list of numbers.

    Bug fix: for an even number of elements the original averaged a
    single-element slice (l[n//2:n//2+1]) and so returned the
    upper-middle value; the median must average BOTH middle values.
    """
    l = sorted(l)
    mid = len(l) // 2
    if len(l) % 2 == 0:
        # `from __future__ import division` is in effect: / is true division.
        return (l[mid - 1] + l[mid]) / 2
    else:
        return l[mid]
if len(sys.argv) > 1:
    recovery_dir = sys.argv[1]
else:
    recovery_dir = 'recovery/latest'
# NOTE(review): recovery_dir is assigned but never used below.
NUMBACKUPS = 36
TRIALS = 25
# backups[i] accumulates, per trial, backup i's disk read bandwidth (MB/s).
backups = [[] for i in range(NUMBACKUPS)]
for trial in range(TRIALS):
    args = {}
    args['numBackups'] = NUMBACKUPS
    args['numPartitions'] = 12
    args['objectSize'] = 1024
    args['disk'] = '/dev/sda2'
    args['replicas'] = 3
    args['numObjects'] = (626012 * args['numBackups'] * 80 //
                          args['numPartitions'] // 640)
    args['oldMasterArgs'] = '-m 17000'
    args['newMasterArgs'] = '-m 800'
    r = recovery.insist(**args)
    print('->', r['ns'] / 1e6, 'ms', '(run %s)' % r['run'])
    for i, backup in enumerate(r['metrics'].backups):
        backups[i].append(
            (backup.backup.storageReadBytes / 2**20) /
            (backup.backup.storageReadTicks / backup.clockFrequency))
with open('%s/recovery/disk_variability.data' % top_path, 'w', 1) as dat:
    # Two passes: first the box-and-whisker rows, then the outlier points,
    # separated by blank lines (gnuplot-style data sets).
    for outliersPass in [False, True]:
        for i, read in enumerate(sorted(backups, key=median)):
            read.sort()
            m = median(read)
            # Mendenhall and Sincich method
            q1 = read[int(math.ceil(1 * (len(read) + 1) / 4)) - 1]
            q3 = read[int(math.floor(3 * (len(read) + 1) / 4)) - 1]
            iqr = q3 - q1
            include = []
            outliers = []
            for x in read:
                # Standard 1.5*IQR fence separates outliers from the whiskers.
                if x < q1 - 1.5 * iqr or x > q3 + 1.5 * iqr:
                    outliers.append(x)
                else:
                    include.append(x)
            if outliersPass:
                for outlier in outliers:
                    print(i, outlier, file=dat)
            else:
                print(i, min(include), q1, m, q3, max(include), file=dat)
        print(file=dat)
        print(file=dat)
| isc | Python | |
d519c7f171d7e89f30f073616f71af24654d223d | add solution for Rotate List | zhyu/leetcode,zhyu/leetcode | src/rotateList.py | src/rotateList.py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param head, a ListNode
    # @param k, an integer
    # @return a ListNode
    def rotateRight(self, head, k):
        """Rotate the list right by k places and return the new head."""
        if not head:
            return None
        length = self.len(head)
        shift = k % length
        if shift == 0:
            return head
        # Advance `lead` by `shift` nodes, then walk both pointers until
        # `lead` reaches the old tail; `lag` then sits just before the
        # new head.
        lead = head
        for _ in xrange(shift):
            lead = lead.next
        lag = head
        while lead.next:
            lag = lag.next
            lead = lead.next
        new_head = lag.next
        lag.next = None
        lead.next = head
        return new_head

    def len(self, head):
        """Return the number of nodes in the list."""
        count = 0
        node = head
        while node:
            count += 1
            node = node.next
        return count
| mit | Python | |
5828823d505aae1425fd2353f898c5b18722e6e5 | Introduce base class and ProgressObserver for renaming occurences. | caio2k/RIDE,fingeronthebutton/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,robotframework/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,caio2k/RIDE,caio2k/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE | src/robotide/ui/progress.py | src/robotide/ui/progress.py | # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import time
from robotide import context
class ProgressObserver(object):
    """Shows a pulsing wx progress dialog for a long-running operation."""
    def __init__(self, frame, title, message):
        self._progressbar = wx.ProgressDialog(title, message,
                                              maximum=100, parent=frame,
                                              style=wx.PD_ELAPSED_TIME)
    def notify(self):
        # Pulse = indeterminate progress; the real percentage is unknown.
        self._progressbar.Pulse()
    def finish(self):
        self._progressbar.Destroy()
        # Errors collected during the operation are reported in one batch.
        context.LOG.report_parsing_errors()
    def error(self, msg):
        self.finish()
        context.LOG.error(msg)
class LoadProgressObserver(ProgressObserver):
    """Progress feedback shown while the test data is being loaded."""
    def __init__(self, frame):
        ProgressObserver.__init__(self, frame, 'RIDE', 'Loading the test data')
class RenameProgressObserver(ProgressObserver):
    """Progress feedback for renaming; throttles UI pulses to ~10/second."""
    def __init__(self, frame):
        ProgressObserver.__init__(self, frame, 'RIDE', 'Renaming')
        # Timestamp of the last pulse (0 = never pulsed yet).
        self._notification_occured = 0
    def notify(self):
        # Avoid hammering the UI: pulse at most once per 100 ms.
        if time.time() - self._notification_occured > 0.1:
            self._progressbar.Pulse()
            self._notification_occured = time.time()
| # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
from robotide import context
class LoadProgressObserver(object):
    """Pulsing wx progress dialog shown while the test data loads."""
    def __init__(self, frame):
        self._progressbar = wx.ProgressDialog('RIDE', 'Loading the test data',
                                              maximum=100, parent=frame,
                                              style=wx.PD_ELAPSED_TIME)
    def notify(self):
        # Pulse = indeterminate progress; the real percentage is unknown.
        self._progressbar.Pulse()
    def finish(self):
        self._progressbar.Destroy()
        context.LOG.report_parsing_errors()
    def error(self, msg):
        self.finish()
        context.LOG.error(msg)
| apache-2.0 | Python |
0d7b1d848d7ab80cc9054931f14b98bc123287bf | Create test_bulkresize.py file | sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis | jarviscli/plugins/test_bulkresize.py | jarviscli/plugins/test_bulkresize.py | from unittest import mock
import unittest
import os
from Jarvis import Jarvis
from plugins.bulkresize import spin
from plugins import bulkresize
from tests import PluginTest
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(CURRENT_PATH, '..', 'data/')
class Bulkresize(PluginTest):
    # Placeholder test case for the bulkresize plugin -- no tests yet.
    pass
if __name__ == '__main__':
    unittest.main()
a8a87818094f0cf9954815caca9fb586ddb4099b | Add a gallery example to show coloring of points by categories (#1006) | GenericMappingTools/gmt-python,GenericMappingTools/gmt-python | examples/gallery/symbols/points_categorical.py | examples/gallery/symbols/points_categorical.py | """
Color points by categories
---------------------------
The :meth:`pygmt.Figure.plot` method can be used to plot symbols which are
color-coded by categories. In the example below, we show how the
`Palmer Penguins dataset <https://github.com/allisonhorst/palmerpenguins>`__
can be visualized. Here, we can pass the individual categories included in
the "species" column directly to the ``color`` parameter via
``color=df.species.cat.codes.astype(int)``. Additionally, we have to set
``cmap=True``. A desired colormap can be selected via the :meth:`pygmt.makecpt`
method.
"""
import pandas as pd
import pygmt
# Load sample penguins data and convert 'species' column to categorical dtype
df = pd.read_csv("https://github.com/mwaskom/seaborn-data/raw/master/penguins.csv")
df.species = df.species.astype(dtype="category")
# Use pygmt.info to get region bounds (xmin, xmax, ymin, ymax)
# The below example will return a numpy array like [30.0, 60.0, 12.0, 22.0]
region = pygmt.info(
table=df[["bill_length_mm", "bill_depth_mm"]], # x and y columns
per_column=True, # report the min/max values per column as a numpy array
# round the min/max values of the first two columns to the nearest multiple
# of 3 and 2, respectively
spacing=(3, 2),
)
# Make a 2D categorical scatter plot, coloring each of the 3 species differently
fig = pygmt.Figure()
# Generate a basemap of 10 cm x 10 cm size
fig.basemap(
region=region,
projection="X10c/10c",
frame=[
'xafg+l"Bill length (mm)"',
'yafg+l"Bill depth (mm)"',
'WSen+t"Penguin size at Palmer Station"',
],
)
# Define a colormap to be used for three categories, define the range of the
# new discrete CPT using series=(lowest_value, highest_value, interval),
# use color_model="+c" to write the discrete color palette "inferno" in
# categorical format
pygmt.makecpt(cmap="inferno", series=(0, 3, 1), color_model="+c")
fig.plot(
# Use bill length and bill depth as x and y data input, respectively
x=df.bill_length_mm,
y=df.bill_depth_mm,
# Vary each symbol size according to another feature (body mass, scaled by 7.5*10e-5)
sizes=df.body_mass_g * 7.5e-5,
# Points colored by categorical number code
color=df.species.cat.codes.astype(int),
# Use colormap created by makecpt
cmap=True,
# Do not clip symbols that fall close to the map bounds
no_clip=True,
# Use circles as symbols with size in centimeter units
style="cc",
# Set transparency level for all symbols to deal with overplotting
transparency=40,
)
# A colorbar displaying the different penguin species types will be added
# once GMT 6.2.0 is released.
fig.show()
| bsd-3-clause | Python | |
7ff0c1fd4eb77129c7829f92fc176678a06abe19 | add solution for Balanced Binary Tree | zhyu/leetcode,zhyu/leetcode | src/balancedBinaryTree.py | src/balancedBinaryTree.py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param root, a tree node
# @return a boolean
def isBalanced(self, root):
return self.getDepth(root) != -1
def getDepth(self, root):
if root is None:
return 0
l_depth = self.getDepth(root.left)
if l_depth == -1:
return -1
r_depth = self.getDepth(root.right)
if r_depth == -1:
return -1
if l_depth-r_depth > 1 or l_depth-r_depth < -1:
return -1
return max(l_depth, r_depth) + 1
| mit | Python | |
81a79933aa593f79ae054053068b04073f9db68f | Add hit calibration example | tamasgal/km3pipe,tamasgal/km3pipe | examples/plot_calibrating_hits.py | examples/plot_calibrating_hits.py | #!/usr/bin/env python
# coding: utf-8 -*-
"""
==================
Calibrating Hits
==================
Hits stored in ROOT and HDF5 files are usually not calibrated, which
means that they have invalid positions, directions and uncorrected hit times.
This example shows how to assign the PMT position and direction to each hit
and applying a time correction to them.
The KM3NeT offline format (derived from aanet) uses a single class for every
hit type (regular hits, MC hits and their correspeonding calibrated and
uncalibrated counter parts). In ROOT files, the actual struct/class definition
is stored along the data, which means that all the attributes are accessible
even when they are invalid or not instantiated. Positions and directions are
also part of these attributes and they are initialised to `(0, 0, 0)`.
The `km3pipe.calib.Calibration()` class can be used to load a calibration and
the `.apply()` method to update the position, direction and correct the arrival
time of hit.
"""
# Author: Tamas Gal <tgal@km3net.de>
# License: BSD-3
import km3pipe as kp
import km3io
from km3net_testdata import data_path
#######################
# The `offline/km3net_offline.root` contains 10 events with hit information:
f = km3io.OfflineReader(data_path("offline/km3net_offline.root"))
#######################
# The corresponding calibration file is stored in `detx/km3net_offline.detx`:
calib = kp.calib.Calibration(filename=data_path("detx/km3net_offline.detx"))
#######################
# Let's grab the hits of the event with index `5`:
hits = f.events[5].hits
#######################
# The positions and directions show the default values (0) and are not
# calibrated. Here are the values of the first few hits:
n = 7
for attr in [f"{k}_{q}" for k in ("pos", "dir") for q in "xzy"]:
print(attr, hits[attr][:n])
#######################
# Here are the uncalibrated times:
uncalibrated_times = hits.t
print(uncalibrated_times[:n])
#######################
# To calibrate the hits, use the `calib.apply()` method which will create a
# `km3pipe.Table`, retrieve the positions and directions of the corresponding
# PMTs, apply the time calibration and also do the PMT time slew correction.
calibrated_hits = calib.apply(f.events[5].hits)
#######################
# The calibrated hits are stored in a `kp.Table` which is a thin wrapper
# around a `numpy.record` array (a simple numpy array with named attributes):
print(calibrated_hits.dtype)
#######################
# The positions and directions are now showing the correct values:
for attr in [f"{k}_{q}" for k in ("pos", "dir") for q in "xzy"]:
print(attr, calibrated_hits[attr][:n])
#######################
# The `t0` field holds the time calibration correction which was automatically
# added to hit time (`hit.time`):
print(calibrated_hits.t0[:n])
#######################
# As mentioned above, the PMT time slewing correction is also applied, which
# is a tiny correction of the arrival time with respect to the hit's ToT value.
# We can reveal their values by subtracting the t0 from the calibrated time and
# compare to the uncalibrated ones:
slews = uncalibrated_times - (calibrated_hits.time - calibrated_hits.t0)
print(slews[:n])
#######################
# Let's compare the slews with the ones calculated with `kp.calib.slew()`. The
# values match very well, with tiny variations due to floating point arithmetic:
slew_diff = slews - kp.calib.slew(hits.tot)
print(slew_diff[:n])
| mit | Python | |
d19599227935139585a013227e816090a48e3a83 | Create bh.py | supthunder/amdRestock | bh.py | bh.py | import requests
import re
import tweepy
import os
from time import gmtime, strftime, sleep
import json
from random import uniform
def getName(name):
product = "RX "
if "570" in name:
product += "570"
else:
product += "580"
if "4G" in name:
product += " 4GB"
else:
product += " 8GB"
return product
def sendTweet(link, site, name, price):
global restock
# setup twitter
C_KEY = "KEYS"
C_SECRET = "KEYS"
A_TOKEN = "KEYS"
A_TOKEN_SECRET = "KEYS"
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)
api = tweepy.API(auth)
# send tweet
alert = "\U0001f6a8 "
sos = "\U0001f198 "
flag = "\U0001f6a9 "
tweet = alert+sos+flag+" IN STOCK "+flag+sos+alert
tweet += "\n"+name
tweet += "\n$"+price
tweet += "\nSite: "+site+"\n"
tweet += link+"\n"
tweet += strftime("%Y-%m-%d %H:%M:%S", gmtime())
print(tweet)
api.update_status(tweet.encode('utf-8'))
restock = 1
def bh():
with open('waitBh.json') as dt:
waitCheck = json.load(dt)
url1 = 'https://www.bhphotovideo.com/c/json/buy/Graphic-Cards/ci/6567/N/3668461602/mnp/180/mxp/350/ntt/rx+580'
url2 = 'https://www.bhphotovideo.com/c/json/buy/Graphic-Cards/ci/6567/N/3668461602+4294272508+4294272499/mnp/175/mxp/350/ntt/rx'
urlTest = 'https://www.bhphotovideo.com/c/json/buy/Graphic-Cards/ci/6567/N/3668461602/mnp/700/mxp/720'
user = {"User-Agent": "Chrome 41.0.2227.1"}
try:
stock = requests.get(url2, headers=user, timeout=5)
except:
print("Could not connect...")
exit(1)
if str(stock.status_code) != "200":
print("BH - banned! - "+str(stock.status_code))
return
stock = stock.json()
for item in range(0,stock["resultCount"]):
name = stock['items'][item]['shortDescriptionPlusBrand']
inStock = stock['items'][item]["available"]
link = stock['items'][item]["detailsUrl"]
price = stock['items'][item]["price"]
itemCode = stock['items'][item]["itemCode"]
if inStock == True:
if waitCheck[itemCode] == 180:
waitCheck[itemCode] = 0
elif waitCheck[itemCode] > 0:
waitCheck[itemCode] += 1
else:
waitCheck[itemCode] += 1
print("BHPhoto - " + name + " - In Stock")
sendTweet(link,"BHPhoto",getName(name), price)
else:
if waitCheck[itemCode] == 180:
waitCheck[itemCode] = 0
elif waitCheck[itemCode] > 0:
waitCheck[itemCode] += 1
print("BHPhoto - " + name + " - OOS")
with open('waitBh.json','w') as fl:
json.dump(waitCheck, fl, indent=4)
print(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
bh()
| mit | Python | |
8bc4dddfad944d385c02e2a6ebd8031bfb6bfae8 | Test dynamic_length | raviqqe/tensorflow-extenteten,raviqqe/tensorflow-extenteten | extenteten/dynamic_length_test.py | extenteten/dynamic_length_test.py | import numpy as np
import tensorflow as tf
from .dynamic_length import *
def test_id_tree_to_root_width():
with tf.Session() as session, session.as_default():
id_tree = tf.constant([[[1], [2], [3], [0], [0]]])
assert id_tree_to_root_width(id_tree).eval() == np.array([3])
def test_id_sequence_to_length():
with tf.Session() as session, session.as_default():
id_sequence = tf.constant([[1, 2, 3, 0, 0]])
assert id_sequence_to_length(id_sequence).eval() == np.array([3])
| unlicense | Python | |
471d60f41a283e5a2b2fb4a364cde67150de8acd | Create pmcolor.py | TingPing/plugins,TingPing/plugins | HexChat/pmcolor.py | HexChat/pmcolor.py | __module_name__ = "PMColor"
__module_author__ = "TingPing"
__module_version__ = "1"
__module_description__ = "Color PM tabs like Hilights"
import xchat
def pm_cb(word, word_eol, userdata):
xchat.command('GUI COLOR 3')
return None
xchat.hook_print("Private Message to Dialog", pm_cb)
xchat.hook_print("Private Action to Dialog", pm_cb)
| mit | Python | |
8b5c9a434b1d8ae8d46a34d45114bc9c71dac0ea | Create install for nginx | hatchery/genepool,hatchery/Genepool2 | genes/nginx/main.py | genes/nginx/main.py | from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.debian.traits import is_debian
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
def main():
if is_ubuntu() or is_debian():
apt.update()
apt.install('nginx')
elif is_osx():
brew.update()
brew.install('nginx')
else:
pass
| mit | Python | |
e1a40e6a43915f8e8be2aa27387cd0d25f05ed67 | Create Multiplication_Of_2_Numbers.py | HarendraSingh22/Python-Guide-for-Beginners | Code/Multiplication_Of_2_Numbers.py | Code/Multiplication_Of_2_Numbers.py | a=input("Enter a number -->")
b=input("Enter a number -->")
print a*b
| mit | Python | |
2059aa7776a8e0c947b68e9401d74bdd146a59cd | Test passed for week day | sitdh/com-prog | ch03_04.py | ch03_04.py | (day, month, year) = input().split()
day = int(day); month = int(month); year = int(year)
if month < 3:
month += 12
year -= 1
c = year / 100
k = year % 100
week_day = int( day + (26 * (month + 1) / 10) + k + ( k / 4 ) + ( c / 4 ) + ( 5 * c ) ) % 7
week_day_name = ''
# 1. Follow from flowchart
if 0 == week_day:
week_day_name = 'SAT'
elif 1 == week_day:
week_day_name = 'SUN'
elif 2 == week_day:
week_day_name = 'MON'
elif 3 == week_day:
week_day_name = 'TUE'
elif 4 == week_day:
week_day_name = 'WED'
elif 5 == week_day:
week_day_name = 'THU'
elif 6 == week_day:
week_day_name = 'FRI'
print(week_day_name)
# 2. SHORTER VERSION
# week_day_list = ['SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI']
# print(week_day_list[week_day])
| mit | Python | |
bda7ef0f449c40d572cc4fe40aaaa2f60996bde5 | add spider for solitaireonline.com | simonsdave/gaming-spiders,simonsdave/gaming_spiders,simonsdave/gaming-spiders,simonsdave/gaming-spiders,simonsdave/gaming_spiders | gaming_spiders/solitaireonline.py | gaming_spiders/solitaireonline.py | #!/usr/bin/env python
import json
from cloudfeaster import spider
from zygomatic import ZygomaticSpider
class SolitaireOnlineSpider(ZygomaticSpider):
@classmethod
def get_metadata(cls):
return {
"url": "http://www.solitaireonline.com/?sort=mostPlayed",
}
if __name__ == "__main__":
crawl_args = spider.CLICrawlArgs(SolitaireOnlineSpider)
crawler = spider.SpiderCrawler(SolitaireOnlineSpider)
crawl_result = crawler.crawl(*crawl_args)
print json.dumps(crawl_result)
| mit | Python | |
868293aee14d6216c69446dc367491b25469f6e8 | add import_question_metadata to import display_text and key for questions from csv file | klpdotorg/dubdubdub,klpdotorg/dubdubdub,klpdotorg/dubdubdub,klpdotorg/dubdubdub | apps/stories/management/commands/import_question_metadata.py | apps/stories/management/commands/import_question_metadata.py | from django.core.management.base import BaseCommand
import csv
from stories.models import Question, Questiongroup, QuestiongroupQuestions
class Command(BaseCommand):
    args = "filename to import from"
    help = """Import Key and Display Text metadata for questions
    python manage.py import_question_metadata <path/to/file.csv>
    """

    def handle(self, *args, **options):
        """Read the CSV and copy key / display_text onto matching Questions.

        Expected columns per row: source, school_type, version, sequence,
        question text, key, <unused>, display text.  Raises if a row's
        question text does not match the database row it resolves to, so
        a misaligned file cannot silently corrupt metadata.
        """
        filename = args[0]
        # Use a context manager so the file handle is closed
        # deterministically (the original leaked it).
        with open(filename) as csv_file:
            reader = csv.reader(csv_file)
            next(reader, None)  # skip the header row
            for row in reader:
                source = row[0].strip()
                school_type = row[1].strip()
                version = int(row[2])
                sequence = int(row[3])
                question_text = row[4].strip()
                key = row[5].strip()
                new_display_text = row[7].strip()

                qg = Questiongroup.objects.filter(
                    source__name=source, version=version)[0]
                qgq = QuestiongroupQuestions.objects.filter(
                    sequence=sequence, questiongroup=qg,
                    question__school_type__name=school_type)[0]
                question = qgq.question

                # Parenthesized print works under both Python 2 and 3.
                print(question.text)
                print(question_text)
                # Sanity check: the CSV row must describe this question.
                if question.text.strip() != question_text:
                    raise Exception("question text does not match. failing.")

                question.display_text = new_display_text
                question.key = key
                question.save()
| mit | Python | |
9f1dfbf4bf36c0e3ef991a66c5a68b2674223b19 | Add a constant decoractor | iluxonchik/lyricist | const.py | const.py | def constant(func):
""" Decorator used to emulate constant values """
def fset(self, value):
raise TypeError("Cannot modify the value of a constant.")
def fget(self):
return func()
return property(fget, fset) | mit | Python | |
2ee04a1b668501eb41ce4b08e6c92ffe4f57d861 | Build dependencies were borken because something sorts 1.0.1-XX and 1.0-YY wrong | quixey/python-aliyun,easemob/python-aliyun | aliyun/__init__.py | aliyun/__init__.py | """
Aliyun API
==========
The Aliyun API is well-documented at `dev.aliyun.com <http://dev.aliyun.com/thread.php?spm=0.0.0.0.MqTmNj&fid=8>`_.
Each service's API is very similar: There are regions, actions, and each action has many parameters.
It is an OAuth2 API, so you need to have an ID and a secret. You can get these from the Aliyun management console.
Authentication
==============
You will need security credentials for your Aliyun account. You can view and
create them in the `Aliyun management console <http://console.aliyun.com>`_. This
library will look for credentials in the following places:
1. Environment variables `ALI_ACCESS_KEY_ID` and `ALI_SECRET_ACCESS_KEY`
2. An ini-style configuration file at `~/.aliyun.cfg` with contents like:
::
[default]
access_key_id=xxxxxxxxxxxxx
secret_access_key=xxxxxxxxxxxxxxxxxxxxxxx
..
3. A system-wide version of that file at /etc/aliyun.cfg with similar contents.
We recommend using environment variables whenever possible.
Main Interfaces
===============
The main components of python-aliyun are ECS and SLB. Other Aliyun products will
be added as API support develops. Within each Aliyun product, we tried to
implement every API Action variation available. We used a boto-style design
where most API interaction is done with a connection object which marshalls
Python objects and API representations.
*ECS*:
You can create a new ECS connection and interact with ECS like this::
import aliyun.ecs.connection
conn = aliyun.ecs.connection.EcsConnection('cn-hangzhou')
print conn.get_all_instance_ids()
See more at :mod:`aliyun.ecs`
*SLB*:
Similarly for SLB, get the connection object like this::
import aliyun.slb.connection
conn = aliyun.slb.connection.SlbConnection('cn-hangzhou')
print conn.get_all_load_balancer_ids()
See more at :mod:`aliyun.slb`
ali command
===========
The ali commandline tool is mostly used for debugging the Aliyun API interactions.
It accepts arbitrary Key=Value pairs and passes them on to the API after wrapping them.
::
ali --region cn-hangzhou ecs Action=DescribeRegions
ali --region cn-hangzhou slb Action=DescribeLoadBalancers
"""
# Public package version string.
__version__ = "1.1.0"
| """
Aliyun API
==========
The Aliyun API is well-documented at `dev.aliyun.com <http://dev.aliyun.com/thread.php?spm=0.0.0.0.MqTmNj&fid=8>`_.
Each service's API is very similar: There are regions, actions, and each action has many parameters.
It is an OAuth2 API, so you need to have an ID and a secret. You can get these from the Aliyun management console.
Authentication
==============
You will need security credentials for your Aliyun account. You can view and
create them in the `Aliyun management console <http://console.aliyun.com>`_. This
library will look for credentials in the following places:
1. Environment variables `ALI_ACCESS_KEY_ID` and `ALI_SECRET_ACCESS_KEY`
2. An ini-style configuration file at `~/.aliyun.cfg` with contents like:
::
[default]
access_key_id=xxxxxxxxxxxxx
secret_access_key=xxxxxxxxxxxxxxxxxxxxxxx
..
3. A system-wide version of that file at /etc/aliyun.cfg with similar contents.
We recommend using environment variables whenever possible.
Main Interfaces
===============
The main components of python-aliyun are ECS and SLB. Other Aliyun products will
be added as API support develops. Within each Aliyun product, we tried to
implement every API Action variation available. We used a boto-style design
where most API interaction is done with a connection object which marshalls
Python objects and API representations.
*ECS*:
You can create a new ECS connection and interact with ECS like this::
import aliyun.ecs.connection
conn = aliyun.ecs.connection.EcsConnection('cn-hangzhou')
print conn.get_all_instance_ids()
See more at :mod:`aliyun.ecs`
*SLB*:
Similarly for SLB, get the connection object like this::
import aliyun.slb.connection
conn = aliyun.slb.connection.SlbConnection('cn-hangzhou')
print conn.get_all_load_balancer_ids()
See more at :mod:`aliyun.slb`
ali command
===========
The ali commandline tool is mostly used for debugging the Aliyun API interactions.
It accepts arbitrary Key=Value pairs and passes them on to the API after wrapping them.
::
ali --region cn-hangzhou ecs Action=DescribeRegions
ali --region cn-hangzhou slb Action=DescribeLoadBalancers
"""
__version__ = "1.0.1"
| apache-2.0 | Python |
# Settings consumed by the quakenet script: placeholder authentication
# credentials and the channel list to join.
settings = {
    "authname": "authname",
    "password": "authpw",
    "channels": "#pwnagedeluxe"
    }
6b0721b6aeda6d3ec6f5d31be7c741bc7fcc4635 | bump release for 18.0.1 development | zeroSteiner/boltons | setup.py | setup.py | """Functionality that should be in the standard library. Like
builtins, but Boltons.
Otherwise known as, "everyone's util.py," but cleaned up and
tested.
Contains over 160 BSD-licensed utility types and functions that can be
used as a package or independently. `Extensively documented on Read
the Docs <http://boltons.readthedocs.org>`_.
"""
from setuptools import setup
__author__ = 'Mahmoud Hashemi'
# Development version string; the "dev" suffix is dropped at release
# time per the checklist at the bottom of this file.
__version__ = '18.0.1dev'
__contact__ = 'mahmoud@hatnote.com'
__url__ = 'https://github.com/mahmoud/boltons'
__license__ = 'BSD'
setup(name='boltons',
version=__version__,
description="When they're not builtins, they're boltons.",
long_description=__doc__,
author=__author__,
author_email=__contact__,
url=__url__,
packages=['boltons'],
include_package_data=True,
zip_safe=False,
license=__license__,
platforms='any',
classifiers=[
# See: https://pypi.python.org/pypi?:action=list_classifiers
'Topic :: Utilities',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
# List of python versions and their support status:
# https://en.wikipedia.org/wiki/CPython#Version_history
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy', ]
)
"""
A brief checklist for release:
* tox
* git commit (if applicable)
* Bump setup.py version off of -dev
* git commit -a -m "bump version for x.y.z release"
* python setup.py sdist bdist_wheel upload
* bump docs/conf.py version
* git commit
* git tag -a x.y.z -m "brief summary"
* write CHANGELOG
* git commit
* bump setup.py version onto n+1 dev
* git commit
* git push
"""
| """Functionality that should be in the standard library. Like
builtins, but Boltons.
Otherwise known as, "everyone's util.py," but cleaned up and
tested.
Contains over 160 BSD-licensed utility types and functions that can be
used as a package or independently. `Extensively documented on Read
the Docs <http://boltons.readthedocs.org>`_.
"""
from setuptools import setup
__author__ = 'Mahmoud Hashemi'
__version__ = '18.0.0'
__contact__ = 'mahmoud@hatnote.com'
__url__ = 'https://github.com/mahmoud/boltons'
__license__ = 'BSD'
setup(name='boltons',
version=__version__,
description="When they're not builtins, they're boltons.",
long_description=__doc__,
author=__author__,
author_email=__contact__,
url=__url__,
packages=['boltons'],
include_package_data=True,
zip_safe=False,
license=__license__,
platforms='any',
classifiers=[
# See: https://pypi.python.org/pypi?:action=list_classifiers
'Topic :: Utilities',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
# List of python versions and their support status:
# https://en.wikipedia.org/wiki/CPython#Version_history
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy', ]
)
"""
A brief checklist for release:
* tox
* git commit (if applicable)
* Bump setup.py version off of -dev
* git commit -a -m "bump version for x.y.z release"
* python setup.py sdist bdist_wheel upload
* bump docs/conf.py version
* git commit
* git tag -a x.y.z -m "brief summary"
* write CHANGELOG
* git commit
* bump setup.py version onto n+1 dev
* git commit
* git push
"""
| bsd-3-clause | Python |
95d3306f2f7c492ea5f58c86b86165544273e6b9 | Create mp.py | Askars/bio_one_line_magic | mp.py | mp.py | import multiprocessing as mp
import time
THREADS=10
def f(x):
print("Starting...." + str(x))
time.sleep(5)
print("Finishing...."+ str(x))
processes = [None] * THREADS
print(processes)
def add_to_processes(args):
while True:
for idx, process in enumerate(processes):
if process is None or not process.is_alive():
p = mp.Process(target=f, args=args)
processes[idx] = p
p.start()
return
for i in range(0, 30):
add_to_processes((i,))
| unlicense | Python | |
934c4136c6415b76577d206739b352ad965210f0 | Create test_postures.py | mecax/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,mecax/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,sstocker46/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/beetlejuice/test_postures.py | home/beetlejuice/test_postures.py | # Sweety's postures test
# Sweety's postures test: attach to the robot, then demonstrate every
# predefined posture in order, announcing each one.
import random

Runtime.createAndStart("sweety", "Sweety")

sweety.arduino.setBoard("atmega2560")
sweety.connect("COM9")
sleep(1) # give a second to the arduino for connect
sweety.attach()
sweety.mouthState("smile")
sleep(1)

# set delays for led sync (delayTime, delayTimeStop, delayTimeLetter)
sweety.setdelays(50,200,50)
sweety.mouth.setLanguage("en")
#sweety.saying("Hello,my name is sweety.")

# (posture name, spoken announcement) pairs, demonstrated in order.
# This replaces eight copy-pasted posture/saying/sleep stanzas.
POSTURES = [
    ("neutral", "neutral."),
    ("yes", "yes."),
    ("concentre", "concentre."),
    ("showLeft", "show left."),
    ("showRight", "show right."),
    ("handsUp", "hands up !"),
    ("carryBags", "carry bags."),
    ("neutral", "neutral."),
]

for index, (pose, announcement) in enumerate(POSTURES):
    sweety.posture(pose)
    sweety.saying(announcement)
    # The original sequence did not pause after the final posture.
    if index < len(POSTURES) - 1:
        sleep(2)
| apache-2.0 | Python | |
89714cf01186e9aa5575fadf45c6c1fa70812871 | Create count.py | iwyos13/Robosys2 | count.py | count.py | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32

if __name__ == '__main__':
    # Minimal ROS node: publish an ever-increasing counter on the
    # "count_up" topic at 10 Hz until the node is shut down.
    rospy.init_node('count')
    pub = rospy.Publisher('count_up', Int32, queue_size=1)
    rate = rospy.Rate(10)  # 10 Hz
    n = 0
    while not rospy.is_shutdown():
        n += 1
        pub.publish(n)
        rate.sleep()
| bsd-2-clause | Python | |
9f443a5af6537867712f12419d93a5b8c824858a | Add Notify-osd option for linux based systems | jacobmetrick/Flexget,ratoaq2/Flexget,thalamus/Flexget,X-dark/Flexget,patsissons/Flexget,asm0dey/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,tobinjt/Flexget,poulpito/Flexget,sean797/Flexget,thalamus/Flexget,OmgOhnoes/Flexget,camon/Flexget,gazpachoking/Flexget,tsnoam/Flexget,v17al/Flexget,vfrc2/Flexget,drwyrm/Flexget,tsnoam/Flexget,vfrc2/Flexget,Pretagonist/Flexget,ibrahimkarahan/Flexget,ianstalk/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,qvazzler/Flexget,camon/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,Flexget/Flexget,patsissons/Flexget,dsemi/Flexget,Danfocus/Flexget,ianstalk/Flexget,v17al/Flexget,ratoaq2/Flexget,Flexget/Flexget,drwyrm/Flexget,qk4l/Flexget,asm0dey/Flexget,ZefQ/Flexget,oxc/Flexget,LynxyssCZ/Flexget,tvcsantos/Flexget,poulpito/Flexget,ZefQ/Flexget,ratoaq2/Flexget,jacobmetrick/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,oxc/Flexget,v17al/Flexget,sean797/Flexget,malkavi/Flexget,malkavi/Flexget,offbyone/Flexget,antivirtel/Flexget,jawilson/Flexget,jawilson/Flexget,tarzasai/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,malkavi/Flexget,tarzasai/Flexget,Danfocus/Flexget,cvium/Flexget,LynxyssCZ/Flexget,Pretagonist/Flexget,malkavi/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,tvcsantos/Flexget,drwyrm/Flexget,voriux/Flexget,oxc/Flexget,xfouloux/Flexget,X-dark/Flexget,spencerjanssen/Flexget,crawln45/Flexget,tsnoam/Flexget,vfrc2/Flexget,lildadou/Flexget,Pretagonist/Flexget,voriux/Flexget,ibrahimkarahan/Flexget,dsemi/Flexget,ZefQ/Flexget,X-dark/Flexget,jawilson/Flexget,xfouloux/Flexget,Flexget/Flexget,antivirtel/Flexget,spencerjanssen/Flexget,Danfocus/Flexget,asm0dey/Flexget,grrr2/Flexget,tarzasai/Flexget,jacobmetrick/Flexget,crawln45/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,offbyone/Flexget,grrr2/Flexget,LynxyssCZ/Flexget,spencerjanssen/Flexget,lildadou/Flexget,tobinjt/Flexget,qvazzler/Flexget,cvium/Flexget,patsissons/Flexget,sean797/Flexget,qk4l/Flexget,cvium/Flexget,antiv
irtel/Flexget,thalamus/Flexget,ianstalk/Flexget,offbyone/Flexget,xfouloux/Flexget,lildadou/Flexget,crawln45/Flexget,OmgOhnoes/Flexget,tobinjt/Flexget,gazpachoking/Flexget,qvazzler/Flexget,poulpito/Flexget,grrr2/Flexget | flexget/plugins/output/notify_osd.py | flexget/plugins/output/notify_osd.py | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget.plugin import register_plugin, priority, DependencyError
from flexget.utils.template import RenderError, render_from_task
log = logging.getLogger('notify_osd')
class OutputNotifyOsd(object):
def validator(self):
from flexget import validator
config = validator.factory()
config.accept('boolean')
advanced = config.accept('dict')
advanced.accept('text', key='title_template')
advanced.accept('text', key='item_template')
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {}
config.setdefault('title_template', '{{task.name}}')
config.setdefault('item_template', '{{title}}')
return config
def on_task_start(self, task, config):
try:
from gi.repository import Notify
except ImportError as e:
log.debug('Error importing Notify: %s' % e)
raise DependencyError('notify_osd', 'gi.repository',
'Notify module required. ImportError: %s' % e)
@priority(0)
def on_task_output(self, task, config):
"""
Configuration::
notify_osd:
title_template: Notification title, supports jinja templating, default {{task.name}}
item_template: Notification body, suports jinja templating, default {{title}}
"""
from gi.repository import Notify
if not Notify.init("Flexget"):
log.error('Unable to init libnotify.')
return
if not task.accepted:
return
config = self.prepare_config(config)
body_items = []
for entry in task.accepted:
try:
body_items.append(entry.render(config['item_template']))
except RenderError as e:
log.error('Error setting body message: %s' % e)
log.verbose("Send Notify-OSD notification about: %s", " - ".join(body_items))
title = config['title_template']
try:
title = render_from_task(title, task)
log.debug('Setting bubble title to :%s', title)
except RenderError as e:
log.error('Error setting title Notify-osd message: %s' % e)
n = Notify.Notification.new(title, '\n'.join(body_items), None)
n.show()
register_plugin(OutputNotifyOsd, 'notify_osd', api_ver=2)
| mit | Python | |
77922e6527ad0e2c223983c59329dea127cd38ef | Create heuristic_test | frila/agente-minimax | models/players/heuristic_test.py | models/players/heuristic_test.py | from models.algorithm.minimax import Heuristic
from models.algorithm.minimax import Minimax
| apache-2.0 | Python | |
93b2d737407389a1c4dbc67836a949663eeba948 | Call the new presubmit checks from chrome/ code, with a blacklist. | junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk-efl,anirudhSK/chromium,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,dednal/chromium.src,dushu1203/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,ltilve/chromium,zcbenz/cefode-chromium,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,M4sse/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,markYoungH/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,rogerwang/chromium,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,nacl-webkit/chrome_deps,markYoungH/chromium.src,timopulkkinen/BubbleFish,anirudhSK/chromium,dednal/chromium.src,anirudhSK/chromium,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,rogerwang/chromium,mogoweb/chromium-crosswalk,krieger-od/nwjs_chromium.src,patrickm/chromium.src,keishi/chromium,dednal/chromium.src,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl,keishi/chromium,Chilledheart/chromium,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,dednal/chromium.src,chuan9/chromium-crosswalk,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,nacl-webkit/chrome_deps,zcbenz/cefode-chromium,Jonekee/chromium.src,Jonekee/chromium.src,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,
Jonekee/chromium.src,junmin-zhu/chromium-rivertrail,rogerwang/chromium,robclark/chromium,axinging/chromium-crosswalk,Chilledheart/chromium,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,patrickm/chromium.src,jaruba/chromium.src,robclark/chromium,axinging/chromium-crosswalk,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,rogerwang/chromium,markYoungH/chromium.src,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,dednal/chromium.src,timopulkkinen/BubbleFish,Fireblend/chromium-crosswalk,robclark/chromium,hgl888/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,patrickm/chromium.src,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,timopulkkinen/BubbleFish,fujunwei/chromium-crosswalk,ltilve/chromium,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,junmin-zhu/chromium-rivertrail,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,nacl-webkit/chrome_deps,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,dednal/chromium.src,krieger-od/nwjs_chromium.src,zcbenz/cefode-chromium,zcbenz/cefode-chromium,markYoungH/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,ltilve/chromium,Chilledheart/chromium,nacl-webkit/chrome_deps,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,jaruba/chromium.src,junmin-zhu/chromium-rivertrail,hgl888/chromium-crosswalk,littlstar/chromium.src,keishi/chromium,anirudhSK/chromium,nacl-webkit/chrome_deps,axinging/chromium-crosswa
lk,mogoweb/chromium-crosswalk,pozdnyakov/chromium-crosswalk,M4sse/chromium.src,keishi/chromium,dednal/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,jaruba/chromium.src,dushu1203/chromium.src,robclark/chromium,mogoweb/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,fujunwei/chromium-crosswalk,zcbenz/cefode-chromium,axinging/chromium-crosswalk,ltilve/chromium,mogoweb/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,keishi/chromium,ltilve/chromium,robclark/chromium,M4sse/chromium.src,Chilledheart/chromium,jaruba/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,timopulkkinen/BubbleFish,ChromiumWebApps/chromium,keishi/chromium,jaruba/chromium.src,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,littlstar/chromium.src,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,rogerwang/chromium,hgl888/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,patrickm/chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,patrickm/chromium.src,M4sse/chromium.src,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,rogerwang/chromium,M4sse/chromium.src,ondra-novak/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,M4sse/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,markYoungH/chromium.src,hujiajie/pa-chromium,Chilledheart/chromium,robclark/chromium,ltilve/chromium,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,patrickm/chromium.src,TheTypoMaster/chromium-crosswalk,li
ttlstar/chromium.src,rogerwang/chromium,zcbenz/cefode-chromium,M4sse/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,rogerwang/chromium,axinging/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,Chilledheart/chromium,zcbenz/cefode-chromium,hujiajie/pa-chromium,markYoungH/chromium.src,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,junmin-zhu/chromium-rivertrail,Jonekee/chromium.src,Just-D/chromium-1,jaruba/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,robclark/chromium,keishi/chromium,hujiajie/pa-chromium,anirudhSK/chromium,Chilledheart/chromium,zcbenz/cefode-chromium,anirudhSK/chromium,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,keishi/chromium,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,robclark/chromium,dushu1203/chromium.src,nacl-webkit/chrome_deps,ondra-novak/chromium.src,robclark/chromium,krieger-od/nwjs_chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,jaruba/chromium.src,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,timopulkkinen/BubbleFish,rogerwang/chromium,Just-D/chromium-1,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,robclark/chromium,Chilledheart/chromium,keishi/chromium,chuan9/chromium-crosswalk,Chilledheart/chromium,nacl-webkit/chrome_deps,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Peter
WangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,zcbenz/cefode-chromium,rogerwang/chromium,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,junmin-zhu/chromium-rivertrail,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,timopulkkinen/BubbleFish,ondra-novak/chromium.src,pozdnyakov/chromium-crosswalk,Jonekee/chromium.src,axinging/chromium-crosswalk,keishi/chromium,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,M4sse/chromium.src,ChromiumWebApps/chromium,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,junmin-zhu/chromium-rivertrail,bright-sparks/chromium-spacewalk,hujiajie/pa-chromium,patrickm/chromium.src,hgl888/chromium-crosswalk,ltilve/chromium,krieger-od/nwjs_chromium.src,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,keishi/chromium,dushu1203/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,fujunwei/chromium-crosswalk | chrome/PRESUBMIT.py | chrome/PRESUBMIT.py | # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
  """Run cpplint over the affected C++ files, honoring the exclusions."""
  combined_black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE

  def source_file_filter(affected_file):
    return input_api.FilterSourceFile(affected_file,
                                      white_list=INCLUDE_CPP_FILES_ONLY,
                                      black_list=combined_black_list)

  return list(input_api.canned_checks.CheckChangeLintsClean(
      input_api, output_api, source_file_filter))
| bsd-3-clause | Python | |
923786f0ee9e5128337997b6687374f74388c1c2 | add leetcode Find Minimum in Rotated Sorted Array | Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code | leetcode/FindMinimuminRotatedSortedArray/solution.py | leetcode/FindMinimuminRotatedSortedArray/solution.py | # -*- coding:utf-8 -*-
class Solution:
    # @param num, a list of integer
    # @return an integer
    def findMin(self, num):
        """Binary-search the minimum of a rotated sorted array.

        Invariant: the minimum always lies within num[lo..hi].  Once
        the remaining window is itself sorted, num[lo] is the answer.
        """
        lo, hi = 0, len(num) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if num[lo] > num[mid]:
                # Rotation point is in (lo, mid]; discard the right half.
                hi = mid
            elif num[mid] > num[hi]:
                # Rotation point is in (mid, hi]; discard the left half.
                lo = mid + 1
            else:
                # num[lo] <= num[mid] <= num[hi]: window is sorted.
                break
        return num[lo]
| mit | Python | |
227e38318e41b3c11ee818fdb08b273f527ba686 | add test_source_stream.pyc | longaccess/longaccess-client,longaccess/longaccess-client,longaccess/longaccess-client | lacli/t/test_source_stream.py | lacli/t/test_source_stream.py | import os
from testtools import TestCase
from lacli.decorators import coroutine
class StreamSourceTest(TestCase):
    """Unit tests for lacli.source.stream.StreamSource, a write-only
    file-like object backed by a size coroutine and a data coroutine."""

    def setUp(self):
        super(StreamSourceTest, self).setUp()
        self.home = os.path.join('t', 'data', 'home')
        self.testfile = os.path.join('t', 'data', 'longaccess-74-5N93.html')

    def tearDown(self):
        super(StreamSourceTest, self).tearDown()

    def _makeit(self, *args, **kw):
        # Late import so an import failure surfaces as a test failure,
        # not a collection error.
        from lacli.source.stream import StreamSource
        return StreamSource(*args, **kw)

    def test_constructor_none(self):
        self._makeit(None, None)

    def test_constructor_chunk(self):
        f = self._makeit(None, None, chunk=123)
        self.assertEqual(123, f.chunk)
        self.assertEqual(False, f.readable())
        self.assertEqual(True, f.writable())

    @coroutine
    def coro(self, *args):
        # Helper coroutine: for each expected value, receive one value
        # via send() and assert it matches.
        for v in args:
            v2 = yield
            yield self.assertEqual(v, v2)

    def test_write(self):
        vs = ["x"*50, "x"*50]
        f = self._makeit(self.coro(100), self.coro("x"*100))
        for v in vs:
            self.assertEqual(len(v), f.write(v))
        f.close()

    def test_write_context(self):
        with self._makeit(self.coro(100), self.coro("x"*100)) as f:
            self.assertEqual(50, f.write("x"*50))
            self.assertEqual(50, f.write("x"*50))

    def test_write_context2(self):
        # Explicit close inside the context must be harmless.
        with self._makeit(self.coro(0), self.coro("x"*100)) as f:
            f.close()

    def test_write_context3(self):
        with self._makeit(self.coro(0), self.coro("x"*100)) as f:
            f.close()
            # Writing after close must raise, like any closed file.
            self.assertRaises(ValueError, f.write, "BAR")

    def test_write_context4(self):
        with self._makeit(self.coro(90), self.coro("x"*90)) as f:
            self.assertEqual(50, f.write("x"*50))
            self.assertEqual(40, f.write("x"*40))

    def test_write_context5(self):
        # Same totals, but with chunk equal to the total size.
        with self._makeit(self.coro(90), self.coro("x"*90), chunk=90) as f:
            self.assertEqual(50, f.write("x"*50))
            self.assertEqual(40, f.write("x"*40))

    def test_write_context6(self):
        # A destination coroutine that yields again instead of stopping
        # must make close() fail.
        f = self._makeit(self.coro(90), self.coro("x"*90), chunk=90)
        f.dst = self.coro(None)
        e = self.assertRaises(Exception, f.close)
        self.assertEqual("Generator didn't stop", str(e))

    def test_write_context7(self):
        # An exception raised inside the context must propagate as-is...
        def mustraise():
            with self._makeit(self.coro(90), self.coro("x"*90)):
                raise Exception('lala')
        e = self.assertRaises(Exception, mustraise)
        self.assertEqual("lala", str(e))

        # ...unless the end coroutine swallows the throw and keeps
        # yielding, which is reported as a distinct error.
        def catch():
            try:
                foo = yield
            except:
                foo = ''
            yield foo

        def mustraise2():
            with self._makeit(self.coro(90), self.coro("x"*90)) as f:
                f.end = catch()
                f.end.send(None)
                raise Exception('lala')
        e = self.assertRaises(Exception, mustraise2)
        self.assertEqual("Generator didn't stop after throw", str(e))
| apache-2.0 | Python | |
f8c7a80fc8500d53cacef904c4a7caea88263465 | Add 20150608 question. | fantuanmianshi/Daily,fantuanmianshi/Daily | LeetCode/gas_station.py | LeetCode/gas_station.py |
class Solution:
    # @param {integer[]} gas
    # @param {integer[]} cost
    # @return {integer}
    def canCompleteCircuit(self, gas, cost):
        """Return the start index from which the circuit can be completed,
        or -1 if the total gas is insufficient.

        Greedy single pass: whenever the running tank goes negative, no
        station up to here can be the start, so restart just after it.
        """
        surplus = [g - c for g, c in zip(gas, cost)]
        total = 0
        tank = 0
        start = 0
        for station, delta in enumerate(surplus):
            total += delta
            tank += delta
            if tank < 0:
                start = station + 1
                tank = 0
        return start if total >= 0 else -1
| mit | Python | |
08e57c27c47437b46c557f4697dd32d00f27fd7f | Create whatIsYourName.py | AlexEaton1105/computerScience | whatIsYourName.py | whatIsYourName.py | a = 20
b = 130
c = a + b
print (c)
d = 100
e = 2
f = d / e
print (f)
g = 34
h = 47
i = 82
j= g + h + i
print (j)
name = input("What is your name? ")
print("hello, ", name)
| mit | Python | |
4f99ffbc3deb321ba3ff76b23bacb889b11e1f4d | add to index solved | xala3pa/Computer-Science-cs101 | Lesson4/add_to_index.py | Lesson4/add_to_index.py | # Define a procedure, add_to_index,
# that takes 3 inputs:
# - an index: [[<keyword>,[<url>,...]],...]
# - a keyword: String
# - a url: String
# If the keyword is already
# in the index, add the url
# to the list of urls associated
# with that keyword.
# If the keyword is not in the index,
# add an entry to the index: [keyword,[url]]
index = []
def add_to_index(index,keyword,url):
for e in index:
if keyword in e:
e[1].append(url)
return
index.append([keyword,[url]])
add_to_index(index,'udacity','http://udacity.com')
add_to_index(index,'computing','http://acm.org')
add_to_index(index,'udacity','http://npr.org')
print index
#>>> [['udacity', ['http://udacity.com', 'http://npr.org']],
#>>> ['computing', ['http://acm.org']]]
| mit | Python | |
d824d2fc32774ce51e4f36d702a2a6cc131db558 | add migration file to automatically parse citations | sloria/osf.io,mfraezz/osf.io,leb2dg/osf.io,adlius/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,icereval/osf.io,TomBaxter/osf.io,leb2dg/osf.io,adlius/osf.io,chennan47/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,pattisdr/osf.io,binoculars/osf.io,adlius/osf.io,leb2dg/osf.io,saradbowman/osf.io,icereval/osf.io,leb2dg/osf.io,cslzchen/osf.io,mattclark/osf.io,felliott/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,icereval/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,laurenrevere/osf.io,binoculars/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,aaxelb/osf.io,TomBaxter/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,cslzchen/osf.io,mfraezz/osf.io,felliott/osf.io,cslzchen/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,baylee-d/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,TomBaxter/osf.io,erinspace/osf.io,mattclark/osf.io,aaxelb/osf.io,felliott/osf.io,caseyrollins/osf.io,crcresearch/osf.io,adlius/osf.io,brianjgeiger/osf.io,felliott/osf.io,binoculars/osf.io,laurenrevere/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,baylee-d/osf.io,sloria/osf.io,mattclark/osf.io,erinspace/osf.io | osf/migrations/0074_parse_citation_styles.py | osf/migrations/0074_parse_citation_styles.py | # This migration port `scripts/parse_citation_styles` to automatically parse citation styles.
# Additionally, this set the corresponding `has_bibliography` field to `False` for all citation formats whose CSL files do not
# include a bibliography section. As a result, all such citation formats would not show up in OSF
# citation widgets for users to choose.
#
# NOTE:
# As of December 6th, 2017, there are however THREE EXCEPTIONS:
# "Bluebook Law Review", "Bluebook Law Review(2)" and "Bluebook Inline" shares a
# special CSL file ('website/static/bluebook.cls'), in which a bibliography section is defined,
# in order to render bibliographies even though their official CSL files (located in CenterForOpenScience/styles repo)
# do not contain a bibliography section. Therefore, This migration also automatically set `has_bibliography` to `True` for all styles whose titles contain "Bluebook"
import logging
import os
from django.db import migrations
from lxml import etree
from osf.models.citation import CitationStyle
from website import settings
logger = logging.getLogger(__file__)
def get_style_files(path):
files = (os.path.join(path, x) for x in os.listdir(path))
return (f for f in files if os.path.isfile(f))
def parse_citation_styles(*args):
# drop all styles
CitationStyle.remove()
for style_file in get_style_files(settings.CITATION_STYLES_PATH):
with open(style_file, 'r') as f:
try:
root = etree.parse(f).getroot()
except etree.XMLSyntaxError:
continue
namespace = root.nsmap.get(None)
selector = '{{{ns}}}info/{{{ns}}}'.format(ns=namespace)
title = root.find(selector + 'title').text
# `has_bibliography` is set to `True` for Bluebook citation formats due to the special way we handle them.
has_bibliography = root.find('{{{ns}}}{tag}'.format(ns=namespace, tag='bibliography')) is not None or 'Bluebook' in title
# Required
fields = {
'_id': os.path.splitext(os.path.basename(style_file))[0],
'title': title,
'has_bibliography': has_bibliography,
}
# Optional
try:
fields['short_title'] = root.find(selector + "title-short").text
except AttributeError:
pass
try:
fields['summary'] = root.find(selector + 'summary').text
except AttributeError:
pass
style = CitationStyle(**fields)
style.save()
def revert(*args):
# The revert of this migration simply removes all CitationStyle instances.
CitationStyle.remove()
class Migration(migrations.Migration):
dependencies = [
('osf', '0073_citationstyle_has_bibliography'),
]
operations = [
migrations.RunPython(parse_citation_styles, revert),
] | apache-2.0 | Python | |
9f46cf4836ad555a54dc9c47b8b2843643a878f2 | Create migration for draft dos1 briefs to dos2 | alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api | migrations/versions/840_migrate_draft_dos1_briefs_to_draft_dos2.py | migrations/versions/840_migrate_draft_dos1_briefs_to_draft_dos2.py | """Migrate draft DOS1 briefs to draft DOS2 briefs
Revision ID: 840
Revises: 830
Create Date: 2017-02-07 15:31:50.715832
"""
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
from alembic import op
def upgrade():
# Change framework of draft DOS1 briefs from DOS1 (framework_id == 5) to DOS2 (framework_id == 7)
op.execute("""
UPDATE briefs
SET framework_id = 7
WHERE framework_id = 5 AND published_at IS NULL
""")
def downgrade():
# No downgrade
pass
| mit | Python | |
4a3d56589cbf4e94618795d3f1bc09fa0f59e5ca | Add "ROV_SRS_Library.py" file containing functions for main script. | Spongeneers/rov-srs-control | ROV_SRS_Library.py | ROV_SRS_Library.py | # ROV_SRS_Library
#
#
# Overview: A collection of helper functions used by the BeagleBone
# to control the ROV SRS Actuators.
#
# Authors: Jonathan Lee (2015)
#
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.PWM as PWM
def calc_pulse_width(pin_name):
"""Calculates the pulse width of a PWM signal input.
Stores the time of day on a Rising Edge and subsequent Falling Edge
event, then returns the difference in milliseconds.
Args:
pin_name: A String containing the pin name on which the PWM
signal is expected. The pin name should be in the
format defined by the Adafruit_BBIO library.
Returns:
A float containing the calculated pulse width, in
milliseconds.
"""
| bsd-3-clause | Python | |
0a45c8f0632f3e8ca5502b9e4fdbaef410b07c71 | rename settings.py | Masakichi/books | config.py | config.py | # -*- coding: utf-8 -*-
from flask import Flask
app = Flask(__name__)
| mit | Python | |
1d35451387f9cab55df12f28e71824b2dbe37153 | add back after exposing my key | meg2208/automash | config.py | config.py | ECHO_NEST_API_KEY = "INSERT ECHO NEST API KEY HERE" | mit | Python | |
88a1f41c99320117bedb9d9922f3737fa820768a | fix import in config | nerk/BookPlayer,grvrulz/BookPlayer | config.py | config.py |
#!/usr/bin/env python
# encoding: utf-8
"""
config.py
Application configurations
db_file : the SQLite file used to store the progress
serial : settings for the serial port that the RFID reader connects to
mpd_conn : the connection details for the MPD client
gpio_pins : the ids of the GPIO input pins and their callbacks
status_light_pin : the pin used by the status light
playing : keep track of playing status. rather use this instead of calling
status() all the time"""
import os
__version_info__ = (0, 0, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Willem van der Jagt"
db_file = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)), 'state.db')
serial = { "port_name" : "/dev/ttyAMA0", "baudrate" : 9600, "string_length" : 14 }
mpd_conn = { "host" : "localhost", "port" : 6600 }
gpio_pins = [
{ 'pin_id': 9, 'callback' : 'rewind' },
{ 'pin_id': 11, 'callback' : 'toggle_pause' },
{ 'pin_id': 22, 'callback' : 'volume_down' },
{ 'pin_id': 10, 'callback' : 'volume_up' }
]
status_light_pin = 23
|
#!/usr/bin/env python
# encoding: utf-8
"""
config.py
Application configurations
db_file : the SQLite file used to store the progress
serial : settings for the serial port that the RFID reader connects to
mpd_conn : the connection details for the MPD client
gpio_pins : the ids of the GPIO input pins and their callbacks
status_light_pin : the pin used by the status light
playing : keep track of playing status. rather use this instead of calling
status() all the time"""
__version_info__ = (0, 0, 1)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Willem van der Jagt"
db_file = "%s/%s" % (os.path.dirname(os.path.realpath(__file__)), 'state.db')
serial = { "port_name" : "/dev/ttyAMA0", "baudrate" : 9600, "string_length" : 14 }
mpd_conn = { "host" : "localhost", "port" : 6600 }
gpio_pins = [
{ 'pin_id': 9, 'callback' : 'rewind' },
{ 'pin_id': 11, 'callback' : 'toggle_pause' },
{ 'pin_id': 22, 'callback' : 'volume_down' },
{ 'pin_id': 10, 'callback' : 'volume_up' }
]
status_light_pin = 23
| mit | Python |
a335c9dbaa2da6dc429c9e280c6a6786422f0809 | Add code that generates byte encodings for various x86-32 instructions, with holes for constant operands | mseaborn/x86-decoder,mseaborn/x86-decoder | encoder.py | encoder.py |
import re
import subprocess
def write_file(filename, data):
fh = open(filename, "w")
try:
fh.write(data)
finally:
fh.close()
def Encode(instr):
write_file('tmp.S', instr + '\n')
subprocess.check_call(['as', '--32', 'tmp.S', '-o', 'tmp.o'])
proc = subprocess.Popen(['objdump', '-d', 'tmp.o'],
stdout=subprocess.PIPE)
lines = list(proc.stdout)
assert proc.wait() == 0
got = []
for line in lines:
match = re.match('\s*[0-9a-f]+:\s*((\S\S )+)\s*(.*)', line)
if match is not None:
bytes = match.group(1)
disasm = match.group(3)
bytes = [chr(int(part, 16)) for part in bytes.strip().split(' ')]
got.extend(bytes)
return got
def assert_eq(x, y):
if x != y:
raise AssertionError('%r != %r' % (x, y))
assert_eq(Encode('movl $0x12345678, 0x12345678(%eax)'),
list('\xc7\x80\x78\x56\x34\x12\x78\x56\x34\x12'))
def FormatByte(arg):
if arg == 'XX':
return arg
return '%02x' % ord(arg)
def Matches(string, substr):
i = 0
while True:
index = string.find(substr, i)
if index == -1:
return
yield index
i = index + len(substr)
def DiscoverArg(instr_template):
assert len(list(Matches(instr_template, 'VALUE'))) == 1
def Try(value, value_str):
bytes = Encode(instr_template.replace('VALUE', value))
bytes_str = ''.join(bytes)
return bytes, set(Matches(bytes_str, value_str))
op_size = 4
bytes1, indexes1 = Try('0x12345678', '\x78\x56\x34\x12')
bytes2, indexes2 = Try('0x12345679', '\x79\x56\x34\x12')
# op_size = 1
# bytes1, indexes1 = Try('0x12', '\x12')
# bytes2, indexes2 = Try('0x21', '\x21')
both = indexes1.intersection(indexes2)
assert len(both) == 1
index = list(both)[0]
def Erase(bytes):
for i in range(index, index + op_size):
bytes[i] = 'XX'
Erase(bytes1)
Erase(bytes2)
assert bytes1 == bytes2
return bytes1, index
assert_eq(DiscoverArg('and $VALUE, %ebx'),
(['\x81', '\xe3', 'XX', 'XX', 'XX', 'XX'], 2))
def DiscoverArgs2(template):
op_size = 4
dummy = '0x11111111'
# op_size = 1
# dummy = '0x11'
bytes1, index1 = DiscoverArg(template((dummy, 'VALUE')))
bytes2, index2 = DiscoverArg(template(('VALUE', dummy)))
def Erase(bytes, index):
for i in range(index, index + op_size):
assert bytes[i] == '\x11'
bytes[i] = 'XX'
Erase(bytes1, index2)
Erase(bytes2, index1)
assert bytes1 == bytes2
return bytes1
assert_eq(DiscoverArgs2(lambda x: 'movl $%s, %s(%%ebx)' % x),
['\xc7', '\x83', 'XX', 'XX', 'XX', 'XX', 'XX', 'XX', 'XX', 'XX'])
def Tokenise(string):
regexp = re.compile('[A-Z]+')
i = 0
while i < len(string):
match = regexp.search(string, i)
if match is None:
yield string[i:]
break
else:
if match.start() > 0:
yield string[i:match.start()]
yield match.group()
i = match.end()
assert_eq(list(Tokenise('FOO + BAR')), ['FOO', ' + ', 'BAR'])
assert_eq(list(Tokenise('(FOO + BAR)')), ['(', 'FOO', ' + ', 'BAR', ')'])
regs = (
'%eax',
'%ebx',
'%ecx',
'%edx',
'%esi',
'%edi',
'%ebp',
'%esp',
)
def Generate(instr):
if len(instr) == 0:
yield []
return
if instr[0] == 'REG':
vals = regs
else:
vals = [instr[0]]
for val in vals:
for rest in Generate(instr[1:]):
yield [val] + rest
def TryInstr(instr):
args = 0
for token in instr:
if token == 'VALUE':
args += 1
if args == 0:
return Encode(''.join(instr))
elif args == 1:
bytes, i = DiscoverArg(''.join(instr))
return bytes
else:
indexes = [index for index, token in enumerate(instr)
if token == 'VALUE']
def Subst(vals):
copy = instr[:]
for i, val in zip(indexes, vals):
copy[i] = val
return ''.join(copy)
return DiscoverArgs2(Subst)
templates = [
'add $VALUE, REG',
'sub $VALUE, REG',
'and $VALUE, REG',
'or $VALUE, REG',
'movl REG, REG',
'movl $VALUE, (REG)',
'movl REG, (REG)',
'movl (REG), REG',
'movl $VALUE, VALUE(REG)',
]
for template in templates:
for instr in Generate(list(Tokenise(template))):
bytes = TryInstr(instr)
print '%s %s' % (''.join(instr),
' '.join(FormatByte(byte) for byte in bytes))
| bsd-3-clause | Python | |
ffca5ea26c02170cc5edf6eea25ec9ef2c0c72bf | Disable trix serializer tests with Jython | ssssam/rdflib,armandobs14/rdflib,RDFLib/rdflib,marma/rdflib,marma/rdflib,yingerj/rdflib,RDFLib/rdflib,dbs/rdflib,RDFLib/rdflib,armandobs14/rdflib,marma/rdflib,armandobs14/rdflib,avorio/rdflib,yingerj/rdflib,ssssam/rdflib,ssssam/rdflib,ssssam/rdflib,avorio/rdflib,dbs/rdflib,yingerj/rdflib,avorio/rdflib,yingerj/rdflib,RDFLib/rdflib,marma/rdflib,dbs/rdflib,dbs/rdflib,avorio/rdflib,armandobs14/rdflib | test/test_trix_serialize.py | test/test_trix_serialize.py | #!/usr/bin/env python
import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
class TestTrixSerialize(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSerialize(self):
s1 = URIRef('store:1')
r1 = URIRef('resource:1')
r2 = URIRef('resource:2')
label = URIRef('predicate:label')
g1 = Graph(identifier = s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
s2 = URIRef('store:2')
g2 = Graph(identifier = s2)
g2.add((r2, label, Literal("label 3")))
g = ConjunctiveGraph()
for s,p,o in g1.triples((None, None, None)):
g.addN([(s,p,o,g1)])
for s,p,o in g2.triples((None, None, None)):
g.addN([(s,p,o,g2)])
r3 = URIRef('resource:3')
g.add((r3, label, Literal(4)))
r = g.serialize(format='trix')
g3 = ConjunctiveGraph()
g3.parse(BytesIO(r), format='trix')
for q in g3.quads((None,None,None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
if isinstance(q[3].identifier, URIRef):
tg=Graph(store=g.store, identifier=q[3].identifier)
else:
# BNode, this is a bit ugly
# we cannot match the bnode to the right graph automagically
# here I know there is only one anonymous graph,
# and that is the default one, but this is not always the case
tg=g.default_context
self.assertTrue(q[0:3] in tg)
import platform
if platform.system() == 'Java':
from nose import SkipTest
raise SkipTest('Jython issues - "JavaSAXParser" object has no attribute "start_namespace_decl"')
if __name__=='__main__':
unittest.main()
| #!/usr/bin/env python
import unittest
from rdflib.graph import ConjunctiveGraph
from rdflib.term import URIRef, Literal
from rdflib.graph import Graph
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
class TestTrixSerialize(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSerialize(self):
s1 = URIRef('store:1')
r1 = URIRef('resource:1')
r2 = URIRef('resource:2')
label = URIRef('predicate:label')
g1 = Graph(identifier = s1)
g1.add((r1, label, Literal("label 1", lang="en")))
g1.add((r1, label, Literal("label 2")))
s2 = URIRef('store:2')
g2 = Graph(identifier = s2)
g2.add((r2, label, Literal("label 3")))
g = ConjunctiveGraph()
for s,p,o in g1.triples((None, None, None)):
g.addN([(s,p,o,g1)])
for s,p,o in g2.triples((None, None, None)):
g.addN([(s,p,o,g2)])
r3 = URIRef('resource:3')
g.add((r3, label, Literal(4)))
r = g.serialize(format='trix')
g3 = ConjunctiveGraph()
g3.parse(BytesIO(r), format='trix')
for q in g3.quads((None,None,None)):
# TODO: Fix once getGraph/getContext is in conjunctive graph
if isinstance(q[3].identifier, URIRef):
tg=Graph(store=g.store, identifier=q[3].identifier)
else:
# BNode, this is a bit ugly
# we cannot match the bnode to the right graph automagically
# here I know there is only one anonymous graph,
# and that is the default one, but this is not always the case
tg=g.default_context
self.assertTrue(q[0:3] in tg)
if __name__=='__main__':
unittest.main()
| bsd-3-clause | Python |
ba6dc4269f96903f863748a779521d2bd8803d4f | Create Process.py | MariusWirtz/TM1py,OLAPLINE/TM1py | Samples/Process.py | Samples/Process.py | __author__ = 'Marius'
from TM1py import TM1Queries, Process
import uuid
import unittest
class TestAnnotationMethods(unittest.TestCase):
q = TM1Queries(ip='', port=8008, user='admin', password='apple', ssl=True)
random_string = str(uuid.uuid4()).replace('-', '_')
p_none = Process(name='unittest_none_' + random_string, datasource_type='None')
p_ascii = Process(name='unittest_ascii_' + random_string, datasource_type='ASCII',
datasource_ascii_delimiter_char=',',
datasource_data_source_name_for_server='C:\Data\simple_csv.csv',
datasource_data_source_name_for_client='C:\Data\simple_csv.csv')
# variables
p_ascii.add_variable('v_1', 'Numeric')
p_ascii.add_variable('v_2', 'Numeric')
p_ascii.add_variable('v_3', 'Numeric')
p_ascii.add_variable('v_4', 'Numeric')
# parameters
p_ascii.add_parameter('p_Year', 'year?', '2016')
p_view = Process(name='unittest_view_' + random_string, datasource_type='TM1CubeView', datasource_view='view1',
datasource_data_source_name_for_client='Plan_BudgetPlan',
datasource_data_source_name_for_server='Plan_BudgetPlan')
p_odbc = Process(name='unittest_odbc_' + random_string, datasource_type='ODBC')
# create Process
def test1_create_process(self):
self.q.create_process(self.p_none)
self.q.create_process(self.p_ascii)
self.q.create_process(self.p_view)
self.q.create_process(self.p_odbc)
# get Process
def test2_get_process(self):
p1 = self.q.get_process(self.p_ascii.name)
self.assertEqual(p1.body, self.p_ascii.body)
p2 = self.q.get_process(self.p_none.name)
self.assertEqual(p2.body, self.p_none.body)
p3 = self.q.get_process(self.p_view.name)
self.assertEqual(p3.body, self.p_view.body)
p4 = self.q.get_process(self.p_odbc.name)
x = p4.datasource_password = None
y = self.p_odbc.datasource_password = None
self.assertEqual(x, y)
# update process
def test3_update_process(self):
# get
p = self.q.get_process(self.p_ascii.name)
# modify
p.set_data_procedure(Process.auto_generated_string() + "x = 'Hi this is a test';")
# update on Server
self.q.update_process(p)
# get again
p_ascii_updated = self.q.get_process(p.name)
# assert
self.assertNotEqual(p_ascii_updated.data_procedure, self.p_ascii.data_procedure)
# delete process
def test4_delete_process(self):
self.q.delete_process(self.p_none.name)
self.q.delete_process(self.p_ascii.name)
self.q.delete_process(self.p_view.name)
self.q.delete_process(self.p_odbc.name)
def test_5_logout(self):
self.q.logout()
if __name__ == '__main__':
unittest.main()
| mit | Python | |
f26bdfa1ff0a388fb7bd2d473cf7b4b03fa61f6d | add unit test | DOAJ/doaj,DOAJ/doaj,DOAJ/doaj,DOAJ/doaj | doajtest/unit/event_consumers/test_application_publisher_revision_notify.py | doajtest/unit/event_consumers/test_application_publisher_revision_notify.py | from portality import models
from portality import constants
from portality.bll import exceptions
from doajtest.helpers import DoajTestCase
from doajtest.fixtures import ApplicationFixtureFactory
import time
from portality.events.consumers.application_publisher_revision_notify import ApplicationPublisherRevisionNotify
class TestApplicationPublisherRevisionNotify(DoajTestCase):
def setUp(self):
super(TestApplicationPublisherRevisionNotify, self).setUp()
def tearDown(self):
super(TestApplicationPublisherRevisionNotify, self).tearDown()
def test_consumes(self):
source = ApplicationFixtureFactory.make_application_source()
event = models.Event(constants.EVENT_APPLICATION_STATUS, context={"application": "abcd", "old_status": "in progress", "new_status": "revisions_required"})
assert ApplicationPublisherRevisionNotify.consumes(event)
event = models.Event(constants.EVENT_APPLICATION_STATUS,
context={"application": "abcd", "old_status": "revisions_required", "new_status": "revisions_required"})
assert not ApplicationPublisherRevisionNotify.consumes(event)
event = models.Event("test:event", context={"application" : "abcd"})
assert not ApplicationPublisherRevisionNotify.consumes(event)
event = models.Event(constants.EVENT_APPLICATION_STATUS)
assert not ApplicationPublisherRevisionNotify.consumes(event)
def test_consume_success(self):
self._make_and_push_test_context("/")
source = ApplicationFixtureFactory.make_application_source()
app = models.Application(**source)
app.save()
acc = models.Account()
acc.set_id("publisher")
acc.set_email("test@example.com")
acc.save()
event = models.Event(constants.EVENT_APPLICATION_STATUS, context={"application": "abcdefghijk", "old_status": "in progress", "new_status": "revisions_required"})
ApplicationPublisherRevisionNotify.consume(event)
time.sleep(2)
ns = models.Notification.all()
assert len(ns) == 1
n = ns[0]
assert n.who == "publisher", "Expected: {}, Received: {}".format("publisher", n.who)
assert n.created_by == ApplicationPublisherRevisionNotify.ID, "Expected: {}, Received: {}".format(ApplicationPublisherRevisionNotify.ID, n.created_by)
assert n.classification == constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE, "Expected: {}, Received: {}".format(constants.NOTIFICATION_CLASSIFICATION_STATUS_CHANGE, n.classification)
assert n.message is not None
assert n.action is None
assert not n.is_seen()
def test_consume_fail(self):
event = models.Event(constants.EVENT_APPLICATION_ASSED_ASSIGNED, context={"application": "abcd"})
with self.assertRaises(exceptions.NoSuchObjectException):
ApplicationPublisherRevisionNotify.consume(event)
| apache-2.0 | Python | |
d24e8c746359169058e9c0577c2f843695ca3b55 | Add 2 instance with EBS test. | citrix-openstack-build/heat,JioCloud/heat,dragorosson/heat,rh-s/heat,rickerc/heat_audit,noironetworks/heat,pshchelo/heat,jasondunsmore/heat,rh-s/heat,NeCTAR-RC/heat,cwolferh/heat-scratch,cryptickp/heat,miguelgrinberg/heat,varunarya10/heat,pratikmallya/heat,takeshineshiro/heat,ntt-sic/heat,pratikmallya/heat,redhat-openstack/heat,dims/heat,steveb/heat,Triv90/Heat,noironetworks/heat,rdo-management/heat,gonzolino/heat,miguelgrinberg/heat,Triv90/Heat,gonzolino/heat,rickerc/heat_audit,takeshineshiro/heat,steveb/heat,dims/heat,jasondunsmore/heat,redhat-openstack/heat,pshchelo/heat,maestro-hybrid-cloud/heat,varunarya10/heat,dragorosson/heat,cryptickp/heat,openstack/heat,openstack/heat,srznew/heat,NeCTAR-RC/heat,cwolferh/heat-scratch,maestro-hybrid-cloud/heat,citrix-openstack-build/heat,rdo-management/heat,ntt-sic/heat,srznew/heat,JioCloud/heat,Triv90/Heat | heat/tests/functional/test_WordPress_2_Instances_With_EBS.py | heat/tests/functional/test_WordPress_2_Instances_With_EBS.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import util
import verify
import nose
from nose.plugins.attrib import attr
from heat.common import context
from heat.engine import manager
import unittest
@attr(speed='slow')
@attr(tag=['func', 'wordpress', '2instance', 'ebs',
'WordPress_2_Instances_With_EBS.template'])
class WordPress2InstancesWithEBS(unittest.TestCase):
def setUp(self):
template = 'WordPress_2_Instances_With_EBS.template'
self.stack = util.Stack(template, 'F17', 'x86_64', 'cfntools')
self.WikiDatabase = util.Instance('WikiDatabase')
self.WikiDatabase.check_cfntools()
self.WikiDatabase.wait_for_provisioning()
self.WebServer = util.Instance('WebServer')
self.WebServer.check_cfntools()
self.WebServer.wait_for_provisioning()
def test_instance(self):
# ensure wordpress was installed
self.assertTrue(self.WebServer.file_present
('/etc/wordpress/wp-config.php'))
print "Wordpress installation detected"
# Verify the output URL parses as expected, ie check that
# the wordpress installation is operational
stack_url = self.stack.get_stack_output("WebsiteURL")
print "Got stack output WebsiteURL=%s, verifying" % stack_url
ver = verify.VerifyStack()
self.assertTrue(ver.verify_wordpress(stack_url))
# Check EBS volume is present and mounted
stdin, stdout, sterr = self.WikiDatabase.exec_command(
'grep vdc /proc/mounts')
result = stdout.readlines().pop().rstrip()
self.assertTrue(len(result))
print "Checking EBS volume is attached : %s" % result
devname = result.split()[0]
self.assertEqual(devname, '/dev/vdc1')
mountpoint = result.split()[1]
self.assertEqual(mountpoint, '/var/lib/mysql')
self.stack.cleanup()
| apache-2.0 | Python | |
24c5248d578774d13d69b001fad8f50e2eac192a | Add tracepoint_variable_sized_types.py | iovisor/bpftrace,iovisor/bpftrace,iovisor/bpftrace,iovisor/bpftrace | scripts/tracepoint_variable_sized_types.py | scripts/tracepoint_variable_sized_types.py | # This script lists all the types in the kernel's tracepoint format files
# which appear with more than one size. This script's output should be
# compared to the code in TracepointFormatParser::adjust_integer_types()
import glob
field_types = {}
for format_file in glob.iglob("/sys/kernel/debug/tracing/events/*/*/format"):
for line in open(format_file):
if not line.startswith("\tfield:"):
continue
size_section = line.split(";")[2].split(":")
if size_section[0] != "\tsize":
continue
size_val = size_section[1]
field_section = line.split(";")[0].split(":")
if field_section[0] != "\tfield":
continue
field_val = field_section[1]
if "[" in field_val or "*" in field_val:
continue
field_type = " ".join(field_val.split()[:-1])
if field_type not in field_types:
field_types[field_type] = set()
field_types[field_type].add(size_val)
for t in sorted(field_types):
sizes = field_types[t]
if len(sizes) > 1:
sizes_str = ",".join(sorted(sizes))
print(f"{t}: {sizes_str}")
| apache-2.0 | Python | |
15cf70107d999c673a6bd6a4e026f04396ceb5f3 | create basic and compatability test for nixio_fr | samuelgarcia/python-neo,JuliaSprenger/python-neo,NeuralEnsemble/python-neo,apdavison/python-neo,rgerkin/python-neo,INM-6/python-neo | neo/test/iotest/test_nixio_fr.py | neo/test/iotest/test_nixio_fr.py | import numpy as np
import unittest
from neo.io.nixio_fr import NixIO as NixIOfr
import quantities as pq
from neo.io.nixio import NixIO
class TestNixfr(unittest.TestCase):
files_to_test = ['nixio_fr.nix']
def setUp(self):
self.testfilename = 'nixio_fr.nix'
self.reader_fr = NixIOfr(filename=self.testfilename)
self.reader_norm = NixIO(filename=self.testfilename, mode='ro')
self.blk = self.reader_fr.read_block(block_index=1, load_waveforms=True)
# read block with NixIOfr
self.blk1 = self.reader_norm.read_block(index=1) # read same block with NIXio
def tearDown(self):
self.reader_fr.file.close()
self.reader_norm.close()
def test_check_same_neo_structure(self):
self.assertEqual(len(self.blk.segments), len(self.blk1.segments))
for seg1, seg2 in zip(self.blk.segments, self.blk1.segments):
self.assertEqual(len(seg1.analogsignals), len(seg2.analogsignals))
self.assertEqual(len(seg1.spiketrains), len(seg2.spiketrains))
self.assertEqual(len(seg1.events), len(seg2.events))
self.assertEqual(len(seg1.epochs), len(seg2.epochs))
def test_check_same_data_content(self):
for seg1, seg2 in zip(self.blk.segments, self.blk1.segments):
for asig1, asig2 in zip(seg1.analogsignals, seg2.analogsignals):
np.testing.assert_almost_equal(asig1.magnitude, asig2.magnitude)
# not completely equal
for st1, st2 in zip(seg1.spiketrains, seg2.spiketrains):
np.testing.assert_array_equal(st1.magnitude, st2.times)
for wf1, wf2 in zip(st1.waveforms, st2.waveforms):
np.testing.assert_array_equal(wf1.shape, wf2.shape)
np.testing.assert_almost_equal(wf1.magnitude, wf2.magnitude)
for ev1, ev2 in zip(seg1.events, seg2.events):
np.testing.assert_almost_equal(ev1.times, ev2.times)
assert np.all(ev1.labels == ev2.labels)
for ep1, ep2 in zip(seg1.epochs, seg2.epochs):
assert len(ep1.durations) == len(ep2.times)
np.testing.assert_almost_equal(ep1.times, ep2.times)
np.testing.assert_array_equal(ep1.durations, ep2.durations)
np.testing.assert_array_equal(ep1.labels, ep2.labels)
# Not testing for channel_index as rawio always read from seg
for chid1, chid2 in zip(self.blk.channel_indexes, self.blk1.channel_indexes):
for asig1, asig2 in zip(chid1.analogsignals, chid2.analogsignals):
np.testing.assert_almost_equal(asig1.magnitude, asig2.magnitude)
def test_analog_signal(self):
seg1 = self.blk.segments[0]
an_sig1 = seg1.analogsignals[0]
assert len(an_sig1) == 30
an_sig2 = seg1.analogsignals[1]
assert an_sig2.shape == (50,3)
def test_spike_train(self):
st1 = self.blk.segments[0].spiketrains[0]
assert np.all(st1.times == np.cumsum(np.arange(0,1,0.1)).tolist() * pq.s + 10 *pq.s)
def test_event(self):
seg1 = self.blk.segments[0]
event1 = seg1.events[0]
raw_time = 10 + np.cumsum(np.array([0,1,2,3,4]))
assert np.all(event1.times == np.array(raw_time *pq.s / 1000))
assert np.all(event1.labels == np.array([b'A', b'B', b'C', b'D', b'E']))
assert len(seg1.events) == 1
def test_epoch(self):
seg1 = self.blk.segments[1]
seg2 = self.blk1.segments[1]
epoch1 = seg1.epochs[0]
epoch2 = seg2.epochs[0]
assert len(epoch1.durations) == len(epoch1.times)
assert np.all(epoch1.durations == epoch2.durations)
assert np.all(epoch1.labels == epoch2.labels)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python | |
673dac79cbab6de0be5650d46840a3bc9858b2b4 | Add a help script to clear the test bucket | glasslion/django-qiniu-storage,jeffrey4l/django-qiniu-storage,Mark-Shine/django-qiniu-storage,jackeyGao/django-qiniu-storage | tests/clear_qiniu_bucket.py | tests/clear_qiniu_bucket.py | import os
from qiniu import Auth, BucketManager
QINIU_ACCESS_KEY = os.environ.get('QINIU_ACCESS_KEY')
QINIU_SECRET_KEY = os.environ.get('QINIU_SECRET_KEY')
QINIU_BUCKET_NAME = os.environ.get('QINIU_BUCKET_NAME')
QINIU_BUCKET_DOMAIN = os.environ.get('QINIU_BUCKET_DOMAIN')
def main():
auth = Auth(QINIU_ACCESS_KEY, QINIU_SECRET_KEY)
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(QINIU_BUCKET_NAME, limit=100)
if ret is None:
print info
break
for item in ret['items']:
name = item['key']
print "Deleting %s ..." % name
ret, info = bucket.delete(QINIU_BUCKET_NAME, name)
if ret is None:
print info
if eof:
break
if __name__ == '__main__':
main()
| mit | Python | |
da7deee98bb8d6a92d2ab1b8ad5c3e550a24fc83 | add `Config` class tests | pine/opoona | tests/config/test_config.py | tests/config/test_config.py | # -*- coding: utf-8 -*-
import os
import tempfile
import unittest
from mock import patch
from opoona.config import Config
class TestInvalidSyntaxException(unittest.TestCase):
    """Tests for opoona.config.Config (default path and YAML loading).

    NOTE(review): the class name suggests syntax-error tests, but it
    covers basic Config behaviour; renaming would change test discovery,
    so it is kept as-is.
    """

    @patch('os.path.expanduser')
    def test_init(self, expanduser):
        """Config resolves its path from ~/.opoona.yaml via expanduser."""
        expanduser.return_value = 'HOME/.opoona.yaml'
        config = Config()
        self.assertEqual(config.config_path, 'HOME/.opoona.yaml')
        expanduser.assert_called_with('~/.opoona.yaml')

    def test_load(self):
        """Config.load() parses the YAML file at config_path."""
        f = tempfile.NamedTemporaryFile(delete=False)
        yaml = '''\
github:
  token: XXX
'''
        f.write(yaml.encode('utf-8'))
        f.close()
        try:
            config = Config()
            config.config_path = f.name
            config.load()
            self.assertIsInstance(config['github'], dict)
            self.assertEqual(config['github']['token'], 'XXX')
        finally:
            # Fix: remove the temp file even when an assertion fails,
            # so failing runs do not leak files (delete=False above).
            os.remove(f.name)
| mit | Python | |
ae92573d2c86fa1e83b636c17c443cc8f97f4040 | Add unittest for ElementaryLine. | PytLab/catplot | tests/elementary_line_test.py | tests/elementary_line_test.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test case for ElementaryLine.
"""
import unittest
from catplot.ep_components.ep_lines import ElementaryLine
class ElementaryLineTest(unittest.TestCase):
    """Unit tests for catplot's ElementaryLine construction."""

    def setUp(self):
        # Fix: maxDiff must be None (no limit) to show full diffs on
        # failure; the original `True` behaves as the integer 1 and
        # truncates almost the entire diff message.
        self.maxDiff = None

    def test_construction_and_query(self):
        """ Test we can construct ElementaryLine object correctly.
        """
        line = ElementaryLine([0.0, 1.2, 0.7], n=2)

        # Interpolated x/y data (exact values recorded from a known-good run).
        ret_x = line.x.tolist()
        ref_x = [0.0, 1.0, 1.0, 2.0, 2.0, 3.0]
        self.assertListEqual(ret_x, ref_x)

        ret_y = line.y.tolist()
        ref_y = [0.0, 0.0, -3.4426554548552387e-18, 0.7, 0.7, 0.7]
        self.assertListEqual(ret_y, ref_y)

        # Default attribute values.
        self.assertIsNone(line.rxn_equation)
        self.assertEqual(line.color, "#000000")
        self.assertEqual(line.shadow_color, "#595959")
        self.assertEqual(line.shadow_depth, 7)
        self.assertEqual(line.hline_length, 1.0)
        self.assertEqual(line.interp_method, "spline")
        self.assertEqual(line.n, 2)
        self.assertEqual(line.peak_width, 1.0)


if "__main__" == __name__:
    suite = unittest.TestLoader().loadTestsFromTestCase(ElementaryLineTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
| mit | Python | |
666d9c467806782827edac4b2c0c13d494e41250 | Add a test for the status server | adamnew123456/jobmon | jobmon/test/test_status_server.py | jobmon/test/test_status_server.py | import os
import select
import socket
import time
import unittest
from jobmon.protocol import *
from jobmon import protocol, status_server, transport
# TCP port used by the tests in this module.
PORT = 9999

class StatusRecorder:
    """Collects (event, job) pairs so tests can assert on exactly what
    the status server reported, in order."""

    def __init__(self):
        self.records = []

    def process_start(self, job):
        """Record that *job* started."""
        self.records.append(('started', job))

    def process_stop(self, job):
        """Record that *job* stopped."""
        self.records.append(('stopped', job))
class TestCommandServer(unittest.TestCase):
# End-to-end check that events pushed to the status server reach the
# registered status recorder, in order.
def test_command_server(self):
status_recorder = StatusRecorder()
status_svr = status_server.StatusServer(status_recorder)
status_svr.start()
status_peer = status_svr.get_peer()
try:
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STARTJOB))
status_peer.send(protocol.Event('some_job',
protocol.EVENT_STOPJOB))
# NOTE(review): a fixed 5 s sleep makes the suite slow and can still
# be flaky on a loaded machine -- consider polling with a deadline.
time.sleep(5) # Give the server time to process all events
self.assertEqual(status_recorder.records,
[('started', 'some_job'),
('stopped', 'some_job')])
finally:
# Always shut the peer and server down, even if an assertion fails.
status_peer.close()
status_svr.terminate()
| bsd-2-clause | Python | |
09ea74a9b3b3f518c67f719c3525b14058b528af | add files | shwnbrgln/metes-and-bounds | declination.py | declination.py | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 20 21:11:52 2014
@author: SB
###############################################################################
This function gets the declination using webservices hosted
by the National Oceanic and Atmospheric Administration (NOAA)
Declination is a function of latitude and longitude and date
There are some limits on the numbers of times you can use the webservice per
second etc.
###############################################################################
"""
import requests
from xml.etree import ElementTree
def calc_declination(longitude, latitude, year):
# Query NOAA's geomag web service for the magnetic declination at the
# given position and (decimal) year; returns the declination as a float.
# NOTE(review): all three values are formatted with 4 decimal places,
# including the year -- confirm "%.4f" is intended for `year`.
longitude = "%.4f"%longitude
latitude = "%.4f"%latitude
startYear = "%.4f"%year
URL = "http://www.ngdc.noaa.gov/geomag-web/calculators/calculate\
Declination?lat1=" + latitude + "&lon1=" + longitude + "&startYear=" + startYear + "&resultFormat=xml"
XMLresponse = requests.get(URL)
# Assumes the declination is the 5th child of the response root's first
# element -- brittle; verify against the service's XML schema.  No error
# handling: network failures or schema changes raise here.
declination = ElementTree.fromstring(XMLresponse.content)[0][4].text
return float(declination)
| mit | Python | |
27bfb211b4f10a6a61e53b613b9074e90f417321 | create BST.py | ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms | BST.py | BST.py | class BinaryNode:
def __init__(self ):
# A bare BST node: payload plus child/parent links, all unset until
# the tree wires them up.
self.data = None
self.left = None
self.right = None
self.parent = None
class Binarytree:
    """A binary search tree of comparable values.

    Nodes are BinaryNode instances linked through .left/.right/.parent;
    duplicates are kept in the left subtree.
    """

    def __init__(self, data=None):
        self.root = BinaryNode()
        self.root.data = data

    def search(self, k):
        """Return True if k is present in the tree, else False."""
        return self.searchtree(self.root, k)

    def searchtree(self, r=None, k=None):
        """Recursive lookup for k in the subtree rooted at r."""
        if r is None:
            return False
        if k == r.data:
            return True
        if k > r.data:
            return self.searchtree(r.right, k)
        if k < r.data:
            return self.searchtree(r.left, k)

    def treemin(self, t):
        """Return the smallest value in the subtree rooted at t."""
        while t.left is not None:
            t = t.left
        return t.data

    def treemax(self, r):
        """Return the largest value in the subtree rooted at r."""
        while r.right is not None:
            r = r.right
        return r.data

    def treeSuc(self, x):
        """Return the in-order successor value of node x, or None when x
        holds the maximum.

        Fix: the original tested `x != y.left` before checking y for
        None, so it raised AttributeError at the maximum element.
        """
        if x.right is not None:
            return self.treemin(x.right)
        y = x.parent
        while y is not None and x is y.right:
            x = y
            y = y.parent
        return None if y is None else y.data

    def treePre(self, r):
        """Return the in-order predecessor value of node r, or None when
        r holds the minimum (the original dereferenced y.data and
        crashed in that case)."""
        if r.left is not None:
            return self.treemax(r.left)
        y = r.parent
        while y is not None and r is not y.right:
            r = y
            y = y.parent
        return None if y is None else y.data

    def Insert(self, v):
        """Insert value v; duplicates go to the left subtree."""
        if self.root.data is None:
            # Tree was constructed without an initial value.
            self.root.data = v
            return
        t = BinaryNode()
        t.data = v
        t.left = t.right = t.parent = None
        x = self.root
        y = None
        while x is not None:
            y = x
            x = x.left if v <= x.data else x.right
        t.parent = y
        # Fix: attach on the same side the descent used (<=, not <).
        # The original walked duplicates left but attached them right,
        # which could silently overwrite an existing right child.
        if v <= y.data:
            y.left = t
        else:
            y.right = t

    def _max_node(self, t):
        """Return the node (not value) holding the maximum of subtree t."""
        while t.right is not None:
            t = t.right
        return t

    def delete(self, x):
        """Unlink node x from the tree (standard BST deletion)."""
        if x.left is not None and x.right is not None:
            # Two children: copy the in-order predecessor's value into x,
            # then delete the predecessor node.  The original called an
            # undefined helper (treePre1) and never copied the value.
            pred = self._max_node(x.left)
            x.data = pred.data
            return self.delete(pred)
        # Zero or one child: splice the (possibly absent) child into x's
        # place.  Fix: the original assigned the *other* (None) child in
        # the one-child cases, discarding the surviving subtree.
        child = x.left if x.left is not None else x.right
        if x.parent is None:
            # Deleting the root: promote the child (if any).
            if child is not None:
                child.parent = None
                self.root = child
            return
        if x.parent.left is x:
            x.parent.left = child
        else:
            x.parent.right = child
        if child is not None:
            child.parent = x.parent
def height(v):
    """Number of nodes on the longest root-to-leaf path below v
    (0 for an empty subtree)."""
    if v is None:
        return 0
    return 1 + max(height(v.left), height(v.right))


def printlevelorder(v):
    """Print the tree rooted at v one level at a time (BFS order)."""
    for depth in range(1, height(v) + 1):
        printgivenlevel(v, depth)


def printgivenlevel(v, level):
    """Print every node that sits exactly `level` levels below v."""
    if v is None:
        return
    if level == 1:
        print(v.data)
    elif level > 1:
        printgivenlevel(v.left, level - 1)
        printgivenlevel(v.right, level - 1)


def printpre(v):
    """Print the subtree rooted at v in pre-order (node, left, right)."""
    if v is None:
        return
    print(v.data)
    printpre(v.left)
    printpre(v.right)
# --- demo script -------------------------------------------------------
# Builds this tree and exercises every operation:
#
#            10
#           /  \
#          5    14
#         / \   /
#        3   8 11
#           /    \
#          6      13
#           \
#            7
b = Binarytree(10)
b.Insert(5)
b.Insert(14)
b.Insert(3)
b.Insert(8)
b.Insert(11)
b.Insert(6)
b.Insert(13)
b.Insert(7)
print(b.search(3))
printpre(b.root)
print("the predessor")
print(b.treePre(b.root))
print("the successor is ")
print(b.treeSuc(b.root))
print("the maximum in tree is ")
print(b.treemax(b.root))
print("the minimum in tree is ")
print(b.treemin(b.root))
# Deletes the node at b.root.left.right (the node holding 8 above).
b.delete(b.root.left.right)
print("after deleting the node")
printpre(b.root)
print("the level order traversal")
printlevelorder(b.root)
print("the height of the tree")
print(height(b.root))
| cc0-1.0 | Python | |
1a9f379ed121945a79eff5c2fdd468c98f0381d7 | add Jeu.py | SUPINFOLaboDev/TheSnake | Jeu.py | Jeu.py | import pygame
import sys
from pygame.locals import *
class Jeu:
    """Game state holder (score) for the snake game.

    Most methods are still unimplemented stubs that return 0.
    """

    def __init__(self):
        # Fix: the original defined `def Jeu(self)` as if it were a
        # C++-style constructor, so it never ran on instantiation.  Real
        # construction now goes through __init__; Jeu() is kept as a
        # backward-compatible alias that (re)initialises the game.
        self.Jeu()

    def Jeu(self):
        self.__score = 0
        print('Jeu creer')

    def augmenter_Score(self):
        # Stub: should increase the score.
        return 0

    def recup_score(self):
        # Stub: should return the current score.
        return 0

    def score(self):
        return 0

    def tableau_jeu(self):
        # Stub: should draw/return the game board.
        return 0

    def test_collision(self):
        # Stub: should detect snake collisions.
        return 0

    def spawn_pomme(self):
        # Stub: should spawn an apple on the board.
        return 0
| mit | Python | |
63d7639f6c0e470575820be2b51444f34aa4bf2d | add flask app | Nuve17/Meruem | app.py | app.py | import jinja2
from flask import Flask, jsonify, make_response
from pdf_getter import main
app = Flask(__name__)


@app.route('/planning', methods=['GET'])
def get_planning():
    """Serve the planning PDF inline, or a JSON error payload.

    pdf_getter.main() refreshes ./planning.pdf and returns its filename
    (falsy on failure).
    """
    pdf_filename = main()
    if pdf_filename:
        # Fix: close the file handle (the original leaked it).
        with open("./planning.pdf", "rb") as pdf_file:
            binary_pdf = pdf_file.read()
        response = make_response(binary_pdf)
        response.headers['Content-Type'] = 'application/pdf'
        response.headers['Content-Disposition'] = 'inline; filename=planning.pdf'
        return response
    else:
        # Fix: the original built the JSON response but never returned
        # it, so Flask raised "view did not return a response".
        return jsonify("Error: There is an error, please contact the admin")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=False) | apache-2.0 | Python | |
ec78a2b7551838ab05dce6c2c93c8c42b76fc850 | Add utility functions (1) | quqixun/BrainTumorClassification,quqixun/BrainTumorClassification | src/btc_utilities.py | src/btc_utilities.py | # Brain Tumor Classification
# Script for Utility Functions
# Author: Qixun Qu
# Create on: 2017/10/11
# Modify on: 2017/10/11
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
import numpy as np
from math import factorial
def compute_hist(volume):
# Build a normalised intensity histogram of `volume`, estimate a
# background threshold, and return the histogram above background plus a
# Savitzky-Golay-smoothed copy as (x, y, ysg, bg).
rvolume = np.round(volume)
# One unit-wide bin per rounded intensity value (max value excluded by
# np.arange's half-open range).
bins = np.arange(np.min(rvolume), np.max(rvolume))
hist = np.histogram(rvolume, bins=bins, density=True)
x = hist[1][1:]
y = hist[0]
# Indices of non-empty bins; assumes at least two exist.
i = np.where(y > 0)
# Background cut-off: second occupied bin plus 5% of the occupied span.
bg_index = i[0][1] + int((i[0][-1] - i[0][1]) / 20.)
bg = x[bg_index]
x = x[bg_index:]
y = y[bg_index:]
# NOTE(review): window_size=31 requires >= ~31 bins above background;
# smaller volumes would make savitzky_golay's padding slice misbehave.
ysg = savitzky_golay(y, window_size=31, order=4)
return x, y, ysg, bg
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    """Smooth the 1-D signal `y` with a Savitzky-Golay filter.

    Fits a polynomial of degree `order` in a sliding window of
    `window_size` samples; `deriv` selects the deriv-th derivative of
    the fit, scaled by `rate**deriv`.  Returns an array the same length
    as `y`.

    Raises ValueError if window_size/order are not convertible to int,
    and TypeError (kept for backward compatibility) if window_size is
    not a positive odd number or is too small for `order`.
    """
    try:
        # Fix: np.int was removed in NumPy 1.20+; the builtin int is
        # what was meant.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except ValueError:
        raise ValueError("window_size and order have to be of type int")
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Design matrix of the local polynomial fit.
    # Fix: np.mat and the .A attribute are deprecated matrix APIs; use a
    # plain 2-D ndarray with np.linalg.pinv.
    b = np.array([[k ** i for i in order_range]
                  for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
    # Pad both ends with values mirrored about the end points so the
    # 'valid' convolution still covers the whole original range.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
| mit | Python | |
1d8ff137f4792121bdf0cb52f719dbd5966dc87b | Add missing cct.py | progers/cctdb,progers/cctdb | cct.py | cct.py | # cct.py - calling context tree
#
# A Calling Context Tree (CCT) has a single root which contains multiple calls to Functions which
# can themselves have other calls.
from collections import defaultdict
import json
class Function(object):
    """A node in a calling context tree: a named function plus the calls
    it makes, with a back-pointer to its caller."""

    def __init__(self, name):
        self.calls = []
        self.name = name
        self.parent = None
        # Per-callee-name tally of how many times this function calls it.
        self._callCountsByName = defaultdict(int)

    def addCall(self, call):
        """Attach `call` as a callee of this function.

        Raises ValueError if `call` already has a caller or has no name.
        """
        if call.parent:
            raise ValueError("Function is already called by an existing Function.")
        if not call.name:
            raise ValueError("Function cannot be added without a name.")
        self.calls.append(call)
        self._callCountsByName[call.name] += 1
        call.parent = self

    def callCountToFunctionName(self, name):
        """Number of direct calls this function makes to `name`.

        Fix: use .get() so querying an unknown name no longer inserts a
        spurious zero entry into the defaultdict (which also polluted
        uniqueCallNames() and grew memory on every miss).
        """
        return self._callCountsByName.get(name, 0)

    def callStack(self):
        """Return the root-to-self list of Function objects."""
        stack = []
        if self.parent:
            stack.extend(self.parent.callStack())
        stack.append(self)
        return stack

    def callNameStack(self):
        """Return the root-to-self list of function names."""
        return [call.name for call in self.callStack()]

    def uniqueCallNames(self):
        """Names of the distinct functions this one calls directly."""
        return self._callCountsByName.keys()

    def asJson(self, indent=None):
        """Serialize this subtree to a JSON string."""
        return json.dumps(self, sort_keys=False, indent=indent, cls=FunctionJSONEncoder)

    @staticmethod
    def fromJson(string):
        """Deserialize a Function subtree from a JSON string."""
        return json.loads(string, cls=FunctionJSONDecoder)
# A CCT is the root for a calling context tree and represents the program entry point.
class CCT(Function):
    """The root of a calling context tree -- the program entry point.

    The root itself is anonymous (name None); only its calls matter.
    """

    def __init__(self):
        Function.__init__(self, None)

    def asJson(self, indent=None):
        """Serialize only the top-level calls, not the anonymous root."""
        return json.dumps(self.calls, sort_keys=False, indent=indent, cls=FunctionJSONEncoder)

    def callStack(self):
        """The root contributes nothing to call stacks."""
        return []

    @staticmethod
    def fromRecord(string):
        """Build a CCT from an "entering X"/"exiting X" trace log.

        Lines that match neither prefix are ignored.  Raises
        AssertionError on mismatched or unbalanced nesting.
        """
        current = CCT()
        for line in string.splitlines():
            if line.startswith("entering "):
                callee = Function(line[len("entering "):])
                current.addCall(callee)
                current = callee
            elif line.startswith("exiting "):
                name = line[len("exiting "):]
                if current.name != name:
                    raise AssertionError("Incorrect nesting found when exiting " + name)
                current = current.parent
        if current.parent:
            raise AssertionError("Incorrect nesting found when exiting " + current.name)
        return current

    @staticmethod
    def fromJson(string):
        """Build a CCT from JSON produced by asJson()."""
        tree = CCT()
        for decoded in json.loads(string, cls=FunctionJSONDecoder):
            tree.addCall(decoded)
        return tree
class FunctionJSONEncoder(json.JSONEncoder):
    """Encodes a Function subtree as nested {"name": ..., "calls": [...]}
    dicts; the "calls" key is omitted for leaf functions."""

    def default(self, function):
        if not isinstance(function, Function):
            # Not one of ours -- defer to the base encoder.
            return super(FunctionJSONEncoder, self).default(function)
        encoded = {"name": function.name}
        if function.calls:
            encoded["calls"] = [self.default(call) for call in function.calls]
        return encoded
class FunctionJSONDecoder(json.JSONDecoder):
    """Decodes JSON produced by FunctionJSONEncoder back into Function
    objects; dicts without a "name" key pass through unchanged."""

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        if "name" not in obj:
            return obj
        decoded = Function(obj["name"])
        for call in obj.get("calls", ()):
            if isinstance(call, Function):
                decoded.addCall(call)
        return decoded
| apache-2.0 | Python | |
800df7bcaa57e0935c0836f1f49de6407c55c212 | add tests for clock exercise | mcdickenson/python-washu-2014 | day2/clock_test.py | day2/clock_test.py | import unittest
import clock
class ClockTest(unittest.TestCase):
    """Tests for clock.Clock.

    Fix: the module is imported as `import clock`, but the original body
    referred to a bare `Clock`, which is never in scope (NameError at
    runtime).  All references now go through `clock.Clock`.  Locals
    previously named `clock` were renamed as well -- an assignment to
    `clock` would make the name function-local and raise
    UnboundLocalError before `clock.Clock.at(...)` could run.
    """

    def test_on_the_hour(self):
        self.assertEqual("08:00", str(clock.Clock.at(8)))
        self.assertEqual("09:00", str(clock.Clock.at(9)))

    def test_past_the_hour(self):
        self.assertEqual("11:09", str(clock.Clock.at(11, 9)))

    def test_add_a_few_minutes(self):
        result = clock.Clock.at(10) + 3
        self.assertEqual("10:03", str(result))

    def test_add_over_an_hour(self):
        result = clock.Clock.at(10) + 61
        self.assertEqual("11:01", str(result))

    def test_wrap_around_at_midnight(self):
        result = clock.Clock.at(23, 30) + 60
        self.assertEqual("00:30", str(result))

    def test_subtract_minutes(self):
        result = clock.Clock.at(10) - 90
        self.assertEqual("08:30", str(result))

    def test_equivalent_clocks(self):
        clock1 = clock.Clock.at(15, 37)
        clock2 = clock.Clock.at(15, 37)
        self.assertEqual(clock1, clock2)

    def test_inequivalent_clocks(self):
        clock1 = clock.Clock.at(15, 37)
        clock2 = clock.Clock.at(15, 36)
        clock3 = clock.Clock.at(14, 37)
        self.assertNotEqual(clock1, clock2)
        self.assertNotEqual(clock1, clock3)

    def test_wrap_around_backwards(self):
        result = clock.Clock.at(0, 30) - 60
        self.assertEqual("23:30", str(result))


if __name__ == '__main__':
    unittest.main()
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.