commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
abd70f40aaa026844f9b088a4a648ce58e469839 | add preliminary measure script | miklos1/fayette | measure.py | measure.py | from __future__ import absolute_import, division, print_function
from six.moves import filter, intern, map, range, zip
from functools import reduce
from numpy import cbrt, floor, sqrt
from firedrake.petsc import PETSc
from firedrake import ExtrudedMesh, UnitSquareMesh, assemble
import form
num_matvecs = 20  # number of mat-vec products timed per matrix type

# Enable PETSc logging and grab the events we want to time.
PETSc.Log.begin()
parloop_event = PETSc.Log.Event("ParLoopExecute")
assemble_event = PETSc.Log.Event("AssembleMat")
matmult_event = PETSc.Log.Event("MatMult")

# Bilinear forms to benchmark, defined in the sibling `form` module.
problems = [form.mass, form.mass_dq, form.mass_gll, form.mass_vec,
            form.poisson, form.poisson_gll, form.helmholtz,
            form.stokes_momentum, form.elasticity,
            form.hyperelasticity, form.curl_curl]

for problem in problems:
    name = problem.__name__
    PETSc.Sys.Print(name)
    for degree in range(1, 4):
        # Scale the mesh so the total work (~1e8 / (p+1)^7) stays roughly
        # constant as the polynomial degree grows, then factor the cell
        # count into an extruded w x d x h box mesh.
        num_cells = max(1, 1e8 / (degree + 1)**7)
        w = int(floor(cbrt(num_cells)))
        d = int(floor(sqrt(num_cells / w)))
        h = int(round(num_cells / (w * d)))
        num_cells = w * d * h
        PETSc.Sys.Print("degree = {}: num_cells = {}".format(degree, num_cells))
        mesh = ExtrudedMesh(UnitSquareMesh(w, d, quadrilateral=True), h)
        J = problem(mesh, degree)
        # Compare assembled (aij) against matrix-free operator application.
        for typ in ["aij", "matfree"]:
            # Warmup and allocate
            A = assemble(J, mat_type=typ)
            A.force_evaluation()
            Ap = A.petscmat
            x, y = Ap.createVecs()
            Ap.mult(x, y)
            # Time assembly and num_matvecs products inside a dedicated
            # PETSc log stage so getPerfInfo() reports only this stage.
            stage = PETSc.Log.Stage("%s(%d) %s matrix" % (name, degree, typ))
            with stage:
                with assemble_event:
                    assemble(J, mat_type=typ, tensor=A)
                    A.force_evaluation()
                Ap = A.petscmat
                for _ in range(num_matvecs):
                    Ap.mult(x, y)
                parloop = parloop_event.getPerfInfo()
                assembly = assemble_event.getPerfInfo()
                matmult = matmult_event.getPerfInfo()
            matmult_time = matmult["time"] / num_matvecs
            assemble_time = assembly["time"]
            print(typ, assemble_time, matmult_time, parloop["time"] / parloop["count"], parloop["count"])
| mit | Python | |
462f9a651eb93aca3c8ff980345e40429e6f3fe9 | add migrate script | anapat/Upload-Data-to-Amazon-S3 | migrate.py | migrate.py | # -*- coding: utf-8 -*-
import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key

# Module-level S3 connection and target bucket; credentials are placeholders
# to be filled in before running.
connection = S3Connection(
    host = 's3.amazonaws.com',  # S3 Compatible Services
    is_secure = True,
    aws_access_key_id = 'access_key_id',  # Add your access key
    aws_secret_access_key = 'secret_access_key'  # Add your secret key
)
bucket = connection.get_bucket('bucket_name', validate = True)

COMMON_PATH = '/common_folder/'  # COMMON PATH OF YOUR S3 AND YOUR SERVER
BASE = os.path.dirname(os.path.abspath(__file__))  # directory this script lives in
def upload(path, filename):
    """Upload one local file to the bucket, keyed by its path after COMMON_PATH."""
    # NOTE(review): the `path` parameter is never used - files found in
    # subdirectories by os.walk will be looked up as BASE/<filename> and
    # miss; presumably this should join `path` and `filename`. Confirm.
    path_file = '%s/%s'%(BASE, filename)
    if COMMON_PATH in path_file:
        # NOTE(review): ROOT_PATH is not defined anywhere in this file, so
        # this branch raises NameError at runtime - it looks like it was
        # meant to be the S3 key prefix. Define it before running.
        path_upload = ROOT_PATH + path_file.rsplit(COMMON_PATH, 1)[1]
        print ' Upload to : %s' % path_upload
        key = Key(bucket, path_upload)
        key.set_contents_from_filename(path_file)
    else:
        print ' Upload path not found.'
if __name__ == '__main__':
    # Walk the current tree and upload every file except this script itself.
    count = 1
    for path, subdirs, files in os.walk('.'):
        for name in files:
            # NOTE(review): `not in` is a substring test against the script's
            # basename, not an inequality - a file named e.g. "migrate" would
            # also be skipped. Presumably `name != os.path.basename(__file__)`
            # was intended.
            if name not in os.path.basename(__file__):
                # NOTE(review): len(files)-1 is per-directory, so the
                # "(count/total)" progress display is only meaningful when
                # everything sits in one directory.
                print '> Execute File (%s/%s) : %s '% (count, len(files)-1, os.path.join(path, name)[1:])
                upload(path, name)
                count += 1
| apache-2.0 | Python | |
feb4e40afa8b589d9dc90652099202d07921f4b8 | add 0021 | Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2 | llluiop/0021/password.py | llluiop/0021/password.py | #!/usr/bin/env python
#-*- coding: utf-8-*-
import os
from hashlib import sha256
from hmac import HMAC
def encode(password):
    """Digest *password* with 10 rounds of HMAC-SHA256 keyed by a random salt."""
    # NOTE(review): the salt is generated here and never returned or stored,
    # so the resulting digest can never be recomputed to verify a password
    # later - the salt should be returned (or persisted) with the digest.
    salt = os.urandom(8)
    print salt
    result = password.encode("utf-8")
    # Key-stretching loop; 10 iterations is very low by modern standards.
    for i in range(10):
        result = HMAC(result, salt, sha256).digest()
    return result
if __name__ == "__main__":
password = "password"
print encode(password) | mit | Python | |
debaa1f32b6b2dcbc7a7e8a02de19afc2c86a29f | add asgi file | fcurella/django-channels-react-redux,fcurella/django-channels-react-redux,fcurella/django-channels-react-redux | django_react/asgi.py | django_react/asgi.py | import os
import channels.asgi

# setdefault keeps any DJANGO_SETTINGS_MODULE already present in the
# environment; otherwise fall back to this project's settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_react.settings")
# Module-level attribute that the channels/ASGI server process looks up.
channel_layer = channels.asgi.get_channel_layer()
| bsd-3-clause | Python | |
002e903c978a30f27ed24316bb85958e5c69a259 | Solve Code Fights count visitors problem | HKuz/Test_Code | CodeFights/countVisitors.py | CodeFights/countVisitors.py | #!/usr/local/bin/python
# Code Fights Count Visitors Problem
class Counter(object):
    """Minimal counter: seed it with a starting value, bump it, read it."""

    def __init__(self, value):
        # Running total, seeded by the caller.
        self._count = value

    def inc(self):
        """Increase the stored count by one."""
        self._count += 1

    def get(self):
        """Return the current count."""
        return self._count
def countVisitors(beta, k, visitors):
    """Return beta plus the number of visits whose value is at least k."""
    tally = Counter(beta)
    # Bump the counter once for every qualifying visit.
    for _ in (visit for visit in visitors if visit >= k):
        tally.inc()
    return tally.get()
def main():
    """Run countVisitors against fixed cases and report pass/fail per case."""
    cases = [
        (22, 5, [4, 6, 6, 5, 2, 2, 5], 26),
        (1, 5, [], 1),
        (34, 8, [1, 2, 3, 4, 5, 6, 7], 34),
        (4, 5, [3, 4, 65, 3, 2, 4, 5, 3, 5], 7),
        (38, 20, [20], 39),
    ]
    for beta, k, visitors, expected in cases:
        actual = countVisitors(beta, k, visitors)
        if actual == expected:
            print("PASSED: countVisitors({}, {}, {}) returned {}"
                  .format(beta, k, visitors, actual))
        else:
            print(("FAILED: countVisitors({}, {}, {}) returned {},"
                   "answer: {}").format(beta, k, visitors, actual, expected))


if __name__ == '__main__':
    main()
| mit | Python | |
f53aef9fdcd01fdb8607984e38b4fb8c5813aacf | Solve Code Fights fibonacci list problem | HKuz/Test_Code | CodeFights/fibonacciList.py | CodeFights/fibonacciList.py | #!/usr/local/bin/python
# Code Fights Fibonacci List Problem
from functools import reduce
def fibonacciList(n):
    """Return the first n Fibonacci numbers, each rendered as a list of zeros.

    E.g. fibonacciList(5) -> [[], [0], [0], [0, 0], [0, 0, 0]] because the
    first five Fibonacci numbers are 0, 1, 1, 2, 3.

    The original reduce-based one-liner always seeded the sequence with
    [0, 1], so it returned two entries even for n == 0 or n == 1; this
    version truncates the seed so those edge cases are correct.
    """
    fibs = [0, 1][:n]  # seed, truncated so n == 0 or n == 1 work
    while len(fibs) < n:
        fibs.append(fibs[-1] + fibs[-2])
    return [[0] * f for f in fibs]
def main():
    """Exercise fibonacciList on fixed inputs and report pass/fail per case."""
    expectations = {
        6: [[], [0], [0], [0, 0], [0, 0, 0], [0, 0, 0, 0, 0]],
        2: [[], [0]],
        3: [[], [0], [0]],
        8: [[], [0], [0], [0, 0], [0, 0, 0], [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
        5: [[], [0], [0], [0, 0], [0, 0, 0]],
    }
    for n, expected in expectations.items():
        actual = fibonacciList(n)
        if actual == expected:
            print("PASSED: fibonacciList({}) returned {}"
                  .format(n, actual))
        else:
            print(("FAILED: fibonacciList({}) returned {}, answer: {}")
                  .format(n, actual, expected))


if __name__ == '__main__':
    main()
| mit | Python | |
93a9ba6bacf6c1f32b8601c6de6153048c5d9feb | Create Keithley_autoprobefilter.py | jzmnd/fet-py-scripts | Keithley_autoprobefilter.py | Keithley_autoprobefilter.py | # Reads Keithley .xls and filters data from autoprober
# Jeremy Smith
# Northwestern University
# Version 1.1
from numpy import *
import xlrd
import os
import sys
from myfunctions import *
from scipy import stats
data_path = os.path.dirname(__file__)  # Path name for location of script
print "\n"
print data_path
print "\n"

files = os.listdir(data_path)  # All files in directory
data_summary = []

# Analysis constants: leading points to skip, channel length/width and the
# gate capacitance per unit area used in the saturation-mobility formulas.
skipinit = 5  # Initial data points to skip
l = 50.
w = 150.
ci = 497e-9

# Loops through each device file
for d in files:
    print d
    # Only process transfer-curve (Vgs-Id) workbooks.
    if "Vgs-Id" not in d:
        continue
    workbook = xlrd.open_workbook(d, logfile=open(os.devnull, 'w'))
    datasheet = workbook.sheet_by_index(0)
    data = {}
    colhead = []
    # Row 0 of the data sheet holds the column headers.
    for h in range(datasheet.ncols):
        data[datasheet.cell_value(0,h)] = []
        colhead.append(datasheet.cell_value(0,h))
    # Remaining rows are numeric measurement samples, keyed by header.
    for r in range(datasheet.nrows-1):
        for c in range(datasheet.ncols):
            data[colhead[c]].append(float(datasheet.cell_value(r+1,c)))
    settingsheet = workbook.sheet_by_index(2)

    idrain = array(data["DrainI"])
    igate = array(data["GateI"])
    vgate = array(data["GateV"])
    vdrain = float(settingsheet.cell_value(19,2))  # drain bias read from the settings sheet

    # On/off ratio taken between the last point and the VG == 0 point.
    onoff_at_zero = abs(idrain[-1]/idrain[where(vgate==0)[0][0]])
    leakage_ratio = abs(idrain/igate)

    # Initial filtering steps
    filtered = False
    if onoff_at_zero < 1e2:
        filtered = True
    if leakage_ratio[where(vgate==2)[0][0]] < 10:
        filtered = True
    if abs(idrain[-1]) < 1e-10:
        filtered = True
    if filtered == True:
        print " FILTERED"
        continue

    # Smoothing the drain current data
    idrain_smoothed = adjAvSmooth(abs(idrain), N=1)
    # Finding max transconductance
    sqrtidrain = sqrt(idrain_smoothed)
    diff_sqrt_idrain_smoothed = array(numDiff(sqrtidrain, vgate))
    tmaxarg = argmax(diff_sqrt_idrain_smoothed[skipinit:-1]) + skipinit
    # Saturation mobility (max transconductance)
    satmob_tmax = (2*l/(w*ci))*(diff_sqrt_idrain_smoothed[tmaxarg])**2
    # Threshold Voltage (max transconductance)
    vth_tmax = vgate[tmaxarg] - sqrt(idrain_smoothed)[tmaxarg]/diff_sqrt_idrain_smoothed[tmaxarg]
    # On-off ratio
    onoffratio = log10(max(idrain_smoothed[skipinit:-1])/min(idrain_smoothed[skipinit:-1]))

    # Finds range of data that lies within the minimum+15% and the maximum-15% and also has a positive transconductance
    fitrange_id = [0.85*min(sqrtidrain[skipinit:-1]) + 0.15*max(sqrtidrain[skipinit:-1]), 0.85*max(sqrtidrain[skipinit:-1]) + 0.15*min(sqrtidrain[skipinit:-1])]
    # NOTE(review): numpy.bitwise_and takes two input arrays; a third
    # positional argument is the *out* buffer, so the third condition
    # (positive transconductance) is not actually AND-ed into the mask here.
    # Confirm intent - (a & b & c) would combine all three.
    fitrange_bool = bitwise_and(sqrtidrain > fitrange_id[0], sqrtidrain < fitrange_id[1], diff_sqrt_idrain_smoothed > 0)

    # Checks that there are at least 3 data points to fit
    if sum(fitrange_bool) < 3:
        filtered = True
        print " FILTERED"
        continue

    # Linear Fitting to sqrt(Idrain)
    slope, intercept, r_value, p_value, std_err = stats.linregress(vgate[fitrange_bool], sqrtidrain[fitrange_bool])
    fitline = slope*vgate + intercept
    # Saturation mobility (from slope of sqrt(Idrain) fit)
    satmob_FITTED = (2*l/(w*ci))*slope**2
    # Threshold Voltage (from slope of sqrt(Idrain) fit)
    vth_FITTED = -intercept/slope

    # Second filtering steps
    if abs(vth_FITTED) > 3.0:
        filtered = True
    if satmob_FITTED < 0.1:
        filtered = True
    if satmob_FITTED > 250.:
        filtered = True
    if r_value**2 < 0.9:
        filtered = True
    if filtered == True:
        print " FILTERED"
        continue

    # Device survived all filters: record its summary row and save plots.
    data_summary.append([d[:-4], satmob_tmax, vth_tmax, log10(onoff_at_zero), onoffratio, log10(leakage_ratio[where(vgate==2)[0][0]]), satmob_FITTED, vth_FITTED, r_value**2])
    quickPlot(d[:-4]+"_SQRTplot", data_path, [vgate, sqrtidrain, fitline], xlabel="VG [V]", ylabel="sqrt(Id) [A^0.5]", yrange=[0, 'auto'])
    quickPlot(d[:-4]+"_TRANSFERplot", data_path, [vgate, idrain_smoothed, abs(igate)], xlabel="VG [V]", ylabel="Id,g [A]", yscale="log", yrange=[1e-12, 1e-3])

# Write a tab-separated summary for every device that passed filtering.
outfile = open(os.path.join(data_path, "summarylist.txt"), "w")
outfile.write("Device\tSatMobility(maxtrans)\tVthreshold(maxtrans)\tLogOnOffAtZero\tLogOnOffRatio\tLogLeakageRatioAt2V\tSatMobility(fitted)\tVthreshold(fitted)\tFITr2\n")
for a in data_summary:
    outfile.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"%(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]))
outfile.close()
| mit | Python | |
902cf7f0b167847a96e1db0cd523878c5abb9032 | add addlc.py: add 2 lightcurves (to be used to crate EPIC combined lightcurves) | evandromr/python_scitools | addlcs.py | addlcs.py | #!/usr/env python
import myscitools
import glob
if __name__ == '__main__':
    '''
    Add lightcurves
    MOS1 + MOS2 = MOSS
    PN + MOSS = EPIC
    '''
    # Collect the per-camera net lightcurves, in deterministic order.
    mos1files = sorted(glob.glob('MOS1_lc_net*'))
    mos2files = sorted(glob.glob('MOS2_lc_net*'))
    pnfiles = sorted(glob.glob('PN_lc_net*'))

    # Output names keep each MOS1 file's suffix under a new prefix.
    mossfiles = ['MOSS' + m1[4:] for m1 in mos1files]
    epicfiles = ['EPIC' + m1[4:] for m1 in mos1files]

    # MOS1 + MOS2 -> MOSS
    for m1, m2, moss in zip(mos1files, mos2files, mossfiles):
        myscitools.addlc(m1, m2, moss)
    # PN + MOSS -> EPIC
    for pn, moss, epic in zip(pnfiles, mossfiles, epicfiles):
        myscitools.addlc(pn, moss, epic)
| mit | Python | |
0116701a64748efe1348686c2c52069d8d94c5f9 | Add migration | mfcovington/djangocms-lab-carousel,mfcovington/djangocms-lab-carousel | cms_lab_carousel/migrations/0004_auto_20151207_0015.py | cms_lab_carousel/migrations/0004_auto_20151207_0015.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: loosens slide FKs and refreshes help texts.

    Makes Slide.carousel nullable with SET_NULL, protects the publication
    FK with PROTECT, and updates the publish_datetime default/help text.
    Generated by `makemigrations`; do not hand-edit the operations.
    """

    dependencies = [
        ('cms_lab_carousel', '0003_auto_20150827_0111'),
    ]

    operations = [
        migrations.AlterField(
            model_name='slide',
            name='carousel',
            field=models.ForeignKey(to='cms_lab_carousel.Carousel', help_text='Choose a carousel for this slide.', on_delete=django.db.models.deletion.SET_NULL, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='publication',
            field=models.ForeignKey(to='cms_lab_publications.Publication', help_text='<strong>If this slide is for a publication, select/create a publication.</strong><br>The publication info will be used to auto-populate the title, subtitle, and description fields when slide is saved (if those fields are left blank).<br>To override this auto-fill behavior, manually enter the title, subtitle, and/or description below.', on_delete=django.db.models.deletion.PROTECT, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='slide',
            name='publish_datetime',
            field=models.DateTimeField(default=django.utils.timezone.now, help_text='<strong>Choose date/time to publish slide.</strong><br>Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will be hide a slide until that date.<br>If this is a slide for a publication and this field is not set to a future date/time or at least one day in the past, it will be auto-populated with the date of the publication.', verbose_name='date/time slide published'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause | Python | |
5b1782ad41d738bce01f20b4cef5242420e83931 | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/matplotlib/colour_map_list.py | python/matplotlib/colour_map_list.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See: http://matplotlib.org/1.2.1/examples/pylab_examples/show_colormaps.html
# See also:
# - http://matplotlib.org/examples/color/colormaps_reference.html (the list of all colormaps)
# - http://matplotlib.org/users/colormaps.html?highlight=colormap#mycarta-banding (what is the right colormap to choose for a given plot)
import matplotlib.pyplot as plt
# Collect every colormap name registered in matplotlib, alphabetically.
maps = sorted(plt.cm.datad.keys())
print(maps)
| mit | Python | |
ad47e008ec61772c8c169742f0ab944f0f426a7a | Add solution to 97. | bsamseth/project-euler,bsamseth/project-euler | 097/97.py | 097/97.py |
def two_to_n_mod_10_to_m(n, m):
    """Compute 2**n mod 10**m by binary exponentiation (successive squaring).

    Walks the bits of n from least to most significant, squaring a running
    power of two mod 10**m at each step and multiplying it into the result
    whenever the corresponding bit of n is set. Handles very large n fast;
    equivalent to the built-in pow(2, n, 10**m).
    Based on: http://www.exploringbinary.com/how-to-find-the-last-digits-of-a-positive-power-of-two/
    """
    modulus = 10**m
    result = 1
    square = 2          # 2**(2**i) mod 10**m for the current bit position i
    remaining = n
    while remaining:
        if remaining & 1:
            result = (result * square) % modulus
        square = (square * square) % modulus
        remaining >>= 1
    return result
def alt1(n, m):
    """Double a running value n times, reducing mod 10**m each step.

    O(n) multiplications - slow for huge n, but as simple as the idea gets.
    """
    modulus = 10**m
    acc = 1
    for _ in range(n):
        acc = (acc * 2) % modulus
    return acc
def alt2(n, m):
    """Direct big-integer computation of 2**n mod 10**m; fine for moderate n."""
    return pow(2, n) % (10**m)
# Project Euler 97: last ten digits of 28433 * 2**7830457 + 1, computed
# once with each implementation (they must all agree).
answer_modulus = 10**10
for implementation in (two_to_n_mod_10_to_m, alt1, alt2):
    print((28433 * implementation(7830457, 10) + 1) % answer_modulus)
| mit | Python | |
644ada5f8afcfe791299eea72efda1b0475040aa | Add benchmark for parsing vs. serializing. | plures/ndtypes,skrah/ndtypes,plures/ndtypes,skrah/ndtypes,plures/ndtypes,skrah/ndtypes,plures/ndtypes,skrah/ndtypes | python/bench.py | python/bench.py | from ndtypes import *
import time
# =============================================================================
# Type with huge number of offsets
# =============================================================================

# A two-level var-dim type whose inner offsets list has 10_000_001 entries;
# stresses the ndt parser and the (de)serializer.
s = "var(offsets=[0,10000000]) * var(offsets=%s) * int64" % list(range(10000001))

print("Parse 10_000_000 var offsets:")
start = time.time()
t = ndt(s)
end = time.time()
print(end-start)

b = t.serialize()

print("\nDeserialize 10_000_000 var offsets:")
start = time.time()
u = ndt.deserialize(b)
end = time.time()
print(end-start)

# Round-trip must reproduce the original type exactly.
assert t == u
# =============================================================================
# Large type
# =============================================================================
s = """
{battingpost: 100 * {yearID: ?int32, round: ?string, playerID: ?string, teamID: ?string, lgID: (?string, int64, 5 * 10 * {a: complex128, b: ?int32}), G: ?int32, AB: ?int32, R: ?int32, H: (int32, 10 * int32), HR: {a: 10 * float64, b: 10 * int32}, RBI: ?int32, SB: ?int32, CS: ?int32, BB: ?int32, SO: ?int32, IBB: ?int32, HBP: ?int32, SH: ?int32, SF: ?int32, GIDP: ?int32, AString: fixed_string(100,'utf32'), BString: fixed_string(100), CBytes: bytes(align=16), DBytes: fixed_bytes(size=1600, align=16)}, awardsmanagers: 10 * {managerID: ?string, awardID: ?string, yearID: ?int32, lgID: ?string, tie: ?string, notes: ?string}, hofold: 10 * {hofID: ?string, yearid: ?int32, votedBy: ?string, ballots: ?int32, votes: ?int32, inducted: ?string, category: ?string}, salaries: 10 * {yearID: ?int32, teamID: ?string, lgID: ?string, playerID: ?string, salary: ?float64}, pitchingpost: 10 * {playerID: ?string, yearID: ?int32, round: ?string, teamID: ?string, lgID: ?string, W: ?int32, L: ?int32, G: ?int32, GS: ?int32, CG: ?int32, SHO: ?int32, SV: ?int32, IPouts: ?int32, H: ?int32, ER: ?int32, HR: ?int32, BB: ?int32, SO: ?int32, BAOpp: ?float64, ERA: ?float64, IBB: ?int32, WP: ?int32, HBP: ?int32, BK: ?int32, BFP: ?int32, GF: ?int32, R: ?int32, SH: ?int32, SF: ?int32, GIDP: ?int32}, managers: 10 * {managerID: ?string, yearID: ?int32, teamID: ?string, lgID: ?string, inseason: ?int32, G: ?int32, W: ?int32, L: ?int32, rank: ?int32, plyrMgr: ?string}, teams: 10 * {yearID: ?int32, lgID: ?string, teamID: ?string, franchID: ?string, divID: ?string, Rank: ?int32, G: ?int32, Ghome: ?int32, W: ?int32, L: ?int32, DivWin: ?string, WCWin: ?string, LgWin: ?string, WSWin: ?string, R: ?int32, AB: ?int32, H: ?int32, B: ?int32, B: ?int32, HR: ?int32, BB: ?int32, SO: ?int32, SB: ?int32, CS: ?int32, HBP: ?int32, SF: ?int32, RA: ?int32, ER: ?int32, ERA: ?float64, CG: ?int32, SHO: ?int32, SV: ?int32, IPouts: ?int32, HA: ?int32, HRA: ?int32, BBA: ?int32, SOA: ?int32, E: ?int32, DP: ?int32, FP: ?float64, name: 
?string, park: ?string, attendance: ?int32, BPF: ?int32, PPF: ?int32, teamIDBR: ?string, teamIDlahman45: ?string, teamIDretro: ?string}}\
"""
# Repeatedly parse the large record type `s` defined above to measure
# steady-state parser throughput.
print("\nParse large type (100_000 repetitions):")
start = time.time()
for i in range(100000):
    t = ndt(s)
end = time.time()
print(end-start)

b = t.serialize()

print("\nDeserialize large type (100_000 repetitions):")
start = time.time()
for i in range(100000):
    u = ndt.deserialize(b)
end = time.time()
print(end-start)

# Round-trip must reproduce the original type exactly.
assert t == u
| bsd-3-clause | Python | |
bf744e472209162ce83b2759c9240cb3018cb0bf | Fix find_packages | iheartradio/Henson | henson/contrib/__init__.py | henson/contrib/__init__.py | """Henson's contrib packages."""
| apache-2.0 | Python | |
11296e24228ee10be009b04a9909504a8e8d5ace | Test for the save_character() function | Enether/python_wow | tests/models/character/test_saver.py | tests/models/character/test_saver.py | import unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from classes import Paladin
from models.characters.saved_character import SavedCharacterSchema
from models.items.item_template import ItemTemplateSchema
from tests.models.character.character_mock import character, char_equipment, entry
from models.characters.saver import save_character
class SavedCharacterSaverTests(unittest.TestCase):
    """
    Get the Mock character, change his name and try to save him in the DB
    """
    def setUp(self):
        # NOTE(review): this mutates the shared module-level mock in place
        # (no copy), so other tests importing `character` see name='Tester'.
        self.expected_character = character
        self.expected_character.name = 'Tester'

    def test_save_character(self):
        # Persist the mock, read it back by name, and compare attribute dicts.
        save_character(self.expected_character)
        received_character = session.query(SavedCharacterSchema).filter_by(name=self.expected_character.name).first()
        self.assertIsNotNone(received_character)
        received_character = received_character.convert_to_character_object()
        # assert they're the same
        self.assertEqual(vars(received_character), vars(self.expected_character))
def tearDownModule():
    # Importing the module runs its side effect: dropping the test database.
    import tests.delete_test_db  # module that deletes the DB :)


if __name__ == '__main__':
    unittest.main()
| mit | Python | |
b329688792d65555ab7169d0ff625f2b4bddd7f2 | Add initial API integration tests | ivuk/pyusesthis | tests/test_integration_pyusesthis.py | tests/test_integration_pyusesthis.py | from pyusesthis import pyusesthis
class TestClass:
    """Integration tests: hit the live usesthis.com API and check that the
    wrapper's string output mentions known entries."""

    def _check(self, response, fragments):
        # Every endpoint returns a string; it must contain each fragment.
        assert isinstance(response, str)
        for fragment in fragments:
            assert fragment in response

    def test_get_hardware_all(self):
        self._check(pyusesthis.get_hardware('all'),
                    ('thinkpad-x220', 'Xeon E5-2680'))

    def test_get_hardware(self):
        self._check(pyusesthis.get_hardware('ThinkPad X230'),
                    ('thinkpad-x230', 'ThinkPad X230',
                     'fabienne.serriere', 'Jean Yang'))

    def test_get_software_all(self):
        self._check(pyusesthis.get_software('all'),
                    ('vi', 'john-cage-prepared-piano-ios',
                     'Tearaway Unfolded'))

    def test_get_software(self):
        self._check(pyusesthis.get_software('tcpdump'),
                    ('tcpdump', 'http://www.tcpdump.org/', 'brendan.gregg',
                     'Brad Fitzpatrick', 'john.allspaw'))

    def test_get_interviews_all(self):
        self._check(pyusesthis.get_interviews('all'),
                    ('alice.goldfuss', 'Jessie Frazelle', 'Jonathan Corbet',
                     'https://usesthis.com/interviews/brian.kernighan/',
                     'katie.o.shea', 'Barista, bike fanatic'))

    def test_get_interviews(self):
        self._check(pyusesthis.get_interviews('Katie O Shea'),
                    ('Hario V60', 'robur', 'katie.o.shea',
                     'Barista, bike fanatic', 'La Tortuga Honduras',
                     'http://www.synesso.com/default.aspx?ID=8'))

    def test_get_stats_all(self):
        self._check(pyusesthis.get_stats('all'),
                    ('interviews', 'hardware', 'software'))

    def test_get_stats_software(self):
        self._check(pyusesthis.get_stats('software', '2016'),
                    ('iterm2', 'android', 'github', 'firefox'))

    def test_get_stats_hardware(self):
        self._check(pyusesthis.get_stats('hardware', '2014'),
                    ('nexus-5', 'nexus-4', 'kindle-paperwhite',
                     'macbook-pro', 'pebble', 'arduino'))

    def test_get_categories_all(self):
        self._check(pyusesthis.get_categories('all'),
                    ('sysadmin', 'usability', 'anthropologist',
                     'librarian', 'linux'))

    def test_get_categories(self):
        self._check(pyusesthis.get_categories('sysadmin'),
                    ('maggie.mcfee', 'Maggie McFee',
                     'https://usesthis.com/interviews/maggie.mcfee/',
                     'Technologist', 'sysadmin'))
| mit | Python | |
dcb4a8ae0732b78afa4385988714f19b78fb3312 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/ff451b346ffc6061369b7712da787ceb2b5becf7. | Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,karllessard/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,gautam1858/tensorflow,karllessard/tensorflow,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/
tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,sarvex/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow,Intel-Corporation/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,frreiss/tensorflow-fred,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,karllessard/tensorflow,yongtang/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,paolodedios/tensorflow | 
third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "ff451b346ffc6061369b7712da787ceb2b5becf7"
TFRT_SHA256 = "333151d184baf3b8b384615ea20c9ab14efab635f096b364e7b11a000cac2016"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")

def repo():
    """Imports TFRT."""

    # Attention: tools parse and update these lines.
    # (Older TFRT pin; superseded by the new_contents version in this row.)
    TFRT_COMMIT = "94a9cc13caf5aa8e2ba54937ed837279c11c78e4"
    TFRT_SHA256 = "8a9257aaf2b0042824659be7b2fd8335f37b98a4f57f76fda58ad820fde00f2a"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        # TensorFlow mirror is tried first; GitHub is the fallback.
        urls = [
            "http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
            "https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
        ],
    )
| apache-2.0 | Python |
5568f30f2bcb83eb8f4dd250d8c817aaea3815f5 | Create chalkboard-xor-game.py | kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/chalkboard-xor-game.py | Python/chalkboard-xor-game.py | # Time: O(n)
# Space: O(1)
# We are given non-negative integers nums[i] which are written on a chalkboard.
# Alice and Bob take turns erasing exactly one number from the chalkboard,
# with Alice starting first. If erasing a number causes the bitwise XOR of
# all the elements of the chalkboard to become 0, then that player loses.
# (Also, we'll say the bitwise XOR of one element is that element itself,
# and the bitwise XOR of no elements is 0.)
#
# Also, if any player starts their turn with the bitwise XOR of all the elements
# of the chalkboard equal to 0, then that player wins.
#
# Return True if and only if Alice wins the game, assuming both players play optimally.
#
# Example:
# Input: nums = [1, 1, 2]
# Output: false
# Explanation:
# Alice has two choices: erase 1 or erase 2.
# If she erases 1, the nums array becomes [1, 2]. The bitwise XOR of
# all the elements of the chalkboard is 1 XOR 2 = 3.
# Now Bob can remove any element he wants, because Alice will be the one
# to erase the last element and she will lose.
# If Alice erases 2 first, now nums becomes [1, 1]. The bitwise XOR of
# all the elements of the chalkboard is 1 XOR 1 = 0. Alice will lose.
#
# Notes:
# - 1 <= N <= 1000.
# - 0 <= nums[i] <= 2^16.
from operator import xor
from functools import reduce
class Solution(object):
    def xorGame(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        # Alice wins iff the XOR of all numbers is already 0, or the count
        # of numbers is even (she can then always avoid zeroing the board).
        total = 0
        for value in nums:
            total ^= value
        return total == 0 or len(nums) % 2 == 0
| mit | Python | |
c9294cbc743923c8898b7775392e93313c1ba171 | Create FindMininRSA2_001.py | Chasego/codirit,cc13ny/algo,Chasego/codi,Chasego/cod,cc13ny/Allin,cc13ny/algo,Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/cod,Chasego/codirit,Chasego/codi,Chasego/codirit,cc13ny/algo,cc13ny/algo,cc13ny/Allin,Chasego/codi,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/cod,Chasego/cod,Chasego/cod,Chasego/codi,cc13ny/Allin | leetcode/154-Find-Minimum-in-Rotated-Sorted-Array-II/FindMininRSA2_001.py | leetcode/154-Find-Minimum-in-Rotated-Sorted-Array-II/FindMininRSA2_001.py | class Solution:
# @param num, a list of integer
# @return an integer
def findMin(self, num):
L = 0; R = len(num)-1
while L < R and num[L] >= num[R]:
M = (L+R)/2
if num[M] > num[L]:
L = M + 1
elif num[M] < num[R]:
R = M
else:
L += 1
return num[L]
| mit | Python | |
a78d93dbc23d832ca5eaae6535a45bfa478e4e56 | Add US state capitals from vega-lite. | altair-viz/altair,ellisonbg/altair,jakevdp/altair | altair/vegalite/v2/examples/us_state_capitals.py | altair/vegalite/v2/examples/us_state_capitals.py | """
U.S. state capitals overlayed on a map of the U.S
================================================-
This is a geographic visualization that shows US capitals
overlayed on a map.
"""
import altair as alt
from vega_datasets import data
states = alt.UrlData(data.us_10m.url,
format=alt.TopoDataFormat(type='topojson',
feature='states'))
capitals = data.us_state_capitals.url
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
projection={'type': 'albersUsa'},
width=800,
height=500
)
# State capitals labeled on background
points = alt.Chart(capitals).mark_text().encode(
alt.Text('city', type='nominal'),
alt.X('lon', type='longitude'),
alt.Y('lat', type='latitude'),
)
chart = background + points
| bsd-3-clause | Python | |
301a506aa21d4439448508ed80844d402c574e97 | Add version 1.7 (#26712) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-pygraphviz/package.py | var/spack/repos/builtin/packages/py-pygraphviz/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPygraphviz(PythonPackage):
"""Python interface to Graphviz"""
homepage = "https://pygraphviz.github.io/"
pypi = "pygraphviz/pygraphviz-1.7.zip"
maintainers = ['haralmha']
version('1.7', sha256='a7bec6609f37cf1e64898c59f075afd659106cf9356c5f387cecaa2e0cdb2304')
depends_on('python@3.7:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('graphviz')
| lgpl-2.1 | Python | |
f22abf2b8a31d9621a891191db84364edb167390 | Add a management command to create realm administrators. | wdaher/zulip,xuxiao/zulip,showell/zulip,jonesgithub/zulip,levixie/zulip,Gabriel0402/zulip,ryansnowboarder/zulip,zacps/zulip,rht/zulip,suxinde2009/zulip,hustlzp/zulip,bowlofstew/zulip,sharmaeklavya2/zulip,seapasulli/zulip,SmartPeople/zulip,dxq-git/zulip,sup95/zulip,dhcrzf/zulip,timabbott/zulip,tommyip/zulip,zacps/zulip,aps-sids/zulip,esander91/zulip,tbutter/zulip,JanzTam/zulip,swinghu/zulip,adnanh/zulip,aps-sids/zulip,jphilipsen05/zulip,showell/zulip,hustlzp/zulip,JanzTam/zulip,aakash-cr7/zulip,hj3938/zulip,PaulPetring/zulip,hackerkid/zulip,ryanbackman/zulip,souravbadami/zulip,shubhamdhama/zulip,cosmicAsymmetry/zulip,Juanvulcano/zulip,AZtheAsian/zulip,amanharitsh123/zulip,j831/zulip,proliming/zulip,nicholasbs/zulip,shaunstanislaus/zulip,sonali0901/zulip,jerryge/zulip,ashwinirudrappa/zulip,karamcnair/zulip,synicalsyntax/zulip,AZtheAsian/zulip,wangdeshui/zulip,sharmaeklavya2/zulip,adnanh/zulip,zulip/zulip,ashwinirudrappa/zulip,alliejones/zulip,developerfm/zulip,johnny9/zulip,rht/zulip,so0k/zulip,ufosky-server/zulip,tommyip/zulip,zachallaun/zulip,itnihao/zulip,vakila/zulip,JPJPJPOPOP/zulip,swinghu/zulip,johnny9/zulip,yocome/zulip,mohsenSy/zulip,ikasumiwt/zulip,Qgap/zulip,LAndreas/zulip,ikasumiwt/zulip,karamcnair/zulip,aps-sids/zulip,timabbott/zulip,reyha/zulip,Diptanshu8/zulip,ericzhou2008/zulip,hafeez3000/zulip,tdr130/zulip,niftynei/zulip,ipernet/zulip,PaulPetring/zulip,punchagan/zulip,mdavid/zulip,blaze225/zulip,cosmicAsymmetry/zulip,joyhchen/zulip,m1ssou/zulip,EasonYi/zulip,dattatreya303/zulip,luyifan/zulip,dhcrzf/zulip,atomic-labs/zulip,vakila/zulip,susansls/zulip,avastu/zulip,suxinde2009/zulip,zorojean/zulip,luyifan/zulip,xuxiao/zulip,niftynei/zulip,babbage/zulip,saitodisse/zulip,hafeez3000/zulip,he15his/zulip,huangkebo/zulip,tdr130/zulip,susansls/zulip,seapasulli/zulip,dxq-git/zulip,kokoar/zulip,Frouk/zulip,sonali0901/zulip,mohsenSy/zulip,wweiradio/zulip,zwily/zul
ip,hustlzp/zulip,joshisa/zulip,jessedhillon/zulip,LeeRisk/zulip,hj3938/zulip,esander91/zulip,praveenaki/zulip,atomic-labs/zulip,christi3k/zulip,huangkebo/zulip,wangdeshui/zulip,ryanbackman/zulip,schatt/zulip,swinghu/zulip,calvinleenyc/zulip,Drooids/zulip,amyliu345/zulip,guiquanz/zulip,he15his/zulip,rishig/zulip,zofuthan/zulip,ApsOps/zulip,eastlhu/zulip,dnmfarrell/zulip,brainwane/zulip,sup95/zulip,calvinleenyc/zulip,arpitpanwar/zulip,qq1012803704/zulip,zhaoweigg/zulip,gigawhitlocks/zulip,yuvipanda/zulip,tommyip/zulip,brockwhittaker/zulip,LeeRisk/zulip,sharmaeklavya2/zulip,hustlzp/zulip,fw1121/zulip,willingc/zulip,mdavid/zulip,MayB/zulip,susansls/zulip,joyhchen/zulip,LeeRisk/zulip,bitemyapp/zulip,pradiptad/zulip,Gabriel0402/zulip,jonesgithub/zulip,atomic-labs/zulip,guiquanz/zulip,thomasboyt/zulip,arpitpanwar/zulip,akuseru/zulip,sonali0901/zulip,krtkmj/zulip,amanharitsh123/zulip,jimmy54/zulip,hackerkid/zulip,dotcool/zulip,arpitpanwar/zulip,ikasumiwt/zulip,yocome/zulip,Suninus/zulip,synicalsyntax/zulip,joyhchen/zulip,grave-w-grave/zulip,zofuthan/zulip,brockwhittaker/zulip,peiwei/zulip,bitemyapp/zulip,ipernet/zulip,jphilipsen05/zulip,proliming/zulip,deer-hope/zulip,noroot/zulip,zofuthan/zulip,thomasboyt/zulip,ApsOps/zulip,yuvipanda/zulip,jonesgithub/zulip,levixie/zulip,Frouk/zulip,arpith/zulip,mahim97/zulip,Batterfii/zulip,bitemyapp/zulip,udxxabp/zulip,levixie/zulip,Jianchun1/zulip,zulip/zulip,aliceriot/zulip,alliejones/zulip,Galexrt/zulip,stamhe/zulip,stamhe/zulip,aakash-cr7/zulip,tiansiyuan/zulip,ipernet/zulip,Frouk/zulip,yocome/zulip,jackrzhang/zulip,bluesea/zulip,swinghu/zulip,ahmadassaf/zulip,TigorC/zulip,easyfmxu/zulip,blaze225/zulip,rht/zulip,calvinleenyc/zulip,levixie/zulip,so0k/zulip,Gabriel0402/zulip,KingxBanana/zulip,schatt/zulip,atomic-labs/zulip,synicalsyntax/zulip,dxq-git/zulip,easyfmxu/zulip,voidException/zulip,shubhamdhama/zulip,calvinleenyc/zulip,xuanhan863/zulip,xuanhan863/zulip,technicalpickles/zulip,calvinleenyc/zulip,Jianchun1/zulip,xuanhan863/zulip,
hustlzp/zulip,saitodisse/zulip,ApsOps/zulip,moria/zulip,hengqujushi/zulip,karamcnair/zulip,yuvipanda/zulip,nicholasbs/zulip,showell/zulip,jerryge/zulip,andersk/zulip,PaulPetring/zulip,glovebx/zulip,timabbott/zulip,hayderimran7/zulip,hj3938/zulip,itnihao/zulip,EasonYi/zulip,ikasumiwt/zulip,themass/zulip,armooo/zulip,developerfm/zulip,babbage/zulip,zacps/zulip,littledogboy/zulip,ryanbackman/zulip,vikas-parashar/zulip,andersk/zulip,so0k/zulip,RobotCaleb/zulip,technicalpickles/zulip,brockwhittaker/zulip,AZtheAsian/zulip,yuvipanda/zulip,susansls/zulip,MayB/zulip,littledogboy/zulip,qq1012803704/zulip,glovebx/zulip,aps-sids/zulip,wavelets/zulip,littledogboy/zulip,johnnygaddarr/zulip,Cheppers/zulip,timabbott/zulip,swinghu/zulip,babbage/zulip,firstblade/zulip,zorojean/zulip,hayderimran7/zulip,Gabriel0402/zulip,karamcnair/zulip,ashwinirudrappa/zulip,PhilSk/zulip,grave-w-grave/zulip,kou/zulip,itnihao/zulip,levixie/zulip,xuanhan863/zulip,levixie/zulip,vikas-parashar/zulip,mdavid/zulip,jackrzhang/zulip,akuseru/zulip,Vallher/zulip,souravbadami/zulip,Suninus/zulip,pradiptad/zulip,hj3938/zulip,eastlhu/zulip,lfranchi/zulip,sonali0901/zulip,LAndreas/zulip,jainayush975/zulip,gigawhitlocks/zulip,Gabriel0402/zulip,vabs22/zulip,kaiyuanheshang/zulip,codeKonami/zulip,punchagan/zulip,qq1012803704/zulip,huangkebo/zulip,alliejones/zulip,rishig/zulip,jonesgithub/zulip,so0k/zulip,dhcrzf/zulip,vabs22/zulip,thomasboyt/zulip,vabs22/zulip,esander91/zulip,lfranchi/zulip,deer-hope/zulip,ApsOps/zulip,RobotCaleb/zulip,wdaher/zulip,akuseru/zulip,vakila/zulip,ipernet/zulip,bluesea/zulip,kokoar/zulip,stamhe/zulip,dxq-git/zulip,bastianh/zulip,ryansnowboarder/zulip,natanovia/zulip,DazWorrall/zulip,dhcrzf/zulip,zulip/zulip,JanzTam/zulip,zwily/zulip,udxxabp/zulip,rishig/zulip,samatdav/zulip,reyha/zulip,vakila/zulip,zacps/zulip,babbage/zulip,dwrpayne/zulip,saitodisse/zulip,itnihao/zulip,bastianh/zulip,natanovia/zulip,ericzhou2008/zulip,tommyip/zulip,kou/zulip,he15his/zulip,m1ssou/zulip,arpitpanwar/zulip,krtkmj
/zulip,willingc/zulip,brockwhittaker/zulip,sharmaeklavya2/zulip,karamcnair/zulip,luyifan/zulip,dawran6/zulip,mahim97/zulip,bastianh/zulip,joyhchen/zulip,alliejones/zulip,nicholasbs/zulip,PhilSk/zulip,MariaFaBella85/zulip,bowlofstew/zulip,andersk/zulip,shrikrishnaholla/zulip,proliming/zulip,gkotian/zulip,nicholasbs/zulip,showell/zulip,timabbott/zulip,so0k/zulip,eastlhu/zulip,EasonYi/zulip,glovebx/zulip,zachallaun/zulip,eeshangarg/zulip,m1ssou/zulip,jackrzhang/zulip,Jianchun1/zulip,johnny9/zulip,nicholasbs/zulip,Vallher/zulip,moria/zulip,wdaher/zulip,wangdeshui/zulip,suxinde2009/zulip,moria/zulip,rht/zulip,zhaoweigg/zulip,dotcool/zulip,MayB/zulip,udxxabp/zulip,KingxBanana/zulip,mohsenSy/zulip,rht/zulip,dwrpayne/zulip,gigawhitlocks/zulip,Jianchun1/zulip,guiquanz/zulip,pradiptad/zulip,mansilladev/zulip,LeeRisk/zulip,peguin40/zulip,noroot/zulip,technicalpickles/zulip,zhaoweigg/zulip,easyfmxu/zulip,shrikrishnaholla/zulip,wavelets/zulip,bowlofstew/zulip,easyfmxu/zulip,rht/zulip,tbutter/zulip,tdr130/zulip,KingxBanana/zulip,hustlzp/zulip,nicholasbs/zulip,shaunstanislaus/zulip,he15his/zulip,TigorC/zulip,ashwinirudrappa/zulip,gkotian/zulip,kaiyuanheshang/zulip,ahmadassaf/zulip,bssrdf/zulip,Suninus/zulip,natanovia/zulip,fw1121/zulip,sup95/zulip,MayB/zulip,wdaher/zulip,wdaher/zulip,stamhe/zulip,vaidap/zulip,qq1012803704/zulip,armooo/zulip,jimmy54/zulip,joshisa/zulip,arpith/zulip,firstblade/zulip,KingxBanana/zulip,jeffcao/zulip,PaulPetring/zulip,tbutter/zulip,eastlhu/zulip,dxq-git/zulip,bastianh/zulip,PhilSk/zulip,isht3/zulip,esander91/zulip,amyliu345/zulip,Qgap/zulip,Galexrt/zulip,dxq-git/zulip,ryanbackman/zulip,wangdeshui/zulip,Qgap/zulip,mohsenSy/zulip,MayB/zulip,avastu/zulip,lfranchi/zulip,ahmadassaf/zulip,vaidap/zulip,ryansnowboarder/zulip,krtkmj/zulip,tbutter/zulip,Frouk/zulip,eastlhu/zulip,ashwinirudrappa/zulip,bssrdf/zulip,qq1012803704/zulip,EasonYi/zulip,proliming/zulip,amallia/zulip,vabs22/zulip,deer-hope/zulip,hengqujushi/zulip,KJin99/zulip,themass/zulip,jessedhillon/z
ulip,hafeez3000/zulip,hayderimran7/zulip,wweiradio/zulip,Qgap/zulip,punchagan/zulip,aliceriot/zulip,swinghu/zulip,timabbott/zulip,jrowan/zulip,jimmy54/zulip,esander91/zulip,atomic-labs/zulip,gigawhitlocks/zulip,verma-varsha/zulip,andersk/zulip,kokoar/zulip,firstblade/zulip,gkotian/zulip,pradiptad/zulip,zwily/zulip,jrowan/zulip,m1ssou/zulip,shrikrishnaholla/zulip,mdavid/zulip,stamhe/zulip,peguin40/zulip,samatdav/zulip,TigorC/zulip,DazWorrall/zulip,yocome/zulip,jphilipsen05/zulip,zofuthan/zulip,natanovia/zulip,amyliu345/zulip,Batterfii/zulip,hengqujushi/zulip,Cheppers/zulip,amyliu345/zulip,MayB/zulip,shrikrishnaholla/zulip,seapasulli/zulip,littledogboy/zulip,bitemyapp/zulip,verma-varsha/zulip,lfranchi/zulip,jrowan/zulip,joshisa/zulip,hafeez3000/zulip,krtkmj/zulip,jphilipsen05/zulip,wweiradio/zulip,codeKonami/zulip,aliceriot/zulip,johnnygaddarr/zulip,RobotCaleb/zulip,babbage/zulip,hackerkid/zulip,RobotCaleb/zulip,zhaoweigg/zulip,JanzTam/zulip,Drooids/zulip,KingxBanana/zulip,brainwane/zulip,littledogboy/zulip,he15his/zulip,voidException/zulip,christi3k/zulip,zorojean/zulip,luyifan/zulip,suxinde2009/zulip,vaidap/zulip,MariaFaBella85/zulip,sup95/zulip,mansilladev/zulip,brainwane/zulip,brainwane/zulip,bssrdf/zulip,kou/zulip,ericzhou2008/zulip,rht/zulip,ipernet/zulip,zwily/zulip,dnmfarrell/zulip,j831/zulip,jimmy54/zulip,jessedhillon/zulip,mansilladev/zulip,thomasboyt/zulip,seapasulli/zulip,huangkebo/zulip,wweiradio/zulip,itnihao/zulip,swinghu/zulip,ikasumiwt/zulip,eastlhu/zulip,niftynei/zulip,natanovia/zulip,Juanvulcano/zulip,verma-varsha/zulip,alliejones/zulip,EasonYi/zulip,zwily/zulip,JPJPJPOPOP/zulip,noroot/zulip,voidException/zulip,voidException/zulip,wavelets/zulip,umkay/zulip,Qgap/zulip,codeKonami/zulip,johnny9/zulip,so0k/zulip,Vallher/zulip,jphilipsen05/zulip,mansilladev/zulip,glovebx/zulip,zulip/zulip,JanzTam/zulip,JPJPJPOPOP/zulip,Vallher/zulip,Batterfii/zulip,RobotCaleb/zulip,j831/zulip,sonali0901/zulip,samatdav/zulip,udxxabp/zulip,christi3k/zulip,blaze225/zulip,v
erma-varsha/zulip,Diptanshu8/zulip,isht3/zulip,dwrpayne/zulip,jonesgithub/zulip,paxapy/zulip,willingc/zulip,littledogboy/zulip,KJin99/zulip,developerfm/zulip,mdavid/zulip,technicalpickles/zulip,punchagan/zulip,zachallaun/zulip,Frouk/zulip,ufosky-server/zulip,hengqujushi/zulip,huangkebo/zulip,ericzhou2008/zulip,punchagan/zulip,Diptanshu8/zulip,jeffcao/zulip,Juanvulcano/zulip,suxinde2009/zulip,bluesea/zulip,firstblade/zulip,tommyip/zulip,Batterfii/zulip,praveenaki/zulip,vikas-parashar/zulip,willingc/zulip,lfranchi/zulip,stamhe/zulip,KJin99/zulip,ufosky-server/zulip,codeKonami/zulip,shaunstanislaus/zulip,wweiradio/zulip,LeeRisk/zulip,ufosky-server/zulip,LAndreas/zulip,Juanvulcano/zulip,RobotCaleb/zulip,voidException/zulip,Drooids/zulip,wangdeshui/zulip,hengqujushi/zulip,dwrpayne/zulip,deer-hope/zulip,umkay/zulip,dwrpayne/zulip,MariaFaBella85/zulip,ericzhou2008/zulip,eeshangarg/zulip,peiwei/zulip,thomasboyt/zulip,vaidap/zulip,themass/zulip,akuseru/zulip,LAndreas/zulip,technicalpickles/zulip,amyliu345/zulip,sup95/zulip,alliejones/zulip,shaunstanislaus/zulip,mohsenSy/zulip,bssrdf/zulip,voidException/zulip,tiansiyuan/zulip,bluesea/zulip,he15his/zulip,Cheppers/zulip,johnny9/zulip,synicalsyntax/zulip,bluesea/zulip,Galexrt/zulip,tbutter/zulip,EasonYi/zulip,brainwane/zulip,noroot/zulip,hengqujushi/zulip,aliceriot/zulip,jessedhillon/zulip,shaunstanislaus/zulip,developerfm/zulip,zorojean/zulip,hackerkid/zulip,yuvipanda/zulip,ahmadassaf/zulip,reyha/zulip,huangkebo/zulip,Batterfii/zulip,eeshangarg/zulip,souravbadami/zulip,mansilladev/zulip,christi3k/zulip,tbutter/zulip,dwrpayne/zulip,amallia/zulip,hj3938/zulip,Cheppers/zulip,kaiyuanheshang/zulip,zorojean/zulip,verma-varsha/zulip,vakila/zulip,saitodisse/zulip,AZtheAsian/zulip,zwily/zulip,niftynei/zulip,blaze225/zulip,tiansiyuan/zulip,joyhchen/zulip,wavelets/zulip,mahim97/zulip,bowlofstew/zulip,synicalsyntax/zulip,aliceriot/zulip,zachallaun/zulip,esander91/zulip,pradiptad/zulip,amanharitsh123/zulip,armooo/zulip,seapasulli/zulip,heng
qujushi/zulip,wavelets/zulip,jrowan/zulip,Juanvulcano/zulip,zorojean/zulip,ryansnowboarder/zulip,armooo/zulip,Qgap/zulip,Vallher/zulip,PaulPetring/zulip,Suninus/zulip,bssrdf/zulip,joshisa/zulip,Galexrt/zulip,ryanbackman/zulip,glovebx/zulip,developerfm/zulip,jeffcao/zulip,SmartPeople/zulip,schatt/zulip,Drooids/zulip,jessedhillon/zulip,ikasumiwt/zulip,joshisa/zulip,tiansiyuan/zulip,arpith/zulip,TigorC/zulip,Cheppers/zulip,codeKonami/zulip,firstblade/zulip,zachallaun/zulip,punchagan/zulip,hayderimran7/zulip,christi3k/zulip,kokoar/zulip,moria/zulip,bssrdf/zulip,brainwane/zulip,zhaoweigg/zulip,moria/zulip,reyha/zulip,vikas-parashar/zulip,brainwane/zulip,m1ssou/zulip,MariaFaBella85/zulip,jerryge/zulip,krtkmj/zulip,rishig/zulip,DazWorrall/zulip,jeffcao/zulip,Diptanshu8/zulip,gigawhitlocks/zulip,tbutter/zulip,blaze225/zulip,ahmadassaf/zulip,Gabriel0402/zulip,jainayush975/zulip,isht3/zulip,qq1012803704/zulip,guiquanz/zulip,xuxiao/zulip,codeKonami/zulip,bitemyapp/zulip,yocome/zulip,cosmicAsymmetry/zulip,amallia/zulip,PaulPetring/zulip,jonesgithub/zulip,hayderimran7/zulip,kou/zulip,luyifan/zulip,deer-hope/zulip,johnnygaddarr/zulip,thomasboyt/zulip,natanovia/zulip,ashwinirudrappa/zulip,gkotian/zulip,SmartPeople/zulip,eeshangarg/zulip,reyha/zulip,avastu/zulip,mahim97/zulip,praveenaki/zulip,Batterfii/zulip,peguin40/zulip,noroot/zulip,MariaFaBella85/zulip,udxxabp/zulip,armooo/zulip,developerfm/zulip,arpitpanwar/zulip,vaidap/zulip,zacps/zulip,timabbott/zulip,eeshangarg/zulip,kokoar/zulip,jackrzhang/zulip,wdaher/zulip,dhcrzf/zulip,umkay/zulip,jerryge/zulip,so0k/zulip,zacps/zulip,calvinleenyc/zulip,he15his/zulip,bowlofstew/zulip,mansilladev/zulip,peguin40/zulip,synicalsyntax/zulip,peguin40/zulip,KJin99/zulip,ahmadassaf/zulip,dattatreya303/zulip,seapasulli/zulip,bitemyapp/zulip,sharmaeklavya2/zulip,aliceriot/zulip,Qgap/zulip,shrikrishnaholla/zulip,qq1012803704/zulip,Suninus/zulip,johnnygaddarr/zulip,krtkmj/zulip,peiwei/zulip,jainayush975/zulip,willingc/zulip,ryansnowboarder/zulip,show
ell/zulip,JanzTam/zulip,jainayush975/zulip,jeffcao/zulip,tdr130/zulip,TigorC/zulip,arpith/zulip,ashwinirudrappa/zulip,SmartPeople/zulip,grave-w-grave/zulip,umkay/zulip,Batterfii/zulip,themass/zulip,guiquanz/zulip,wavelets/zulip,esander91/zulip,andersk/zulip,aakash-cr7/zulip,schatt/zulip,grave-w-grave/zulip,joshisa/zulip,tiansiyuan/zulip,luyifan/zulip,avastu/zulip,peiwei/zulip,dawran6/zulip,ufosky-server/zulip,deer-hope/zulip,natanovia/zulip,bowlofstew/zulip,shubhamdhama/zulip,paxapy/zulip,dattatreya303/zulip,guiquanz/zulip,themass/zulip,dotcool/zulip,andersk/zulip,vakila/zulip,jerryge/zulip,amanharitsh123/zulip,xuanhan863/zulip,samatdav/zulip,samatdav/zulip,RobotCaleb/zulip,dnmfarrell/zulip,yuvipanda/zulip,souravbadami/zulip,paxapy/zulip,noroot/zulip,isht3/zulip,aliceriot/zulip,armooo/zulip,brockwhittaker/zulip,samatdav/zulip,glovebx/zulip,easyfmxu/zulip,bowlofstew/zulip,arpith/zulip,bluesea/zulip,Drooids/zulip,dnmfarrell/zulip,Diptanshu8/zulip,Frouk/zulip,DazWorrall/zulip,ryansnowboarder/zulip,ApsOps/zulip,codeKonami/zulip,jrowan/zulip,praveenaki/zulip,atomic-labs/zulip,dotcool/zulip,JPJPJPOPOP/zulip,amyliu345/zulip,suxinde2009/zulip,hustlzp/zulip,zorojean/zulip,tommyip/zulip,jackrzhang/zulip,j831/zulip,themass/zulip,gkotian/zulip,zhaoweigg/zulip,reyha/zulip,mdavid/zulip,adnanh/zulip,showell/zulip,Gabriel0402/zulip,hayderimran7/zulip,easyfmxu/zulip,gigawhitlocks/zulip,bssrdf/zulip,ufosky-server/zulip,arpitpanwar/zulip,Vallher/zulip,jeffcao/zulip,amallia/zulip,jimmy54/zulip,moria/zulip,babbage/zulip,dhcrzf/zulip,PhilSk/zulip,pradiptad/zulip,vikas-parashar/zulip,vaidap/zulip,jeffcao/zulip,ApsOps/zulip,isht3/zulip,kaiyuanheshang/zulip,sharmaeklavya2/zulip,DazWorrall/zulip,hj3938/zulip,sonali0901/zulip,aps-sids/zulip,mahim97/zulip,Drooids/zulip,KingxBanana/zulip,blaze225/zulip,jonesgithub/zulip,Suninus/zulip,hafeez3000/zulip,johnny9/zulip,Galexrt/zulip,dhcrzf/zulip,andersk/zulip,ikasumiwt/zulip,levixie/zulip,ipernet/zulip,souravbadami/zulip,DazWorrall/zulip,jerryge/zul
ip,avastu/zulip,zulip/zulip,bastianh/zulip,glovebx/zulip,johnny9/zulip,dnmfarrell/zulip,souravbadami/zulip,rishig/zulip,wdaher/zulip,xuxiao/zulip,fw1121/zulip,PhilSk/zulip,johnnygaddarr/zulip,arpitpanwar/zulip,dattatreya303/zulip,johnnygaddarr/zulip,firstblade/zulip,zachallaun/zulip,jphilipsen05/zulip,seapasulli/zulip,ufosky-server/zulip,hackerkid/zulip,schatt/zulip,praveenaki/zulip,hafeez3000/zulip,praveenaki/zulip,shubhamdhama/zulip,kaiyuanheshang/zulip,udxxabp/zulip,Cheppers/zulip,firstblade/zulip,stamhe/zulip,dawran6/zulip,LeeRisk/zulip,saitodisse/zulip,akuseru/zulip,atomic-labs/zulip,suxinde2009/zulip,xuxiao/zulip,littledogboy/zulip,ericzhou2008/zulip,hayderimran7/zulip,ipernet/zulip,eastlhu/zulip,bastianh/zulip,LAndreas/zulip,TigorC/zulip,shrikrishnaholla/zulip,praveenaki/zulip,arpith/zulip,cosmicAsymmetry/zulip,aakash-cr7/zulip,Cheppers/zulip,karamcnair/zulip,saitodisse/zulip,avastu/zulip,KJin99/zulip,themass/zulip,gkotian/zulip,Galexrt/zulip,LAndreas/zulip,sup95/zulip,j831/zulip,karamcnair/zulip,fw1121/zulip,avastu/zulip,armooo/zulip,johnnygaddarr/zulip,j831/zulip,tiansiyuan/zulip,deer-hope/zulip,JanzTam/zulip,dotcool/zulip,eeshangarg/zulip,hackerkid/zulip,hackerkid/zulip,Frouk/zulip,paxapy/zulip,christi3k/zulip,zofuthan/zulip,moria/zulip,technicalpickles/zulip,xuanhan863/zulip,gigawhitlocks/zulip,nicholasbs/zulip,amallia/zulip,SmartPeople/zulip,cosmicAsymmetry/zulip,m1ssou/zulip,zofuthan/zulip,easyfmxu/zulip,PaulPetring/zulip,mansilladev/zulip,niftynei/zulip,Diptanshu8/zulip,dattatreya303/zulip,kou/zulip,jerryge/zulip,jimmy54/zulip,umkay/zulip,adnanh/zulip,developerfm/zulip,fw1121/zulip,adnanh/zulip,bluesea/zulip,shaunstanislaus/zulip,jainayush975/zulip,zachallaun/zulip,tiansiyuan/zulip,zwily/zulip,AZtheAsian/zulip,susansls/zulip,KJin99/zulip,grave-w-grave/zulip,dawran6/zulip,joshisa/zulip,huangkebo/zulip,wangdeshui/zulip,aakash-cr7/zulip,wavelets/zulip,vabs22/zulip,MariaFaBella85/zulip,EasonYi/zulip,jackrzhang/zulip,schatt/zulip,dotcool/zulip,technicalpick
les/zulip,jessedhillon/zulip,jainayush975/zulip,synicalsyntax/zulip,itnihao/zulip,tdr130/zulip,xuanhan863/zulip,shubhamdhama/zulip,peguin40/zulip,wweiradio/zulip,xuxiao/zulip,rishig/zulip,adnanh/zulip,vikas-parashar/zulip,jessedhillon/zulip,jackrzhang/zulip,ApsOps/zulip,vabs22/zulip,cosmicAsymmetry/zulip,kokoar/zulip,kou/zulip,zulip/zulip,verma-varsha/zulip,noroot/zulip,shubhamdhama/zulip,amallia/zulip,proliming/zulip,udxxabp/zulip,amallia/zulip,dotcool/zulip,jrowan/zulip,Juanvulcano/zulip,dwrpayne/zulip,aps-sids/zulip,dnmfarrell/zulip,bitemyapp/zulip,zulip/zulip,yocome/zulip,xuxiao/zulip,kokoar/zulip,yocome/zulip,dawran6/zulip,akuseru/zulip,proliming/zulip,jimmy54/zulip,schatt/zulip,Jianchun1/zulip,lfranchi/zulip,wangdeshui/zulip,eeshangarg/zulip,aps-sids/zulip,shaunstanislaus/zulip,peiwei/zulip,bastianh/zulip,dxq-git/zulip,ahmadassaf/zulip,umkay/zulip,wweiradio/zulip,Jianchun1/zulip,zofuthan/zulip,paxapy/zulip,luyifan/zulip,PhilSk/zulip,m1ssou/zulip,punchagan/zulip,hafeez3000/zulip,mahim97/zulip,amanharitsh123/zulip,niftynei/zulip,susansls/zulip,pradiptad/zulip,AZtheAsian/zulip,umkay/zulip,fw1121/zulip,dawran6/zulip,ryansnowboarder/zulip,isht3/zulip,krtkmj/zulip,dnmfarrell/zulip,zhaoweigg/zulip,ryanbackman/zulip,hj3938/zulip,yuvipanda/zulip,shrikrishnaholla/zulip,Suninus/zulip,KJin99/zulip,gkotian/zulip,amanharitsh123/zulip,joyhchen/zulip,fw1121/zulip,MayB/zulip,kaiyuanheshang/zulip,dattatreya303/zulip,ericzhou2008/zulip,rishig/zulip,willingc/zulip,itnihao/zulip,mdavid/zulip,MariaFaBella85/zulip,tdr130/zulip,tommyip/zulip,LAndreas/zulip,Galexrt/zulip,JPJPJPOPOP/zulip,shubhamdhama/zulip,aakash-cr7/zulip,adnanh/zulip,SmartPeople/zulip,voidException/zulip,DazWorrall/zulip,LeeRisk/zulip,Drooids/zulip,babbage/zulip,alliejones/zulip,lfranchi/zulip,proliming/zulip,paxapy/zulip,guiquanz/zulip,willingc/zulip,peiwei/zulip,thomasboyt/zulip,Vallher/zulip,kou/zulip,grave-w-grave/zulip,akuseru/zulip,vakila/zulip,saitodisse/zulip,kaiyuanheshang/zulip,mohsenSy/zulip,peiwei/zulip,
showell/zulip,brockwhittaker/zulip,JPJPJPOPOP/zulip,tdr130/zulip | zephyr/management/commands/knight.py | zephyr/management/commands/knight.py | from __future__ import absolute_import
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.core import validators
from guardian.shortcuts import assign_perm
from zephyr.models import Realm, UserProfile
class Command(BaseCommand):
help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
option_list = BaseCommand.option_list + (
def handle(self, *args, **options):
try:
email = args[0]
except ValueError:
raise CommandError("""Please specify a user.""")
try:
profile = UserProfile.objects.get(email=email)
except ValidationError:
raise CommandError("No such user.")
if profile.has_perm('administer', profile.realm):
raise CommandError("User already has permission for this realm.")
else:
assign_perm('administer', profile, profile.realm)
print "Done!"
| apache-2.0 | Python | |
4aeba707e8bec6e5cfc8cebf08f307044e145bf3 | Add script to meas TS dispersion | lnls-fac/apsuite | apsuite/commissioning_scripts/measure_disp_ts.py | apsuite/commissioning_scripts/measure_disp_ts.py | #!/usr/bin/env python-sirius
"""."""
import time as _time
import numpy as np
from epics import PV
import pyaccel
from pymodels.middlelayer.devices import SOFB, RF
from apsuite.commissioning_scripts.base import BaseClass
class ParamsDisp:
"""."""
def __init__(self):
"""."""
self.energy_delta = 0.005 # in GeV
self.wait_time = 40
self.timeout_orb = 10
self.num_points = 10
self.delay2energy = 0.4923/489994.304 # [GeV/us]
@property
def ejection_delta(self):
return self.energy_delta / self.delay2energy # in us
class MeasureDispTBBO(BaseClass):
"""."""
HARMONIC_NUM = 828
def __init__(self):
"""."""
super().__init__(ParamsDisp())
self.devices = {
'ts_sofb': SOFB('TS'),
'rf': RF()
}
self.pvs = {
'injsi': PV('AS-RaMO:TI-EVG:InjSIDelay-SP'),
'digts': PV('AS-RaMO:TI-EVG:DigTSDelay-SP'),
'update_evt': PV('AS-RaMO:TI-EVG:UpdateEvt-Cmd'),
}
@property
def energy(self):
"""."""
return self.params.delay2energy * self.pvs['injsi'].value
@property
def trajx(self):
"""."""
return self.devices['ts_sofb'].trajx
@property
def trajy(self):
"""."""
return self.devices['ts_sofb'].trajy
@property
def nr_points(self):
"""."""
return self.devices['ts_sofb'].nr_points
@nr_points.setter
def nr_points(self, value):
self.devices['ts_sofb'].nr_points = int(value)
def wait(self, timeout=10):
"""."""
self.devices['ts_sofb'].wait(timeout=timeout)
def reset(self, wait=0):
"""."""
_time.sleep(wait)
self.devices['ts_sofb'].reset()
_time.sleep(1)
def update_events(self):
self.pvs['update_evt'].value = 1
def calc_delta(self, delta):
# delta and revolution time in [us]
t0 = self.HARMONIC_NUM/self.devices['rf'].frequency * 1e6
return round(delta/t0)*t0
def measure_dispersion(self):
"""."""
self.nr_points = self.params.num_points
delta = self.calc_delta(delta=self.params.ejection_delta)
self.reset(self.params.wait_time)
self.wait(self.params.timeout_orb)
orb = [-np.hstack([self.trajx, self.trajy]), ]
ene0 = self.energy
orig_delay = self.pvs['injsi'].value
orig_delay_digts = self.pvs['digts'].value
self.pvs['injsi'].value = orig_delay + delta
self.pvs['digts'].value = orig_delay_digts + delta
self.update_events()
self.reset(self.params.wait_time)
self.wait(self.params.timeout_orb)
orb.append(np.hstack([self.trajx, self.trajy]))
ene1 = self.energy
self.pvs['injsi'].value = orig_delay
self.pvs['digts'].value = orig_delay_digts
self.update_events()
d_ene = ene1/ene0 - 1
return np.array(orb).sum(axis=0) / d_ene
def calc_model_dispersionTBBO(model, bpms):
"""."""
dene = 1e-3
rout, *_ = pyaccel.tracking.line_pass(
model,
[[0, 0, 0, 0, dene/2, 0],
[0, 0, 0, 0, -dene/2, 0]],
bpms)
dispx = (rout[0, 0, :] - rout[1, 0, :]) / dene
dispy = (rout[0, 2, :] - rout[1, 2, :]) / dene
return np.hstack([dispx, dispy])
| mit | Python | |
ee1beb9b4bd51ab8f0bcb473d329275c68430919 | Ajoute un script remplaçant des extraits d’enregistrement par les enregistrements entiers. | dezede/dezede,dezede/dezede,dezede/dezede,dezede/dezede | scripts/remplacement_extraits_AFO.py | scripts/remplacement_extraits_AFO.py | # coding: utf-8
"""
Copie les fichiers listés dans "Campagne num° 2012-13.xlsx" (chemin : filepath)
depuis dir_source vers dir_dest.
"""
from __future__ import print_function
import os
import shutil
import pandas as pd
from scripts.import_AFO_11_2014 import path, NOM_FICHIER, REMARQUE, EXCL_MSG
def run(dir_source, dir_dest, file_path):
dir_source, dir_dest = map(os.path.normpath, (dir_source, dir_dest))
df = pd.read_excel(file_path, 1,
encoding=u'utf-8', parse_cols=u'M,N')
df = df[df[NOM_FICHIER].notnull() & (df[REMARQUE] != EXCL_MSG)]
n = len(df)*2
i = 0
for ext in (u'mp4', u'ogg'):
for s in df[NOM_FICHIER]:
i += 1
shutil.copyfile(path(dir_source, s, ext),
dir_dest + u'/' + s + u'.' + ext)
print('Remplacements : %s / %s' % (i, n), end=u'\r')
| bsd-3-clause | Python | |
7eef45621d3e917cf09101b309a7c839008dde76 | Fix indentation | AnishShah/tensorflow,eaplatanios/tensorflow,ArtsiomCh/tensorflow,aselle/tensorflow,manazhao/tf_recsys,gojira/tensorflow,jalexvig/tensorflow,manazhao/tf_recsys,meteorcloudy/tensorflow,snnn/tensorflow,adamtiger/tensorflow,xzturn/tensorflow,ghchinoy/tensorflow,renyi533/tensorflow,tiagofrepereira2012/tensorflow,raymondxyang/tensorflow,Moriadry/tensorflow,dyoung418/tensorflow,Xeralux/tensorflow,pavelchristof/gomoku-ai,jendap/tensorflow,alistairlow/tensorflow,unsiloai/syntaxnet-ops-hack,frreiss/tensorflow-fred,apark263/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,girving/tensorflow,gojira/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow,theflofly/tensorflow,a-doumoulakis/tensorflow,jostep/tensorflow,aselle/tensorflow,pavelchristof/gomoku-ai,eadgarchen/tensorflow,mavenlin/tensorflow,maciekcc/tensorflow,Intel-tensorflow/tensorflow,JingJunYin/tensorflow,karllessard/tensorflow,zycdragonball/tensorflow,mdrumond/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,alistairlow/tensorflow,yongtang/tensorflow,ageron/tensorflow,mavenlin/tensorflow,dongjoon-hyun/tensorflow,adit-chandra/tensorflow,allenlavoie/tensorflow,asimshankar/tensorflow,tiagofrepereira2012/tensorflow,Bismarrck/tensorflow,horance-liu/tensorflow,DavidNorman/tensorflow,jwlawson/tensorflow,horance-liu/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,hehongliang/tensorflow,aselle/tensorflow,mavenlin/tensorflow,aam-at/tensorflow,ppwwyyxx/tensorflow,nburn42/tensorflow,dendisuhubdy/tensorflow,jbedorf/tensorflow,tillahoffmann/tensorflow,eadgarchen/tensorflow,ppwwyyxx/tensorflow,freedomtan/tensorflow,snnn/tensorflow,kevin-coder/tensorflow-fork,drpngx/tensorflow,jalexvig/tensorflow,ppwwyyxx/tensorflow,karllessard/tensorflow,allenlavoie/tensorflow,dancingdan/tensorflow,llhe/tensorflow,aam-at/tensorflow,snnn/tensorflow,ppwwyyxx/tensorflow,jhaux/tensorflow,mavenlin/tensorflow,kevin-coder/tensorflow-fork,dendisuhubdy/tensorflow,tensorflow
/tensorflow-pywrap_saved_model,kevin-coder/tensorflow-fork,lukeiwanski/tensorflow,ishay2b/tensorflow,Moriadry/tensorflow,annarev/tensorflow,alsrgv/tensorflow,frreiss/tensorflow-fred,eadgarchen/tensorflow,bowang/tensorflow,ppwwyyxx/tensorflow,codrut3/tensorflow,lukeiwanski/tensorflow,rabipanda/tensorflow,drpngx/tensorflow,gunan/tensorflow,aldian/tensorflow,gunan/tensorflow,alsrgv/tensorflow,dongjoon-hyun/tensorflow,yufengg/tensorflow,yanchen036/tensorflow,aam-at/tensorflow,gunan/tensorflow,frreiss/tensorflow-fred,mavenlin/tensorflow,ageron/tensorflow,Mazecreator/tensorflow,adit-chandra/tensorflow,tornadozou/tensorflow,petewarden/tensorflow,snnn/tensorflow,av8ramit/tensorflow,asimshankar/tensorflow,with-git/tensorflow,with-git/tensorflow,apark263/tensorflow,sarvex/tensorflow,jbedorf/tensorflow,cxxgtxy/tensorflow,ppwwyyxx/tensorflow,jart/tensorflow,dancingdan/tensorflow,tiagofrepereira2012/tensorflow,jhseu/tensorflow,ishay2b/tensorflow,aam-at/tensorflow,jhaux/tensorflow,nburn42/tensorflow,Intel-tensorflow/tensorflow,jendap/tensorflow,ran5515/DeepDecision,tensorflow/tensorflow,alivecor/tensorflow,alshedivat/tensorflow,girving/tensorflow,tensorflow/tensorflow,zycdragonball/tensorflow,Intel-Corporation/tensorflow,Mazecreator/tensorflow,laszlocsomor/tensorflow,caisq/tensorflow,ghchinoy/tensorflow,benoitsteiner/tensorflow-xsmm,mixturemodel-flow/tensorflow,manazhao/tf_recsys,kobejean/tensorflow,maciekcc/tensorflow,dendisuhubdy/tensorflow,nightjean/Deep-Learning,sarvex/tensorflow,xodus7/tensorflow,ran5515/DeepDecision,hsaputra/tensorflow,mixturemodel-flow/tensorflow,mixturemodel-flow/tensorflow,ishay2b/tensorflow,arborh/tensorflow,manipopopo/tensorflow,paolodedios/tensorflow,petewarden/tensorflow,kobejean/tensorflow,alsrgv/tensorflow,andrewcmyers/tensorflow,hfp/tensorflow-xsmm,yufengg/tensorflow,Kongsea/tensorflow,ychfan/tensorflow,Mazecreator/tensorflow,eaplatanios/tensorflow,ppwwyyxx/tensorflow,jendap/tensorflow,jbedorf/tensorflow,jendap/tensorflow,theflofly/tensorflow,snnn
/tensorflow,unsiloai/syntaxnet-ops-hack,codrut3/tensorflow,alshedivat/tensorflow,alsrgv/tensorflow,caisq/tensorflow,andrewcmyers/tensorflow,Intel-Corporation/tensorflow,jhaux/tensorflow,Mazecreator/tensorflow,with-git/tensorflow,dongjoon-hyun/tensorflow,DavidNorman/tensorflow,jbedorf/tensorflow,nightjean/Deep-Learning,benoitsteiner/tensorflow-xsmm,annarev/tensorflow,hsaputra/tensorflow,gautam1858/tensorflow,mavenlin/tensorflow,eaplatanios/tensorflow,gautam1858/tensorflow,dancingdan/tensorflow,hfp/tensorflow-xsmm,eadgarchen/tensorflow,aselle/tensorflow,caisq/tensorflow,theflofly/tensorflow,mixturemodel-flow/tensorflow,renyi533/tensorflow,cxxgtxy/tensorflow,yongtang/tensorflow,Bulochkin/tensorflow_pack,kobejean/tensorflow,eadgarchen/tensorflow,jalexvig/tensorflow,davidzchen/tensorflow,seanli9jan/tensorflow,manipopopo/tensorflow,hsaputra/tensorflow,hehongliang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tillahoffmann/tensorflow,Mistobaan/tensorflow,renyi533/tensorflow,petewarden/tensorflow,gautam1858/tensorflow,hehongliang/tensorflow,girving/tensorflow,ArtsiomCh/tensorflow,zasdfgbnm/tensorflow,jhseu/tensorflow,alistairlow/tensorflow,renyi533/tensorflow,brchiu/tensorflow,ville-k/tensorflow,chemelnucfin/tensorflow,DavidNorman/tensorflow,benoitsteiner/tensorflow-opencl,alivecor/tensorflow,lukeiwanski/tensorflow,apark263/tensorflow,lukeiwanski/tensorflow,tornadozou/tensorflow,jart/tensorflow,gautam1858/tensorflow,JingJunYin/tensorflow,caisq/tensorflow,aam-at/tensorflow,jbedorf/tensorflow,arborh/tensorflow,zasdfgbnm/tensorflow,annarev/tensorflow,sarvex/tensorflow,hsaputra/tensorflow,bowang/tensorflow,adit-chandra/tensorflow,benoitsteiner/tensorflow,benoitsteiner/tensorflow,manazhao/tf_recsys,tornadozou/tensorflow,jhaux/tensorflow,horance-liu/tensorflow,davidzchen/tensorflow,jart/tensorflow,brchiu/tensorflow,manipopopo/tensorflow,benoitsteiner/tensorflow-opencl,laszlocsomor/tensorflow,freedomtan/tensorflow,alsrgv/tensorflow,ja
lexvig/tensorflow,kobejean/tensorflow,snnn/tensorflow,jendap/tensorflow,ArtsiomCh/tensorflow,ghchinoy/tensorflow,dyoung418/tensorflow,ville-k/tensorflow,apark263/tensorflow,jhseu/tensorflow,aam-at/tensorflow,sjperkins/tensorflow,jostep/tensorflow,ageron/tensorflow,hehongliang/tensorflow,tillahoffmann/tensorflow,zasdfgbnm/tensorflow,freedomtan/tensorflow,jbedorf/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,zasdfgbnm/tensorflow,arborh/tensorflow,llhe/tensorflow,mdrumond/tensorflow,paolodedios/tensorflow,AnishShah/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,cxxgtxy/tensorflow,aam-at/tensorflow,jbedorf/tensorflow,dyoung418/tensorflow,aselle/tensorflow,nightjean/Deep-Learning,annarev/tensorflow,paolodedios/tensorflow,hsaputra/tensorflow,gunan/tensorflow,tornadozou/tensorflow,brchiu/tensorflow,pavelchristof/gomoku-ai,av8ramit/tensorflow,AnishShah/tensorflow,maciekcc/tensorflow,apark263/tensorflow,drpngx/tensorflow,a-doumoulakis/tensorflow,Xeralux/tensorflow,nolanliou/tensorflow,frreiss/tensorflow-fred,aselle/tensorflow,JVillella/tensorflow,dongjoon-hyun/tensorflow,raymondxyang/tensorflow,alshedivat/tensorflow,allenlavoie/tensorflow,alsrgv/tensorflow,annarev/tensorflow,Bismarrck/tensorflow,eaplatanios/tensorflow,yongtang/tensorflow,andrewcmyers/tensorflow,jart/tensorflow,yanchen036/tensorflow,apark263/tensorflow,karllessard/tensorflow,Bulochkin/tensorflow_pack,ghchinoy/tensorflow,Bulochkin/tensorflow_pack,petewarden/tensorflow,av8ramit/tensorflow,jwlawson/tensorflow,xodus7/tensorflow,annarev/tensorflow,freedomtan/tensorflow,aam-at/tensorflow,suiyuan2009/tensorflow,allenlavoie/tensorflow,Mazecreator/tensorflow,nolanliou/tensorflow,hfp/tensorflow-xsmm,tensorflow/tensorflow-experimental_link_static_libraries_once,gojira/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,chemelnucfin/tensorflow,ghchinoy/tensorflow,drpngx/tensorflow,eaplatanios/tensorflow,jbedorf/tensorflow,a-doumoulakis/tensorflow,Intel-Corporation/tensorflow,raymondxyan
g/tensorflow,mdrumond/tensorflow,mdrumond/tensorflow,benoitsteiner/tensorflow,hfp/tensorflow-xsmm,allenlavoie/tensorflow,guschmue/tensorflow,ghchinoy/tensorflow,yanchen036/tensorflow,arborh/tensorflow,ageron/tensorflow,maciekcc/tensorflow,alshedivat/tensorflow,paolodedios/tensorflow,benoitsteiner/tensorflow-xsmm,tensorflow/tensorflow-pywrap_tf_optimizer,alsrgv/tensorflow,dongjoon-hyun/tensorflow,unsiloai/syntaxnet-ops-hack,ran5515/DeepDecision,manipopopo/tensorflow,dendisuhubdy/tensorflow,DavidNorman/tensorflow,Xeralux/tensorflow,ZhangXinNan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,xodus7/tensorflow,jostep/tensorflow,Kongsea/tensorflow,alshedivat/tensorflow,alistairlow/tensorflow,sjperkins/tensorflow,xodus7/tensorflow,cxxgtxy/tensorflow,Xeralux/tensorflow,Bismarrck/tensorflow,ageron/tensorflow,eaplatanios/tensorflow,frreiss/tensorflow-fred,dyoung418/tensorflow,Kongsea/tensorflow,a-doumoulakis/tensorflow,horance-liu/tensorflow,xodus7/tensorflow,ran5515/DeepDecision,manazhao/tf_recsys,andrewcmyers/tensorflow,petewarden/tensorflow,JVillella/tensorflow,jalexvig/tensorflow,ZhangXinNan/tensorflow,sjperkins/tensorflow,ville-k/tensorflow,zasdfgbnm/tensorflow,ppwwyyxx/tensorflow,ville-k/tensorflow,yongtang/tensorflow,arborh/tensorflow,Intel-tensorflow/tensorflow,ageron/tensorflow,benoitsteiner/tensorflow-xsmm,eaplatanios/tensorflow,Mistobaan/tensorflow,karllessard/tensorflow,dancingdan/tensorflow,nightjean/Deep-Learning,aldian/tensorflow,Bismarrck/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jhaux/tensorflow,mdrumond/tensorflow,tornadozou/tensorflow,Intel-tensorflow/tensorflow,xzturn/tensorflow,Intel-tensorflow/tensorflow,benoitsteiner/tensorflow-xsmm,suiyuan2009/tensorflow,alshedivat/tensorflow,jbedorf/tensorflow,gautam1858/tensorflow,annarev/tensorflow,tiagofrepereira2012/tensorflow,yanchen036/tensorflow,karllessard/tensorflow,Bismarrck/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Xeralux/tensorflow,alsrgv/tensorflow,hsaputra
/tensorflow,brchiu/tensorflow,dongjoon-hyun/tensorflow,aselle/tensorflow,unsiloai/syntaxnet-ops-hack,ravindrapanda/tensorflow,DavidNorman/tensorflow,ran5515/DeepDecision,girving/tensorflow,xzturn/tensorflow,dancingdan/tensorflow,zycdragonball/tensorflow,DavidNorman/tensorflow,drpngx/tensorflow,nburn42/tensorflow,kevin-coder/tensorflow-fork,drpngx/tensorflow,eaplatanios/tensorflow,theflofly/tensorflow,Intel-Corporation/tensorflow,zasdfgbnm/tensorflow,jendap/tensorflow,renyi533/tensorflow,ppwwyyxx/tensorflow,alsrgv/tensorflow,ZhangXinNan/tensorflow,dancingdan/tensorflow,llhe/tensorflow,hehongliang/tensorflow,Intel-tensorflow/tensorflow,Bulochkin/tensorflow_pack,Mistobaan/tensorflow,freedomtan/tensorflow,xzturn/tensorflow,renyi533/tensorflow,jalexvig/tensorflow,ishay2b/tensorflow,xodus7/tensorflow,arborh/tensorflow,tensorflow/tensorflow-pywrap_saved_model,benoitsteiner/tensorflow-opencl,adit-chandra/tensorflow,Bismarrck/tensorflow,laszlocsomor/tensorflow,ZhangXinNan/tensorflow,caisq/tensorflow,freedomtan/tensorflow,llhe/tensorflow,llhe/tensorflow,yufengg/tensorflow,tensorflow/tensorflow-pywrap_saved_model,asimshankar/tensorflow,nolanliou/tensorflow,xodus7/tensorflow,asimshankar/tensorflow,hfp/tensorflow-xsmm,Xeralux/tensorflow,adit-chandra/tensorflow,arborh/tensorflow,with-git/tensorflow,DavidNorman/tensorflow,seanli9jan/tensorflow,seanli9jan/tensorflow,eaplatanios/tensorflow,lukeiwanski/tensorflow,seanli9jan/tensorflow,dancingdan/tensorflow,jostep/tensorflow,kobejean/tensorflow,xzturn/tensorflow,JingJunYin/tensorflow,sjperkins/tensorflow,yanchen036/tensorflow,ville-k/tensorflow,caisq/tensorflow,sarvex/tensorflow,jwlawson/tensorflow,DavidNorman/tensorflow,aldian/tensorflow,llhe/tensorflow,sjperkins/tensorflow,frreiss/tensorflow-fred,hfp/tensorflow-xsmm,Kongsea/tensorflow,gautam1858/tensorflow,jendap/tensorflow,frreiss/tensorflow-fred,JVillella/tensorflow,AnishShah/tensorflow,codrut3/tensorflow,rabipanda/tensorflow,mixturemodel-flow/tensorflow,yongtang/tensorflow,ZhangX
inNan/tensorflow,laszlocsomor/tensorflow,eaplatanios/tensorflow,meteorcloudy/tensorflow,brchiu/tensorflow,dendisuhubdy/tensorflow,lukeiwanski/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,Moriadry/tensorflow,rabipanda/tensorflow,ageron/tensorflow,nburn42/tensorflow,brchiu/tensorflow,apark263/tensorflow,jendap/tensorflow,aldian/tensorflow,dendisuhubdy/tensorflow,alsrgv/tensorflow,guschmue/tensorflow,tensorflow/tensorflow-pywrap_saved_model,zasdfgbnm/tensorflow,jalexvig/tensorflow,sarvex/tensorflow,theflofly/tensorflow,Bulochkin/tensorflow_pack,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,renyi533/tensorflow,with-git/tensorflow,Mistobaan/tensorflow,benoitsteiner/tensorflow-opencl,arborh/tensorflow,bowang/tensorflow,zycdragonball/tensorflow,ychfan/tensorflow,JVillella/tensorflow,allenlavoie/tensorflow,jwlawson/tensorflow,xzturn/tensorflow,rabipanda/tensorflow,yongtang/tensorflow,dancingdan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,av8ramit/tensorflow,chemelnucfin/tensorflow,nolanliou/tensorflow,guschmue/tensorflow,tillahoffmann/tensorflow,aldian/tensorflow,maciekcc/tensorflow,gunan/tensorflow,yongtang/tensorflow,laszlocsomor/tensorflow,caisq/tensorflow,Intel-Corporation/tensorflow,lakshayg/tensorflow,horance-liu/tensorflow,jendap/tensorflow,zycdragonball/tensorflow,adamtiger/tensorflow,ghchinoy/tensorflow,Bismarrck/tensorflow,girving/tensorflow,renyi533/tensorflow,gunan/tensorflow,ville-k/tensorflow,paolodedios/tensorflow,drpngx/tensorflow,Bulochkin/tensorflow_pack,lukeiwanski/tensorflow,dendisuhubdy/tensorflow,av8ramit/tensorflow,dyoung418/tensorflow,jwlawson/tensorflow,tensorflow/tensorflow,benoitsteiner/tensorflow-opencl,yanchen036/tensorflow,yufengg/tensorflow,cxxgtxy/tensorflow,benoitsteiner/tensorflow,Moriadry/tensorflow,AnishShah/tensorflow,ravindrapanda/tensorflow,jart/tensorflow,pavelchristof/gomoku-ai,lakshayg/tensorflow,jendap/tensorflow,adit-chandra/tensorflow,chemelnucfin/t
ensorflow,jwlawson/tensorflow,mdrumond/tensorflow,Xeralux/tensorflow,gojira/tensorflow,suiyuan2009/tensorflow,gojira/tensorflow,yongtang/tensorflow,nburn42/tensorflow,brchiu/tensorflow,laszlocsomor/tensorflow,JingJunYin/tensorflow,benoitsteiner/tensorflow-opencl,Intel-tensorflow/tensorflow,manazhao/tf_recsys,JingJunYin/tensorflow,mdrumond/tensorflow,davidzchen/tensorflow,Bulochkin/tensorflow_pack,davidzchen/tensorflow,DavidNorman/tensorflow,dyoung418/tensorflow,xzturn/tensorflow,ravindrapanda/tensorflow,asimshankar/tensorflow,tensorflow/tensorflow-pywrap_saved_model,a-doumoulakis/tensorflow,Xeralux/tensorflow,pavelchristof/gomoku-ai,hsaputra/tensorflow,ageron/tensorflow,gojira/tensorflow,gautam1858/tensorflow,dancingdan/tensorflow,cxxgtxy/tensorflow,jart/tensorflow,snnn/tensorflow,adamtiger/tensorflow,benoitsteiner/tensorflow,codrut3/tensorflow,tornadozou/tensorflow,kobejean/tensorflow,jhaux/tensorflow,nightjean/Deep-Learning,jwlawson/tensorflow,ageron/tensorflow,asimshankar/tensorflow,paolodedios/tensorflow,laszlocsomor/tensorflow,chemelnucfin/tensorflow,brchiu/tensorflow,kobejean/tensorflow,Mistobaan/tensorflow,Xeralux/tensorflow,jhaux/tensorflow,ZhangXinNan/tensorflow,jalexvig/tensorflow,petewarden/tensorflow,adamtiger/tensorflow,karllessard/tensorflow,ppwwyyxx/tensorflow,theflofly/tensorflow,yanchen036/tensorflow,Mazecreator/tensorflow,dyoung418/tensorflow,ravindrapanda/tensorflow,jwlawson/tensorflow,Mazecreator/tensorflow,rabipanda/tensorflow,benoitsteiner/tensorflow-opencl,hsaputra/tensorflow,guschmue/tensorflow,xodus7/tensorflow,benoitsteiner/tensorflow-xsmm,jostep/tensorflow,lukeiwanski/tensorflow,tillahoffmann/tensorflow,nburn42/tensorflow,horance-liu/tensorflow,alsrgv/tensorflow,alivecor/tensorflow,ZhangXinNan/tensorflow,annarev/tensorflow,sjperkins/tensorflow,allenlavoie/tensorflow,av8ramit/tensorflow,sjperkins/tensorflow,horance-liu/tensorflow,nolanliou/tensorflow,maciekcc/tensorflow,raymondxyang/tensorflow,arborh/tensorflow,sjperkins/tensorflow,jhaux/te
nsorflow,jhaux/tensorflow,girving/tensorflow,drpngx/tensorflow,bowang/tensorflow,rabipanda/tensorflow,xzturn/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,dendisuhubdy/tensorflow,kevin-coder/tensorflow-fork,ZhangXinNan/tensorflow,gunan/tensorflow,jhseu/tensorflow,chemelnucfin/tensorflow,petewarden/tensorflow,av8ramit/tensorflow,AnishShah/tensorflow,nolanliou/tensorflow,AnishShah/tensorflow,aldian/tensorflow,arborh/tensorflow,av8ramit/tensorflow,aselle/tensorflow,Bulochkin/tensorflow_pack,horance-liu/tensorflow,girving/tensorflow,laszlocsomor/tensorflow,adamtiger/tensorflow,benoitsteiner/tensorflow-xsmm,manipopopo/tensorflow,hehongliang/tensorflow,davidzchen/tensorflow,ychfan/tensorflow,yanchen036/tensorflow,codrut3/tensorflow,DavidNorman/tensorflow,nightjean/Deep-Learning,paolodedios/tensorflow,andrewcmyers/tensorflow,meteorcloudy/tensorflow,renyi533/tensorflow,manipopopo/tensorflow,davidzchen/tensorflow,jalexvig/tensorflow,seanli9jan/tensorflow,ArtsiomCh/tensorflow,drpngx/tensorflow,gunan/tensorflow,a-doumoulakis/tensorflow,nightjean/Deep-Learning,maciekcc/tensorflow,karllessard/tensorflow,ravindrapanda/tensorflow,kevin-coder/tensorflow-fork,renyi533/tensorflow,unsiloai/syntaxnet-ops-hack,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,Mistobaan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,rabipanda/tensorflow,apark263/tensorflow,lakshayg/tensorflow,meteorcloudy/tensorflow,dyoung418/tensorflow,alivecor/tensorflow,Mazecreator/tensorflow,jhseu/tensorflow,jhaux/tensorflow,AnishShah/tensorflow,unsiloai/syntaxnet-ops-hack,brchiu/tensorflow,nburn42/tensorflow,davidzchen/tensorflow,bowang/tensorflow,Bulochkin/tensorflow_pack,nolanliou/tensorflow,sarvex/tensorflow,hsaputra/tensorflow,Bismarrck/tensorflow,arborh/tensorflow,aldian/tensorflow,Mistobaan/tensorflow,xodus7/tensorflow,theflofly/tensorflow,manazhao/tf_recsys,Xeralux/tensorflow,meteorcloudy/tensorflow,Moriadry/tensorflow,Moriadry/tensorflow,eadgarchen/tensorflow,brchiu/tensorflow,yufengg/t
ensorflow,manipopopo/tensorflow,alshedivat/tensorflow,guschmue/tensorflow,alistairlow/tensorflow,benoitsteiner/tensorflow-xsmm,ppwwyyxx/tensorflow,pavelchristof/gomoku-ai,dongjoon-hyun/tensorflow,nolanliou/tensorflow,av8ramit/tensorflow,adit-chandra/tensorflow,laszlocsomor/tensorflow,gojira/tensorflow,adit-chandra/tensorflow,llhe/tensorflow,jhseu/tensorflow,kobejean/tensorflow,tensorflow/tensorflow-pywrap_saved_model,ageron/tensorflow,Bismarrck/tensorflow,jart/tensorflow,aam-at/tensorflow,karllessard/tensorflow,benoitsteiner/tensorflow,paolodedios/tensorflow,ran5515/DeepDecision,gojira/tensorflow,adamtiger/tensorflow,nburn42/tensorflow,kevin-coder/tensorflow-fork,alistairlow/tensorflow,lakshayg/tensorflow,zasdfgbnm/tensorflow,jwlawson/tensorflow,kevin-coder/tensorflow-fork,suiyuan2009/tensorflow,asimshankar/tensorflow,gunan/tensorflow,alivecor/tensorflow,alistairlow/tensorflow,xzturn/tensorflow,xzturn/tensorflow,eadgarchen/tensorflow,mavenlin/tensorflow,tensorflow/tensorflow,ishay2b/tensorflow,chemelnucfin/tensorflow,manipopopo/tensorflow,rabipanda/tensorflow,renyi533/tensorflow,hehongliang/tensorflow,eadgarchen/tensorflow,Kongsea/tensorflow,allenlavoie/tensorflow,ville-k/tensorflow,Bismarrck/tensorflow,meteorcloudy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,ville-k/tensorflow,ravindrapanda/tensorflow,gojira/tensorflow,jwlawson/tensorflow,suiyuan2009/tensorflow,kobejean/tensorflow,jendap/tensorflow,JingJunYin/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,chemelnucfin/tensorflow,gojira/tensorflow,Bismarrck/tensorflow,tornadozou/tensorflow,ZhangXinNan/tensorflow,seanli9jan/tensorflow,Bulochkin/tensorflow_pack,alshedivat/tensorflow,maciekcc/tensorflow,Intel-tensorflow/tensorflow,tiagofrepereira2012/tensorflow,jalexvig/tensorflow,manipopopo/tensorflow,aselle/tensorflow,tensorflow/tensorflow,ravindrapanda/tensorflow,Intel-Corporation/tensorflow,snnn/tensorflow,alshedivat/tensorflow,av8ramit/tenso
rflow,davidzchen/tensorflow,guschmue/tensorflow,guschmue/tensorflow,ishay2b/tensorflow,ishay2b/tensorflow,guschmue/tensorflow,codrut3/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow,with-git/tensorflow,snnn/tensorflow,cxxgtxy/tensorflow,tornadozou/tensorflow,bowang/tensorflow,jart/tensorflow,ArtsiomCh/tensorflow,mixturemodel-flow/tensorflow,codrut3/tensorflow,annarev/tensorflow,ageron/tensorflow,adit-chandra/tensorflow,caisq/tensorflow,gautam1858/tensorflow,benoitsteiner/tensorflow-opencl,sjperkins/tensorflow,lakshayg/tensorflow,allenlavoie/tensorflow,asimshankar/tensorflow,gautam1858/tensorflow,dongjoon-hyun/tensorflow,alivecor/tensorflow,alsrgv/tensorflow,ychfan/tensorflow,unsiloai/syntaxnet-ops-hack,chemelnucfin/tensorflow,tensorflow/tensorflow,mdrumond/tensorflow,dancingdan/tensorflow,Moriadry/tensorflow,codrut3/tensorflow,tillahoffmann/tensorflow,Bulochkin/tensorflow_pack,xzturn/tensorflow,JingJunYin/tensorflow,asimshankar/tensorflow,lakshayg/tensorflow,zycdragonball/tensorflow,jart/tensorflow,ArtsiomCh/tensorflow,with-git/tensorflow,drpngx/tensorflow,AnishShah/tensorflow,kobejean/tensorflow,davidzchen/tensorflow,annarev/tensorflow,ville-k/tensorflow,eadgarchen/tensorflow,mavenlin/tensorflow,jbedorf/tensorflow,snnn/tensorflow,Mistobaan/tensorflow,hsaputra/tensorflow,kobejean/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,benoitsteiner/tensorflow,pavelchristof/gomoku-ai,davidzchen/tensorflow,snnn/tensorflow,nburn42/tensorflow,pavelchristof/gomoku-ai,manipopopo/tensorflow,adit-chandra/tensorflow,hfp/tensorflow-xsmm,xzturn/tensorflow,mdrumond/tensorflow,ghchinoy/tensorflow,chemelnucfin/tensorflow,benoitsteiner/tensorflow,seanli9jan/tensorflow,jostep/tensorflow,raymondxyang/tensorflow,dongjoon-hyun/tensorflow,gunan/tensorflow,ravindrapanda/tensorflow,tiagofrepereira2012/tensorflow,chemelnucfin/tensorflow,jalexvig/tensorflow,yongtang/tensorflow,JingJunYin/tensorflow,JingJunYin/tensorflow,hfp/tensorflow-xsmm,manipopopo/tensorflow,davidzchen/tensorflow,ghchi
noy/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,allenlavoie/tensorflow,karllessard/tensorflow,Kongsea/tensorflow,Intel-Corporation/tensorflow,kevin-coder/tensorflow-fork,ppwwyyxx/tensorflow,kevin-coder/tensorflow-fork,bowang/tensorflow,sjperkins/tensorflow,jostep/tensorflow,aam-at/tensorflow,hfp/tensorflow-xsmm,yufengg/tensorflow,JVillella/tensorflow,freedomtan/tensorflow,andrewcmyers/tensorflow,meteorcloudy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,theflofly/tensorflow,yongtang/tensorflow,ArtsiomCh/tensorflow,alshedivat/tensorflow,girving/tensorflow,Mazecreator/tensorflow,nightjean/Deep-Learning,aam-at/tensorflow,jhseu/tensorflow,JVillella/tensorflow,nburn42/tensorflow,rabipanda/tensorflow,andrewcmyers/tensorflow,zasdfgbnm/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,benoitsteiner/tensorflow-xsmm,tillahoffmann/tensorflow,alshedivat/tensorflow,Mistobaan/tensorflow,DavidNorman/tensorflow,girving/tensorflow,Kongsea/tensorflow,raymondxyang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,hfp/tensorflow-xsmm,Intel-Corporation/tensorflow,caisq/tensorflow,meteorcloudy/tensorflow,Bulochkin/tensorflow_pack,jhseu/tensorflow,alivecor/tensorflow,aselle/tensorflow,llhe/tensorflow,seanli9jan/tensorflow,laszlocsomor/tensorflow,suiyuan2009/tensorflow,codrut3/tensorflow,adamtiger/tensorflow,andrewcmyers/tensorflow,mixturemodel-flow/tensorflow,ran5515/DeepDecision,seanli9jan/tensorflow,ghchinoy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,JVillella/tensorflow,jbedorf/tensorflow,seanli9jan/tensorflow,brchiu/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,rabipanda/tensorflow,a-doumoulakis/tensorflow,tensorflow/tensorflow-pywrap_saved_model,eaplatanios/tensorflow,dancingdan/tensorflow,frreiss/tensorflow-fred,ghchinoy/tensorflow,gautam1858/tensorflow,xodus7/tensorflow,girving/tensorflow,ville-k/tensorflow,tillahoffman
n/tensorflow,adit-chandra/tensorflow,jwlawson/tensorflow,apark263/tensorflow,girving/tensorflow,ArtsiomCh/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,guschmue/tensorflow,tiagofrepereira2012/tensorflow,lakshayg/tensorflow,nburn42/tensorflow,AnishShah/tensorflow,freedomtan/tensorflow,aam-at/tensorflow,petewarden/tensorflow,zasdfgbnm/tensorflow,llhe/tensorflow,av8ramit/tensorflow,with-git/tensorflow,paolodedios/tensorflow,xodus7/tensorflow,raymondxyang/tensorflow,unsiloai/syntaxnet-ops-hack,llhe/tensorflow,theflofly/tensorflow,lukeiwanski/tensorflow,tensorflow/tensorflow,jostep/tensorflow,aldian/tensorflow,petewarden/tensorflow,benoitsteiner/tensorflow-xsmm,ychfan/tensorflow,jart/tensorflow,alistairlow/tensorflow,frreiss/tensorflow-fred,bowang/tensorflow,dendisuhubdy/tensorflow,JingJunYin/tensorflow,hfp/tensorflow-xsmm,apark263/tensorflow,arborh/tensorflow,Intel-tensorflow/tensorflow,ZhangXinNan/tensorflow,gunan/tensorflow,apark263/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,alistairlow/tensorflow,aselle/tensorflow,mixturemodel-flow/tensorflow,sarvex/tensorflow,ychfan/tensorflow,kevin-coder/tensorflow-fork,seanli9jan/tensorflow,allenlavoie/tensorflow,horance-liu/tensorflow,lakshayg/tensorflow,benoitsteiner/tensorflow-opencl,frreiss/tensorflow-fred,AnishShah/tensorflow,zycdragonball/tensorflow,codrut3/tensorflow,lukeiwanski/tensorflow,dendisuhubdy/tensorflow,guschmue/tensorflow,ravindrapanda/tensorflow,renyi533/tensorflow,nolanliou/tensorflow,ZhangXinNan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,caisq/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,benoitsteiner/tensorflow-xsmm,a-doumoulakis/tensorflow,meteorcloudy/tensorflow,yufengg/tensorflow,theflofly/tensorflow,nolanliou/tensorflow,Xeralux/tensorflow,frreiss/tensorflow-fred,jbedorf/tensorflow,jhseu/tensorflow,zasdfgbnm/tensorflow,tiagofrepereira2012/tensorflow,ghchinoy/tensorflow,asimshankar/tensorflow,rabipanda/tensorfl
ow,Kongsea/tensorflow,meteorcloudy/tensorflow,alistairlow/tensorflow,theflofly/tensorflow,ychfan/tensorflow,raymondxyang/tensorflow,eadgarchen/tensorflow,Mistobaan/tensorflow,ychfan/tensorflow,asimshankar/tensorflow,benoitsteiner/tensorflow,dongjoon-hyun/tensorflow,jhseu/tensorflow,DavidNorman/tensorflow,dongjoon-hyun/tensorflow,ageron/tensorflow,Mistobaan/tensorflow,karllessard/tensorflow,Moriadry/tensorflow,petewarden/tensorflow,jhseu/tensorflow,suiyuan2009/tensorflow,horance-liu/tensorflow,ychfan/tensorflow,jhseu/tensorflow,benoitsteiner/tensorflow,adit-chandra/tensorflow,gojira/tensorflow,theflofly/tensorflow,Intel-tensorflow/tensorflow,alivecor/tensorflow | tensorflow/core/platform/default/build_config_root.bzl | tensorflow/core/platform/default/build_config_root.bzl | # Lower-level functionality for build config.
# The functions in this file might be referred by tensorflow.bzl. They have to
# be separate to avoid cyclic references.
def tf_cuda_tests_tags():
return ["local"]
def tf_sycl_tests_tags():
return ["local"]
def tf_additional_plugin_deps():
return select({
"//tensorflow:with_xla_support": ["//tensorflow/compiler/jit"],
"//conditions:default": [],
})
def tf_additional_xla_deps_py():
return []
def tf_additional_license_deps():
return select({
"//tensorflow:with_xla_support": ["@llvm//:LICENSE.TXT"],
"//conditions:default": [],
})
def tf_additional_verbs_deps():
return select({
"//tensorflow:with_verbs_support": [
"//tensorflow/contrib/verbs:verbs_server_lib",
"//tensorflow/contrib/verbs:grpc_verbs_client"
],
"//conditions:default": [],
})
def tf_additional_mpi_deps():
return select({
"//tensorflow:with_mpi_support": [
"//tensorflow/contrib/mpi:mpi_server_lib"
],
"//conditions:default": [],
})
| # Lower-level functionality for build config.
# The functions in this file might be referred by tensorflow.bzl. They have to
# be separate to avoid cyclic references.
def tf_cuda_tests_tags():
return ["local"]
def tf_sycl_tests_tags():
return ["local"]
def tf_additional_plugin_deps():
return select({
"//tensorflow:with_xla_support": ["//tensorflow/compiler/jit"],
"//conditions:default": [],
})
def tf_additional_xla_deps_py():
return []
def tf_additional_license_deps():
return select({
"//tensorflow:with_xla_support": ["@llvm//:LICENSE.TXT"],
"//conditions:default": [],
})
def tf_additional_verbs_deps():
return select({
"//tensorflow:with_verbs_support": [
"//tensorflow/contrib/verbs:verbs_server_lib",
"//tensorflow/contrib/verbs:grpc_verbs_client"],
"//conditions:default": [],
})
def tf_additional_mpi_deps():
return select({
"//tensorflow:with_mpi_support": [
"//tensorflow/contrib/mpi:mpi_server_lib"],
"//conditions:default": [],
})
| apache-2.0 | Python |
cfc11472f16040d06915a58c3dd3c027b77fdd80 | set up testing with py.test | hdashnow/STRetch,Oshlack/STRetch,Oshlack/STRetch,hdashnow/STRetch,Oshlack/STRetch | scripts/tests/test_identify_locus.py | scripts/tests/test_identify_locus.py | import sys
sys.path.append("..")
from identify_locus import *
def test_answer():
assert 5 == 5
| mit | Python | |
4f1e1874f3ed9af8922aa26eb20230dbee5e6d73 | Add some example code for creation of disk partitions. | vpodzime/blivet,dwlehman/blivet,rhinstaller/blivet,vojtechtrefny/blivet,jkonecny12/blivet,vojtechtrefny/blivet,AdamWill/blivet,AdamWill/blivet,rvykydal/blivet,rvykydal/blivet,rhinstaller/blivet,jkonecny12/blivet,dwlehman/blivet,vpodzime/blivet | examples/partitioning.py | examples/partitioning.py | import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create two disk image files on which to create new devices
disk1_file = create_sparse_file(b, "disk1", 100000)
b.config.diskImages["disk1"] = disk1_file
disk2_file = create_sparse_file(b, "disk2", 100000)
b.config.diskImages["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
disk2 = b.devicetree.getDeviceByName("disk2")
b.initializeDisk(disk1)
b.initializeDisk(disk2)
# new partition on either disk1 or disk2 with base size 10000 MiB and growth
# up to a maximum size of 50000 MiB
dev = b.newPartition(size=10000, grow=True, maxsize=50000,
parents=[disk1, disk2])
b.createDevice(dev)
# new partition on disk1 with base size 5000 MiB and unbounded growth and an
# ext4 filesystem
dev = b.newPartition(fmt_type="ext4", size=5000, grow=True, parents=[disk1])
b.createDevice(dev)
# new partition on any suitable disk with a fixed size of 2000 MiB formatted
# as swap space
dev = b.newPartition(fmt_type="swap", size=2000)
b.createDevice(dev)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
os.unlink(disk2_file)
| lgpl-2.1 | Python | |
d35dd563b25f85d23191169e8f87882129f64adb | Add images admin | stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten | features/images/admin.py | features/images/admin.py | from django.contrib import admin
from . import models
admin.site.register(models.Image)
| agpl-3.0 | Python | |
a0c23d3fc448f916ffdd668a2daf56408dd9c0c0 | Add simple implementation for a pre-commit hook | EliRibble/mothermayi | mothermayi/pre_commit.py | mothermayi/pre_commit.py | import mothermayi.entryway
import mothermayi.git
def handle_plugins(entries):
for entry in entries:
result = entry()
def run():
with mothermayi.git.stash():
entries = mothermayi.entryway.get_entries('pre-commit')
handle_plugins(entries)
| mit | Python | |
d9c283c37f350c8b1629e671f38ef447ef1bbd1f | Add inventory unit tests | dtroyer/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk,openstack-infra/shade,dtroyer/python-openstacksdk,openstack-infra/shade,stackforge/python-openstacksdk,openstack/python-openstacksdk | shade/tests/unit/test_inventory.py | shade/tests/unit/test_inventory.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os_client_config
from shade import inventory
from shade.tests.unit import base
@mock.patch("os_client_config.config.OpenStackConfig")
class TestInventory(base.TestCase):
def setUp(self):
super(TestInventory, self).setUp()
@mock.patch("shade.OpenStackCloud")
def test__init(self, mock_cloud, mock_config):
mock_config.return_value.get_all_clouds.return_value = [{}]
inv = inventory.OpenStackInventory()
mock_config.assert_called_once_with(
config_files=os_client_config.config.CONFIG_FILES
)
self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds))
self.assertTrue(mock_config.return_value.get_all_clouds.called)
@mock.patch("shade.OpenStackCloud")
def test_list_hosts(self, mock_cloud, mock_config):
mock_config.return_value.get_all_clouds.return_value = [{}]
inv = inventory.OpenStackInventory()
server = dict(id='server_id', name='server_name')
self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds))
inv.clouds[0].list_servers.return_value = [server]
inv.clouds[0].get_openstack_vars.return_value = server
ret = inv.list_hosts()
inv.clouds[0].list_servers.assert_called_once_with()
inv.clouds[0].get_openstack_vars.assert_called_once_with(server)
self.assertEqual([server], ret)
@mock.patch("shade.OpenStackCloud")
def test_search_hosts(self, mock_cloud, mock_config):
mock_config.return_value.get_all_clouds.return_value = [{}]
inv = inventory.OpenStackInventory()
server = dict(id='server_id', name='server_name')
self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds))
inv.clouds[0].list_servers.return_value = [server]
inv.clouds[0].get_openstack_vars.return_value = server
ret = inv.search_hosts('server_id')
self.assertEqual([server], ret)
@mock.patch("shade.OpenStackCloud")
def test_get_host(self, mock_cloud, mock_config):
mock_config.return_value.get_all_clouds.return_value = [{}]
inv = inventory.OpenStackInventory()
server = dict(id='server_id', name='server_name')
self.assertIsInstance(inv.clouds, list)
self.assertEqual(1, len(inv.clouds))
inv.clouds[0].list_servers.return_value = [server]
inv.clouds[0].get_openstack_vars.return_value = server
ret = inv.get_host('server_id')
self.assertEqual(server, ret)
| apache-2.0 | Python | |
ec69c9260a2205ab0b7b9b7c4903ae2ced6029a1 | add settings file | andreas-h/mss-chem | msschem_settings.py | msschem_settings.py | import os.path
from msschem.models import CAMSRegDriver
from msschem.download import CAMSRegDownload
register_datasources = {
'CAMSReg_ENSEMBLE': CAMSRegDriver(
dict(
dldriver=CAMSRegDownload(
token='MYTOKEN',
modelname='ENSEMBLE'),
force=False,
basepath=os.path.expanduser('~/tmp/mss/data/'),
name='CAMSReg-ENSEMBLE',
temppath=None,
)
)
}
| mit | Python | |
e8d579177ee9fae714dcc3dfc36b5a5c234dfa0e | Create simulator_follow_blue.py | CSavvy/python | simulator/simulator_follow_blue.py | simulator/simulator_follow_blue.py | from Myro import *
init("sim")
setPictureSize("small")
autoCamera()
#'''
#chase blue setting
ylow = 0
yhigh = 255
ulow = 170 #130 for the straw # originally 134
uhigh = 250 # up to 158, really 145, 157 for the straw #142 originally
vlow = 90 # down to 109, really 114, 110 for the straw #114 originally
vhigh = 120 #really up to 123, 127 for the straw #121 originally
xsum = 0
ysum = 0
num = 0
'''
#chase red setting
ylow = 0
yhigh = 255
ulow = 122
uhigh = 134
vlow = 143
vhigh = 183
xsum = 0
ysum = 0
num = 0
#'''
s = 2
left = -s
right = s
time = 0.5/s
c = 0
motors(5,-5,0.4*2)
while True:
wait(2)
print(getIR())
while getIR() == [1, 1]:
num = 1
xsum = 0
ysum = 0
p = takePicture()
show(p)
for pixel in getPixels(p):
color = getColor(pixel)
r = getRed(pixel)
g = getGreen(pixel)
b = getBlue(pixel)
y, u, v = rgb2yuv(r,g,b)
#this is the color of interest, highlighted in red
if u >= ulow and u <= uhigh and v >= vlow and v <= vhigh:
num = num + 1
xsum += getX(pixel)
ysum += getY(pixel)
setRed(pixel, 200)
#print(rgb2yuv(r,g,b))#
else: #colors that are not of interest, highlighted in blue
'''
c += 1
if (c % 25 == 0):
print(rgb2yuv(r,g,b))#
'''
#print(rgb2yuv(r,g,b))
setBlue(pixel, 200)
print("Number of pixels of interest color: ", num)
xavg = xsum / num
print("Average x position of interest color: ", xavg)
yavg = ysum / num
#print(yavg)
#continue #uncomment this line to prevent motion
if num > 200: #there are enough pixels of the color of interest
if xavg < 90:# the object is to the left
#turnBy(-10)
left = -s
right = s
time = 0.5/s
motors(left,right,time)
elif xavg > 190:# the object is to the right
#turnBy(10)
left = s
right = -s
time = 0.5/s
motors(left,right,time)
else:#the object is ahead
forward(s,0.8)
#beep(0.1, 600)
else:#no object of interest is detected
print(left,right,time)
motors(left,right,time)
#beep(0.1, 670, 690)
| mit | Python | |
7ebded009386fe5b9ef4a6719fe100fa0a3836ba | Create 0013.py | Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python | Liez-python-code/0013/0013.py | Liez-python-code/0013/0013.py | # 网址:http://tieba.baidu.com/p/4341640851
import os
import re
import urllib.request
def pic_collector(url):
    """Download every post image from a Baidu Tieba thread into ./pic_collection.

    :param url: URL of the Tieba thread page to scrape.

    Side effects: creates the pic_collection directory, changes the current
    working directory into it, and writes one numbered .jpg per image found.
    """
    content = urllib.request.urlopen(url).read()
    # NOTE(review): this pattern is tied to one exact attribute order and a
    # fixed 450x450 size in the page markup; it silently matches nothing if
    # Tieba changes its HTML.
    r = re.compile('<img class="BDE_Image" pic_type="1" width="450" height="450" src="(.*?)" ')
    pic_list = r.findall(content.decode('utf-8'))
    # exist_ok avoids the FileExistsError that os.mkdir raised on re-runs
    os.makedirs('pic_collection', exist_ok=True)
    os.chdir(os.path.join(os.getcwd(), 'pic_collection'))
    for i, pic_url in enumerate(pic_list):
        pic_num = str(i) + '.jpg'
        urllib.request.urlretrieve(pic_url, pic_num)
        print("success!" + pic_url)

pic_collector("http://tieba.baidu.com/p/4341640851")
| mit | Python | |
cc488a5082a98f9e0a5a60a86815cb8c518fcc2d | Add test for PosRecChiSquareGamma | XENON1T/pax,XENON1T/pax | tests/test_posrecchisquaregamma.py | tests/test_posrecchisquaregamma.py | import unittest
import numpy as np
from pax import core, plugin
from pax.datastructure import Event, Peak
from pax.utils import empty_event
class TestPosRecChiSquareGamma(unittest.TestCase):
def setUp(self):
self.pax = core.Processor(config_names='posrecChi',
just_testing=True,
config_dict={'pax': {'plugin_group_names':
['test'], 'test':
'PosRecChiSquareGamma.PosRecChiSquareGamma'}})
self.plugin = self.pax.get_plugin_by_name('PosRecChiSquareGamma')
self.e = empty_event()
def example_event(self, channels_with_something):
channels = np.array(channels_with_something, dtype='float64')
e = empty_event()
e.peaks.append(Peak({'left': 5,
'right': 9,
'type': 'S2',
'detector': 'tpc',
'area_per_channel': channels}))
return e
def test_get_chisquare_plugin(self):
self.assertIsInstance(self.plugin, plugin.TransformPlugin)
self.assertEqual(self.plugin.__class__.__name__, 'PosRecChiSquareGamma')
def test_posrec(self):
e = self.example_event([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0,
0, 1, 1, 0, 0, 3, 6, 7, 2, 2, 0, 0, 0, 0, 0,
1, 1, 0, 0, 0, 0, 0, 1, 2, 0, 0, 0, 1, 2, 2,
3, 2, 14, 33, 12, 5, 2, 2, 0, 1, 0, 0, 1, 1,
0, 1, 0, 0, 0, 0, 0, 0, 5, 40, 226, 45, 7, 0,
2, 1, 1, 0, 1, 0, 0, 0, 1, 0, 7, 14, 36, 3, 0,
2, 1, 0, 0, 0, 1, 4, 4, 3, 1, 0])
e = self.plugin.transform_event(e)
self.assertIsInstance(e, Event)
self.assertEqual(len(e.peaks), 1)
self.assertEqual(len(e.S2s()), 1)
self.assertEqual(len(e.peaks[0].reconstructed_positions), 1)
rp = e.peaks[0].reconstructed_positions[0]
self.assertEqual(rp.algorithm, 'PosRecChiSquareGamma')
# position agrees within one cm for test
x_truth = 3.1625
y_truth = -10.0172
self.assertTrue(rp.x < x_truth+1 and rp.x > x_truth-1)
self.assertTrue(rp.y < y_truth+1 and rp.y > y_truth-1)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python | |
d8eb3d1f840dbcfbe32bf98e757b841fd062ad9e | Create ContainDupII_001.py | Chasego/codirit,Chasego/cod,Chasego/cod,Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/cod,Chasego/codi,Chasego/codi,Chasego/codi,Chasego/cod,cc13ny/algo,Chasego/codirit,Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codi,cc13ny/algo,cc13ny/algo,Chasego/codirit,cc13ny/Allin,cc13ny/Allin | leetcode/219-Contains-Duplicate-II/ContainDupII_001.py | leetcode/219-Contains-Duplicate-II/ContainDupII_001.py | class Solution:
# @param {integer[]} nums
# @param {integer} k
# @return {boolean}
def containsNearbyDuplicate(self, nums, k):
tb = {}
for i in range(len(nums)):
num = nums[i]
if num not in tb:
tb[num] = i
elif i - tb[num] <= k:
return True
else:
tb[num] = i
return False
| mit | Python | |
16615d7794b127e9752b1a2b0bd8e70adfb0954c | Add tests for inplace subset | theislab/anndata | anndata/tests/test_inplace_subset.py | anndata/tests/test_inplace_subset.py | import numpy as np
import pytest
from sklearn.utils.testing import (
assert_array_equal
)
from scipy import sparse
from anndata.tests.helpers import (
gen_adata,
subset_func,
asarray
)
# Parametrized fixture: each test using it runs once per container type so the
# in-place subset logic is exercised for dense and both sparse layouts of X.
@pytest.fixture(
    params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
    ids=["np_array", "scipy_csr", "scipy_csc"]
)
def matrix_type(request):
    # The parameter is the constructor/converter callable itself.
    return request.param
# TODO: Test values of .uns
def test_inplace_subset_var(matrix_type, subset_func):
    """_inplace_subset_var must equal taking a var-axis view and copying it.

    Strategy: subset a copy in place, build the same subset as
    ``orig[:, idx].copy()``, then compare X, obs/var, obsm/varm and layers.
    gen_adata/subset_func are anndata test helpers (project-local).
    """
    orig = gen_adata((30, 30), X_type=matrix_type)
    subset_idx = subset_func(orig.var_names)

    modified = orig.copy()
    from_view = orig[:, subset_idx].copy()
    modified._inplace_subset_var(subset_idx)

    assert_array_equal(asarray(from_view.X), asarray(modified.X))
    assert_array_equal(from_view.obs, modified.obs)
    assert_array_equal(from_view.var, modified.var)
    for k in from_view.obsm:
        # obs-axis annotations must survive a var-axis subset unchanged
        assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
        assert_array_equal(asarray(orig.obsm[k]), asarray(modified.obsm[k]))
    for k in from_view.varm:
        assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
    for k in from_view.layers:
        assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
def test_inplace_subset_obs(matrix_type, subset_func):
    """_inplace_subset_obs must equal taking an obs-axis view and copying it.

    Mirror of test_inplace_subset_var for the other axis: here the obs axis is
    subset, so varm (not obsm) must be untouched relative to the original.
    """
    orig = gen_adata((30, 30), X_type=matrix_type)
    subset_idx = subset_func(orig.obs_names)

    modified = orig.copy()
    from_view = orig[subset_idx, :].copy()
    modified._inplace_subset_obs(subset_idx)

    assert_array_equal(asarray(from_view.X), asarray(modified.X))
    assert_array_equal(from_view.obs, modified.obs)
    assert_array_equal(from_view.var, modified.var)
    for k in from_view.obsm:
        assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
    for k in from_view.varm:
        # var-axis annotations must survive an obs-axis subset unchanged
        assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
        assert_array_equal(asarray(orig.varm[k]), asarray(modified.varm[k]))
    for k in from_view.layers:
        assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
| bsd-3-clause | Python | |
db660f2a155b490f6a7f553ef19b8eaf8f9c9776 | Create สร้าง-LINE-Bot-ด้วย-Python.py | wannaphongcom/code-python3-blog | สร้าง-LINE-Bot-ด้วย-Python.py | สร้าง-LINE-Bot-ด้วย-Python.py | # อ่าบทความ https://python3.wannaphong.com/blog/2016/10/02/สร้าง-line-bot-ด้วย-python/
from flask import Flask, request
import json
import requests
app = Flask(__name__)

@app.route('/')
def index():
    """Simple health-check route for the bot server."""
    return "Hello World!"
# Webhook endpoint: LINE delivers message events here via POST.
@app.route('/callback', methods=['POST'])
def callback():
    """Handle a LINE webhook delivery.

    Reads the JSON body, extracts the reply token of the first event and
    answers it with a fixed text message. Always returns HTTP 200 so LINE
    does not retry the delivery.
    """
    # request.get_json() already returns a parsed dict; the original
    # serialized it with json.dumps only to json.loads it again.
    payload = request.get_json()
    reply_token = payload["events"][0]['replyToken']
    print("ผู้ใช้:", reply_token)  # log the reply token
    sendText(reply_token, 'งง')  # fixed canned reply
    return '', 200
def sendText(user, text):
    """Send a reply through the LINE Messaging API reply endpoint.

    :param user: reply token taken from the incoming webhook event
    :param text: message text to send back
    """
    LINE_API = 'https://api.line.me/v2/bot/message/reply'
    Authorization = 'Bearer ENTER_ACCESS_TOKEN' # replace ENTER_ACCESS_TOKEN with the channel access token
    headers = {
        'Content-Type': 'application/json; charset=UTF-8',
        'Authorization':Authorization
    }
    # Reply payload format per the LINE Messaging API: one text message
    # bound to the event's reply token.
    data = json.dumps({
        "replyToken":user,
        "messages":[{
            "type":"text",
            "text":text
        }]
    })
    #print("data:",data)
    r = requests.post(LINE_API, headers=headers, data=data) # send the request
    #print(r.text)

if __name__ == '__main__':
    app.run(debug=True)  # development server only
| mit | Python | |
e27bc1f295e9e3f41f5d1fc8b964283188e6e5d5 | add GP classification implementation | feuerchop/h3lib | h3lib.learn.api/H3GPC.py | h3lib.learn.api/H3GPC.py | __author__ = 'morgan'
import numpy as np
from H3Kernels import kernel
class H3GPC(object):
    '''
    Gaussian process binary classification with a Laplace approximation and
    a logistic likelihood, following Rasmussen & Williams, chapter 3
    (Algorithms 3.1 / 3.2).
    '''
    def __init__(self, sigma=0.5, kernel='rbf', gamma=0.5,
                 coef0=1, degree=2):
        '''
        :param sigma: noise level (stored for API compatibility; unused by fit)
        :param kernel: kernel name, one of {'rbf', 'linear', 'polynomial'}
        :param gamma: e.g. in the rbf kernel exp(-gamma |xi - xj|^2)
        :param coef0: linear-kernel offset, e.g. coef0 * X X^T
        :param degree: polynomial-kernel degree
        '''
        self.sigma = sigma
        self.kernel = kernel
        self.gamma = gamma
        self.coef0 = coef0
        self.degree = degree
        # logistic sigmoid: p(y = +1 | f)
        self.likelihood_f = lambda x: 1. / (1 + np.exp(-x))

    def fit(self, X, y, Xt):
        '''
        Fit the Laplace approximation and predict the latent test distribution.

        :param X: (n, d) training inputs
        :param y: (n,) binary labels in {-1, +1}
        :param Xt: (m, d) test inputs
        :return: (ft_mean, ft_var) -- latent predictive mean (m,) and
                 covariance (m, m) at Xt

        Fixes over the original implementation: the Newton loop never ran
        (its convergence test was true at initialization and f_old was never
        updated); W used the wrong sign, giving NaN in sqrt(W); the jitter was
        added to every kernel entry instead of the diagonal; and the
        predictive equations mixed up shapes/transposes.
        '''
        n = X.shape[0]
        kern_args = dict(metric=self.kernel, gamma=self.gamma,
                         coef0=self.coef0, degree=self.degree,
                         filter_params=True)
        # jitter on the diagonal regularizes K without distorting the kernel
        K = kernel(X, **kern_args) + 1e-3 * np.eye(n)
        K_xt = kernel(Xt, X, **kern_args)   # (m, n) cross-covariance
        K_tt = kernel(Xt, **kern_args)      # (m, m) test covariance

        t = 0.5 * (y + 1)                   # labels mapped from {-1,+1} to {0,1}
        f = np.zeros(n)
        eye = np.eye(n)
        for _ in range(100):                # bounded Newton iterations
            pi = self.likelihood_f(f)
            grad = t - pi                   # d log p(y|f) / df
            w = pi * (1 - pi)               # W = -dd log p(y|f); positive (R&W 3.26)
            W_root = np.diag(np.sqrt(w))
            B = eye + W_root.dot(K).dot(W_root)
            B_inv = np.linalg.inv(B)
            # f_new = (K^-1 + W)^-1 (W f + grad), using the stable identity
            # (K^-1 + W)^-1 = K - K W^1/2 B^-1 W^1/2 K
            inner = K - K.dot(W_root).dot(B_inv).dot(W_root).dot(K)
            f_new = inner.dot(w * f + grad)
            if np.linalg.norm(f_new - f) < 1e-4:
                f = f_new
                break
            f = f_new

        # Predictive latent distribution at the mode (R&W eqs. 3.21 and 3.24)
        pi = self.likelihood_f(f)
        w = pi * (1 - pi)
        W_root = np.diag(np.sqrt(w))
        B_inv = np.linalg.inv(eye + W_root.dot(K).dot(W_root))
        ft_mean = K_xt.dot(t - pi)
        V = W_root.dot(B_inv).dot(W_root)
        ft_var = K_tt - K_xt.dot(V).dot(K_xt.T)
        return ft_mean, ft_var
return ft_mean, ft_var
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    import utils.plot_utils as pltool  # project-local plotting helpers

    # Toy binary problem: two Gaussian blobs, labels +1 / -1.
    X1 = np.random.multivariate_normal([-2.5, 2.5], [[0.5, 0], [0, 1.5]], 50)
    y1 = np.ones(X1.shape[0])
    X2 = np.random.multivariate_normal([2.5, 2.5], [[1.5, 0], [0, 0.5]], 50)
    y2 = -np.ones(X2.shape[0])
    X, y = np.r_[X1, X2], np.r_[y1, y2]
    # Held-out test points drawn from the same two blobs.
    X1t = np.random.multivariate_normal([-2.5, 2.5], [[0.5, 0], [0, 1.5]], 10)
    y1t = np.ones(X1t.shape[0])
    X2t = np.random.multivariate_normal([2.5, 2.5], [[1.5, 0], [0, 0.5]], 10)
    y2t = -np.ones(X2t.shape[0])
    Xt, yt = np.r_[X1t, X2t], np.r_[y1t, y2t]
    plt.plot(X[y == 1, 0], X[y == 1, 1], 'ro')
    plt.plot(X[y == -1, 0], X[y == -1, 1], 'bo')
    pltool.setAxSquare(plt.gca())
    clf = H3GPC(gamma=0.5)
    ft_mean, ft_var = clf.fit(X, y, Xt)
    # NOTE: Python 2 print statement -- this file predates Python 3 support.
    print ft_mean, ft_var
    plt.show()
| mit | Python | |
8f227ebc4900b931a90e0c87d9f61b4c6ea0e084 | add simple topology example | knodir/son-emu,knodir/son-emu,knodir/son-emu,knodir/son-emu | src/emuvim/examples/sonata_simple_topology.py | src/emuvim/examples/sonata_simple_topology.py | """
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
A simple topology with two PoPs for the y1 demo story board.
(dc1) <<-->> s1 <<-->> (dc2)
- SAP deployment enabled
- learning switch enabled
"""
import logging
from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
from mininet.node import RemoteController
logging.basicConfig(level=logging.INFO)


def create_topology1():
    """Create and run the demo emulation topology.

    Starts the REST API endpoint (port 5001) and the SONATA dummy
    gatekeeper (port 5000) in background threads before the network,
    drops into the Mininet CLI, and tears everything down on CLI exit.

    NOTE(review): the module docstring above describes two PoPs joined by
    a switch, but only a single datacenter dc1 is created here -- confirm
    which is intended.
    """
    # create topology
    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
    dc1 = net.addDatacenter("dc1")

    # add the command line interface endpoint to each DC (REST API)
    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
    rapi1.connectDCNetwork(net)
    rapi1.connectDatacenter(dc1)
    # run API endpoint server (in another thread, don't block)
    rapi1.start()

    # add the SONATA dummy gatekeeper to each DC
    # (deploy_sap=True also deploys service access points)
    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
    sdkg1.connectDatacenter(dc1)
    # run the dummy gatekeeper (in another thread, don't block)
    sdkg1.start()

    # start the emulation platform, block in the CLI, clean up afterwards
    net.start()
    net.CLI()
    net.stop()
net.stop()
def main():
    """Entry point: configure Mininet logging, then build and run the topology."""
    setLogLevel('info')  # set Mininet loglevel
    create_topology1()


if __name__ == '__main__':
    main()
| apache-2.0 | Python | |
eb87d38a65620c7e4a716dca8a8b9488b3a338d3 | Fix syntax error in collector test | tellapart/Diamond,socialwareinc/Diamond,python-diamond/Diamond,works-mobile/Diamond,tuenti/Diamond,Ensighten/Diamond,TinLe/Diamond,sebbrandt87/Diamond,actmd/Diamond,Nihn/Diamond-1,EzyInsights/Diamond,bmhatfield/Diamond,zoidbergwill/Diamond,jaingaurav/Diamond,krbaker/Diamond,TAKEALOT/Diamond,thardie/Diamond,thardie/Diamond,cannium/Diamond,cannium/Diamond,tusharmakkar08/Diamond,acquia/Diamond,acquia/Diamond,signalfx/Diamond,timchenxiaoyu/Diamond,eMerzh/Diamond-1,signalfx/Diamond,anandbhoraskar/Diamond,szibis/Diamond,szibis/Diamond,skbkontur/Diamond,Slach/Diamond,stuartbfox/Diamond,russss/Diamond,Ormod/Diamond,timchenxiaoyu/Diamond,python-diamond/Diamond,hvnsweeting/Diamond,jaingaurav/Diamond,ramjothikumar/Diamond,Nihn/Diamond-1,mzupan/Diamond,Netuitive/Diamond,mfriedenhagen/Diamond,stuartbfox/Diamond,cannium/Diamond,Ormod/Diamond,saucelabs/Diamond,socialwareinc/Diamond,Slach/Diamond,sebbrandt87/Diamond,eMerzh/Diamond-1,ramjothikumar/Diamond,russss/Diamond,tusharmakkar08/Diamond,thardie/Diamond,russss/Diamond,Ensighten/Diamond,Precis/Diamond,dcsquared13/Diamond,Ssawa/Diamond,gg7/diamond,h00dy/Diamond,hvnsweeting/Diamond,codepython/Diamond,tusharmakkar08/Diamond,MichaelDoyle/Diamond,janisz/Diamond-1,disqus/Diamond,EzyInsights/Diamond,rtoma/Diamond,Ssawa/Diamond,Clever/Diamond,rtoma/Diamond,zoidbergwill/Diamond,anandbhoraskar/Diamond,signalfx/Diamond,hamelg/Diamond,EzyInsights/Diamond,TAKEALOT/Diamond,tellapart/Diamond,python-diamond/Diamond,krbaker/Diamond,EzyInsights/Diamond,works-mobile/Diamond,hamelg/Diamond,MichaelDoyle/Diamond,Ensighten/Diamond,tuenti/Diamond,joel-airspring/Diamond,ramjothikumar/Diamond,hvnsweeting/Diamond,mzupan/Diamond,actmd/Diamond,Netuitive/netuitive-diamond,tuenti/Diamond,anandbhoraskar/Diamond,CYBERBUGJR/Diamond,jumping/Diamond,tuenti/Diamond,eMerzh/Diamond-1,codepython/Diamond,Netuitive/netuitive-diamond,TinLe/Diamond,Netuitive/Diamond,Clever/Diamond,joel-airspri
ng/Diamond,codepython/Diamond,mfriedenhagen/Diamond,gg7/diamond,skbkontur/Diamond,actmd/Diamond,szibis/Diamond,Basis/Diamond,jaingaurav/Diamond,stuartbfox/Diamond,Precis/Diamond,Basis/Diamond,Netuitive/Diamond,Netuitive/netuitive-diamond,TinLe/Diamond,hvnsweeting/Diamond,tusharmakkar08/Diamond,joel-airspring/Diamond,skbkontur/Diamond,thardie/Diamond,russss/Diamond,rtoma/Diamond,stuartbfox/Diamond,rtoma/Diamond,zoidbergwill/Diamond,Basis/Diamond,jriguera/Diamond,skbkontur/Diamond,krbaker/Diamond,actmd/Diamond,janisz/Diamond-1,disqus/Diamond,h00dy/Diamond,mfriedenhagen/Diamond,Netuitive/Diamond,saucelabs/Diamond,gg7/diamond,saucelabs/Diamond,Nihn/Diamond-1,Clever/Diamond,h00dy/Diamond,TAKEALOT/Diamond,ramjothikumar/Diamond,jumping/Diamond,works-mobile/Diamond,Slach/Diamond,codepython/Diamond,dcsquared13/Diamond,zoidbergwill/Diamond,jriguera/Diamond,bmhatfield/Diamond,acquia/Diamond,Netuitive/netuitive-diamond,Ssawa/Diamond,eMerzh/Diamond-1,bmhatfield/Diamond,CYBERBUGJR/Diamond,TinLe/Diamond,Precis/Diamond,mzupan/Diamond,krbaker/Diamond,sebbrandt87/Diamond,timchenxiaoyu/Diamond,TAKEALOT/Diamond,CYBERBUGJR/Diamond,signalfx/Diamond,dcsquared13/Diamond,jriguera/Diamond,socialwareinc/Diamond,szibis/Diamond,CYBERBUGJR/Diamond,Ensighten/Diamond,timchenxiaoyu/Diamond,tellapart/Diamond,Ormod/Diamond,disqus/Diamond,hamelg/Diamond,anandbhoraskar/Diamond,janisz/Diamond-1,Slach/Diamond,socialwareinc/Diamond,jumping/Diamond,tellapart/Diamond,hamelg/Diamond,MichaelDoyle/Diamond,Clever/Diamond,Ssawa/Diamond,Precis/Diamond,works-mobile/Diamond,cannium/Diamond,saucelabs/Diamond,dcsquared13/Diamond,mfriedenhagen/Diamond,Basis/Diamond,MichaelDoyle/Diamond,bmhatfield/Diamond,joel-airspring/Diamond,jriguera/Diamond,jumping/Diamond,mzupan/Diamond,Ormod/Diamond,Nihn/Diamond-1,jaingaurav/Diamond,h00dy/Diamond,janisz/Diamond-1,acquia/Diamond,gg7/diamond,sebbrandt87/Diamond | src/collectors/numa/test/testnuma.py | src/collectors/numa/test/testnuma.py | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42,
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
    """Unit tests for the Diamond NumaCollector."""

    def setUp(self):
        # Build the collector from the standard test config with a 10s interval.
        config = get_collector_config('NumaCollector', {
            'interval': 10
        })

        self.collector = NumaCollector(config, None)

    def test_import(self):
        # Smoke test: the collector class was importable at module load.
        self.assertTrue(NumaCollector)

    @patch.object(Collector, 'publish')
    def test(self, publish_mock):
        self.collector.collect()

        # Metric names/values expected to be published by one collection run.
        # The comma after 42 was missing in the original, which made this
        # dict literal a SyntaxError.
        metrics = {
            'node_0_free_MB': 42,
            'node_0_size_MB': 402
        }

        self.setDocNuma(collector=self.collector.__class__.__name__,
                        metrics=metrics,
                        defaultpath=self.collector.config['path'])

        self.assertPublishedMany(publish_mock, metrics)

################################################################################
if __name__ == "__main__":
    unittest.main()
| mit | Python |
e238d7cfe1a4eace62ba6a9d199813f317c34c6a | Create AnalyticalDistributions.py | Effective-Quadratures/Effective-Quadratures,psesh/Effective-Quadratures | effective_quadratures/AnalyticalDistributions.py | effective_quadratures/AnalyticalDistributions.py | #!/usr/bin/env python
import numpy as np
from scipy.special import erf
"""
Analytical definitions for some sample PDFs. Functions in this file are
called by PolyParams when constructing "custom" orthogonal polynomials,
which require Stieltejes procedure for computing the recurrence coefficients.
Pranay Seshadri
ps583@cam.ac.uk
Copyright (c) 2016 by Pranay Seshadri
"""
def Gaussian(N, mu, sigma):
    """Return N grid points and normalized Gaussian PDF weights.

    :param N: number of grid points
    :param mu: mean of the Gaussian
    :param sigma: standard deviation (> 0)
    :return: (x, w) where w sums to 1

    The original line had an unbalanced parenthesis (a SyntaxError) and, as
    written, divided by the exponential instead of multiplying by it.
    """
    # Wide support scaled by the standard deviation. NOTE(review): the grid is
    # centered at 0 regardless of mu, matching the original -- confirm intent.
    x = np.linspace(-15 * sigma, 15 * sigma, N)
    w = np.exp(-(x - mu) ** 2 / (2.0 * sigma ** 2)) / np.sqrt(2.0 * np.pi * sigma ** 2)
    w = w / np.sum(w)  # normalize to a discrete probability mass
    return x, w
def truncatedGaussian(N, mu, sigma, a, b):
    """Return N grid points on [a, b] with truncated-Gaussian weights.

    :param N: number of grid points
    :param mu: mean of the underlying Gaussian
    :param sigma: standard deviation (> 0)
    :param a: lower truncation bound
    :param b: upper truncation bound (> a)
    :return: (x, w) where w sums to 1

    The original stub had no body (a SyntaxError); this mirrors Gaussian()
    but restricts the support to [a, b] and renormalizes.
    """
    x = np.linspace(a, b, N)
    w = np.exp(-(x - mu) ** 2 / (2.0 * sigma ** 2))
    w = w / np.sum(w)
    return x, w


def GaussianPDF():
    """Placeholder kept from the original sketch; not implemented yet."""
    raise NotImplementedError("GaussianPDF is not implemented yet")


def GaussianCDF():
    """Placeholder kept from the original sketch (would use scipy.special.erf)."""
    raise NotImplementedError("GaussianCDF is not implemented yet")
| lgpl-2.1 | Python | |
17ba5378934d043052c3e368b4b5468c3a24a7e2 | Test example to specify initial harmonies | gfairchild/pyHarmonySearch | examples/2-D_continuous_initial_harmonies_set.py | examples/2-D_continuous_initial_harmonies_set.py | #!/usr/bin/env python
"""
Copyright (c) 2013, Los Alamos National Security, LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Los Alamos National Security, LLC nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
module_dir = '/Users/szhan/Desktop/Projects/pyHarmonySearch/'
sys.path.append(module_dir)
from pyharmonysearch import ObjectiveFunctionInterface, harmony_search
from math import pow
import random
class ObjectiveFunction(ObjectiveFunctionInterface):
    """
    Toy objective with two continuous variables and a fixed random seed so
    every run is reproducible.

    Goal:

        maximize -(x^2 + (y+1)^2) + 4
        The maximum is 4 at (0, -1).

    Because every variable is continuous, get_index() and
    get_num_discrete_values() are never called and stay unimplemented.
    """

    def __init__(self):
        # search space: each coordinate ranges over [-1000, 1000]
        self._lower_bounds = [-1000, -1000]
        self._upper_bounds = [1000, 1000]
        self._variable = [True, True]

        # harmony search hyper-parameters
        self._maximize = True        # maximization problem
        self._max_imp = 50000        # maximum number of improvisations
        self._hms = 100              # harmony memory size
        self._hmcr = 0.75            # harmony memory considering rate
        self._par = 0.5              # pitch adjusting rate
        self._mpap = 0.25            # max pitch adjustment proportion (continuous vars)
        self._mpai = 2               # max pitch adjustment index (discrete vars)
        self._random_seed = 8675309  # fixed seed -> reproducible results

    def get_fitness(self, vector):
        """Evaluate -(x^2 + (y+1)^2) + 4 at the candidate point."""
        x, y = vector[0], vector[1]
        return -(x ** 2 + (y + 1) ** 2) + 4

    def get_value(self, i, index=None):
        """Draw variable i uniformly from its range (index ignored: continuous)."""
        low = self._lower_bounds[i]
        high = self._upper_bounds[i]
        return random.uniform(low, high)

    def get_lower_bound(self, i):
        return self._lower_bounds[i]

    def get_upper_bound(self, i):
        return self._upper_bounds[i]

    def is_variable(self, i):
        return self._variable[i]

    def is_discrete(self, i):
        # every variable in this problem is continuous
        return False

    def get_num_parameters(self):
        return len(self._lower_bounds)

    def use_random_seed(self):
        return hasattr(self, '_random_seed') and self._random_seed

    def get_random_seed(self):
        return self._random_seed

    def get_max_imp(self):
        return self._max_imp

    def get_hmcr(self):
        return self._hmcr

    def get_par(self):
        return self._par

    def get_hms(self):
        return self._hms

    def get_mpai(self):
        return self._mpai

    def get_mpap(self):
        return self._mpap

    def maximize(self):
        return self._maximize
if __name__ == '__main__':
    obj_fun = ObjectiveFunction()
    num_processes = 1
    num_iterations = 1 # because random_seed is defined, there's no point in running this multiple times
    # Seed the entire harmony memory with the single point (2.0, 3.0) to
    # exercise harmony_search's initial_harmonies parameter.
    # NOTE(review): the literal 100 duplicates obj_fun's harmony memory size
    # (_hms = 100) -- keep the two in sync.
    initial_harmonies = []
    for i in range(100):
        initial_harmonies.append([2.0, 3.0])
    results = harmony_search(obj_fun, num_processes, num_iterations, initial_harmonies)
    print('Elapsed time: {}\nBest harmony: {}\nBest fitness: {}'.format(results.elapsed_time, results.best_harmony, results.best_fitness))
| bsd-3-clause | Python | |
0263eb5a96f610b5a2e77d11ad26e892d78c9eda | add 191 | EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler,EdisonAlgorithms/ProjectEuler,zeyuanxy/project-euler,zeyuanxy/project-euler,zeyuanxy/project-euler,EdisonAlgorithms/ProjectEuler | vol4/191.py | vol4/191.py | if __name__ == "__main__":
def prize_strings(days=30):
    """Count prize strings of the given length (Project Euler 191).

    A prize string over {L, A, O} contains no three consecutive 'L' and at
    most one 'A'. Uses the linear recurrence the original script encoded in
    its state vector [n, t, a, b, c, d, e, f], where n is the current length
    and t the count for that length; a..f are the auxiliary recurrence terms.

    :param days: string length to count (default 30, the Euler problem size)
    :return: number of valid prize strings of that length
    """
    state = [1, 3, 0, 2, 1, 0, 0, 1]
    while state[0] < days:
        n, t, a, b, c, d, e, f = state
        state = [n + 1, 2 * t + b - a, c, 2 * b - a + d, t - (a + c), e, f, t]
    return state[1]


print(prize_strings(30))
| mit | Python | |
84d3e73fd7ff79e36268fce4b470af8fa3617f0c | Add couple of routes for the admin blueprint | finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is | app/admin/routes.py | app/admin/routes.py | from flask import render_template, redirect, url_for, flash, request
from flask.ext.login import login_required, current_user
from . import admin
from .forms import ProfileForm
from .. import db
from ..models import User
@admin.route('/')
@login_required
def index():
    """Render the admin landing page for the currently logged-in user."""
    return render_template('admin/user.html', user=current_user)
@admin.route('/edit_user', methods=['GET', 'POST'])
@login_required
def edit_user():
    """Display and process the profile edit form for the logged-in user.

    On a valid POST: persist the changes, flash a confirmation and redirect
    to the admin index. On GET: pre-fill the form from the current user.
    """
    form = ProfileForm()
    if form.validate_on_submit():
        current_user.name = form.name.data
        current_user.location = form.location.data
        current_user.bio = form.bio.data
        db.session.add(current_user._get_current_object())
        db.session.commit()
        flash("Síðan hefur verið uppfærð")
        return redirect(url_for('admin.index'))
    if request.method == 'GET':
        # Pre-fill only on GET; the original also ran this after an invalid
        # POST, silently discarding the user's submitted input.
        form.name.data = current_user.name
        form.location.data = current_user.location
        form.bio.data = current_user.bio
    return render_template('admin/edit_user.html', form=form)
| mit | Python | |
12bb21ca19a36465241c85b4f69838294c817630 | Update to version 3.4.2 | willthames/ansible-lint,MatrixCrawler/ansible-lint,dataxu/ansible-lint | lib/ansiblelint/version.py | lib/ansiblelint/version.py | __version__ = '3.4.2'
| __version__ = '3.4.1'
| mit | Python |
4e4d4365a0ef1a20d181f1015152acb116226e3d | Add device class for low battery (#10829) | mKeRix/home-assistant,pschmitt/home-assistant,turbokongen/home-assistant,jnewland/home-assistant,sander76/home-assistant,adrienbrault/home-assistant,tboyce021/home-assistant,tchellomello/home-assistant,persandstrom/home-assistant,molobrakos/home-assistant,HydrelioxGitHub/home-assistant,mezz64/home-assistant,aequitas/home-assistant,tboyce021/home-assistant,auduny/home-assistant,lukas-hetzenecker/home-assistant,postlund/home-assistant,balloob/home-assistant,jawilson/home-assistant,rohitranjan1991/home-assistant,DavidLP/home-assistant,soldag/home-assistant,Danielhiversen/home-assistant,Cinntax/home-assistant,nkgilley/home-assistant,jabesq/home-assistant,ewandor/home-assistant,joopert/home-assistant,tboyce1/home-assistant,qedi-r/home-assistant,nugget/home-assistant,sander76/home-assistant,robbiet480/home-assistant,lukas-hetzenecker/home-assistant,mezz64/home-assistant,DavidLP/home-assistant,rohitranjan1991/home-assistant,w1ll1am23/home-assistant,jabesq/home-assistant,titilambert/home-assistant,jamespcole/home-assistant,tboyce1/home-assistant,balloob/home-assistant,FreekingDean/home-assistant,kennedyshead/home-assistant,toddeye/home-assistant,fbradyirl/home-assistant,turbokongen/home-assistant,jnewland/home-assistant,ewandor/home-assistant,HydrelioxGitHub/home-assistant,aronsky/home-assistant,toddeye/home-assistant,auduny/home-assistant,tboyce1/home-assistant,home-assistant/home-assistant,kennedyshead/home-assistant,robbiet480/home-assistant,Teagan42/home-assistant,sdague/home-assistant,GenericStudent/home-assistant,rohitranjan1991/home-assistant,ewandor/home-assistant,tboyce1/home-assistant,qedi-r/home-assistant,PetePriority/home-assistant,aequitas/home-assistant,auduny/home-assistant,Teagan42/home-assistant,HydrelioxGitHub/home-assistant,tinloaf/home-assistant,tinloaf/home-assistant,nugget/home-assistant,aronsky/home-assistant,molobrakos/home-assistant,MartinHjelmare/home-assistant,P
etePriority/home-assistant,MartinHjelmare/home-assistant,adrienbrault/home-assistant,postlund/home-assistant,leppa/home-assistant,persandstrom/home-assistant,jnewland/home-assistant,tinloaf/home-assistant,partofthething/home-assistant,jabesq/home-assistant,jamespcole/home-assistant,titilambert/home-assistant,mKeRix/home-assistant,nugget/home-assistant,jawilson/home-assistant,balloob/home-assistant,home-assistant/home-assistant,Danielhiversen/home-assistant,PetePriority/home-assistant,soldag/home-assistant,sdague/home-assistant,w1ll1am23/home-assistant,mKeRix/home-assistant,joopert/home-assistant,molobrakos/home-assistant,persandstrom/home-assistant,pschmitt/home-assistant,partofthething/home-assistant,jamespcole/home-assistant,tchellomello/home-assistant,DavidLP/home-assistant,MartinHjelmare/home-assistant,GenericStudent/home-assistant,fbradyirl/home-assistant,aequitas/home-assistant,fbradyirl/home-assistant,nkgilley/home-assistant,mKeRix/home-assistant,Cinntax/home-assistant,leppa/home-assistant,FreekingDean/home-assistant | homeassistant/components/binary_sensor/__init__.py | homeassistant/components/binary_sensor/__init__.py | """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
DEVICE_CLASSES = [
'battery', # On means low, Off means normal
'cold', # On means cold (or too cold)
'connectivity', # On means connection present, Off = no connection
'gas', # CO, CO2, etc.
'heat', # On means hot (or too hot)
'light', # Lightness threshold
'moisture', # Specifically a wetness sensor
'motion', # Motion sensor
'moving', # On means moving, Off means stopped
'occupancy', # On means occupied, Off means not occupied
'opening', # Door, window, etc.
'plug', # On means plugged in, Off means unplugged
'power', # Power, over-current, etc
'presence', # On means home, Off means away
'safety', # Generic on=unsafe, off=safe
'smoke', # Smoke detector
'sound', # On means sound detected, Off means no sound
'vibration', # On means vibration detected, Off means no vibration
]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
@asyncio.coroutine
def async_setup(hass, config):
    """Track states and offer events for binary sensors."""
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)

    yield from component.async_setup(config)
    # EntityComponent handles platform discovery/loading; setup always succeeds.
    return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
    """Represent a binary sensor.

    Base class for platform implementations; subclasses override is_on
    (and optionally device_class).
    """

    @property
    def is_on(self):
        """Return true if the binary sensor is on (base returns None)."""
        return None

    @property
    def state(self):
        """Return the state of the binary sensor."""
        # Any falsy is_on (False, or the base None) maps to STATE_OFF.
        return STATE_ON if self.is_on else STATE_OFF

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return None
| """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
# Component domain / entity namespace.
DOMAIN = 'binary_sensor'

# Seconds between polls of binary sensor platforms.
SCAN_INTERVAL = timedelta(seconds=30)

# entity_id template, e.g. 'binary_sensor.front_door'.
ENTITY_ID_FORMAT = DOMAIN + '.{}'

# Valid device_class values; each comment maps On/Off to a meaning.
DEVICE_CLASSES = [
    'cold',          # On means cold (or too cold)
    'connectivity',  # On means connection present, Off = no connection
    'gas',           # CO, CO2, etc.
    'heat',          # On means hot (or too hot)
    'light',         # Lightness threshold
    'moisture',      # Specifically a wetness sensor
    'motion',        # Motion sensor
    'moving',        # On means moving, Off means stopped
    'occupancy',     # On means occupied, Off means not occupied
    'opening',       # Door, window, etc.
    'plug',          # On means plugged in, Off means unplugged
    'power',         # Power, over-current, etc
    'presence',      # On means home, Off means away
    'safety',        # Generic on=unsafe, off=safe
    'smoke',         # Smoke detector
    'sound',         # On means sound detected, Off means no sound
    'vibration',     # On means vibration detected, Off means no vibration
]

# Voluptuous validator: lower-cases input and requires a known device class.
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
@asyncio.coroutine
def async_setup(hass, config):
    """Track states and offer events for binary sensors."""
    # Delegate platform discovery/setup to the shared EntityComponent helper.
    component = EntityComponent(
        logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
    yield from component.async_setup(config)
    return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
    """Represent a binary sensor."""

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return None

    @property
    def state(self):
        """Return the state of the binary sensor."""
        if self.is_on:
            return STATE_ON
        return STATE_OFF

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return None
| mit | Python |
4d2b0fdb4ee3289c7a2ba435f77f70c978cdd166 | add script to fetch recursively [bug] infinite loop | DeercoderPractice/tools,DeercoderPractice/tools | fetch_recursively.py | fetch_recursively.py | #!/usr/bin/env python
from sys import argv
import random
import urllib
def fetch(src_url):
    """Download src_url, save every .pdf/.ppt it links to, and recurse
    into any linked .htm/.html pages (Python 2 script).

    NOTE(review): there is no visited-URL set, so pages that link to
    each other cause unbounded recursion (the "[bug] infinite loop"
    flagged in the commit subject) -- needs a seen-URL guard.
    """
    ## first fetch the source HTML page
    # Random float used as a throwaway local filename for the page itself.
    num = 100000 * random.random()
    filename = str(num)
    urllib.urlretrieve(src_url, filename=filename)
    txt = open(filename, "r")  # NOTE(review): file handle is never closed
    for line in txt:
        index = line.find(".pdf")
        index_ppt = line.find(".ppt")
        if index != -1 or index_ppt != -1:
            # Split on quotes so href="..." attribute values become list items.
            lists = line.split("\"")
            # NOTE: 'list' shadows the builtin here (kept as-is).
            rets = [list for list in lists if list.find(".pdf") != -1 or list.find(".ppt") != -1]
            for ret in rets:
                # for relative path, combine it with previous path
                if line.find("http:") == -1:
                    # 'url' is the module-level base URL defined below.
                    fetch_url = url + str(ret)
                    name = str(ret).split("/")[-1]
                    if fetch_url.find("<") == -1 or fetch_url.find(">") == -1:
                        print fetch_url + " ---> " + name
                        urllib.urlretrieve(fetch_url, filename=name)
                # for absolute path, just use it
                else:
                    name = str(ret).split("/")[-1]
                    # fix bugs: ret may not contain "http", just with "ppt/pdf"(line has but ret not)
                    if ret.find("http:") != -1:
                        print ret + " ---> " + name
                        urllib.urlretrieve(ret, filename=name)
        else:
            if line.find("http:") != -1: # there is an url
                lists = line.split("\"")
                rets = [list for list in lists if list.find(".htm") != -1 or list.find(".html") != -1]
                for ret in rets:
                    if ret.find("http:") != -1:
                        print "Now there is a new url, fetch it:%s"%ret
                        fetch(ret)  # recursion without cycle detection (see docstring)


### This needs to make sure again and again!
# Base URL used to resolve relative links found by fetch().
url="http://cs224d.stanford.edu/"
print "NOTE: add url webpage and make sure the fetch url is correct, especially the base url!"
src_url = argv[1]
print "source webpage: " + src_url
fetch(src_url)
| apache-2.0 | Python | |
2ea9f36c8eaf796011d93c361e663b01ba259842 | Call new doc sync in repair command | zenefits/sentry,JackDanger/sentry,mitsuhiko/sentry,mvaled/sentry,BuildingLink/sentry,nicholasserra/sentry,zenefits/sentry,BuildingLink/sentry,BuildingLink/sentry,alexm92/sentry,BuildingLink/sentry,fotinakis/sentry,gencer/sentry,JamesMura/sentry,fotinakis/sentry,looker/sentry,JackDanger/sentry,daevaorn/sentry,mvaled/sentry,fotinakis/sentry,zenefits/sentry,JamesMura/sentry,ifduyue/sentry,looker/sentry,JackDanger/sentry,beeftornado/sentry,fotinakis/sentry,jean/sentry,nicholasserra/sentry,gencer/sentry,daevaorn/sentry,gencer/sentry,alexm92/sentry,beeftornado/sentry,mvaled/sentry,nicholasserra/sentry,jean/sentry,jean/sentry,alexm92/sentry,gencer/sentry,BuildingLink/sentry,ifduyue/sentry,JamesMura/sentry,ifduyue/sentry,JamesMura/sentry,looker/sentry,ifduyue/sentry,JamesMura/sentry,zenefits/sentry,looker/sentry,mvaled/sentry,daevaorn/sentry,jean/sentry,ifduyue/sentry,mvaled/sentry,gencer/sentry,daevaorn/sentry,beeftornado/sentry,mitsuhiko/sentry,mvaled/sentry,zenefits/sentry,looker/sentry,jean/sentry | src/sentry/runner/commands/repair.py | src/sentry/runner/commands/repair.py | """
sentry.runner.commands.repair
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import os
import click
from sentry.runner.decorators import configuration
@click.command()
@configuration
def repair():
    """Attempt to repair any invalid data."""
    # 1) Refresh the bundled integration docs, but only when the docs
    #    folder is writable (skips read-only / system installs).
    click.echo('Forcing documentation sync')
    from sentry.utils.integrationdocs import sync_docs, DOC_FOLDER
    if os.access(DOC_FOLDER, os.W_OK):
        sync_docs()
    else:
        click.echo(' - skipping (path cannot be written to)')

    # 2) Every project must own at least one API key.
    from sentry.models import Activity, Project, ProjectKey
    click.echo('Creating missing project keys')
    queryset = Project.objects.filter(key_set__isnull=True)
    for project in queryset:
        try:
            ProjectKey.objects.get_or_create(
                project=project,
            )
        except ProjectKey.MultipleObjectsReturned:
            # A concurrent writer already created key(s); nothing to do.
            pass

    # 3) Recompute the denormalized comment counter from Activity rows.
    from django.db import connection
    click.echo("Correcting Group.num_comments counter")
    cursor = connection.cursor()
    cursor.execute("""
        UPDATE sentry_groupedmessage SET num_comments = (
            SELECT COUNT(*) from sentry_activity
            WHERE type = %s and group_id = sentry_groupedmessage.id
        )
    """, [Activity.NOTE])
| """
sentry.runner.commands.repair
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import click
from sentry.runner.decorators import configuration
@click.command()
@configuration
def repair():
    """Attempt to repair any invalid data."""
    # 1) Force a synchronous documentation sync via the sentry task.
    click.echo('Forcing documentation sync')
    from sentry.tasks.sync_docs import sync_docs
    sync_docs()

    # 2) Every project must own at least one API key.
    from sentry.models import Activity, Project, ProjectKey
    click.echo('Creating missing project keys')
    queryset = Project.objects.filter(key_set__isnull=True)
    for project in queryset:
        try:
            ProjectKey.objects.get_or_create(
                project=project,
            )
        except ProjectKey.MultipleObjectsReturned:
            # A concurrent writer already created key(s); nothing to do.
            pass

    # 3) Recompute the denormalized comment counter from Activity rows.
    from django.db import connection
    click.echo("Correcting Group.num_comments counter")
    cursor = connection.cursor()
    cursor.execute("""
        UPDATE sentry_groupedmessage SET num_comments = (
            SELECT COUNT(*) from sentry_activity
            WHERE type = %s and group_id = sentry_groupedmessage.id
        )
    """, [Activity.NOTE])
| bsd-3-clause | Python |
eec339635bee64d3e50a29167a639a93bc40a3a3 | Create set selections for debconf | hatchery/Genepool2,hatchery/genepool | genes/debconf/set.py | genes/debconf/set.py | import os
from subprocess import call
from functools import partial
#TODO: stop using sudo or ensure it exists
#TODOE: specify user to run as
#TODO: utilize functools partial to handle some of the above functionality
class Config:
    """Shared constants for driving debconf non-interactively."""
    # Command prefix; -E makes sudo preserve the environment below.
    SET_SELECTIONS = ['sudo', '-E', 'debconf-set-selections']
    # Environment captured at import time, with the debconf frontend
    # forced to noninteractive so no prompts are ever shown.
    ENV = os.environ.copy()
    ENV['DEBIAN_FRONTEND'] = "noninteractive"
    # subprocess.call pre-bound to that environment.
    ENV_CALL = partial(call, env=ENV)
def set_selections(*selections):
    """Feed preseed lines to debconf-set-selections via its stdin.

    Each item in *selections* is one "pkg question type value" line.
    """
    if selections:
        # Bug fix: the original passed '|' as a literal argv element to
        # echo, which never creates a shell pipeline, so the selections
        # were echoed as text instead of being piped to
        # debconf-set-selections.  Write them to the child's stdin.
        from subprocess import Popen, PIPE
        proc = Popen(Config.SET_SELECTIONS, stdin=PIPE, env=Config.ENV)
        proc.communicate('\n'.join(selections) + '\n')
    else:
        # FIXME: add error (silently ignoring empty input for now)
        pass
| mit | Python | |
7a4857682567b5a23f940e05189e02d797599d51 | Add tests/test_00_info.py that simply reports the icat version number and package directory to the terminal. | icatproject/python-icat | tests/test_00_info.py | tests/test_00_info.py | """Report version info about python-icat being tested.
"""
from __future__ import print_function
import pytest
import os.path
import icat
class Reporter(object):
    """Buffer diagnostic messages and report them later using a
    terminalreporter.
    """
    def __init__(self, terminalreporter):
        super(Reporter, self).__init__()
        self.terminal = terminalreporter
        self.msgs = []

    def addmsg(self, m):
        """Queue one message for later output."""
        self.msgs.append(m)

    def flush(self):
        """Write all queued messages (indented) and empty the queue."""
        while self.msgs:
            self.terminal.write_line(" " + self.msgs.pop(0))
@pytest.fixture()
def terminal(pytestconfig):
    """The pytest terminalreporter plugin, used to write to the console."""
    return pytestconfig.pluginmanager.getplugin('terminalreporter')
@pytest.fixture()
def diag(request, terminal):
    """A Reporter whose queued messages are flushed after the test ends."""
    rep = Reporter(terminal)
    request.addfinalizer(rep.flush)
    return rep
def test_info(diag):
    """Sanity-check the icat package and report its version and path."""
    assert icat.__version__
    assert icat.__revision__
    diag.addmsg("Version: python-icat %s (%s)"
                % (icat.__version__, icat.__revision__))
    diag.addmsg("Path: %s"
                % os.path.dirname(os.path.abspath(icat.__file__)))
| apache-2.0 | Python | |
9afbdbb0fa7f77269b08680e9290fa2628d88caf | add problem 069 | smrmkt/project_euler | problem_069.py | problem_069.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
Euler's Totient function, φ(n) [sometimes called the phi function],
is used to determine the number of positive numbers less than or
equal to n which are relatively prime to n.
For example, as 1, 2, 4, 5, 7, and 8, are all less than nine and
relatively prime to nine, φ(9)=6.
The number 1 is considered to be relatively prime to every positive number,
so φ(1)=1.
Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation of 79180.
Find the value of n, 1 < n < 107, for which φ(n) is a permutation of n and
the ratio n/φ(n) produces a minimum.
'''
import math
import timeit
def is_prime(n):
    """Trial-division primality test.

    Bug fix: the original returned True for n < 2 (0 and 1 are not
    prime).  No caller in this file relied on that: next_prime only
    probes candidates >= 2 once it passes 1.
    """
    if n < 2:
        return False
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
def next_prime(n):
    """Return the smallest number greater than n with no divisor in
    [2, sqrt(candidate)] (trial division inlined from is_prime)."""
    candidate = n
    while True:
        candidate += 1
        if not any(candidate % d == 0
                   for d in range(2, int(math.sqrt(candidate)) + 1)):
            return candidate
def calc(n):
    """Multiply successive small primes together while the running
    product times the next prime still fits under n, and return that
    product (a primorial).

    For Project Euler 69, n/phi(n) is maximised by the number built
    from as many of the smallest primes as possible below the limit.
    """
    prime = 1
    phi = 1
    while phi*prime <= n:
        phi *= prime
        prime = next_prime(prime)
    return phi
if __name__ == '__main__':
    # Python 2 script: print the answer for the 10**6 limit, then time one run.
    print calc(1000000)
    print timeit.Timer('problem_069.calc(1000000)', 'import problem_069').timeit(1)
| mit | Python | |
292fd8b37f9f0b28176ceb3c41f3b2f85b227049 | bump version in __init__.py | WillianPaiva/1flow,1flow/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,WillianPaiva/1flow | oneflow/__init__.py | oneflow/__init__.py |
# Package version string (bumped per release).
VERSION = '0.20.11.15'
|
# Package version string (bumped per release).
VERSION = '0.20.11.14'
| agpl-3.0 | Python |
class InvalidFilter(Exception):
    """Raised when the 'filter' keyword argument is not recognized."""


class InvalidHero(Exception):
    """Raised when the 'hero' keyword argument is not recognized."""


class InvalidCombination(Exception):
    """Raised when the 'filter' and 'hero' keyword arguments form an
    invalid combination."""


class NotFound(Exception):
    """Raised when stats could not be found."""
| mit | Python | |
0d3343300d62afc37ba7a3bc1ec6e81bb8f8c648 | add parent.py to be forked | open-lambda/open-lambda,open-lambda/open-lambda,open-lambda/open-lambda,open-lambda/open-lambda,open-lambda/open-lambda | worker/namespace/parent.py | worker/namespace/parent.py | #TODO make sure listening on the pipe blocks correctly, better error handling
import os, sys, ns, time
from subprocess import check_output
sys.path.append('/handler') # assume submitted .py file is /handler/lambda_func
def handler(args, path):
    """Run the submitted lambda on *args* and write the result to *path*.

    On any failure a JSON error payload is written instead.
    """
    import json
    import lambda_func  # submitted .py file lives in /handler (on sys.path)
    try:
        # NOTE(review): the original called the module object itself
        # (lambda_func(args)), which can never succeed; assuming the
        # submitted module exposes a handler() entry point -- confirm.
        ret = lambda_func.handler(args)
    except Exception:
        # Original was `json.dumps{...}` -- a syntax error -- with a bare except.
        ret = json.dumps({'error': 'handler execution failed with args: %s' % args})
    # Original opened the pipe read-only and then called write(); open for writing.
    with open(path, 'w') as pipe:
        pipe.write(ret)
def listen(path):
    """Read the input pipe at *path* until EOF and return everything
    received as one string."""
    chunks = []
    with open(path) as pipe:
        while True:
            chunk = pipe.read()
            if not chunk:
                break
            chunks.append(chunk)
    return ''.join(chunks)
def main(pid, inpath, outpath):
    """Request loop: the long-lived parent listens on the input pipe
    forever; each request is handled by a double-forked grandchild
    which escapes the loop and runs handler()."""
    # parent never exits
    while True:
        args = listen(inpath)
        # Bug fix: the original called a bare forkenter(), which is not
        # defined anywhere in this file.  NOTE(review): assuming it lives
        # in the imported ns module -- confirm.
        r = ns.forkenter(pid)
        if r == 0:
            break  # grandchild escapes the loop to handle this request
        elif r < 0:
            sys.exit(0)  # child dies quietly
    handler(args, outpath)
if __name__ == '__main__':
    # Bug fixes: the original tested len(sys.argv) < 3 although three
    # arguments (argv[1..3]) are required, and the main(...) call was
    # missing a comma between sys.argv[2] and sys.argv[3] (SyntaxError).
    if len(sys.argv) < 4:
        print('Usage: test.py <ns_pid> <input_pipe> <output_pipe>')
        sys.exit(1)
    else:
        main(sys.argv[1], sys.argv[2], sys.argv[3])
| apache-2.0 | Python | |
1b7cce5b8dd274b904466dc6deeac312238fc857 | add test_comp.py, add test function stubs | kmuehlbauer/wradlib,wradlib/wradlib,wradlib/wradlib,heistermann/wradlib,heistermann/wradlib,kmuehlbauer/wradlib | wradlib/tests/test_comp.py | wradlib/tests/test_comp.py | #!/usr/bin/env python
# -------------------------------------------------------------------------------
# Name: test_comp.py
# Purpose: testing file for the wradlib.comp module
#
# Authors: wradlib developers
#
# Created: 26.02.2016
# Copyright: (c) wradlib developers
# Licence: The MIT License
# -------------------------------------------------------------------------------
import unittest
import wradlib.vpr as vpr
import wradlib.georef as georef
import numpy as np
class ComposeTest(unittest.TestCase):
    """Stub test case for wradlib.comp; bodies to be filled in later."""

    def test_extract_circle(self):
        """TODO: test comp.extract_circle."""

    def test_togrid(self):
        """TODO: test comp.togrid."""

    def compose_ko(self):
        """TODO: test comp.compose_ko (no test_ prefix, so unittest
        skips it until implemented)."""

    def compose_weighted(self):
        """TODO: test comp.compose_weighted (skipped until implemented)."""
if __name__ == '__main__':
    # Run all test_* methods when invoked directly.
    unittest.main()
| mit | Python | |
80089ad2856b60383692a8dd5abe8520f61a0895 | Create write_OME-XML_from_file.py | sebi06/BioFormatsRead | write_OME-XML_from_file.py | write_OME-XML_from_file.py | # -*- coding: utf-8 -*-
"""
@author: Sebi
File: write_OME-XML_from_file.py
Date: 17.12.2015
Version. 1.0
"""
import bfimage as bf
from lxml import etree as etl
def create_omexml(testdata, method=1, writeczi_metadata=True):
    """Write a pretty-printed OME-XML sidecar file next to each image.

    testdata : list of image file paths
    method   : 1 uses bf.createOMEXML, 2 uses bf.get_metadata_store
    writeczi_metadata : additionally dump CZI-specific XML for .czi files
    (Python 2 script.)
    """
    # creates readable xml files from image data files. Default method should be = 1.
    if method == 1:
        # method 1
        for i in range(0, len(testdata)):
            # Change File name and write XML file to same folder
            xmlfile1 = testdata[i] + '_MetaData1.xml'
            try:
                # get the actual OME-XML
                omexml = bf.createOMEXML(testdata[i])
                # create root and tree from XML string and write "pretty" to disk
                root = etl.fromstring(omexml)
                tree = etl.ElementTree(root)
                tree.write(xmlfile1, pretty_print=True, encoding='utf-8', method='xml')
                print 'Created OME-XML file for testdata: ', testdata[i]
            except:
                # NOTE(review): bare except hides the actual failure -- consider logging it.
                print 'Creating OME-XML failed for testdata: ', testdata[i]

    if method == 2:
        # method 2
        for i in range(0, len(testdata)):
            # Change File name and write XML file to same folder
            xmlfile2 = testdata[i] + '_MetaData2.xml'
            try:
                # get the actual OME-XML
                md, omexml = bf.get_metadata_store(testdata[i])
                # create root and tree from XML string and write "pretty" to disk
                root = etl.fromstring(omexml)
                tree = etl.ElementTree(root)
                tree.write(xmlfile2, pretty_print=True, encoding='utf-8', method='xml')
                print 'Created OME-XML file for testdata: ', testdata[i]
            except:
                print 'Creating OME-XML failed for testdata: ', testdata[i]

    if writeczi_metadata:
        # this writes the special CZI xml metadata to disk, when a CZI file was found.
        for i in range(0, len(testdata)):
            if testdata[i][-4:] == '.czi':
                try:
                    bf.czt.writexml_czi(testdata[i])
                except:
                    print 'Could not write special CZI metadata for: ', testdata[i]


# INSERT THE FILES INSIDE THE LIST BELOW
testfiles = [r'c:\Users\Testuser\OME-TIFF_Metadatatest\test1.czi',
             r'c:\Users\Testuser\Documents\Testdata_Zeiss\OME-TIFF_Metadatatest\Tile=4_T=3_CH=2_Z=3.czi_Fiji_Export_allTiles.ome.tiff']

create_omexml(testfiles, method=1, writeczi_metadata=True)
| bsd-2-clause | Python | |
5f5f48f2f6f8c82d97858230219f67229d3165a4 | patch to add match_ids to history | em92/quakelive-local-ratings,em92/quakelive-local-ratings,em92/pickup-rating,em92/quakelive-local-ratings,em92/pickup-rating,em92/quakelive-local-ratings,em92/pickup-rating,em92/quakelive-local-ratings | patch-rating-history2.py | patch-rating-history2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
from datetime import datetime
from dump_qlstats_data import connect_to_database
# Gametypes whose player history needs match_id back-filled.
GAMETYPES_AVAILABLE = ["ad", "ctf", "tdm"]


def main(args):
    """Back-fill history[].match_id for players missing it.

    Returns a process exit code: 0 on success, 1 when the database
    connection fails.
    """
    try:
        db = connect_to_database()
    except Exception as e:
        print("error: " + str(e))
        return 1

    for gametype in GAMETYPES_AVAILABLE:
        print( gametype )
        # Players with at least one history entry but no match_id yet.
        options = { gametype + ".history.timestamp": { "$ne": None }, gametype + ".history.match_id": None }
        for player in db.players.find(options):
            print( player["_id"] )
            history_result = []
            for history_item in player[gametype]["history"]:
                # The match is located by (gametype, timestamp).
                match = db.matches.find_one( { "gametype": gametype, "timestamp": history_item["timestamp"] } )
                print( match["_id"] )
                history_item['match_id'] = match["_id"]
                history_result.append( history_item )
                # NOTE(review): 'result' below is built but never used -- confirm dead code.
                result = { "timestamp": match['timestamp'], "rating": player[gametype]['rating'] }
            db.players.update( { "_id": player['_id'] }, { "$set": { gametype + ".history": history_result } } )
    return 0


if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| agpl-3.0 | Python | |
38375800cee8c02051d7d8212ccc5fc843a109f1 | Create client.py | Colviz/Vince | client.py | client.py | import socket
def Main():
    """Line-based TCP chat client: send what the user types, print the
    server's reply; entering 'q' quits.  (Python 2 script.)"""
    host = '192.168.43.130'  # server address (hard-coded)
    port = 5000

    s = socket.socket()
    s.connect((host, port))

    message = raw_input('-->')
    while message != 'q':
        s.send(message)
        data = s.recv(1024)
        print "server: " + str(data)
        message = raw_input('-->')
    s.close()


if __name__ == '__main__':
    Main()
| apache-2.0 | Python | |
901d430e3f6705af372974b6e1b42e36884ba47f | Add mplimporthook.py | mikebirdgeneau/Docker-Datastack,mikebirdgeneau/Docker-Datastack,mikebirdgeneau/Docker-Datastack,mikebirdgeneau/Docker-Datastack,mikebirdgeneau/Docker-Datastack | jupyter/mplimporthook.py | jupyter/mplimporthook.py | """Startup script for IPython kernel.
Installs an import hook to configure the matplotlib backend on the fly.
Originally from @minrk at
https://github.com/minrk/profile_default/blob/master/startup/mplimporthook.py
Repurposed for docker-stacks to address repeat bugs like
https://github.com/jupyter/docker-stacks/issues/235.
"""
import sys
from IPython import get_ipython
class MatplotlibFinder(object):
    """Import hook that notices when matplotlib.pyplot or pylab is
    imported and tries to configure the matplotlib backend
    appropriately for the environment.
    """
    _called = False

    def find_module(self, fullname, path=None):
        """Meta-path hook: configures the backend as a side effect but
        never loads a module itself (always returns None)."""
        # Only react once, and only to the two matplotlib entry modules.
        if self._called or fullname not in ('pylab', 'matplotlib.pyplot'):
            return None
        self._called = True
        try:
            # Remove this hook from the import machinery.
            sys.meta_path = [hook for hook in sys.meta_path
                             if hook is not self]
        except ValueError:
            pass
        ip = get_ipython()
        if ip is None:
            # Not running in an interactive environment.
            return None
        if ip.pylab_gui_select:
            # A backend has already been selected.
            return None
        if hasattr(ip, 'kernel'):
            # Kernel environments default to the inline backend.
            ip.enable_matplotlib('inline')
        else:
            print('enabling matplotlib')
            ip.enable_matplotlib()
        return None
# install the finder immediately (front of sys.meta_path so it sees
# every import before the normal finders do)
sys.meta_path.insert(0, MatplotlibFinder())
| mit | Python | |
baaeee7b003030ded5336c7da9e01c04beea46f3 | add swscale dependency | tuttleofx/sconsProject | autoconf/swscale.py | autoconf/swscale.py | from _external import *
# SCons dependency check for FFmpeg's libswscale (C library with the
# libswscale/swscale.h header).
swscale = LibWithHeaderChecker(
    'swscale',
    'libswscale/swscale.h',
    'c',
)
| mit | Python | |
626ce0dbd2450812e0cbac12293133e12bae0daf | Add parsing module | fbergroth/autosort | autosort/parsing.py | autosort/parsing.py | import ast
import textwrap
import tokenize
from collections import namedtuple
from tokenize import COMMENT, DEDENT, ENDMARKER, INDENT, NEWLINE, STRING, NAME
class Name(namedtuple('Name', 'name asname')):
    """An imported name with an optional 'as' alias."""

    # Sort-group order: CamelCase first, then snake_case, then CONSTANTS.
    CAMEL, SNAKE, CONST = range(3)

    @property
    def kind(self):
        """Classify the trailing dotted component by its casing."""
        leaf = self.name.rsplit('.', 1)[-1]
        if leaf.isupper():
            return self.CONST
        return self.CAMEL if leaf[0].isupper() else self.SNAKE

    def key(self):
        """Sort key: casing group first, then the rendered text."""
        return self.kind, str(self)

    def __str__(self):
        if not self.asname:
            return self.name
        return '{0} as {1}'.format(self.name, self.asname)
class Import(namedtuple('Import', 'kind module names noqa start end')):
    """One import statement: kind is 'import' or 'from', module the
    target, names the imported Name tuples, noqa whether a '# noqa'
    comment was present, start/end index the source lines it spans."""

    def merge(self, other):
        """Combine two imports of the same module: union the names
        (sorted by Name.key) and OR the noqa flags.  The merged record
        has no source span (start/end are -1)."""
        names = sorted(set(self.names + other.names), key=Name.key)
        noqa = self.noqa or other.noqa
        return Import(self.kind, self.module, names, noqa, -1, -1)
# A group of import statements: the Import list, the indentation string
# of the enclosing block, and the line the block starts on.
Block = namedtuple('Block', 'imports indent start')


def parse_imports(lines):
    """Tokenize *lines* (a list of source lines) and return a generator
    of Block records describing the imports found (see
    _ImportParser.parse_block)."""
    it = iter(lines)
    tokens = (_TokenInfo(*token) for token in
              tokenize.generate_tokens(lambda: next(it)))
    parser = _ImportParser(tokens, lines)
    return parser.parse_block('', 0)
class _TokenInfo(namedtuple('TokenInfo', 'type string start end line')):
    """A tokenize 5-tuple with convenience predicates for block parsing."""

    @property
    def name(self):
        """The token text for NAME tokens, otherwise False."""
        if self.type == NAME:
            return self.string
        return False

    @property
    def starts_block(self):
        """True when this token opens an indented block."""
        return self.type == INDENT

    @property
    def ends_block(self):
        """True when this token closes a block or ends the stream."""
        return self.type == DEDENT or self.type == ENDMARKER
class _ImportParser(namedtuple('_ImportParser', 'tokens lines')):
    """Walks a _TokenInfo stream and collects import statements per block."""

    def parse_block(self, indent, start):
        """Generator yielding one Block for the imports at this level.

        NOTE(review): the recursive self.parse_block(...) call below
        produces a generator that is never iterated, so imports inside
        indented blocks are effectively discarded -- confirm intended.
        """
        imports = []
        token = next(self.tokens)
        # Push imports beneath docstring
        if token.type == STRING:
            start = token.end[0] + 1
            token = next(self.tokens)
        while not token.ends_block:
            if token.starts_block:
                self.parse_block(token.string, token.start[0] - 1)
            elif token.name in ('from', 'import'):
                imports += self.parse_imports(token)
            token = next(self.tokens)
        if imports:
            # wrong -- original author's marker; presumably the
            # indent/start pairing yielded here is suspect -- confirm.
            yield Block(imports, indent, start)

    def parse_imports(self, token):
        """Consume one logical import statement starting at *token* and
        return a list of Import records (one per module for plain
        'import a, b' statements)."""
        first = token
        comments = []
        # Scan to the end of the logical line, remembering any comments.
        while token.type != NEWLINE:
            if token.type == COMMENT:
                comments.append(token.string)
            token = next(self.tokens)
        start, end = first.start[0] - 1, token.end[0]
        source = '\n'.join(self.lines[start:end])
        nodes = ast.parse(textwrap.dedent(source)).body
        # TODO: error on multiple nodes
        return self._make_imports(first.name, nodes[0], comments, start, end)

    @staticmethod
    def _make_imports(kind, node, comments, start, end):
        """Convert an ast Import/ImportFrom node into Import records."""
        noqa = any(c.startswith('# noqa') for c in comments)
        names = sorted([Name(n.name, n.asname)
                        for n in node.names], key=Name.key)
        if kind == 'from':
            # 'from X import a, b' -> a single record for module X.
            modules = [Name(node.module, None)]
        else:
            # 'import a, b' -> one record per module, with no name list.
            modules, names = names, []
        return [Import(kind, m, names, noqa, start, end) for m in modules]
| mit | Python | |
059dae76827ac016524925d94c10e5ed0a83f2c2 | Create PreFilterExample.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/Alessandruino/PreFilterExample.py | home/Alessandruino/PreFilterExample.py | from org.myrobotlab.opencv import OpenCVFilterAffine
# Build an affine filter that rotates each camera frame by 180 degrees
# (camera presumably mounted upside-down -- confirm).
affine = OpenCVFilterAffine("affine")
affine.setAngle(180.0)

# Serial port of the Arduino driving the left side.
leftPort= "/dev/cu.wchusbserial1450"

# Start the InMoov service plus head and eye tracking (eyes on pins 10/12).
i01 = Runtime.start("i01","InMoov")
headTracking = i01.startHeadTracking(leftPort)
eyesTracking = i01.startEyesTracking(leftPort,10,12)

# Apply the rotation before either tracker sees a frame.
i01.headTracking.addPreFilter(affine)
i01.eyesTracking.addPreFilter(affine)
sleep(1)

# Begin face-detection driven tracking.
i01.headTracking.faceDetect()
i01.eyesTracking.faceDetect()
| apache-2.0 | Python | |
c24fb3b6b41b2f06cee79fd21b090403f3a67457 | Add TwrSearch class | tchx84/twitter-gobject | twitter/twr_search.py | twitter/twr_search.py | #!/usr/bin/env python
#
# Copyright (c) 2013 Martin Abente Lahaye. - tch@sugarlabs.org
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import json
from gi.repository import GObject
import twr_error
from twr_object import TwrObject
class TwrSearch(GObject.GObject):
    """GObject wrapper around the Twitter v1.1 search API; results are
    delivered asynchronously via the signals below.  (Python 2.)"""

    TWEETS_URL = 'https://api.twitter.com/1.1/search/tweets.json'

    __gsignals__ = {
        'tweets-downloaded': (GObject.SignalFlags.RUN_FIRST,
                              None, ([object])),
        'tweets-downloaded-failed': (GObject.SignalFlags.RUN_FIRST,
                                     None, ([str]))}

    def tweets(self, q, count=None, since_id=None, max_id=None):
        """Schedule a search for *q*; count/since_id/max_id are passed
        through as API query parameters when given."""
        params = [('q', (q))]
        if count is not None:
            params += [('count', (count))]
        if since_id is not None:
            params += [('since_id', (since_id))]
        if max_id is not None:
            params += [('max_id', (max_id))]
        # Run the request from the GLib main loop, not the caller's stack.
        GObject.idle_add(self._get,
                         self.TWEETS_URL,
                         params,
                         self.__completed_cb,
                         self.__failed_cb,
                         'tweets-downloaded',
                         'tweets-downloaded-failed')

    def _get(self, url, params, completed_cb, failed_cb,
             completed_data, failed_data):
        # NOTE: the local 'object' shadows the builtin (kept as-is).
        object = TwrObject()
        object.connect('transfer-completed', completed_cb, completed_data)
        object.connect('transfer-failed', failed_cb, failed_data)
        object.request('GET', url, params)

    def __completed_cb(self, object, data, signal):
        # Parse the JSON payload; an 'errors' key means the API refused us.
        try:
            info = json.loads(data)
            if isinstance(info, dict) and ('errors' in info.keys()):
                raise twr_error.TwrSearchError(str(info['errors']))
            self.emit(signal, info)
        except Exception, e:
            print 'TwrSearch.__completed_cb crashed with %s' % str(e)

    def __failed_cb(self, object, message, signal):
        self.emit(signal, message)
| lgpl-2.1 | Python | |
class Try:
    """Minimal Try monad: wraps either a success value or the exception
    raised while producing it."""

    def __init__(self, value, is_success):
        self.value = value
        self.is_success = is_success

    def __eq__(self, other):
        return self.value == other.value and self.is_success == other.is_success

    @classmethod
    def of(cls, fn, *args):
        """Run fn(*args); wrap the result as a success, or the raised
        exception as a failure."""
        try:
            return cls(fn(*args), True)
        except Exception as e:
            # Bug fix: the original returned cls(e, True), flagging every
            # failure as a success.
            return cls(e, False)

    def map(self, mapper):
        """Apply *mapper* to a success value; pass a failure through."""
        if self.is_success:
            return Try(
                mapper(self.value),
                True
            )
        return Try(self.value, False)

    def fold(self, mapper):
        """On success, re-wrap mapper(value) via Try.of; on failure,
        return the raw stored value."""
        if self.is_success:
            return Try.of(mapper, self.value)
        return self.value

    def on_success(self, success_callback):
        """Invoke the callback with the value only on success."""
        if self.is_success:
            return success_callback(self.value)

    def on_fail(self, fail_callback):
        """Invoke the callback with the value only on failure."""
        if not self.is_success:
            return fail_callback(self.value)

    def filter(self, filterer):
        pass  # TODO: not yet implemented upstream
3f35e9b3913bb99cf7b299c36528eefa878337f4 | Add method to determine output name | bacontext/mopidy,liamw9534/mopidy,kingosticks/mopidy,pacificIT/mopidy,dbrgn/mopidy,mokieyue/mopidy,ali/mopidy,dbrgn/mopidy,swak/mopidy,pacificIT/mopidy,jodal/mopidy,rawdlite/mopidy,adamcik/mopidy,abarisain/mopidy,glogiotatidis/mopidy,adamcik/mopidy,jmarsik/mopidy,swak/mopidy,hkariti/mopidy,kingosticks/mopidy,ali/mopidy,adamcik/mopidy,mopidy/mopidy,ZenithDK/mopidy,mokieyue/mopidy,priestd09/mopidy,abarisain/mopidy,pacificIT/mopidy,glogiotatidis/mopidy,bacontext/mopidy,tkem/mopidy,bencevans/mopidy,jmarsik/mopidy,rawdlite/mopidy,bencevans/mopidy,ZenithDK/mopidy,quartz55/mopidy,ali/mopidy,diandiankan/mopidy,rawdlite/mopidy,jodal/mopidy,jcass77/mopidy,priestd09/mopidy,kingosticks/mopidy,ZenithDK/mopidy,vrs01/mopidy,swak/mopidy,tkem/mopidy,diandiankan/mopidy,vrs01/mopidy,hkariti/mopidy,mopidy/mopidy,SuperStarPL/mopidy,quartz55/mopidy,bacontext/mopidy,ali/mopidy,woutervanwijk/mopidy,quartz55/mopidy,bencevans/mopidy,tkem/mopidy,glogiotatidis/mopidy,mopidy/mopidy,jmarsik/mopidy,SuperStarPL/mopidy,quartz55/mopidy,vrs01/mopidy,bencevans/mopidy,vrs01/mopidy,jcass77/mopidy,jmarsik/mopidy,priestd09/mopidy,swak/mopidy,hkariti/mopidy,SuperStarPL/mopidy,jodal/mopidy,liamw9534/mopidy,hkariti/mopidy,bacontext/mopidy,diandiankan/mopidy,pacificIT/mopidy,jcass77/mopidy,diandiankan/mopidy,rawdlite/mopidy,tkem/mopidy,glogiotatidis/mopidy,mokieyue/mopidy,ZenithDK/mopidy,dbrgn/mopidy,mokieyue/mopidy,dbrgn/mopidy,woutervanwijk/mopidy,SuperStarPL/mopidy | mopidy/outputs/__init__.py | mopidy/outputs/__init__.py | import pygst
pygst.require('0.10')
import gst
import logging
logger = logging.getLogger('mopidy.outputs')
class BaseOutput(object):
    """Base class for providing support for multiple pluggable outputs."""

    def get_bin(self):
        """Build the gst.Bin (a queue feeding this output's sink chain)
        that will be attached to the pipeline."""
        description = 'queue ! %s' % self.describe_bin()
        logger.debug('Creating new output: %s', description)
        sink_bin = gst.parse_bin_from_description(description, True)
        sink_bin.set_name(self.get_name())
        self.modify_bin(sink_bin)
        return sink_bin

    def get_name(self):
        """Name of this output in the GStreamer context; defaults to the
        class name and may be overridden by subclasses."""
        return self.__class__.__name__

    def modify_bin(self, output):
        """Hook: tweak the constructed bin before it is installed, e.g.
        call set_properties on elements needing configuration.  No-op
        by default.

        :param output: gst.Bin to modify in some way.
        :type output: :class:`gst.Bin`
        """
        pass

    def describe_bin(self):
        """Return a gst-launch style text description of this output's
        sink chain (a plain sink such as 'autoaudiosink', or a chain
        'element1 ! element2 ! sink').  Subclasses must implement this."""
        raise NotImplementedError

    def set_properties(self, element, properties):
        """Set every non-None entry of *properties* on *element* via
        element.set_property.

        :param element: gst.Element to set properties on.
        :type element: :class:`gst.Element`
        :param properties: Dictionary of properties to set on element.
        :type properties: dict
        """
        for key in properties:
            value = properties[key]
            if value is None:
                continue
            element.set_property(key, value)
| import pygst
pygst.require('0.10')
import gst
import logging
logger = logging.getLogger('mopidy.outputs')
class BaseOutput(object):
    """Base class for providing support for multiple pluggable outputs."""

    def get_bin(self):
        """Build the gst.Bin (a queue feeding this output's sink chain)
        that will be attached to the pipeline; the bin is named after
        the concrete class."""
        description = 'queue ! %s' % self.describe_bin()
        logger.debug('Creating new output: %s', description)
        sink_bin = gst.parse_bin_from_description(description, True)
        sink_bin.set_name(self.__class__.__name__)
        self.modify_bin(sink_bin)
        return sink_bin

    def modify_bin(self, output):
        """Hook: tweak the constructed bin before it is installed, e.g.
        call set_properties on elements needing configuration.  No-op
        by default.

        :param output: gst.Bin to modify in some way.
        :type output: :class:`gst.Bin`
        """
        pass

    def describe_bin(self):
        """Return a gst-launch style text description of this output's
        sink chain (a plain sink such as 'autoaudiosink', or a chain
        'element1 ! element2 ! sink').  Subclasses must implement this."""
        raise NotImplementedError

    def set_properties(self, element, properties):
        """Set every non-None entry of *properties* on *element* via
        element.set_property.

        :param element: gst.Element to set properties on.
        :type element: :class:`gst.Element`
        :param properties: Dictionary of properties to set on element.
        :type properties: dict
        """
        for key in properties:
            value = properties[key]
            if value is None:
                continue
            element.set_property(key, value)
| apache-2.0 | Python |
ea6458a88079b939188d0e5bf86eedeb62247609 | add src/cs_utime.py | nomissbowling/nomissbowling,nomissbowling/nomissbowling | src/cs_utime.py | src/cs_utime.py | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''cs_utime
os.stat(fn).st_ctime # 978307200.0
tt=time.strptime('20010101T090000', '%Y%m%dT%H%M%S') # (2001,1,1,9,0,0,0,1,-1)
t=time.mktime(tt) # 978307200.0
os.utime(fn, (t, t)) # (atime, mtime)
'''
import sys, os, stat
import time
# Filesystem encoding used for on-disk path names (CP932, i.e. Windows
# Japanese).
FSENC = 'cp932'
# Directory containing the files to re-stamp.
UPATH = u'/tmp/tmp'
# (filename, timestamp) pairs; timestamps use the '%Y%m%dT%H%M%S' format.
FILES = [
    (u'f0.tsv', '20010101T090000'),
    (u'f1.tsv', '20010101T090000'),
    (u'f2.tsv', '20010101T090000')]
def set_ts(fn, ts):
    """Set both atime and mtime of *fn* to the local time parsed from
    *ts* (format '%Y%m%dT%H%M%S')."""
    stamp = time.mktime(time.strptime(ts, '%Y%m%dT%H%M%S'))
    os.utime(fn, (stamp, stamp))
def main():
    """Apply each configured timestamp to its file under UPATH (paths
    are encoded with FSENC for the filesystem)."""
    for fn, ts in FILES: set_ts((u'%s/%s' % (UPATH, fn)).encode(FSENC), ts)


if __name__ == '__main__':
    main()
| mit | Python | |
a2a268ea8b9b2876011453476baedaa4bca01559 | Create ball.py | DarkAce65/rpi-led-matrix,DarkAce65/rpi-led-matrix | python/ball.py | python/ball.py | #!/usr/bin/env python
from rgbmatrix import RGBMatrix
import sys, time
import math
# Panel geometry: one 16-row panel, a single chain, no parallel chains.
rows = 16
chains = 1
parallel = 1
ledMatrix = RGBMatrix(rows, chains, parallel)
numRows = 16
# NOTE(review): numRows/height/width are never used below -- presumably
# intended for drawing code that has not been added yet; confirm.
height = ledMatrix.height
width = ledMatrix.width
try:
    print "Press Ctrl + C to stop executing"
    # Swap blank canvases forever; ball-drawing code would go between
    # CreateFrameCanvas() and SwapOnVSync().
    while True:
        nextFrame = ledMatrix.CreateFrameCanvas()
        ledMatrix.SwapOnVSync(nextFrame)
except KeyboardInterrupt:
    print "Exiting\n"
sys.exit(0) | mit | Python | |
5eb0dce7f2c287203ace05b6989785b7e4fdac75 | add script to track xmin in the database | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | nagios/check_pgsql_xmin.py | nagios/check_pgsql_xmin.py | """
Return the maximum xmin in the database
"""
import os
import sys
import stat
import psycopg2
IEM = psycopg2.connect(database='iem', host='iemdb', user='nobody')
icursor = IEM.cursor()
def check():
    """Return the (datname, age) row for the database with the greatest
    age(datfrozenxid), i.e. the one closest to transaction-ID wraparound.

    Uses the module-level icursor; the caller unpacks the tuple as
    (dbname, count).
    """
    icursor.execute("""
    SELECT datname, age(datfrozenxid) FROM pg_database
    ORDER by age DESC LIMIT 1
    """)
    row = icursor.fetchone()
    return row
if __name__ == '__main__':
    # Nagios plugin convention: exit 0 = OK, 1 = WARNING, 2 = CRITICAL.
    # The text after '|' is perfdata: value;warn;crit;max thresholds.
    dbname, count = check()
    if count < 200000000:
        print 'OK - %s %s |count=%s;200000000;215000000;220000000' % (count,
            dbname, count)
        sys.exit(0)
    elif count < 215000000:
        print 'WARNING - %s %s |count=%s;200000000;215000000;220000000' % (count,
            dbname, count)
        sys.exit(1)
    else:
        print 'CRITICAL - %s %s |count=%s;200000000;215000000;220000000' % (count,
            dbname, count)
sys.exit(2) | mit | Python | |
c373bbb351de421881d9f0e2f8a16d541bb21347 | add test-receive-file-ipv6.py | freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut,freedesktop-unofficial-mirror/telepathy__telepathy-salut | tests/twisted/avahi/test-receive-file-ipv6.py | tests/twisted/avahi/test-receive-file-ipv6.py | import avahi
import urllib
import BaseHTTPServer
import SocketServer
import socket
from saluttest import exec_test
from file_transfer_helper import ReceiveFileTest
from avahitest import AvahiListener
from xmppstream import connect_to_stream6
from twisted.words.xish import domish
class TestReceiveFileIPv6(ReceiveFileTest):
    """Receive-file test specialised to run the OOB transfer over IPv6."""

    def _resolve_salut_presence(self):
        """Resolve our own presence record over mDNS; return (host, port)."""
        AvahiListener(self.q).listen_for_service("_presence._tcp")
        # Wait specifically for the IPv6 announcement of our own handle.
        e = self.q.expect('service-added', name = self.self_handle_name,
                          protocol = avahi.PROTO_INET6)
        service = e.service
        service.resolve()
        e = self.q.expect('service-resolved', service = service)
        return str(e.pt), e.port

    def connect_to_salut(self):
        """Open an outbound IPv6 XMPP stream to the local Salut instance."""
        host, port = self._resolve_salut_presence()
        self.outbound = connect_to_stream6(self.q, self.contact_name,
                                           self.self_handle_name, host, port)
        e = self.q.expect('connection-result')
        assert e.succeeded, e.reason
        self.q.expect('stream-opened', connection = self.outbound)

    def send_ft_offer_iq(self):
        """Send a jabber:iq:oob file-transfer offer whose URL points at ::1."""
        iq = domish.Element((None, 'iq'))
        iq['to'] = self.self_handle_name
        iq['from'] = self.contact_name
        iq['type'] = 'set'
        iq['id'] = 'gibber-file-transfer-0'
        query = iq.addElement(('jabber:iq:oob', 'query'))
        # IPv6 loopback literal; the bracketed form is required in URLs.
        url = 'http://[::1]:%u/gibber-file-transfer-0/%s' % \
            (self.httpd.server_port, urllib.quote(self.file.name))
        url_node = query.addElement('url', content=url)
        url_node['type'] = 'file'
        url_node['size'] = str(self.file.size)
        url_node['mimeType'] = self.file.content_type
        query.addElement('desc', content=self.file.description)
        self.outbound.send(iq)

    def _get_http_server_class(self):
        """Return a threading HTTP server class bound to an IPv6 socket."""
        class HTTPServer6(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
            # AF_INET6 may be missing on IPv6-less builds; None then
            # makes server construction fail fast instead of silently
            # falling back to IPv4.
            address_family = getattr(socket, 'AF_INET6', None)
        return HTTPServer6
if __name__ == '__main__':
    # exec_test drives the scripted conversation defined by the test class.
    test = TestReceiveFileIPv6()
    exec_test(test.test)
| lgpl-2.1 | Python | |
f296eb4a87a1130cb72e00099f7e9441425548ec | add device handler | cmoberg/ncclient,joysboy/ncclient,ncclient/ncclient,OpenClovis/ncclient,nwautomator/ncclient,leopoul/ncclient,kroustou/ncclient,nnakamot/ncclient,einarnn/ncclient,vnitinv/ncclient,earies/ncclient,GIC-de/ncclient,aitorhh/ncclient,lightlu/ncclient | ncclient/devices/huawei.py | ncclient/devices/huawei.py | """
Handler for Huawei device specific information.
Note that for proper import, the classname has to be:
"<Devicename>DeviceHandler"
...where <Devicename> is something like "Default", "Huawei", etc.
All device-specific handlers derive from the DefaultDeviceHandler, which implements the
generic information needed for interaction with a Netconf server.
"""
from ncclient.xml_ import BASE_NS_1_0
from .default import DefaultDeviceHandler
class HuaweiDeviceHandler(DefaultDeviceHandler):
    """
    Huawei handler for device specific information.

    In the device_params dictionary, which is passed to __init__, you can specify
    the parameter "ssh_subsystem_name". That allows you to configure the preferred
    SSH subsystem name that should be tried on your Huawei device. If connecting with
    that name fails, or you didn't specify that name, the other known subsystem names
    will be tried. However, if you specify it then this name will be tried first.
    """
    # No server error messages are exempt from being raised for Huawei devices.
    _EXEMPT_ERRORS = []

    def __init__(self, device_params):
        super(HuaweiDeviceHandler, self).__init__(device_params)

    def get_capabilities(self):
        # Currently identical to the default capability set; adjust here
        # once Huawei-specific capabilities are needed.
        c = super(HuaweiDeviceHandler, self).get_capabilities()
        return c

    def get_xml_base_namespace_dict(self):
        # Bind the default (un-prefixed) XML namespace to NETCONF base 1.0.
        return { "xmlns":BASE_NS_1_0 }

    def get_xml_extra_prefix_kwargs(self):
        # The Huawei VRP namespace is deliberately left disabled below;
        # only the base namespace is advertised.
        d = {
            # "xmlns":"http://www.huawei.com/netconf/vrp"
        }
        d.update(self.get_xml_base_namespace_dict())
        return d
| apache-2.0 | Python | |
401fbbd7440f16c462ca31150d9873da5f052356 | create app.py | Fillll/reddit2telegram,Fillll/reddit2telegram | reddit2telegram/channels/r_freegamefindings/app.py | reddit2telegram/channels/r_freegamefindings/app.py | #encoding:utf-8
subreddit = 'freegamefindings'
t_channel = '@r_freegamefindings'
def send_post(submission, r2t):
    """Relay one subreddit submission to the Telegram channel via r2t."""
    return r2t.send_simple(submission)
| mit | Python | |
412828bea81f5aad917188881c1e7e4d6ce52400 | Add tests for the management views | usingnamespace/usingnamespace | usingnamespace/tests/test_views_management.py | usingnamespace/tests/test_views_management.py | import unittest
from pyramid import testing
class ManagementViewsTest(unittest.TestCase):
    """Unit tests for usingnamespace.views.management.Management."""

    def setUp(self):
        # Fresh Pyramid testing configuration for each test.
        self.config = testing.setUp()

    def tearDown(self):
        testing.tearDown()

    def makeOne(self, context, request):
        """Instantiate the view class under test."""
        from usingnamespace.views.management import Management
        return Management(context, request)

    def makeWithInfo(self):
        """Build the view with a dummy request/context wired together."""
        self.request = testing.DummyRequest()
        self.context = testing.DummyResource()
        self.request.context = self.context
        return self.makeOne(self.context, self.request)

    def test_verify_context_request(self):
        # The constructor should store its arguments verbatim.
        view_class = self.makeOne("1", "2")
        self.assertEqual(view_class.context, "1")
        self.assertEqual(view_class.request, "2")

    def test_management_home(self):
        # The home view renders with an empty template context.
        view_class = self.makeWithInfo()
        self.assertEqual(view_class.home(), {})
class ManagementNotAuthorizedViewsTest(unittest.TestCase):
    """Unit tests for the not-authorized / not-found management views."""

    def setUp(self):
        # Fresh Pyramid testing configuration; request/context are
        # created lazily by makeWithInfo().
        self.config = testing.setUp()
        self.request = None
        self.context = None

    def tearDown(self):
        testing.tearDown()

    def makeOne(self, context, request):
        """Instantiate the view class under test."""
        from usingnamespace.views.management import ManagementNotAuthorized
        return ManagementNotAuthorized(context, request)

    def makeWithInfo(self):
        """Build the view with a dummy request/context wired together."""
        self.request = testing.DummyRequest()
        self.context = testing.DummyResource()
        self.request.context = self.context
        return self.makeOne(self.context, self.request)

    def test_view_forbidden(self):
        # Unauthenticated access raises HTTPForbidden.
        from pyramid.httpexceptions import HTTPForbidden
        view_class = self.makeWithInfo()
        self.assertRaises(HTTPForbidden, view_class.management_not_authed)

    def test_view_not_found(self):
        # The not-found view sets a 404 status on the response in place.
        view_class = self.makeWithInfo()
        view_class.management_not_found()
        self.assertEqual(self.request.response.status_int, 404)
| isc | Python | |
50d84e5b134b69cebcb4935da24a3cc702e1feef | Add coverage for resolve_ref_for_build | getsentry/zeus,getsentry/zeus,getsentry/zeus,getsentry/zeus | tests/zeus/tasks/test_resolve_ref.py | tests/zeus/tasks/test_resolve_ref.py | from zeus import factories
from zeus.tasks import resolve_ref_for_build
def test_resolve_ref_for_build(mocker, db_session, default_revision):
    """resolve_ref_for_build fills in revision_sha from the build's ref."""
    build = factories.BuildFactory.create(
        repository=default_revision.repository, ref=default_revision.sha
    )
    # The factory leaves the sha unresolved ...
    assert build.revision_sha is None
    resolve_ref_for_build(build.id)
    # ... and the task resolves the ref to the concrete revision sha.
    assert build.revision_sha == default_revision.sha
| apache-2.0 | Python | |
fbb8bffd5e1cb633cc59eb5b1e61fef2067e836a | add empty unit test for viewimage module | julien6387/supvisors,julien6387/supervisors,julien6387/supvisors,julien6387/supervisors,julien6387/supervisors,julien6387/supervisors,julien6387/supvisors,julien6387/supvisors | supvisors/tests/test_viewimage.py | supvisors/tests/test_viewimage.py | #!/usr/bin/python
#-*- coding: utf-8 -*-
# ======================================================================
# Copyright 2016 Julien LE CLEACH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================================================
import sys
import unittest
class ViewImageTest(unittest.TestCase):
    """ Test case for the viewimage module. """

    def test_stats_images(self):
        """ Test that the module-level image buffers exist after import. """
        from supvisors.viewimage import address_image_contents, process_image_contents
        self.assertIsNotNone(address_image_contents)
        self.assertIsNotNone(process_image_contents)

    def test_address_image_view(self):
        """ Test that an AddressImageView can be constructed. """
        from supvisors.viewimage import AddressImageView
        view = AddressImageView()
        self.assertIsNotNone(view)

    def test_process_image_view(self):
        """ Test that a ProcessImageView can be constructed. """
        from supvisors.viewimage import ProcessImageView
        view = ProcessImageView()
        self.assertIsNotNone(view)
def test_suite():
    """Collect every TestCase defined in this module into a suite."""
    # NOTE(review): unittest.findTestCases is deprecated (removed in
    # Python 3.13); TestLoader.loadTestsFromModule is the replacement.
    return unittest.findTestCases(sys.modules[__name__])

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| apache-2.0 | Python | |
0f395ca18526ea4c6675bd772cc9af88a6baf006 | Create __init__.py | Fillll/reddit2telegram,nsiregar/reddit2telegram,Fillll/reddit2telegram,nsiregar/reddit2telegram | channels/r_gonewild30plus/__init__.py | channels/r_gonewild30plus/__init__.py | mit | Python | ||
10fe17255335e16aac9f828764050bd2c0874175 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/79deb43ff6fe999bc7767f19ebdc20814b1dfe80. | karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessa
rd/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT, pinned to the commit/sha below."""

    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "79deb43ff6fe999bc7767f19ebdc20814b1dfe80"
    TFRT_SHA256 = "3b9a217602bd20258595ebe997a204f675aad2006ce3d9cdeb2b431f8564e28d"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
    """Imports TFRT, pinned to the commit/sha below."""

    # Attention: tools parse and update these lines.
    TFRT_COMMIT = "0ca0913cd468b657b76a001b74ffa6e91e4eed03"
    TFRT_SHA256 = "a85d109a5ca7daee97115903784bdc5430ae1421090f74c45080fc56f0e04351"

    tf_http_archive(
        name = "tf_runtime",
        sha256 = TFRT_SHA256,
        strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
        urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
        # A patch file can be provided for atomic commits to both TF and TFRT.
        # The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
        patch_file = None,
    )
| apache-2.0 | Python |
430b5daebbd5385551203c2a0cf23bb355a2c027 | Add a script that uses the Flickr API. | danielballan/photomosaic | doc/source/scripts/06-cat-of-cats.py | doc/source/scripts/06-cat-of-cats.py | import os
import photomosaic as pm
import photomosaic.flickr
import matplotlib.pyplot as plt
# For these published examples we use os.environ to keep our API key private.
# Just set your own Flickr API key here.
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']

# Get a pool of cat photos from Flickr.
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', 'cats/', 1000)
pool = pm.make_pool('cats/*.jpg')
pm.export_pool(pool, 'cats/pool.json') # save color analysis for future reuse

# Build mosaic.
# NOTE(review): 'img' is never defined in this script; a target image
# has to be loaded first or this line raises NameError -- confirm intent.
mosaic = pm.basic_mosaic(img, pool, (30, 30), depth=4)
# NOTE(review): plt.plot() on an image array looks like a typo for
# plt.imshow(mosaic) -- confirm.
plt.plot(mosaic)
plt.show()
| bsd-3-clause | Python | |
3aad37e287a6d0fdb037393d4fcc30817c9dece5 | Create statanalyisi.py | zsalmasi/much-ado-about-nothing-project | statanalyisi.py | statanalyisi.py | import string #This line defines string so as to be able to get rid of punctuation.
import re
# Prompt for the corpus file; fall back to the bundled play text.
name = raw_input("Enter file:")
if len(name) < 1 : name = "manqnohyphen.txt"
print "Much Ado about Nothing statistics"

# --- Character and word counts -------------------------------------
handle = open(name)
text = handle.read()
print ''
print 'There are', len(text), 'characters in the text.' #prints the number of characters in the text
lexis = text.split()
print ''
print 'There are', len(lexis), 'words in the text.'

# --- Word-frequency table (case-folded, punctuation stripped) ------
handle = open(name)
counts = dict()
for line in handle:
    line = line.rstrip()
    line = line.translate(None, string.punctuation) #This line gets rid of punctuation.
    words = line.split()
    for word in words:
        wrd = word.lower()
        counts[wrd] = counts.get(wrd,0) + 1
#print counts
# Flip to (occurrence, word) tuples so sorting orders by frequency.
lst = list()
for lexicon,occurrence in counts.items():
    lst.append((occurrence, lexicon))
print 'The least frequently used words are:'
lst.sort()
for occurrence, lexicon in lst[:]:
    if occurrence == 1:
        print lexicon, occurrence
print 'The most frequently used words are:'
lst.sort(reverse=True)
for occurrence, lexicon in lst[:30]:
    print lexicon, occurrence

# --- Hyphenated compounds ------------------------------------------
# NOTE(review): this section reopens "manqnohyphen.txt" directly and
# ignores the file chosen in 'name' above -- confirm that is intended.
handle = open("manqnohyphen.txt")
print "Compound words divided with a hyphen:"
lineno = 0
compoundno = 0
for line in handle:
    line = line.rstrip()
    lineno = lineno + 1
    cp = re.findall("\S+-\S+", line)
    if len(cp) > 0 :
        compoundno = compoundno + 1
        print cp
print "There are", lineno, "lines in the play."
print "The number of lines in which compounds divided with hyphen appear are", compoundno, "."
relfre = float(compoundno) / lineno
print "The relative frequency of the lines in which there are hyphenated compounds is:", relfre, "."

# --- Relative frequency of hyphenated compounds per word -----------
handle = open("manqnohyphen.txt")
text = handle.read()
lexis = text.split()
print ''
print 'There are', len(lexis), 'words in the text.'
# NOTE(review): 56.0 appears to be a hard-coded compound count; it
# should presumably be the 'compoundno' computed above -- confirm.
relfrewords = 56.0 / len(lexis)
print relfrewords
| cc0-1.0 | Python | |
b66b02be95e7b0c36a9ced53b07d91298190ca4a | Add tests for mpi4py.dl module | mpi4py/mpi4py,mpi4py/mpi4py,pressel/mpi4py,pressel/mpi4py,mpi4py/mpi4py,pressel/mpi4py,pressel/mpi4py | test/test_dl.py | test/test_dl.py | from mpi4py import dl
import mpiunittest as unittest
import sys
import os
class TestDL(unittest.TestCase):
    """Exercise the mpi4py.dl wrappers around dlopen/dlsym/dlclose/dlerror."""

    def testDL1(self):
        # Resolve a real symbol from the math library; the shared-object
        # name differs between macOS and other POSIX systems.
        if sys.platform == 'darwin':
            libm = 'libm.dylib'
        else:
            libm = 'libm.so'
        handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY)
        self.assertTrue(handle != 0)
        self.assertTrue(dl.dlerror() is None)
        symbol = dl.dlsym(handle, 'sqrt')
        self.assertTrue(symbol != 0)
        self.assertTrue(dl.dlerror() is None)
        # Unknown symbols must return NULL and set the error string.
        symbol = dl.dlsym(handle, 'xxxxx')
        self.assertTrue(symbol == 0)
        self.assertTrue(dl.dlerror() is not None)
        ierr = dl.dlclose(handle)
        self.assertTrue(ierr == 0)
        self.assertTrue(dl.dlerror() is None)

    def testDL2(self):
        # dlopen(None) yields a handle for the main program's global scope.
        handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW)
        self.assertTrue(handle != 0)
        self.assertTrue(dl.dlerror() is None)
        symbol = dl.dlsym(handle, 'malloc')
        self.assertTrue(symbol != 0)
        self.assertTrue(dl.dlerror() is None)
        symbol = dl.dlsym(handle, '!@#$%^&*()')
        self.assertTrue(symbol == 0)
        self.assertTrue(dl.dlerror() is not None)
        ierr = dl.dlclose(handle)
        self.assertTrue(ierr == 0)
        self.assertTrue(dl.dlerror() is None)

    def testDL3(self):
        # Opening a nonexistent library fails with a NULL handle.
        handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY)
        self.assertTrue(handle == 0)
        self.assertTrue(dl.dlerror() is not None)
# The dl API only exists on POSIX platforms; drop the suite elsewhere so
# the test runner does not attempt it.
if os.name != 'posix':
    del TestDL

if __name__ == '__main__':
    unittest.main()
| bsd-2-clause | Python | |
63c4883c8bbcd9d1d2ef9417a776ea6134e8e48c | Add python file | UrsusPilot/python-plot | gui.py | gui.py | ################################################################################
# File name: gui.py
#
# Function: Display three data from stm32f4 using Python (matplotlib)
# The three data is roll, pith, yall angle of quadcopter attitude.
#
# Reference:http://electronut.in/plotting-real-time-data-from-arduino-using-python/
#
################################################################################
import sys, serial
import numpy as np
from time import sleep
from collections import deque
from matplotlib import pyplot as plt
# class that holds analog data for N samples
class AnalogData:
    """Fixed-length rolling buffers for three analog channels.

    Each channel starts zero-filled at length maxLen; new samples are
    pushed at the left end while the oldest value drops off the right.
    """

    def __init__(self, maxLen):
        self.ax = deque([0.0] * maxLen)
        self.ay = deque([0.0] * maxLen)
        self.az = deque([0.0] * maxLen)
        self.maxLen = maxLen

    def addToBuf(self, buf, val):
        """Insert one value into *buf*, keeping at most maxLen entries."""
        if len(buf) < self.maxLen:
            buf.append(val)
        else:
            # Full: drop the oldest (rightmost) sample, newest goes left.
            buf.pop()
            buf.appendleft(val)

    def add(self, data):
        """Push one (ax, ay, az) sample triple into the buffers."""
        assert(len(data) == 3)
        for buf, sample in zip((self.ax, self.ay, self.az), data):
            self.addToBuf(buf, sample)
# plot class
class AnalogPlot:
    """Live matplotlib line plot of the three AnalogData channels."""

    def __init__(self, analogData):
        # set plot to animated (interactive mode redraws without blocking)
        plt.ion()
        # One line per channel; the trailing comma unpacks plt.plot's list.
        self.axline, = plt.plot(analogData.ax)
        self.ayline, = plt.plot(analogData.ay)
        self.azline, = plt.plot(analogData.az)
        plt.ylim([0, 180]) # [0,Y]: this Y is meaning vertical axis scale.

    def update(self, analogData):
        """Refresh the three lines from the current buffer contents."""
        self.axline.set_ydata(analogData.ax)
        self.ayline.set_ydata(analogData.ay)
        self.azline.set_ydata(analogData.az)
        plt.draw()
def main():
    """Read 'roll pitch yaw' triples from a serial port and plot them live."""
    # expects 1 arg - serial port string
    if(len(sys.argv) != 2):
        print "Example usage: python gui.py '/dev/ttyUSB0'"
        exit(1)
    #strPort = '/dev/tty.usbserial-A7006Yqh'
    strPort = sys.argv[1];
    # plot parameters
    analogData = AnalogData(200) # (X) this X is meaning horizontal scale
    analogPlot = AnalogPlot(analogData)
    print "plotting data..."
    # open serial port at 9600 baud
    ser = serial.Serial(strPort, 9600)
    while True:
        try:
            line = ser.readline()
            data = [float(val) for val in line.split()]
            # NOTE(review): this print indexes data[0..2] BEFORE the
            # length check below, so a short/garbled serial line raises
            # IndexError -- confirm and move it inside the if.
            print data[0] , data[1] , data[2] #Show three data on the terminal
            if(len(data) == 3):
                analogData.add(data)
                analogPlot.update(analogData)
        except KeyboardInterrupt:
            print "exiting"
            break
    # close serial
    ser.flush()
    ser.close()

# call main
if __name__ == '__main__':
    main()
| mit | Python | |
cf3cae6493a369173244e05d190cceae41b9abbd | Add some coverage for olog callback. | ericdill/bluesky,ericdill/bluesky | bluesky/tests/test_olog_cb.py | bluesky/tests/test_olog_cb.py | from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
# Collected 'text' payloads from each callback invocation, in call order.
text = []


def f(**entry):
    """Record the rendered log-entry text handed to the olog callback."""
    text.append(entry['text'])
def test_default_template(fresh_RE):
    """The default description template renders non-empty text."""
    text.clear()
    fresh_RE.subscribe('start', logbook_cb_factory(f))
    fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
    assert len(text[0]) > 0

def test_trivial_template(fresh_RE):
    """A constant desc_template is passed through verbatim."""
    text.clear()
    fresh_RE.subscribe('start', logbook_cb_factory(f, desc_template='hello'))
    fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
    assert text[0] == 'hello'
    # smoke test the long_template
    fresh_RE.subscribe('start', logbook_cb_factory(f, long_template='hello'))
    fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])

def test_template_dispatch(fresh_RE):
    """Templates are selected per plan_name via the dispatch dict."""
    disp = {'a': 'A', 'b': 'B'}
    text.clear()
    fresh_RE.subscribe('start', logbook_cb_factory(f, desc_dispatch=disp))
    fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
              Msg('close_run')])
    fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
              Msg('close_run')])
    assert text[0] == 'A'
    assert text[1] == 'B'
    # smoke test the long_dispatch
    fresh_RE.subscribe('start', logbook_cb_factory(f, long_dispatch=disp))
    fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
              Msg('close_run')])
    fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
              Msg('close_run')])
| bsd-3-clause | Python | |
57cfba649e3d7a441e7c10a25448ccb8413b964e | Put PageAdmin back | Elarnon/mangaki,RaitoBezarius/mangaki,Mako-kun/mangaki,Mako-kun/mangaki,RaitoBezarius/mangaki,Elarnon/mangaki,Elarnon/mangaki,RaitoBezarius/mangaki,Mako-kun/mangaki | mangaki/mangaki/admin.py | mangaki/mangaki/admin.py | # coding=utf8
from mangaki.models import Anime, Track, OST, Artist, Rating, Page, Suggestion
from django.forms import Textarea
from django.db import models
from django.contrib import admin, messages
# Django admin configuration for the Mangaki models.
class AnimeAdmin(admin.ModelAdmin):
    # Search by primary key or title; filter the change list by NSFW flag.
    search_fields = ('id', 'title')
    list_display = ('id', 'title', 'nsfw')
    list_filter = ('nsfw',)

class TrackAdmin(admin.ModelAdmin):
    pass

class OSTAdmin(admin.ModelAdmin):
    pass

class ArtistAdmin(admin.ModelAdmin):
    pass

class RatingAdmin(admin.ModelAdmin):
    pass

class PageAdmin(admin.ModelAdmin):
    pass

class SuggestionAdmin(admin.ModelAdmin):
    list_display = ('work', 'problem', 'date', 'user', 'is_checked')
    list_filter = ('problem',)

# Register every model with its admin class above.
admin.site.register(Anime, AnimeAdmin)
admin.site.register(Track, TrackAdmin)
admin.site.register(OST, OSTAdmin)
admin.site.register(Artist, ArtistAdmin)
admin.site.register(Rating, RatingAdmin)
admin.site.register(Page, PageAdmin)
admin.site.register(Suggestion, SuggestionAdmin)
| # coding=utf8
from mangaki.models import Anime, Track, OST, Artist, Rating, Page, Suggestion
from django.forms import Textarea
from django.db import models
from django.contrib import admin, messages
# Django admin configuration for the Mangaki models.
class AnimeAdmin(admin.ModelAdmin):
    # Search by primary key or title; filter the change list by NSFW flag.
    search_fields = ('id', 'title')
    list_display = ('id', 'title', 'nsfw')
    list_filter = ('nsfw',)

class TrackAdmin(admin.ModelAdmin):
    pass

class OSTAdmin(admin.ModelAdmin):
    pass

class ArtistAdmin(admin.ModelAdmin):
    pass

class RatingAdmin(admin.ModelAdmin):
    pass

class PageAdmin(admin.ModelAdmin):
    pass

class SuggestionAdmin(admin.ModelAdmin):
    list_display = ('work', 'problem', 'date', 'user', 'is_checked')
    list_filter = ('problem',)

# NOTE(review): PageAdmin is defined above but Page is never registered
# here -- confirm whether the registration was dropped intentionally.
admin.site.register(Anime, AnimeAdmin)
admin.site.register(Track, TrackAdmin)
admin.site.register(OST, OSTAdmin)
admin.site.register(Artist, ArtistAdmin)
admin.site.register(Rating, RatingAdmin)
admin.site.register(Suggestion, SuggestionAdmin)
| agpl-3.0 | Python |
e3d92ce2cd17a967ac19aecad2998c4094f2ae11 | Add script to draw NACA foil | petebachant/CFT-vectors | run.py | run.py | #!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
import gizeh as gz
import numpy as np
import matplotlib.pyplot as plt
def gen_naca_points(naca="0020", c=100, npoints=100):
    """Return (x, y) outline points for a symmetric 4-digit NACA foil.

    The upper surface is generated from the standard NACA thickness
    polynomial, then mirrored to produce the lower surface; the result
    is a list of 2*npoints coordinate tuples scaled to chord length c.
    """
    x = np.linspace(0, 1, npoints) * c
    # Last two digits of the designation give thickness as % of chord.
    thickness = float(naca[2:]) / 100.0
    xc = x / c
    y = 5.0 * thickness * c * (0.2969 * np.sqrt(xc)
                               - 0.1260 * xc
                               - 0.3516 * xc**2
                               + 0.2843 * xc**3
                               - 0.1015 * xc**4)
    # Append the mirrored lower surface, traversed back to the leading edge.
    x = np.append(x, x[::-1])
    y = np.append(y, -y[::-1])
    return list(zip(x, y))
def test_gen_naca_points():
    """Visual sanity check: scatter-plot the generated foil outline."""
    points = gen_naca_points()
    x = []
    y = []
    for p in points:
        x.append(p[0])
        y.append(p[1])
    fig, ax = plt.subplots()
    ax.plot(x, y, "o")
    # Equal aspect ratio so the foil is not distorted.
    ax.set_aspect(1)
    plt.show()
def draw_foil(naca="0020", c=100):
    """Return a gizeh polyline tracing the given NACA foil outline,
    centred at canvas position (300, 300)."""
    points = gen_naca_points(naca, c)
    line = gz.polyline(points, close_path=False, stroke_width=2, xy=(300, 300))
    return line
def main():
    """Render the foil onto a 700x700 canvas and write cft-vectors.png."""
    canvas = gz.Surface(width=700, height=700)
    foil = draw_foil()
    foil.draw(canvas)
    canvas.write_to_png("cft-vectors.png")

if __name__ == "__main__":
    main()
| mit | Python | |
4b8c5dd8ebc4261bcdb5e9f92e7936eba68fd5ad | Add BNB prediction script | rnowling/pop-gen-models | bernoulli_nb/bnb_predict.py | bernoulli_nb/bnb_predict.py | import sys
from sklearn.naive_bayes import BernoulliNB as BNB
import matplotlib.pyplot as plt
import numpy as np
def read_variants(flname):
    """Parse a variants file into a list of marker records.

    File format: lines containing "Marker" start a new marker section;
    within a section, lines containing "Population" start a new
    population (numbered from 0), and every other line holds the
    whitespace-separated genotype floats of one individual.

    Returns a list of dicts with keys:
      "individuals"       -- 2D numpy array, one row per individual
      "population_labels" -- 1D numpy array of population indices
    """
    markers = []
    individuals = []
    population_ids = []
    population = -1

    def flush():
        # Package everything accumulated since the last "Marker" line.
        # (This de-duplicates the record-building code the original
        # repeated both inside and after the loop.)
        markers.append({
            "individuals": np.array(individuals),
            "population_labels": np.array(population_ids),
        })

    # 'with' guarantees the handle is closed even if a line fails to
    # parse; the original leaked the file on error.
    with open(flname) as fl:
        for ln in fl:
            if "Marker" in ln:
                if len(individuals) == 0:
                    # First header (or empty section): nothing to emit.
                    continue
                flush()
                population = -1
                population_ids = []
                individuals = []
            elif "Population" in ln:
                population += 1
            else:
                # Materialize the floats as a list: under Python 3,
                # map() returns an iterator that numpy cannot stack
                # into a proper 2D array.
                individuals.append([float(v) for v in ln.strip().split()])
                population_ids.append(population)

    if len(individuals) != 0:
        flush()
    return markers
def predict_scores(markers, threshold=0.05):
    """Score markers with a Bernoulli naive Bayes classifier.

    For each marker a BernoulliNB is fit on its individuals against the
    population labels, and the training accuracy is recorded as a
    (score, marker_index) pair; markers whose fit fails score 0.0.
    Returns the top `threshold` fraction of pairs, best score first.
    """
    scores = []
    for i, marker in enumerate(markers):
        try:
            bnb = BNB()
            bnb.fit(marker["individuals"], marker["population_labels"])
            # Record (score, index) pairs: the original appended a bare
            # float here, which was inconsistent with the except branch
            # below and broke sorting (mixed types) and write_scores().
            score = bnb.score(marker["individuals"],
                              marker["population_labels"])
            scores.append((score, i))
        except Exception:
            # Fitting can fail, e.g. for a marker with a single class;
            # narrowed from a bare except so Ctrl-C still interrupts.
            scores.append((0.0, i))
    scores.sort()
    scores.reverse()
    cutoff_idx = int(threshold * len(scores))
    return scores[:cutoff_idx]
def write_scores(scores, flname):
    """Write one "loci score" line per (loci, score) pair to *flname*."""
    with open(flname, "w") as out:
        for loci, score in scores:
            out.write("%s %s\n" % (loci, score))
if __name__ == "__main__":
    # Usage: bnb_predict.py <variants file> <output scores file>
    variants_fl = sys.argv[1]
    scores_flname = sys.argv[2]
    variants = read_variants(variants_fl)
    scores = predict_scores(variants)
    write_scores(scores, scores_flname)
| apache-2.0 | Python | |
2ccbed0e4d867652554dff208d7c1b7bdd1710f9 | add bench/test_curry | berrytj/toolz,cpcloud/toolz,whilo/toolz,obmarg/toolz,berrytj/toolz,simudream/toolz,whilo/toolz,pombredanne/toolz,quantopian/toolz,llllllllll/toolz,jdmcbr/toolz,karansag/toolz,quantopian/toolz,llllllllll/toolz,karansag/toolz,pombredanne/toolz,jdmcbr/toolz,machinelearningdeveloper/toolz,machinelearningdeveloper/toolz,simudream/toolz,jcrist/toolz,cpcloud/toolz,obmarg/toolz,bartvm/toolz,jcrist/toolz,bartvm/toolz | bench/test_curry.py | bench/test_curry.py | from toolz.curried import get
# 100k identical pairs -- enough iterations for call overhead to dominate.
pairs = [(1, 2) for i in range(100000)]

def test_get_curried():
    """Benchmark: apply a curried get(0) across many pairs."""
    # Curry once outside the loop so only the call itself is measured.
    first = get(0)
    for p in pairs:
        first(p)
| bsd-3-clause | Python | |
3017396176491dae5bab3effda82a59e64a3591f | Add button_example | lazka/pgi,lazka/pgi | examples/pygobject/button_example.py | examples/pygobject/button_example.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2011 Sebastian Pölsterl
#
# Permission is granted to copy, distribute and/or modify this document
# under the terms of the GNU Free Documentation License, Version 1.3
# or any later version published by the Free Software Foundation;
# with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
import sys
sys.path.insert(0, '../..')
import pgi
pgi.install_as_gi()
from gi.repository import Gtk
class ButtonWindow(Gtk.Window):
    """Demo window showing three kinds of Gtk buttons in a row."""

    def __init__(self):
        Gtk.Window.__init__(self, title="Button Demo")
        self.set_border_width(10)
        hbox = Gtk.Box(spacing=6)
        self.add(hbox)
        # Plain labelled button.
        button = Gtk.Button("Click Me")
        button.connect("clicked", self.on_click_me_clicked)
        hbox.pack_start(button, True, True, 0)
        # Stock button: label and icon come from the stock item.
        button = Gtk.Button(stock=Gtk.STOCK_OPEN)
        button.connect("clicked", self.on_open_clicked)
        hbox.pack_start(button, True, True, 0)
        # Mnemonic button: the underscore makes Alt+C activate it.
        button = Gtk.Button("_Close", use_underline=True)
        button.connect("clicked", self.on_close_clicked)
        hbox.pack_start(button, True, True, 0)

    def on_click_me_clicked(self, button):
        print "\"Click me\" button was clicked"

    def on_open_clicked(self, button):
        print "\"Open\" button was clicked"

    def on_close_clicked(self, button):
        # Quit the main loop, ending the application.
        print "Closing application"
        Gtk.main_quit()

win = ButtonWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
| lgpl-2.1 | Python | |
efc7097c0248394716144f552522daa1d44b74ce | add test python script | dennissergeev/atmosscibot | twython_test.py | twython_test.py | import datetime
import sys
from twython import Twython
from gettokens import tokens
# Tweet body is the current UTC timestamp, so repeat runs don't trip
# Twitter's duplicate-status rejection.
tweet = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
# Credentials come from gettokens.tokens, keeping secrets out of this file.
api = Twython(tokens['api_key'],
              tokens['api_secret'],
              tokens['access_token'],
              tokens['access_token_secret'])
api.update_status(status=tweet)
#print("Tweeted: " + tweet)
| mit | Python | |
bb5457ab736b5f94b9efb9772da16c5ebf97fa06 | Test for interpolation functions | senarvi/theanolm,senarvi/theanolm | tests/theanolm/probfunctions_test.py | tests/theanolm/probfunctions_test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import math
from theanolm.probfunctions import *
class TestProbFunctions(unittest.TestCase):
    """Unit tests for the log-probability interpolation helpers."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_interpolate_linear(self):
        """Linear interpolation mixes in probability space on log inputs."""
        self.assertAlmostEqual(
            interpolate_linear(math.log(0.2), math.log(0.3), 0.25),
            math.log(0.25 * 0.2 + 0.75 * 0.3))
        # -inf (probability zero) contributes nothing to the mixture.
        self.assertAlmostEqual(
            interpolate_linear(float('-inf'), math.log(0.3), 0.01),
            math.log(0.3 * 0.99))
        self.assertEqual(
            interpolate_linear(float('-inf'), -10.0, 0.0),
            -10.0)
        self.assertAlmostEqual(
            interpolate_linear(math.log(0.3), float('-inf'), 0.99),
            math.log(0.3 * 0.99))
        self.assertEqual(
            interpolate_linear(-10.0, float('-inf'), 1.0),
            -10.0)
        # Very negative log-probabilities must not underflow to -inf.
        self.assertAlmostEqual(
            interpolate_linear(-1001, -1002, 0.25),
            -1001.64263, # ln(0.25 * exp(-1001) + 0.75 * exp(-1002))
            places=4)

    def test_interpolate_loglinear(self):
        """Log-linear interpolation is a weighted sum of the log values."""
        self.assertEqual(
            interpolate_loglinear(-1001.0, -1002.0, 0.25, 0.75),
            -1001.75)
        # A zero-probability component zeroes the whole product ...
        self.assertEqual(
            interpolate_loglinear(float('-inf'), -1002.0, 0.25, 0.75),
            float('-inf'))
        # ... unless its weight is exactly zero.
        self.assertEqual(
            interpolate_loglinear(float('-inf'), -1002.0, 0.0, 1.0),
            -1002.0)
        self.assertEqual(
            interpolate_loglinear(-1001.0, float('-inf'), 0.25, 0.75),
            float('-inf'))
        self.assertEqual(
            interpolate_loglinear(-1001.0, float('-inf'), 1.0, 0.0),
            -1001.0)

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| apache-2.0 | Python | |
b8e27997000c448d191121ef0f8b08ebca877ed0 | Add GNU Prolog package. | tmerrick1/spack,LLNL/spack,matthiasdiener/spack,lgarren/spack,iulian787/spack,tmerrick1/spack,LLNL/spack,skosukhin/spack,krafczyk/spack,lgarren/spack,matthiasdiener/spack,tmerrick1/spack,tmerrick1/spack,iulian787/spack,iulian787/spack,EmreAtes/spack,iulian787/spack,LLNL/spack,matthiasdiener/spack,mfherbst/spack,mfherbst/spack,matthiasdiener/spack,lgarren/spack,krafczyk/spack,tmerrick1/spack,skosukhin/spack,mfherbst/spack,iulian787/spack,mfherbst/spack,mfherbst/spack,LLNL/spack,TheTimmy/spack,TheTimmy/spack,TheTimmy/spack,skosukhin/spack,krafczyk/spack,TheTimmy/spack,krafczyk/spack,EmreAtes/spack,skosukhin/spack,TheTimmy/spack,matthiasdiener/spack,LLNL/spack,skosukhin/spack,EmreAtes/spack,lgarren/spack,EmreAtes/spack,lgarren/spack,krafczyk/spack,EmreAtes/spack | var/spack/repos/builtin/packages/gnu-prolog/package.py | var/spack/repos/builtin/packages/gnu-prolog/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class GnuProlog(Package):
    """A free Prolog compiler with constraint solving over finite domains."""

    homepage = "http://www.gprolog.org/"
    url = "http://www.gprolog.org/gprolog-1.4.4.tar.gz"

    version('1.4.4', '37009da471e5217ff637ad1c516448c8')

    # Parallel make is not reliable for this package; force a serial build.
    parallel = False

    def install(self, spec, prefix):
        # The autotools build lives in src/, not the tarball root.
        with working_dir('src'):
            configure('--with-install-dir=%s' % prefix,
                      '--without-links-dir')
            make()
            make('install')
| lgpl-2.1 | Python | |
d57a1049b45614abfe393f80328a07d78c98b5b2 | Add system tray support | gearlles/planb-client,gearlles/planb-client,gearlles/planb-client | main.py | main.py | #!/usr/bin/env python
# -*-coding: utf8 -*-
import wx
import webbrowser
TRAY_TOOLTIP = 'System Tray Demo'
TRAY_ICON = 'icon/network.png'
def create_menu_item(menu, label, func):
    # Append a menu item labelled `label` to `menu` and invoke `func` on EVT_MENU.
    # (Python 2 / classic wxPython API: MenuItem id -1 means "auto-assign".)
    item = wx.MenuItem(menu, -1, label)
    menu.Bind(wx.EVT_MENU, func, id=item.GetId())
    menu.AppendItem(item)
    return item
class TaskBarIcon(wx.TaskBarIcon):
    # System-tray icon: left double-click opens a browser, right-click pops a menu.
    def __init__(self):
        super(TaskBarIcon, self).__init__()
        self.set_icon(TRAY_ICON)
        self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, self.on_double_click)

    def CreatePopupMenu(self):
        # Called by wxPython each time the tray icon is right-clicked.
        menu = wx.Menu()
        create_menu_item(menu, 'Open', self.on_double_click)
        create_menu_item(menu, 'Settings', self.on_settings)
        menu.AppendSeparator()
        create_menu_item(menu, 'Exit', self.on_exit)
        return menu

    def set_icon(self, path):
        # Load the bitmap at `path` and install it with the tooltip text.
        icon = wx.IconFromBitmap(wx.Bitmap(path))
        self.SetIcon(icon, TRAY_TOOLTIP)

    def on_double_click(self, event):
        print 'Tray icon was left-clicked.'
        webbrowser.open('http://www.google.com', new=0, autoraise=True)

    def on_settings(self, event):
        # Placeholder: a settings window is not implemented yet.
        print 'Settings window.'

    def on_exit(self, event):
        # Destroy asynchronously so the menu event finishes dispatching first.
        wx.CallAfter(self.Destroy)
def main():
    # Create the wx application (False: don't redirect stdout/stderr),
    # install the tray icon, and enter the event loop.
    app = wx.App(False)
    TaskBarIcon()
    app.MainLoop()
if __name__ == '__main__':
main() | apache-2.0 | Python | |
a61bb392fd69c953d7d8c23155d1370bc145533d | Create TOA_Filtering.py | NANOGravDataManagement/bridge,NANOGravDataManagement/bridge,shakeh/bridge | filtering-libstempo/TOA_Filtering.py | filtering-libstempo/TOA_Filtering.py | # TOA_Filtering.py
# A script that takes in a .par file, .tim file, start time and end time, and an output directory
# as a result, it creates a new file with TOAs in the time range, stored in output directory
# sample input:
# python TOA_Filtering.py /Users/fkeri/Desktop/B1855+09_NANOGrav_9yv0.par /Users/fkeri/Desktop/B1855+09_NANOGrav_9yv0.tim 51000 56000 /Users/fkeri/Desktop/
# we can see that it takes in 5 line arguments: [INPUT .par], [INPUT .tim], [TIME START], [TIME END], [OUTPUT DIRECTORY]
# [TIME START] and [TIME END] formats: MJD or YYYY/MM/DD
# the output file will have the same name as the input file, with "TOArange_" as a prefix: "TOArange_B1855+09_NANOGrav_9yv0.tim"
# it is possible to name the output file differently by putting the file name in [OUTPUT DIRECTORY]: /Users/fkeri/Desktop/filename.tim
import sys
import math
import datetime
import libstempo as T
#import jdcal
import glob
import os.path
def remove_empty(A):
    """Return a copy of sequence A with empty-string entries removed.

    Used to clean up the token list produced by splitting a .tim line on
    single spaces (runs of spaces yield "" tokens).
    """
    # A comprehension replaces the original index-loop-and-append.
    return [item for item in A if item != ""]
def date2mjd(year, month, day):
    """Convert a Gregorian calendar date to Modified Julian Date.

    The original implementation called jdcal.gcal2jd, but the `import jdcal`
    line is commented out at the top of this file, so it raised NameError at
    runtime. The MJD epoch is 1858-11-17 00:00 UT, so the whole-day offset
    from that date IS the MJD; stdlib `datetime` date arithmetic suffices.
    """
    epoch = datetime.date(1858, 11, 17)  # MJD 0
    # Return a float to match the numeric comparisons done in transform()/main.
    return float((datetime.date(year, month, day) - epoch).days)
def isFloat(X):
    """Return True when X parses as a float, False otherwise."""
    try:
        float(X)
    except ValueError:
        return False
    return True
def transform(X):
    """Parse a user-supplied epoch string.

    Accepts either a plain MJD number or a date written as YYYY/MM/DD or
    YYYY-MM-DD. Returns the MJD as a number, or the string "not ok" when the
    input contains an illegal character or an impossible month/day.
    """
    text = str(X)
    fields = ["", "", "", "", ""]
    separators = 0
    for ch in text:
        # Only '-', '.', '/' and the digits (codepoints 45..57) are legal.
        if not (44 < ord(ch) < 58):
            return "not ok"
        if ch == "-" or ch == "/":
            separators += 1
        else:
            fields[separators] += ch
    if separators == 2:
        # Two separators means a calendar date; sanity-check month and day.
        if int(fields[1]) > 12 or int(fields[2]) > 31:
            return "not ok"
        return date2mjd(int(fields[0]), int(fields[1]), int(fields[2]))
    # Otherwise treat the whole string as a numeric MJD.
    return float(X)
# --- Main script: filter the .tim file's TOAs to [start, end] -----------------
# argv: [1]=.par file  [2]=.tim file  [3]=start  [4]=end  [5]=output dir/file
inFile = open( sys.argv[2], "r" )
save_path = sys.argv[5]
# If argv[5] has no 3-letter extension, treat it as a directory and derive the
# output name by prefixing the input file name with "TOArange_".
if save_path[-4] != '.':
    nameFile = os.path.join( save_path, "TOArange_"+sys.argv[2].split("/")[-1] )
else:
    nameFile = save_path
outFile = open( nameFile, "w" )
inFile.readline() #omit first line
ALLlines = inFile.readlines()
start = str( sys.argv[3] )
end = str( sys.argv[4] )
# transform() accepts either MJD or YYYY/MM/DD and returns MJD or "not ok".
start = transform( start )
end = transform( end )
# Load the pulsar so psr.freqs/psr.stoas line up with the TOA lines in order.
psr = T.tempopulsar( parfile = sys.argv[1], timfile = sys.argv[2], maxobs = 100000 )
cnt = 0
# Validate the requested range; on any problem write the message to the output
# file instead of a TOA list.
if ( start == "not ok" ) or ( end == "not ok" ):
    outFile.write( "Wrong format! Please enter the data again! (either MJD or YYYY/MM/DD format)" )
else:
    if ( start < 0 ) or ( end < 0 ):
        outFile.write( "Your starting and ending points cannot be less than zero! Please enter the data again! (either MJD or YYYY/MM/DD format)" )
    else:
        if ( start > end ):
            outFile.write( "Your starting point cannot be greater than your ending point! Please enter the data again! (either MJD or YYYY/MM/DD format)" )
        else:
            # A line is recognized as the cnt-th TOA when one of its numeric
            # tokens equals psr.freqs[cnt]; keep it if its MJD is in range.
            for i in range( len( ALLlines ) ):
                X = ALLlines[i].split(' ')
                X = remove_empty( X )
                for j in range( len( X ) ):
                    if isFloat( X[j] ):
                        if float( X[j] ) == psr.freqs[cnt]:
                            if ( psr.stoas[cnt] >= start ) and ( psr.stoas[cnt] <= end ):
                                outFile.write( ' '.join( X ) )
                            cnt += 1
                            break
inFile.close()
outFile.close()
| apache-2.0 | Python | |
d6120537ec982f50d08fa188e91c68c023809db3 | Send ERROR when the user disconnects | Heufneutje/txircd,ElementalAlchemist/txircd | txircd/modules/rfc/response_error.py | txircd/modules/rfc/response_error.py | from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
    # Core txircd module: sends the IRC ERROR message to a client on quit.
    implements(IPlugin, IModuleData)

    name = "errorResponse"
    core = True

    def actions(self):
        # Hook sendError into the "quit" action at priority 10.
        return [("quit", 10, self.sendError)]

    def sendError(self, user, reason):
        # to=None / prefix=None: ERROR is sent without a target or server prefix.
        user.sendMessage("ERROR", ":Closing Link: {}@{} [{}]".format(user.ident, user.host, reason), to=None, prefix=None)
errorResponse = ErrorResponse() | bsd-3-clause | Python | |
12dda7a7473c094b4483789630e178fd60b0eba4 | add routes.py | fwilson42/dchacks2015,fwilson42/dchacks2015,fwilson42/dchacks2015 | web/transit/routes.py | web/transit/routes.py | import transit
import transit.views.basic
from transit import app
routes = (
('/api/trains/', transit.views.basic.get_all_trains),
('/api/trains/line/<line>/', transit.views.basic.get_trains_on_line),
('/api/trains/station/<station>/', transit.views.basic.get_trains_at_station)
)
# Register each (url_rule, view_function) pair on the Flask app.
# The endpoint name is the view function's name, as Flask's default would be.
for route, func in routes:
    # Original line was a SyntaxError (positional arg after keyword arg) and
    # referenced an undefined name `endpoint` instead of the loop variable.
    app.add_url_rule(route, endpoint=func.__name__, view_func=func)
| mit | Python | |
2a590a11f92e03d923d66cfc6e8fe5837feb0f20 | Add a snippet: 'matplotlib/gaussian_convolution'. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | matplotlib/gaussian_convolution/gaussian_convolution_1d.py | matplotlib/gaussian_convolution/gaussian_convolution_1d.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gaussian convolution 1D"""
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import matplotlib.pyplot as plt
SIGMA = 0.5
def d_square(x, xarray):
    """Squared Euclidean distance between scalar x and each entry of xarray."""
    # (For vector-valued x, a sum over axis 1 would be needed instead.)
    return np.power(x - xarray, 2)
def estimate(x, x_known, y_known, sigma):
    """Gaussian-kernel (Nadaraya-Watson) estimate of y at x.

    Each known sample contributes with weight exp(-d²/sigma²); the estimate
    is the weight-normalized average of y_known.

    The previous version returned a debug tuple (y_hat, term1, term2) — the
    intended `return y_hat` was commented out — which corrupted the y values
    plotted by main(). Restore the scalar return.
    """
    d = d_square(x, x_known)
    weights = np.exp(-1. / pow(sigma, 2) * d)
    numerator = np.sum(weights * y_known)
    normalizer = np.sum(weights)  # normalizes the weighted sum
    return numerator / normalizer
def main():
    """Plot a Gaussian-kernel regression through a set of known points."""
    # Known samples (the second x_known assignment in the original shadowed
    # the first; only the effective values are kept here).
    xs = np.array([-3., -2., -1.1, -1., 0., 1., 2.])
    ys = np.array([-1., -2., 1.2, 1., 4., 3., 2.])

    # Dense grid of query points and their kernel estimates.
    grid = np.arange(-5., 5., 0.05).tolist()
    estimates = np.array([estimate(x, xs, ys, SIGMA) for x in grid])

    # Known points as red stars, the estimate as a continuous curve.
    plt.plot(xs, ys, 'r*')
    plt.plot(grid, estimates)
    plt.xlabel('$x$')
    plt.ylabel('$\hat{y}$')
    plt.show()
if __name__ == '__main__':
main()
| mit | Python | |
fac031300c81c31f5d6022a65d3637fd4e62fa91 | add blink 3 script | mikebranstein/IoT,mikebranstein/IoT | raspberry-pi/led-blink3.py | raspberry-pi/led-blink3.py | import RPi.GPIO as GPIO
import time
def blink(ledPin, onTime, offTime):
    """Drive ledPin high for onTime seconds, then low for offTime seconds."""
    GPIO.output(ledPin, GPIO.HIGH)
    time.sleep(onTime)
    GPIO.output(ledPin, GPIO.LOW)
    time.sleep(offTime)
def blinkThree(redLedPin, blueLedPin, greenLedPin):
    """Blink red, blue, then green for one second each (no off-delay)."""
    for pin in (redLedPin, blueLedPin, greenLedPin):
        blink(pin, 1, 0)
# tell GPIO to use the Pi's board numbering for pins
GPIO.setmode(GPIO.BOARD)

# Physical (BOARD) pin numbers of the three LEDs.
redLedPin = 22
blueLedPin = 14
greenLedPin = 12

# set data direction of LED pin to output
GPIO.setup(redLedPin, GPIO.OUT)
GPIO.setup(blueLedPin, GPIO.OUT)
GPIO.setup(greenLedPin, GPIO.OUT)

# loop until keyboard interrupt
try:
    while True:
        blinkThree(redLedPin, blueLedPin, greenLedPin)
except KeyboardInterrupt:
    pass

# Release the GPIO pins so other programs can use them.
GPIO.cleanup()
ac44a041e3e7808305b025e1087f48b7d4a9234a | Add script to delete Bit.ly raw results from S3 | berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud | tools/bitly/delete_bitly_blobs.py | tools/bitly/delete_bitly_blobs.py | #!/usr/bin/env python3
import argparse
import boto3
import os
from typing import List
from mediawords.util.log import create_logger
l = create_logger(__name__)
def delete_bitly_blobs(story_ids: List[int]):
    """Delete the Bit.ly JSON result blobs for the given story IDs from S3."""
    session = boto3.Session(profile_name='mediacloud')
    s3 = session.resource('s3')
    bucket = s3.Bucket('mediacloud-bitly-processing-results')

    # delete_objects() accepts at most 1000 keys per request, so batch the IDs.
    batch_size = 999
    batches = [story_ids[i:i + batch_size] for i in range(0, len(story_ids), batch_size)]

    l.info('Deleting %d Bit.ly blobs, split into %d chunks...' % (len(story_ids), len(batches)))

    for batch in batches:
        keys = [{'Key': 'json_blobs/%d' % stories_id} for stories_id in batch]
        bucket.delete_objects(
            Delete={
                'Objects': keys,
            }
        )

    l.info('Done deleting %d Bit.ly blobs.' % len(story_ids))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Delete Bit.ly raw results from S3.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', '--input_file', type=str, required=True, help='Input file with Bit.ly story IDs.')
    args = parser.parse_args()

    if not os.path.isfile(args.input_file):
        raise Exception('Input file "%s" does not exist.' % args.input_file)

    # Read one integer story ID per line, skipping blank lines.
    bitly_story_ids = []
    with open(args.input_file, 'r') as fh:
        for line in fh:
            line = line.rstrip("\n")
            if line:
                line = int(line)
                bitly_story_ids.append(line)

    delete_bitly_blobs(story_ids=bitly_story_ids)
| agpl-3.0 | Python | |
37c0257fcc5e65b67fabfd17c2bf884ad8fe03e1 | Add migration to reset signatures | Osmose/normandy,mozilla/normandy,mozilla/normandy,Osmose/normandy,Osmose/normandy,mozilla/normandy,mozilla/normandy,Osmose/normandy | recipe-server/normandy/recipes/migrations/0038_remove_invalid_signatures.py | recipe-server/normandy/recipes/migrations/0038_remove_invalid_signatures.py | """
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-27 00:03
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
    # Use the historical models so the migration works at this schema version.
    Recipe = apps.get_model('recipes', 'Recipe')
    Signature = apps.get_model('recipes', 'Signature')
    # Detach each recipe's signature before deleting it, so the FK is cleared
    # on the recipe row first.
    for recipe in Recipe.objects.exclude(signature=None):
        sig = recipe.signature
        recipe.signature = None
        recipe.save()
        sig.delete()
    # Sweep up any orphaned signatures not attached to a recipe.
    for sig in Signature.objects.all():
        sig.delete()
class Migration(migrations.Migration):

    dependencies = [
        ('recipes', '0037_auto_20170113_0627'),
    ]

    operations = [
        # This functions as both a forward and reverse migration
        migrations.RunPython(remove_signatures, remove_signatures),
    ]
| mpl-2.0 | Python | |
8ff39701ce4549bf46f03342ee4ff6bdadddc2ff | Create Blob Upload script | CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge,CloudBoltSoftware/cloudbolt-forge | blueprints/gcp_storage/management/upload_blob.py | blueprints/gcp_storage/management/upload_blob.py | from __future__ import unicode_literals
import json
import os
from pathlib import Path
from typing import Optional
from common.methods import set_progress
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import Resource as GCPResource
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseUpload
from resourcehandlers.gcp.models import GCPHandler
from resources.models import Resource
# Action inputs rendered in-place by CloudBolt's template engine.
FILE = "{{file}}"
# bool("<non-empty string>") is True for ANY non-empty rendering — including
# "False" — so the old bool(...) cast made every upload public. Compare the
# rendered text explicitly instead.
MAKE_BLOB_PUBLIC = "{{make_blob_public}}".strip().lower() == "true"
# Helper functions for the discover_resources() function
def create_storage_api_wrapper(handler: GCPHandler) -> Optional[GCPResource]:
    """
    Using googleapiclient.discovery, build the api wrapper for the storage api.
    Returns None when the handler has no stored GCP credentials.

    https://googleapis.github.io/google-api-python-client/docs/dyn/storage_v1.html
    """
    if not handler.gcp_api_credentials:
        set_progress(f"Handler {handler} is missing gcp api credentials.")
        return None

    # The handler stores the Credentials constructor kwargs as a JSON blob.
    credentials_dict = json.loads(handler.gcp_api_credentials)
    credentials = Credentials(**credentials_dict)

    set_progress(f"Connecting to GCP for handler: {handler}")
    # cache_discovery=False avoids oauth2client file-cache warnings/errors.
    storage_wrapper: GCPResource = build(
        "storage", "v1", credentials=credentials, cache_discovery=False
    )
    set_progress("Connection established")
    return storage_wrapper
def upload_object(
    wrapper, bucket_name: str, object_name: str, file_location: str, is_public: bool
):
    """
    Upload an object from a file to a bucket

    Media insertion:
    https://googleapis.github.io/google-api-python-client/docs/dyn/storage_v1.objects.html#insert
    Uploader:
    https://googleapis.github.io/google-api-python-client/docs/epy/googleapiclient.http.MediaIoBaseUpload-class.html
    """
    upload_kwargs = {
        "mimetype": "application/octet-stream",
        "chunksize": 1024 * 1024,
        "resumable": False,
    }
    insert_kwargs = {
        "bucket": bucket_name,
        "body": {},
        "name": object_name,
        "predefinedAcl": "publicRead" if is_public else "private",
    }
    set_progress(f"Opening file '{file_location}'")
    # Open in BINARY mode: the payload is declared application/octet-stream and
    # the uploader reads raw chunks. Text mode raised UnicodeDecodeError for
    # any non-UTF-8 (e.g. image/archive) file.
    with open(file_location, "rb") as file:
        set_progress("Beginning to upload file.")
        media = MediaIoBaseUpload(file, **upload_kwargs)
        wrapper.objects().insert(**insert_kwargs, media_body=media).execute()
    set_progress("Upload complete!")
# generate_options_for_* functions are used to create option in the ui
def generate_options_for_file_name(**kwargs):
    """
    Get all blobs/object names in the bucket.

    The bucket is the CB resource this action runs on; its objects are modeled
    as child Resources, so no GCP round-trip is needed here.
    """
    resource: Resource = kwargs.get("resource")
    if not resource:
        # No bucket context (e.g. previewing the action) -> no options.
        return []

    objects_in_bucket = Resource.objects.filter(parent_resource=resource)
    object_names = [o.name for o in objects_in_bucket]

    return object_names
def generate_options_for_make_blob_public(**kwargs):
    """Offer a yes/no choice for making the uploaded blob publicly readable."""
    options = [True, False]
    return options
def run(job, *args, **kwargs):
    """CloudBolt action entry point: upload FILE into the bucket resource.

    Returns the conventional (status, output, error) tuple.
    """
    # Confirm the path is valid
    if not os.path.exists(FILE):
        return "FAILURE", "The path to the file isn't a valid path.", ""
    file_name = Path(FILE).name

    # Get system information: the bucket resource this action runs on and the
    # resource handler that owns it.
    bucket: Resource = kwargs.get("resource")
    resource_handler = GCPHandler.objects.get(id=bucket.google_rh_id)

    # Connect to GCP
    wrapper = create_storage_api_wrapper(resource_handler)
    if not wrapper:
        error_message = "Please verify the connection on the Resource Handler."
        return "FAILURE", "", error_message

    # Upload the object
    upload_object(wrapper, bucket.name, file_name, FILE, MAKE_BLOB_PUBLIC)
    return f"SUCCESS", f"`{file_name}` Uploaded successfully", ""
| apache-2.0 | Python | |
f33ca28e7465a0b35d2419dd9016196f63a114d8 | Add demo.py | 7forz/numpy_pandas_tushare_learning | demo.py | demo.py | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
from indexes import *
import global_data
def main(stock='000001', date=global_data.NEWEST_TRADE_DATE, p_MA=5, p_MACD=(12,26,9),
         p_RSI=6, p_KDJ=(9,3), p_MTM=(12,6)):
    """Download data for one stock and print common technical indicators.

    Example parameter formats:
    stock: str, '000001' (share code)
    date: str, '2017-08-18'
    p_MA: int, 5
    p_MACD: tuple, (12,26,9)
    p_RSI: int, 6
    p_KDJ: tuple, (9,3)
    p_MTM: tuple, (12,6)
    """
    # Each indicator object is bound to the stock code and reads from the
    # shared global_data store.
    rsi = RSI(stock)
    ma = MA(stock)
    macd = MACD(stock)
    mtm = MTM(stock)
    kdj = KDJ(stock)
    global_data.add_data(stock)  # download data to database
    print(stock, date)
    print('MA%s' % str(p_MA), ma.get_ma(date, p_MA))
    print('MACD%s' % str(p_MACD), macd.get_macd(date, *p_MACD))
    print('RSI%s' % str(p_RSI), rsi.get_rsi(date, p_RSI))
    print('KDJ%s' % str(p_KDJ), kdj.get_kdj(date, *p_KDJ))
    print('MTM%s' % str(p_MTM), mtm.get_mtm(date, *p_MTM))
    # Persist whatever add_data() fetched.
    global_data.save_database(global_data.DB_FILE)
if __name__ == '__main__':
main()
| agpl-3.0 | Python | |
c19c1838802ef8b4429df605e085176aef3bb45f | Create 04_peery_beams.py | robbievanleeuwen/section-properties | examples/01-advanced/04_peery_beams.py | examples/01-advanced/04_peery_beams.py | r"""
.. _ref_ex_peery_beams:
Symmetric and Unsymmetric Beams in Complex Bending
--------------------------------------------------
Calculate section properties of two different beams
given in examples from 'Aircraft Structures,' by Peery.
These cases have known results, and the output from
SectionProperties can be compared for accuracy. These
examples represent a more rigorous 'proof' against a
'real' problem. Only results that have values in the
reference material are tested here.
BibTeX Entry for reference:
@Book{Peery,
title = {Aircraft Structures},
author = {David J. Peery},
organization = {Pennsylvania State University},
publisher = {McGraw-Hill Book Company},
year = {1950},
edition = {First},
ISBN = {978-0486485805}
}
"""
# sphinx_gallery_thumbnail_number = 1
from sectionproperties.pre.library import nastran_sections
from sectionproperties.analysis.section import Section
# %%
# Example 1 in Sec. 6.2 (Symmetric Bending)
# This is a symmetric I-section with no lateral supports,
# undergoing pure unidirectional cantilever bending.
# Note that units here are **inches**, to match the text.
#
# We'll use a very coarse mesh here, to show a conservative
# comparison for accuracy. Theoretically, with more
# discretization, we would capture the real results more accurately.
# Build the symmetric I-section (dimensions in inches, matching the text) and
# shift it down 3 in so the section is centred about the bending axis.
geometry = nastran_sections.nastran_i(6,3,3,1,1,1)
geometry = geometry.shift_section(x_offset=0,y_offset=-3)
# Deliberately coarse mesh (see note above) for a conservative comparison.
geometry = geometry.create_mesh(mesh_sizes=[0.25])
section = Section(geometry)
section.plot_mesh()

# %%
# Perform a geometric analysis on the section, and plot properties
# We don't need warping or plastic analysis for these simple checks.
section.calculate_geometric_properties()
section.plot_centroids()
| mit | Python | |
c29bf2b9a87fdeb58d78a3ef3219292742371314 | test script for BEL2 stmt checks | OpenBEL/openbel-server,OpenBEL/openbel-server,OpenBEL/openbel-server | bin/test_bel2_validation.py | bin/test_bel2_validation.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage: program.py <customer>
"""
import requests
import json
base_url = "http://localhost:9292"
files = ['bel2.0-example-statements.bel', 'bel2_document_examples.bel']
def send_request(bel):
    """GET the validation endpoint for one BEL expression.

    Returns (status_code, parsed_json_or_None); returns (None, None) when the
    request itself fails, so the caller's tuple unpack always works.
    """
    # Issue #134
    # GET http://localhost:9292/api/expressions/rxn(reactants(a(CHEBI:superoxide)),products(a(CHEBI:%22hydrogen%20peroxide%22),%20a(CHEBI:%20%22oxygen%22))/validation
    try:
        response = requests.get(
            url=f"{base_url}/api/expressions/{bel}/validation",
        )
    except requests.exceptions.RequestException as exc:
        # The old handler referenced `response` here, which is unbound when
        # requests.get() itself raised -> NameError. Report the exception and
        # return a well-formed tuple instead.
        print(f"Error {exc}, {bel}")
        return (None, None)

    try:
        r = response.json()
    except ValueError:  # body was not JSON (narrowed from a bare except)
        r = None
    # print(f"Status {response.status_code} Response: {r}")
    return (response.status_code, r)
def run_examples():
    """Validate every BEL statement in `files` and summarize the results.

    Failures (any non-200 status) are written to test_results.json.
    """
    results = []
    cnt = error_cnt = success_cnt = 0
    for fn in files:
        with open(fn, 'r') as f:
            # One BEL statement per line.
            for bel in f:
                cnt += 1
                bel = bel.strip()
                print(f"Running bel: {bel}")
                (status, msg) = send_request(bel)
                if status != 200:
                    error_cnt += 1
                    results.append((status, bel, msg))
                else:
                    success_cnt += 1

    print(f"Total: {cnt} Success: {success_cnt} Errors: {error_cnt}")
    with open('test_results.json', 'w') as f:
        json.dump(results, f, indent=4)
def main():
    # Entry point: run the full example suite against the local server.
    run_examples()
if __name__ == '__main__':
main()
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.