commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
88ec4243ff78fe511331461b7563bd49f7124fe2 | Add tuple. | tuple/tuple.py | tuple/tuple.py | #!/usr/local/bin/python
# Demonstrates Python 2 tuple syntax.
# A one-element tuple needs the trailing comma.
x=(42,)
print x
# Repeating a tuple with * concatenates copies: (3, 3, 3).
y=3*(3,)
print y
# tuple() over an iterable: ('h', 'e', 'l', 'l', 'o').  NOTE(review): z is never printed.
z=tuple("hello")
# Parentheses are optional; the commas make the tuple.
i=1,2,3
print i[2]
print i[0:2]
| Python | 0.000014 | |
24788b106b9cdd70e7240dc3eccac82fba290c85 | Add test for yaml enviroment | tests/util/test_yaml.py | tests/util/test_yaml.py | """Test Home Assistant yaml loader."""
import io
import unittest
import os
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
    """Test util.yaml loader."""
    # NOTE(review): these tests call yaml.yaml.safe_load directly, so any
    # custom constructors (e.g. !env_var) must already be registered when
    # homeassistant.util.yaml is imported -- TODO confirm.
    def test_simple_list(self):
        """Test that a simple YAML list parses into a Python list."""
        conf = "config:\n - simple\n - list"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['config'] == ["simple", "list"]
    def test_simple_dict(self):
        """Test that a simple key/value mapping parses into a dict."""
        conf = "key: value"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['key'] == 'value'
    def test_duplicate_key(self):
        """Test that a document with a duplicate key raises an error."""
        conf = "key: thing1\nkey: thing2"
        # Reaching the else branch means loading did NOT fail, which is wrong.
        try:
            with io.StringIO(conf) as f:
                yaml.yaml.safe_load(f)
        except Exception:
            pass
        else:
            assert 0
    def test_enviroment_variable(self):
        """Test that !env_var substitutes the named environment variable."""
        os.environ["PASSWORD"] = "secret_password"
        conf = "password: !env_var PASSWORD"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['password'] == "secret_password"
        # Clean up so the variable does not leak into other tests.
        del os.environ["PASSWORD"]
    def test_invalid_enviroment_variable(self):
        """Test that !env_var fails when the variable is not set."""
        conf = "password: !env_var PASSWORD"
        try:
            with io.StringIO(conf) as f:
                yaml.yaml.safe_load(f)
        except Exception:
            pass
        else:
            assert 0
| """Test Home Assistant yaml loader."""
import io
import unittest
from homeassistant.util import yaml
class TestYaml(unittest.TestCase):
    """Test util.yaml loader."""
    def test_simple_list(self):
        """Test that a simple YAML list parses into a Python list."""
        conf = "config:\n - simple\n - list"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['config'] == ["simple", "list"]
    def test_simple_dict(self):
        """Test that a simple key/value mapping parses into a dict."""
        conf = "key: value"
        with io.StringIO(conf) as f:
            doc = yaml.yaml.safe_load(f)
        assert doc['key'] == 'value'
    def test_duplicate_key(self):
        """Test that a document with a duplicate key raises an error."""
        conf = "key: thing1\nkey: thing2"
        # Reaching the else branch means loading did NOT fail, which is wrong.
        try:
            with io.StringIO(conf) as f:
                yaml.yaml.safe_load(f)
        except Exception:
            pass
        else:
            assert 0
| Python | 0 |
2b0a96791ad43ef1f27b610233dd34027cf83c75 | Create currency-style.py | CiO/currency-style.py | CiO/currency-style.py | import re
def checkio(text):
    """Convert dollar amounts in *text* to European currency style.

    For every non-space token following a '$' sign that ends in a digit:
    dots become commas (thousands separators), and when the final
    comma-separated group has exactly two digits it is treated as the
    decimal part and re-separated with a dot.
    """
    for token in re.findall(r'(?<=\$)[^ ]*\d', text):
        converted = token.replace('.', ',')
        head, sep, tail = converted.rpartition(',')
        if sep and len(tail) == 2:
            converted = head + '.' + tail
        # Replace every occurrence of the token in the text.
        text = text.replace(token, converted)
    return text
| Python | 0.000003 | |
a46f960e811123a137e4e5fe4350f6a850e9b33e | Create average-of-levels-in-binary-tree.py | Python/average-of-levels-in-binary-tree.py | Python/average-of-levels-in-binary-tree.py | # Time: O(n)
# Space: O(h)
# Given a non-empty binary tree,
# return the average value of the nodes on each level in the form of an array.
#
# Example 1:
# Input:
# 3
# / \
# 9 20
# / \
# 15 7
# Output: [3, 14.5, 11]
# Explanation:
# The average value of nodes on level 0 is 3,
# on level 1 is 14.5, and on level 2 is 11. Hence return [3, 14.5, 11].
#
# Note:
# The range of node's value is in the range of 32-bit signed integer.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def averageOfLevels(self, root):
        """
        Return the average node value of each level of a binary tree,
        top level first, using a breadth-first traversal.

        :type root: TreeNode (assumed non-empty, per the problem statement)
        :rtype: List[float]
        """
        # Bug fix: the original used `collections` without importing it,
        # raising NameError outside the LeetCode sandbox.  Imported locally
        # so the snippet stays self-contained.
        import collections
        result = []
        q = collections.deque([root])
        while q:
            total, count = 0, 0
            next_q = collections.deque()
            # Drain the current level, queueing its children for the next one.
            while q:
                node = q.popleft()
                total += node.val
                count += 1
                if node.left:
                    next_q.append(node.left)
                if node.right:
                    next_q.append(node.right)
            # Reuse the emptied deque for the following level.
            q, next_q = next_q, q
            # float() keeps true division under Python 2 as well.
            result.append(float(total) / count)
        return result
| Python | 0.004195 | |
2131eb1da8b221bfbce08bc9cac30123f08460ca | Add cron module to contrib | kitnirc/contrib/cron.py | kitnirc/contrib/cron.py | import datetime
import logging
import random
import threading
import time
from kitnirc.modular import Module
_log = logging.getLogger(__name__)
class Cron(object):
    """An individual cron entry.

    Holds the event name to dispatch plus the allowed seconds/minutes/hours
    values, and tracks the next datetime at which the entry should fire.
    """
    def __init__(self, event, seconds, minutes, hours):
        self.event = event
        # Each field is parsed into a sorted list of allowed values.
        self.seconds = self.parse_time_field(seconds, 60)
        self.minutes = self.parse_time_field(minutes, 60)
        self.hours = self.parse_time_field(hours, 24)
        now = datetime.datetime.now().replace(microsecond=0)
        self.next_fire = self.calculate_next_fire(now)
    def parse_time_field(self, inputstr, count):
        """Parse a comma-separated timespec into a sorted list of ints in [0, count)."""
        values = set()
        for item in inputstr.split(","):
            # See if it's just a single number.
            try:
                values.add(int(item))
                continue
            except ValueError:
                pass
            # ? can be used to specify "a single random value"
            if item == '?':
                values.add(random.randint(0, count-1))
                continue
            # * can be used to specify "all values"
            if item.startswith("*"):
                # With an optional /X to specify "every Xth value"
                _, _, divisor = item.partition("/")
                if divisor:
                    values.update(range(0, count, int(divisor)))
                else:
                    values.update(range(count))
                continue
            _log.warning("Ignoring invalid specifier '%s' for cron event '%s'",
                         item, self.event)
        # Ensure only values within the proper range are utilized
        return sorted(val for val in values if 0 <= val < count)
    def calculate_next_fire(self, after):
        """Return the first datetime strictly after *after* matching this entry."""
        # Keeps track of if we've already moved a field by at least
        # one notch, so that other fields are allowed to stay the same.
        equal_okay = False
        # Fall back to the smallest value (i.e. wrap around) in each field.
        next_second = self.seconds[0]
        for second in self.seconds:
            if second > after.second:
                next_second = second
                equal_okay = True
                break
        next_minute = self.minutes[0]
        for minute in self.minutes:
            if equal_okay and minute == after.minute:
                next_minute = minute
                break
            elif minute > after.minute:
                next_minute = minute
                equal_okay = True
                break
        next_hour = self.hours[0]
        for hour in self.hours:
            if equal_okay and hour == after.hour:
                next_hour = hour
                break
            elif hour > after.hour:
                next_hour = hour
                break
        next_fire = after.replace(hour=next_hour, minute=next_minute,
                                  second=next_second, microsecond=0)
        # If we need to roll over to the next day...
        if next_fire <= after:
            next_fire += datetime.timedelta(days=1)
        return next_fire
    def maybe_fire(self, client, after, upto):
        """Dispatch the event if next_fire falls in (after, upto], then reschedule."""
        if self.next_fire is None:
            return
        if after < self.next_fire <= upto:
            _log.debug("Cron event '%s' firing.", self.event)
            client.dispatch_event(self.event)
            self.next_fire = self.calculate_next_fire(upto)
class CronModule(Module):
    """A KitnIRC module which provides other modules with scheduling.
    Note: due to how this module interacts with other modules, reloading
    it without reloading other modules will result in previously added
    crons being wiped. If you need to reload this module, you should
    probably just reload all modules.
    """
    def __init__(self, *args, **kwargs):
        super(CronModule, self).__init__(*args, **kwargs)
        # Registered Cron entries; mutated by the ADDCRON/REMOVECRON handlers.
        self.crons = []
        self.last_tick = datetime.datetime.now()
        # Daemon thread so a hung loop cannot keep the process alive.
        self.thread = threading.Thread(target=self.loop, name='cron')
        self.thread.daemon = True
        self._stop = False
    def start(self, *args, **kwargs):
        super(CronModule, self).start(*args, **kwargs)
        self._stop = False
        self.last_tick = datetime.datetime.now()
        self.thread.start()
    def stop(self, *args, **kwargs):
        super(CronModule, self).stop(*args, **kwargs)
        self._stop = True
        # In any normal circumstances, the cron thread should finish in
        # about half a second or less. We'll give it a little extra buffer.
        self.thread.join(1.0)
        if self.thread.is_alive():
            _log.warning("Cron thread alive 1s after shutdown request.")
    def loop(self):
        # Background thread body: poll all crons until asked to stop.
        while not self._stop:
            # Use a single "now" for all crons, to ensure consistency
            # relative to the next last_tick value.
            now = datetime.datetime.now().replace(microsecond=0)
            for cron in self.crons:
                cron.maybe_fire(self.controller.client, self.last_tick, now)
            self.last_tick = now
            # Wake up every half-second or so to avoid missing seconds
            time.sleep(0.5)
    @Module.handle("ADDCRON")
    def add_cron(self, client, event, seconds="*", minutes="*", hours="*"):
        """Add a cron entry.
        The arguments for this event are:
        1. The name of the event to dispatch when the cron fires.
        2. What seconds to trigger on, as a timespec (default "*")
        3. What minutes to trigger on, as a timespec (default "*")
        4. What hours to trigger on, as a timespec (default "*")
        Timespecs may be omitted in reverse order of frequency - if hours
        is omitted, the previous timespecs will be applied every hour. If
        both hours and minutes are omitted, the seconds timespec will be
        applied every minute of every hour, and if all timespecs are omitted,
        the event will fire each second.
        Timespecs are strings in the following formats:
        Plain integer - specifies that exact value for the unit.
        "?" - specifies a random value from 0 to the unit max.
        "*" - specifies all values for the unix from 0 to max.
        "*/X" - specifies all multiples of X for the unit.
        Any number of these can be combined in a comma-separated list.
        For instance, "*/15" would be the same as "0,15,30,45" if used
        in the seconds field.
        """
        # Event names are unique; refuse duplicate registrations.
        for cron in self.crons:
            if cron.event == event:
                _log.warning("Cron '%s' is already registered.", event)
                return True
        _log.info("Registering cron for '%s'.", event)
        cron = Cron(event, seconds, minutes, hours)
        self.crons.append(cron)
        return True
    @Module.handle("REMOVECRON")
    def remove_cron(self, client, event):
        """Remove a cron entry by event name."""
        for index, cron in enumerate(self.crons):
            if cron.event == event:
                _log.info("De-registering cron '%s'.", event)
                # Yes, we're modifying the list we're iterating over, but
                # we immediate stop iterating so it's okay.
                self.crons.pop(index)
                break
        return True
# NOTE(review): `module` appears to be the hook KitnIRC uses to locate the
# module class when loading this file -- TODO confirm against the loader.
module = CronModule
# vim: set ts=4 sts=4 sw=4 et:
| Python | 0.000001 | |
ee076055f11638b8711658972dda8c4d4b40f666 | Enforce max length on project name (#3982) | src/sentry/web/forms/add_project.py | src/sentry/web/forms/add_project.py | from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntry, AuditLogEntryEvent, Project
from sentry.signals import project_created
from sentry.utils.samples import create_sample_event
BLANK_CHOICE = [("", "")]
class AddProjectForm(forms.ModelForm):
    """Form for creating a new Sentry Project within an organization."""
    name = forms.CharField(label=_('Name'), max_length=64,
        widget=forms.TextInput(attrs={
            'placeholder': _('i.e. API, Frontend, My Application Name'),
        }),
        help_text=_('Using the repository name generally works well.'),
    )
    class Meta:
        fields = ('name',)
        model = Project
    def __init__(self, organization, *args, **kwargs):
        forms.ModelForm.__init__(self, *args, **kwargs)
        self.organization = organization
    def save(self, actor, team, ip_address):
        """Create the project, write an audit-log entry, and emit signals.

        Returns the saved Project instance.
        """
        # commit=False so team/organization can be set before the INSERT.
        project = super(AddProjectForm, self).save(commit=False)
        project.team = team
        project.organization = team.organization
        project.save()
        AuditLogEntry.objects.create(
            organization=project.organization,
            actor=actor,
            ip_address=ip_address,
            target_object=project.id,
            event=AuditLogEntryEvent.PROJECT_ADD,
            data=project.get_audit_log_data(),
        )
        project_created.send(project=project, user=actor, sender=self)
        # Seed the new project with a sample event so the UI is not empty.
        create_sample_event(project, platform='javascript')
        return project
| from __future__ import absolute_import
from django import forms
from django.utils.translation import ugettext_lazy as _
from sentry.models import AuditLogEntry, AuditLogEntryEvent, Project
from sentry.signals import project_created
from sentry.utils.samples import create_sample_event
BLANK_CHOICE = [("", "")]
class AddProjectForm(forms.ModelForm):
    """Form for creating a new Sentry Project within an organization."""
    name = forms.CharField(label=_('Name'), max_length=200,
        widget=forms.TextInput(attrs={
            'placeholder': _('i.e. API, Frontend, My Application Name'),
        }),
        help_text=_('Using the repository name generally works well.'),
    )
    class Meta:
        fields = ('name',)
        model = Project
    def __init__(self, organization, *args, **kwargs):
        forms.ModelForm.__init__(self, *args, **kwargs)
        self.organization = organization
    def save(self, actor, team, ip_address):
        """Create the project, write an audit-log entry, and emit signals.

        Returns the saved Project instance.
        """
        # commit=False so team/organization can be set before the INSERT.
        project = super(AddProjectForm, self).save(commit=False)
        project.team = team
        project.organization = team.organization
        project.save()
        AuditLogEntry.objects.create(
            organization=project.organization,
            actor=actor,
            ip_address=ip_address,
            target_object=project.id,
            event=AuditLogEntryEvent.PROJECT_ADD,
            data=project.get_audit_log_data(),
        )
        project_created.send(project=project, user=actor, sender=self)
        # Seed the new project with a sample event so the UI is not empty.
        create_sample_event(project, platform='javascript')
        return project
| Python | 0 |
96f224a6b80720a88fefc8530aea113f975ef110 | Add new layout window command | new_layout.py | new_layout.py | import sublime, sublime_plugin
class NewLayoutCommand(sublime_plugin.TextCommand):
    """Apply a window layout, then move the current file into group 1.

    The command's arguments are forwarded unchanged to ``set_layout``.
    """
    def run(self, edit, **args):
        window = self.view.window()
        window.run_command("set_layout", args)
        # Focus the first group so the move below starts from a known spot.
        window.run_command("focus_group", {"group": 0})
        window.run_command("move_to_group", {"group": 1})
| Python | 0 | |
62ff128888bce33cf87e083a921ddac65a2f1879 | Add regression test for #3951 | spacy/tests/regression/test_issue3951.py | spacy/tests/regression/test_issue3951.py | # coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Doc
# xfail: this exercises spaCy issue #3951, which was not yet fixed when the
# test was added -- the matcher over-matches combinations of optional tokens.
@pytest.mark.xfail
def test_issue3951(en_vocab):
    """Test that combinations of optional rules are matched correctly."""
    matcher = Matcher(en_vocab)
    # "hello", optionally "this", optionally any token, then "world".
    pattern = [
        {"LOWER": "hello"},
        {"LOWER": "this", "OP": "?"},
        {"OP": "?"},
        {"LOWER": "world"},
    ]
    matcher.add("TEST", None, pattern)
    doc = Doc(en_vocab, words=["Hello", "my", "new", "world"])
    # Two tokens sit between "Hello" and "world", so nothing should match.
    matches = matcher(doc)
    assert len(matches) == 0
| Python | 0.000001 | |
bec4e467d3d00d8195e4abaf95f3043c6f5e2b95 | Set up ChaNGa command in velocity calc to use MPI | calc_velocity_mpi.py | calc_velocity_mpi.py | # -*- coding: utf-8 -*-
"""
Same as calc_velocity.py, but calls mpi with changa to allow many nodes
Created on Wed Apr 9 15:39:28 2014
@author: ibackus
"""
import numpy as np
import pynbody
SimArray = pynbody.array.SimArray
import isaac
import subprocess
import os
import glob
import time
def v_xy(f, param, changbin=None, nr=50, min_per_bin=100):
    """
    Attempts to calculate the circular velocities for particles in a thin
    (not flat) keplerian disk.  Requires ChaNGa (run through MPI).

    **ARGUMENTS**

    f : tipsy snapshot
        For a gaseous disk
    param : dict
        a dictionary containing params for changa. (see isaac.configparser)
    changbin : str (OPTIONAL)
        If set, should be the full path to the ChaNGa executable.  If None,
        an attempt to find ChaNGa is made
    nr : int (optional)
        number of radial bins to use when averaging over accelerations
    min_per_bin : int (optional)
        The minimum number of particles to be in each bin.  If there are too
        few particles in a bin, it is merged with an adjacent bin.  Thus,
        actual number of radial bins may be less than nr.

    **RETURNS**

    vel : SimArray
        An N by 3 SimArray of gas particle velocities.
    """
    if changbin is None:
        # Try to find the ChaNGa binary full path
        changbin = os.popen('which ChaNGa_uw_mpi').read().strip()
    # NOTE(review): this popen runs in a throwaway child shell and cannot
    # alter the current process environment -- TODO confirm whether the
    # module load is actually required here or done by the job script.
    os.popen('module load gcc_4.4.7-ompi_1.6.5')
    # Bug fix: the original used os.popen('export MX_RCACHE=0'), which sets
    # the variable only in a short-lived child shell.  Setting os.environ
    # is inherited by the mpirun subprocesses started below.
    os.environ['MX_RCACHE'] = '0'
    # Load stuff from the snapshot
    x = f.g['x']
    y = f.g['y']
    z = f.g['z']
    r = f.g['rxy']
    vel0 = f.g['vel'].copy()
    # Remove units from all quantities
    r = isaac.strip_units(r)
    x = isaac.strip_units(x)
    y = isaac.strip_units(y)
    z = isaac.strip_units(z)
    # Temporary filenames for running ChaNGa
    f_prefix = str(np.random.randint(0, 2**32))
    f_name = f_prefix + '.std'
    p_name = f_prefix + '.param'
    # Update parameters
    p_temp = param.copy()
    p_temp['achInFile'] = f_name
    p_temp['achOutName'] = f_prefix
    if 'dDumpFrameTime' in p_temp: p_temp.pop('dDumpFrameTime')
    if 'dDumpFrameStep' in p_temp: p_temp.pop('dDumpFrameStep')
    # --------------------------------------------
    # Estimate velocity from gravity only
    # --------------------------------------------
    # Note, accelerations due to gravity are calculated twice to be extra
    # careful.  This is so that any velocity dependent effects are properly
    # accounted for (although, ideally, there should be none).  The second
    # calculation uses the updated velocities from the first.
    for iGrav in range(2):
        a = _run_changa_acc(changbin, '-gas', f, f_name, p_temp, p_name,
                            f_prefix)
        # Radial component of the acceleration in the x-y plane
        a_r = a[:, 0]*x/r + a[:, 1]*y/r
        # Make sure the units are correct then remove them
        a_r = isaac.match_units(a_r, a)[0]
        a_r = isaac.strip_units(a_r)
        # Calculate cos(theta) where theta is angle above x-y plane
        cos = r/np.sqrt(r**2 + z**2)
        ar2 = a_r*r**2
        # Bin the data
        r_edges = np.linspace(r.min(), (1 + np.spacing(2))*r.max(), nr + 1)
        ind, r_edges = isaac.digitize_threshold(r, min_per_bin, r_edges)
        ind -= 1
        nr = len(r_edges) - 1
        r_bins, ar2_mean, err = isaac.binned_mean(r, ar2, binedges=r_edges,
                                                  weighted_bins=True)
        # Fit lines to ar2 vs cos for each radial bin
        m = np.zeros(nr)
        b = np.zeros(nr)
        for i in range(nr):
            mask = (ind == i)
            p = np.polyfit(cos[mask], ar2[mask], 1)
            m[i] = p[0]
            b[i] = p[1]
        # Interpolate the line fits
        m_spline = isaac.extrap1d(r_bins, m)
        b_spline = isaac.extrap1d(r_bins, b)
        # Calculate circular velocity
        ar2_calc = m_spline(r)*cos + b_spline(r)
        v_calc = np.sqrt(abs(ar2_calc)/r)
        vel = f.g['vel'].copy()
        v_calc = isaac.match_units(v_calc, vel)[0]
        vel[:, 0] = -v_calc*y/r
        vel[:, 1] = v_calc*x/r
        # Assign to f so the second pass runs with the updated velocities
        f.g['vel'] = vel
    # --------------------------------------------
    # Estimate pressure/gas dynamics accelerations
    # --------------------------------------------
    a_grav = a
    ar2_calc_grav = ar2_calc
    # Run ChaNGa again, this time including SPH (+gas)
    a_total = _run_changa_acc(changbin, '+gas', f, f_name, p_temp, p_name,
                              f_prefix)
    # Estimate the accelerations due to pressure gradients/gas dynamics
    a_gas = a_total - a_grav
    ar_gas = a_gas[:, 0]*x/r + a_gas[:, 1]*y/r
    ar_gas = isaac.strip_units(ar_gas)
    ar2_gas = ar_gas*r**2
    logr_bins, ratio, err = isaac.binned_mean(np.log(r), ar2_gas/ar2_calc_grav,
                                              nbins=nr, weighted_bins=True)
    r_bins = np.exp(logr_bins)
    ratio_spline = isaac.extrap1d(r_bins, ratio)
    ar2_calc = ar2_calc_grav*(1 + ratio_spline(r))
    a_calc = ar2_calc/r**2
    v = np.sqrt(r*abs(a_calc))
    v = isaac.match_units(v, vel0.units)[0]
    vel = vel0.copy()
    vel[:, 0] = -v*y/r
    vel[:, 1] = v*x/r
    # Restore the snapshot's original velocities before returning
    f.g['vel'] = vel0
    return vel


def _run_changa_acc(changbin, gas_flag, f, f_name, p_temp, p_name, f_prefix):
    """Write the snapshot/param files, run ChaNGa for zero steps with
    *gas_flag* ('-gas' or '+gas'), load the resulting accelerations, and
    clean up all temporary files.  Returns the acceleration array."""
    f.write(filename=f_name, fmt=pynbody.tipsy.TipsySnap)
    isaac.configsave(p_temp, p_name, ftype='param')
    command = ('mpirun --mca mtl mx --mca pml cm ' + changbin + ' ' +
               gas_flag + ' -n 0 ' + p_name)
    proc = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
    # Poll until ChaNGa exits
    while proc.poll() is None:
        time.sleep(0.1)
    acc = isaac.load_acc(f_prefix + '.000000.acc2')
    # Clean up every temporary file created for this run
    for fname in glob.glob(f_prefix + '*'):
        os.remove(fname)
    return acc
8436253648c67205de23db8797c9fcc7c2172b3e | add the actual test | test/test_slice.py | test/test_slice.py | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
'''
Tests related to slices.
'''
import unittest
import common
# NOTE(review): the base class is commented out, so this test case is
# currently inert -- self.check comes from common.TestCase when enabled.
class SliceTestCase:#(common.TestCase):
    '''
    test that slices work.
    '''
    def test_slice(self):
        # Runs the 'test_slice' fixture via the shared test harness.
        self.check('test_slice')
if __name__ == '__main__':
    unittest.main()
| Python | 0.001366 | |
8ca0e88b7df79461f401e7c46c822f16223ddd0b | Create solution.py | hackerrank/algorithms/implementation/easy/between_two_sets/py/solution.py | hackerrank/algorithms/implementation/easy/between_two_sets/py/solution.py | #!/bin/python3
import sys
# Hackerrank Python3 environment does not provide math.gcd
# as of the time of writing. We define it ourselves.
def gcd(n, m):
    """Greatest common divisor of n and m (Euclid's algorithm)."""
    # Recursive form: gcd(n, 0) == n, otherwise recurse on (m, n mod m).
    if m == 0:
        return n
    return gcd(m, n % m)
def lcm(x, y):
    """Least common multiple of x and y, via the gcd identity."""
    divisor = gcd(x, y)
    return (x * y) // divisor
def between(s1, s2):
    """Return, as a tuple, every integer that is a multiple of all of s1
    and a divisor of all of s2."""
    import functools
    common_divisor = functools.reduce(gcd, s2)
    common_multiple = functools.reduce(lcm, s1)
    # Candidates lie between the lcm of s1 and the gcd of s2 (inclusive).
    return tuple(
        candidate
        for candidate in range(common_multiple, common_divisor + 1)
        if common_divisor % candidate == 0 and candidate % common_multiple == 0
    )
# Read the two array lengths (unused beyond parsing).
n, m = input().strip().split(' ')
n, m = [int(n),int(m)]
# Read the two integer arrays from stdin.
a = [int(a_temp) for a_temp in input().strip().split(' ')]
b = [int(b_temp) for b_temp in input().strip().split(' ')]
btw = between(a, b)
# Output: how many integers are "between" the two sets.
print(len(btw))
| Python | 0.000018 | |
325465d18e963400b427f259547d4292a47368c9 | Use Django nose for tests. | oneflow/settings/snippets/common_development.py | oneflow/settings/snippets/common_development.py | #
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
# Expose template debug info in development.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
    'localhost',
    'chani.licorn.org',
    'leto.licorn.org',
    'gurney.licorn.org'
]
# Run the test suite through django-nose instead of Django's default runner.
INSTALLED_APPS += ('django_nose', )
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
| #
# Include your development machines hostnames here.
#
# NOTE: this is not strictly needed, as Django doesn't enforce
# the check if DEBUG==True. But Just in case you wanted to disable
# it temporarily, this could be a good thing to have your hostname
# here.
#
# If you connect via http://localhost:8000/, everything is already OK.
# Expose template debug info in development.
TEMPLATE_CONTEXT_PROCESSORS += (
    'django.core.context_processors.debug',
)
ALLOWED_HOSTS += [
    'localhost',
    'chani.licorn.org',
    'leto.licorn.org',
    'gurney.licorn.org'
]
| Python | 0 |
1f9c24f54047b616f1162500a053031adaf7b7d3 | add basic uci implementation | testWrt/lib/uci.py | testWrt/lib/uci.py | """ uci parsing """
import logging
import re
class UciError(RuntimeError):
    """Base class for all UCI-related errors."""
    pass
class UciWrongTypeError(UciError):
    """An entry was not of the expected kind (e.g. option vs. list)."""
    pass
class UciNotFoundError(UciError):
    """A requested package/section/option does not exist."""
    pass
class UciParseError(UciError):
    """Input text could not be parsed as UCI configuration."""
    pass
class Config(object):
    """One UCI "config" section: a type, an optional name, and its entries.

    Entries live in ``self.keys``: plain options map key -> str(value),
    while list entries map key -> [value, ...].
    """
    def __init__(self, uci_type, name=None):
        self.uci_type = uci_type
        self.name = name
        # options are key -> str(value); lists are key -> [values...]
        self.keys = {}
    def add_list(self, key, value):
        """Append *value* to the list entry *key*, creating it if absent."""
        self.keys.setdefault(key, []).append(value)
    def remove_list_pos(self, key, pos):
        """Delete the element at index *pos* from list entry *key*.

        A missing key is silently ignored; a non-list entry raises
        UciWrongTypeError.
        """
        try:
            entry = self.keys[key]
            if not isinstance(entry, list):
                raise UciWrongTypeError
            del entry[pos]
        except (ValueError, KeyError):
            return
    def remove_list_value(self, key, value):
        """Remove the first occurrence of *value* from list entry *key*.

        Missing keys or values are silently ignored.
        """
        try:
            self.keys[key].remove(value)
        except (ValueError, KeyError):
            return
    def set_option(self, key, value):
        """Set option *key* to *value*; refuses to overwrite a list entry."""
        if isinstance(self.keys.get(key), list):
            raise UciWrongTypeError()
        self.keys[key] = value
    def remove_option(self, key):
        """Drop entry *key* if present."""
        self.keys.pop(key, None)
    def export(self):
        """Render this section in UCI config-file syntax."""
        if self.name:
            lines = ["config '%s' '%s'\n" % (self.uci_type, self.name)]
        else:
            lines = ["config '%s'\n" % (self.uci_type)]
        for key, entry in self.keys.items():
            if isinstance(entry, list):
                for element in entry:
                    lines.append("\tlist '%s' '%s'\n" % (key, element))
            else:
                lines.append("\toption '%s' '%s'\n" % (key, entry))
        # Sections are separated by a blank line.
        lines.append('\n')
        return ''.join(lines)
    def __repr__(self):
        return "Config[%s:%s] %s" % (self.uci_type, self.name, repr(self.keys))
class Package(list):
    """A named collection of Config sections (a list subclass)."""
    def __init__(self, name):
        super().__init__()
        self.name = name
    def add_config(self, config):
        """Append a Config section to this package."""
        self.append(config)
class Uci(object):
    """In-memory model of a UCI configuration tree.

    The tree is a mapping of package name -> Package, where each Package is
    a list of Config sections.  Supports round-tripping through the textual
    ``uci export`` format via load_tree()/export_tree().
    """
    logger = logging.getLogger('uci')
    def __init__(self):
        self.packages = {}
    def add_package(self, package_name):
        """Return the Package for *package_name*, creating it if needed."""
        if package_name not in self.packages:
            self.packages[package_name] = Package(package_name)
        return self.packages[package_name]
    def add_config(self, package_name, config):
        """Append *config* to package *package_name*, creating the package
        on demand.  Raises UciWrongTypeError for non-Config arguments.
        """
        if not isinstance(config, Config):
            # Bug fix: the original `return RuntimeError()` silently handed
            # back an exception instance instead of raising it.
            raise UciWrongTypeError("expected a Config instance")
        # Bug fix: the original called Package() without its mandatory name
        # argument here; reuse add_package for consistent creation.
        self.add_package(package_name).add_config(config)
    def del_config(self, config):
        # Not implemented yet.
        pass
    def del_path(self, path):
        # Not implemented yet.
        pass
    def export_tree(self):
        """Render the whole tree in UCI config-file syntax."""
        export = []
        for package, content in self.packages.items():
            export.append("package '%s'\n" % package)
            export.append("\n")
            export.extend([config.export() for config in content])
        return "".join(export)
    def test_split_into_parts(self):
        # Embedded sanity check kept from the original; it really belongs
        # in a separate test module.
        for single_foo in ["foo", '"foo"', "'foo'"]:
            assert self.split_into_parts(single_foo) == ['foo']
        for double_foo in ['foo foo', "foo 'foo'", 'foo "foo"', '"foo" \'foo\'']:
            assert self.split_into_parts(double_foo) == ["foo", "foo"]
    def split_into_parts(self, line):
        """
        split line into a list of strings
        e.g.
        line = "config 'fooo' 'fooo' "
        will be splitted into ['config', 'fooo', 'fooo']
        Unparsable remainders are logged and the tokens found so far are
        returned.
        """
        orig_line = line
        # all regex have 2 groups '()()'
        # the first group is our token, the second the rest of the string
        # (likely without whitespaces or tabs)
        # "config foo bar" -> groups() = ('config', 'foo bar')
        reg_without = re.compile(r"^([^'\"][^ \t]*)[ \t]*(.*)$")
        reg_single = re.compile(r"^'([^']*)'[ \t]*(.*)$")
        reg_double = re.compile(r'^"([^"]*)"[ \t]*(.*)$')
        splitted = []
        # a line can only be matched by one reg not two or more.
        while len(line):
            matches = [reg.match(line) for reg in [reg_single, reg_double, reg_without] if reg.match(line)]
            if len(matches) == 0:
                self.logger.info("Can not parse complete Line:%s" % orig_line)
                self.logger.info("Unparsable Parts:%s" % line)
                return splitted
            else:
                splitted.append(matches[0].groups()[0])
                line = matches[0].groups()[1]
        return splitted
    def load_tree(self, export_tree):
        """Parse ``uci export`` style text into this tree.

        Lines outside a package (or options outside a section) are logged
        and skipped rather than treated as fatal.
        """
        package = None
        config = None
        reg_strip_white = re.compile(r"[ \t]*([^ \t].*)$")
        for line in export_tree.splitlines():
            # Strip leading whitespace while keeping the payload intact.
            match = reg_strip_white.match(line)
            if match:
                line = match.groups()[0]
            tokens = self.split_into_parts(line)
            if not tokens:
                continue
            if line.startswith('config'):
                if tokens[0] == 'config':
                    if len(tokens) == 2:
                        if package is None:
                            self.logger.info("Ignoring config %s" % (tokens[1]))
                        else:
                            config = Config(tokens[1])
                            package.add_config(config)
                        continue
                    elif len(tokens) == 3:
                        if package is None:
                            self.logger.info("Ignoring config %s %s" % (tokens[1], tokens[2]))
                        else:
                            config = Config(tokens[1], tokens[2])
                            package.add_config(config)
                        continue
            elif line.startswith('package'):
                if len(tokens) == 2 and tokens[0] == 'package':
                    package = self.add_package(tokens[1])
                    continue
            elif line.startswith('option'):
                if len(tokens) == 3 and tokens[0] == 'option':
                    if config is None:
                        self.logger.info("Ignoring option - no config %s %s" % (tokens[1], tokens[2]))
                    else:
                        config.set_option(tokens[1], tokens[2])
                    continue
            elif line.startswith('list'):
                if len(tokens) == 3 and tokens[0] == 'list':
                    if config is None:
                        self.logger.info("Ignoring option - no config %s %s" % (tokens[1], tokens[2]))
                    else:
                        config.add_list(tokens[1], tokens[2])
                    continue
            # Anything reaching this point failed every recognizer above.
            # (logger.warn is a deprecated alias; use warning.)
            self.logger.warning("Can not parse foo '%s' %s" % (line, tokens))
class UciConfig(object):
    """ Class for configurations - like network... (placeholder, not yet implemented) """
    pass
if __name__ == '__main__':
    # Demo/debug entry point: parse a previously dumped `uci export` file
    # and round-trip it back to stdout.
    # Bug fix: the original left the file handle open; use a context
    # manager so it is always closed.
    with open('uci_export') as uci_export:
        alles = uci_export.read(1000000)
    logging.basicConfig()
    ucilog = logging.getLogger('uci')
    ucilog.setLevel(logging.DEBUG)
    uci = Uci()
    uci.load_tree(alles)
    print(uci.export_tree())
| Python | 0.000001 | |
df69df55cdf51da60e62226c16b30c76e2836c20 | Add initial test suite | test_fiscalyear.py | test_fiscalyear.py | import fiscalyear
import pytest
class TestFiscalYear:
    """Tests for fiscalyear.FiscalYear construction, formatting, ordering."""
    # Class-scoped fixtures: one FiscalYear instance shared per value.
    @pytest.fixture(scope='class')
    def a(self):
        return fiscalyear.FiscalYear(2016)
    @pytest.fixture(scope='class')
    def b(self):
        return fiscalyear.FiscalYear(2017)
    @pytest.fixture(scope='class')
    def c(self):
        # Same year as b, but constructed from a string.
        return fiscalyear.FiscalYear('2017')
    def test_basic(self, a):
        assert a.fiscal_year == 2016
    def test_repr(self, a):
        assert repr(a) == 'fiscalyear.FiscalYear(2016)'
    def test_str(self, a):
        assert str(a) == 'FY2016'
    def test_less_than(self, a, b):
        assert a < b
    def test_less_than_equals(self, a, b, c):
        assert a <= b <= c
    def test_equals(self, b, c):
        assert b == c
    def test_not_equals(self, a, b):
        assert a != b
    def test_greater_than(self, a, b):
        assert b > a
    def test_greater_than_equals(self, a, b, c):
        assert c >= b >= a
    def test_from_string(self, c):
        assert c.fiscal_year == 2017
    def test_wrong_type(self):
        # Floats and non-numeric strings must be rejected.
        with pytest.raises(TypeError):
            x = fiscalyear.FiscalYear(2017.5)
        with pytest.raises(TypeError):
            y = fiscalyear.FiscalYear('hello world')
    def test_out_of_range(self):
        # Years must be positive.
        with pytest.raises(ValueError):
            x = fiscalyear.FiscalYear(0)
        with pytest.raises(ValueError):
            y = fiscalyear.FiscalYear(-2017)
class TestFiscalQuarter:
    """Tests for fiscalyear.FiscalQuarter across year/quarter boundaries."""
    # Class-scoped fixtures: consecutive quarters from FY2016 Q4 to FY2018 Q1.
    @pytest.fixture(scope='class')
    def a(self):
        return fiscalyear.FiscalQuarter(2016, 4)
    @pytest.fixture(scope='class')
    def b(self):
        return fiscalyear.FiscalQuarter(2017, 1)
    @pytest.fixture(scope='class')
    def c(self):
        return fiscalyear.FiscalQuarter(2017, 2)
    @pytest.fixture(scope='class')
    def d(self):
        return fiscalyear.FiscalQuarter(2017, 3)
    @pytest.fixture(scope='class')
    def e(self):
        return fiscalyear.FiscalQuarter(2017, 4)
    @pytest.fixture(scope='class')
    def f(self):
        return fiscalyear.FiscalQuarter(2018, 1)
    @pytest.fixture(scope='class')
    def g(self):
        # Same quarter as f, but constructed from strings.
        return fiscalyear.FiscalQuarter('2018', '1')
    def test_basic(self, a):
        assert a.fiscal_year == 2016
        assert a.quarter == 4
    def test_repr(self, a):
        assert repr(a) == 'fiscalyear.FiscalQuarter(2016, 4)'
    def test_str(self, a):
        assert str(a) == 'FY2016 Q4'
    def test_less_than(self, a, b, c, d, e, f):
        assert a < b < c < d < e < f
    def test_less_than_equals(self, a, b, c, d, e, f, g):
        assert a <= b <= c <= d <= e <= f <= g
    def test_equals(self, f, g):
        assert f == g
    def test_not_equals(self, b, c, g):
        # Same year, different quarter
        assert b != c
        # Same quarter, different year
        assert b != g
    def test_greater_than(self, a, b, c, d, e, f):
        assert f > e > d > c > b > a
    def test_greater_than_equals(self, a, b, c, d, e, f, g):
        assert g >= f >= e >= d >= c >= b >= a
    def test_from_string(self, g):
        assert g.fiscal_year == 2018
        assert g.quarter == 1
    def test_wrong_type(self):
        # Floats and non-numeric strings must be rejected.
        with pytest.raises(TypeError):
            x = fiscalyear.FiscalQuarter(2017.5, 1.2)
        with pytest.raises(TypeError):
            y = fiscalyear.FiscalQuarter('hello', 'world')
    def test_out_of_range(self):
        # Quarters are limited to 1..4, and years must be positive.
        with pytest.raises(ValueError):
            x = fiscalyear.FiscalQuarter(2017, 0)
        with pytest.raises(ValueError):
            y = fiscalyear.FiscalQuarter(2017, 5)
        with pytest.raises(ValueError):
            z = fiscalyear.FiscalQuarter(0, 2)
| Python | 0 | |
3e9289f142efd0769beff97cddfcbcbede40f85a | add a half written Qkkk | pacfiles/Qkkk.py | pacfiles/Qkkk.py | #!/usr/bin/env python3
import pyalpm
import pycman
import tarfile
import sys, os, os.path
# Module-level pacman configuration handle and the filesystem root it manages.
pacmanconf = pycman.config.init_with_config("/etc/pacman.conf")
rootdir = pacmanconf.rootdir
def local_database():
    """Map each locally installed package name to its sync-db package object.

    Only packages that also exist in at least one sync database appear in
    the result; purely local (e.g. AUR) packages are omitted.
    NOTE(review): the inner loop keeps overwriting with later sync dbs, so
    the LAST repo providing the package wins -- TODO confirm that is intended.
    """
    handle = pacmanconf
    localdb = handle.get_localdb()
    packages = localdb.pkgcache
    syncdbs = handle.get_syncdbs()
    db = dict()
    for pkg in packages:
        for syncdb in syncdbs:
            if syncdb.get_pkg(pkg.name) is not None:
                db[pkg.name] = syncdb.get_pkg(pkg.name)
    return db
def get_pkgfiles(package):
    """Return the paths of *package*'s archive file found in the cache dirs.

    Raises KeyError if the package is not in any sync database.
    """
    db = local_database()
    pkg = db[package].filename
    result = []
    for d in pacmanconf.cachedirs:
        p = os.path.join(d, pkg)
        if os.path.exists(p):
            result.append(p)
    return result
def error_file(file, pkgfile, pkgname):
    """Report that *file* from archive *pkgfile* of package *pkgname*
    does not match the installed filesystem."""
    # Bug fix: the original used doubled braces ({{file}}), which an
    # f-string renders as the literal text "{file}" rather than the value.
    print(f'"{file}" in {pkgfile} of {pkgname} mismatch')
def check_pkgfile(pkgname, pkgfile):
    """Compare the contents of archive *pkgfile* against the filesystem
    under rootdir, reporting mismatching entries.

    NOTE(review): half-written -- only directory entries are checked so
    far; the symlink branch is still commented out below.
    """
    with tarfile.open(pkgfile) as tar:
        for fn in tar:
            fnpath = os.path.join(rootdir, fn.name)
            if fn.isdir():
                if not os.path.isdir(fnpath):
                    error_file(fnpath, pkgfile, pkgname)
            # else if fn.issym():
            # if not os.path.issym(fnpath):
def main():
    """Check every package named on the command line against its cached
    archive(s)."""
    # Bug fix: sys.args does not exist (AttributeError); the command-line
    # arguments live in sys.argv, with argv[0] being the script name.
    for pkgname in sys.argv[1:]:
        for pkgfile in get_pkgfiles(pkgname):
            check_pkgfile(pkgname, pkgfile)
if __name__ == '__main__':
    main()
| Python | 0.999936 | |
701acbccc764101e00eef35dfff81dda5c5437a3 | Create pages_in_dict.py | pages_in_dict.py | pages_in_dict.py | import codecs
import os
import re
# Python 2 script: scan the current directory's .html pages, collect the
# distinct 3-character filename prefixes, and report which numeric prefixes
# in 161..205 have no page.
letters = []
no_letters = []
# NOTE(review): number_of is populated nowhere -- the page-count extraction
# below is commented out.
number_of = {}
pages = os.listdir(".")
for page in pages:
    if page.endswith('.html'):
        if page[0:3] not in letters:
            letters.append(page[0:3])
        f = codecs.open(page, 'r', 'utf-8-sig')
        text = f.read()
        #n = re.findall('Page [0-9]*? of [0-9][0-9]', text, flags=re.U)
        #number_of[page[0:3]] = n[-1:-2]
# Prefixes are 3-digit numbers, so compare against str(161)..str(205).
for i in range(161, 206):
    if str(i) not in letters:
        no_letters.append(str(i))
print no_letters
| Python | 0.000001 | |
39bf0b2ab6f89cfe3450102699a5bbeaf235011a | Create 4.py | 4.py | 4.py | #!/usr/bin/env python
MAX_TRI = 999999L  # size of one window of precomputed triangular numbers
triangles = []     # the current window; (re)filled by gen_triangles()
def next_pos(mn, pos):
    # Binary-search the current `triangles` window for `mn`.
    # Returns -1 when `mn` lies beyond the window (the caller must generate
    # the next window). Otherwise returns an index that equals the position
    # of `mn` only when `mn` is actually present — the caller re-checks
    # `triangles[pos] == mn` before counting a match.
    # NOTE(review): the `pos` parameter is never used; presumably a leftover
    # from an earlier incremental-scan design.
    if mn > triangles[MAX_TRI - 1]:
        return -1
    else:
        maxv = MAX_TRI - 1
        minv = 0
        mid = minv + (maxv - minv) / 2
        while triangles[mid] != mn and minv < maxv:
            if triangles[mid] < mn :
                minv = mid + 1
            else :
                maxv = mid - 1
            mid = minv + (maxv - minv) / 2
        return mid
def gen_triangles(offset):
    # Refill `triangles` with the window of MAX_TRI + 1 triangular numbers
    # T(i) = i*(i+1)/2 for i starting at 1 + offset*MAX_TRI.
    triangles[:] = []
    i = 1L + offset * MAX_TRI
    bound = i + MAX_TRI
    print "Generating %i through %i " % (i, bound)
    while i <= bound:
        triangles.append((i * (i + 1L)) / 2L)
        i += 1L
    print "Max value = %i " % (triangles[MAX_TRI - 1])
def pe321():
offset = 0L
#total = 0L
#count = 0L
#pos = 0L
n = 1L
#mn = 0L
gen_triangles(offset)
offset = total = count = mn = 0L
n = 1L
while count < 41:
mn = 2L * n + n * n
while mn % 3 != 0 and mn % 9 != 1:
n += 1L
mn = 2L * n + n * n
pos = next_pos(mn, pos)
if pos == -1 :
offset += 2L
gen_triangles(offset)
pos = 0L
if mn == triangles[pos]:
count += 1L
total += n
print "M(%i) = %i is triangular" % (n, mn)
n += 1L;
else:
n += 1L
print "The sum of the first %i terms = %i" % (count, total)
pe321()
| Python | 0.000001 | |
ac357bc1ccefe55e25bb34021772301726ceec0e | Complete P4 | Quiz/Problem4_defMyLog.py | Quiz/Problem4_defMyLog.py | def myLog(x, b):
'''
x: a positive integer
b: a positive integer; b >= 2
returns: log_b(x), or, the logarithm of x relative to a base b.
'''
if x < b:
return 0
else:
return 1 + myLog(x / b, b) | Python | 0.000001 | |
e6ea8ad5b94b51d8b07dea238f2545eacba3abfe | Create Elentirmo_GUI_V0.1.py | Elentirmo_GUI_V0.1.py | Elentirmo_GUI_V0.1.py | #!/usr/bin/python
from Tkinter import *
# Main application window.
root = Tk()
root.title("Elentirmo Observatory Controller v0.1")
# Mutable label texts reflecting the current hardware state.
dust_cover_text = StringVar()
dust_cover_text.set('Cover Closed')
flat_box_text = StringVar()
flat_box_text.set('Flat Box Off')
def _send_serial_command(command, action_message):
    """Open the Arduino serial port, send one command byte, and close it.

    command        -- single-character command understood by the sketch
    action_message -- human-readable description printed before sending

    The four button handlers below previously repeated this sequence
    verbatim; it is factored out here. The print calls use the
    parenthesised single-argument form, which behaves identically on
    Python 2 and 3.
    """
    import time
    import serial
    ser = serial.Serial("COM9", 9600)  # serial port the Arduino is using
    time.sleep(3)                      # wait 3 seconds for the Arduino to reset
    print(ser)                         # show the serial configuration
    print(action_message)
    ser.write(command)
    # BUG FIX: this message used to say "Opening serial connection." in some
    # handlers even though the port is being closed.
    print("Closing serial connection.")
    ser.close()
    # Reminder to close the connection when finished.
    if ser.isOpen():
        print("Serial connection is still open.")


def dust_cover_open():
    """Open the dust cover and update the status label."""
    print("Opening")
    _send_serial_command("O", "Sending serial command to OPEN the dust cover.")
    dust_cover_label.config(bg="Green")
    dust_cover_text.set('Cover is Open')


def dust_cover_close():
    """Close the dust cover and update the status label."""
    print("Closing")
    _send_serial_command("C", "Sending serial command to CLOSE the dust cover.")
    dust_cover_label.config(bg="red")
    dust_cover_text.set('Cover is closed')


def flat_on():
    """Switch the flat box on via the relay and update the status label."""
    print("Activating flat box")
    _send_serial_command("Q", "Sending serial command to turn on the flat box via relay.")
    flat_box_label.config(bg="Green")
    flat_box_text.set('Flat Box on')


def flat_off():
    """Switch the flat box off via the relay and update the status label."""
    # BUG FIX: message typo "Dectivating" corrected.
    print("Deactivating flat box")
    _send_serial_command("F", "Sending serial command to turn off the flat box via relay.")
    flat_box_label.config(bg="red")
    flat_box_text.set('Flat Box Off')
# Control buttons: dust cover in column 0, flat box light in column 2.
open_dust_cover_btn = Button(text=" Open Cover ", width=15, command=dust_cover_open)
open_dust_cover_btn.grid(row=0, column=0)
close_dust_cover_btn = Button(text=" Close Cover ", width=15, command=dust_cover_close)
close_dust_cover_btn.grid(row=1, column=0)
flat_box_on_btn = Button(text="Turn On Light", width=15, command=flat_on)
flat_box_on_btn.grid(row=0, column=2)
flat_box_off_btn = Button(text="Turn Off Light", width=15, command=flat_off)
flat_box_off_btn.grid(row=1, column=2)
# Status row: the coloured labels track dust_cover_text / flat_box_text.
status_label = Label(root, text=("Current Status"), width=15, fg="Black")
status_label.grid(row=2, column=1)
dust_cover_label = Label(root, textvariable=dust_cover_text, width=15, fg="Black", bg="Red")
dust_cover_label.grid(row=2, column=0)
flat_box_label = Label(root, textvariable=flat_box_text, width=15, fg="Black", bg="Red")
flat_box_label.grid(row=2, column=2)
root.mainloop()
| Python | 0 | |
51feabbc27821c5acb7f0ceb932d19c0d79f16d1 | test ssl version check functions as expected in python 2.6 | tests/test_help.py | tests/test_help.py | # -*- encoding: utf-8
import sys
import pytest
from requests.help import info
@pytest.mark.skipif(sys.version_info[:2] != (2,6), reason="Only run on Python 2.6")
def test_system_ssl_py26():
    """On Python 2.6 ssl lacks OPENSSL_VERSION_NUMBER; make sure info()
    degrades to an empty version string instead of blowing up.
    """
    system_ssl = info()['system_ssl']
    assert system_ssl == {'version': ''}
@pytest.mark.skipif(sys.version_info < (2,7), reason="Only run on Python 2.7+")
def test_system_ssl():
    """On Python 2.7+ the reported system_ssl version must be non-empty."""
    version = info()['system_ssl']['version']
    assert version != ''
| Python | 0 | |
ca27dc71bd814fe42282521edd97ae444d6c714b | Add test of PlotData | tests/test_plot.py | tests/test_plot.py | from maflib.plot import *
import unittest
class TestPlotData(unittest.TestCase):
    """Exercise PlotData.get_data_{1d,2d,3d} over a small fixed dataset.

    Each test checks one combination of dimensionality, the `sort` flag
    (sorting is by the first axis) and grouping via `key`.
    """

    # Four rows with numeric axes x/y/z and a grouping key 'k'.
    inputs = [
        { 'x': 1, 'y': 2, 'z': 50, 'k': 'p' },
        { 'x': 5, 'y': 3, 'z': 25, 'k': 'q' },
        { 'x': 3, 'y': 5, 'z': 10, 'k': 'q' },
        { 'x': 7, 'y': 4, 'z': 85, 'k': 'p' }
    ]

    def test_empty_inputs(self):
        pd = PlotData([])
        data = pd.get_data_1d('x')
        self.assertListEqual([], data)

    def test_get_data_1d(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x')
        self.assertListEqual([1, 3, 5, 7], data)

    def test_get_data_1d_unsorted(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x', sort=False)
        self.assertListEqual([1, 5, 3, 7], data)

    def test_get_data_1d_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x', key='k')
        self.assertDictEqual({ 'p': [1, 7], 'q': [3, 5] }, data)

    def test_get_data_1d_unsorted_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_1d('x', key='k', sort=False)
        self.assertDictEqual({ 'p': [1, 7], 'q': [5, 3] }, data)

    def test_get_data_2d(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y')
        self.assertEqual(2, len(data))
        self.assertListEqual([1, 3, 5, 7], data[0])
        self.assertListEqual([2, 5, 3, 4], data[1])

    def test_get_data_2d_unsorted(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y', sort=False)
        self.assertEqual(2, len(data))
        self.assertListEqual([1, 5, 3, 7], data[0])
        self.assertListEqual([2, 3, 5, 4], data[1])

    def test_get_data_2d_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y', key='k')
        self.assertDictEqual(
            { 'p': ([1, 7], [2, 4]), 'q': ([3, 5], [5, 3]) }, data)

    def test_get_data_2d_unsorted_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_2d('x', 'y', key='k', sort=False)
        self.assertDictEqual(
            { 'p': ([1, 7], [2, 4]), 'q': ([5, 3], [3, 5]) }, data)

    def test_get_data_3d(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z')
        self.assertEqual(3, len(data))
        self.assertListEqual([1, 3, 5, 7], data[0])
        self.assertListEqual([2, 5, 3, 4], data[1])
        self.assertListEqual([50, 10, 25, 85], data[2])

    def test_get_data_3d_unsorted(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z', sort=False)
        self.assertEqual(3, len(data))
        self.assertListEqual([1, 5, 3, 7], data[0])
        self.assertListEqual([2, 3, 5, 4], data[1])
        self.assertListEqual([50, 25, 10, 85], data[2])

    def test_get_data_3d_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z', key='k')
        self.assertDictEqual({
            'p': ([1, 7], [2, 4], [50, 85]),
            'q': ([3, 5], [5, 3], [10, 25])
        }, data)

    # NOTE(review): the capital G breaks the naming convention, but the
    # method still starts with "test" so unittest discovers it.
    def test_Get_data_3d_unsorted_with_key(self):
        pd = PlotData(self.inputs)
        data = pd.get_data_3d('x', 'y', 'z', key='k', sort=False)
        self.assertDictEqual({
            'p': ([1, 7], [2, 4], [50, 85]),
            'q': ([5, 3], [3, 5], [25, 10])
        }, data)
| Python | 0 | |
63804c534f23ffbe16ff539087048d99f9fcaf17 | Implement test_encoder_decoder | test_encoder_decoder.py | test_encoder_decoder.py | #! /usr/bin/env python
# coding:utf-8
if __name__ == "__main__":
    import sys
    import argparse
    from seq2seq import decode
    from util import load_dictionary
    import configparser
    import os
    from chainer import serializers

    # Command-line interface: config file plus GPU id and model type.
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', metavar='config_file', type=str,
                        help='config file')
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    # BUG FIX: the help text had been copy-pasted from --gpu.
    parser.add_argument('--type', '-t', default="relu", type=str,
                        help='model type: "relu" or "lstm"')
    args = parser.parse_args()
    gpu_flag = True if args.gpu >= 0 else False

    config_file = args.config_file
    parser_config = configparser.ConfigParser()
    parser_config.read(config_file)
    config = parser_config["CONFIG"]
    # config["SEPARATOR"] = bytes(
    #     config["DEFAULT"]["SEPARATOR"], "utf-8"
    # ).decode("unicode_escape")

    # params
    model_dir = config["model_dir"]
    n_units = int(config["n_units"])

    # Load the id <-> token dictionary used during training.
    dictionary = load_dictionary(config["dict_file"])

    # Build the RNN; the embedding dimension equals the vocabulary size.
    dim = len(dictionary.keys())
    model_type = args.type
    if model_type == "relu":
        import relu_rnn
        model = relu_rnn.Classifier(
            relu_rnn.ReLURNN(
                embed_dim=dim,
                n_units=int(config["n_units"]),
                gpu=args.gpu
            )
        )
    elif model_type == "lstm":
        import lstm
        model = lstm.Classifier(
            lstm.LSTM(
                embed_dim=dim,
                n_units=int(config["n_units"]),
                gpu=args.gpu
            )
        )
    else:
        # BUG FIX: typo "argment" corrected in the error message.
        raise Exception("model argument should be relu or lstm")

    # Restore trained weights; refuse to run without them.
    init_model_name = os.path.join(
        model_dir,
        "model.npz"
    )
    if os.path.exists(init_model_name):
        serializers.load_npz(init_model_name, model)
        print("load model {}".format(init_model_name))
    else:
        raise Exception("learn model first")

    # REPL: read whitespace-tokenised lines from stdin and decode replies.
    # NOTE(review): `model` is passed twice (encoder and decoder share
    # weights here) — confirm that is intentional.
    for text in (_.strip() for _ in sys.stdin):
        ws = text.split()
        print(ws)
        decoded_words = decode(
            ws,
            model,
            model,
            dictionary,
        )
        # Strip the begin/end sentinel tokens before printing the answer.
        answer_text = "".join(decoded_words[1:-1])
        print(answer_text)
| Python | 0.003162 | |
8eeb4c2db613c1354c38696ac6691cf79f66a383 | Add spider for Brookdale Senior Living | locations/spiders/brookdale.py | locations/spiders/brookdale.py | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
# Community-search endpoint; a two-letter state code is appended per request.
URL = 'https://www.brookdale.com/bin/brookdale/community-search?care_type_category=resident&loc=&finrpt=&state='

# NOTE(review): AK, HI, ME, ND and SD are absent from this tuple — confirm
# whether those states are intentionally excluded.
US_STATES = (
    "AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
    "ID", "IL", "IN", "IA", "KS", "KY", "LA", "MD",
    "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
    "NM", "NY", "NC", "OH", "OK", "OR", "PA", "RI", "SC",
    "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
)
class TemplateSpider(scrapy.Spider):
    """Scrape Brookdale Senior Living community locations.

    One request is issued per US state; the endpoint answers with a JSON
    list of communities, each mapped onto a GeojsonPointItem.

    NOTE(review): the class name looks like a leftover from a template;
    the spider is identified by ``name = "brookdale"``.
    """
    name = "brookdale"
    allowed_domains = ["www.brookdale.com"]

    def start_requests(self):
        # The API filters by state, so fan out one request per state code.
        for state in US_STATES:
            yield scrapy.Request(URL + state, callback=self.parse_info)

    def parse_info(self, response):
        # Iterate the JSON list directly instead of the original manual
        # index loop; the stray debug print of each name was removed.
        for community in json.loads(response.body_as_unicode()):
            yield GeojsonPointItem(
                ref=community['community_id'],
                name=community['name'],
                lat=community['latitude'],
                lon=community['longitude'],
                addr_full=community['address1'],
                city=community['city'],
                state=community['state'],
                country=community['country_code'],
                postcode=community['zip_postal_code'],
                website=community['website'],
                phone=community['contact_center_phone'],
            )
47893c708f3b63f79a01d5ee927f4c7d3f6dff27 | Create script to delete untitled and unpublished projects | akvo/rsr/management/commands/delete_untitled_and_unpublished_projects.py | akvo/rsr/management/commands/delete_untitled_and_unpublished_projects.py | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
import datetime
from tablib import Dataset
from django.core.management.base import BaseCommand
from django.db.models import Q
from akvo.rsr.models import Project, PublishingStatus, IndicatorPeriodData, Result, IatiActivityImport
class Command(BaseCommand):
    help = """\
    Delete all Untitled and Unpublished projects created before the given date
    <script> <date:%Y-%m-%d> --delete
    """

    def add_arguments(self, parser):
        # `date` is parsed straight into a datetime.date; --delete switches
        # from the default dry-run (CSV report) to actual deletion.
        parser.add_argument('date', type=lambda date: datetime.datetime.strptime(date, '%Y-%m-%d').date())
        parser.add_argument('--delete', action='store_true', help='Actually delete projects')

    def handle(self, *args, **options):
        the_date = options['date']
        # Projects created before the cut-off that are either untitled or
        # unpublished.
        projects = Project.objects\
            .filter(created_at__lt=the_date)\
            .filter(Q(title__exact='') | Q(publishingstatus__status=PublishingStatus.STATUS_UNPUBLISHED))
        project_ids = projects.values_list('id', flat=True)
        if options['delete']:
            # Delete dependent rows first, then the projects themselves.
            updates = IndicatorPeriodData.objects.filter(period__indicator__result__project__in=project_ids)
            print(f"Deleting {updates.count()} period updates")
            updates.delete()
            iati_import = IatiActivityImport.objects.filter(project__in=project_ids)
            print(f"Deleting {iati_import.count()} iati activity import")
            iati_import.delete()
            results = Result.objects.filter(project__in=project_ids)
            print(f"Deleting {results.count()} results")
            results.delete()
            # BUG FIX: stray closing parenthesis removed from the message.
            print(f"Deleting {projects.count()} projects")
            projects.delete()
        else:
            # Dry run: emit the candidate projects as CSV.
            data = Dataset()
            data.headers = [
                'project_id',
                'project_title',
                'is_published',
                'created_at'
            ]
            for p in projects:
                data.append([
                    p.id,
                    p.title,
                    p.is_published(),
                    p.created_at
                ])
            print(data.export('csv'))
            print(f'Found {projects.count()} projects to delete.')
| Python | 0 | |
03c837b0da9d7f7a6c6c54286631e9a403da3e60 | Add network scan python script - Closes #7 | backend/net_scan.py | backend/net_scan.py | #!/usr/bin/python
import sys, getopt, nmap
def usage():
    # Print the command-line synopsis (Python 2 print statement).
    print 'sword_nmap.py -t <target> -p <port range>'

def main(argv):
    """Parse -t/--target and -p/--ports via getopt, then run the scan.

    Exits with status 2 on bad options and 1 when either argument is
    missing; both paths print the usage text first.
    """
    target=''
    port_range=''
    try:
        opts, args = getopt.getopt(argv,'ht:p:',['target=','ports='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ('-t', '--target'):
            target = arg
        elif opt in ('-p', '--ports'):
            port_range = arg
    if target == '':
        usage()
        sys.exit(1)
    if port_range == '':
        usage()
        sys.exit(1)
    scan(target, port_range)

def scan (target, port_range):
    """Run an nmap scan of `target` over `port_range` and print results.

    For every discovered host, prints its state and the state of each
    scanned port grouped by protocol.
    """
    print ('Scanning %s %s' %(target, port_range))
    nm = nmap.PortScanner()
    nm.scan(target, port_range)
    nm.command_line()
    for host in nm.all_hosts():
        print('Host : %s (%s): %s' % (host, nm[host].hostname(), nm[host].state()))
        for proto in nm[host].all_protocols():
            # NOTE(review): sorting keys() in place is Python 2 only; under
            # Python 3 this would need sorted(nm[host][proto]).
            lport = nm[host][proto].keys()
            lport.sort()
            for port in lport:
                print ('\t%s port %s %s' % (proto, port, nm[host][proto][port]['state']))

if __name__ == '__main__':
    main(sys.argv[1:])
| Python | 0 | |
24e4ed9f26f9803d54d37202d0d71e8f47b18aa3 | Add swix create functionality and make verifyswix test use it | swixtools/create.py | swixtools/create.py | #!/usr/bin/env python3
# Copyright ( c ) 2021 Arista Networks, Inc.
# Use of this source code is governed by the Apache License 2.0
# that can be found in the LICENSE file.
'''
This module is responsible for packaging a SWIX file.
'''
import argparse
import hashlib
import os
import shutil
import subprocess
import sys
import tempfile
def dealWithExistingOutputFile( outputSwix, force ):
   '''
   Remove an existing `outputSwix` when `force` is given; otherwise abort
   with an error. A non-existent file is a no-op.
   '''
   if not os.path.exists( outputSwix ):
      return
   if not force:
      sys.exit( f'File {outputSwix!r} exists: use --force to overwrite.\n' )
   os.remove( outputSwix )
def sha1sum( filename, blockSize=65536 ):
   '''
   Return the hex SHA1 digest of `filename`, read in `blockSize` chunks so
   that large files never have to fit in memory at once.
   '''
   digest = hashlib.sha1()
   with open( filename, 'rb' ) as stream:
      for chunk in iter( lambda: stream.read( blockSize ), b'' ):
         digest.update( chunk )
   return digest.hexdigest()
def createManifest( tempDir, rpms ):
   '''
   Write the SWIX manifest into `tempDir` and return its path.

   The manifest records the format version, the primary RPM (the first one
   given) and a SHA1 digest for every RPM. Any failure aborts the program
   with the offending path in the message.
   '''
   manifestFileName = os.path.join( tempDir, 'manifest.txt' )
   try:
      with open( manifestFileName, 'w' ) as manifest:
         manifest.write( 'format: 1\n' )
         manifest.write( f'primaryRpm: {os.path.basename( rpms[ 0 ] )}\n' )
         for rpm in rpms:
            manifest.write( f'{os.path.basename( rpm )}-sha1: {sha1sum( rpm )}\n' )
   except Exception as e:
      sys.exit( f'{manifestFileName}: {e}\n' )
   return manifestFileName
def create( outputSwix=None, info=None, rpms=None, force=False, sign=False ):
   '''
   Create a SWIX file named `outputSwix` given a list of RPMs.

   The archive holds the generated manifest plus the RPMs, stored without
   compression. `info` and `sign` are currently unused.
   '''
   dealWithExistingOutputFile( outputSwix, force )
   # BUG FIX: initialise tempDir so the finally clause cannot hit a
   # NameError when mkdtemp itself fails.
   tempDir = None
   try:
      tempDir = tempfile.mkdtemp( suffix='.tempDir',
                                  dir='.',
                                  prefix=os.path.basename( outputSwix ) )
      manifest = createManifest( tempDir, rpms )
      filesToZip = [ manifest ] + rpms
      if info:
         pass # TODO: If YAML file, verify.
      # BUG FIX: build argv as a list so an output name containing
      # whitespace is passed as one argument instead of being split.
      # '-0' means 'no compression'; '-j' means 'use basenames'.
      subprocess.check_call( [ 'zip', '-0', '-j', outputSwix ] + filesToZip )
      if sign:
         pass # TODO: Sign.
   except Exception as e:
      sys.exit( f'Error occurred during generation of SWIX file: {e}\n' )
   finally:
      if tempDir is not None:
         shutil.rmtree( tempDir, ignore_errors=True )
def parseCommandArgs( args ):
   '''
   Parse the `swix create` command line and return the argparse namespace.
   '''
   parser = argparse.ArgumentParser( prog='swix create' )
   parser.add_argument( 'outputSwix', metavar='OUTFILE.swix',
                        help='Name of output file' )
   parser.add_argument( 'rpms', metavar='PACKAGE.rpm', type=str, nargs='+',
                        help='An RPM to add to the swix' )
   parser.add_argument( '-f', '--force', action='store_true',
                        help='Overwrite OUTFILE.swix if it already exists' )
   parser.add_argument( '-i', '--info', metavar='manifest.yaml',
                        action='store', type=str,
                        help='Location of manifest.yaml file to add metadata to swix' )
   return parser.parse_args( args )
def main():
   '''
   Entry point: parse the command line and build the SWIX file.
   '''
   options = parseCommandArgs( sys.argv[ 1: ] )
   create( **vars( options ) )

if __name__ == '__main__':
   main()
| Python | 0 | |
4699c1c301f1cb99f6c9e616b31769c01bc291d5 | change datafiles in v1.* to make it work in v2.* | v1_to_v2.py | v1_to_v2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import optparse, pickle, exam
def main():
    """Re-save pickled data files named on the command line as QuestForms.

    Each positional argument is unpickled, wrapped in exam.QuestForm and
    pickled back out under a derived file name.

    NOTE(review): pickle.load runs arbitrary code — only feed trusted files.
    """
    opt = optparse.OptionParser()
    (options, args) = opt.parse_args()
    for i in args:
        with open(i,'rb') as f: data = pickle.load(f)
        data = exam.QuestForm(data)
        # NOTE(review): joining on 'v3.' turns e.g. "quiz.dat" into
        # "quizv3.dat" — confirm this naming is intended, given the script
        # is called v1_to_v2.
        with open('v3.'.join(i.split('.')),'wb') as f: pickle.dump(data,f)
    return
main()
| Python | 0.000001 | |
0403d6f78189be3f3b22f068dad1db0c53687ef7 | Add ptch module and base PatchFile class. This class can unpack RLE-compressed patchfiles. | ptch/__init__.py | ptch/__init__.py | # -*- coding: utf-8 -*-
"""
PTCH files are a container format for Blizzard patch files.
They begin with a 72 byte header containing some metadata, immediately
followed by a RLE-packed BSDIFF40.
The original BSDIFF40 format is compressed with bzip2 instead of RLE.
"""
#from hashlib import md5
from struct import unpack
from binascii import hexlify
from cStringIO import StringIO
class PatchFile(object):
    """Parser for a Blizzard PTCH container (Python 2 code: cStringIO,
    byte-oriented str).

    The constructor reads the fixed header fields and keeps the trailing
    RLE-packed BSDIFF40 payload in memory; rleUnpack() expands it.
    """

    def __init__(self, file):
        # Parse the header
        file.seek(0)
        assert file.read(4) == "PTCH"
        unk1 = file.read(4)  # 4 unknown bytes after the magic
        # NOTE(review): struct format "i" uses native byte order/alignment;
        # presumably the on-disk format is little-endian ("<i") — confirm.
        self.sizeBefore, self.sizeAfter = unpack("ii", file.read(8))
        assert file.read(4) == "MD5_"
        # 0x28 == 40: length marker of the MD5 section.
        assert unpack("i", file.read(4)) == (0x28, )
        self.md5Before, self.md5After = unpack("16s16s", file.read(32))
        # Store the digests as hex strings rather than raw bytes.
        self.md5Before, self.md5After = hexlify(self.md5Before), hexlify(self.md5After)
        assert file.read(4) == "XFRM"
        file.read(4)  # skipped/unknown field
        assert file.read(4) == "BSD0"
        self.fileSize, = unpack("i", file.read(4))
        # Everything after the header is the RLE-packed diff.
        self.compressedDiff = file.read()
        file.close()

    def __repr__(self):
        header = ("sizeBefore", "sizeAfter", "md5Before", "md5After", "fileSize")
        return "%s(%s)" % (self.__class__.__name__, ", ".join("%s=%r" % (k, getattr(self, k)) for k in header))

    def rleUnpack(self):
        """
        Read the RLE-packed data and
        return the unpacked output.
        """
        data = StringIO(self.compressedDiff)
        ret = []
        byte = data.read(1)
        while byte:
            byte = ord(byte)
            # Is it a repeat control?
            if byte & 0x80:
                # High bit set: copy the next (low 7 bits + 1) bytes verbatim.
                count = (byte & 0x7F) + 1
                ret.append(data.read(count))
            else:
                # High bit clear: emit (byte + 1) zero bytes.
                ret.append("\0" * (byte+1))
            byte = data.read(1)
        return "".join(ret)
| Python | 0 | |
8533c93505a733980406ce655372c7742dfcfdfc | Add update policy that allows for in place upgrade of ES cluster (#1537) | troposphere/policies.py | troposphere/policies.py | from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
    # UpdatePolicy sub-property controlling rolling updates of an Auto
    # Scaling group. Each props entry maps a CloudFormation key to a
    # (validator, required-flag) pair, the usual troposphere convention.
    props = {
        'MaxBatchSize': (positive_integer, False),
        'MinInstancesInService': (integer, False),
        'MinSuccessfulInstancesPercent': (integer, False),
        'PauseTime': (validate_pausetime, False),
        'SuspendProcesses': ([basestring], False),
        'WaitOnResourceSignals': (boolean, False),
    }


class AutoScalingScheduledAction(AWSProperty):
    # Controls how scheduled actions interact with stack updates.
    props = {
        'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
    }


class AutoScalingReplacingUpdate(AWSProperty):
    # When WillReplace is true, CloudFormation replaces the whole group.
    props = {
        'WillReplace': (boolean, False),
    }
class CodeDeployLambdaAliasUpdate(AWSProperty):
    # UpdatePolicy sub-property for CodeDeploy-managed Lambda alias updates.
    # BUG FIX: per the CloudFormation UpdatePolicy documentation,
    # ApplicationName and DeploymentGroupName are the *names* (strings) of
    # the CodeDeploy application and deployment group; the previous
    # `boolean` validators rejected every valid value.
    props = {
        'AfterAllowTrafficHook': (basestring, False),
        'ApplicationName': (basestring, True),
        'BeforeAllowTrafficHook': (basestring, False),
        'DeploymentGroupName': (basestring, True),
    }
class UpdatePolicy(AWSAttribute):
    # Resource-level UpdatePolicy attribute. Besides the Auto Scaling /
    # CodeDeploy sub-properties, UseOnlineResharding (ElastiCache) and
    # EnableVersionUpgrade (in-place Elasticsearch domain upgrades) are
    # plain boolean toggles.
    props = {
        'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
        'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
        'AutoScalingReplacingUpdate': (AutoScalingReplacingUpdate, False),
        'CodeDeployLambdaAliasUpdate': (CodeDeployLambdaAliasUpdate, False),
        'UseOnlineResharding': (boolean, False),
        'EnableVersionUpgrade': (boolean, False),
    }
class ResourceSignal(AWSProperty):
    # How many success signals to wait for, and for how long.
    props = {
        'Count': (positive_integer, False),
        'Timeout': (validate_pausetime, False),
    }


class AutoScalingCreationPolicy(AWSProperty):
    props = {
        'MinSuccessfulInstancesPercent': (integer, False),
    }


class CreationPolicy(AWSAttribute):
    # Resource-level CreationPolicy attribute; ResourceSignal is required.
    props = {
        'AutoScalingCreationPolicy': (AutoScalingCreationPolicy, False),
        'ResourceSignal': (ResourceSignal, True),
    }
| from . import AWSProperty, AWSAttribute, validate_pausetime
from .validators import positive_integer, integer, boolean
class AutoScalingRollingUpdate(AWSProperty):
    # UpdatePolicy sub-property for rolling updates of an Auto Scaling
    # group; props maps each key to a (validator, required-flag) pair.
    props = {
        'MaxBatchSize': (positive_integer, False),
        'MinInstancesInService': (integer, False),
        'MinSuccessfulInstancesPercent': (integer, False),
        'PauseTime': (validate_pausetime, False),
        'SuspendProcesses': ([basestring], False),
        'WaitOnResourceSignals': (boolean, False),
    }


class AutoScalingScheduledAction(AWSProperty):
    # Controls how scheduled actions interact with stack updates.
    props = {
        'IgnoreUnmodifiedGroupSizeProperties': (boolean, False),
    }


class AutoScalingReplacingUpdate(AWSProperty):
    props = {
        'WillReplace': (boolean, False),
    }


class CodeDeployLambdaAliasUpdate(AWSProperty):
    # NOTE(review): ApplicationName / DeploymentGroupName validated as
    # boolean, but CloudFormation defines them as strings — confirm.
    props = {
        'AfterAllowTrafficHook': (basestring, False),
        'ApplicationName': (boolean, True),
        'BeforeAllowTrafficHook': (basestring, False),
        'DeploymentGroupName': (boolean, True),
    }


class UpdatePolicy(AWSAttribute):
    # Resource-level UpdatePolicy attribute.
    props = {
        'AutoScalingRollingUpdate': (AutoScalingRollingUpdate, False),
        'AutoScalingScheduledAction': (AutoScalingScheduledAction, False),
        'AutoScalingReplacingUpdate': (AutoScalingReplacingUpdate, False),
        'CodeDeployLambdaAliasUpdate': (CodeDeployLambdaAliasUpdate, False),
        'UseOnlineResharding': (boolean, False),
    }


class ResourceSignal(AWSProperty):
    props = {
        'Count': (positive_integer, False),
        'Timeout': (validate_pausetime, False),
    }


class AutoScalingCreationPolicy(AWSProperty):
    props = {
        'MinSuccessfulInstancesPercent': (integer, False),
    }


class CreationPolicy(AWSAttribute):
    # Resource-level CreationPolicy attribute; ResourceSignal is required.
    props = {
        'AutoScalingCreationPolicy': (AutoScalingCreationPolicy, False),
        'ResourceSignal': (ResourceSignal, True),
    }
| Python | 0 |
2a6527c60d09c0cbb2f1902b57ae02ddade213eb | Create communicati.py | libs/communicati.py | libs/communicati.py | # communications.py
# Mónica Milán (@mncmilan)
# mncmilan@gmail.com
# http://steelhummingbird.blogspot.com.es/
# Library that contains all necessary methods in order to enable communications between PC and eZ430-Chronos.
import serial
# NOTE(review): the port is opened at import time on a hard-coded device;
# consider opening it lazily or making the port configurable.
s = serial.Serial('COM4', baudrate=115200,timeout=None) # open serial port
class CommunicationManager():
    """Thin wrapper over the module-level serial port `s` for talking to the
    eZ430-Chronos access point (all frames start with byte 255)."""

    def open_serial_port(self):
        """Send the start-communication frame."""
        s.write(bytearray([255, 7, 3])) # starting communications with serial port

    def send_data_request(self):
        """Request acceleration data and return the reply size in bytes."""
        s.write(bytearray([255, 8, 7, 0, 0, 0, 0])) # acceleration data request
        bytesToRead = s.inWaiting()
        return bytesToRead

    def read_from_labVIEW_request(self):
        """Drain whatever is pending; returns (byte count, raw bytes)."""
        bytes_to_read = s.inWaiting()
        inbyte = s.read(bytes_to_read)
        return bytes_to_read, inbyte

    def read_data(self, bytes_to_read):
        """Read exactly `bytes_to_read` bytes from the port."""
        inbyte = s.read(bytes_to_read)
        return inbyte

    def close_serial_port(self):
        """Send the stop frame and close the port."""
        s.write(bytearray([255, 9, 3])) # stop transmitting
        s.close()
| Python | 0.000001 | |
e5293f7e33740f210ab58c3c05db18829db1474d | add docstrings [skip ci] | mailthon/helpers.py | mailthon/helpers.py | """
mailthon.helpers
~~~~~~~~~~~~~~~~
Implements various helper functions/utilities.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
import sys
import mimetypes
from collections import MutableMapping
from email.utils import formataddr
# Compatibility shim: the byte-string type differs between Python 2 and 3.
if sys.version_info[0] == 3:
    bytes_type = bytes
else:
    bytes_type = str
def guess(filename, fallback='application/octet-stream'):
    """
    Using the mimetypes library, guess the mimetype and
    encoding for a given *filename*. If the mimetype
    cannot be guessed, *fallback* is assumed instead.

    :param filename: Filename- can be absolute path.
    :param fallback: A fallback mimetype.
    """
    mimetype, encoding = mimetypes.guess_type(filename, strict=False)
    return (fallback if mimetype is None else mimetype), encoding
def format_addresses(addrs):
    """
    Given an iterable of addresses or name-address
    tuples *addrs*, return a header value that joins
    all of them together with a space and a comma.
    """
    formatted = []
    for item in addrs:
        if isinstance(item, tuple):
            formatted.append(formataddr(item))
        else:
            formatted.append(item)
    return ', '.join(formatted)
def encode_address(addr, encoding='utf-8'):
    """
    Given an email address *addr*, try to encode
    it with ASCII. If it's not possible, encode
    the *local-part* with the *encoding* and the
    *domain* with IDNA.
    """
    if isinstance(addr, bytes_type):
        return addr
    try:
        return addr.encode('ascii')
    except UnicodeEncodeError:
        pass
    if '@' not in addr:
        return addr.encode(encoding)
    localpart, domain = addr.split('@', 1)
    return localpart.encode(encoding) + b'@' + domain.encode('idna')
class UnicodeDict(dict):
    """
    A dictionary that handles unicode values
    magically - that is, byte-values are
    automatically decoded. Accepts a dict
    or iterable *values*.

    :param encoding: Default encoding used
        if no encoding is specified.
    """

    def __init__(self, values=(), encoding='utf-8'):
        dict.__init__(self)
        self.encoding = encoding
        self.update(values)

    def __setitem__(self, key, value):
        # Decode byte-strings so the mapping only ever stores text values.
        if isinstance(value, bytes_type):
            value = value.decode(self.encoding)
        dict.__setitem__(self, key, value)

    # MutableMapping.update is implemented in terms of __setitem__ (unlike
    # dict.update), so values inserted via update() are decoded too.
    update = MutableMapping.update
| """
mailthon.helpers
~~~~~~~~~~~~~~~~
Implements various helper functions/utilities.
:copyright: (c) 2015 by Eeo Jun
:license: MIT, see LICENSE for details.
"""
import sys
import mimetypes
from collections import MutableMapping
from email.utils import formataddr
# Compatibility shim: the byte-string type differs between Python 2 and 3.
if sys.version_info[0] == 3:
    bytes_type = bytes
else:
    bytes_type = str
def guess(filename, fallback='application/octet-stream'):
    """
    Using the mimetypes library, guess the mimetype and
    encoding for a given *filename*. If the mimetype
    cannot be guessed, *fallback* is assumed instead.

    :param filename: Filename- can be absolute path.
    :param fallback: A fallback mimetype.
    """
    mimetype, encoding = mimetypes.guess_type(filename, strict=False)
    return (fallback if mimetype is None else mimetype), encoding
def format_addresses(addrs):
    """
    Given an iterable of addresses or name-address
    tuples *addrs*, return a header value that joins
    all of them together with a space and a comma.
    """
    formatted = []
    for item in addrs:
        if isinstance(item, tuple):
            formatted.append(formataddr(item))
        else:
            formatted.append(item)
    return ', '.join(formatted)
def encode_address(addr, encoding='utf-8'):
    """
    Given an email address *addr*, try to encode
    it with ASCII. If it's not possible, encode
    the *local-part* with the *encoding* and the
    *domain* with IDNA.
    """
    if isinstance(addr, bytes_type):
        return addr
    try:
        return addr.encode('ascii')
    except UnicodeEncodeError:
        pass
    if '@' not in addr:
        return addr.encode(encoding)
    localpart, domain = addr.split('@', 1)
    return localpart.encode(encoding) + b'@' + domain.encode('idna')
class UnicodeDict(dict):
    """A dict that transparently decodes byte-string values to text."""

    def __init__(self, values=(), encoding='utf-8'):
        dict.__init__(self)
        self.encoding = encoding  # codec used to decode byte values
        self.update(values)

    def __setitem__(self, key, value):
        # Decode byte-strings so the mapping only ever stores text values.
        if isinstance(value, bytes_type):
            value = value.decode(self.encoding)
        dict.__setitem__(self, key, value)

    # MutableMapping.update is implemented in terms of __setitem__ (unlike
    # dict.update), so values inserted via update() are decoded too.
    update = MutableMapping.update
| Python | 0 |
ea652c892219d1ed08a0453a3b2ede3efb452e23 | Create __init__.py | ui_techmenu/__init__.py | ui_techmenu/__init__.py | # -*- coding: utf-8 -*-
######################################################################
#
# ui_techmenu - Explode Technical Menu for Odoo
# Copyright (C) 2012 - TODAY, Ursa Information Systems (<http://ursainfosystems.com>)
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>)
# contact@ursainfosystems.com
#
#
# Ursa is willing to revoke copyright claims in the future if Odoo wishes to certify this module.
#
######################################################################
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#
# For clients with an annual support contract with Ursa, this program is warranted within the guidelines of that contract.
#
# For ALL OTHERS, this program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY (including the absence
# of implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE). See the GNU Affero General Public License for
# more information.
#
######################################################################
#
# You should have received a copy of the GNU Affero General Public License along with this program. The license is in the file
# named LICENSE in the top level directory and also provided on the web at <http://www.gnu.org/licenses/>.
#
######################################################################
# python dependencies (either files or classes) are designated below
# import <file_dependency>
# import <class_dependency>
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.000429 | |
841e8fe236eab35b803cb9d8bec201306ce4642e | Add script to generate big RUM_* files | util/repeat_rum_file.py | util/repeat_rum_file.py | from rum_mapping_stats import aln_iter
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--times', type=int)
parser.add_argument('--max-seq', type=int)
parser.add_argument('rum_file', type=file)
args = parser.parse_args()
alns = list(aln_iter(args.rum_file))
for t in range(args.times):
for aln in alns:
old_read_num = aln.read_num
aln.read_num = old_read_num + t * args.max_seq
aln.write(sys.stdout)
aln.read_num = old_read_num
| Python | 0 | |
3ab98baaf2b81ffa1afef808f27608f06bc946d3 | Create commands.py | web/commands.py | web/commands.py | #
# Commands for RPC interface
#
from twisted.protocols.amp import AMP, Boolean, Integer, String, Float, Command
# Each Command subclass below declares the wire schema for one AMP RPC:
# 'arguments' lists the request fields, 'response' the reply fields, and
# requiresAnswer = False marks fire-and-forget commands (no reply expected).

class Sum(Command):
    # Adds two integers; replies with an integer 'status'.
    arguments = [('a', Integer()),
                 ('b', Integer())]
    response = [('status', Integer())]

class HeartbeatCmd(Command):
    # Toggles the heartbeat on/off; no reply expected.
    arguments = [('enabled', Boolean())]
    response = [('status', Boolean())]
    requiresAnswer = False

class HaltCmd(Command):
    # No arguments; no reply expected.
    arguments = []
    response = [('status', Boolean())]
    requiresAnswer = False

class ModeCmd(Command):
    # Sets an operating mode by name; no reply expected.
    arguments = [('mode', String())]
    response = [('status', String())]
    requiresAnswer = False

class QueryStatus(Command):
    # Snapshot query: GPS fix/position/motion, satellite count,
    # time/date stamps, compass heading and temperature.
    arguments = []
    response = [('fix', Boolean()),
                ('lat', Float()),
                ('lon', Float()),
                ('gps_heading', Float()),
                ('gps_speed', Float()),
                ('altitude', Float()),
                ('num_sat', Integer()),
                ('timestamp', String()),
                ('datestamp', String()),
                ('compass_heading', Float()),
                ('temperature', Float())]

class NavigationCmd(Command):
    # Speed/heading setpoint; no reply expected.
    arguments = [('speed', Float()), ('heading', Float())]
    response = [('status', Boolean())]
    requiresAnswer = False

class ManualDriveCmd(Command):
    # Direct throttle/steering values; no reply expected.
    arguments = [('throttle', Float()), ('steering', Float())]
    response = [('status', Boolean())]
    requiresAnswer = False

class ExitCmd(Command):
    # No arguments, no response fields; no reply expected.
    arguments = []
    response = []
    requiresAnswer = False
| Python | 0.000011 | |
b5f8299cbe539cf2a01988ca25e0c7638400bc8c | Create stuff.py | bottomline/stuff.py | bottomline/stuff.py | # Testing
# Python 2 print statement; smoke-test output only.
print 'heck yeah!'
| Python | 0.000001 | |
5777877d1ed71ed21f67e096b08ad495ff844ed8 | add testexample/simpleTestwithPython.py | testexample/simpleTestwithPython.py | testexample/simpleTestwithPython.py | import os
import re
import json
import sys
import getopt
import argparse
from docopt import docopt
from urllib2 import urlopen, Request
import urllib
import urllib2
import requests
# Endpoints and bearer token for the local API server (port 9000).
url_phenotypes = 'http://localhost:9000/api/phenotypes'
url_genotypes = 'http://localhost:9000/api/genotypes'
token = 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJRCI6Ik5JQUdBRFMiLCJleHAiOjE0NjEzNjI0NTV9.-Roix0YvuPy9VHaWm9wE83yB7NiSunyVXsVlR74lu2Y'
headers = {'Authorization': '%s' % token}
# Fetch both collections up front at import time; the JSON bodies are
# decoded into plain dicts used by the functions below.
request_phenotypes = Request(url_phenotypes, headers=headers)
request_genotypes = Request(url_genotypes, headers=headers)
response_phenotypes = urlopen(request_phenotypes)
response_genotypes = urlopen(request_genotypes)
data_phenotypes = json.loads(response_phenotypes.read())
data_genotypes = json.loads(response_genotypes.read())
def loadPhenotypes(data_phenotypes):
    """Print the identifying fields of every phenotype record.

    Expects ``data_phenotypes['phenotypes']`` to be a list of dicts; the
    five fields below are printed one per line, in this order.
    """
    for record in data_phenotypes['phenotypes']:
        for field in ('title', 'family_id', 'individual_id',
                      'paternal_id', 'maternal_id'):
            print(record[field])
def loadGenotypes(data_genotypes):
    """Print the identifying fields of every genotype record.

    Expects ``data_genotypes['genotypes']`` to be a list of dicts; the
    four fields below are printed one per line, in this order.
    """
    for record in data_genotypes['genotypes']:
        for field in ('title', 'chr', 'coordinate', 'variant_id'):
            print(record[field])
def postGenotypes(url_genotypes, token, headers):
    """POST a hard-coded sample genotype record and print the HTTP status.

    NOTE(review): the ``token`` parameter is unused here -- the bearer
    token is expected to already be inside ``headers``; confirm before
    removing it from the signature.
    """
    values = {"title":"test","chr":"2","variant_id":"snp4","location":"0","coordinate":"1111830","call":"G T G T G G T T G T T T"}
    data = json.dumps(values)
    req = requests.post(url_genotypes, data, headers=headers)
    print req.status_code
# Script entry: dump both collections fetched above, then POST a sample
# genotype record back to the server.
loadPhenotypes(data_phenotypes)
loadGenotypes(data_genotypes)
postGenotypes(url_genotypes, token, headers)
| Python | 0.000001 | |
606020fbb7c3e608c8eab19ca143919003ea4f7d | add some first tests. | test_triptan.py | test_triptan.py | import os
from unittest import TestCase
from tempfile import TemporaryDirectory
from triptan.core import Triptan
class TriptanInitializationTest(TestCase):
    """
    Asserts that triptan can setup the necessary data correctly.
    """

    def test_init_file_structure(self):
        """
        Assert the file structure is created correctly.
        """
        # Setup against a throwaway directory; the context manager removes
        # it again after the assertions.
        with TemporaryDirectory() as tmpd:
            Triptan.setup(
                tmpd,
                'triptan.yml',
                {'revisions_location': 'revisions'}
            )
            # Both the config file and the revisions directory must exist.
            assert os.path.exists(os.path.join(tmpd, 'triptan.yml'))
            assert os.path.exists(os.path.join(tmpd, 'revisions'))
class TriptanTest(TestCase):
    """
    Assert the core functionality is working.
    """

    def setUp(self):
        """
        Create a temporary directory and set triptan up with it.
        """
        self.path = TemporaryDirectory()
        # Remove the directory when the test finishes -- previously it
        # leaked because the TemporaryDirectory was never cleaned up.
        self.addCleanup(self.path.cleanup)
        Triptan.setup(
            self.path.name,
            'triptan.yml',
            {'revisions_location': 'revisions'}
        )
        self.triptan = Triptan(self.path.name, 'triptan.yml')

    def test_default_revision(self):
        """
        Assert the default revision is -1.
        """
        assert self.triptan.current_revision == -1

    def test_revision_creation(self):
        """
        Assert that revisions are correctly created.
        """
        self.triptan.new_revision("test revision")
        rev_path = os.path.join(self.path.name, 'revisions/revision-000.py')
        assert os.path.exists(rev_path)
        self.triptan.new_revision("another")
        rev_path = os.path.join(self.path.name, 'revisions/revision-001.py')
        assert os.path.exists(rev_path)
| Python | 0 | |
67219e743f224cc82b6d17b167c9c9a16540d5e7 | Add a test that we are including proper license files for all requirements. | awx/main/tests/functional/test_licenses.py | awx/main/tests/functional/test_licenses.py |
import glob
import json
import os
from django.conf import settings
from pip._internal.req import parse_requirements
def test_python_and_js_licenses():
    """Check that shipped license files, Python requirements and UI (npm)
    requirements are mutually consistent: every requirement has a license
    file, no stale license files remain, nothing is GPL licensed, and
    (L)GPL packages ship a matching embedded source tarball.
    """
    def index_licenses(path):
        # Check for GPL (forbidden) and LGPL (need to ship source)
        # This is not meant to be an exhaustive check.
        def check_license(license_file):
            with open(license_file) as f:
                data = f.read()
                is_lgpl = 'GNU LESSER GENERAL PUBLIC LICENSE' in data.upper()
                # The LGPL refers to the GPL in-text
                # Case-sensitive for GPL to match license text and not PSF license reference
                is_gpl = 'GNU GENERAL PUBLIC LICENSE' in data and not is_lgpl
                return (is_gpl, is_lgpl)

        def find_embedded_source_version(path, name):
            # Look for a '<name>-<version>.tar.gz' next to the license files.
            for entry in os.listdir(path):
                # Check variations of '-' and '_' in filenames due to python
                for fname in [name, name.replace('-', '_')]:
                    if entry.startswith(fname) and entry.endswith('.tar.gz'):
                        entry = entry[:-7]
                        (n, v) = entry.rsplit('-', 1)
                        return v
            return None

        # renamed from 'list' to avoid shadowing the builtin
        index = {}
        for txt_file in glob.glob('%s/*.txt' % path):
            filename = txt_file.split('/')[-1]
            name = filename[:-4].lower()
            (is_gpl, is_lgpl) = check_license(txt_file)
            index[name] = {
                'name': name,
                'filename': filename,
                'gpl': is_gpl,
                'source_required': (is_gpl or is_lgpl),
                'source_version': find_embedded_source_version(path, name)
            }
        return index

    def read_api_requirements(path):
        ret = {}
        for req_file in ['requirements.txt', 'requirements_ansible.txt', 'requirements_git.txt', 'requirements_ansible_git.txt']:
            fname = '%s/%s' % (path, req_file)
            for reqt in parse_requirements(fname, session=''):
                name = reqt.name
                version = str(reqt.specifier)
                if version.startswith('=='):
                    version = version[2:]
                if reqt.link:
                    # Git/URL requirement: the link filename is '<name>@<version>'.
                    (name, version) = reqt.link.filename.split('@', 1)
                    if name.endswith('.git'):
                        name = name[:-4]
                ret[name] = {'name': name, 'version': version}
        return ret

    def read_ui_requirements(path):
        def json_deps(jsondata):
            # Recursively flatten the non-dev dependencies of a
            # package-lock.json tree.
            ret = {}
            deps = jsondata.get('dependencies', {})
            for key in deps.keys():
                key = key.lower()
                devonly = deps[key].get('dev', False)
                if not devonly:
                    if key not in ret.keys():
                        depname = key.replace('/', '-')
                        ret[depname] = {
                            'name': depname,
                            'version': deps[key]['version']
                        }
                    ret.update(json_deps(deps[key]))
            return ret

        with open('%s/package-lock.json' % path) as f:
            jsondata = json.load(f)
        return json_deps(jsondata)

    def remediate_licenses_and_requirements(licenses, requirements):
        errors = []
        # sorted() instead of keys()+list.sort(): dict.keys() returns a
        # view on Python 3, which has no .sort() method.
        for item in sorted(licenses.keys()):
            if item not in requirements.keys() and item != 'awx':
                errors.append(" license file %s does not correspond to an existing requirement; it should be removed." % (licenses[item]['filename'],))
                continue
            # uWSGI has a linking exception
            if licenses[item]['gpl'] and item != 'uwsgi':
                errors.append(" license for %s is GPL. This software cannot be used." % (item,))
            if licenses[item]['source_required']:
                version = requirements[item]['version']
                if version != licenses[item]['source_version']:
                    errors.append(" embedded source for %s is %s instead of the required version %s" % (item, licenses[item]['source_version'], version))
            elif licenses[item]['source_version']:
                errors.append(" embedded source version %s for %s is included despite not being needed" % (licenses[item]['source_version'], item))
        for item in sorted(requirements.keys()):
            if item not in licenses.keys():
                errors.append(" license for requirement %s is missing" % (item,))
        return errors

    base_dir = settings.BASE_DIR
    api_licenses = index_licenses('%s/../docs/licenses' % base_dir)
    ui_licenses = index_licenses('%s/../docs/licenses/ui' % base_dir)
    api_requirements = read_api_requirements('%s/../requirements' % base_dir)
    ui_requirements = read_ui_requirements('%s/ui' % base_dir)

    errors = []
    errors += remediate_licenses_and_requirements(ui_licenses, ui_requirements)
    errors += remediate_licenses_and_requirements(api_licenses, api_requirements)
    if errors:
        raise Exception('Included licenses not consistent with requirements:\n%s' %
                        '\n'.join(errors))
| Python | 0 | |
1803ec42e2eaad689dd51d3afb0b943e411f10d5 | Add breath first search algorithm | breath_first_search/breath_first_search.py | breath_first_search/breath_first_search.py | #!/usr/bin/env python
from collections import deque
class BreathFirstSearchGame(object):
    """Breadth-first search demo on a fixed 8-node undirected graph.

    Constructing an instance runs the whole demo: the adjacency matrix is
    built, BFS is run from node 0, and the path to node 7 is printed one
    node index per line.
    """

    def __init__(self):
        # Nodes are numbered 0..7.
        self.node_number = 8
        # Undirected edges of the fixed demo graph.
        self.edges = [(0, 1), (0, 3), (1, 2), (1, 5), (2, 7), (3, 4), (3, 6),
                      (4, 5), (5, 7)]
        # Boolean adjacency matrix, filled in from self.edges.
        self.graph = [[False] * self.node_number
                      for _ in range(self.node_number)]
        # FIFO frontier of discovered-but-unexpanded nodes.
        self.open_set = deque()
        self.source_node = 0
        self.destination_node = 7
        # Per-node "already discovered" flags.
        self.is_visit_node_array = [False] * self.node_number
        # Per-node BFS-tree parent; -1 means never discovered.
        self.best_parent_node_array = [-1] * self.node_number
        self.initialize_internal_variables()
        self.travel_and_update_variables()
        self.travel_desination_path(self.destination_node)

    def initialize_internal_variables(self):
        """Fill the adjacency matrix and seed the frontier with the source."""
        for a, b in self.edges:
            self.graph[a][b] = True
            self.graph[b][a] = True
        self.open_set.append(self.source_node)
        self.is_visit_node_array[self.source_node] = True
        # The source is its own parent, which terminates path recovery.
        self.best_parent_node_array[self.source_node] = self.source_node

    def travel_and_update_variables(self):
        """Run BFS, recording each node's parent in the BFS tree."""
        while self.open_set:
            current = self.open_set.popleft()
            for neighbor in range(self.node_number):
                if (self.graph[current][neighbor]
                        and not self.is_visit_node_array[neighbor]):
                    self.open_set.append(neighbor)
                    self.best_parent_node_array[neighbor] = current
                    self.is_visit_node_array[neighbor] = True

    def travel_desination_path(self, destination_node):
        """Print the source->destination path, one node index per line."""
        if destination_node != self.source_node:
            self.travel_desination_path(
                self.best_parent_node_array[destination_node])
        print(destination_node)
def main():
    # Constructing the game runs the whole demo (BFS + path printing);
    # the instance itself is not used afterwards.
    print("Start breath first search")
    game = BreathFirstSearchGame()

if __name__ == "__main__":
    main()
| Python | 0.000049 | |
c98eff8545c90563246a53994fe8f65faaf76b0a | Add fetch recipe for the open source infra repo. | recipes/infra.py | recipes/infra.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Infra(recipe_util.Recipe):
    """Basic Recipe class for the Infrastructure repositories."""

    @staticmethod
    def fetch_spec(props):
        # One unmanaged gclient solution pointing at the infra repo.
        spec = {
            'solutions': [
                {
                    'name': 'infra',
                    'url': 'https://chromium.googlesource.com/infra/infra.git',
                    'deps_file': 'DEPS',
                    'managed': False,
                },
            ],
        }
        return {
            'type': 'gclient_git',
            'gclient_git_spec': spec,
        }

    @staticmethod
    def expected_root(_props):
        return 'infra'
def main(argv=None):
    # Delegate all argument handling to the shared Recipe implementation.
    return Infra().handle_args(argv)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
| Python | 0.00002 | |
5b3b5bb145eea8a71c81a383d2bdac7ecf13f98e | Add sys module tests | tests/integration/modules/sysmod.py | tests/integration/modules/sysmod.py | # Import python libs
import os
# Import salt libs
import integration
class SysModuleTest(integration.ModuleCase):
    '''
    Validate the sys module
    '''
    def test_list_functions(self):
        '''
        sys.list_functions
        '''
        # Spot-check that functions from two unrelated modules appear.
        funcs = self.run_function('sys.list_functions')
        self.assertTrue('hosts.list_hosts' in funcs)
        self.assertTrue('pkg.install' in funcs)

    def test_list_modules(self):
        '''
        sys.list_modules
        '''
        # Spot-check that two known modules appear in the listing.
        mods = self.run_function('sys.list_modules')
        self.assertTrue('hosts' in mods)
        self.assertTrue('pkg' in mods)
| Python | 0.000001 | |
7d800a0fc2d94cad14e825faa27e1f5b2d2cbed8 | Create new package (#6648) | var/spack/repos/builtin/packages/breseq/package.py | var/spack/repos/builtin/packages/breseq/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Breseq(AutotoolsPackage):
    """breseq is a computational pipeline for finding mutations relative to a
    reference sequence in short-read DNA re-sequencing data for haploid
    microbial-sized genomes."""

    homepage = "http://barricklab.org/breseq"
    url = "https://github.com/barricklab/breseq/archive/v0.31.1.tar.gz"

    version('0.31.1', 'a4e602d5481f8692833ba3d5a3cd0394')

    # Autotools toolchain is needed only while building.
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')

    # Runtime-only dependencies invoked by the pipeline.
    depends_on('bedtools2', type='run')
    depends_on('r', type='run')
| Python | 0 | |
19cf7a2833ba2ffcff46bd4543ed93fd80c1d8ea | fix trying to run configure on an already configured directory fixes #2959 (#2961) | var/spack/repos/builtin/packages/libmng/package.py | var/spack/repos/builtin/packages/libmng/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmng(AutotoolsPackage):
    """libmng -THE reference library for reading, displaying, writing
    and examining Multiple-Image Network Graphics. MNG is the animation
    extension to the popular PNG image-format."""

    homepage = "http://sourceforge.net/projects/libmng/"
    url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz"

    version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')

    depends_on("jpeg")
    depends_on("zlib")
    depends_on("lcms")

    def patch(self):
        # jpeg requires stdio to be included before its headers.
        filter_file(r'^(\#include \<jpeglib\.h\>)',
                    '#include<stdio.h>\n\\1', 'libmng_types.h')

    @run_before('configure')
    def clean_configure_directory(self):
        # The release tarball ships pre-configured; reset it so that
        # configure can run on a clean tree.
        make('distclean')
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libmng(AutotoolsPackage):
    """libmng -THE reference library for reading, displaying, writing
    and examining Multiple-Image Network Graphics. MNG is the animation
    extension to the popular PNG image-format."""

    homepage = "http://sourceforge.net/projects/libmng/"
    url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz"

    version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')

    depends_on("jpeg")
    depends_on("zlib")
    depends_on("lcms")

    def patch(self):
        # jpeg requires stdio to be included before its headers.
        filter_file(r'^(\#include \<jpeglib\.h\>)',
                    '#include<stdio.h>\n\\1', 'libmng_types.h')
| Python | 0 |
94f2ea927d9e218f2b5065456275d407164ddf0a | Add anidub.com tracker support | updatorr/tracker_handlers/handler_anidub.py | updatorr/tracker_handlers/handler_anidub.py | from updatorr.handler_base import BaseTrackerHandler
from updatorr.utils import register_tracker_handler
import urllib2
class AnidubHandler(BaseTrackerHandler):
    """This class implements .torrent files downloads
    for http://tr.anidub.com tracker."""

    # True after a successful login in this session.
    logged_in = False
    # Stores a number of login attempts to prevent recursion.
    login_counter = 0

    def get_torrent_file(self):
        """This is the main method which returns
        a filepath to the downloaded file."""
        torrent_file = None
        download_link = self.get_download_link()
        if download_link is None:
            self.dump_error('Cannot find torrent file download link at %s' % self.resource_url)
        else:
            self.debug('Torrent download link found: %s' % download_link)
            torrent_file = self.download_torrent(download_link)
        return torrent_file

    def get_id_from_link(self):
        """Returns forum thread identifier from full thread URL.

        NOTE(review): assumes the URL contains a single '=' before the id
        (e.g. '...?t=<id>'); additional '=' characters would truncate it.
        """
        return self.resource_url.split('=')[1]

    def login(self, login, password):
        """Implements tracker login procedure.

        Returns True only when the tracker handed back a 'uid' cookie.
        """
        self.logged_in = False
        if login is None or password is None:
            return False
        self.login_counter += 1
        # No recursion wanted.
        if self.login_counter > 1:
            return False
        login_url = 'http://tr.anidub.com/takelogin.php'
        self.debug('Trying to login at %s ...' % login_url)
        form_data = {
            'username': login,
            'password': password,
        }
        # POST the credentials; success is detected via cookies below.
        self.get_resource(login_url, form_data)
        cookies = self.get_cookies()
        # Login success check.
        if cookies.get('uid') is not None:
            self.logged_in = True
        return self.logged_in

    def get_download_link(self):
        """Tries to find .torrent file download link at forum thread page
        and return that one."""
        response, page_html = self.get_resource(self.resource_url)
        page_links = self.find_links(page_html)
        download_link = None
        for page_link in page_links:
            if 'login.php?returnto=' in page_link:
                download_link = None
                self.debug('Login is required to download torrent file.')
                # Retry once after logging in; login() itself guards
                # against repeated attempts via login_counter.
                if self.login(self.get_settings('login'), self.get_settings('password')):
                    download_link = self.get_download_link()
            if 'download.php?id=' in page_link:
                # NOTE(review): replace("&", "&") below is a no-op; it was
                # presumably meant to unescape '&amp;' -- confirm upstream.
                download_link = 'http://tr.anidub.com/'+urllib2.unquote(page_link).replace("&", "&")
        return download_link

    def download_torrent(self, url):
        """Gets .torrent file contents from given URL and
        stores that in a temporary file within a filesystem.
        Returns a path to that file.
        """
        self.debug('Downloading torrent file from %s ...' % url)
        # That was a check that user himself visited torrent's page ;)
        cookies = self.get_cookies()
        #self.set_cookie('uid', self.get_id_from_link())
        contents = self.get_resource(url, {})[1]
        return self.store_tmp_torrent(contents)
# With that one we tell updatetorr to handle links to `rutracker.org` domain with RutrackerHandler class.
register_tracker_handler('tr.anidub.com', AnidubHandler)
| Python | 0 | |
24e3064002656ae649e8ddb931ee2370037812a0 | image.regression was missing | lib/image/regression.py | lib/image/regression.py | import copy, os, csv, string, fpformat
import numpy as N
import enthought.traits as traits
import neuroimaging.image as image
from neuroimaging.reference import grid
from neuroimaging.statistics.regression import RegressionOutput
from neuroimaging.statistics import utils
class ImageRegressionOutput(RegressionOutput):
    """
    A class to output things in GLM passes through fMRI data. It
    uses the image\'s iterator values to output to an image.
    """

    # Number of output frames per voxel slab.
    nout = traits.Int(1)
    # When true, an in-memory zero-filled output image is created eagerly.
    imgarray = traits.false
    # Passed through to image.Image: overwrite existing output files.
    clobber = traits.false

    def __init__(self, grid, **keywords):
        traits.HasTraits.__init__(self, **keywords)
        self.grid = grid
        if self.nout > 1:
            # NOTE(review): the parameter 'grid' shadows the imported
            # 'grid' module here, so DuplicatedGrids is looked up on the
            # passed grid object -- confirm this is intentional.
            self.grid = grid.DuplicatedGrids([self.grid]*self.nout)
        if self.imgarray:
            self.img = iter(image.Image(N.zeros(self.grid.shape, N.Float), grid=self.grid))

    def sync_grid(self, img=None):
        """
        Synchronize an image's grid iterator to self.grid's iterator.
        """
        if img is None:
            img = self.img
        img.grid.itertype = self.grid.itertype
        img.grid.labels = self.grid.labels
        img.grid.labelset = self.grid.labelset
        iter(img)

    def __iter__(self):
        return self

    def next(self, data=None):
        # Write one slab of data at the grid iterator's current position.
        # NOTE(review): self.img only exists when imgarray is true or a
        # subclass created it -- confirm callers guarantee that.
        value = self.grid.itervalue
        self.img.next(data=data, value=value)

    def extract(self, results):
        # Placeholder; subclasses pull the relevant statistic from the
        # regression results object.
        return 0.
class TContrastOutput(ImageRegressionOutput):
    """Writes the t statistic (and optionally the effect and sd) images
    for a single T contrast, plus the contrast matrix in CSV and binary
    form, under <path>/<subpath>/<contrast name>/."""

    contrast = traits.Any() # should really start specifying classes with traits, too
    effect = traits.true
    sd = traits.true
    t = traits.true
    outdir = traits.Str()
    ext = traits.Str('.img')
    subpath = traits.Str('contrasts')

    def __init__(self, grid, contrast, path='.', **keywords):
        ImageRegressionOutput.__init__(self, grid, **keywords)
        self.contrast = contrast
        self.outdir = os.path.join(path, self.subpath, self.contrast.name)
        self.path = path
        self.setup_contrast()
        # NOTE(review): self.frametimes is not defined in this class or in
        # ImageRegressionOutput -- presumably set by a subclass or caller;
        # confirm before relying on it.
        self.setup_output(time=self.frametimes)

    def setup_contrast(self, **extra):
        """Build the contrast matrix for the design."""
        self.contrast.getmatrix(**extra)

    def setup_output(self, **extra):
        """Create the output images and dump the contrast matrix."""
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)

        outname = os.path.join(self.outdir, 't%s' % self.ext)
        self.timg = image.Image(outname, mode='w', grid=self.grid,
                                clobber=self.clobber)
        self.sync_grid(img=self.timg)

        if self.effect:
            outname = os.path.join(self.outdir, 'effect%s' % self.ext)
            self.effectimg = image.Image(outname, mode='w', grid=self.grid,
                                         clobber=self.clobber)
            self.sync_grid(img=self.effectimg)

        if self.sd:
            outname = os.path.join(self.outdir, 'sd%s' % self.ext)
            self.sdimg = iter(image.Image(outname, mode='w', grid=self.grid,
                                          clobber=self.clobber))
            self.sync_grid(img=self.sdimg)

        # Human-readable copy of the contrast matrix.
        outname = os.path.join(self.outdir, 'matrix.csv')
        outfile = file(outname, 'w')
        outfile.write(string.join([fpformat.fix(x, 4) for x in self.contrast.matrix], ',') + '\n')
        outfile.close()

        # Raw little-endian float64 copy of the contrast matrix.
        outname = os.path.join(self.outdir, 'matrix.bin')
        outfile = file(outname, 'w')
        self.contrast.matrix = self.contrast.matrix.astype('<f8')
        self.contrast.matrix.tofile(outfile)
        outfile.close()

    def extract(self, results):
        return results.Tcontrast(self.contrast.matrix, sd=self.sd, t=self.t)

    def next(self, data=None):
        value = self.grid.itervalue
        self.timg.next(data=data.t, value=value)
        if self.effect:
            self.effectimg.next(data=data.effect, value=value)
        if self.sd:
            # Bug fix: the sd image was previously written with
            # data.effect, silently duplicating the effect image.
            self.sdimg.next(data=data.sd, value=value)
class FContrastOutput(ImageRegressionOutput):
    """Writes the F statistic image for a single F contrast, plus the
    contrast matrix in CSV and binary form, under
    <path>/<subpath>/<contrast name>/."""

    contrast = traits.Any()
    outdir = traits.Str()
    ext = traits.Str('.img')
    subpath = traits.Str('contrasts')

    def __init__(self, grid, contrast, path='.', **keywords):
        ImageRegressionOutput.__init__(self, grid, **keywords)
        self.contrast = contrast
        self.path = path
        self.outdir = os.path.join(self.path, self.subpath, self.contrast.name)
        self.setup_contrast()
        self.setup_output()

    def setup_contrast(self, **extra):
        """Build the contrast matrix for the design."""
        self.contrast.getmatrix(**extra)

    def setup_output(self):
        """Create the F image and dump the contrast matrix."""
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        outname = os.path.join(self.outdir, 'F%s' % self.ext)
        self.img = iter(image.Image(outname, mode='w', grid=self.grid,
                                    clobber=self.clobber))
        self.sync_grid()

        # Human-readable copy of the contrast matrix, one row per line.
        outname = os.path.join(self.outdir, 'matrix.csv')
        outfile = file(outname, 'w')
        writer = csv.writer(outfile)
        for row in self.contrast.matrix:
            writer.writerow([fpformat.fix(x, 4) for x in row])
        outfile.close()

        # Raw little-endian float64 copy of the contrast matrix.
        outname = os.path.join(self.outdir, 'matrix.bin')
        outfile = file(outname, 'w')
        self.contrast.matrix = self.contrast.matrix.astype('<f8')
        self.contrast.matrix.tofile(outfile)
        outfile.close()

    def extract(self, results):
        # Previously the F contrast was computed twice and the first
        # result discarded; compute it once.
        return results.Fcontrast(self.contrast.matrix).F
class ResidOutput(ImageRegressionOutput):
    # Writes the model residuals to <path>/<basename><ext>.

    outdir = traits.Str()
    ext = traits.Str('.img')
    basename = traits.Str('resid')

    def __init__(self, grid, path='.', nout=1, **keywords):
        ImageRegressionOutput.__init__(self, grid, nout=nout, **keywords)
        self.outdir = os.path.join(path)
        self.path = path
        if not os.path.exists(self.outdir):
            os.makedirs(self.outdir)
        outname = os.path.join(self.outdir, '%s%s' % (self.basename, self.ext))
        self.img = image.Image(outname, mode='w', grid=self.grid,
                               clobber=self.clobber)
        # Number of output frames equals the length of the grid's first axis.
        self.nout = self.grid.shape[0]
        self.sync_grid()

    def extract(self, results):
        # Full residual vector from the regression results.
        return results.resid

    def next(self, data=None):
        # NOTE(review): this advances self.grid via next() while other
        # outputs read grid.itervalue -- confirm which is intended.
        value = self.grid.next()
        self.img.next(data=data, value=value)
| Python | 0.999999 | |
b02ec9a16689bf2814e85f0edb01c7f4a5926214 | Add pre-migration script for account module. | addons/account/migrations/8.0.1.1/pre-migration.py | addons/account/migrations/8.0.1.1/pre-migration.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Akretion (http://www.akretion.com/)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
def migrate(cr, version):
    """Give the existing 'purchase' analytic journal an XML ID so the new
    account module data does not recreate it.

    :param cr: database cursor
    :param version: installed module version being migrated from; falsy on
        a fresh install, in which case there is nothing to migrate
    """
    if not version:
        return
    cr.execute(
        """SELECT id FROM account_analytic_journal WHERE type='purchase' """)
    res = cr.fetchone()
    # (Removed a leftover debug print statement here.)
    if res:
        openupgrade.add_xmlid(
            cr, 'account', 'exp', 'account.analytic.journal', res[0], True)
| Python | 0 | |
07841312d062fd0dd48baa0d3bc0d92989e05841 | add script mp3-file-renamer.py | mp3-file-renamer.py | mp3-file-renamer.py | #!/usr/bin/python
#Python script to rename mp3 files according to the format
#"Track-number Track-name.mp3", for example: 02 Self Control.mp3
#Note: Tracks must have valid ID3 data for this to work - python-mutagen is required.
#By Charles Bos
import os
import sys
from mutagen.id3 import ID3, ID3NoHeaderError
def usage():
    """Print command-line usage."""
    print('''Usage:
mp3-file-renamer.py <path to music>''')

# Get the music directory from the command line.
args = sys.argv
if (len(args) != 2) or (args[1] == '-h') or (args[1] == '--help'):
    usage()
    os._exit(0)
else:
    if os.path.exists(args[1]):
        musicDir = args[1]
    else:
        usage()
        os._exit(0)

# Collect .mp3 paths and read their title/track tags in one pass. Files
# without usable ID3 data are skipped up front. (The original removed
# entries from musicFiles while iterating over it, which silently skipped
# the following file and left titles/tracknums misaligned with musicFiles;
# it also matched '.mp3' anywhere in the name instead of as a suffix.)
musicFiles = []
tracknums = []
titles = []
for root, dirs, files in os.walk(musicDir, topdown=False):
    for name in files:
        if name.endswith(".mp3"):
            candidate = os.path.join(root, name)
            try:
                audio = ID3(candidate)
                title = str(audio["TIT2"].text[0])
                track = str(audio["TRCK"].text[0])
            except (ID3NoHeaderError, KeyError):
                continue
            musicFiles.append(candidate)
            titles.append(title)
            tracknums.append(track)

# Add a leading 0 to single-digit track numbers. Enumerate by position:
# list.index() would always hit the first duplicate track number instead
# of the current item.
for i, num in enumerate(tracknums):
    if len(num) == 1:
        tracknums[i] = "0" + num

if not musicFiles:
    print("No valid music files found. Nothing to do. Exiting...")
    os._exit(0)
else:
    # Start renaming: "NN Title.mp3" alongside the original file.
    for src, num, title in zip(musicFiles, tracknums, titles):
        dirname = src[:src.rfind("/") + 1]
        os.rename(src, dirname + num + " " + title + ".mp3")
cc76c00efa919f8532e21365606f38431093cc22 | Write inversion counting algorithm | count_inversions.py | count_inversions.py | def count_inversions(list, inversion_count = 0):
"""
recursively counts inversions of halved lists
where inversions are instances where a larger el occurs before a smaller el
merges the halved lists and increments the inversion count at each level
:param list list: list containing comparable elements
:param list list: list containing comparable elements
:returns: tuple w merged list and number of inversions
"""
if len(list) < 2:
return (list, inversion_count)
mid_point = len(list) / 2
# recursively count inversions in 1st half of input
first_half = count_inversions(list[0:mid_point], inversion_count)
# recursively count inversions in 2nd half of input
second_half = count_inversions(list[mid_point:len(list)], inversion_count)
# TODO: indexing into the returned tuple is confusing
# consider returning a dict instead
running_inversion_count = first_half[1] + second_half[1]
return merge_and_count_inversions(first_half[0], second_half[0], running_inversion_count)
def merge_and_count_inversions(a, b, inversion_count):
"""
steps through indexes in both input lists, appending the smaller val to the merged list at each step
increments the inversion count when els from list b are appended to the output before a is exhausted
:param list a: ordered list
:param list b: ordered list
:returns: tuple w merged list and number of inversions
"""
i = 0
j = 0
total_len = len(a) + len(b)
merged = []
for k in range(total_len):
try:
a[i]
except IndexError:
# concat merged w remainder of b if a's finished
merged = merged + b[j:len(b)]
j += 1
return (merged, inversion_count)
try:
b[j]
except IndexError:
# concat merged w remainder of a if b's finished
merged = merged + a[i:len(a)]
i += 1
return (merged, inversion_count)
if a[i] < b[j]:
merged.append(a[i])
i += 1
else:
merged.append(b[j])
j += 1
# increment inversion_count by num els remaining in a if a isn't exhausted
try:
a[i]
# inversion_count = len(a) - i
remaining_in_a = len(a) - i
inversion_count = inversion_count + remaining_in_a
except IndexError:
pass # a is exhausted
return (merged, inversion_count)
list = [ 1, 2, 9, -1, 0]
print count_inversions(list)[1]
# a = [1, 3, 5, 6]
# b = [2, 4, 7, 8, 9]
# print merge_and_count_inversions(a, b) | Python | 0.0007 | |
3331a9a6b8ada075aaefef021a8ad24a49995931 | Add test for prepare_instance_slug #92 | derrida/books/tests/test_search_indexes.py | derrida/books/tests/test_search_indexes.py | from unittest.mock import patch
from django.test import TestCase
from derrida.books.models import Reference, Instance
from derrida.books.search_indexes import ReferenceIndex
class TestReferenceIndex(TestCase):
fixtures = ['test_references.json']
def setUp(self):
'''None of the Instacefixtures have slugs, so generate them'''
for instance in Instance.objects.all():
instance.slug = instance.generate_safe_slug()
instance.save()
def test_prepare_instance_slug(self):
# create a ReferenceIndex object
refindex = ReferenceIndex()
# get a reference
reference = Reference.objects.first()
# not a book section (none in test set are)
# should return the slug of its instance
slug = refindex.prepare_instance_slug(reference)
assert slug == reference.instance.slug
# create a work as a 'collected in'
ecrit = Instance.objects.get(slug__icontains='lecriture-et-la')
debat = Instance.objects.get(slug__icontains='le-debat-sur')
# make ecrit a 'section' of debat
ecrit.collected_in = debat
ecrit.save()
# get a reference from ecrit
reference = Reference.objects.filter(instance=ecrit).first()
# should return the slug for debat not ecrit
slug = refindex.prepare_instance_slug(reference)
assert slug == debat.slug
| Python | 0 | |
451799f126afcdda70138dc348b9e1f276b1f86f | Add setting file for later use. | ox_herd/settings.py | ox_herd/settings.py | """Module to represent basic settings for ox_herd package.
"""
| Python | 0 | |
ec5136b86cce92a49cf2eea852f1d8f2d7110cf0 | Create element_search.py | 09-revisao/practice_python/element_search.py | 09-revisao/practice_python/element_search.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Exercise 20: Element Search
Write a function that takes an ordered list of numbers (a list where the
elements are in order from smallest to largest) and another number. The
function decides whether or not the given number is inside the list and
returns (then prints) an appropriate boolean.
Extras:
Use binary search.
"""
def in_list(a_list, number):
return True if [True for i in a_list if i == number] else False
def in_list2(a_list, number):
if len(a_list) == 1:
return a_list[0] == number
elif a_list[len(a_list) // 2] > number:
return in_list2(a_list[:len(a_list) // 2], number)
else:
return in_list2(a_list[len(a_list) // 2:], number)
if __name__ == "__main__":
a_list = [1, 3, 4, 5, 6, 7, 8, 12, 15, 20, 23, 33, 45, 64]
number = int(input("Enter a number: "))
print(
"The number %i is in the list %s: %s" %
(number, a_list, in_list(a_list, number)))
print(
"The number %i is in the list %s: %s" %
(number, a_list, in_list2(a_list, number)))
| Python | 0.000002 | |
cc06421fb4250640b2c9eef75480a3627a339473 | Add a script to normalize Gerrit ACLs | tools/normalize_acl.py | tools/normalize_acl.py | #!/usr/bin/env python
# Usage: normalize_acl.py acl.config [transformation [transformation [...]]]
#
# Transformations:
# 0 - dry run (default, print to stdout rather than modifying file in place)
# 1 - strip/condense whitespace and sort (implied by any other transformation)
# 2 - get rid of unneeded create on refs/tags
# 3 - remove any project.stat{e,us} = active since it's a default or a typo
# 4 - strip default *.owner = group Administrators permissions
# 5 - sort the exclusiveGroupPermissions group lists
import re
import sys
aclfile = sys.argv[1]
try:
transformations = sys.argv[2:]
except KeyError:
transformations = []
def tokens(data):
"""Human-order comparison
This handles embedded positive and negative integers, for sorting
strings in a more human-friendly order."""
data = data.replace('.', ' ').split()
for n in range(len(data)):
try:
data[n] = int(data[n])
except ValueError:
pass
return data
acl = {}
out = ''
if '0' in transformations or not transformations:
dry_run = True
else:
dry_run = False
aclfd = open(aclfile)
for line in aclfd:
# condense whitespace to single spaces and get rid of leading/trailing
line = re.sub('\s+', ' ', line).strip()
# skip empty lines
if not line:
continue
# this is a section heading
if line.startswith('['):
section = line.strip(' []')
# use a list for this because some options can have the same "key"
acl[section] = []
# key=value lines
elif '=' in line:
acl[section].append(line)
# WTF
else:
raise Exception('Unrecognized line!')
aclfd.close()
if '2' in transformations:
try:
acl['access "refs/tags/*"'] = [
x for x in acl['access "refs/tags/*"']
if not x.startswith('create = ')]
except KeyError:
pass
if '3' in transformations:
try:
acl['project'] = [x for x in acl['project'] if x not in
('state = active', 'status = active')]
except KeyError:
pass
if '4' in transformations:
for section in acl.keys():
acl[section] = [x for x in acl[section] if x !=
'owner = group Administrators']
if '5' in transformations:
for section in acl.keys():
newsection = []
for option in acl[section]:
key, value = [x.strip() for x in option.split('=')]
if key == 'exclusiveGroupPermissions':
newsection.append('%s = %s' % (
key, ' '.join(sorted(value.split()))))
else:
newsection.append(option)
acl[section] = newsection
for section in sorted(acl.keys()):
if acl[section]:
out += '\n[%s]\n' % section
for option in sorted(acl[section], key=tokens):
out += '%s\n' % option
if dry_run:
print(out[1:-1])
else:
aclfd = open(aclfile, 'w')
aclfd.write(out[1:])
aclfd.close()
| Python | 0.000003 | |
7060b82030d719cdcbdcecdb5eb7d34b405aa805 | Make the migration for previous commit | platforms/migrations/0003_auto_20150718_0050.py | platforms/migrations/0003_auto_20150718_0050.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('platforms', '0002_auto_20150718_0042'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='default_installer',
field=jsonfield.fields.JSONField(null=True),
),
]
| Python | 0.000013 | |
3b42e348987294602440c3c1d4aa4361afcdc298 | Add problem 14 | problem_14.py | problem_14.py | from problem_12 import new_encryption_oracle, find_blocksize
import random
from string import printable
RANDOM_PREFIX = ''.join(random.choice(printable) for _ in range(random.randrange(0, 20)))
# print len(RANDOM_PREFIX)
def oracle(adversary_input):
return new_encryption_oracle(RANDOM_PREFIX + adversary_input)
def find_oracle_added_length(blocksize):
adversary_input = ''
previous_length = len(oracle(adversary_input))
while True:
adversary_input += '0'
current_length = len(oracle(adversary_input))
if current_length > previous_length:
return current_length - len(adversary_input) - blocksize
def find_padding_length(blocksize):
adversary_input = '0'*64
zero_encrypted_block = oracle(adversary_input)[2*blocksize:3*blocksize]
change_counter = 1
while True:
adversary_input = change_counter*'1' + '0'*(64-change_counter)
current_second_block = oracle(adversary_input)[2*blocksize:3*blocksize]
if current_second_block != zero_encrypted_block:
return 2*blocksize - change_counter + 1
change_counter += 1
def find_single_ecb_character(blocksize, decrypted, start_padding_length, unknown_text_length):
bypass_start_padding = '0'*(2*blocksize - start_padding_length)
input_padding = bypass_start_padding + '0'*(blocksize*(unknown_text_length/blocksize + 1) - len(decrypted) - 1)
test_padding = input_padding + decrypted
block_position = (len(test_padding) - len(bypass_start_padding))/blocksize
ciphertext = oracle(input_padding)[2*blocksize:]
cipher_blocks = [ciphertext[i*blocksize:(i+1)*blocksize] for i in range(len(ciphertext)/blocksize)]
for test_char in printable:
test_character = test_padding + test_char
test_character_ciphertext = oracle(test_character)[2*blocksize:]
test_blocks = [test_character_ciphertext[i*blocksize:(i+1)*blocksize] for i in range(len(test_character_ciphertext)/blocksize)]
if test_blocks[block_position] == cipher_blocks[block_position]:
return test_char
if __name__ == '__main__':
blocksize = find_blocksize(oracle)
oracle_added_length = find_oracle_added_length(blocksize)
start_padding_length = find_padding_length(blocksize)
unknown_text_length = oracle_added_length - start_padding_length
decrypted = ''
while len(decrypted) < unknown_text_length:
decrypted += find_single_ecb_character(blocksize, decrypted, start_padding_length, unknown_text_length)
print decrypted.decode('base64')
| Python | 0 | |
49b1de4a68133e618723f96f2dc922b311bdd982 | Add Script to encode raw RGB565 | util/encode_raw.py | util/encode_raw.py | #!/usr/bin/env python
# Converts raw RGB565 video to MP4/AVI
from sys import argv, exit
from array import array
from subprocess import call
buf=None
TMP_FILE = "/tmp/video.raw"
if (len(argv) != 4):
print("Usage: encode_raw input.raw output.avi fps")
exit(1)
with open(argv[1], "rb") as f:
buf = array("H", f.read())
#Swap not needed if rgb565be is supported
buf.byteswap()
with open(TMP_FILE, "wb") as f:
f.write(buf.tostring())
cmd = "ffmpeg -vcodec rawvideo -r %d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %s -vcodec mpeg4 %s"%(int(argv[3]), TMP_FILE, argv[2])
call(cmd.split())
| Python | 0 | |
74354263acb3399295e7fde18d6aeed4b7bb7397 | Fix maybe all flake8 errors. Add first test. | what_transcode/tests.py | what_transcode/tests.py | """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from what_transcode.utils import get_mp3_ids
class UtilsTests(TestCase):
def test_get_mp3_ids(self):
what_group = {
'torrents': [
{
'id': 0,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 1,
'format': 'MP3',
'encoding': '320',
'media': 'CD',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 2,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'CD',
'remastered': True,
'remasterCatalogueNumber': 'catno',
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 3,
'format': 'FLAC',
'encoding': 'Lossless',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 4,
'format': 'MP3',
'encoding': 'V0 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
{
'id': 5,
'format': 'MP3',
'encoding': 'V2 (VBR)',
'media': 'WEB',
'remastered': False,
'remasterCatalogueNumber': None,
'remasterRecordLabel': None,
'remasterTitle': None,
'remasterYear': None,
},
]
}
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][0]
}), {'320': 1})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][2]
}), {})
self.assertEqual(get_mp3_ids(what_group, {
'torrent': what_group['torrents'][3]
}), {'V0': 4, 'V2': 5})
| Python | 0 | |
5dba86b3a68c27a01eb143a6dfdb35d01c3c99e8 | add app_test | turbo/test/app_test.py | turbo/test/app_test.py | from __future__ import absolute_import, division, print_function, with_statement
import os
import signal
import sys
import unittest
import random
import time
import threading
import logging
import requests
import multiprocessing
from turbo import app
from turbo.conf import app_config
from turbo import register
app_config.app_name = 'app_test'
app_config.web_application_setting = {}
logger = logging.getLogger()
print(logger.level)
class HomeHandler(app.BaseBaseHandler):
def get(self):
logger.info('get')
def run_server():
register.register_url('/', HomeHandler)
app.start()
class AppTest(unittest.TestCase):
def setUp(self):
server = multiprocessing.Process(target=run_server)
server.start()
self.localhost = 'http://localhost:8888'
self.pid = server.pid
logger.warning(self.pid)
def tearDown(self):
os.kill(self.pid, signal.SIGKILL)
def test_get(self):
resp = requests.get(self.localhost)
logger.warning(resp.status_code)
if __name__ == '__main__':
unittest.main() | Python | 0.000003 | |
be0331e64726d659b824187fbc91b54ce0405615 | add initial implementation of weighted EM PCA | wpca/test/test_empca.py | wpca/test/test_empca.py | import numpy as np
from numpy.testing import assert_allclose
from ..empca import orthonormalize, random_orthonormal, pca, empca
def norm_sign(X):
i_max_abs = np.argmax(abs(X), 0)
sgn = np.sign(X[i_max_abs, range(X.shape[1])])
return X * sgn
def assert_columns_allclose_upto_sign(A, B, *args, **kwargs):
assert_allclose(norm_sign(A), norm_sign(B), *args, **kwargs)
def test_orthonormalize():
rand = np.random.RandomState(42)
X = rand.randn(3, 4)
X2 = orthonormalize(X)
assert_allclose(X[0] / np.linalg.norm(X[0]), X2[0])
assert_allclose(np.dot(X2, X2.T), np.eye(X2.shape[0]), atol=1E-15)
def test_random_orthonormal():
def check_random_orthonormal(n_samples, n_features):
X = random_orthonormal(n_samples, n_features, 42)
assert X.shape == (n_samples, n_features)
assert_allclose(np.dot(X, X.T), np.eye(X.shape[0]), atol=1E-15)
for n_samples in range(1, 6):
yield check_random_orthonormal, n_samples, 5
def test_empca_vs_pca():
rand = np.random.RandomState(42)
X = rand.randn(50, 5)
W = np.ones_like(X)
evecs1, coeff1 = empca(X, W, 5, niter=100)
evecs2, coeff2 = pca(X, 5)
assert_columns_allclose_upto_sign(evecs1.T, evecs2.T, rtol=1E-6)
assert_columns_allclose_upto_sign(coeff1, coeff2, rtol=1E-6)
| Python | 0 | |
7ccfc89a51a76764c36b009dd9b5fc55570e3f56 | Add forgot password test | api/radar_api/tests/test_forgot_password.py | api/radar_api/tests/test_forgot_password.py | import json
from radar_api.tests.fixtures import get_user
from radar.database import db
def test_forgot_password(app):
user = get_user('admin')
client = app.test_client()
assert user.reset_password_token is None
assert user.reset_password_date is None
response = client.post('/forgot-password', data={
'username': user.username,
'email': user.email
})
assert response.status_code == 200
db.session.refresh(user)
assert user.reset_password_token is not None
assert user.reset_password_date is not None
def test_missing_username(app):
user = get_user('admin')
client = app.test_client()
response = client.post('/forgot-password', data={
'email': user.email,
})
assert response.status_code == 422
data = json.loads(response.data)
assert data == {
'errors': {
'username': ['This field is required.']
}
}
def test_missing_email(app):
user = get_user('admin')
client = app.test_client()
response = client.post('/forgot-password', data={
'username': user.username,
})
assert response.status_code == 422
data = json.loads(response.data)
assert data == {
'errors': {
'email': ['This field is required.']
}
}
def test_user_not_found(app):
client = app.test_client()
response = client.post('/forgot-password', data={
'username': '404',
'email': '404@example.org',
})
assert response.status_code == 422
data = json.loads(response.data)
assert data == {
'errors': {
'username': ['No user found with that username and email.']
}
}
| Python | 0.000001 | |
b4bf757a15c404080679335bcce04ba45a7e4eae | Update fix_nonwarehouse_ledger_gl_entries_for_transactions.py | erpnext/patches/v7_0/fix_nonwarehouse_ledger_gl_entries_for_transactions.py | erpnext/patches/v7_0/fix_nonwarehouse_ledger_gl_entries_for_transactions.py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if not frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock"):
return
frappe.reload_doctype("Account")
warehouses = frappe.db.sql_list("""select name from tabAccount
where account_type = 'Stock' and is_group = 0
and (warehouse is null or warehouse = '')""")
if warehouses:
warehouses = set_warehouse_for_stock_account(warehouses)
stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s) and creation > '2016-05-01'
and not exists(select name from `tabGL Entry`
where account=sle.warehouse and voucher_type=sle.voucher_type and voucher_no=sle.voucher_no)
order by sle.posting_date""" %
', '.join(['%s']*len(warehouses)), tuple(warehouses))
rejected = []
for voucher_type, voucher_no in stock_vouchers:
try:
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
voucher = frappe.get_doc(voucher_type, voucher_no)
voucher.make_gl_entries()
frappe.db.commit()
except Exception, e:
print frappe.get_traceback()
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print rejected
def set_warehouse_for_stock_account(warehouse_account):
for account in warehouse_account:
if frappe.db.exists('Warehouse', account):
frappe.db.set_value("Account", account, "warehouse", account)
else:
warehouse_account.remove(account)
return warehouse_account
| # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if not frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock"):
return
frappe.reload_doctype("Account")
warehouses = frappe.db.sql_list("""select name from tabAccount
where account_type = 'Stock' and is_group = 0
and (warehouse is null or warehouse = '')""")
if warehouses:
warehouses = set_warehouse_for_stock_account(warehouses)
stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where sle.warehouse in (%s) and creation > '2016-05-01'
and not exists(select name from `tabGL Entry`
where account=sle.warehosue and voucher_type=sle.voucher_type and voucher_no=sle.voucher_no)
order by sle.posting_date""" %
', '.join(['%s']*len(warehouses)), tuple(warehouses))
rejected = []
for voucher_type, voucher_no in stock_vouchers:
try:
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
voucher = frappe.get_doc(voucher_type, voucher_no)
voucher.make_gl_entries()
frappe.db.commit()
except Exception, e:
print frappe.get_traceback()
rejected.append([voucher_type, voucher_no])
frappe.db.rollback()
print rejected
def set_warehouse_for_stock_account(warehouse_account):
for account in warehouse_account:
if frappe.db.exists('Warehouse', account):
frappe.db.set_value("Account", account, "warehouse", account)
else:
warehouse_account.remove(account)
return warehouse_account | Python | 0.000001 |
ae52e3e4dc1fc254b7e1c258caa1fe00317bb9a5 | Add migrate script. | disqus_converter.py | disqus_converter.py | '''Convert disquls XML comments to YAML.'''
import os
import copy
import pathlib
import hashlib
import yaml
import iso8601
import xmltodict
from postsinfo import mapping
from rebuild_comments import encrypt
COMMENT_DIR = os.environ.get('COMMENT_DIR', './_data/comments')
def get_disqus_threads(infile):
with open(infile, 'r', encoding='utf-8') as file:
disqus = xmltodict.parse(file.read())['disqus']
threads = {}
for trd in disqus['thread']:
if not is_local_thread(trd):
threads[trd['@dsq:id']] = trd
threads[trd['@dsq:id']]['posts'] = []
for pst in disqus['post']:
key = pst['thread']['@dsq:id']
if key in threads:
threads[key]['posts'].append(pst)
return threads
def is_local_thread(thread):
return '0.0.0.0' in thread['link'] or '://localhost' in thread['link']
def write(thread, post_info):
uid = post_info['page_id'][1:]
comments = transform(thread, post_info)
if comments:
with open(os.path.join(COMMENT_DIR, f'{uid}.yml'), 'a+', encoding='utf8') as file:
yaml.dump(comments,
file,
default_flow_style=False,
allow_unicode=True)
def transform(thread, post_info):
'''Convert disqus form data to a normal comment.'''
comments = []
for post in thread['posts']:
comment = copy.copy(post_info)
comment.update(
{'date': iso8601.parse_date(post['createdAt']),
'name': post['author']['name'],
'email': hashlib.md5(post['author']['email'].encode('ascii')).hexdigest(),
'bucket': encrypt(post['author']['email']),
'website': make_profile_url(post),
'message': post['message']})
comments.append(comment)
return comments
def make_profile_url(post):
return 'https://disqus.com/by/{}/'.format(post['author']['username']) if post['author']['isAnonymous'] == 'false' else ''
def main():
# Load disqus
disqus_threads = get_disqus_threads(infile='db.xml')
# Make sure the comment directory exists
pathlib.Path(COMMENT_DIR).mkdir(parents=True, exist_ok=True)
# Convert disqus to current comment format. Use posts mapping.
for trd in disqus_threads.values():
# Update comment files with converted disqus comments
if trd['link'] in mapping:
write(trd, mapping[trd['link']])
if __name__ == '__main__':
main() | Python | 0 | |
588d49ef47cb4fa0848e44775a0102a7bd3f492a | add hdfs utils to distributed | distributed/hdfs.py | distributed/hdfs.py | import os
from .utils import ignoring
with ignoring(ImportError):
import snakebite.protobuf.ClientNamenodeProtocol_pb2 as client_proto
from snakebite.client import Client
def get_locations(filename, name_host, name_port):
client = Client(name_host, name_port, use_trash=False)
files = list(client.ls([filename]))
return [pair for file in files for pair in find(file, client)]
def find(f, client, data_root='/data/dfs/dn'):
request = client_proto.GetBlockLocationsRequestProto()
request.src = f['path']
request.length = long(f['length'])
request.offset = long(0)
response = client.service.getBlockLocations(request)
return [{'block': block,
'path': get_local_path(block, data_root),
'hosts': [location.id.ipAddr for location in block.locs]}
for block in response.locations.blocks]
def get_local_path(block, data_root='/data/dfs/dn'):
pool = block.b.poolId
Id = block.b.blockId
loc = idtoBlockdir(Id)
return "{}/current/{}/current/finalized/{}/blk_{}".format(
data_root, pool, loc, Id)
BLOCK_SUBDIR_PREFIX = 'subdir'
def idtoBlockdir(blockId):
d1 = str(((blockId >> 16) & 0xff))
d2 = str(((blockId >> 8) & 0xff))
pathd1 = BLOCK_SUBDIR_PREFIX+d1
pathd2 = BLOCK_SUBDIR_PREFIX+d2
path = os.path.join(pathd1, pathd2)
return path
def get_data_root():
confd = os.environ.get('HADOOP_CONF_DIR', os.environ.get('HADOOP_INSTALL',
'') + '/hadoop/conf')
conf = os.sep.join([confd, 'hdfs-site.xml'])
import xml
x = xml.etree.ElementTree.fromstring(open(conf).read())
for e in x:
if e.find('name').text == 'dfs.datanode.data.dir':
return e.find('value').text
def hdfs_map_blocks(executor, func, location, namenode_host, namenode_port):
""" Map a function over blocks of a location in HDFS
>>> L = map_blocks(executor, pd.read_csv, '/data/nyctaxi/',
... '192.168.1.100', 9000) # doctest: +SKIP
>>> type(L)[0] # doctest: +SKIP
Future
"""
blocks = get_locations(location, namenode_host, namenode_port)
paths = [blk['path'] for blk in blocks]
hosts = [blk['hosts'] for blk in blocks]
return executor.map(func, paths, workers=hosts)
def hdfs_dask_graph(executor, func, location, namenode_host, namenode_port):
""" Produce dask graph mapping function over blocks in HDFS
Inserts HDFS host restrictions into the executor.
Returns a graph and keys corresponding to function applied to blocks.
Does not trigger execution.
>>> dsk, keys = dask_graph(executor, pd.read_csv, '/data/nyctaxi/',
... '192.168.1.100', 9000) # doctest: +SKIP
"""
blocks = get_locations(location, namenode_host, namenode_port)
paths = [blk['path'] for blk in blocks]
hosts = [blk['hosts'] for blk in blocks]
names = [(funcname(func), path) for path in paths]
restrictions = dict(zip(names, hosts))
dsk = {name: (func, path) for name, path in zip(names, paths)}
executor.scheduler_queue.put_nowait({'op': 'update-graph',
'dsk': {},
'keys': [],
'restrictions': restrictions})
return dsk, names
| Python | 0 | |
c8ae682ff98f2c5b5733ae4b299970c820e46630 | Add regression test for #636 | spacy/tests/regression/test_issue636.py | spacy/tests/regression/test_issue636.py | # coding: utf8
from __future__ import unicode_literals
from ...tokens.doc import Doc
import pytest
@pytest.mark.xfail
@pytest.mark.models
@pytest.mark.parametrize('text', ["I cant do this."])
def test_issue636(EN, text):
"""Test that to_bytes and from_bytes don't change the token lemma."""
doc1 = EN(text)
doc2 = Doc(EN.vocab)
doc2.from_bytes(doc1.to_bytes())
print([t.lemma_ for t in doc1], [t.lemma_ for t in doc2])
assert [t.lemma_ for t in doc1] == [t.lemma_ for t in doc2]
| Python | 0.000001 | |
423707ea25e88b2454a9541eb52f900da87e95b2 | allow external backends, specified via ZMQ_BACKEND env | zmq/backend/__init__.py | zmq/backend/__init__.py | """Import basic exposure of libzmq C API as a backend"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from .select import public_api, select_backend
if 'PYZMQ_BACKEND' in os.environ:
backend = os.environ['PYZMQ_BACKEND']
if backend in ('cython', 'cffi'):
backend = 'zmq.backend.%s' % backend
_ns = select_backend(backend)
else:
# default to cython, fallback to cffi
try:
_ns = select_backend('zmq.backend.cython')
except ImportError:
_ns = select_backend('zmq.backend.cffi')
globals().update(_ns)
__all__ = public_api
| """Import basic exposure of libzmq C API as a backend"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .select import public_api, select_backend
try:
_ns = select_backend('zmq.backend.cython')
except ImportError:
_ns = select_backend('zmq.backend.cffi')
globals().update(_ns)
__all__ = public_api
| Python | 0 |
6e501f2cbfe6b53eca72389c9a1c98a3c3d098c9 | Add redhat official helper | bin/helpers/redhatofficial/redhatoffical.py | bin/helpers/redhatofficial/redhatoffical.py | #!/usr/bin/env python
# Copyright 2018, Red Hat
# Copyright 2018, Fabien Boucher
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import yaml
import github3
import argparse
import requests
# This is a small tool to read the redhatofficial project file
# and create a repoXplorer compatible projects.yaml file.
INFO_URI = (
"https://raw.githubusercontent.com/"
"RedHatOfficial/RedHatOfficial.github.io/"
"dev/app/data/projects.json")
parser = argparse.ArgumentParser(
description='Read/Index RedhatOffical projects file')
parser.add_argument(
'--output-path', type=str,
help='yaml file path to register organization repositories details')
args = parser.parse_args()
def fetch_repos(org, template, repo=None, query=None):
# anon = github3.GitHub()
anon = github3.GitHub('', token='')
orga = anon.organization(org)
data = {}
if not orga:
print(
"Org %s not found, try to find single"
" user's repos ..." % org)
repos = anon.repositories_by(org)
else:
repos = orga.repositories()
for r in repos:
if repo and r.name != repo:
continue
if r.fork:
continue
if query and query not in r.name:
continue
data[r.name] = {
"branches": [r.default_branch],
}
data[r.name]["template"] = template
return data
if __name__ == "__main__":
gp = yaml.safe_load(requests.get(INFO_URI).text)
projects = {}
templates = {}
struct = {'projects': projects,
'project-templates': templates}
c = len(gp)
for project in gp:
print(project)
print("Remain: %d" % c)
c -= 1
uri = project['projectRepository'].rstrip('/')
if '?q=' in uri:
query = uri.split('?q=')[1]
uri = uri.split('?q=')[0]
print("There is a query on %s for %s" % (uri, query))
else:
query = None
uris = uri.split('/')
if uris[-2] == 'github.com':
# It is a github org
org = uris[-1]
repo = None
orguri = uri
else:
# It is a single github repo
org = uris[-2]
repo = uris[-1]
orguri = "/".join(uris[0:-1])
projects[project['projectName']] = {
'repos': fetch_repos(org, project['projectName'], repo, query),
'description': project['projectDescription'],
}
templates[project['projectName']] = {
"branches": ["master"],
"uri": orguri + "/%(name)s",
"gitweb": orguri + "/%(name)s/commit/%%(sha)s",
"tags": [project['category']]
}
path = 'redhatoffical.yaml'
if args.output_path:
path = os.path.join(os.path.expanduser(args.output_path), path)
with open(path, 'w') as fd:
fd.write(yaml.safe_dump(struct,
default_flow_style=False))
print("")
print("RedHatOffical source repositories details"
" has been written to %s" % path)
print("Please edit the yaml file if needed (like adding additional"
" branches to index, defines custom releases, ...)")
sys.exit(0)
| Python | 0.000001 | |
cb9166c4564c4e763e1214355dc76cbe6d466258 | Add data migration for section | books/migrations/0009_auto_20141127_1718.py | books/migrations/0009_auto_20141127_1718.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_sections(apps, schema_editor):
# Don't just use books.models.Section, that could be out of date
Section = apps.get_model('books', 'Section')
FRONT_MATTER_CHOICES = [
#('db_value', 'human readable'),
('half_title', 'Half title'),
('title_page', 'Title Page'),
('colophon', 'Colophon'),
('contents', 'Contents'),
('foreward', 'Foreward'),
('preface', 'Preface'),
('acknowledgment', 'Acknowlegment'),
('introduction', 'Introduction'),
('dedication', 'Dedication'),
('epigraph', 'Epigraph'),
('prologue', 'Prologue'),
]
BACK_MATTER_CHOICES = [
('epilogue', 'Epilogue'),
('afterward', 'Afterward'),
('conclusion', 'Conclusion'),
('postscript', 'Postscript'),
('appendix', 'Appendix'),
('glossary', 'Glossary'),
('bibliography', 'Bibliography'),
('index', 'Index'),
('colophon', 'Colophon'),
]
for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='front')
sect.save()
for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):
sect = Section(name=sect_name, order=order, location='back')
sect.save()
def remove_sections(apps, schema_editor):
    """Reverse operation for add_sections: delete every Section row."""
    section_model = apps.get_model('books', 'Section')
    existing_rows = section_model.objects.all()
    for row in existing_rows:
        row.delete()
class Migration(migrations.Migration):
    """Data migration that seeds the standard book Section rows."""
    dependencies = [
        ('books', '0008_auto_20141127_1657'),
    ]
    operations = [
        # Pairing the forward function with its reverse keeps the
        # migration reversible (`migrate books 0008` undoes it).
        migrations.RunPython(add_sections, remove_sections),
    ]
| Python | 0 | |
161feec0d3764c7cdeebfdc7cd62e5901a89666a | Add initial implementation | runtracker.py | runtracker.py | import cv2
import math

import imutils
import numpy as np
PI = 3.141592654
AREA_ERROR_THRESH = 0.05 # Error away from the mean area
# Color ranges
#CALIB_COLOR_MIN = ( 70, 40, 61)
#CALIB_COLOR_MAX = (110, 175, 255)
CALIB_COLOR_MIN = ( 52, 24, 56)
CALIB_COLOR_MAX = ( 98, 169, 178)
TRACK_COLOR_MIN = ( 0, 0, 0)
TRACK_COLOR_MAX = (255, 225, 255)
prevCalib = []
prevTrack = None
def ellipseArea(ellipse):
    """Return the area of an ellipse as returned by cv2.fitEllipse.

    `ellipse` is ((cx, cy), (width, height), angle); area = pi * a * b where
    a and b are the semi-axes, i.e. width * height * pi / 4.
    Uses math.pi instead of the hand-truncated module constant for full
    double precision.
    """
    return ellipse[1][0] * ellipse[1][1] * math.pi / 4
def main():
    """Track elliptical colour markers from the webcam and display the result.

    Loop: grab a frame, threshold it to the calibration colour range, clean
    the mask morphologically, then keep contours whose area closely matches
    the area of their fitted ellipse (i.e. genuinely elliptical blobs).
    Exits when the Escape key (27) is pressed.
    """
    # Open webcam (device 0)
    cap = cv2.VideoCapture(0)

    while True:
        # Get frame
        ret, frame = cap.read()
        output = frame.copy()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Convert to HSV (for color range)

        # Apply morphological filtering with a 8x8 Gaussian kernel
        k_x = cv2.getGaussianKernel(8, 0)
        k_y = cv2.getGaussianKernel(8, 0)
        kernel = k_x * np.transpose(k_y)

        filt = cv2.inRange(hsv, CALIB_COLOR_MIN, CALIB_COLOR_MAX)
        filt = cv2.morphologyEx(filt, cv2.MORPH_OPEN, kernel, iterations=2)

        # Find contours (3-tuple return: this targets the OpenCV 3 API)
        _, contours, _ = cv2.findContours(filt, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(output, contours, -1, (0, 0, 255), 3)

        matches = [] # Contours that match a marker
        for c in contours:
            e = cv2.fitEllipse(c)
            area_c = cv2.contourArea(c)
            area_e = ellipseArea(e)
            # Relative error against the mean of the two areas.
            if abs(area_c - area_e) < (AREA_ERROR_THRESH * (area_c + area_e) / 2): # Is within error
                matches.append((c, e))

        # Sort by size, largest ellipse first
        matches.sort(key=lambda x: ellipseArea(x[1]), reverse=True)

        # Annotate the 2 best (largest) ellipses
        for i in range(0, min(len(matches), 2)):
            c = matches[i][0]
            e = matches[i][1]
            cv2.ellipse(output, e, (0, 255, 0), 2)
            cv2.putText(output, 'C: ' + str(cv2.contourArea(c)) + ' | E: ' + str(ellipseArea(e)), (int(e[0][0]), int(e[0][1])), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)

        # Show frame
        cv2.imshow('Frame', frame)
        cv2.imshow('Filtered', filt)
        cv2.imshow('Output', output)
        # NOTE(review): cap.release()/destroyAllWindows() are never called
        # on exit -- confirm whether cleanup is needed here.
        if cv2.waitKey(1) == 27:
            break
if __name__ == '__main__':
main() | Python | 0.000001 | |
07825b7f80a12619c847de49f0f2b991faeea7b4 | Add a simple handler cookie_wsh.py useful for cookie test | example/cookie_wsh.py | example/cookie_wsh.py | # Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the COPYING file or at
# https://developers.google.com/open-source/licenses/bsd
import urlparse
def _add_set_cookie(request, value):
    """Queue a Set-Cookie response header carrying *value* on the handshake."""
    header = ('Set-Cookie', value)
    request.extra_headers.append(header)
def web_socket_do_extra_handshake(request):
    """Set or clear test cookies based on the request URI's query string.

    Query 'set' installs a one-day 'foo' cookie, 'set_httponly' an httpOnly
    variant, and 'clear' expires both.  Any other query is a no-op.
    """
    # urlparse result index 4 is the query component of the URI.
    command = urlparse.urlparse(request.uri)[4]

    ONE_DAY_LIFE = 'Max-Age=86400'

    cookies_by_command = {
        'set': ['; '.join(['foo=bar', ONE_DAY_LIFE])],
        'set_httponly': ['; '.join(['httpOnlyFoo=bar', ONE_DAY_LIFE, 'httpOnly'])],
        'clear': ['foo=0; Max-Age=0', 'httpOnlyFoo=0; Max-Age=0'],
    }
    for cookie in cookies_by_command.get(command, []):
        _add_set_cookie(request, cookie)
def web_socket_transfer_data(request):
    # No data exchange: this handler only manipulates cookies during the
    # opening handshake above.
    pass
| Python | 0.000004 | |
39019e998da2c1f73f82e0eb446df78ffc95c134 | Create safe_steps.py | safe_steps.py | safe_steps.py | import mcpi.minecraft as minecraft
import mcpi.block as block
mc = minecraft.Minecraft.create()
while True:
p = mc.player.getTilePos()
b = mc.getBlock(p.x, p.y-1, p.z)
if b == block.AIR.id or b == block.WATER_FLOWING.id or b==block.WATER_STATIONARY.id:
mc.setBlock(pos.x, pos.y-1, pos.z, block.WOOD_PLANKS.id)
| Python | 0.000459 | |
c78480fc1f566bb6d266705336dbe9cd90d07996 | Create 476_number_complement.py | 476_number_complement.py | 476_number_complement.py | """
https://leetcode.com/problems/number-complement/description/
Given a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.
Note:
The given integer is guaranteed to fit within the range of a 32-bit signed integer.
You could assume no leading zero bit in the integer’s binary representation.
Example 1:
Input: 5
Output: 2
Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
Example 2:
Input: 1
Output: 0
Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
"""
class Solution(object):
    def findComplement(self, num):
        """Return the bitwise complement of a positive integer.

        Flips every bit of num's binary representation (no leading zeros),
        e.g. 5 (101) -> 2 (010), 1 (1) -> 0.

        :type num: int
        :rtype: int
        """
        # XOR with an all-ones mask as wide as num flips exactly its bits.
        # Replaces the original string-based flip, which also leaked debug
        # print statements.
        mask = (1 << num.bit_length()) - 1
        return num ^ mask
| Python | 0.998761 | |
0104600fe32b2b676974f29df37d10cc86a7441a | enable CMake build (with HTTP/3) -- take 2 | build/fbcode_builder/specs/proxygen_quic.py | build/fbcode_builder/specs/proxygen_quic.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.folly as folly
import specs.fizz as fizz
import specs.mvfst as mvfst
import specs.sodium as sodium
import specs.wangle as wangle
import specs.zstd as zstd
def fbcode_builder_spec(builder):
    """Build spec for proxygen with HTTP/3 (QUIC) support turned on."""
    builder.add_option("proxygen/proxygen:cmake_defines", {"BUILD_QUIC": "ON"})
    dependencies = [folly, wangle, fizz, sodium, zstd, mvfst]
    build_steps = [builder.fb_github_cmake_install("proxygen/proxygen", "..")]
    return {
        "depends_on": dependencies,
        "steps": build_steps,
    }
| Python | 0.000001 | |
37e74416a090342c18cfad87df74dd958400145d | Add 'Others' category. | bulb/migrations/0009_add_others_category.py | bulb/migrations/0009_add_others_category.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_categories(apps, schema_editor):
    """Forward migration: create the 'others' category (Arabic label)."""
    category_model = apps.get_model('bulb', 'Category')
    category_model.objects.create(code_name="others", name="أخرى")
def remove_categories(apps, schema_editor):
    """Reverse migration: delete the 'others' category again."""
    category_model = apps.get_model('bulb', 'Category')
    others_qs = category_model.objects.filter(code_name="others")
    others_qs.delete()
class Migration(migrations.Migration):
    """Data migration adding the 'others' Category; reversible."""
    dependencies = [
        ('bulb', '0008_improve_status'),
    ]
    operations = [
        migrations.RunPython(
            add_categories,
            # reverse_code lets `migrate` roll this data change back.
            reverse_code=remove_categories),
    ]
| Python | 0.998649 | |
317160665a58a2e0433202e4605710b09a71de9d | add scrub script to remove solution tags, thanks https://gist.github.com/minrk/3836889 | scrub_sols.py | scrub_sols.py | #!/usr/bin/env python
"""
simple example script for scrubping solution code cells from IPython notebooks
Usage: `scrub_code.py foo.ipynb [bar.ipynb [...]]`
Marked code cells are scrubbed from the notebook
"""
import io
import os
import sys
from IPython.nbformat.current import read, write
def scrub_code_cells(nb):
    """Blank out marked solution code cells of notebook *nb*, in place.

    Any code cell whose source starts with '# Solution' has its input
    replaced with a placeholder and its outputs cleared.  Non-code cells
    are untouched.  Prints a summary; returns None.
    """
    scrubbed = 0
    cells = 0
    for ws in nb.worksheets:
        for cell in ws.cells:
            if cell.cell_type != 'code':
                continue
            cells += 1
            # scrub cells marked with initial '# Solution' comment
            # any other marker will do, or it could be unconditional
            if cell.input.startswith("# Solution"):
                cell.input = u'# Solution goes here'
                scrubbed += 1
                cell.outputs = []
    # BUG FIX: this was a bare `print` statement (Python 2); under Python 3
    # that expression is a silent no-op.  Emit the intended blank line.
    print("")
    print("scrubbed %i/%i code cells from notebook %s" % (scrubbed, cells, nb.metadata.name))
# Script entry point: scrub every notebook named on the command line and
# write the result next to it as <name>_blank.ipynb (original left intact).
if __name__ == '__main__':
    for ipynb in sys.argv[1:]:
        print("scrubbing %s" % ipynb)
        with io.open(ipynb, encoding='utf8') as f:
            nb = read(f, 'json')
        scrub_code_cells(nb)
        # Derive the output name by inserting "_blank" before the extension.
        base, ext = os.path.splitext(ipynb)
        new_ipynb = "%s_blank%s" % (base, ext)
        with io.open(new_ipynb, 'w', encoding='utf8') as f:
            write(nb, f, 'json')
        print("wrote %s" % new_ipynb)
| Python | 0 | |
3bafceba383125475d5edb895bc9d88b0dfc5042 | Add status to Role | project/apps/api/migrations/0093_role_status.py | project/apps/api/migrations/0093_role_status.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-05 23:28
from __future__ import unicode_literals
from django.db import migrations
import django_fsm
class Migration(migrations.Migration):
    """Schema migration: add an FSM-backed integer `status` field to Role."""
    dependencies = [
        ('api', '0092_auto_20160305_1514'),
    ]
    operations = [
        migrations.AddField(
            model_name='role',
            name='status',
            # Every existing and new Role starts in state 0 ('New').
            field=django_fsm.FSMIntegerField(choices=[(0, b'New')], default=0),
        ),
    ]
| Python | 0.000001 | |
4735ee97aa36920e811edc450d8b6e8a09b5caf5 | add utility for explode bam | iron/utilities/explode_bam.py | iron/utilities/explode_bam.py | #!/usr/bin/python
import sys, argparse
from subprocess import Popen, PIPE
from SamBasics import SamStream
from multiprocessing import cpu_count, Pool
def main():
    """CLI: split a SAM/BAM stream into numbered BAM chunks of k entries each.

    Reads SAM from stdin ('-') or converts a BAM via `samtools view -h`.
    Each chunk is handed to do_output (optionally through a worker Pool)
    and written as <output_base>.<n>.bam.  Prints the number of chunks.
    NOTE: this script is Python 2 (`print i` statement below).
    """
    parser = argparse.ArgumentParser(description="Break a bam into evenly sized chunks print the number of chunks",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input',help="Use - for STDIN sam or directly name bamfile")
    parser.add_argument('output_base',help="output base name myout will go to myout.1.bam")
    parser.add_argument('-k',type=int,required=True,help="Number per chunk")
    parser.add_argument('--threads',type=int,default=cpu_count(),help="Number of threads")
    args = parser.parse_args()
    inf = None
    if args.input == '-':
        inf = sys.stdin
    else:
        # Decompress BAM to SAM text through samtools so SamStream can parse it.
        cmd = "samtools view -h "+args.input
        p = Popen(cmd.split(),stdout=PIPE)
        inf = p.stdout
    v = SamStream(inf)
    buffer = []
    i = 0
    # `poo` exists only when threads > 1; every use below is guarded the same way.
    if args.threads > 1:
        poo= Pool(processes=args.threads)
    while True:
        e = v.read_entry()
        if not e: break
        buffer.append(e)
        if len(buffer) >= args.k:
            i+=1
            # `buffer` is rebound (not mutated) after hand-off, and the header
            # is copied with [:], so sharing with apply_async is safe.
            if args.threads > 1:
                poo.apply_async(do_output,args=(buffer,v.header[:],i,args.output_base))
            else:
                do_output(buffer,v.header[:],i,args.output_base)
            buffer = []
    # Flush the final, possibly short, chunk.
    if len(buffer) > 0:
        i+=1
        if args.threads > 1:
            poo.apply_async(do_output,args=(buffer,v.header[:],i,args.output_base))
        else:
            do_output(buffer,v.header[:],i,args.output_base)
    if args.threads > 1:
        poo.close()
        poo.join()
    if args.input != '-':
        p.communicate()
    # Report how many chunk files were produced (Python 2 print statement).
    print i
def do_output(buffer, header, i, output_base):
    """Write one chunk of SAM entries as <output_base>.<i>.bam via samtools.

    The header lines are written before the chunk's entries so every chunk
    is a self-contained BAM.  Conversion is piped through `samtools view -Sb`.
    """
    # `with` guarantees the output handle is closed even if Popen or a
    # write fails (the original leaked it on error).
    with open(output_base + '.' + str(i) + '.bam', 'w') as of:
        cmd = 'samtools view - -Sb'
        p = Popen(cmd.split(), stdin=PIPE, stdout=of)
        for e in header:
            p.stdin.write(e)
        for e in buffer:
            p.stdin.write(e)
        # communicate() closes stdin and waits for samtools to finish.
        p.communicate()
# Script entry point.
if __name__=="__main__":
  main()
| Python | 0.000001 | |
77a031fd34d73047a529fe9e06d7781ba0d4c56d | add basic structure of python ui | models/synthetic/ui/synthetic.py | models/synthetic/ui/synthetic.py |
from Tkinter import *
# --- window & panes -------------------------------------------------------
# Root window with a title label and five vertical panes (m2..m6) laid out
# side by side inside a horizontal pane (m1).  m2 holds the selectable
# options; m3..m6 hold per-option parameter labels/entries.
root = Tk()
root.title("Synthetic Model")
label = Label(root, text = 'Synthetic Model', font = (None, 20))
label.pack()

m1 = PanedWindow()
m1.pack(fill = BOTH, expand = 1)

m2 = PanedWindow(m1, orient = VERTICAL)
m1.add(m2)

m3 = PanedWindow(m1, orient = VERTICAL)
m1.add(m3)

m4 = PanedWindow(m1, orient = VERTICAL)
m1.add(m4)

m5 = PanedWindow(m1, orient = VERTICAL)
m1.add(m5)

m6 = PanedWindow(m1, orient = VERTICAL)
m1.add(m6)

# --- network selection ----------------------------------------------------
nt2 = Label(m2, text = 'Netwrok Selection')
nt3 = Label(m3, text = ' ')
nt4 = Label(m4, text = ' ')
nt5 = Label(m5, text = ' ')
nt6 = Label(m6, text = ' ')
m2.add(nt2)
m3.add(nt3)
m4.add(nt4)
m5.add(nt5)
m6.add(nt6)

# Radio group for the network topology; each choice has its own
# mean-degree / probability parameter entries.
network = IntVar()
R1 = Radiobutton(m2, text = "Watts-strogattz", font = 30, fg = "red", variable = network, value = 1)
ws_mT = Label(m3, text = 'mean degree', fg = 'red')
ws_mV = Entry(m4, bd = 2)
ws_pT = Label(m5, text = 'probability', fg = 'red')
ws_pV = Entry(m6, bd = 2)

R2 = Radiobutton(m2, text = "Barabsi-Albert", font = 30, fg = "red", variable = network, value = 2)
ba_mT = Label(m3, text = 'mean degree', fg = 'red')
ba_mV = Entry(m4, bd = 2)
ba_pT = Label(m5, text = 'probability', fg = 'red')
ba_pV = Entry(m6, bd = 2)

m2.add(R1)
m3.add(ws_mT)
m4.add(ws_mV)
m5.add(ws_pT)
m6.add(ws_pV)
m2.add(R2)
m3.add(ba_mT)
m4.add(ba_mV)
m5.add(ba_pT)
m6.add(ba_pV)

# --- node-selection distribution ------------------------------------------
DIST = [("Exponential", "1"),
        ("Geometric", "2"),
        ("Binomial", "3"),
        ("Normal", "4"),
        ("Uniform", "5"),
        ("Poisson", "6"),
        ("Lognormal", "7"),
        ]

ns = IntVar()
ns.set("1")

for dist, c in DIST:
    b = Radiobutton(m2, text = dist, variable = ns, value = c, font = 30, fg = "blue")
    m2.add(b)

# --- event-send distribution ----------------------------------------------
es = IntVar()
for dist, c in DIST:
    # BUG FIX: these radio buttons were bound to `ns`, so selecting an
    # event-send distribution clobbered the node-selection choice and `es`
    # was never used.  Bind the group to its own variable.
    e = Radiobutton(m2, text = dist, variable = es, value = c, font = 30, fg = "red")
    m2.add(e)

# --- node count -----------------------------------------------------------
node1 = Label(m2, text = "Number of Nodes", font = 30, fg = "blue")
nodevar = Entry(m2, bd = 4)
nodevar.insert(0, "100000")
m2.add(node1)
m2.add(nodevar)

# --- state size -----------------------------------------------------------
s1 = Label(m2, text = "State Size: Min", font = 30, fg = "red")
s2 = Label(m2, text = "Max", font = 30, fg = "red")
s_min = Entry(m2, bd = 4)
s_max = Entry(m2, bd = 4)
s_min.insert(0,"100")
s_max.insert(0,"100")
m2.add(s1)
m2.add(s_min)
m2.add(s2)
m2.add(s_max)

# --- floating point operation count ---------------------------------------
fp1 = Label(m2, text = "Floating Point: Min", font = 30, fg = "blue")
fp2 = Label(m2, text = "Max", font = 30, fg = "blue")
fp_min = Entry(m2, bd = 4)
fp_max = Entry(m2, bd = 4)
fp_min.insert(0,"1000")
fp_max.insert(0,"1000")
m2.add(fp1)
m2.add(fp_min)
m2.add(fp2)
m2.add(fp_max)

# Enter the Tk event loop; blocks until the window is closed.
mainloop()
| Python | 0.000032 | |
406420f0686cdfbd56090a2f0bf8c623b9216461 | Create DQN_Agent_LSTM.py | Agents/DQN_Agent_LSTM.py | Agents/DQN_Agent_LSTM.py | #SC2-pySC2 agent for HallucinIce mini-game
#@SoyGema
#Thanks to @DavSuCar
import numpy as np
import sys
import random
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, Activation, MaxPooling2D, TimeDistributed, LSTM, Reshape
from keras.optimizers import Adam, Adamax, Nadam
from keras.backend import set_image_dim_ordering
from absl import flags
from pysc2.env import sc2_env, environment
from pysc2.lib import actions
from pysc2.lib import features
from rl.memory import SequentialMemory
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy
from rl.core import Processor
from rl.callbacks import FileLogger, ModelIntervalCheckpoint
from rl.agents.dqn import DQNAgent
from rl.agents.sarsa import SARSAAgent
# Actions from pySC2 API
FUNCTIONS = actions.FUNCTIONS
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3
_PLAYER_HOSTILE = 4
_NO_OP = FUNCTIONS.no_op.id
_MOVE_SCREEN = FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = FUNCTIONS.Attack_screen.id
_SELECT_ARMY = FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_HAL_ADEPT = FUNCTIONS.Hallucination_Adept_quick.id
_HAL_ARCHON = FUNCTIONS.Hallucination_Archon_quick.id
_HAL_COL = FUNCTIONS.Hallucination_Colossus_quick.id
_HAL_DISRUP = FUNCTIONS.Hallucination_Disruptor_quick.id
_HAL_HIGTEM = FUNCTIONS.Hallucination_HighTemplar_quick.id
_HAL_IMN = FUNCTIONS.Hallucination_Immortal_quick.id
_HAL_PHOENIX = FUNCTIONS.Hallucination_Phoenix_quick.id
_HAL_STALKER = FUNCTIONS.Hallucination_Stalker_quick.id
_HAL_VOIDRAID = FUNCTIONS.Hallucination_VoidRay_quick.id
_HAL_ZEALOT = FUNCTIONS.Hallucination_Zealot_quick.id
_FORCE_FIELD = FUNCTIONS.Effect_ForceField_screen.id
_GUARD_FIELD = FUNCTIONS.Effect_GuardianShield_quick.id
# Size of the screen and length of the window
_SIZE = 64
_WINDOW_LENGTH = 1
# Load and save weights for training
LOAD_MODEL = False # True if the training process is already created
SAVE_MODEL = True
# global variable
episode_reward = 0
# Configure Flags for executing model from console
FLAGS = flags.FLAGS
flags.DEFINE_string("mini-game", "HalucinIce", "Name of the minigame")
flags.DEFINE_string("algorithm", "deepq", "RL algorithm to use")
# Processor
class SC2Proc(Processor):
    """keras-rl Processor bridging pysc2 observations to the network input."""

    def process_observation(self, observation):
        """Extract the player_relative screen layer and append a channel
        axis, yielding an array shaped (screen, screen, 1)."""
        obs = observation[0].observation["feature_screen"][_PLAYER_RELATIVE]
        return np.expand_dims(obs, axis=2)

    def process_state_batch(self, batch):
        """Drop the window axis: swap (batch, window, ...) to
        (window, batch, ...) and keep only the first window entry."""
        batch = np.swapaxes(batch, 0, 1)
        return batch[0]
# Define the environment
class Environment(sc2_env.SC2Env):
    """Starcraft II environment wrapper. Implementation details in lib/features.py"""

    def step(self, action):
        """Apply actions, step the world forward, and return observations.

        NOTE(review): the agent-supplied `action` argument is discarded and
        replaced by actions_to_choose() on every step -- confirm this
        override is intended.
        """
        global episode_reward
        action = actions_to_choose()
        obs = super(Environment, self).step([action])
        observation = obs
        r = obs[0].reward
        # The episode is over when pysc2 reports the LAST step type.
        done = obs[0].step_type == environment.StepType.LAST
        episode_reward += r
        # Gym-style 4-tuple: (observation, reward, done, info).
        return observation, r, done, {}

    def reset(self):
        # Reset the per-episode reward accumulator, then immediately select
        # the whole army so subsequent unit abilities are applicable.
        global episode_reward
        episode_reward = 0
        super(Environment, self).reset()
        return super(Environment, self).step([actions.FunctionCall(_SELECT_ARMY, [_SELECT_ALL])])
def actions_to_choose():
    """Return the pysc2 action executed on every environment step.

    NOTE(review): `hall` enumerates all hallucination/sentry abilities but
    is currently unused -- the function always returns the Adept
    hallucination (see the TO-DO below).
    """
    hall = [_HAL_ADEPT, _HAL_ARCHON, _HAL_COL, _HAL_DISRUP,
            _HAL_HIGTEM, _HAL_IMN, _HAL_PHOENIX, _HAL_STALKER,
            _HAL_VOIDRAID, _HAL_ZEALOT, _FORCE_FIELD, _GUARD_FIELD]
    action = actions.FunctionCall(_HAL_ADEPT, [_NOT_QUEUED])
    print(action)  # debug: log the chosen action each step
    return action

# TO-DO : Define actions_to_choose based on SC2 sentry unit
# Agent architecture using keras rl
def neural_network_model(input, actions):
    """Build and compile the CNN+LSTM policy network.

    input: observation shape tuple, e.g. (64, 64, 1).
    actions: number of output classes (one softmax unit per discrete action).
    Returns the compiled keras Sequential model.
    """
    model = Sequential()

    # Define CNN model
    print(input)  # debug: log the input shape
    model.add(Conv2D(256, kernel_size=(5, 5), input_shape=input))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    # Reshape the dense output into a length-1 sequence for the LSTM.
    model.add(Reshape((1, 256)))
    model.add(LSTM(256))
    model.add(Dense(actions, activation='softmax'))
    model.summary()

    model.compile(loss="categorical_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    return model
def training_game():
    """Train (then evaluate) a DQN agent on the HallucinIce mini-game.

    Builds the pysc2 environment, the CNN+LSTM model and a DQN agent with a
    linearly annealed eps-greedy policy, runs fitting with periodic weight
    checkpoints, saves the final weights, then runs 30 test episodes.
    """
    env = Environment(map_name="HallucinIce", visualize=True, game_steps_per_episode=150,
                      agent_interface_format=features.AgentInterfaceFormat(
                          feature_dimensions=features.Dimensions(screen=64, minimap=32)
                      ))

    input_shape = (_SIZE, _SIZE, 1)
    nb_actions = 12  # Number of actions

    model = neural_network_model(input_shape, nb_actions)
    memory = SequentialMemory(limit=5000, window_length=_WINDOW_LENGTH)

    processor = SC2Proc()

    # Policy: epsilon annealed linearly from 1.0 down to 0.7 over 1e6 steps.
    policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr="eps", value_max=1, value_min=0.7, value_test=.0,
                                  nb_steps=1e6)

    # Agent
    dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, enable_double_dqn=False,
                   nb_steps_warmup=500, target_model_update=1e-2, policy=policy,
                   batch_size=150,
                   processor=processor)

    dqn.compile(Adam(lr=.001), metrics=["mae"])

    # Save the parameters and upload them when needed
    name = "HallucinIce"
    w_file = "dqn_{}_weights.h5f".format(name)
    check_w_file = "train_w" + name + "_weights.h5f"

    # With SAVE_MODEL set, checkpoints keep the step number in the filename.
    if SAVE_MODEL:
        check_w_file = "train_w" + name + "_weights_{step}.h5f"

    log_file = "training_w_{}_log.json".format(name)
    callbacks = [ModelIntervalCheckpoint(check_w_file, interval=1000)]
    callbacks += [FileLogger(log_file, interval=100)]

    # Resume from previously saved weights when LOAD_MODEL is enabled.
    if LOAD_MODEL:
        dqn.load_weights(w_file)

    dqn.fit(env, callbacks=callbacks, nb_steps=1e7, action_repetition=2,
            log_interval=1e4, verbose=2)

    dqn.save_weights(w_file, overwrite=True)

    dqn.test(env, action_repetition=2, nb_episodes=30, visualize=False)
# Script entry point: absl flags must be parsed before pysc2 is used.
if __name__ == '__main__':
    FLAGS(sys.argv)
    training_game()
| Python | 0.000001 | |
ebd62eac70d5589b0b7f593009024868f981e658 | Add actor with behavior similar to old-style Delay | calvin/actorstore/systemactors/std/ClassicDelay.py | calvin/actorstore/systemactors/std/ClassicDelay.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
class ClassicDelay(Actor):
    """
    After first token, pass on token once every 'delay' seconds.
    Input :
      token: anything
    Outputs:
      token: anything
    """
    # NOTE(review): the docstring above likely doubles as the actor's
    # port declaration for the Calvin actor store -- left byte-identical.

    @manage(['delay'])  # only 'delay' is persisted across migration
    def init(self, delay=0.1):
        self.delay = delay
        self.use('calvinsys.events.timer', shorthand='timer')
        self.timer = None

    def setup(self):
        # Start a repeating timer that fires every self.delay seconds.
        self.timer = self['timer'].repeat(self.delay)

    def will_migrate(self):
        # The timer cannot move with the actor; cancel it before migrating...
        self.timer.cancel()

    def did_migrate(self):
        # ...and recreate it on the destination runtime.
        self.setup()

    @condition(['token'], ['token'])
    @guard(lambda self, _: not self.timer)
    def start_timer(self, token):
        # Very first token: start the timer and forward immediately.
        self.setup()
        return ActionResult(production=(token, ))

    @condition(['token'], ['token'])
    @guard(lambda self, _: self.timer and self.timer.triggered)
    def passthrough(self, token):
        # Later tokens are forwarded only after the timer has fired;
        # ack() re-arms it for the next period.
        self.timer.ack()
        return ActionResult(production=(token, ))

    action_priority = (start_timer, passthrough)
    requires = ['calvinsys.events.timer']
| Python | 0.000002 | |
a88cf930a5c0e67a7aef93ab5c4eb705ad7aad32 | Fix ‘permissions_classes’ typos | kolibri/core/lessons/tests.py | kolibri/core/lessons/tests.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
# Create your tests here.
| Python | 0.999755 |
4bbd622921fcef6a07d5d87c0640a9eb4e48cf12 | Add nurseryTherm python file | nurseryTherm.py | nurseryTherm.py | #!/usr/bin/python
#CamJam Edukit 2 - Sensors
# Worksheet 3 - Temperature
# Import Libraries
import os
import glob
import time
import paho.mqtt.client as paho
import json
# Initialize the GPIO Pins
os.system('modprobe w1-gpio') # Turns on the GPIO module
os.system('modprobe w1-therm') # Turns on the Temperature module
# Finds the correct device file that holds the temperature data
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# A function that reads the sensors data
def read_temp_raw():
    """Return the raw text lines of the 1-wire sensor's device file."""
    # `with` guarantees the handle is closed even if readlines() raises
    # (the original leaked the handle on error).
    with open(device_file, 'r') as f:
        return f.readlines()
# Convert the value of the sensor into a temperature
def read_temp():
    """Poll the 1-wire sensor until a valid reading appears and convert it.

    Returns (celsius, fahrenheit); returns None if the 't=' marker is
    missing from the device file's second line.
    """
    sensor_lines = read_temp_raw()
    # The sensor signals a valid CRC by ending the first line with 'YES';
    # otherwise wait briefly and poll again.
    while not sensor_lines[0].strip().endswith('YES'):
        time.sleep(0.2)
        sensor_lines = read_temp_raw()
    # The temperature follows 't=' on the second line, in millidegrees C.
    marker = sensor_lines[1].find('t=')
    if marker != -1:
        celsius = float(sensor_lines[1][marker + 2:]) / 1000.0
        fahrenheit = celsius * 9.0 / 5.0 + 32.0
        return celsius, fahrenheit
def on_connect(client, userdata, flags, rc):
    """Paho MQTT connect callback: log flags, result code and client."""
    print("Connected flags%sresult code %sclient1_id %s" % (flags, rc, client))
def on_message(client1, userdata, message):
    """Paho MQTT message callback: print the UTF-8 decoded payload."""
    decoded = message.payload.decode("utf-8")
    print("message received " , str(decoded))
# Print out the temperature until the program is stopped.
#Connect to an MQTT server
client = paho.Client()
#client.on_connect= on_connect #attach function to callback
#client.on_message=on_message #attach function to callback
time.sleep(1)
client.connect("192.168.1.104") #connect to broker
#client.loop_start() #start the loop
#client.subscribe("house/nursery")
#>>> from time import gmtime, strftime
#>>> strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
# While running print the time and temperature
# Optionally write to a CSV file
# Publish the temperature to the MQTT server
try:
while True:
strTime = time.strftime("%H:%M:%S", time.localtime())
strDate = time.strftime("%Y-%m-%d",time.localtime())
temp = read_temp()
print(strTime,temp)
client.publish("/house/nursery/temp","%0.1f"%temp[0])
# f = open("/home/pi/nurseryTemp.csv","a")
# f.write("%s,%s,%s\n"%(strDate,strTime,temp[0]))
# f.close()
time.sleep(30)
except:
client.disconnect()
#client.loop_stop()
print("Closing")
| Python | 0.000002 | |
4f921177ae5f8f0dac2b30233c2723cadfffbe45 | add a waveform generator | waveform.py | waveform.py | #!/usr/bin/env python
"""A Waveform or Signal Generator Library for creating audio waveforms."""
import sys
import argparse
import math
import numpy
VERSION = "0.1"
class Generator(object):
    """Generate simple audio waveforms.

    Each generator method fills self.wavedata, a (framecount, 1) numpy
    array of float samples, and returns it.
    """

    def __init__(self, length=1.0, framerate=44100, verbose=False):
        """length: seconds of audio; framerate: frames per second."""
        self.length = length
        self.framerate = framerate
        self.verbose = verbose

    def _init(self, length=None, framerate=None, verbose=None, **kwargs):
        """(Re)compute frame bookkeeping and allocate the output buffer.

        Called by every generator method so per-call overrides of length,
        framerate or verbose take effect.  NOTE(review): falsy overrides
        (0, 0.0, False) are ignored by the truthiness tests below --
        confirm that is intended.
        """
        if length:
            self.length = length
        if framerate:
            self.framerate = framerate
        if verbose:
            self.verbose = verbose
        # framecount = frames/sec * sec
        self.framecount = int(self.framerate * self.length)
        # rectify length to the whole number of frames actually produced
        self.length = float(self.framecount) / self.framerate
        self.dprint('framecount = %s' % self.framecount)
        self.dprint('rectified length = %s' % self.length)
        self.wavedata = numpy.zeros((self.framecount, 1))

    def dprint(self, msg):
        """Conditionally print a debugging message."""
        if self.verbose:
            print(msg)

    def whitenoise(self, *args, **kwargs):
        """Random Gaussian White Noise."""
        self._init(*args, **kwargs)
        self.wavedata = numpy.random.randn(self.framecount, 1)
        return self.wavedata

    def _sinusoid_amplitude(self, frame, frequency):
        """Calculate the amplitude of a sinusoid wave at a given frequency."""
        # BUG FIX: was `frame is 0` -- identity comparison on integers is a
        # CPython implementation detail; use equality.  The 0.001
        # substitution is preserved (sin(0) would be 0 anyway -- presumably
        # guarding a degenerate division; TODO confirm why it exists).
        frame = 0.001 if frame == 0 else frame
        return math.sin(frame /
                        ((self.framerate / frequency) / math.pi))

    def sin_constant(self, frequency, *args, **kwargs):
        """Sinusoid wave of constant frequency."""
        self._init(*args, **kwargs)
        frequency = float(frequency)
        for frame in range(self.framecount):
            amplitude = self._sinusoid_amplitude(frame, frequency)
            self.wavedata[frame, 0] = amplitude
        return self.wavedata

    def sin_linear(self, start_freq, end_freq, *args, **kwargs):
        """Sinusoid wave whose frequency changes linearly from start_freq
        to end_freq across the clip."""
        self._init(*args, **kwargs)
        for frame in range(self.framecount):
            # freq = start_freq + frame * freq_rate, where
            # freq_rate = total_freq_change / framecount
            frequency = start_freq + frame * (
                float(end_freq - start_freq) / self.framecount)
            amplitude = self._sinusoid_amplitude(frame, frequency)
            self.wavedata[frame, 0] = amplitude
        return self.wavedata
# Demonstration CLI: generate a waveform from command-line options and either
# plot it or write it to a WAV file.  Everything below only exists when the
# module is run as a script.
if __name__ == '__main__':
    DOC="""A Demonstration Program for the Signal Generator."""
    import wav_file
    import plot

    # Module-level output switches, toggled by the argparse Actions below.
    VERBOSE = False
    DEBUG = False

    # One thin adapter per signal type; all share the (args, generator)
    # signature so UI_MAP can dispatch on args.type uniformly.
    def whitenoise(args, generator):
        generator.whitenoise()

    def sin_constant(args, generator):
        generator.sin_constant(args.frequency)

    def sin_linear(args, generator):
        # Sweep from half to double the requested frequency.
        generator.sin_linear(args.frequency/2, args.frequency*2)

    UI_MAP = {'noise': whitenoise, 'constant': sin_constant,
              'linear': sin_linear}

    def main():
        """Generate the requested signal, then plot it or write the WAV."""
        args = parse_cmd_line()
        sg = Generator(length=args.length, verbose=args.debug)
        UI_MAP[args.type](args, sg)
        if args.plot:
            plot.plot_waveform(sg.wavedata, 1, 0, sg.framecount)
        else:
            wfile = wav_file.WavFile(args.filename, 1, sg.framecount)
            try:
                wfile.write(sg.wavedata)
            finally:
                wfile.close()
        return 0

    def parse_cmd_line():
        """Build the argument parser and return the parsed options."""
        parser = argparse.ArgumentParser(description=DOC)
        parser.add_argument(
            '--version', help='Print the version and exit.', action='version',
            version='%(prog)s {}'.format(VERSION))
        DebugAction.add_parser_argument(parser)
        VerboseAction.add_parser_argument(parser)
        parser.add_argument(
            '--filename',
            help='File to write the generated waveform to.')
        parser.add_argument(
            '-l', '--length', type=float,
            help='Length in seconds of the generated wav.')
        parser.add_argument(
            '-p', '--plot', help='Plot the waveform instead. '
            'Warning: Use a small length (e.g. 0.05) or the plot '
            'will be massive.', action='store_true')
        parser.add_argument(
            '-t', '--type', help='Type of signal to generate',
            choices=UI_MAP.keys())
        parser.add_argument(
            '-f', '--frequency', help='Frequency to use.', type=float)
        parser.set_defaults(filename='signal.wav', length=1.0, debug=False,
                            frequency=480, type='constant')
        return parser.parse_args()

    def dprint(msg):
        """Conditionally print a debug message."""
        if DEBUG:
            print(msg)

    def vprint(msg):
        """Conditionally print a verbose message."""
        if VERBOSE:
            print(msg)

    class DebugAction(argparse.Action):
        """Enable the debugging output mechanism."""
        flag = '--debug'
        help = 'Enable debugging output.'

        @classmethod
        def add_parser_argument(cls, parser):
            parser.add_argument(cls.flag, help=cls.help, action=cls)

        def __init__(self, option_strings, dest, **kwargs):
            super(DebugAction, self).__init__(option_strings, dest, nargs=0,
                                              default=False, **kwargs)

        def __call__(self, parser, namespace, values, option_string=None):
            # Flip the module-level switch as soon as the flag is parsed.
            print('Enabling debugging output.')
            global DEBUG
            DEBUG = True
            setattr(namespace, self.dest, True)

    class VerboseAction(DebugAction):
        """Enable the verbose output mechanism."""
        flag = '--verbose'
        help = 'Enable verbose output.'

        def __call__(self, parser, namespace, values, option_string=None):
            print('Enabling verbose output.')
            global VERBOSE
            VERBOSE = True
            setattr(namespace, self.dest, True)

    try:
        sys.exit(main())
    except SystemExit:
        # NOTE(review): this catches the SystemExit raised by sys.exit(main())
        # and always re-exits with 0, discarding main()'s status -- confirm
        # that is intended.
        sys.exit(0)
    except KeyboardInterrupt:
        print('...interrupted by user, exiting.')
        sys.exit(1)
    except Exception as exc:
        if DEBUG:
            raise
        else:
            print('Unhandled Error:\n{}'.format(exc))
            sys.exit(1)
| Python | 0 | |
8e067196b44d78f60cdf904eb05ebdaaf27b0c64 | Add module for quick diagnostic plots of profiles. | diagnostic_plots.py | diagnostic_plots.py | """
Created on Tue Nov 22 11:12:58 2016
@author: Jens von der Linden
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
"""Python 3.x compatibility"""
import sys
sys.path.append('scipy_mod')
import fitpack
reload(fitpack)
from fitpack import splev
import numpy as np
from numpy import atleast_1d
import scipy.integrate
import singularity_frobenius as frob
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
def plot_all_profiles_suydam(profile, normalize=False,
mu_0=None, axes=None, title=None):
r"""
"""
if not axes:
axes = plt.gca()
r = np.linspace(0, 1, 250)
splines = profile.get_splines()
j_z = splines['j_z'](r)
b_z = splines['b_z'](r)
b_z_prime = splines['b_z'].derivative()(r)
b_theta = splines['b_theta'](r)
b_theta_prime = splines['b_theta'].derivative()(r)
p_prime = splines['p_prime'](r)
p = splines['pressure'](r)
safety_factor = splines['q'](r)
safety_factor_prime = splines['q'].derivative()(r)
if mu_0:
beta_0 = mu_0
else:
beta_0 = profile.beta_0()
alpha = frob.alpha_func(r, b_z, b_z_prime, b_theta, b_theta_prime)
beta = frob.beta_func(b_z, b_theta, p_prime, beta_0)
suydam_mu = alpha + 4.*beta
suydam_q = r*b_z**2./(8. * beta_0)*(safety_factor_prime/safety_factor)**2. + p_prime
if 'j_theta' in splines.keys():
print('true')
j_theta = splines['j_theta'](r)
else:
print('false')
j_theta = None
if normalize:
j_z = j_z/np.max(np.abs(j_z))
b_theta = b_theta/np.max(np.abs(b_theta))
alpha = alpha/np.max(np.abs(alpha))
beta = beta/np.max(np.abs(beta))
suydam_q = suydam_q/np.max(np.abs(suydam_q))
suydam_mu = suydam_mu/np.nanmax(np.abs(suydam_mu))
p = p/np.max(np.abs(p))
p_prime = p_prime/np.max(np.abs(p_prime))
safety_factor_prime = safety_factor_prime/np.max(np.abs(safety_factor_prime))
if 'j_theta' in splines.keys():
j_theta = j_theta/np.nanmax(np.abs(j_theta))
axes.plot(r, j_z, c='#087804', label=r'$j_z$')
if 'j_theta' in splines.keys():
axes.plot(r, j_theta, c='#6fc276', label=r'$j_\theta$')
axes.plot(r, b_theta, c='#e50000', label=r'$B_\theta$')
axes.plot(r, p_prime, c='#7e1e9c', label=r"$p'$")
axes.plot(r, p, c='#bf77f6', label=r"$p$")
axes.plot(r, safety_factor, c='#000000', label=r"$q$")
axes.plot(r, safety_factor_prime, c='#7d7f7c', label=r"$q'$")
axes.plot(r[1:], suydam_q[1:], c='#acfffc', label=r"suydam_q")
#axes.plot(r[1:], suydam_mu[1:], c='#82cafc', label=r"suydam_mu")
if title:
axes.set_title(title)
axes.legend(loc='best')
return axes
def plot_suydam(profile, normalize=False):
    r"""
    Plot the Suydam stability quantities for *profile* on the current axes.

    Parameters
    ----------
    profile : object
        Equilibrium object exposing ``get_splines()`` and ``beta_0()``.
    normalize : bool, optional
        If True, scale each computed curve by its maximum absolute value.

    Returns
    -------
    tuple of (suydam_q, suydam_mu, axes).
    """
    axes = plt.gca()
    r = np.linspace(0, 1, 250)
    splines = profile.get_splines()
    j_z = splines['j_z'](r)
    b_z = splines['b_z'](r)
    b_z_prime = splines['b_z'].derivative()(r)
    b_theta = splines['b_theta'](r)
    # Bug fix: this was ``splines['b_theta'](derivative)(r)``, which raised
    # NameError on an undefined ``derivative``.  Call the spline's
    # ``.derivative()`` like every other profile in this module.
    b_theta_prime = splines['b_theta'].derivative()(r)
    p_prime = splines['p_prime'](r)
    p = splines['pressure'](r)
    safety_factor = splines['q'](r)
    safety_factor_prime = splines['q'].derivative()(r)
    beta_0 = profile.beta_0()
    alpha = frob.alpha_func(r, b_z, b_z_prime, b_theta, b_theta_prime)
    beta = frob.beta_func(b_z, b_theta, p_prime, beta_0)
    suydam_mu = alpha + 4.*beta
    # Suydam criterion written in terms of the safety factor shear.
    suydam_q = r*b_z**2./(8. * beta_0)*(safety_factor_prime/safety_factor)**2. + p_prime
    if normalize:
        # Scale each curve to unit maximum magnitude so shapes compare.
        j_z = j_z/np.max(np.abs(j_z))
        b_theta = b_theta/np.max(np.abs(b_theta))
        alpha = alpha/np.max(np.abs(alpha))
        beta = beta/np.max(np.abs(beta))
        suydam_q = suydam_q/np.max(np.abs(suydam_q))
        # nanmax: suydam_mu can contain NaN at r = 0.
        suydam_mu = suydam_mu/np.nanmax(np.abs(suydam_mu))
        p_prime = p_prime/np.max(np.abs(p_prime))
        safety_factor_prime = safety_factor_prime/np.max(np.abs(safety_factor_prime))
    #axes.plot(r, j_z, c='#087804', label=r'$j_z$')
    #axes.plot(r, b_theta, c='#e50000', label=r'$B_\theta$')
    #axes.plot(r, p_prime, c='#7e1e9c', label=r"$p'$")
    #axes.plot(r, p, c='#bf77f6', label=r"$p$")
    #axes.plot(r, safety_factor, c='#000000', label=r"$q$")
    #axes.plot(r, safety_factor_prime, c='#7d7f7c', label=r"$q'$")
    axes.plot(r, suydam_q, c='#acfffc', label=r"suydam_q")
    axes.plot(r, suydam_mu, c='#82cafc', label=r"suydam_mu")
    axes.legend(loc='best')
    return suydam_q, suydam_mu, axes
| Python | 0 | |
e363aac46c9a5b607c7b32bcc5546c5a2728d750 | Add migration which fixes missing message IDs. | climate_data/migrations/0029_auto_20170628_1527.py | climate_data/migrations/0029_auto_20170628_1527.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-28 15:27
from __future__ import unicode_literals
from django.db import migrations
from datetime import timedelta
# noinspection PyUnusedLocal
def add_message_id_to_reading(apps, schema_editor):
    """Backfill Reading.message for readings that have no message yet.

    Each orphaned reading is linked to the first Message from the same
    station that arrived within 52 minutes after the reading was taken.
    """
    reading_model = apps.get_model('climate_data', 'Reading')
    message_model = apps.get_model('climate_data', 'Message')

    orphans = reading_model.objects.filter(message_id=None)
    for orphan in orphans:
        window_end = orphan.read_time + timedelta(minutes=52)
        candidates = message_model.objects.filter(
            station=orphan.station,
            arrival_time__gt=orphan.read_time,
            arrival_time__lt=window_end,
        )
        orphan.message = candidates.first()
        orphan.save()
class Migration(migrations.Migration):
    """Data migration: backfill the message link on existing readings."""

    dependencies = [
        ('climate_data', '0028_auto_20170627_1914'),
    ]

    operations = [
        migrations.RunPython(add_message_id_to_reading),
    ]
| Python | 0 | |
840bc57e7120ae67e84c1c7bca94cfef34c8d2a8 | Copy old script from @erinspace which added identifiers to existing preprints. | scripts/add_missing_identifiers_to_preprints.py | scripts/add_missing_identifiers_to_preprints.py | import sys
import time
import logging
from scripts import utils as script_utils
from django.db import transaction
from website.app import setup_django
from website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers
setup_django()
logger = logging.getLogger(__name__)
def add_identifiers_to_preprints(dry=True):
    """Request and save EZID identifiers (DOI/ARK) for every preprint
    that does not have one yet.

    When *dry* is True, only logs what would be done.
    """
    from osf.models import PreprintService
    preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)
    logger.info('About to add identifiers to {} preprints.'.format(preprints_without_identifiers.count()))

    for preprint in preprints_without_identifiers:
        logger.info('Saving identifier for preprint {} from source {}'.format(preprint._id, preprint.provider.name))

        if not dry:
            ezid_response = request_identifiers_from_ezid(preprint)
            id_dict = parse_identifiers(ezid_response)
            preprint.set_identifier_values(doi=id_dict['doi'], ark=id_dict['ark'])
            preprint.save()

            # Sanity check: the minted DOI must embed this preprint's guid.
            doi = preprint.get_identifier('doi')
            assert preprint._id.upper() in doi.value
            logger.info('Created DOI {} for Preprint with guid {} from service {}'.format(doi.value, preprint._id, preprint.provider.name))

            # Pause between requests — presumably to rate-limit calls to
            # the EZID service; confirm before removing.
            time.sleep(1)
        else:
            logger.info('Dry run - would have created identifier for preprint {} from service {}'.format(preprint._id, preprint.provider.name))

    logger.info('Finished Adding identifiers to {} preprints.'.format(preprints_without_identifiers.count()))
def main(dry=True):
    """Run the identifier backfill.

    In dry mode an exception is raised on purpose so that any wrapping
    transaction rolls back and nothing is actually saved.
    """
    add_identifiers_to_preprints(dry)
    if not dry:
        return
    # Dry run: abort via an exception so the transaction is not committed.
    raise Exception('Dry Run complete -- not actually saved')
if __name__ == '__main__':
    # --dry on the command line means "report only, roll back at the end".
    dry = '--dry' in sys.argv
    if not dry:
        # If we're not running in dry mode log everything to a file
        script_utils.add_file_logger(logger, __file__)
    # Allow setting the log level just by appending the level to the command
    if '--debug' in sys.argv:
        logger.setLevel(logging.DEBUG)
    elif '--warning' in sys.argv:
        logger.setLevel(logging.WARNING)
    elif '--info' in sys.argv:
        logger.setLevel(logging.INFO)
    elif '--error' in sys.argv:
        logger.setLevel(logging.ERROR)
    # Finally run the migration
    main(dry=dry)
| Python | 0 | |
07e94e2bd2630dbff87d785cc5d6e67d78944e3f | add a script to help run salmon on transcriptomes | seqtools/cli/utilities/fastq_to_salmon_quant.py | seqtools/cli/utilities/fastq_to_salmon_quant.py | #!/usr/bin/env python
"""Take a fastq/fasta and make a transcriptome quantification"""
import argparse, sys, os, gzip
from shutil import rmtree, copy
from multiprocessing import cpu_count, Pool
from tempfile import mkdtemp, gettempdir
from seqtools.format.fasta import FASTAData
from seqtools.format.gpd import GPDStream
from subprocess import Popen, PIPE
def main(args):
    """Quantify a GenePred-defined transcriptome with salmon.

    Steps: (1) read the reference genome, (2) write a transcriptome FASTA
    from the GenePred, (3) build a salmon index, (4) run salmon quant on
    the reads, (5) merge the quantification with gene names and write a
    per-transcript report sorted by gene TPM.
    """
    """ First read the genome """
    sys.stderr.write("reading reference genome\n")
    ref = FASTAData(open(args.genome).read())
    sys.stderr.write("read in "+str(len(ref.keys()))+" chromosomes\n")
    """ Next make the transcriptome """
    txome = {}
    sys.stderr.write("write the transcriptome\n")
    inf = None
    # GenePred input may be gzip-compressed.
    if args.gpd[-3:] == '.gz':
        inf = gzip.open(args.gpd)
    else:
        inf = open(args.gpd)
    stream = GPDStream(inf)
    tof = open(args.tempdir+'/transcriptome.fa','w')
    z = 0
    for gpd in stream:
        z += 1
        # Keep only the first occurrence of each transcript name.
        if gpd.get_transcript_name() in txome:
            sys.stderr.write("WARNING already have a transcript "+gpd.get_transcript_name()+" ignoring line "+str(z)+" of the gpd\n")
            continue
        txome[gpd.get_transcript_name()] = gpd
        tof.write('>'+gpd.get_transcript_name()+"\n"+gpd.get_sequence(ref)+"\n")
    tof.close()
    inf.close()
    sys.stderr.write("wrote "+str(len(txome.keys()))+" transcripts\n")
    """Build the salmon index"""
    sys.stderr.write("building a salmon index\n")
    cmd = 'salmon index -p '+str(args.numThreads)+' -t '+args.tempdir+'/transcriptome.fa -i '+args.tempdir+'/salmon_index'
    p = Popen(cmd.split())
    p.communicate()
    sys.stderr.write("finished building the index\n")
    """Use the index to quanitfy"""
    sys.stderr.write("quanitfy reads\n")
    reads = ''
    # Unpaired (-r) vs paired (-1/-2) reads; do_inputs guarantees one of them.
    if args.rU:
        reads = '-r '+args.rU
    else:
        reads = '-1 '+args.r1+' -2 '+args.r2
    cmd = 'salmon quant -p '+str(args.numThreads)+' -i '+args.tempdir+'/salmon_index -l A '+reads+' -o '+args.tempdir+'/output_quant'
    p = Popen(cmd.split())
    p.communicate()
    sys.stderr.write("finished quanitfying\n")
    """Now parse the salmon output to add gene name"""
    salmon = {}
    with open(args.tempdir+'/output_quant/quant.sf') as inf:
        header = inf.readline()
        for line in inf:
            f = line.rstrip().split("\t")
            # by each transcript name hold a data strcture of the information
            salmon[f[0]] = {'name':f[0],'length':int(f[1]),'EffectiveLength':float(f[2]),'TPM':float(f[3]),'NumReads':float(f[4])}
    # Group per-transcript records by gene, then total the TPM per gene.
    genes = {}
    for name in salmon:
        gene = txome[name].get_gene_name()
        if gene not in genes: genes[gene] = []
        genes[gene].append(salmon[name])
    genetot = {}
    for gene in genes:
        tot = sum([x['TPM'] for x in genes[gene]])
        genetot[gene] = tot
    # Genes sorted by descending total TPM.
    ordered_gene_names = sorted(genetot.keys(), key=lambda x: genetot[x],reverse=True)
    """Collected enough information to make output"""
    sys.stderr.write("generating output\n")
    of = sys.stdout
    if args.output != '-':
        of = open(args.output,'w')
    for gene in ordered_gene_names:
        # Within a gene, transcripts sorted by descending TPM.
        txs = sorted(genes[gene],key=lambda x: x['TPM'],reverse=True)
        for tx in txs:
            of.write(gene+"\t"+tx['name']+"\t"+str(tx['length'])+"\t"+str(tx['EffectiveLength'])+"\t"+str(tx['TPM'])+"\t"+str(tx['NumReads'])+"\t"+str(genetot[gene])+"\n")
    of.close()
    # Temporary working directory step 3 of 3 - Cleanup
    if not args.specific_tempdir:
        rmtree(args.tempdir)
def do_inputs():
    """Parse command-line arguments and prepare the temporary directory.

    Returns the parsed argparse namespace with ``tempdir`` set.
    """
    # Setup command line inputs
    parser=argparse.ArgumentParser(description="Quantify a genepred defined transcriptome using salmon",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--rU',help="INPUT FASTQ/FASTA can be gzipped")
    parser.add_argument('--r1',help="INPUT Pair1 FASTQ/FASTA can be gzipped")
    parser.add_argument('--r2',help="INPUT Pair2 FASTQ/FASTA can be gzipped")
    parser.add_argument('gpd',help="transcriptome genepred")
    parser.add_argument('genome',help="reference fasta")
    parser.add_argument('--output','-o',required=True,help="Specifiy path to write output or - for stdout")
    parser.add_argument('-p','--numThreads',type=int,default=cpu_count(),help="INT number of threads to run. Default is system cpu count")
    # Temporary working directory step 1 of 3 - Definition
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--tempdir',default=gettempdir(),help="The temporary directory is made and destroyed here.")
    group.add_argument('--specific_tempdir',help="This temporary directory will be used, but will remain after executing.")
    args = parser.parse_args()
    # Require either unpaired reads or both mates of a pair.
    if not ((args.r1 and args.r2) or args.rU):
        parser.error("Either rU or both r1 and r2 need to be specified")
    # Temporary working directory step 2 of 3 - Creation
    setup_tempdir(args)
    return args
def setup_tempdir(args):
    """Create the working directory and store its path in ``args.tempdir``.

    If ``args.specific_tempdir`` is set, that directory is created (if
    needed) and used as-is, and it will persist after the run.  Otherwise
    a fresh, uniquely named directory is created under ``args.tempdir``.

    Exits with an error message on stderr if the directory could not be
    created.  (The original had three copies of the same existence check
    and error block; a single final check covers every path.)
    """
    if args.specific_tempdir:
        target = args.specific_tempdir.rstrip('/')
        if not os.path.exists(target):
            os.makedirs(target)
        args.tempdir = target
    else:
        args.tempdir = mkdtemp(prefix="weirathe.", dir=args.tempdir.rstrip('/'))
    if not os.path.exists(args.tempdir):
        sys.stderr.write("ERROR: Problem creating temporary directory\n")
        sys.exit()
    return
def external_cmd(cmd):
    """Run this module's pipeline as if *cmd* were the command line.

    Temporarily substitutes ``sys.argv`` so ``do_inputs`` parses *cmd*.
    """
    cache_argv = sys.argv
    sys.argv = cmd.split()
    try:
        args = do_inputs()
        main(args)
    finally:
        # Restore argv even when the run raises; the original left the
        # caller with a clobbered sys.argv on any exception.
        sys.argv = cache_argv
if __name__=="__main__":
    # Command-line entry point: parse arguments, then run the pipeline.
    args = do_inputs()
    main(args)
| Python | 0.000001 | |
c54a09765c409698f058e706a0688e1870f8bc22 | Add execution module for RallyDev | salt/modules/rallydev.py | salt/modules/rallydev.py | # -*- coding: utf-8 -*-
'''
Support for RallyDev
Requires a ``username`` and a ``password`` in ``/etc/salt/minion``:
.. code-block: yaml
rallydev:
username: myuser@example.com
password: 123pass
'''
# Import python libs
from __future__ import absolute_import, print_function
import json
import logging
# Import salt libs
from salt.exceptions import SaltException
from salt.exceptions import SaltInvocationError
import salt.utils.http
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if RallyDev credentials (username and password)
    are present in the minion configuration.
    '''
    if not __opts__.get('rallydev', {}).get('username', None):
        return False
    if not __opts__.get('rallydev', {}).get('password', None):
        return False
    return True
def _get_token():
    '''
    Authenticate against the RallyDev API with the configured credentials
    and return a security token, or None if authentication failed.
    '''
    username = __opts__.get('rallydev', {}).get('username', None)
    password = __opts__.get('rallydev', {}).get('password', None)
    path = 'https://rally1.rallydev.com/slm/webservice/v2.0/security/authorize'
    result = salt.utils.http.query(
        path,
        decode=True,
        decode_type='json',
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    # No decoded payload means the request failed; signal with None.
    if 'dict' not in result:
        return None
    return result['dict']['OperationResult']['SecurityToken']
def _query(action=None,
           command=None,
           args=None,
           method='GET',
           header_dict=None,
           data=None):
    '''
    Make a web call to RallyDev.

    Returns a two-item list of [HTTP status, decoded response dict] on
    success, or [HTTP status, error] on failure.

    .. versionadded:: Beryllium
    '''
    token = _get_token()
    username = __opts__.get('rallydev', {}).get('username', None)
    password = __opts__.get('rallydev', {}).get('password', None)
    path = 'https://rally1.rallydev.com/slm/webservice/v2.0/'

    if action:
        path += action

    if command:
        path += '/{0}'.format(command)

    log.debug('RallyDev URL: {0}'.format(path))

    if not isinstance(args, dict):
        args = {}

    # Every request carries the security token as a query parameter.
    args['key'] = token

    if header_dict is None:
        header_dict = {'Content-type': 'application/json'}

    if method != 'POST':
        header_dict['Accept'] = 'application/json'

    # DELETE responses carry no JSON body worth decoding.
    decode = True
    if method == 'DELETE':
        decode = False

    result = salt.utils.http.query(
        path,
        method,
        params=args,
        data=data,
        header_dict=header_dict,
        decode=decode,
        decode_type='json',
        text=True,
        status=True,
        username=username,
        password=password,
        cookies=True,
        persist_session=True,
        opts=__opts__,
    )
    log.debug('RallyDev Response Status Code: {0}'.format(result['status']))
    if 'error' in result:
        log.error(result['error'])
        return [result['status'], result['error']]

    return [result['status'], result.get('dict', {})]
def list_items(name):
    '''
    List items of a particular type

    CLI Examples:

    .. code-block:: bash

        salt myminion rallydev.list_<item name>s
        salt myminion rallydev.list_users
        salt myminion rallydev.list_artifacts
    '''
    # Plain GET on the item type; the HTTP status is discarded.
    status, result = _query(action=name)
    return result


def query_item(name, query_string, order='Rank'):
    '''
    Query a type of record for one or more items. Requires a valid query string.
    See https://rally1.rallydev.com/slm/doc/webservice/introduction.jsp for
    information on query syntax.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.query_<item name> <query string> [<order>]
        salt myminion rallydev.query_task '(Name contains github)'
        salt myminion rallydev.query_task '(Name contains reactor)' Rank
    '''
    status, result = _query(
        action=name,
        args={'query': query_string,
              'order': order}
    )
    return result


def show_item(name, id_):
    '''
    Show an item

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_<item name> <item id>
    '''
    status, result = _query(action=name, command=id_)
    return result
def update_item(name, id_, field=None, value=None, postdata=None):
    '''
    Update an item. Either a field and a value, or a chunk of POST data, may be
    used, but not both.

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.update_<item name> <item id> field=<field> value=<value>
        salt myminion rallydev.update_<item name> <item id> postdata=<post data>
    '''
    # ``is not None`` (rather than truthiness) so legitimate falsy values
    # such as 0 or '' can be set on a field.
    if field is not None and value is not None:
        if postdata:
            raise SaltInvocationError('Either a field and a value, or a chunk '
                                      'of POST data, may be specified, but not both.')
        postdata = {name.title(): {field: value}}

    if postdata is None:
        raise SaltInvocationError('Either a field and a value, or a chunk of '
                                  'POST data must be specified.')

    status, result = _query(
        action=name,
        command=id_,
        method='POST',
        data=json.dumps(postdata),
    )
    return result
def show_artifact(id_):
    '''
    Show an artifact

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_artifact <artifact id>
    '''
    return show_item('artifact', id_)


def list_users():
    '''
    List the users

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.list_users
    '''
    return list_items('user')


def show_user(id_):
    '''
    Show a user

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.show_user <user id>
    '''
    return show_item('user', id_)


def update_user(id_, field, value):
    '''
    Update a user

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.update_user <user id> <field> <new value>
    '''
    return update_item('user', id_, field, value)


def query_user(query_string, order='UserName'):
    '''
    Query users

    CLI Example:

    .. code-block:: bash

        salt myminion rallydev.query_user '(Name contains Jo)'
    '''
    return query_item('user', query_string, order)
| Python | 0 | |
3a9ec86e4b996912b1a47abe07c70116be14b3f8 | Create hello.py | hello.py | hello.py | print "Hello all"
| Python | 0.999503 | |
d73b2108358c8aa43509b6def6879fc70b138fb5 | add objects | nefi2_main/nefi2/view/test2.py | nefi2_main/nefi2/view/test2.py | from PyQt4 import QtGui, QtCore
import sys
class Main(QtGui.QMainWindow):
    """Main window: a button that appends Test widgets to a scroll area."""

    def __init__(self, parent = None):
        super(Main, self).__init__(parent)

        # main button
        self.addButton = QtGui.QPushButton('button to add other widgets')
        self.addButton.clicked.connect(self.addWidget)

        # scroll area widget contents - layout
        self.scrollLayout = QtGui.QFormLayout()

        # scroll area widget contents
        self.scrollWidget = QtGui.QWidget()
        self.scrollWidget.setLayout(self.scrollLayout)

        # scroll area
        self.scrollArea = QtGui.QScrollArea()
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setWidget(self.scrollWidget)

        # main layout
        self.mainLayout = QtGui.QVBoxLayout()

        # add all main to the main vLayout
        self.mainLayout.addWidget(self.addButton)
        self.mainLayout.addWidget(self.scrollArea)

        # central widget
        self.centralWidget = QtGui.QWidget()
        self.centralWidget.setLayout(self.mainLayout)

        # set central widget
        self.setCentralWidget(self.centralWidget)

    def addWidget(self):
        """Append a fresh Test widget as a new row in the scroll area."""
        self.scrollLayout.addRow(Test())
class Test(QtGui.QWidget):
    """A minimal widget holding one push button, used as a scroll-area row."""

    def __init__(self, parent=None):
        super(Test, self).__init__(parent)

        self.pushButton = QtGui.QPushButton('I am in Test widget')

        layout = QtGui.QHBoxLayout()
        layout.addWidget(self.pushButton)
        self.setLayout(layout)
app = QtGui.QApplication(sys.argv)
myWidget = Main()
myWidget.show()
app.exec_() | Python | 0.000006 | |
98bf1c67b95d40888e26068015e4abf1b94d0640 | add ddns state module | salt/states/ddns.py | salt/states/ddns.py | '''
Dynamic DNS updates.
====================
Ensure a DNS record is present or absent utilizing RFC 2136
type dynamic updates. Requires dnspython module.
.. code-block:: yaml
webserver:
ddns.present:
- zone: example.com
- ttl: 60
'''
def __virtual__():
    # Make this state available only when the ddns execution module loaded.
    return 'ddns' if 'ddns.update' in __salt__ else False
def present(name, zone, ttl, data, rdtype='A'):
    '''
    Ensure that the named DNS record is present with the given ttl.

    name
        The host portion of the DNS record, e.g., 'webserver'

    zone
        The zone to check/update

    ttl
        TTL for the record

    data
        Data for the DNS record. E.g., the IP address for an A record.

    rdtype
        DNS resource type. Default 'A'.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    # In test mode, just report what would happen.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = '{0} record "{1}" will be updated'.format(rdtype, name)
        return ret

    status = __salt__['ddns.update'](zone, name, ttl, rdtype, data)

    # None means the record already matched; nothing to change.
    if status is None:
        ret['result'] = True
        ret['comment'] = '{0} record "{1}" already present with ttl of {2}'.format(
            rdtype, name, ttl)
        return ret

    if status:
        ret['result'] = True
        ret['comment'] = 'Updated {0} record for "{1}"'.format(rdtype, name)
        ret['changes'] = {'name': name,
                          'zone': zone,
                          'ttl': ttl,
                          'rdtype': rdtype,
                          'data': data
                          }
        return ret

    # Falsy (non-None) status: the dynamic update failed.
    ret['result'] = False
    ret['comment'] = 'Failed to create or update {0} record for "{1}"'.format(rdtype, name)
    return ret
def absent(name, zone, data=None, rdtype=None):
    '''
    Ensure that the named DNS record is absent.

    name
        The host portion of the DNS record, e.g., 'webserver'

    zone
        The zone to check

    data
        Data for the DNS record. E.g., the IP address for an A record. If
        omitted, all records matching name (and rdtype, if provided) will
        be purged.

    rdtype
        DNS resource type. If omitted, all types will be purged.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    # In test mode, just report what would happen.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = '{0} record "{1}" will be deleted'.format(rdtype, name)
        return ret

    status = __salt__['ddns.delete'](zone, name, rdtype, data)

    # None means nothing matched; already absent.
    if status is None:
        ret['result'] = True
        ret['comment'] = 'No matching DNS record(s) present'
        return ret

    if status:
        ret['result'] = True
        ret['comment'] = 'Deleted DNS record(s)'
        ret['changes'] = True
        return ret

    ret['result'] = False
    ret['comment'] = 'Failed to delete DNS record(s)'
    return ret
| Python | 0 | |
4ea54e24948356b039ad961c857e685c30bb0737 | Solve task #500 | 500.py | 500.py | class Solution(object):
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
rows = ['qwertyuiop', 'asdfghjkl', 'zxcvbnm']
def inOneRow(word):
mask = [0, 0, 0]
for i in range(len(rows)):
for ch in word:
if ch in rows[i]:
mask[i] = 1
return sum(mask) == 1
ans = []
for word in words:
wordl = word.lower()
if inOneRow(wordl):
ans.append(word)
return ans
| Python | 0.999999 | |
ce3eef2c749f7d9f7bcd1d439497121e89e3727b | Add notification | devicehive/notification.py | devicehive/notification.py | from devicehive.api_object import ApiObject
class Notification(ApiObject):
"""Notification class."""
DEVICE_ID_KEY = 'deviceId'
ID_KEY = 'id'
NOTIFICATION_KEY = 'notification'
PARAMETERS_KEY = 'parameters'
TIMESTAMP_KEY = 'timestamp'
def __init__(self, transport, token, notification):
ApiObject.__init__(self, transport)
self._token = token
self._device_id = notification[self.DEVICE_ID_KEY]
self._id = notification[self.ID_KEY]
self._notification = notification[self.NOTIFICATION_KEY]
self._parameters = notification[self.PARAMETERS_KEY]
self._timestamp = notification[self.TIMESTAMP_KEY]
def device_id(self):
return self._device_id
def id(self):
return self._id
def notification(self):
return self._notification
def parameters(self):
return self._parameters
def timestamp(self):
return self._timestamp
| Python | 0.000001 | |
f26afa19a06b02c668073785bbf5f248ac8072f6 | Rename test runs, to prep for real tests. | colr_test_run.py | colr_test_run.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" test_colr.py
Run a few tests for the Colr library.
I know these are not good tests. They do catch bugs though.
Actual unit tests are coming, but for now just try all the methods and see
if anything breaks.
-Christopher Welborn 08-30-2015
"""
from docopt import docopt
import os
import sys
from colr import __version__, auto_disable, color, disabled, Colr
NAME = 'Test Colr'
VERSIONSTR = '{} v. {}'.format(NAME, __version__)
SCRIPT = os.path.split(os.path.abspath(sys.argv[0]))[1]
SCRIPTDIR = os.path.abspath(sys.path[0])
USAGESTR = """{versionstr}
Usage:
{script} [-h | -v]
Options:
-h,--help : Show this help message.
-v,--version : Show version.
""".format(script=SCRIPT, versionstr=VERSIONSTR)
# Max widths, 1/3 width, for justification tests.
maxwidth = 78
chunkwidth = maxwidth / 3
# Automatically disable colors when piping output.
auto_disable()
def main(argd):
    """ Main entry point, expects doctopt arg dict as argd. """
    print('Running {}'.format(color(VERSIONSTR, fore='red', style='bright')))
    # Exercise each feature group in turn; these print their own output.
    justify_tests()
    join_tests()
    gradient_override_tests()
    gradient_mix_tests()
    rainbow_tests()
    if disabled():
        print('\nColr was disabled.')
    return 0
def gradient_mix_tests():
    """ Test display of the gradient options. """
    # Gradient should operate on self.data when no text is provided.
    print(Colr('This is a gradient self.data.').gradient())

    # Gradient should append to self.data when no text is provided.
    print(
        Colr('This is a green self.data', fore='green')(' ')
        .gradient('And this is an appended gradient.', name='blue'))

    # Gradient should be okay with ljust/center/rjust.
    print(Colr().gradient('This is a left gradient').ljust(maxwidth))
    print(Colr().gradient('Center gradient.').center(maxwidth))
    print(Colr().gradient('Right-aligned gradient.').rjust(maxwidth))

    # Gradient and ljust/center/rjust would be chainable.
    print(Colr()
          .ljust(chunkwidth, text='Chained left.').gradient(name='red')
          .center(chunkwidth, text='Chained center.').gradient(name='white')
          .rjust(chunkwidth, text='Chained right.').gradient(name='blue'))

    # Black/white gradient should work in linemode or non-linemode.
    lines = ['This is a block made into a sad rainbow' for _ in range(5)]
    print(Colr('\n'.join(lines)).gradient(name='black'))
    lines = ['This is a block made into a long sad rainbow' for _ in range(5)]
    print(Colr('\n'.join(lines)).gradient(name='white', linemode=False))
    lines = ['This is a block made into a better rainbow' for _ in range(5)]
    print(Colr('\n'.join(lines)).gradient(name='red'))
def gradient_override_tests():
    """ Test gradient with explicit fore, back, and styles. """
    try:
        # Both fore and back are not allowed in a gradient.
        print(Colr().gradient(' ' * maxwidth, fore='reset', back='reset'))
    except ValueError:
        pass
    else:
        # Bug fix: the original silently passed when no error was raised,
        # so a regression in the fore/back check would go unnoticed.
        raise AssertionError(
            'gradient() should raise ValueError for fore and back together.')

    # Gradient back color.
    print(Colr().gradient(' ' * maxwidth, name='black', fore='reset'))
    # Explicit gradient fore color.
    print(Colr().gradient('-' * maxwidth, name='white', spread=2, back='blue'))
    # Implicit gradient fore color.
    print(Colr().gradient('_' * maxwidth, name='white'), end='\n\n')
def join_tests():
    """ Test join mixed with other methods. """
    def fancy_log(label, msg, tag):
        """ Squeezed justification with complex joins should account for
            existing text width.
        """
        return (
            Colr(label, fore='green')
            .center(
                # Centering on maxwidth would ruin the next rjust because
                # the spaces created by .center will not be overwritten.
                maxwidth - (len(tag) + 2),
                text=msg,
                fore='yellow',
                squeeze=True
            )
            .rjust(
                maxwidth,
                text=Colr(tag, fore='red').join(
                    '[', ']',
                    fore='blue'
                ),
                squeeze=True)
        )
    print(fancy_log('This is a label:', 'This is centered.', 'Status: Okay'))

    # Split the line in two halves, one plain and one colored.
    print(Colr('|', fore='blue').join(
        'This is regular text.'.ljust(maxwidth // 2 - 1),
        Colr('This is colored.', fore='red').rjust(maxwidth // 2)
    ))
def justify_tests():
    """ Test the justification methods, alone and mixed with other methods.
    """
    # Justified text should be chainable.
    print(
        Colr()
        .ljust(chunkwidth, text='Left', fore=255, back='green', style='b')
        .center(chunkwidth, text='Middle', fore=255, back='blue', style='b')
        .rjust(chunkwidth, text='Right', fore=255, back='red', style='b')
    )

    # Chained formatting must provide the 'text' argument,
    # otherwise the string is built up and the entire string width grows.
    # This built up string would then be padded, instead of each individual
    # string.
    print(
        Colr()
        # 256 color methods can be called with bg_<num>, b_<num>, b256_<num>.
        .b_82().b().f_255().ljust(chunkwidth, text='Left')
        .b256_56().b().f_255().center(chunkwidth, text='Middle')
        # Named background color start with 'bg' or 'b_'
        .bgred().b().f_255().rjust(chunkwidth, text='Right')
    )

    # Width should be calculated without color codes.
    print(Colr('True Middle').center(maxwidth, fore='magenta'))

    # Squeezed justification should account for existing text width.
    # But if text was previously justified, don't ruin it.
    print(Colr('Lefty', fore=232, back=255).center(
        maxwidth,
        text='Center',
        fore=232,
        back='blue',
        style='bright',
        squeeze=True))
    print(
        Colr('LeftyCenter'.center(maxwidth // 2), fore=232, back=255)
        .center(
            maxwidth / 2,
            text='Center',
            fore=232,
            back='blue',
            style='bright',
            squeeze=True
        )
    )
def rainbow_tests():
    """ Test rainbow output, with or without linemode (short/long output)
    """
    print(Colr('This is a rainbow. It is very pretty.').rainbow())
    lines = ['This is a block of text made into a rainbow' for _ in range(5)]
    print(Colr('\n'.join(lines)).rainbow(movefactor=5))
    lines = ['This is a block made into a long rainbow' for _ in range(5)]
    print(Colr('\n'.join(lines)).rainbow(linemode=False))

    # Rainbow should honor fore,back,styles.
    print(Colr(' ' * maxwidth).rainbow(fore='reset', spread=.5))
    print(Colr('-' * maxwidth).rainbow(back='black', offset=30))
    print(Colr('Rainbow bright.').rainbow(style='bright').center(maxwidth))
print(Colr('Rainbow bright.').rainbow(style='bright').center(maxwidth))
if __name__ == '__main__':
    # Parse CLI args with docopt, run the demos, exit with main's status.
    mainret = main(docopt(USAGESTR, version=VERSIONSTR))
    sys.exit(mainret)
| Python | 0 | |
a02a46752d954c29a65bf8bc5b88fa3545315175 | Add unit tests for timestr() | lib/svtplay_dl/tests/utils.py | lib/svtplay_dl/tests/utils.py | #!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framwork doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
import svtplay_dl.utils
class timestrTest(unittest.TestCase):
    """Tests for svtplay_dl.utils.timestr.

    The asserted values show the contract: input is a time in
    milliseconds, output is a subtitle-style "HH:MM:SS,cc" string
    (centisecond precision, comma separator).
    """
    def test_1(self):
        self.assertEqual(svtplay_dl.utils.timestr(1), "00:00:00,00")

    def test_100(self):
        self.assertEqual(svtplay_dl.utils.timestr(100), "00:00:00,10")

    def test_3600(self):
        self.assertEqual(svtplay_dl.utils.timestr(3600), "00:00:03,60")

    def test_3600000(self):
        self.assertEqual(svtplay_dl.utils.timestr(3600000), "01:00:00,00")
| Python | 0 | |
46c036cad1323d55c61f546b5cd6174739ab1b42 | add helper functions for data persistence | ws/data_persistence.py | ws/data_persistence.py | # https://github.com/usc-isi-i2/dig-etl-engine/issues/92
import json
import threading
import os
import codecs
# Atomic-replace protocol:
# 1. acquire file write lock
# 2. write to file.new
# 3. acquire replace lock
# 4. rename file to file.old
# 5. rename file.new to file
# 6. remove file.old
# 7. release replace lock and write lock
def dump_data(data, file_path, write_lock, replace_lock):
    """Atomically persist *data* to *file_path* using the protocol above.

    Any exception during the write/replace is printed and swallowed
    (best-effort persistence).  Locks are only released if they were
    actually acquired: the original released ``replace_lock`` in
    ``finally`` even when a failure occurred before it was acquired,
    which raises on a real ``threading.Lock`` and masks the real error.
    """
    new_path = file_path + '.new'
    old_path = file_path + '.old'
    write_lock.acquire()
    replace_acquired = False
    try:
        with codecs.open(new_path, 'w') as f:
            f.write(data)
        replace_lock.acquire()
        replace_acquired = True
        # https://docs.python.org/2/library/os.html#os.rename
        # On Unix, if dst exists and is a file,
        # it will be replaced silently if the user has permission.
        os.rename(file_path, old_path)
        os.rename(new_path, file_path)
        os.remove(old_path)
    except Exception as e:
        print(e)
    finally:
        if replace_acquired:
            replace_lock.release()
        write_lock.release()
# when starting:
# if only file exists, correct.
# if both file.new and file.old exist, ignore file.old and rename file.new
#   to file (shut down in the middle of replacing, file.new is complete)
# if both file.new and file exist, ignore file.new (shut down in the middle
#   of generating file.new).
# if both file and file.old exist, remove file.old and use file (shut down
#   after the replace but before cleanup; file is complete)
# if only file.new exists, error (user deletion)
# if only file.old exists, error (user deletion)
# if all three exist, error (user operation, system error)
def read_data(file_path):
    """Read back data written by ``dump_data``, recovering from any
    crash state the protocol can leave behind (see the cases above).

    Returns the file contents, or None for unrecoverable states.
    """
    new_path = file_path + '.new'
    old_path = file_path + '.old'
    has_file = os.path.exists(file_path)
    has_new = os.path.exists(new_path)
    has_old = os.path.exists(old_path)
    if has_file and not has_new and not has_old:
        # Clean state.
        pass
    elif not has_file and has_old and has_new:
        # Crashed between the two renames; file.new is the complete copy.
        os.remove(old_path)
        os.rename(new_path, file_path)
    elif has_file and not has_old and has_new:
        # Crashed while generating file.new; discard the partial copy.
        os.remove(new_path)
    elif has_file and has_old and not has_new:
        # Bug fix: crashed after the replace but before removing file.old
        # (between protocol steps 5 and 6).  ``file`` is complete; the
        # original returned None here and lost readable data.
        os.remove(old_path)
    else:
        return
    with codecs.open(file_path, 'r') as f:
        return f.read()
| Python | 0.000001 | |
1f94d2a6597f8ddf5d544afc52f6d627085deaad | Add script school-exclusion.py | school-exclusion.py | school-exclusion.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import time
from codecs import open
from argparse import ArgumentParser
EXCLUEDE_FLAG = False
def duration(start, end):
    """Format the elapsed time between *start* and *end* (seconds)
    as an "H:MM:SS" string."""
    elapsed = end - start
    hour, remainder = divmod(elapsed, 3600)
    minute, second = divmod(remainder, 60)
    return "%d:%02d:%02d" % (hour, minute, second)
def load_dict(tsv_file):
    """Load the school list from *tsv_file* (one school name per line) and
    build a synonym map from alias -> canonical school name.

    Returns (school_set, synonymy_map).

    NOTE(review): the blocks of ``assert "" in synonymy_map`` and
    ``synonymy_map[""] = ""`` look like unfinished placeholders for more
    aliases; the empty-string asserts only pass if the TSV contains an
    empty entry — confirm intent before relying on this.
    """
    school_set = set()
    with open(tsv_file, 'r') as fd:
        for row in fd:
            splited_row = row.strip().split()
            # Each line must hold exactly one school name.
            assert len(splited_row) == 1
            school = splited_row.pop(0)
            school_set.add(school)
    # Start with the identity mapping, then add known aliases below.
    synonymy_map = dict((school, school) for school in school_set)
    assert "中国地质大学(北京)" in synonymy_map
    assert "大连理工大学" in synonymy_map
    assert "复旦大学" in synonymy_map
    assert "中南财经政法大学" in synonymy_map
    assert "华中科技大学" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    assert "" in synonymy_map
    synonymy_map["中国地质大学"] = "中国地质大学(北京)"
    synonymy_map["大连理工"] = "大连理工大学"
    synonymy_map["上海复旦大学"] = "复旦大学"
    synonymy_map["复旦"] = "复旦大学"
    synonymy_map["中南财经"] = "中南财经政法大学"
    synonymy_map["中南财经大学"] = "中南财经政法大学"
    synonymy_map["华中理工大学"] = "华中科技大学"
    synonymy_map["华中科大"] = "华中科技大学"
    synonymy_map["华中理工"] = "华中科技大学"
    synonymy_map["华中科技"] = "华中科技大学"
    synonymy_map["华中科"] = "华中科技大学"
    synonymy_map["华中大"] = "华中科技大学"
    synonymy_map["华科大"] = "华中科技大学"
    synonymy_map["华科"] = "华中科技大学"
    synonymy_map["华中科技"] = "华中科技大学"
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    synonymy_map[""] = ""
    return school_set, synonymy_map
def judge_relation(query_school_set, bidword_school_set, synonymy_map):
    """Return True when the canonicalized query schools are a subset of
    the canonicalized bidword schools (aliases resolved via synonymy_map)."""
    canonical_query = {synonymy_map[school] for school in query_school_set}
    canonical_bidword = {synonymy_map[school] for school in bidword_school_set}
    return canonical_query.issubset(canonical_bidword)
def main():
    """Filter a query->bidword sim file by school-name consistency.

    For each input line ``query<TAB>bid1;bid2;...`` a bidword is kept when
    it mentions no school, or when every canonical school in the query is
    also mentioned by the bidword.  With EXCLUEDE_FLAG (sic) set, the
    excluded bidwords are printed instead for debugging.  Python 2 script
    (print statements).
    """
    parser = ArgumentParser()
    parser.add_argument("tsv_file", help = "china school file in tsv format")
    parser.add_argument("sim_file", help = "query to bid sim file in tsv format")
    args = parser.parse_args()
    tsv_file = args.tsv_file
    sim_file = args.sim_file
    start_time = time()
    school_set, synonymy_map = load_dict(tsv_file)
    end_time = time()
    # NOTE(review): start_time/end_time are collected (see duration()) but
    # never reported anywhere.
    for school in school_set:
        assert school in synonymy_map
    with open(sim_file, 'r') as fd:
        for line in fd:
            splited_line = line.strip().split("\t")
            if len(splited_line) < 2:
                continue  # need at least a query field and one bidword field
            query = splited_line.pop(0)
            bidword_list = "".join(splited_line).strip(";").split(";")
            # Collect schools mentioned in the whitespace-segmented query.
            query_school_set = set()
            for query_seg in query.split():
                if query_seg in synonymy_map:
                    query_school_set.add(query_seg)
            if len(query_school_set) == 0:
                # Query mentions no school: nothing to filter out.
                if not EXCLUEDE_FLAG:
                    print "%s\t%s" % (query, ";".join(bidword_list))
                continue
            res_list = []
            exc_list = [] # for debug
            for bidword in bidword_list:
                bidword_school_set = set()
                for bidword_seg in bidword.split():
                    if bidword_seg in synonymy_map:
                        bidword_school_set.add(bidword_seg)
                if len(bidword_school_set) == 0:
                    # Bidword mentions no school: always keep it.
                    res_list.append(bidword)
                    continue
                if judge_relation(query_school_set, bidword_school_set, synonymy_map):
                    res_list.append(bidword)
                else:
                    exc_list.append(bidword)
            # Every bidword must land in exactly one of the two lists.
            assert len(res_list) + len(exc_list) == len(bidword_list)
            if EXCLUEDE_FLAG:
                if len(exc_list) > 0:
                    print "%s\t%s" % (query, ";".join(exc_list))
            else:
                if len(res_list) > 0:
                    print "%s\t%s" % (query, ";".join(res_list))

if __name__ == "__main__":
    main()
| Python | 0.000002 | |
327b74d5e0328e6415520b907e4c43ed8cb54cf2 | add sample that fetches the graph and renders it as an ascii tree | examples/fetchDebianDependencyGraph.py | examples/fetchDebianDependencyGraph.py | #!/usr/bin/python
import sys
from pyArango.connection import *
from pyArango.graph import *
from asciitree import *
# Connect to ArangoDB and look up the imported dependency graph.
# NOTE(review): the database name 'ddependencyGrahp' looks misspelled but
# must match whatever the importer created -- confirm before renaming.
conn = Connection(username="root", password="")
db = conn["ddependencyGrahp"]
if not db.hasGraph('debian_dependency_graph'):
    raise Exception("didn't find the debian dependency graph, please import first!")
ddGraph = db.graphs['debian_dependency_graph']

# AQL traversal: every path 1..2 edges away from the start package, in
# either direction (ANY) along 'Depends' edges.
graphQuery = '''
FOR package, depends, path IN
 1..2 ANY
@startPackage Depends RETURN path
'''

# First CLI argument is the package name to start the traversal from.
startNode = sys.argv[1]
bindVars = { "startPackage": "packages/" + startNode }
queryResult = db.AQLQuery(graphQuery, bindVars=bindVars, rawResults=True)
# sub iterateable object to build up the tree for draw_tree:
class Node(object):
    """A named tree node used to fold graph paths into an ascii tree."""

    def __init__(self, name, children):
        self.name = name
        self.children = children

    def getChild(self, searchName):
        """Return the direct child named *searchName*, or None if absent."""
        matches = (child for child in self.children if child.name == searchName)
        return next(matches, None)

    def __str__(self):
        return self.name
def iteratePath(path, depth, currentNode):
    """Merge one edge path into the tree rooted at *currentNode*.

    *path* is a list of edge documents (each carrying a 'name'); the edge
    at index *depth* becomes (or reuses) a child of *currentNode*, and the
    remainder of the path is merged below it recursively.
    """
    pname = path[depth]['name']
    subNode = currentNode.getChild(pname)
    if subNode is None:  # idiomatic identity test (was: == None)
        subNode = Node(pname, [])
        currentNode.children.append(subNode)
    if len(path) > depth + 1:
        iteratePath(path, depth + 1, subNode)
# Now we fold the paths substructure into the tree:
rootNode = Node(startNode, [])
for path in queryResult:
    p = path['edges']  # list of edge documents along this traversal path
    iteratePath(p, 0, rootNode)

# Render the accumulated tree as ascii art (Python 2 print statement).
print draw_tree(rootNode)
| Python | 0 | |
8e73752e9242796a933d3566eb4a5e4470f13d5e | Create sequences.py | sequences.py | sequences.py | import random
import sys
import os
# User input
user_input = input("Type in 5 integers of any sequence separated by commas. Example: 1,2,3,4,5: ")
list_int = list(map(int, user_input.split(",")))

# First, second and third finite differences of the sequence.
diff1 = [b - a for a, b in zip(list_int, list_int[1:])]
diff2 = [b - a for a, b in zip(diff1, diff1[1:])]
diff3 = [b - a for a, b in zip(diff2, diff2[1:])]

# Arithmetic: every first difference is the same.  (The original code
# only compared the first two differences.)
if all(d == diff1[0] for d in diff1):
    print("Arithmetic Sequence")

# Geometric: constant ratio between consecutive terms, checked with
# cross-multiplication so no floats are involved.  (The original code
# hard-coded a ratio of 2.)
if all(x != 0 for x in list_int) and all(
        list_int[i + 1] * list_int[i - 1] == list_int[i] * list_int[i]
        for i in range(1, len(list_int) - 1)):
    print("This is a Geometric Sequence")

# Quadratic: constant second difference.  (The original comparisons were
# tautologies and printed for every input.)
if all(d == diff2[0] for d in diff2):
    print("This is a Quadratic Sequence")

# Cubic: constant third difference.
if all(d == diff3[0] for d in diff3):
    print("This is a Cubic Sequence")

# Fibonacci: every term from the third onward is the sum of the previous
# two.  (The original only verified two of the three possible positions.)
if all(list_int[i] == list_int[i - 1] + list_int[i - 2]
       for i in range(2, len(list_int))):
    print("Fibonacci Sequence")
| Python | 0.000009 | |
d6db1d0b81211a80884131b10212195ab38f99ad | Fix a conflict with IPython. | dosagelib/output.py | dosagelib/output.py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2005-2016 Tobias Gruetzmacher
import time
import sys
import os
import threading
import traceback
import codecs
from .ansicolor import Colorizer
lock = threading.Lock()
def get_threadname():
    """Return name of current thread."""
    # Thread.name is the supported spelling; getName() is deprecated.
    return threading.current_thread().name
class Output(object):
    """Print output with context, indentation and optional timestamps."""

    def __init__(self, stream=None):
        """Initialize context and indentation.

        @stream: target stream; defaults to a codecs writer wrapped
        around stdout so unencodable characters are replaced instead of
        raising.
        """
        self.context = None        # message prefix; None -> use thread name
        self.level = 0             # verbosity threshold for write()
        self.timestamps = False    # prepend HH:MM:SS when True
        if stream is None:
            if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
                self.encoding = sys.stdout.encoding
            else:
                self.encoding = 'utf-8'
            # On Python 3 write to the underlying byte buffer so the
            # codecs writer below controls the encoding.
            if hasattr(sys.stdout, 'buffer'):
                stream = sys.stdout.buffer
            else:
                stream = sys.stdout
            # 'replace' substitutes characters the encoding cannot represent.
            stream = codecs.getwriter(self.encoding)(stream, 'replace')
        self.setStream(stream)

    def setStream(self, stream):
        """Wrap *stream* in a Colorizer and route all output through it."""
        self.stream = Colorizer(stream)

    def info(self, s, level=0):
        """Write an informational message."""
        self.write(s, level=level)

    def debug(self, s, level=2):
        """Write a debug message."""
        self.write(s, level=level, color='white')

    def warn(self, s):
        """Write a warning message."""
        self.write(u"WARN: %s" % s, color='bold;yellow')

    def error(self, s, tb=None):
        """Write an error message.

        The *tb* parameter is accepted for API compatibility but unused.
        """
        self.write(u"ERROR: %s" % s, color='light;red')

    def exception(self, s):
        """Write error message with traceback info."""
        self.error(s)
        # NOTE(review): 'type' shadows the builtin of the same name here.
        type, value, tb = sys.exc_info()
        self.writelines(traceback.format_stack(), 1)
        self.writelines(traceback.format_tb(tb)[1:], 1)
        self.writelines(traceback.format_exception_only(type, value), 1)

    def write(self, s, level=0, color=None):
        """Write message with indentation, context and optional timestamp."""
        if level > self.level:
            return  # message is more verbose than the configured level
        if self.timestamps:
            timestamp = time.strftime(u'%H:%M:%S ')
        else:
            timestamp = u''
        with lock:
            # An empty-string context suppresses the prefix entirely.
            if self.context:
                self.stream.write(u'%s%s> ' % (timestamp, self.context))
            elif self.context is None:
                self.stream.write(u'%s%s> ' % (timestamp, get_threadname()))
            self.stream.write(u'%s' % s, color=color)
            # Python 2/3 compatibility: prefer unicode when it exists.
            try:
                text_type = unicode
            except NameError:
                text_type = str
            self.stream.write(text_type(os.linesep))
            self.stream.flush()

    def writelines(self, lines, level=0):
        """Write multiple messages."""
        for line in lines:
            for line in line.rstrip(u'\n').split(u'\n'):
                self.write(line.rstrip(u'\n'), level=level)


# Module-level singleton used by the rest of the package.
out = Output()
| # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
import time
import sys
import os
import threading
import traceback
import codecs
from .ansicolor import Colorizer
lock = threading.Lock()
def get_threadname():
    """Name of the thread this call executes on."""
    current = threading.current_thread()
    return current.getName()
class Output(object):
    """Print output with context, indentation and optional timestamps."""

    def __init__(self, stream=None):
        """Initialize context and indentation.

        @stream: target stream; defaults to a codecs writer wrapped
        around stdout so unencodable characters are replaced instead of
        raising.
        """
        self.context = None        # message prefix; None -> use thread name
        self.level = 0             # verbosity threshold for write()
        self.timestamps = False    # prepend HH:MM:SS when True
        if stream is None:
            if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
                self.encoding = sys.stdout.encoding
            else:
                self.encoding = 'utf-8'
            # Python 3 exposes the raw byte stream as sys.stdout.buffer;
            # wrap it so the codecs writer below controls the encoding.
            if sys.version_info[0] >= 3:
                stream = sys.stdout.buffer
            else:
                stream = sys.stdout
            # 'replace' substitutes characters the encoding cannot represent.
            stream = codecs.getwriter(self.encoding)(stream, 'replace')
        self.setStream(stream)

    def setStream(self, stream):
        """Wrap *stream* in a Colorizer and route all output through it."""
        self.stream = Colorizer(stream)

    def info(self, s, level=0):
        """Write an informational message."""
        self.write(s, level=level)

    def debug(self, s, level=2):
        """Write a debug message."""
        self.write(s, level=level, color='white')

    def warn(self, s):
        """Write a warning message."""
        self.write(u"WARN: %s" % s, color='bold;yellow')

    def error(self, s, tb=None):
        """Write an error message.

        The *tb* parameter is accepted for API compatibility but unused.
        """
        self.write(u"ERROR: %s" % s, color='light;red')

    def exception(self, s):
        """Write error message with traceback info."""
        self.error(s)
        # NOTE(review): 'type' shadows the builtin of the same name here.
        type, value, tb = sys.exc_info()
        self.writelines(traceback.format_stack(), 1)
        self.writelines(traceback.format_tb(tb)[1:], 1)
        self.writelines(traceback.format_exception_only(type, value), 1)

    def write(self, s, level=0, color=None):
        """Write message with indentation, context and optional timestamp."""
        if level > self.level:
            return  # message is more verbose than the configured level
        if self.timestamps:
            timestamp = time.strftime(u'%H:%M:%S ')
        else:
            timestamp = u''
        with lock:
            # An empty-string context suppresses the prefix entirely.
            if self.context:
                self.stream.write(u'%s%s> ' % (timestamp, self.context))
            elif self.context is None:
                self.stream.write(u'%s%s> ' % (timestamp, get_threadname()))
            self.stream.write(u'%s' % s, color=color)
            # Python 2/3 compatibility: prefer unicode when it exists.
            try:
                text_type = unicode
            except NameError:
                text_type = str
            self.stream.write(text_type(os.linesep))
            self.stream.flush()

    def writelines(self, lines, level=0):
        """Write multiple messages."""
        for line in lines:
            for line in line.rstrip(u'\n').split(u'\n'):
                self.write(line.rstrip(u'\n'), level=level)


# Module-level singleton used by the rest of the package.
out = Output()
ea40075f8924c2d61da8f92fe9ecf74045bbe6cc | add script to convert Tandem Repeats Finder dat format to bed format required for STRetch | scripts/TRFdat_to_bed.py | scripts/TRFdat_to_bed.py | #!/usr/bin/env python
from argparse import (ArgumentParser, FileType)
def parse_args():
    "Parse the input arguments, use '-h' for help"
    parser = ArgumentParser(description='Convert Tandem Repeat Finder (TRF) dat file to bed format with repeat units for microsatellite genotyping')
    # Both arguments are mandatory string options; declare them table-driven.
    required_options = (
        ('--dat', 'Input dat file produced by Tandem Repeat Finder (TRF) using the -d option'),
        ('--bed', 'Output bed file containing genomic locations and repeat units of microsatellites.'),
    )
    for flag, description in required_options:
        parser.add_argument(flag, type=str, required=True, help=description)
    return parser.parse_args()
### Main
def main():
    """Convert a TRF .dat file into a 5-column BED file.

    Streams the dat file, tracking the current chromosome from
    'Sequence:' header lines, and writes one
    chrom/start/end/motif/copynum row per repeat record.
    """
    # Parse command line arguments
    args = parse_args()
    datfile = args.dat
    bedfile = args.bed

    with open(bedfile, 'w') as bed:
        chrom = ""
        with open(datfile, 'r') as dat:
            for line in dat:
                splitline = line.split()
                if line.startswith("Sequence:"):
                    # New chromosome/sequence section begins.
                    chrom = line.split()[1]
                else:
                    # Catch index errors when line is blank
                    try:
                        # Check if in header sequence (all non-header lines start with an int: start pos)
                        try:
                            int(splitline[0])
                        except ValueError:
                            continue
                        # TRF dat columns: 0=start, 1=end, 3=copy number,
                        # 13=consensus repeat motif.
                        start = splitline[0]
                        end = splitline[1]
                        motif = splitline[13]
                        copynum = splitline[3]
                        bed.write('\t'.join([chrom,start,end,motif,copynum]) + '\n')
                    except IndexError:
                        pass

if __name__ == '__main__':
    main()
| Python | 0 | |
272eceebbc44bd7dc44498233a7dca5ab9c2bdd8 | add iplookup | scripts/iplookup.py | scripts/iplookup.py | import sys
import json
import numpy as np
import pandas as pd
import geoip2.database
# Expect exactly two CLI arguments: the GeoLite DB path and an ip table file.
if len(sys.argv) != 3:
    sys.exit('Please specify a GeoLite DB and an ip table.')

# GeoIP2 city database reader, shared by get_location() below.
reader = geoip2.database.Reader(sys.argv[1])
def get_name(entry, lang):
    """Return the *lang* localized name of *entry*, or 'unknown'.

    Falls back to 'unknown' when the entry has no ``names`` attribute or
    its names mapping lacks the requested language.
    """
    _absent = object()
    names = getattr(entry, 'names', _absent)
    if names is _absent:
        return 'unknown'
    if lang in names:
        return names[lang]
    return 'unknown'
def get_location(addr):
    """Resolve the IP address *addr* to a (city, latitude, longitude) tuple."""
    response = reader.city(addr)
    loc = response.location
    city_name = get_name(response.city, 'en')
    return (city_name, loc.latitude, loc.longitude)
# Load one IP address per line and resolve each to (city, lat, lng).
ip = np.loadtxt(sys.argv[2], dtype=str)
locations = map(get_location, ip)  # Python 2: map returns a list
series = pd.Series(locations)
ucounts = series.value_counts()    # unique locations with visit counts

info = []
for location, count in zip(ucounts.keys(), ucounts.get_values()):
    # NOTE(review): Series.get_values() is removed in modern pandas
    # (.to_numpy()/.values is the replacement) -- confirm pandas version.
    if location:
        info.append({'city_name': location[0],
                     'lat': location[1],
                     'long': location[-1],
                     'nb_visits': count})

# Emit the aggregated visit counts as JSON (Python 2 print statement).
print json.dumps(info)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.