commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
94af46d678055a667220e1a28da509bf507c91dd | change version | docxxslt/__init__.py | docxxslt/__init__.py | import logging
from . import engines, package
__version__ = '0.1.0'
class DocxXsltTemplate(object):
"""Docx template renderer"""
main_document = 'word/document.xml'
def __init__(self, filename):
self.package = package.Package(filename)
self.package.read()
def save(self, filename=None, **kwargs):
filename = filename or self.package.filename
engine = kwargs.pop('engine', engines.DefaultEngine)
context = kwargs.pop('context')
logger = kwargs.pop('logger', logging.getLogger())
# read docx XML string
xml = self.package.get(self.main_document)
# render XML
xml = engine(logger=logger).render(xml, context)
# write docx document
self.package.update(self.main_document, xml)
self.package.write(filename)
| import logging
from . import engines, package
__version__ = '0.0.2'
class DocxXsltTemplate(object):
"""Docx template renderer"""
main_document = 'word/document.xml'
def __init__(self, filename):
self.package = package.Package(filename)
self.package.read()
def save(self, filename=None, **kwargs):
filename = filename or self.package.filename
engine = kwargs.pop('engine', engines.DefaultEngine)
context = kwargs.pop('context')
logger = kwargs.pop('logger', logging.getLogger())
# read docx XML string
xml = self.package.get(self.main_document)
# render XML
xml = engine(logger=logger).render(xml, context)
# write docx document
self.package.update(self.main_document, xml)
self.package.write(filename)
| Python | 0.000001 |
d23ffcf338162ad78c646a69f91e4ff36c894a05 | bump to 0.78.1 | dvc/version.py | dvc/version.py | # Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess
_BASE_VERSION = "0.78.1"
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
)
def _is_git_repo(dir_path):
"""Is the given directory version-controlled with git?"""
return os.path.exists(os.path.join(dir_path, ".git"))
def _have_git():
"""Can we run the git executable?"""
try:
subprocess.check_output(["git", "--help"])
return True
except subprocess.CalledProcessError:
return False
except OSError:
return False
def _is_release(dir_path, base_version):
try:
output = subprocess.check_output(
["git", "describe", "--tags", "--exact-match"],
cwd=dir_path,
stderr=subprocess.STDOUT,
).decode("utf-8")
tag = output.strip()
return tag == base_version
except subprocess.CalledProcessError:
return False
def _git_revision(dir_path):
"""Get the SHA-1 of the HEAD of a git repository."""
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=dir_path
).strip()
def _is_dirty(dir_path):
"""Check whether a git repository has uncommitted changes."""
try:
subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
return False
except subprocess.CalledProcessError:
return True
__version__ = _generate_version(_BASE_VERSION)
| # Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess
_BASE_VERSION = "0.78.0"
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
)
def _is_git_repo(dir_path):
"""Is the given directory version-controlled with git?"""
return os.path.exists(os.path.join(dir_path, ".git"))
def _have_git():
"""Can we run the git executable?"""
try:
subprocess.check_output(["git", "--help"])
return True
except subprocess.CalledProcessError:
return False
except OSError:
return False
def _is_release(dir_path, base_version):
try:
output = subprocess.check_output(
["git", "describe", "--tags", "--exact-match"],
cwd=dir_path,
stderr=subprocess.STDOUT,
).decode("utf-8")
tag = output.strip()
return tag == base_version
except subprocess.CalledProcessError:
return False
def _git_revision(dir_path):
"""Get the SHA-1 of the HEAD of a git repository."""
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=dir_path
).strip()
def _is_dirty(dir_path):
"""Check whether a git repository has uncommitted changes."""
try:
subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
return False
except subprocess.CalledProcessError:
return True
__version__ = _generate_version(_BASE_VERSION)
| Python | 0.000007 |
43a833dec24f4e0a7dc1d8494a5ad1b44113db15 | bump to 0.40.0 | dvc/version.py | dvc/version.py | # Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess
_BASE_VERSION = "0.40.0"
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
)
def _is_git_repo(dir_path):
"""Is the given directory version-controlled with git?"""
return os.path.exists(os.path.join(dir_path, ".git"))
def _have_git():
"""Can we run the git executable?"""
try:
subprocess.check_output(["git", "--help"])
return True
except subprocess.CalledProcessError:
return False
except OSError:
return False
def _is_release(dir_path, base_version):
try:
output = subprocess.check_output(
["git", "describe", "--tags", "--exact-match"],
cwd=dir_path,
stderr=subprocess.STDOUT,
).decode("utf-8")
tag = output.strip()
return tag == base_version
except subprocess.CalledProcessError:
return False
def _git_revision(dir_path):
"""Get the SHA-1 of the HEAD of a git repository."""
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=dir_path
).strip()
def _is_dirty(dir_path):
"""Check whether a git repository has uncommitted changes."""
try:
subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
return False
except subprocess.CalledProcessError:
return True
__version__ = _generate_version(_BASE_VERSION)
| # Used in setup.py, so don't pull any additional dependencies
#
# Based on:
# - https://github.com/python/mypy/blob/master/mypy/version.py
# - https://github.com/python/mypy/blob/master/mypy/git.py
import os
import subprocess
_BASE_VERSION = "0.35.7"
def _generate_version(base_version):
"""Generate a version with information about the git repository"""
pkg_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if not _is_git_repo(pkg_dir) or not _have_git():
return base_version
if _is_release(pkg_dir, base_version) and not _is_dirty(pkg_dir):
return base_version
return "{base_version}+{short_sha}{dirty}".format(
base_version=base_version,
short_sha=_git_revision(pkg_dir).decode("utf-8")[0:6],
dirty=".mod" if _is_dirty(pkg_dir) else "",
)
def _is_git_repo(dir_path):
"""Is the given directory version-controlled with git?"""
return os.path.exists(os.path.join(dir_path, ".git"))
def _have_git():
"""Can we run the git executable?"""
try:
subprocess.check_output(["git", "--help"])
return True
except subprocess.CalledProcessError:
return False
except OSError:
return False
def _is_release(dir_path, base_version):
try:
output = subprocess.check_output(
["git", "describe", "--tags", "--exact-match"],
cwd=dir_path,
stderr=subprocess.STDOUT,
).decode("utf-8")
tag = output.strip()
return tag == base_version
except subprocess.CalledProcessError:
return False
def _git_revision(dir_path):
"""Get the SHA-1 of the HEAD of a git repository."""
return subprocess.check_output(
["git", "rev-parse", "HEAD"], cwd=dir_path
).strip()
def _is_dirty(dir_path):
"""Check whether a git repository has uncommitted changes."""
try:
subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
return False
except subprocess.CalledProcessError:
return True
__version__ = _generate_version(_BASE_VERSION)
| Python | 0.000005 |
df26ac758c6a38f2e0128e3511db009f0764947f | Bump version to 2.0.0b6 | pyquil/__init__.py | pyquil/__init__.py | __version__ = "2.0.0b6"
from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
| __version__ = "2.0.0b6.dev0"
from pyquil.quil import Program
from pyquil.api import list_quantum_computers, get_qc
| Python | 0 |
77db5c329dff3e76284efd5f7a70e993ca35e9a6 | correct filters to find users | corehq/apps/users/management/commands/add_multi_location_property.py | corehq/apps/users/management/commands/add_multi_location_property.py | from django.core.management.base import BaseCommand
from corehq.apps.es import filters, UserES, users as user_filters
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import user_location_data
from corehq.util.couch import iter_update, DocUpdate
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
args = ""
help = ("(Migration) Autofill the new field assigned_location_ids to existing users")
def handle(self, *args, **options):
self.options = options
user_ids = with_progress_bar(self.get_user_ids())
iter_update(CouchUser.get_db(), self.migrate_user, user_ids, verbose=True)
def migrate_user(self, doc):
if doc['doc_type'] == 'WebUser':
return self.migrate_web_user(doc)
elif doc['doc_type'] == 'CommCareUser':
return self.migrate_cc_user(doc)
def migrate_cc_user(self, doc):
# skip if doesn't have location
if not doc['location_id']:
return
# skip if already migrated
if doc['location_id'] in doc.get('assigned_location_ids', []):
user_data = doc.get('user_data', {})
expected = user_location_data(doc['assigned_location_ids'])
actual = user_data.get('commcare_location_ids', None)
if expected == actual:
return
apply_migration(doc)
apply_migration(doc['domain_membership'])
if doc['assigned_location_ids']:
doc['user_data'].update({
'commcare_location_ids': user_location_data(doc['assigned_location_ids'])
})
return DocUpdate(doc)
def migrate_web_user(self, doc):
def should_skip(dm):
if not dm['location_id']:
return True
if dm['location_id'] in dm.get('assigned_location_ids', []):
return True
if all([should_skip(dm) for dm in doc['domain_memberships']]):
return
for membership in doc['domain_memberships']:
if not should_skip(membership):
apply_migration(membership)
return DocUpdate(doc)
def get_user_ids(self):
res = (UserES()
.OR(filters.AND(user_filters.mobile_users(), filters.non_null('location_id')),
filters.AND(user_filters.web_users(), filters.non_null('domain_memberships.location_id')))
.exclude_source()
.run())
return list(res.doc_ids)
def apply_migration(doc):
# doc can be a user dict or a domain_membership dict
if doc['location_id']:
if 'assigned_location_ids' in doc:
doc['assigned_location_ids'].append(doc['location_id'])
else:
doc['assigned_location_ids'] = [doc['location_id']]
| from django.core.management.base import BaseCommand
from corehq.apps.es import UserES, users as user_filters
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import user_location_data
from corehq.util.couch import iter_update, DocUpdate
from corehq.util.log import with_progress_bar
class Command(BaseCommand):
args = ""
help = ("(Migration) Autofill the new field assigned_location_ids to existing users")
def handle(self, *args, **options):
self.options = options
user_ids = with_progress_bar(self.get_user_ids())
iter_update(CouchUser.get_db(), self.migrate_user, user_ids, verbose=True)
def migrate_user(self, doc):
if doc['doc_type'] == 'WebUser':
return self.migrate_web_user(doc)
elif doc['doc_type'] == 'CommCareUser':
return self.migrate_cc_user(doc)
def migrate_cc_user(self, doc):
# skip if doesn't have location
if not doc['location_id']:
return
# skip if already migrated
if doc['location_id'] in doc.get('assigned_location_ids', []):
user_data = doc.get('user_data', {})
expected = user_location_data(doc['assigned_location_ids'])
actual = user_data.get('commcare_location_ids', None)
if expected == actual:
return
apply_migration(doc)
apply_migration(doc['domain_membership'])
if doc['assigned_location_ids']:
doc['user_data'].update({
'commcare_location_ids': user_location_data(doc['assigned_location_ids'])
})
return DocUpdate(doc)
def migrate_web_user(self, doc):
def should_skip(dm):
if not dm['location_id']:
return True
if dm['location_id'] in dm.get('assigned_location_ids', []):
return True
if all([should_skip(dm) for dm in doc['domain_memberships']]):
return
for membership in doc['domain_memberships']:
if not should_skip(membership):
apply_migration(membership)
return DocUpdate(doc)
def get_user_ids(self):
res = (UserES()
.OR(user_filters.web_users(), user_filters.mobile_users())
.non_null('location_id')
.non_null('domain_memberships.location_id')
.exclude_source()
.run())
return list(res.doc_ids)
def apply_migration(doc):
# doc can be a user dict or a domain_membership dict
if doc['location_id']:
if 'assigned_location_ids' in doc:
doc['assigned_location_ids'].append(doc['location_id'])
else:
doc['assigned_location_ids'] = [doc['location_id']]
| Python | 0.000002 |
d7c5ca1a627ef1c31ffddba06cce63da768954ec | Use logging instead of print | pystatsd/statsd.py | pystatsd/statsd.py | # statsd.py
# Steve Ivy <steveivy@gmail.com>
# http://monkinetic.com
import logging
import socket
import random
# Sends statistics to the stats daemon over UDP
class Client(object):
def __init__(self, host='localhost', port=8125):
"""
Create a new Statsd client.
* host: the host where statsd is listening, defaults to localhost
* port: the port where statsd is listening, defaults to 8125
>>> from pystatsd import statsd
>>> stats_client = statsd.Statsd(host, port)
"""
self.host = host
self.port = port
self.log = logging.getLogger("pystatsd.client")
def timing(self, stat, time, sample_rate=1):
"""
Log timing information for a single stat
>>> from pystatsd import statsd
>>> statsd_client.timing('some.time','500|ms')
"""
stats = {stat: "%d|ms" % time}
self.send(stats, sample_rate)
def increment(self, stats, sample_rate=1):
"""
Increments one or more stats counters
>>> statsd_client.increment('some.int')
>>> statsd_client.increment('some.int',0.5)
"""
self.update_stats(stats, 1, sample_rate)
def decrement(self, stats, sample_rate=1):
"""
Decrements one or more stats counters
>>> statsd_client.decrement('some.int')
"""
self.update_stats(stats, -1, sample_rate)
def update_stats(self, stats, delta=1, sampleRate=1):
"""
Updates one or more stats counters by arbitrary amounts
>>> statsd_client.update_stats('some.int',10)
"""
if (type(stats) is not list):
stats = [stats]
data = {}
for stat in stats:
data[stat] = "%s|c" % delta
self.send(data, sampleRate)
def send(self, data, sample_rate=1):
"""
Squirt the metrics over UDP
"""
addr=(self.host, self.port)
sampled_data = {}
if(sample_rate < 1):
if random.random() <= sample_rate:
for stat, value in data.iteritems():
value = data[stat]
sampled_data[stat] = "%s|@%s" %(value, sample_rate)
else:
sampled_data=data
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
for stat, value in sampled_data.iteritems():
send_data = "%s:%s" % (stat, value)
udp_sock.sendto(send_data, addr)
except:
self.log.exception("unexpected error")
pass # we don't care | # statsd.py
# Steve Ivy <steveivy@gmail.com>
# http://monkinetic.com
import socket
import random
# Sends statistics to the stats daemon over UDP
class Client(object):
def __init__(self, host='localhost', port=8125):
"""
Create a new Statsd client.
* host: the host where statsd is listening, defaults to localhost
* port: the port where statsd is listening, defaults to 8125
>>> from pystatsd import statsd
>>> stats_client = statsd.Statsd(host, port)
"""
self.host = host
self.port = port
def timing(self, stat, time, sample_rate=1):
"""
Log timing information for a single stat
>>> from pystatsd import statsd
>>> statsd_client.timing('some.time','500|ms')
"""
stats = {stat: "%d|ms" % time}
self.send(stats, sample_rate)
def increment(self, stats, sample_rate=1):
"""
Increments one or more stats counters
>>> statsd_client.increment('some.int')
>>> statsd_client.increment('some.int',0.5)
"""
self.update_stats(stats, 1, sample_rate)
def decrement(self, stats, sample_rate=1):
"""
Decrements one or more stats counters
>>> statsd_client.decrement('some.int')
"""
self.update_stats(stats, -1, sample_rate)
def update_stats(self, stats, delta=1, sampleRate=1):
"""
Updates one or more stats counters by arbitrary amounts
>>> statsd_client.update_stats('some.int',10)
"""
if (type(stats) is not list):
stats = [stats]
data = {}
for stat in stats:
data[stat] = "%s|c" % delta
self.send(data, sampleRate)
def send(self, data, sample_rate=1):
"""
Squirt the metrics over UDP
"""
addr=(self.host, self.port)
sampled_data = {}
if(sample_rate < 1):
if random.random() <= sample_rate:
for stat, value in data.iteritems():
value = data[stat]
sampled_data[stat] = "%s|@%s" %(value, sample_rate)
else:
sampled_data=data
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
for stat, value in sampled_data.iteritems():
send_data = "%s:%s" % (stat, value)
udp_sock.sendto(send_data, addr)
except:
import sys
from pprint import pprint
print "Unexpected error:", pprint(sys.exc_info())
pass # we don't care | Python | 0.000001 |
45807fba2127612f2e7083250bf87720acc511c0 | Define codename for editor. | plugin/floo/editor.py | plugin/floo/editor.py | import sys
from collections import defaultdict
import time
import vim
try:
from .common import shared as G
from .common import msg
except (ImportError, ValueError):
import common.shared as G
from common import msg
timeouts = defaultdict(list)
top_timeout_id = 0
cancelled_timeouts = set()
calling_timeouts = False
line_endings = "\n"
welcome_text = 'Welcome %s!\n\nYou are all set to collaborate. You should check out our docs at https://%s/help/plugins/#sublime-usage. \
You must run \':FlooCompleteSignup\' before you can login to floobits.com.'
def name():
if sys.version_info < (3, 0):
py_version = 2
else:
py_version = 3
return 'Vim-py%s' % py_version
def codename():
return 'vim'
def windows(*args, **kwargs):
return []
def set_timeout(func, timeout, *args, **kwargs):
global top_timeout_id
timeout_id = top_timeout_id
top_timeout_id + 1
if top_timeout_id > 100000:
top_timeout_id = 0
def timeout_func():
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
then = time.time() + (timeout / 1000.0)
timeouts[then].append(timeout_func)
return timeout_id
def cancel_timeout(timeout_id):
if timeout_id in timeouts:
cancelled_timeouts.add(timeout_id)
def call_timeouts():
global calling_timeouts
if calling_timeouts:
return
calling_timeouts = True
now = time.time()
to_remove = []
for t, tos in timeouts.items():
if now >= t:
for timeout in tos:
timeout()
to_remove.append(t)
for k in to_remove:
del timeouts[k]
calling_timeouts = False
def error_message(*args, **kwargs):
editor = getattr(G, 'editor', None)
if editor:
editor.error_message(*args, **kwargs)
else:
print(args, kwargs)
def status_message(msg):
editor = getattr(G, 'editor', None)
if editor:
editor.status_message(msg)
else:
print(msg)
def message_dialog(message):
msg.log(message)
def vim_choice(prompt, default, choices):
default = choices.index(default) + 1
choices_str = '\n'.join(['&%s' % choice for choice in choices])
try:
choice = int(vim.eval('confirm("%s", "%s", %s)' % (prompt, choices_str, default)))
except KeyboardInterrupt:
return None
if choice == 0:
return None
return choices[choice - 1]
def ok_cancel_dialog(prompt):
choice = vim_choice(prompt, 'ok', ['ok', 'cancel'])
return choice == 'ok'
def open_file(filename):
current_buffer = vim.eval('expand("%:p")')
if current_buffer != filename:
vim.command(':silent! edit! %s | :silent! :filetype detect' % filename)
def platform():
return sys.platform
def get_line_endings(path=None):
return line_endings
| import sys
from collections import defaultdict
import time
import vim
try:
from .common import shared as G
from .common import msg
except (ImportError, ValueError):
import common.shared as G
from common import msg
timeouts = defaultdict(list)
top_timeout_id = 0
cancelled_timeouts = set()
calling_timeouts = False
line_endings = "\n"
welcome_text = 'Welcome %s!\n\nYou are all set to collaborate. You should check out our docs at https://%s/help/plugins/#sublime-usage. \
You must run \':FlooCompleteSignup\' before you can login to floobits.com.'
def name():
if sys.version_info < (3, 0):
py_version = 2
else:
py_version = 3
return 'Vim-py%s' % py_version
def windows(*args, **kwargs):
return []
def set_timeout(func, timeout, *args, **kwargs):
global top_timeout_id
timeout_id = top_timeout_id
top_timeout_id + 1
if top_timeout_id > 100000:
top_timeout_id = 0
def timeout_func():
if timeout_id in cancelled_timeouts:
cancelled_timeouts.remove(timeout_id)
return
func(*args, **kwargs)
then = time.time() + (timeout / 1000.0)
timeouts[then].append(timeout_func)
return timeout_id
def cancel_timeout(timeout_id):
if timeout_id in timeouts:
cancelled_timeouts.add(timeout_id)
def call_timeouts():
global calling_timeouts
if calling_timeouts:
return
calling_timeouts = True
now = time.time()
to_remove = []
for t, tos in timeouts.items():
if now >= t:
for timeout in tos:
timeout()
to_remove.append(t)
for k in to_remove:
del timeouts[k]
calling_timeouts = False
def error_message(*args, **kwargs):
editor = getattr(G, 'editor', None)
if editor:
editor.error_message(*args, **kwargs)
else:
print(args, kwargs)
def status_message(msg):
editor = getattr(G, 'editor', None)
if editor:
editor.status_message(msg)
else:
print(msg)
def message_dialog(message):
msg.log(message)
def vim_choice(prompt, default, choices):
default = choices.index(default) + 1
choices_str = '\n'.join(['&%s' % choice for choice in choices])
try:
choice = int(vim.eval('confirm("%s", "%s", %s)' % (prompt, choices_str, default)))
except KeyboardInterrupt:
return None
if choice == 0:
return None
return choices[choice - 1]
def ok_cancel_dialog(prompt):
choice = vim_choice(prompt, 'ok', ['ok', 'cancel'])
return choice == 'ok'
def open_file(filename):
current_buffer = vim.eval('expand("%:p")')
if current_buffer != filename:
vim.command(':silent! edit! %s | :silent! :filetype detect' % filename)
def platform():
return sys.platform
def get_line_endings(path=None):
return line_endings
| Python | 0.000001 |
eed8d2b6da6e812cf50f83cf6776c8f0fe63d0f2 | correct naming of services | restservice/__init__.py | restservice/__init__.py | SERVICE_CHOICES = ((u'f2dhis2', u'f2dhis2'),(u'generic_json', u'JSON POST'),
(u'generic_xml', u'XML POST'),)
| SERVICE_CHOICES = ((u'f2dhis2', u'f2dhis2'),(u'json', u'json_exports'),(u'xml', u'xml_exports'),) | Python | 0.001925 |
700d7ad5a43e8483c11e19089636323c09879d80 | Add kwargs for __init__ | drivetestbed/http.py | drivetestbed/http.py | # mock http service that intercepts calls to allow Drive to work locally
import json
from urlparse import urlparse, parse_qs
from httplib2 import Response
from drivetestbed.services import ServiceDirectory
from routes import Mapper
class TestbedHttp(object):
def __init__(self, files=None, **kwargs):
self._services = ServiceDirectory(files)
def request(self, uri, method="GET", body=None, **kwargs):
parsed_uri = urlparse(uri)
if 'discovery' in parsed_uri.path:
# TODO -- use Routes for discovery service as well
resp = Response({'status': 200, 'reason': 'OK'})
fp = file("drivetestbed/schema.json", 'r')
try:
content = fp.read()
finally:
fp.close()
schema = json.loads(content)
path_dict = {}
map = Mapper()
with map.submapper(path_prefix="/drive/v2/") as m:
for r_name, r_data in schema['resources'].iteritems():
for meth_name, meth_data in r_data['methods'].iteritems():
m.connect(meth_data['path'], conditions={'method': [meth_data['httpMethod']]},
controller=r_name, action=meth_name)
self._map = map
return (resp, content)
else:
environ = {'REQUEST_METHOD': method}
matched = self._map.match(parsed_uri.path, environ=environ)
if matched:
query_params = parse_qs(parsed_uri.query)
# unwrap single value params from list
for key in query_params.keys():
if len(query_params[key]) == 1:
query_params[key] = query_params[key][0]
if body:
query_params['body'] = json.loads(body)
service = self._services.for_name(matched['controller'])
action_func = getattr(service, matched['action'])
if action_func:
del matched['controller']
del matched['action']
query_params.update(matched)
data = action_func(**query_params)
else:
return (Response({'status': 404, 'reason': 'No such action: %s' % matched['action']}), "")
resp = Response({'status': 200, 'reason': 'OK'})
return (resp, json.dumps(data))
else:
return (Response({'status': 404, 'reason': 'Bad request'}), "")
| # mock http service that intercepts calls to allow Drive to work locally
import json
from urlparse import urlparse, parse_qs
from httplib2 import Response
from drivetestbed.services import ServiceDirectory
from routes import Mapper
class TestbedHttp(object):
def __init__(self, files=None):
self._services = ServiceDirectory(files)
def request(self, uri, method="GET", body=None, **kwargs):
parsed_uri = urlparse(uri)
if 'discovery' in parsed_uri.path:
# TODO -- use Routes for discovery service as well
resp = Response({'status': 200, 'reason': 'OK'})
fp = file("drivetestbed/schema.json", 'r')
try:
content = fp.read()
finally:
fp.close()
schema = json.loads(content)
path_dict = {}
map = Mapper()
with map.submapper(path_prefix="/drive/v2/") as m:
for r_name, r_data in schema['resources'].iteritems():
for meth_name, meth_data in r_data['methods'].iteritems():
m.connect(meth_data['path'], conditions={'method': [meth_data['httpMethod']]},
controller=r_name, action=meth_name)
self._map = map
return (resp, content)
else:
environ = {'REQUEST_METHOD': method}
matched = self._map.match(parsed_uri.path, environ=environ)
if matched:
query_params = parse_qs(parsed_uri.query)
# unwrap single value params from list
for key in query_params.keys():
if len(query_params[key]) == 1:
query_params[key] = query_params[key][0]
if body:
query_params['body'] = json.loads(body)
service = self._services.for_name(matched['controller'])
action_func = getattr(service, matched['action'])
if action_func:
del matched['controller']
del matched['action']
query_params.update(matched)
data = action_func(**query_params)
else:
return (Response({'status': 404, 'reason': 'No such action: %s' % matched['action']}), "")
resp = Response({'status': 200, 'reason': 'OK'})
return (resp, json.dumps(data))
else:
return (Response({'status': 404, 'reason': 'Bad request'}), "")
| Python | 0.000002 |
1ee501468b07951ccceb263f91d5624f679f0321 | Update outputs store setup | polyaxon_client/stores/stores/outputs_store.py | polyaxon_client/stores/stores/outputs_store.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from polyaxon_client.stores.exceptions import PolyaxonStoresException
from polyaxon_client.stores.stores.base_store import Store
class OutputsStore(object):
"""
A convenient class to store experiment/job outputs to a given/configured store.
"""
def __init__(self, store=None, outputs_path=None):
self._outputs_path = outputs_path
if not store:
if outputs_path:
Store.get_store_for_path(path=outputs_path)
else:
store = Store.get_store()
if isinstance(store, Store):
self._store = store
else:
raise PolyaxonStoresException('Received an unrecognised store `{}`.'.format(store))
def set_store(self, store):
self._store = store
def set_outputs_path(self, outputs_path):
self._outputs_path = outputs_path
@property
def store(self):
return self._store
@property
def outputs_path(self):
return self._outputs_path
def upload_file(self, filename, **kwargs):
self.store.upload_file(filename, self.outputs_path, **kwargs)
def upload_dir(self, dirname, **kwargs):
self.store.upload_dir(dirname, self.outputs_path, **kwargs)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from polyaxon_client.stores.exceptions import PolyaxonStoresException
from polyaxon_client.stores.stores.base_store import Store
class OutputsStore(object):
"""
A convenient class to store experiment/job outputs to a given/configured store.
"""
def __init__(self, store=None, outputs_path=None):
self._outputs_path = outputs_path
store = store or Store.get_store_for_path(path=outputs_path)
if isinstance(store, Store):
self._store = store
else:
raise PolyaxonStoresException('Received an unrecognised store `{}`.'.format(store))
def set_store(self, store):
self._store = store
def set_outputs_path(self, outputs_path):
self._outputs_path = outputs_path
@property
def store(self):
return self._store
@property
def outputs_path(self):
return self._outputs_path
def upload_file(self, filename, **kwargs):
self.store.upload_file(filename, self.outputs_path, **kwargs)
def upload_dir(self, dirname, **kwargs):
self.store.upload_dir(dirname, self.outputs_path, **kwargs)
| Python | 0.000001 |
c37f1ca0f8fc73da95d32082ed8dfb8967e38a1c | Check status method | dvhb_hybrid/tests.py | dvhb_hybrid/tests.py | class BaseTestApi:
"""Base class to test API"""
API_KEY = 'API-KEY'
def __init__(self, client, user):
self.client = client
self.user = user
self.headers = {'content-type': 'application/json'}
@staticmethod
async def check_status(result, response=HTTPOk):
assert result.status == response.status_code, await result.text()
@staticmethod
async def prepare_result(r):
data = None
if 'application/json' in r.headers['Content-Type']:
data = await r.json()
return r, data
| class BaseTestApi:
"""Base class to test API"""
API_KEY = 'API-KEY'
def __init__(self, client, user):
self.client = client
self.user = user
self.headers = {'content-type': 'application/json'}
@staticmethod
async def prepare_result(r):
data = None
if 'application/json' in r.headers['Content-Type']:
data = await r.json()
return r, data
| Python | 0.000001 |
2acc441e70d67d035d7e1363ae8987e3117cb9ac | Rebuild FOV map rather than recreate it | roglick/systems/view.py | roglick/systems/view.py | from roglick.lib import libtcod
from roglick.engine import event
from roglick.engine.ecs import System
from roglick.components import PositionComponent,FoVComponent
from roglick.events import NewMapEvent,MapChangedEvent
class FoVSystem(System):
def __init__(self):
self._fov_algo = 0
self._light_walls = True
self._torch_radius = 5
@event.event_handler(NewMapEvent,MapChangedEvent)
def redraw_handler(self, redrawevent):
for entity, comp in self._entity_manager.get_entities_with_component(
FoVComponent):
comp[FoVComponent].x = None
comp[FoVComponent].y = None
self._build_map(comp[FoVComponent].fov)
def _build_map(self, fov_map):
current_map = self._world.current_map
width = current_map.width
height = current_map.height
for x in range(width):
for y in range(height):
libtcod.map_set_properties(
fov_map,
x,
y,
current_map.tiles[x][y].transparent,
current_map.tiles[x][y].passable)
def execute(self):
width = self._world.current_map.width
height = self._world.current_map.height
for entity, components in self._entity_manager.get_entities_with_components(
(FoVComponent,PositionComponent)):
fov = components[FoVComponent]
pos = components[PositionComponent]
if fov.fov is None:
fov.x = None
fov.y = None
fov.fov = libtcod.map_new(width, height)
self._build_map(fov.fov)
if pos.x != fov.x or pos.y != fov.y:
# Entity has moved, recompute FoV
libtcod.map_compute_fov(
fov.fov,
pos.x,
pos.y,
self._torch_radius,
self._light_walls,
self._fov_algo)
fov.x,fov.y = pos.x,pos.y
| from roglick.lib import libtcod
from roglick.engine import event
from roglick.engine.ecs import System
from roglick.components import PositionComponent,FoVComponent
from roglick.events import NewMapEvent,MapChangedEvent
class FoVSystem(System):
def __init__(self):
self._fov_algo = 0
self._light_walls = True
self._torch_radius = 5
@event.event_handler(NewMapEvent,MapChangedEvent)
def redraw_handler(self, redrawevent):
for entity, comp in self._entity_manager.get_entities_with_component(
FoVComponent):
comp[FoVComponent].fov = None
def execute(self):
current_map = self._world.current_map
width = current_map.width
height = current_map.height
for entity, components in self._entity_manager.get_entities_with_components(
(FoVComponent,PositionComponent)):
fov = components[FoVComponent]
pos = components[PositionComponent]
if fov.fov is None:
fov.x = None
fov.y = None
fov.fov = libtcod.map_new(width, height)
for x in range(width):
for y in range(height):
libtcod.map_set_properties(
fov.fov,
x,
y,
current_map.tiles[x][y].transparent,
current_map.tiles[x][y].passable)
if pos.x != fov.x or pos.y != fov.y:
# Entity has moved, recompute FoV
libtcod.map_compute_fov(
fov.fov,
pos.x,
pos.y,
self._torch_radius,
self._light_walls,
self._fov_algo)
fov.x,fov.y = pos.x,pos.y
| Python | 0 |
2641b64a712b3e766448d93cd11c097aa90cfdc5 | apply comments (def set_... --> def ...) | rootpy/plotting/func.py | rootpy/plotting/func.py | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from .. import QROOT
from ..decorators import snake_case_methods
from .base import Plottable
from ..base import NameOnlyObject
__all__ = [
'F1',
'F2',
'F3',
]
class BaseFunction(object):
class ParProxy(object):
def __init__(self, fcn, idx):
self.fcn_ = fcn
self.idx_ = idx
@property
def index(self):
return self.idx_
@property
def name(self):
return self.fcn_.GetParName(self.idx_)
@name.setter
def name(self, val):
return self.fcn_.SetParName(self.idx_, val)
@property
def value(self):
return self.fcn_.GetParameter(self.idx_)
@value.setter
def value(self, val):
self.fcn_.SetParameter(self.idx_, val)
@property
def error(self):
return self.fcn_.GetParError(self.idx_)
@error.setter
def error(self, val):
return self.fcn_.SetParError(self.idx_, val)
@property
def limits(self):
m = QROOT.Double()
M = QROOT.Double()
self.fcn_.GetParLimits(self.idx_, m, M)
return float(m), float(M)
@limits.setter
def limits(self, val):
if not hastattr(val, '__len__') and len(val) != 2:
raise RuntimeError('Function limits must be a tuple size 2')
self.fcn_.SetParLimits(self.idx_, val[0], val[1])
def __getitem__(self, value):
if isinstance(value, basestring):
idx = self.GetParNumber(value)
elif isinstance(value, int):
idx = value
else:
raise ValueError('Function index must be a integer or a string')
return BaseFunction.ParProxy(self, idx)
@snake_case_methods
class F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1):
_ROOT = QROOT.TF1
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super(F1, self).__init__(*args, name=name)
self._post_init(**kwargs)
@snake_case_methods
class F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2):
_ROOT = QROOT.TF2
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super(F2, self).__init__(*args, name=name)
self._post_init(**kwargs)
@snake_case_methods
class F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3):
_ROOT = QROOT.TF3
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super(F3, self).__init__(*args, name=name)
self._post_init(**kwargs)
| # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from .. import QROOT
from ..decorators import snake_case_methods
from .base import Plottable
from ..base import NameOnlyObject
__all__ = [
'F1',
'F2',
'F3',
]
class BaseFunction(object):
class ParProxy(object):
def __init__(self, fcn, idx):
self.fcn_ = fcn
self.idx_ = idx
@property
def index(self):
return self.idx_
@property
def name(self):
return self.fcn_.GetParName(self.idx_)
@name.setter
def set_name(self, val):
return self.fcn_.SetParName(self.idx_, val)
@property
def value(self):
return self.fcn_.GetParameter(self.idx_)
@value.setter
def set_value(self, val):
self.fcn_.SetParameter(self.idx_, val)
@property
def error(self):
return self.fcn_.GetParError(self.idx_)
@error.setter
def set_error(self, val):
return self.fcn_.SetParError(self.idx_, val)
@property
def limits(self):
m = QROOT.Double()
M = QROOT.Double()
self.fcn_.GetParLimits(self.idx_, m, M)
return float(m), float(M)
@limits.setter
def limits(self, val):
if not hastattr(val, '__len__') and len(val) != 2:
raise RuntimeError('Function limits must be a tuple size 2')
self.fcn_.SetParLimits(self.idx_, val[0], val[1])
def __getitem__(self, value):
if isinstance(value, basestring):
idx = self.GetParNumber(value)
elif isinstance(value, int):
idx = value
else:
raise ValueError('Function index must be a integer or a string')
return BaseFunction.ParProxy(self, idx)
@snake_case_methods
class F1(Plottable, NameOnlyObject, BaseFunction, QROOT.TF1):
_ROOT = QROOT.TF1
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super(F1, self).__init__(*args, name=name)
self._post_init(**kwargs)
@snake_case_methods
class F2(Plottable, NameOnlyObject, BaseFunction, QROOT.TF2):
_ROOT = QROOT.TF2
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super(F2, self).__init__(*args, name=name)
self._post_init(**kwargs)
@snake_case_methods
class F3(Plottable, NameOnlyObject, BaseFunction, QROOT.TF3):
_ROOT = QROOT.TF3
def __init__(self, *args, **kwargs):
name = kwargs.pop('name', None)
super(F3, self).__init__(*args, name=name)
self._post_init(**kwargs)
| Python | 0.000002 |
b14de33367ddf82d39ee5fe1671bc2526a5280b6 | correct module version | pos_mobile_restaurant/__manifest__.py | pos_mobile_restaurant/__manifest__.py | {
"name": """POS Mobile UI for Waiters""",
"summary": """Your Restaurant in the Mobile Version""",
"category": "Point of Sale",
"live_test_url": "http://apps.it-projects.info/shop/product/pos-mobile-ui?version=11.0",
"images": ["images/pos_mobile_restaurant.png"],
"version": "11.0.1.3.8",
"application": False,
"author": "IT-Projects LLC, Dinar Gabbasov",
"support": "pos@it-projects.info",
"website": "https://it-projects.info/team/GabbasovDinar",
"license": "LGPL-3",
"price": 100.00,
"currency": "EUR",
"depends": [
"pos_restaurant_base",
"pos_mobile",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
"views/pos_mobile_restaurant_template.xml",
"views/pos_mobile_restaurant_view.xml",
],
"qweb": [
"static/src/xml/pos.xml",
],
"demo": [
],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": True,
"installable": True,
}
| {
"name": """POS Mobile UI for Waiters""",
"summary": """Your Restaurant in the Mobile Version""",
"category": "Point of Sale",
"live_test_url": "http://apps.it-projects.info/shop/product/pos-mobile-ui?version=11.0",
"images": ["images/pos_mobile_restaurant.png"],
"version": "10.0.1.3.8",
"application": False,
"author": "IT-Projects LLC, Dinar Gabbasov",
"support": "pos@it-projects.info",
"website": "https://it-projects.info/team/GabbasovDinar",
"license": "LGPL-3",
"price": 100.00,
"currency": "EUR",
"depends": [
"pos_restaurant_base",
"pos_mobile",
],
"external_dependencies": {"python": [], "bin": []},
"data": [
"views/pos_mobile_restaurant_template.xml",
"views/pos_mobile_restaurant_view.xml",
],
"qweb": [
"static/src/xml/pos.xml",
],
"demo": [
],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": True,
"installable": True,
}
| Python | 0 |
99bb951cbbb670edb3d3361cc12a60f3cb04387d | correction to inf. probability | ndlib/models/epidemics/SIRModel.py | ndlib/models/epidemics/SIRModel.py | from ..DiffusionModel import DiffusionModel
import numpy as np
import networkx as nx
import future.utils
__author__ = "Giulio Rossetti"
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class SIRModel(DiffusionModel):
"""
Model Parameters to be specified via ModelConfig
:param beta: The infection rate (float value in [0,1])
:param gamma: The recovery rate (float value in [0,1])
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Removed": 2
}
self.parameters = {
"model": {
"beta": {
"descr": "Infection rate",
"range": [0, 1],
"optional": False},
"gamma": {
"descr": "Recovery rate",
"range": [0, 1],
"optional": False
}
},
"nodes": {},
"edges": {},
}
self.name = "SIR"
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
u_status = self.status[u]
eventp = np.random.random_sample()
neighbors = self.graph.neighbors(u)
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(u)
if u_status == 0:
infected_neighbors = len([v for v in neighbors if self.status[v] == 1])
if eventp < 1 - (1 - self.params['model']['beta']) ** infected_neighbors:
actual_status[u] = 1
elif u_status == 1:
if eventp < self.params['model']['gamma']:
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
| from ..DiffusionModel import DiffusionModel
import numpy as np
import networkx as nx
import future.utils
__author__ = "Giulio Rossetti"
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class SIRModel(DiffusionModel):
"""
Model Parameters to be specified via ModelConfig
:param beta: The infection rate (float value in [0,1])
:param gamma: The recovery rate (float value in [0,1])
"""
def __init__(self, graph):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Removed": 2
}
self.parameters = {
"model": {
"beta": {
"descr": "Infection rate",
"range": [0, 1],
"optional": False},
"gamma": {
"descr": "Recovery rate",
"range": [0, 1],
"optional": False
}
},
"nodes": {},
"edges": {},
}
self.name = "SIR"
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
u_status = self.status[u]
eventp = np.random.random_sample()
neighbors = self.graph.neighbors(u)
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(u)
if u_status == 0:
infected_neighbors = len([v for v in neighbors if self.status[v] == 1])
if eventp < self.params['model']['beta'] * infected_neighbors:
actual_status[u] = 1
elif u_status == 1:
if eventp < self.params['model']['gamma']:
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
| Python | 0.999999 |
669fa4443e9e4b551613ac1bb6b69c8818f382fc | Fix tweet format | twiboozer.py | twiboozer.py | # -*- encoding: utf-8 -*-
# TODO вынести, оформить как package
import os
import datetime
import random
import textwrap
from pymarkovchain import MarkovChain
from twibot import TwiBot
def format_tweet(tweet):
"""Format tweet after generation."""
max_len = 140
if len(tweet) > max_len:
tweet = textwrap.wrap(tweet, max_len - 1)[0]
if tweet[-1] not in ".?!":
tweet += get_end_tweet()
return tweet
def get_end_tweet():
"""Get random punctuation at the end of the sentence."""
endings = ('.', '!')
rate = 0.2
return endings[random.random() < rate]
def train(tweets):
"""Training of model from tweets based on a Markov chain."""
directory = "db"
filename = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
if not os.path.exists(directory):
os.makedirs(directory)
path = os.path.join(directory, filename)
model = MarkovChain(path)
model.generateDatabase("\n".join(tweets).encode("utf-8"))
return model
def main():
twibot = TwiBot()
tweets = twibot.get_timeline(count=300)
mc = train(tweets)
tweet = mc.generateString()
tweet = format_tweet(tweet)
twibot.post_tweet(tweet)
print(tweet)
if __name__ == "__main__":
main()
| # -*- encoding: utf-8 -*-
# TODO вынести, оформить как package
import os
import datetime
import random
import textwrap
from pymarkovchain import MarkovChain
from twibot import TwiBot
def format_tweet(tweet):
"""Format tweet after generation."""
if tweet[-1] not in ".?!":
tweet = "{0}{1}".format(tweet, get_end_tweet())
max_len = 140
if len(tweet) > max_len:
tweet = textwrap.wrap(tweet, max_len)[0]
return tweet
def get_end_tweet():
"""Get random punctuation at the end of the sentence."""
endings = ('.', '!')
rate = 0.2
return endings[random.random() < rate]
def train(tweets):
"""Training of model from tweets based on a Markov chain."""
directory = "db"
filename = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
if not os.path.exists(directory):
os.makedirs(directory)
path = os.path.join(directory, filename)
model = MarkovChain(path)
model.generateDatabase("\n".join(tweets).encode("utf-8"))
return model
def main():
twibot = TwiBot()
tweets = twibot.get_timeline(count=300)
mc = train(tweets)
tweet = mc.generateString()
tweet = format_tweet(tweet)
twibot.post_tweet(tweet)
print(tweet)
if __name__ == "__main__":
main()
| Python | 0.999999 |
ef1248dc4e150e72b9a347120f73b01909ff7522 | remove site requirement in pages app | pages/views.py | pages/views.py | from auslan.pages.models import Page
from django.template import loader, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.core.xheaders import populate_xheaders
from django.utils.safestring import mark_safe
DEFAULT_TEMPLATE = 'pages/default.html'
def page(request, url):
"""
Flat page view.
Models: `pages.page`
Templates: Uses the template defined by the ``template_name`` field,
or `pages/default.html` if template_name is not defined.
Context:
page
`pages.page` object
"""
if not url.endswith('/') and settings.APPEND_SLASH:
return HttpResponseRedirect("%s/" % request.path)
if not url.startswith('/'):
url = "/" + url
# here I've removed the requirement that the page be for this site
# - this won't work if we ever have more than one site here
# which isn't planned
f = get_object_or_404(Page, url__exact=url)
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
# if there is a form var 'playlist' then generate a playlist
# xml file instead of the page itself
if request.GET.has_key('playlist'):
return render_to_response('pages/playlist.xml', {'page': f}, mimetype='application/xml')
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'page': f,
})
response = HttpResponse(t.render(c))
populate_xheaders(request, response, Page, f.id)
return response
| from auslan.pages.models import Page
from django.template import loader, RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.core.xheaders import populate_xheaders
from django.utils.safestring import mark_safe
DEFAULT_TEMPLATE = 'pages/default.html'
def page(request, url):
"""
Flat page view.
Models: `pages.page`
Templates: Uses the template defined by the ``template_name`` field,
or `pages/default.html` if template_name is not defined.
Context:
page
`pages.page` object
"""
if not url.endswith('/') and settings.APPEND_SLASH:
return HttpResponseRedirect("%s/" % request.path)
if not url.startswith('/'):
url = "/" + url
f = get_object_or_404(Page, url__exact=url, sites__id__exact=settings.SITE_ID)
# If registration is required for accessing this page, and the user isn't
# logged in, redirect to the login page.
if f.registration_required and not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.path)
# if there is a form var 'playlist' then generate a playlist
# xml file instead of the page itself
if request.GET.has_key('playlist'):
return render_to_response('pages/playlist.xml', {'page': f}, mimetype='application/xml')
if f.template_name:
t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
else:
t = loader.get_template(DEFAULT_TEMPLATE)
# To avoid having to always use the "|safe" filter in flatpage templates,
# mark the title and content as already safe (since they are raw HTML
# content in the first place).
f.title = mark_safe(f.title)
f.content = mark_safe(f.content)
c = RequestContext(request, {
'page': f,
})
response = HttpResponse(t.render(c))
populate_xheaders(request, response, Page, f.id)
return response
| Python | 0 |
39d1217643ab67b1ef3931fc5a21d32d663fb5a0 | update min versions #35732 (#35733) | pandas/compat/_optional.py | pandas/compat/_optional.py | import distutils.version
import importlib
import types
import warnings
# Update install.rst when updating versions!
VERSIONS = {
"bs4": "4.6.0",
"bottleneck": "1.2.1",
"fsspec": "0.7.4",
"fastparquet": "0.3.2",
"gcsfs": "0.6.0",
"lxml.etree": "4.3.0",
"matplotlib": "2.2.3",
"numexpr": "2.6.8",
"odfpy": "1.3.0",
"openpyxl": "2.5.7",
"pandas_gbq": "0.12.0",
"pyarrow": "0.15.0",
"pytables": "3.4.4",
"pytest": "5.0.1",
"pyxlsb": "1.0.6",
"s3fs": "0.4.0",
"scipy": "1.2.0",
"sqlalchemy": "1.2.8",
"tables": "3.4.4",
"tabulate": "0.8.3",
"xarray": "0.12.0",
"xlrd": "1.2.0",
"xlwt": "1.3.0",
"xlsxwriter": "1.0.2",
"numba": "0.46.0",
}
def _get_version(module: types.ModuleType) -> str:
version = getattr(module, "__version__", None)
if version is None:
# xlrd uses a capitalized attribute name
version = getattr(module, "__VERSION__", None)
if version is None:
raise ImportError(f"Can't determine version for {module.__name__}")
return version
def import_optional_dependency(
name: str, extra: str = "", raise_on_missing: bool = True, on_version: str = "raise"
):
"""
Import an optional dependency.
By default, if a dependency is missing an ImportError with a nice
message will be raised. If a dependency is present, but too old,
we raise.
Parameters
----------
name : str
The module name. This should be top-level only, so that the
version may be checked.
extra : str
Additional text to include in the ImportError message.
raise_on_missing : bool, default True
Whether to raise if the optional dependency is not found.
When False and the module is not present, None is returned.
on_version : str {'raise', 'warn'}
What to do when a dependency's version is too old.
* raise : Raise an ImportError
* warn : Warn that the version is too old. Returns None
* ignore: Return the module, even if the version is too old.
It's expected that users validate the version locally when
using ``on_version="ignore"`` (see. ``io/html.py``)
Returns
-------
maybe_module : Optional[ModuleType]
The imported module, when found and the version is correct.
None is returned when the package is not found and `raise_on_missing`
is False, or when the package's version is too old and `on_version`
is ``'warn'``.
"""
msg = (
f"Missing optional dependency '{name}'. {extra} "
f"Use pip or conda to install {name}."
)
try:
module = importlib.import_module(name)
except ImportError:
if raise_on_missing:
raise ImportError(msg) from None
else:
return None
minimum_version = VERSIONS.get(name)
if minimum_version:
version = _get_version(module)
if distutils.version.LooseVersion(version) < minimum_version:
assert on_version in {"warn", "raise", "ignore"}
msg = (
f"Pandas requires version '{minimum_version}' or newer of '{name}' "
f"(version '{version}' currently installed)."
)
if on_version == "warn":
warnings.warn(msg, UserWarning)
return None
elif on_version == "raise":
raise ImportError(msg)
return module
| import distutils.version
import importlib
import types
import warnings
# Update install.rst when updating versions!
VERSIONS = {
"bs4": "4.6.0",
"bottleneck": "1.2.1",
"fsspec": "0.7.4",
"fastparquet": "0.3.2",
"gcsfs": "0.6.0",
"lxml.etree": "3.8.0",
"matplotlib": "2.2.2",
"numexpr": "2.6.2",
"odfpy": "1.3.0",
"openpyxl": "2.5.7",
"pandas_gbq": "0.12.0",
"pyarrow": "0.13.0",
"pytables": "3.4.3",
"pytest": "5.0.1",
"pyxlsb": "1.0.6",
"s3fs": "0.4.0",
"scipy": "1.2.0",
"sqlalchemy": "1.1.4",
"tables": "3.4.3",
"tabulate": "0.8.3",
"xarray": "0.8.2",
"xlrd": "1.2.0",
"xlwt": "1.2.0",
"xlsxwriter": "0.9.8",
"numba": "0.46.0",
}
def _get_version(module: types.ModuleType) -> str:
version = getattr(module, "__version__", None)
if version is None:
# xlrd uses a capitalized attribute name
version = getattr(module, "__VERSION__", None)
if version is None:
raise ImportError(f"Can't determine version for {module.__name__}")
return version
def import_optional_dependency(
name: str, extra: str = "", raise_on_missing: bool = True, on_version: str = "raise"
):
"""
Import an optional dependency.
By default, if a dependency is missing an ImportError with a nice
message will be raised. If a dependency is present, but too old,
we raise.
Parameters
----------
name : str
The module name. This should be top-level only, so that the
version may be checked.
extra : str
Additional text to include in the ImportError message.
raise_on_missing : bool, default True
Whether to raise if the optional dependency is not found.
When False and the module is not present, None is returned.
on_version : str {'raise', 'warn'}
What to do when a dependency's version is too old.
* raise : Raise an ImportError
* warn : Warn that the version is too old. Returns None
* ignore: Return the module, even if the version is too old.
It's expected that users validate the version locally when
using ``on_version="ignore"`` (see. ``io/html.py``)
Returns
-------
maybe_module : Optional[ModuleType]
The imported module, when found and the version is correct.
None is returned when the package is not found and `raise_on_missing`
is False, or when the package's version is too old and `on_version`
is ``'warn'``.
"""
msg = (
f"Missing optional dependency '{name}'. {extra} "
f"Use pip or conda to install {name}."
)
try:
module = importlib.import_module(name)
except ImportError:
if raise_on_missing:
raise ImportError(msg) from None
else:
return None
minimum_version = VERSIONS.get(name)
if minimum_version:
version = _get_version(module)
if distutils.version.LooseVersion(version) < minimum_version:
assert on_version in {"warn", "raise", "ignore"}
msg = (
f"Pandas requires version '{minimum_version}' or newer of '{name}' "
f"(version '{version}' currently installed)."
)
if on_version == "warn":
warnings.warn(msg, UserWarning)
return None
elif on_version == "raise":
raise ImportError(msg)
return module
| Python | 0 |
3b3418592331059f560bb641704a184d64734fc7 | fix evals | rubberband/constants.py | rubberband/constants.py | INFINITY_KEYS = ("separating/flowcover/maxslackroot", "separating/flowcover/maxslack",
"heuristics/undercover/maxcoversizeconss")
INFINITY_MASK = -1
ZIPPED_SUFFIX = ".gz"
FILES_DIR = "files/"
STATIC_FILES_DIR = FILES_DIR + "static/"
ALL_SOLU = STATIC_FILES_DIR + "all.solu"
IPET_EVALUATIONS = {
0: {"path": STATIC_FILES_DIR + "eval1.xml", "name": "evaluation1"},
1: {"path": STATIC_FILES_DIR + "eval2.xml", "name": "evaluation2"},
2: {"path": STATIC_FILES_DIR + "eval3.xml", "name": "evaluation3"},
3: {"path": STATIC_FILES_DIR + "eval4.xml", "name": "evaluation4"},
4: {"path": STATIC_FILES_DIR + "eval5.xml", "name": "evaluation5"},
5: {"path": STATIC_FILES_DIR + "eval6.xml", "name": "evaluation6"},
6: {"path": STATIC_FILES_DIR + "eval7.xml", "name": "evaluation7"},
7: {"path": STATIC_FILES_DIR + "eval8.xml", "name": "evaluation8"},
8: {"path": STATIC_FILES_DIR + "eval9.xml", "name": "evaluation9"},
9: {"path": STATIC_FILES_DIR + "eval10.xml", "name": "evaluation10"}
}
NONE_DISPLAY = "--"
EXPORT_DATA_FORMATS = ("gzip", "json", "csv", "raw")
EXPORT_FILE_TYPES = (".out", ".set", ".err", ".meta")
ELASTICSEARCH_INDEX = "solver-results"
FORMAT_DATE = "%Y-%m-%d"
FORMAT_DATETIME_LONG = "%B %d, %Y %H:%M"
FORMAT_DATETIME_SHORT = FORMAT_DATE + " %H:%M"
FORMAT_DATETIME = "%Y-%m-%d %H:%M:%S"
| INFINITY_KEYS = ("separating/flowcover/maxslackroot", "separating/flowcover/maxslack",
"heuristics/undercover/maxcoversizeconss")
INFINITY_MASK = -1
ZIPPED_SUFFIX = ".gz"
FILES_DIR = "files/"
STATIC_FILES_DIR = FILES_DIR + "static/"
ALL_SOLU = STATIC_FILES_DIR + "all.solu"
IPET_EVALUATIONS = {
0: {"path": STATIC_FILES_DIR + "eval1.xml", "name": "evaluation1"},
1: {"path": STATIC_FILES_DIR + "eval2.xml", "name": "evaluation2"},
2: {"path": STATIC_FILES_DIR + "eval3.xml", "name": "evaluation3"},
3: {"path": STATIC_FILES_DIR + "eval4.xml", "name": "evaluation4"},
4: {"path": STATIC_FILES_DIR + "eval5.xml", "name": "evaluation5"},
5: {"path": STATIC_FILES_DIR + "eval6.xml", "name": "evaluation6"},
6: {"path": STATIC_FILES_DIR + "eval1.xml", "name": "evaluation7"},
7: {"path": STATIC_FILES_DIR + "eval2.xml", "name": "evaluation8"},
8: {"path": STATIC_FILES_DIR + "eval3.xml", "name": "evaluation9"},
9: {"path": STATIC_FILES_DIR + "eval4.xml", "name": "evaluation10"}
}
NONE_DISPLAY = "--"
EXPORT_DATA_FORMATS = ("gzip", "json", "csv", "raw")
EXPORT_FILE_TYPES = (".out", ".set", ".err", ".meta")
ELASTICSEARCH_INDEX = "solver-results"
FORMAT_DATE = "%Y-%m-%d"
FORMAT_DATETIME_LONG = "%B %d, %Y %H:%M"
FORMAT_DATETIME_SHORT = FORMAT_DATE + " %H:%M"
FORMAT_DATETIME = "%Y-%m-%d %H:%M:%S"
| Python | 0.000003 |
9011b359bdf164994734f8d6890a2d5acb5fa865 | Replace joins with list_to_number in 32 | project_euler/solutions/problem_32.py | project_euler/solutions/problem_32.py | from itertools import permutations
from ..library.base import list_to_number
def solve() -> int:
pandigital = []
for permutation in permutations(range(1, 10)):
result = list_to_number(permutation[:4])
for i in range(1, 4):
left = list_to_number(permutation[4:4 + i])
right = list_to_number(permutation[4 + i:])
if left * right == result:
pandigital.append(result)
return sum(set(pandigital))
| from itertools import permutations
def solve() -> int:
pandigital = []
for permutation in permutations(range(1, 10)):
result = int(''.join(str(digit) for digit in permutation[:4]))
for i in range(1, 4):
left = int(''.join(str(digit) for digit in permutation[4:4 + i]))
right = int(''.join(str(digit) for digit in permutation[4 + i:]))
if left * right == result:
pandigital.append(result)
return sum(set(pandigital))
| Python | 0.001525 |
dc333069f4536fdc978d76924b098d10a1a8a50a | Fix error on status for last line in file. | ruby_coverage_status.py | ruby_coverage_status.py | import os
import sublime
import sublime_plugin
import json
import re
from .common.json_coverage_reader import JsonCoverageReader
STATUS_KEY = 'ruby-coverage-status'
class RubyCoverageStatusListener(sublime_plugin.EventListener):
"""Show coverage statistics in status bar."""
def on_load(self, view):
self.on_selection_modified(view)
def on_selection_modified(self, view):
if 'source.ruby' not in view.scope_name(0):
return
self.view = view
if sublime.load_settings('SublimeRubyCoverage.sublime-settings').get('coverage_status_in_status_bar'):
sublime.set_timeout_async(self.update_status, 0)
else:
self.erase_status()
def update_status(self):
view = self.view
view.set_status(STATUS_KEY, self.get_view_coverage_status())
def erase_status(self):
view = self.view
view.erase_status(STATUS_KEY)
def get_view_coverage_status(self):
view = self.view
filename = view.file_name()
if not filename:
self.erase_status()
r = JsonCoverageReader(filename)
coverage = r.get_file_coverage(filename) if r else None
if coverage is None:
self.erase_status()
return ''
line_number = self.get_line_number()
if line_number is None:
self.erase_status()
file_coverage = "File covered {:.1f}% ({}/{})".format(
coverage['covered_percent'],
coverage['covered_lines'],
coverage['lines_of_code']
)
line_coverage = coverage['coverage'][line_number] if len(coverage['coverage']) > line_number else None
if line_coverage is None:
line_coverage = 'Line not executable'
elif line_coverage > 0:
line_coverage = 'Line covered × {}'.format(line_coverage)
else:
line_coverage = 'Line not covered'
return file_coverage + ', ' + line_coverage
def get_line_number(self):
view = self.view
regions = view.sel()
if len(regions) > 1:
return
return view.rowcol(regions[0].a)[0]
| import os
import sublime
import sublime_plugin
import json
import re
from .common.json_coverage_reader import JsonCoverageReader
STATUS_KEY = 'ruby-coverage-status'
class RubyCoverageStatusListener(sublime_plugin.EventListener):
"""Show coverage statistics in status bar."""
def on_load(self, view):
self.on_selection_modified(view)
def on_selection_modified(self, view):
if 'source.ruby' not in view.scope_name(0):
return
self.view = view
if sublime.load_settings('SublimeRubyCoverage.sublime-settings').get('coverage_status_in_status_bar'):
sublime.set_timeout_async(self.update_status, 0)
else:
self.erase_status()
def update_status(self):
view = self.view
view.set_status(STATUS_KEY, self.get_view_coverage_status())
def erase_status(self):
view = self.view
view.erase_status(STATUS_KEY)
def get_view_coverage_status(self):
view = self.view
filename = view.file_name()
if not filename:
self.erase_status()
r = JsonCoverageReader(filename)
coverage = r.get_file_coverage(filename) if r else None
if coverage is None:
self.erase_status()
return ''
line_number = self.get_line_number()
if line_number is None:
self.erase_status()
file_coverage = "File covered {:.1f}% ({}/{})".format(
coverage['covered_percent'],
coverage['covered_lines'],
coverage['lines_of_code']
)
line_coverage = coverage['coverage'][line_number]
if line_coverage is None:
line_coverage = 'Line not executable'
elif line_coverage > 0:
line_coverage = 'Line covered × {}'.format(line_coverage)
else:
line_coverage = 'Line not covered'
return file_coverage + ', ' + line_coverage
def get_line_number(self):
view = self.view
regions = view.sel()
if len(regions) > 1:
return
return view.rowcol(regions[0].a)[0]
| Python | 0 |
921bdcc5d6f6ac4be7dfd0015e5b5fd6d06e6486 | Raise exception when --debug is specified to main script | runcommands/__main__.py | runcommands/__main__.py | import sys
from .config import RawConfig, RunConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args
from .util import printer
def main(argv=None):
debug = None
try:
all_argv, run_argv, command_argv = partition_argv(argv)
cli_args = run.parse_args(RawConfig(run=RunConfig()), run_argv)
run_args = read_run_args(run)
run_args.update(cli_args)
debug = run_args.get('debug', run.parameters['debug'].default)
run.implementation(
None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
cli_args=cli_args, **run_args)
except RunCommandsError as exc:
if debug or debug is None:
# User specified --debug OR processing didn't get far enough
# to determine whether user specified --debug.
raise
printer.error(exc, file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| import sys
from .config import RawConfig, RunConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args
from .util import printer
def main(argv=None):
try:
all_argv, run_argv, command_argv = partition_argv(argv)
cli_args = run.parse_args(RawConfig(run=RunConfig()), run_argv)
run_args = read_run_args(run)
run_args.update(cli_args)
run.implementation(
None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
cli_args=cli_args, **run_args)
except RunCommandsError as exc:
printer.error(exc, file=sys.stderr)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| Python | 0 |
5ca0e0683a663271c40d728e5f88ee19a26eca61 | Add ProfileSummary to defaults | devserver/settings.py | devserver/settings.py | DEVSERVER_MODULES = (
'devserver.modules.sql.SQLRealTimeModule',
'devserver.modules.profile.ProfileSummaryModule',
# 'devserver.modules.cache.CacheSummaryModule',
)
# This variable gets set to True when we're running the devserver
DEVSERVER_ACTIVE = False | DEVSERVER_MODULES = (
'devserver.modules.sql.SQLRealTimeModule',
'devserver.modules.cache.CacheSummaryModule',
)
# This variable gets set to True when we're running the devserver
DEVSERVER_ACTIVE = False | Python | 0 |
75ac453e873727675ba18e1f45b5bc0cfda26fd7 | Increment the version number | angel/__init__.py | angel/__init__.py | __title__ = 'angel'
__version__ = '0.0.2'
__author__ = 'Bugra Akyildiz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Bugra Akyildiz'
| __title__ = 'angel'
__version__ = '0.0.1'
__author__ = 'Bugra Akyildiz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2014 Bugra Akyildiz'
| Python | 0.999999 |
c39d6494d1bc27dedb2141970cdd7a51382f0af4 | Update version 0.5.0.dev1 -> 0.5.0 | dimod/package_info.py | dimod/package_info.py | __version__ = '0.5.0'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| __version__ = '0.5.0.dev1'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| Python | 0 |
ff504057223cd71f8ebbb7e7a53dc7982a9422a8 | Add stream logger config convenience function | boto3/__init__.py | boto3/__init__.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '0.0.1'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
"""
Set up a default session, passing through any parameters to the session
constructor. There is no need to call this unless you wish to pass custom
parameters, because a default session will be created for you.
"""
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
"""
Add a stream handler for the given name and level to the logging module.
By default, this logs all boto3 messages to ``stdout``.
>>> import boto3
>>> boto3.set_stream_logger('boto3.resources', logging.INFO)
:type name: string
:param name: Log name
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type format_string: str
:param format_string: Log message format
"""
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_default_session():
"""
Get the default session, creating one if needed.
:rtype: boto3.session.Sesssion
:return: The default session
"""
if DEFAULT_SESSION is None:
setup_default_session()
return DEFAULT_SESSION
def client(service):
"""
Create a low-level service client by name using the default session.
:type service: string
:param service: The name of a service, e.g. 's3' or 'ec2'
:return: Service client instance
"""
return _get_default_session().client(service)
def resource(service):
"""
Create a resource service client by name using the default session.
:type service: string
:param service: The name of a service, e.g. 's3' or 'ec2'
:return: Resource client instance
"""
return _get_default_session().resource(service)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('boto3').addHandler(NullHandler())
| # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '0.0.1'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
"""
Set up a default session, passing through any parameters to the session
constructor. There is no need to call this unless you wish to pass custom
parameters, because a default session will be created for you.
"""
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def _get_default_session():
"""
Get the default session, creating one if needed.
:rtype: boto3.session.Sesssion
:return: The default session
"""
if DEFAULT_SESSION is None:
setup_default_session()
return DEFAULT_SESSION
def client(service):
"""
Create a low-level service client by name using the default session.
:type service: string
:param service: The name of a service, e.g. 's3' or 'ec2'
:return: Service client instance
"""
return _get_default_session().client(service)
def resource(service):
"""
Create a resource service client by name using the default session.
:type service: string
:param service: The name of a service, e.g. 's3' or 'ec2'
:return: Resource client instance
"""
return _get_default_session().resource(service)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger('boto3').addHandler(NullHandler())
| Python | 0 |
c88d5b0935f9cba8b3d76face5cf40098ef5a87b | Move difference file to build directory | preprint/latexdiff.py | preprint/latexdiff.py | #!/usr/bin/env python
# encoding: utf-8
"""
Command for running latexdiff.
"""
import logging
import os
import subprocess
import codecs
import shutil
import git
from preprint.textools import inline, inline_blob, remove_comments
from preprint.gittools import read_git_blob
from cliff.command import Command
class Diff(Command):
"""Run latexdiff between HEAD and a git ref."""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Diff, self).get_parser(prog_name)
parser.add_argument('prev_commit',
help="Commit SHA to compare HEAD against.")
parser.add_argument('-n', '--name',
default=None,
help="Name of the difference file.")
return parser
def take_action(self, parsed_args):
# Inline current and previous versions of the document
current_path = self._inline_current(self.app.options.master)
prev_path = self._inline_prev(parsed_args.prev_commit,
self.app.options.master)
if parsed_args.name is None:
name = "HEAD_{0}".format(parsed_args.prev_commit)
else:
name = parsed_args.name
# Run latexmk
diff_path = os.path.splitext(name)[0]
ldiff_cmd = "latexdiff {prev} {current} > {diff}.tex".format(
prev=prev_path,
current=current_path,
diff=diff_path)
subprocess.call(ldiff_cmd, shell=True)
# Compile the diff document with latexmk
ltmk_cmd = "latexmk -f -pdf -bibtex-cond -c -gg {0}.tex".format(
diff_path)
subprocess.call(ltmk_cmd, shell=True)
# Copy to build directory
if not os.path.exists("build"):
os.makedirs("build")
pdf_path = "{0}.pdf".format(name)
if os.path.exists(pdf_path):
shutil.move(pdf_path, os.path.join("build", pdf_path))
def _inline_current(self, root_tex):
"""Inline the current manuscript."""
with codecs.open(root_tex, 'r', encoding='utf-8') as f:
root_text = f.read()
root_text = remove_comments(root_text)
root_text = inline(root_text)
output_path = "_current.tex"
if os.path.exists(output_path):
os.remove(output_path)
with codecs.open(output_path, 'w', encoding='utf-8') as f:
f.write(root_text)
return output_path
def _inline_prev(self, commit_ref, root_tex):
"""Inline the previous manuscript in the git tree."""
root_text = read_git_blob(commit_ref, root_tex)
root_text = remove_comments(root_text)
root_text = inline_blob(commit_ref, root_text)
output_path = "_prev.tex"
if os.path.exists(output_path):
os.remove(output_path)
with codecs.open(output_path, 'w', encoding='utf-8') as f:
f.write(root_text)
return output_path
def _get_n_commits(self):
"""docstring for _get_n_commits"""
repo = git.Repo(".")
print "HEAD", repo.head.commit.hexsha
commits = list(repo.iter_commits())
n = len(commits)
return n
def _get_commits(self):
"""docstring for _get_commits"""
repo = git.Repo(".")
commits = list(repo.iter_commits())
# for cm in commits:
# print cm.committed_date, cm.hexsha
return commits
def _match_commit(self, sha):
"""Match the sha fragment to a commit."""
commits = self._get_commits()
for cm in commits:
if cm.hexsha.startswith(sha):
print sha, "match", cm.hexsha
return cm
return None
| #!/usr/bin/env python
# encoding: utf-8
"""
Command for running latexdiff.
"""
import logging
import os
import subprocess
import codecs
import git
from preprint.textools import inline, inline_blob, remove_comments
from preprint.gittools import read_git_blob
from cliff.command import Command
class Diff(Command):
"""Run latexdiff between HEAD and a git ref."""
log = logging.getLogger(__name__)
def get_parser(self, prog_name):
parser = super(Diff, self).get_parser(prog_name)
parser.add_argument('prev_commit',
help="Commit SHA to compare HEAD against.")
parser.add_argument('-n', '--name',
default="diff",
help="Name of the difference file.")
return parser
def take_action(self, parsed_args):
# Inline current and previous versions of the document
current_path = self._inline_current(self.app.options.master)
prev_path = self._inline_prev(parsed_args.prev_commit,
self.app.options.master)
# Run latexmk
diff_path = os.path.splitext(parsed_args.name)[0]
ldiff_cmd = "latexdiff {prev} {current} > {diff}.tex".format(
prev=prev_path,
current=current_path,
diff=diff_path)
subprocess.call(ldiff_cmd, shell=True)
# Compile the diff document with latexmk
ltmk_cmd = "latexmk -f -pdf -bibtex-cond -c -gg {0}.tex".format(
diff_path)
subprocess.call(ltmk_cmd, shell=True)
def _inline_current(self, root_tex):
"""Inline the current manuscript."""
with codecs.open(root_tex, 'r', encoding='utf-8') as f:
root_text = f.read()
root_text = remove_comments(root_text)
root_text = inline(root_text)
output_path = "_current.tex"
if os.path.exists(output_path):
os.remove(output_path)
with codecs.open(output_path, 'w', encoding='utf-8') as f:
f.write(root_text)
return output_path
def _inline_prev(self, commit_ref, root_tex):
"""Inline the previous manuscript in the git tree."""
root_text = read_git_blob(commit_ref, root_tex)
root_text = remove_comments(root_text)
root_text = inline_blob(commit_ref, root_text)
output_path = "_prev.tex"
if os.path.exists(output_path):
os.remove(output_path)
with codecs.open(output_path, 'w', encoding='utf-8') as f:
f.write(root_text)
return output_path
def _get_n_commits(self):
"""docstring for _get_n_commits"""
repo = git.Repo(".")
print "HEAD", repo.head.commit.hexsha
commits = list(repo.iter_commits())
n = len(commits)
return n
def _get_commits(self):
"""docstring for _get_commits"""
repo = git.Repo(".")
commits = list(repo.iter_commits())
# for cm in commits:
# print cm.committed_date, cm.hexsha
return commits
def _match_commit(self, sha):
"""Match the sha fragment to a commit."""
commits = self._get_commits()
for cm in commits:
if cm.hexsha.startswith(sha):
print sha, "match", cm.hexsha
return cm
return None
| Python | 0 |
3e6508e4036def376f1a41943499a30f034e665c | Fix __sub__ | rwrtrack/core/record.py | rwrtrack/core/record.py | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.hybrid import hybrid_property
from .db import DeclarativeBase, sesh
from .derivedstats import DerivedStats
from .exceptions import NoRecordError
class Record(DeclarativeBase, DerivedStats):
__tablename__ = "records"
date = Column(Integer, primary_key=True)
account_id = Column(Integer, ForeignKey("accounts._id"), primary_key=True)
username = Column(String, nullable=False)
xp = Column(Integer, nullable=False)
time_played = Column(Integer, nullable=False)
kills = Column(Integer, nullable=False)
deaths = Column(Integer, nullable=False)
kill_streak = Column(Integer, nullable=False)
targets_destroyed = Column(Integer, nullable=False)
vehicles_destroyed = Column(Integer, nullable=False)
soldiers_healed = Column(Integer, nullable=False)
team_kills = Column(Integer, nullable=False)
distance_moved = Column(Integer, nullable=False)
shots_fired = Column(Integer, nullable=False)
throwables_thrown = Column(Integer, nullable=False)
def __repr__(self):
return f"Record(date={self.date}, account_id={self.account_id}, username='{self.username}', " \
f"xp={self.xp}, time_played={self.time_played}, " \
f"kills={self.kills}, deaths={self.deaths}, score={self.score}, kdr={self.kdr}, " \
f"kill_streak={self.kill_streak}, " \
f"targets_destroyed={self.targets_destroyed}, vehicles_destroyed={self.vehicles_destroyed}, " \
f"soldiers_healed={self.soldiers_healed}, team_kills={self.team_kills}, " \
f"distance_moved={self.distance_moved}, " \
f"shots_fired={self.shots_fired}, throwables_thrown={self.throwables_thrown})"
def __sub__(self, other):
date = f"'diff:{other.date}-{self.date}'"
account_id = self.account_id
username = self.username
xp = self.xp - other.xp
time_played = self.time_played - other.time_played
kills = self.kills - other.kills
deaths = self.deaths - other.deaths
kill_streak = self.kill_streak - other.kill_streak
targets_destroyed = self.targets_destroyed - other.targets_destroyed
vehicles_destroyed = self.vehicles_destroyed - other.vehicles_destroyed
soldiers_healed = self.soldiers_healed - other.soldiers_healed
team_kills = self.team_kills - other.team_kills
distance_moved = self.distance_moved - other.distance_moved
shots_fired = self.shots_fired - other.shots_fired
throwables_thrown = self.throwables_thrown - other.throwables_thrown
r = Record(date=date, account_id=account_id, username=username, xp=xp, time_played=time_played,
kills=kills, deaths=deaths, kill_streak=kill_streak,
targets_destroyed=targets_destroyed, vehicles_destroyed=vehicles_destroyed,
soldiers_healed=soldiers_healed, team_kills=team_kills, distance_moved=distance_moved,
shots_fired=shots_fired, throwables_thrown=throwables_thrown)
return r
# Set aliases for Record to use in self-join scenarios
RA, RB = aliased(Record, name="ra"), aliased(Record, name="rb")
def get_records_on_date(date):
try:
return sesh.query(Record).filter_by(date=date).all()
except NoResultFound as e:
raise NoRecordError(f"No records on {date}") from e
| from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import aliased
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.ext.hybrid import hybrid_property
from .db import DeclarativeBase, sesh
from .derivedstats import DerivedStats
from .exceptions import NoRecordError
class Record(DeclarativeBase, DerivedStats):
__tablename__ = "records"
date = Column(Integer, primary_key=True)
account_id = Column(Integer, ForeignKey("accounts._id"), primary_key=True)
username = Column(String, nullable=False)
xp = Column(Integer, nullable=False)
time_played = Column(Integer, nullable=False)
kills = Column(Integer, nullable=False)
deaths = Column(Integer, nullable=False)
kill_streak = Column(Integer, nullable=False)
targets_destroyed = Column(Integer, nullable=False)
vehicles_destroyed = Column(Integer, nullable=False)
soldiers_healed = Column(Integer, nullable=False)
team_kills = Column(Integer, nullable=False)
distance_moved = Column(Integer, nullable=False)
shots_fired = Column(Integer, nullable=False)
throwables_thrown = Column(Integer, nullable=False)
def __repr__(self):
return f"Record(date={self.date}, account_id={self.account_id}, username='{self.username}', " \
f"xp={self.xp}, time_played={self.time_played}, " \
f"kills={self.kills}, deaths={self.deaths}, score={self.score}, kdr={self.kdr}, " \
f"kill_streak={self.kill_streak}, " \
f"targets_destroyed={self.targets_destroyed}, vehicles_destroyed={self.vehicles_destroyed}, " \
f"soldiers_healed={self.soldiers_healed}, team_kills={self.team_kills}, " \
f"distance_moved={self.distance_moved}, " \
f"shots_fired={self.shots_fired}, throwables_thrown={self.throwables_thrown})"
def __sub__(self, other):
date = f"'diff:{other.date}-{self.date}'"
account_id = self.account_id
username = self.username
xp = self.xp - other.xp
time_played = self.time_played - other.time_played
kills = self.kills - other.kills
deaths = self.deaths - other.deaths
kill_streak = self.kill_streak - other.kill_streak
targets_destroyed = self.targets_destroyed - other.targets_destroyed
vehicles_destroyed = self.vehicles_destroyed - other.vehicles_destroyed
soldiers_healed = self.soldiers_healed - other.soldiers_healed
team_kills = self.team_kills - other.team_kills
distance_moved = self.distance_moved - other.distance_moved
shots_fired = self.shots_fired - other.shots_fired
throwables_thrown = self.throwables_thrown - other.throwables_thrown
r = Record(date, account_id, username, xp, time_played, kills, deaths, kill_streak,
targets_destroyed, vehicles_destroyed, soldiers_healed, team_kills, distance_moved,
shots_fired, throwables_thrown)
return r
# Set aliases for Record to use in self-join scenarios
RA, RB = aliased(Record, name="ra"), aliased(Record, name="rb")
def get_records_on_date(date):
try:
return sesh.query(Record).filter_by(date=date).all()
except NoResultFound as e:
raise NoRecordError(f"No records on {date}") from e
| Python | 0.00023 |
7b071f3ccacd87f6dcb0e9a570d8ce386dbf7a4f | change FULLNAME to AUTHOR_FULLNAME | pelicanconf.py | pelicanconf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'jchen'
AUTHOR_FULLNAME = u'Jon Chen'
SITENAME = u'BURRITO 4 LYFE'
SITEURL = ''
TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'
CSS_FILE = 'style.css'
# theme stuff
THEME = './theme'
# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar']
DISQUS_SITENAME = "voltaireblog"
# gravatar email
AUTHOR_EMAIL = 'dabestmayne@burrito.sh'
# social
TWITTER_USERNAME = 's_jchen'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = 10
DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False
DEFAULT_DATE_FORMAT = ('%Y-%m-%d')
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
| #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'jchen'
FULLNAME = u'Jon Chen'
SITENAME = u'BURRITO 4 LYFE'
SITEURL = ''
TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'
CSS_FILE = 'style.css'
# theme stuff
THEME = './theme'
# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar']
DISQUS_SITENAME = "voltaireblog"
# gravatar email
AUTHOR_EMAIL = 'dabestmayne@burrito.sh'
# social
TWITTER_USERNAME = 's_jchen'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = 10
DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False
DEFAULT_DATE_FORMAT = ('%Y-%m-%d')
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
| Python | 0.999663 |
a3c582df681aae77034e2db08999c89866cd6470 | Refactor earth mover's distance implementation | utilities.py | utilities.py | import collections
def each(function, iterable):
for item in iterable:
function(item)
def each_unpack(function, iterable):
for item in iterable:
function(*item)
def minmax(*args):
min = None
max = None
for x in args:
if max < x:
max = x
if x > min:
min = x
return min, max
def map_inplace(function, list, depth=0):
if depth <= 0:
list[:] = map(function, list)
else:
for item in list:
map_inplace(function, item, depth - 1)
def count_if(function, iterable):
count = 0
for item in iterable:
if function(item):
count += 1
return count
def teemap(iterable, *functions):
map(lambda item: (f(item) for f in functions), iterable)
class ProbabilityDistribution(collections.defaultdict):
""""Holds a probability distribution and can compute the distance to other dists"""
def __init__(self):
collections.defaultdict.__init__(self, int)
def get(self, k, d = 0):
return dict.get(self, k, d)
def distance_to(self, compare_to):
return sum(
(abs(self.get(bin) - compare_to.get(bin))
for bin in self.viewkeys() | compare_to.viewkeys()),
0)
| import collections
def each(function, iterable):
for item in iterable:
function(item)
def each_unpack(function, iterable):
for item in iterable:
function(*item)
def minmax(*args):
min = None
max = None
for x in args:
if max < x:
max = x
if x > min:
min = x
return min, max
def map_inplace(function, list, depth=0):
if depth <= 0:
list[:] = map(function, list)
else:
for item in list:
map_inplace(function, item, depth - 1)
def count_if(function, iterable):
count = 0
for item in iterable:
if function(item):
count += 1
return count
def teemap(iterable, *functions):
map(lambda item: (f(item) for f in functions), iterable)
class ProbabilityDistribution(collections.defaultdict):
""""Holds a probability distribution and can compute the distance to other dists"""
def __init__(self):
collections.defaultdict.__init__(self, int)
def get(self, k, d = 0):
return dict.get(self, k, d)
def distance_to(self, compare_to):
key_set = self.viewkeys() | compare_to.viewkeys()
currentEMD = 0
lastEMD = 0
totaldistance = 0
for key in key_set:
lastEMD = currentEMD
currentEMD = (self.get(key, 0) + lastEMD) - compare_to.get(key, 0)
totaldistance += math.fabs(currentEMD)
return totaldistance | Python | 0 |
f7b964b2ce42d8c5cb6707cd571cca5eeadb2ff7 | Implement feedback on final state of firmware | confluent_server/confluent/firmwaremanager.py | confluent_server/confluent/firmwaremanager.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# provide managing firmware update process and firmware repository if/when
# the time comes
import confluent.exceptions as exc
import confluent.messages as msg
import eventlet
updatesbytarget = {}
def execupdate(handler, filename, updateobj):
try:
completion = handler(filename, progress=updateobj.handle_progress)
if completion is None:
completion = 'complete'
updateobj.handle_progress({'phase': completion, 'progress': 100.0})
except Exception as e:
updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
'detail': str(e)})
class Updater(object):
def __init__(self, node, handler, filename, tenant=None, name=None):
self.node = node
self.phase = 'initializing'
self.detail = ''
self.percent = 0.0
self.updateproc = eventlet.spawn(execupdate, handler, filename, self)
if (node, tenant) not in updatesbytarget:
updatesbytarget[(node, tenant)] = {}
if name is None:
name = 1
while '{0}'.format(name) in updatesbytarget[(node, tenant)]:
name += 1
self.name = '{0}'.format(name)
updatesbytarget[(node, tenant)][self.name] = self
def handle_progress(self, progress):
self.phase = progress['phase']
self.percent = float(progress['progress'])
self.detail = progress.get('detail', '')
def cancel(self):
self.updateproc.kill()
@property
def progress(self):
return {'phase': self.phase, 'progress': self.percent,
'detail': self.detail}
def remove_updates(nodes, tenant, element):
if len(element) < 5:
raise exc.InvalidArgumentException()
upid = element[-1]
for node in nodes:
try:
upd = updatesbytarget[(node, tenant)][upid]
except KeyError:
raise exc.NotFoundException('No active update matches request')
upd.cancel()
del updatesbytarget[(node, tenant)][upid]
yield msg.DeletedResource(
'nodes/{0}/inventory/firmware/updates/active/{1}'.format(
node, upid))
def list_updates(nodes, tenant, element):
showmode = False
if len(element) > 4:
showmode = True
upid = element[-1]
for node in nodes:
if showmode:
try:
updater = updatesbytarget[(node, tenant)][upid]
except KeyError:
raise exc.NotFoundException('No matching update process found')
yield msg.KeyValueData(updater.progress, name=node)
else:
for updateid in updatesbytarget.get((node, tenant), {}):
yield msg.ChildCollection(updateid)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# provide managing firmware update process and firmware repository if/when
# the time comes
import confluent.exceptions as exc
import confluent.messages as msg
import eventlet
updatesbytarget = {}
def execupdate(handler, filename, updateobj):
try:
handler(filename, progress=updateobj.handle_progress)
updateobj.handle_progress({'phase': 'complete', 'progress': 100.0})
except Exception as e:
updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
'detail': str(e)})
class Updater(object):
def __init__(self, node, handler, filename, tenant=None, name=None):
self.node = node
self.phase = 'initializing'
self.detail = ''
self.percent = 0.0
self.updateproc = eventlet.spawn(execupdate, handler, filename, self)
if (node, tenant) not in updatesbytarget:
updatesbytarget[(node, tenant)] = {}
if name is None:
name = 1
while '{0}'.format(name) in updatesbytarget[(node, tenant)]:
name += 1
self.name = '{0}'.format(name)
updatesbytarget[(node, tenant)][self.name] = self
def handle_progress(self, progress):
self.phase = progress['phase']
self.percent = float(progress['progress'])
self.detail = progress.get('detail', '')
def cancel(self):
self.updateproc.kill()
@property
def progress(self):
return {'phase': self.phase, 'progress': self.percent,
'detail': self.detail}
def remove_updates(nodes, tenant, element):
if len(element) < 5:
raise exc.InvalidArgumentException()
upid = element[-1]
for node in nodes:
try:
upd = updatesbytarget[(node, tenant)][upid]
except KeyError:
raise exc.NotFoundException('No active update matches request')
upd.cancel()
del updatesbytarget[(node, tenant)][upid]
yield msg.DeletedResource(
'nodes/{0}/inventory/firmware/updates/active/{1}'.format(
node, upid))
def list_updates(nodes, tenant, element):
showmode = False
if len(element) > 4:
showmode = True
upid = element[-1]
for node in nodes:
if showmode:
try:
updater = updatesbytarget[(node, tenant)][upid]
except KeyError:
raise exc.NotFoundException('No matching update process found')
yield msg.KeyValueData(updater.progress, name=node)
else:
for updateid in updatesbytarget.get((node, tenant), {}):
yield msg.ChildCollection(updateid)
| Python | 0 |
26cb100e7e4782cdc4d7a55f6a096a9da2db2b5c | fix bug 1495003: add search to GraphicsDeviceAdmin | webapp-django/crashstats/crashstats/admin.py | webapp-django/crashstats/crashstats/admin.py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
from crashstats.crashstats.models import (
GraphicsDevice,
Signature,
)
# Fix the Django Admin User list display so it shows the columns we care about
UserAdmin.list_display = [
'email',
'first_name',
'last_name',
'is_superuser',
'is_staff',
'is_active',
'date_joined',
'last_login'
]
ACTION_TO_NAME = {
ADDITION: 'add',
CHANGE: 'change',
DELETION: 'delete'
}
@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'action_time'
list_display = [
'action_time',
'user_email',
'content_type',
'object_repr',
'action_name',
'get_change_message'
]
def user_email(self, obj):
return obj.user.email
def action_name(self, obj):
return ACTION_TO_NAME[obj.action_flag]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
# FIXME(willkg): If this always returned False, then this modeladmin
# doesn't show up in the index. However, this means you get a change
# page that suggests you can change it, but errors out when saving.
#
# We can nix this and use has_view_permission when we upgrade to
# Django 2.1.
return request.method != 'POST'
def has_delete_permission(self, request, obj=None):
return False
def has_module_permission(self, request):
return True
@admin.register(GraphicsDevice)
class GraphicsDeviceAdmin(admin.ModelAdmin):
list_display = [
'id',
'vendor_hex',
'adapter_hex',
'vendor_name',
'adapter_name'
]
search_fields = [
'vendor_hex',
'adapter_hex',
'vendor_name',
'adapter_name'
]
@admin.register(Signature)
class Signature(admin.ModelAdmin):
list_display = [
'signature',
'first_build',
'first_date'
]
search_fields = [
'signature'
]
| from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
from crashstats.crashstats.models import (
GraphicsDevice,
Signature,
)
# Fix the Django Admin User list display so it shows the columns we care about
UserAdmin.list_display = [
'email',
'first_name',
'last_name',
'is_superuser',
'is_staff',
'is_active',
'date_joined',
'last_login'
]
ACTION_TO_NAME = {
ADDITION: 'add',
CHANGE: 'change',
DELETION: 'delete'
}
@admin.register(LogEntry)
class LogEntryAdmin(admin.ModelAdmin):
date_hierarchy = 'action_time'
list_display = [
'action_time',
'user_email',
'content_type',
'object_repr',
'action_name',
'get_change_message'
]
def user_email(self, obj):
return obj.user.email
def action_name(self, obj):
return ACTION_TO_NAME[obj.action_flag]
def has_add_permission(self, request):
return False
def has_change_permission(self, request, obj=None):
# FIXME(willkg): If this always returned False, then this modeladmin
# doesn't show up in the index. However, this means you get a change
# page that suggests you can change it, but errors out when saving.
#
# We can nix this and use has_view_permission when we upgrade to
# Django 2.1.
return request.method != 'POST'
def has_delete_permission(self, request, obj=None):
return False
def has_module_permission(self, request):
return True
@admin.register(GraphicsDevice)
class GraphicsDeviceAdmin(admin.ModelAdmin):
list_display = [
'id',
'vendor_hex',
'adapter_hex',
'vendor_name',
'adapter_name'
]
@admin.register(Signature)
class Signature(admin.ModelAdmin):
list_display = [
'signature',
'first_build',
'first_date'
]
| Python | 0 |
0d2079b1dcb97708dc55c32d9e2c1a0f12595875 | Replace string substitution with string formatting | salt/runners/launchd.py | salt/runners/launchd.py | # -*- coding: utf-8 -*-
'''
Manage launchd plist files
'''
# Import python libs
import os
import sys
def write_launchd_plist(program):
'''
Write a launchd plist for managing salt-master or salt-minion
CLI Example:
.. code-block:: bash
salt-run launchd.write_launchd_plist salt-master
'''
plist_sample_text = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>org.saltstack.{program}</string>
<key>ProgramArguments</key>
<array>
<string>{python}</string>
<string>{script}</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
'''.strip()
supported_programs = ['salt-master', 'salt-minion']
if program not in supported_programs:
sys.stderr.write('Supported programs: {0!r}\n'.format(supported_programs))
sys.exit(-1)
sys.stdout.write(
plist_sample_text.format(
program=program,
python=sys.executable,
script=os.path.join(os.path.dirname(sys.executable), program)
)
)
| # -*- coding: utf-8 -*-
'''
Manage launchd plist files
'''
# Import python libs
import os
import sys
def write_launchd_plist(program):
'''
Write a launchd plist for managing salt-master or salt-minion
CLI Example:
.. code-block:: bash
salt-run launchd.write_launchd_plist salt-master
'''
plist_sample_text = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>Label</key>
<string>org.saltstack.{program}</string>
<key>ProgramArguments</key>
<array>
<string>{python}</string>
<string>{script}</string>
</array>
<key>RunAtLoad</key>
<true/>
</dict>
</plist>
""".strip()
supported_programs = ['salt-master', 'salt-minion']
if program not in supported_programs:
sys.stderr.write("Supported programs: %r\n" % supported_programs)
sys.exit(-1)
sys.stdout.write(
plist_sample_text.format(
program=program,
python=sys.executable,
script=os.path.join(os.path.dirname(sys.executable), program)
)
)
| Python | 0.001018 |
d967505ea9db8af0286abe1959a8fcba556b2d7a | add setting the slave of a specified master on a sentinel | salt/states/redismod.py | salt/states/redismod.py | # -*- coding: utf-8 -*-
'''
Management of Redis server
==========================
.. versionadded:: 2014.7.0
:depends: - redis Python module
:configuration: See :py:mod:`salt.modules.redis` for setup instructions.
.. code-block:: yaml
key_in_redis:
redis.string:
- value: string data
The redis server information specified in the minion config file can be
overridden in states using the following arguments: ``host``, ``post``, ``db``,
``password``.
.. code-block:: yaml
key_in_redis:
redis.string:
- value: string data
- host: localhost
- port: 6379
- db: 0
- password: somuchkittycat
'''
import copy
__virtualname__ = 'redis'
def __virtual__():
'''
Only load if the redis module is in __salt__
'''
if 'redis.set_key' in __salt__:
return __virtualname__
return False
def string(name, value, expire=None, expireat=None, **connection_args):
'''
Ensure that the key exists in redis with the value specified
name
Redis key to manage
value
Data to persist in key
expire
Sets time to live for key in seconds
expireat
Sets expiration time for key via UNIX timestamp, overrides `expire`
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Key already set to defined value'}
old_key = __salt__['redis.get_key'](name, **connection_args)
if old_key != value:
__salt__['redis.set_key'](name, value, **connection_args)
ret['changes'][name] = 'Value updated'
ret['comment'] = 'Key updated to new value'
if expireat:
__salt__['redis.expireat'](name, expireat, **connection_args)
ret['changes']['expireat'] = 'Key expires at {0}'.format(expireat)
elif expire:
__salt__['redis.expire'](name, expire, **connection_args)
ret['changes']['expire'] = 'TTL set to {0} seconds'.format(expire)
return ret
def absent(name, keys=None, **connection_args):
'''
Ensure key absent from redis
name
Key to ensure absent from redis
keys
list of keys to ensure absent, name will be ignored if this is used
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Key(s) specified already absent'}
if keys:
if not isinstance(keys, list):
ret['result'] = False
ret['comment'] = '`keys` not formed as a list type'
return ret
delete_list = [key for key in keys
if __salt__['redis.exists'](key, **connection_args)]
if not len(delete_list):
return ret
__salt__['redis.delete'](*delete_list, **connection_args)
ret['changes']['deleted'] = delete_list
ret['comment'] = 'Keys deleted'
return ret
if __salt__['redis.exists'](name, **connection_args):
__salt__['redis.delete'](name, **connection_args)
ret['comment'] = 'Key deleted'
ret['changes']['deleted'] = [name]
return ret
def slaveof(name, sentinel_host=None, sentinel_port=None, sentinel_password=None, **connection_args):
'''
Set this redis instance as a slave.
name
Master to make this a slave of
sentinel_host
Ip of the sentinel to check for the master
sentinel_port
Port of the sentinel to check for the master
'''
ret = {'name': name,
'changes': {},
'result': False,
'comment': 'Failed to setup slave'}
kwargs = copy.copy(connection_args)
sentinel_master = __salt__['redis.sentinel_get_master_ip'](name, sentinel_host, sentinel_port, sentinel_password)
if sentinel_master['master_host'] in __salt__['network.ip_addrs']():
ret['result'] = True
ret['comment'] = 'Minion is the master: '.format(name)
return ret
first_master = __salt__['redis.get_master_ip'](**connection_args)
if first_master == sentinel_master:
ret['result'] = True
ret['comment'] = 'Minion already slave of master: {0}'.format(name)
return ret
if __opts__['test'] == True:
ret['comment'] = 'Minion will be made a slave of {0}: {1}'.format(name, sentinel_master['host'])
ret['result'] = None
return ret
kwargs.update(**sentinel_master)
__salt__['redis.slaveof'](**kwargs)
current_master = __salt__['redis.get_master_ip'](**connection_args)
if current_master != sentinel_master:
return ret
ret['result'] = True
ret['changes'] = {
'old': first_master,
'new': current_master,
}
ret['comment'] = 'Minion successfully connected to master: {0}'.format(name)
return ret
| # -*- coding: utf-8 -*-
'''
Management of Redis server
==========================
.. versionadded:: 2014.7.0
:depends: - redis Python module
:configuration: See :py:mod:`salt.modules.redis` for setup instructions.
.. code-block:: yaml
key_in_redis:
redis.string:
- value: string data
The redis server information specified in the minion config file can be
overridden in states using the following arguments: ``host``, ``post``, ``db``,
``password``.
.. code-block:: yaml
key_in_redis:
redis.string:
- value: string data
- host: localhost
- port: 6379
- db: 0
- password: somuchkittycat
'''
__virtualname__ = 'redis'
def __virtual__():
'''
Only load if the redis module is in __salt__
'''
if 'redis.set_key' in __salt__:
return __virtualname__
return False
def string(name, value, expire=None, expireat=None, **connection_args):
'''
Ensure that the key exists in redis with the value specified
name
Redis key to manage
value
Data to persist in key
expire
Sets time to live for key in seconds
expireat
Sets expiration time for key via UNIX timestamp, overrides `expire`
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Key already set to defined value'}
old_key = __salt__['redis.get_key'](name, **connection_args)
if old_key != value:
__salt__['redis.set_key'](name, value, **connection_args)
ret['changes'][name] = 'Value updated'
ret['comment'] = 'Key updated to new value'
if expireat:
__salt__['redis.expireat'](name, expireat, **connection_args)
ret['changes']['expireat'] = 'Key expires at {0}'.format(expireat)
elif expire:
__salt__['redis.expire'](name, expire, **connection_args)
ret['changes']['expire'] = 'TTL set to {0} seconds'.format(expire)
return ret
def absent(name, keys=None, **connection_args):
'''
Ensure key absent from redis
name
Key to ensure absent from redis
keys
list of keys to ensure absent, name will be ignored if this is used
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Key(s) specified already absent'}
if keys:
if not isinstance(keys, list):
ret['result'] = False
ret['comment'] = '`keys` not formed as a list type'
return ret
delete_list = [key for key in keys
if __salt__['redis.exists'](key, **connection_args)]
if not len(delete_list):
return ret
__salt__['redis.delete'](*delete_list, **connection_args)
ret['changes']['deleted'] = delete_list
ret['comment'] = 'Keys deleted'
return ret
if __salt__['redis.exists'](name, **connection_args):
__salt__['redis.delete'](name, **connection_args)
ret['comment'] = 'Key deleted'
ret['changes']['deleted'] = [name]
return ret
| Python | 0.000001 |
a2fff1b9613d6c4349fea5001313bf9e0d1cc052 | remove unnecessary import | pergenie/lib/utils/demo.py | pergenie/lib/utils/demo.py | from uuid import uuid4
from datetime import timedelta
from django.utils import timezone
from django.conf import settings
from apps.authentication.models import User
from apps.genome.models import Genome
from lib.utils import clogging
log = clogging.getColorLogger(__name__)
# TODO: @transaction.atomic
def create_demo_user():
'''Create demo user records.
- demo Genome is defined as:
- owner = one of the admin users
- file_name = settings.DEMO_GENOME_FILE_NAME
- demo User is defined as:
- is_demo = True
'''
admin_user = User.objects.filter(is_admin=True).last()
if not admin_user:
raise Exception, '[FATAL] Before create demo user, you need to create admin user: $ python manage.py createsuperuser'
# Init demo genome (once)
genome, is_created = Genome.objects.get_or_create(owner=admin_user,
file_name=settings.DEMO_GENOME_FILE_NAME,
display_name='Demo VCF',
file_format=Genome.FILE_FORMAT_VCF,
population=Genome.POPULATION_UNKNOWN,
sex=Genome.SEX_UNKNOWN)
# TODO: Init demo genotype (once)
# Init demo user
email = '{}@{}'.format(uuid4(), settings.DOMAIN)
demo_user = User.objects.create_user(username=email,
email=email,
password='',
is_demo=True)
genome.readers.add(demo_user)
return demo_user
def prune_demo_user():
'''Prune old (not logged in 30 days) demo user records.
'''
date_30_days_ago = timezone.now() - timedelta(30)
not_logged_in_30_days_demo_users = User.objects.filter(is_demo=True, last_login__lt=date_30_days_ago)
admin_users = User.objects.filter(is_admin=True)
demo_genomes = Genome.objects.filter(owner__in=admin_users, file_name=settings.DEMO_GENOME_FILE_NAME)
for genome in demo_genomes:
for user in not_logged_in_30_days_demo_users:
if user in genome.readers.all():
genome.readers.remove(user)
not_logged_in_30_days_demo_users.delete()
| from uuid import uuid4
from datetime import timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db import models, transaction
from django.utils import timezone
from django.conf import settings
from apps.authentication.models import User
from apps.genome.models import Genome
from lib.utils import clogging
log = clogging.getColorLogger(__name__)
# TODO: @transaction.atomic
def create_demo_user():
'''Create demo user records.
- demo Genome is defined as:
- owner = one of the admin users
- file_name = settings.DEMO_GENOME_FILE_NAME
- demo User is defined as:
- is_demo = True
'''
admin_user = User.objects.filter(is_admin=True).last()
if not admin_user:
raise Exception, '[FATAL] Before create demo user, you need to create admin user: $ python manage.py createsuperuser'
# Init demo genome (once)
genome, is_created = Genome.objects.get_or_create(owner=admin_user,
file_name=settings.DEMO_GENOME_FILE_NAME,
display_name='Demo VCF',
file_format=Genome.FILE_FORMAT_VCF,
population=Genome.POPULATION_UNKNOWN,
sex=Genome.SEX_UNKNOWN)
# TODO: Init demo genotype (once)
# Init demo user
email = '{}@{}'.format(uuid4(), settings.DOMAIN)
demo_user = User.objects.create_user(username=email,
email=email,
password='',
is_demo=True)
genome.readers.add(demo_user)
return demo_user
def prune_demo_user():
'''Prune old (not logged in 30 days) demo user records.
'''
date_30_days_ago = timezone.now() - timedelta(30)
not_logged_in_30_days_demo_users = User.objects.filter(is_demo=True, last_login__lt=date_30_days_ago)
admin_users = User.objects.filter(is_admin=True)
demo_genomes = Genome.objects.filter(owner__in=admin_users, file_name=settings.DEMO_GENOME_FILE_NAME)
for genome in demo_genomes:
for user in not_logged_in_30_days_demo_users:
if user in genome.readers.all():
genome.readers.remove(user)
not_logged_in_30_days_demo_users.delete()
| Python | 0.000037 |
a2a6b336295e65d29881e83ba45e1758c4582bbb | add available filters | corehq/apps/reports/standard/users/reports.py | corehq/apps/reports/standard/users/reports.py | from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import DatespanMixin, ProjectReport
from corehq.apps.reports.util import datespan_from_beginning
from corehq.apps.users.models import UserHistory
class UserHistoryReport(DatespanMixin, GenericTabularReport, ProjectReport):
slug = 'user_history'
name = ugettext_lazy("User History")
section_name = ugettext_lazy("User Management")
dispatcher = UserManagementReportDispatcher
# ToDo: Add pending filters
fields = [
'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
]
description = ugettext_lazy("History of user updates")
ajax_pagination = True
sortable = False
@property
def default_datespan(self):
return datespan_from_beginning(self.domain_object, self.timezone)
@property
def headers(self):
# ToDo: Add headers
h = [
DataTablesColumn(_("User")),
]
return DataTablesHeader(*h)
@property
def total_records(self):
return self._get_queryset().count()
@memoized
def _get_queryset(self):
# ToDo: add query based on params
return UserHistory.objects.none()
@property
def rows(self):
records = self._get_queryset().order_by('-changed_at')[
self.pagination.start:self.pagination.start + self.pagination.count
]
for record in records:
yield _user_history_row(record)
def _user_history_row(record):
# ToDo: add render for each row
return []
| from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from memoized import memoized
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.dispatcher import UserManagementReportDispatcher
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import ProjectReport
from corehq.apps.users.models import UserHistory
class UserHistoryReport(GenericTabularReport, ProjectReport):
slug = 'user_history'
name = ugettext_lazy("User History")
section_name = ugettext_lazy("User Management")
dispatcher = UserManagementReportDispatcher
# ToDo: Add filters
fields = []
description = ugettext_lazy("History of user updates")
ajax_pagination = True
sortable = False
@property
def headers(self):
# ToDo: Add headers
h = [
DataTablesColumn(_("User")),
]
return DataTablesHeader(*h)
@property
def total_records(self):
return self._get_queryset().count()
@memoized
def _get_queryset(self):
# ToDo: add query based on params
return UserHistory.objects.none()
@property
def rows(self):
records = self._get_queryset().order_by('-changed_at')[
self.pagination.start:self.pagination.start + self.pagination.count
]
for record in records:
yield _user_history_row(record)
def _user_history_row(record):
# ToDo: add render for each row
return []
| Python | 0 |
e1f6e98d7e3a1840567b1b5e379f87ec1e0aa9dc | add two more views | connector8/__openerp__.py | connector8/__openerp__.py | # -*- coding: utf-8 -*-
{'name': 'Connector8',
'version': '0.1',
'author': 'Openerp Connector Core Editors and Amdeb',
'license': 'AGPL-3',
'category': 'Generic Modules',
'description': """
This is a port of OCA connector to Odoo 8.0
""",
'depends': ['mail'
],
'data': ['security/connector_security.xml',
'security/ir.model.access.csv',
'queue/model_view.xml',
'queue/queue_data.xml',
'checkpoint/checkpoint_view.xml',
'connector_menu.xml',
'setting_view.xml',
'res_partner_view.xml',
],
'installable': True,
'application': True,
}
| # -*- coding: utf-8 -*-
{'name': 'Connector8',
'version': '0.1',
'author': 'Openerp Connector Core Editors and Amdeb',
'license': 'AGPL-3',
'category': 'Generic Modules',
'description': """
This is a port of OCA connector to Odoo 8.0
""",
'depends': ['mail'
],
'data': ['security/connector_security.xml',
'security/ir.model.access.csv',
'queue/model_view.xml',
'queue/queue_data.xml',
'checkpoint/checkpoint_view.xml',
'res_partner_view.xml',
],
'installable': True,
'application': True,
}
| Python | 0 |
7369244fbfcda67e1b14ebedd9fb9467fe5d8870 | Update module list wizard should not miss search view of Modules | bin/addons/base/module/wizard/wizard_update_module.py | bin/addons/base/module/wizard/wizard_update_module.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import netsvc
import pooler
class wizard_update_module(wizard.interface):
arch = '''<?xml version="1.0"?>
<form string="Scan for new modules">
<label string="This function will check for new modules in the 'addons' path" colspan="4" align="0.0"/>
</form>'''
fields = {
}
arch_module = '''<?xml version="1.0"?>
<form string="New modules">
<field name="update" colspan="4"/>
<field name="add" colspan="4"/>
</form>'''
fields_module = {
'update': {'type': 'integer', 'string': 'Number of modules updated', 'readonly': True},
'add': {'type': 'integer', 'string': 'Number of modules added', 'readonly': True},
}
def _update_module(self, cr, uid, data, context):
update, add = pooler.get_pool(cr.dbname).get('ir.module.module').update_list(cr, uid)
return {'update': update, 'add': add}
def _action_module_open(self, cr, uid, data, context):
res = {
'domain': str([]),
'name': 'Module List',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'ir.module.module',
'view_id': False,
'type': 'ir.actions.act_window'
}
search_view_id = pooler.get_pool(cr.dbname).get('ir.ui.view').search(cr, uid, [('name','=','ir.module.module.list.select')], context=context)
if search_view_id:
res.update({'search_view_id' : search_view_id[0]})
return res
states = {
'init': {
'actions': [],
'result': {'type': 'form', 'arch': arch, 'fields': fields,
'state': [
('end', 'Cancel', 'gtk-cancel'),
('update', 'Check new modules', 'gtk-ok', True)
]
}
},
'update': {
'actions': [_update_module],
'result': {'type': 'form', 'arch': arch_module, 'fields': fields_module,
'state': [
('open_window', 'Ok', 'gtk-ok', True)
]
}
},
'open_window': {
'actions': [],
'result': {'type': 'action', 'action': _action_module_open, 'state':'end'}
}
}
wizard_update_module('module.module.update')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import netsvc
import pooler
class wizard_update_module(wizard.interface):
arch = '''<?xml version="1.0"?>
<form string="Scan for new modules">
<label string="This function will check for new modules in the 'addons' path" colspan="4" align="0.0"/>
</form>'''
fields = {
}
arch_module = '''<?xml version="1.0"?>
<form string="New modules">
<field name="update" colspan="4"/>
<field name="add" colspan="4"/>
</form>'''
fields_module = {
'update': {'type': 'integer', 'string': 'Number of modules updated', 'readonly': True},
'add': {'type': 'integer', 'string': 'Number of modules added', 'readonly': True},
}
def _update_module(self, cr, uid, data, context):
update, add = pooler.get_pool(cr.dbname).get('ir.module.module').update_list(cr, uid)
return {'update': update, 'add': add}
def _action_module_open(self, cr, uid, data, context):
return {
'domain': str([]),
'name': 'Module List',
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'ir.module.module',
'view_id': False,
'type': 'ir.actions.act_window'
}
states = {
'init': {
'actions': [],
'result': {'type': 'form', 'arch': arch, 'fields': fields,
'state': [
('end', 'Cancel', 'gtk-cancel'),
('update', 'Check new modules', 'gtk-ok', True)
]
}
},
'update': {
'actions': [_update_module],
'result': {'type': 'form', 'arch': arch_module, 'fields': fields_module,
'state': [
('open_window', 'Ok', 'gtk-ok', True)
]
}
},
'open_window': {
'actions': [],
'result': {'type': 'action', 'action': _action_module_open, 'state':'end'}
}
}
wizard_update_module('module.module.update')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0 |
199f0f3e8e02f53f570e5463cc4350568771395f | Refactor tasks.py | edx_shopify/tasks.py | edx_shopify/tasks.py | from celery import Task
from celery.utils.log import get_task_logger
from .models import Order, OrderItem
from .utils import auto_enroll_email
logger = get_task_logger(__name__)
class ProcessOrder(Task):
"""Process a newly received order, and enroll learners in courses
using their email address.
On failure, store the order in an ERROR state.
"""
def __init__(self):
"""Set up an order as an instance member, so we can manipulate it both
from run() and from on_failure().
"""
self.order = None
def run(self, data):
"""Parse input data for line items, and create enrollments.
On any error, raise the exception in order to be handled by
on_failure().
"""
logger.debug('Processing order data: %s' % data)
self.order = Order.objects.get(id=data['id'])
# If the order is anything but UNPROCESSED, abandon the attempt.
if self.order.status != Order.UNPROCESSED:
logger.warning('Order %s has already '
'been processed, ignoring' % self.order.id)
return
# Mark the order as being processed.
self.order.status = Order.PROCESSING
self.order.save()
# Process line items
for item in data['line_items']:
logger.debug('Processing line item: %s' % item)
try:
sku = item['sku']
email = next(
p['value'] for p in item['properties']
if p['name'] == 'email'
)
except:
logger.error('Malformed line item %s in order %s, '
'unable to process' % (item, self.order.id))
raise
# Store line item
order_item, created = OrderItem.objects.get_or_create(
order=self.order,
sku=sku,
email=email
)
if order_item.status == OrderItem.UNPROCESSED:
try:
# Enroll the email in the course
auto_enroll_email(sku, email)
except:
logger.error('Unable to enroll '
'%s in %s' % (email, sku))
raise
# Mark the item as processed
order_item.status = OrderItem.PROCESSED
order_item.save()
logger.debug('Successfully processed line item '
'%s for order %s' % (item, self.order.id))
# Mark the order status
self.order.status = Order.PROCESSED
logger.error('Successfully processed '
'order %s' % self.order.id)
self.order.save()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""Handle the run() method having raised an exception: log an
exception stack trace and a prose message, save the order with
an ERROR status.
"""
logger.error(exc, exc_info=True)
logger.error('Failed to fully '
'process order %s '
'(task ID %s)' % (self.order.id,
task_id))
self.order.status = Order.ERROR
self.order.save()
| from celery import Task
from celery.utils.log import get_task_logger
from .models import Order, OrderItem
from .utils import auto_enroll_email
logger = get_task_logger(__name__)
class ProcessOrder(Task):
"""
Process order creation event.
"""
def run(self, data):
logger.debug('Processing order data: %s' % data)
order = Order.objects.get(id=data['id'])
# If the order is anything but UNPROCESSED, abandon the attempt.
if order.status != Order.UNPROCESSED:
logger.warning('Order %s has already '
'been processed, ignoring' % order.id)
return
# Mark the order as being processed.
order.status = Order.PROCESSING
order.save()
# Process line items
order_error = False
for item in data['line_items']:
logger.debug('Processing line item: %s' % item)
try:
sku = item['sku']
email = next(
p['value'] for p in item['properties']
if p['name'] == 'email'
)
except (KeyError, StopIteration):
order_error = True
logger.error('Malformed line item %s in order %s, '
'unable to process' % (item, order.id))
continue
# Store line item
order_item, created = OrderItem.objects.get_or_create(
order=order,
sku=sku,
email=email
)
if order_item.status == OrderItem.UNPROCESSED:
try:
# Enroll the email in the course
auto_enroll_email(sku, email)
except:
logger.error('Unable to enroll '
'%s in %s' % (email, sku))
order_error = True
order_item.status = OrderItem.ERROR
order_item.save()
continue
# Mark the item as processed
order_item.status = OrderItem.PROCESSED
order_item.save()
logger.debug('Successfully processed line item '
'%s for order %s' % (item, order.id))
elif order_item.status == OrderItem.ERROR:
order_error = True
# Mark the order status
if order_error:
order.status = Order.ERROR
logger.error('Failed to fully '
'process order %s' % order.id)
else:
order.status = Order.PROCESSED
logger.error('Successfully processed '
'order %s' % order.id)
order.save()
| Python | 0 |
fe96f6539b40a880e88f7efe8502279cea1de506 | update test | corehq/apps/accounting/tests/test_model_validation.py | corehq/apps/accounting/tests/test_model_validation.py | from datetime import date
from django.core.exceptions import ValidationError
from corehq.apps.accounting.models import (
BillingAccount,
CreditAdjustment,
Invoice,
LineItem,
Subscriber,
Subscription,
)
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
class TestCreditAdjustmentValidation(BaseAccountingTest):
def tearDown(self):
CreditAdjustment.objects.all().delete()
LineItem.objects.all().delete()
Invoice.objects.all().delete()
generator.delete_all_subscriptions()
generator.delete_all_accounts()
super(TestCreditAdjustmentValidation, self).tearDown()
def test_clean(self):
account = BillingAccount.objects.create(
name='Test Account',
created_by='test@example.com',
currency=generator.init_default_currency(),
)
subscription = Subscription.objects.create(
account=account,
date_start=date.today(),
plan_version=generator.subscribable_plan(),
subscriber=Subscriber.objects.create(domain='test')
)
invoice = Invoice.objects.create(
date_start=date.today(),
date_end=date.today(),
subscription=subscription,
)
line_item = LineItem.objects.create(
invoice=invoice,
)
with self.assertRaises(ValidationError):
try:
CreditAdjustment(
invoice=invoice,
line_item=line_item,
).save()
except ValidationError as e:
self.assertIn('__all__', e.error_dict)
raise e
| from datetime import date
from django.core.exceptions import ValidationError
from django.test import TransactionTestCase
from corehq.apps.accounting.models import (
BillingAccount,
CreditAdjustment,
Invoice,
LineItem,
Subscriber,
Subscription,
)
from corehq.apps.accounting.tests import generator
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
class TestCreditAdjustmentValidation(BaseAccountingTest):
def tearDown(self):
CreditAdjustment.objects.all().delete()
LineItem.objects.all().delete()
Invoice.objects.all().delete()
generator.delete_all_subscriptions()
generator.delete_all_accounts()
super(TestCreditAdjustmentValidation, self).tearDown()
def test_clean(self):
account = BillingAccount.objects.create(
currency=generator.init_default_currency(),
)
subscription = Subscription.objects.create(
account=account,
date_start=date.today(),
plan_version=generator.subscribable_plan(),
subscriber=Subscriber.objects.create(domain='test')
)
invoice = Invoice.objects.create(
date_start=date.today(),
date_end=date.today(),
subscription=subscription,
)
line_item = LineItem.objects.create(
invoice=invoice,
)
with self.assertRaises(ValidationError):
try:
CreditAdjustment(
invoice=invoice,
line_item=line_item,
).save()
except ValidationError as e:
self.assertIn('__all__', e.error_dict)
raise e
| Python | 0.000001 |
7c46287f7f7b0b18d671dc91e69668961c98adee | update test_flake8 | _unittests/ut_module/test_flake8.py | _unittests/ut_module/test_flake8.py | """
@brief test log(time=0s)
"""
import sys
import os
import unittest
import warnings
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src",)))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import check_pep8
from pyquickhelper.pycode.utils_tests_helper import _extended_refactoring
class TestFlake8(unittest.TestCase):
def test_flake8_src(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2 or "Anaconda" in sys.executable \
or "condavir" in sys.executable:
warnings.warn(
"skipping test_flake8 because of Python 2 or " + sys.executable)
return
thi = os.path.abspath(os.path.dirname(__file__))
src_ = os.path.normpath(os.path.join(thi, "..", "..", "src"))
check_pep8(src_, fLOG=fLOG, extended=[("fLOG", _extended_refectoring)])
def test_flake8_test(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2 or "Anaconda" in sys.executable \
or "condavir" in sys.executable:
warnings.warn(
"skipping test_flake8 because of Python 2 or " + sys.executable)
return
thi = os.path.abspath(os.path.dirname(__file__))
test = os.path.normpath(os.path.join(thi, "..", ))
check_pep8(test, fLOG=fLOG, neg_filter="temp_.*",
skip=["'src' imported but unused",
"'skip_' imported but unused",
"'skip__' imported but unused",
"'skip___' imported but unused",
],
extended=[("fLOG", _extended_refectoring)])
if __name__ == "__main__":
unittest.main()
| """
@brief test log(time=0s)
"""
import sys
import os
import unittest
import warnings
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src",)))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import check_pep8
from pyquickhelper.pycode.utils_tests_helper import _extended_refectoring
class TestFlake8(unittest.TestCase):
def test_flake8_src(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2 or "Anaconda" in sys.executable \
or "condavir" in sys.executable:
warnings.warn(
"skipping test_flake8 because of Python 2 or " + sys.executable)
return
thi = os.path.abspath(os.path.dirname(__file__))
src_ = os.path.normpath(os.path.join(thi, "..", "..", "src"))
check_pep8(src_, fLOG=fLOG, extended=[("fLOG", _extended_refectoring)])
def test_flake8_test(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
if sys.version_info[0] == 2 or "Anaconda" in sys.executable \
or "condavir" in sys.executable:
warnings.warn(
"skipping test_flake8 because of Python 2 or " + sys.executable)
return
thi = os.path.abspath(os.path.dirname(__file__))
test = os.path.normpath(os.path.join(thi, "..", ))
check_pep8(test, fLOG=fLOG, neg_filter="temp_.*",
skip=["'src' imported but unused",
"'skip_' imported but unused",
"'skip__' imported but unused",
"'skip___' imported but unused",
],
extended=[("fLOG", _extended_refectoring)])
if __name__ == "__main__":
unittest.main()
| Python | 0.000001 |
c2f3ca6c2e3c2810b3c881e09cc613bfe15e598c | Fix up some small coding errors | alerts/geomodel_alert.py | alerts/geomodel_alert.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
import json
import os
import sys
import traceback
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, QueryStringMatch as QSMatch
from mozdef_util.utilities.logger import logger
import alerts.geomodel.alert as alert
import alerts.geomodel.config as config
import alerts.geomodel.locality as locality
_CONFIG_FILE = os.path.join(
os.path.dirname(__file__),
'geomodel_alert.json')
class AlertGeoModel(AlertTask):
'''GeoModel alert runs a set of configured queries for events and
constructs locality state for users performing authenticated actions.
When activity is found that indicates a potential compromise of an
account, an alert is produced.
'''
def main(self):
cfg = self._load_config()
for query_index in range(len(cfg.events)):
try:
self._process(cfg, query_index)
except Exception as err:
traceback.print_exc(file=sys.stdout)
logger.error(
'Error process events; query="{0}"; error={1}'.format(
cfg.events[query_index].lucene_query,
err))
def onAggregation(self, agg):
username = agg['value']
events = agg['events']
cfg = agg['config']
localities = list(filter(
lambda state: state is not None,
map(locality.from_event, events)))
new_state = locality.State('locality', username, localities)
query = locality.wrap_query(self.es)
journal = locality.wrap_journal(self.es)
entry = locality.find(query, username, cfg.localities.es_index)
if entry is None:
entry = locality.Entry(
'', locality.State('localities', username, []))
updated = locality.Update.flat_map(
lambda state: locality.remove_outdated(
state,
cfg.localities.valid_duration_days),
locality.update(entry.state, new_state))
if updated.did_update:
entry.state = updated.state
journal(entry, cfg.localities.es_index)
new = alert.alert(entry.state, cfg.alerts.whitelist)
if new is not None:
# TODO: When we update to Python 3.7+, change to asdict(alert_produced)
alert_dict = self.createAlertDict(
new.summary,
new.category,
new.tags,
events)
alert_dict['details'] = {
'username': new.username,
'sourceipaddress': new.sourceipaddress,
'origin': dict(new.origin._asdict())
}
return alert_dict
return None
def _process(self, cfg: config.Config, qindex: int):
evt_cfg = cfg.events[qindex]
search = SearchQuery(minutes=evt_cfg.search_window.minutes)
search.add_must(QSMatch(evt_cfg.lucene_query))
self.filtersManual(search)
self.searchEventsAggregated(evt_cfg.username_path, samplesLimit=1000)
self.walkAggregations(threshold=1, config=cfg)
def _load_config(self):
with open(_CONFIG_FILE) as cfg_file:
cfg = json.load(cfg_file)
cfg['localities'] = config.Localities(**cfg['localities'])
for i, event in enumerate(cfg['events']):
cfg['events'][i]['search_window'] = config.SearchWindow(
**cfg['events'][i]['search_window'])
cfg['events'] = [config.Events(**dat) for dat in cfg['events']]
cfg['alerts']['whitelist'] = config.Whitelist(
**cfg['alerts']['whitelist'])
cfg['alerts'] = config.Alerts(**cfg['alerts'])
return config.Config(**cfg)
| #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# Copyright (c) 2015 Mozilla Corporation
import json
import os
import sys
import traceback
from lib.alerttask import AlertTask
from mozdef_util.query_models import SearchQuery, QueryStringMatch as QSMatch
from mozdef_util.utilities.logger import logger
import alerts.geomodel.alert as alert
import alerts.geomodel.config as config
import alerts.geomodel.locality as locality
_CONFIG_FILE = os.path.join(
os.path.dirname(__file__),
'geomodel_alert.json')
class AlertGeoModel(AlertTask):
'''GeoModel alert runs a set of configured queries for events and
constructs locality state for users performing authenticated actions.
When activity is found that indicates a potential compromise of an
account, an alert is produced.
'''
def main(self):
cfg = self._load_config()
for query_index in range(len(cfg.events)):
try:
self._process(cfg, query_index)
except Exception as err:
traceback.print_exc(file=sys.stdout)
logger.error(
'Error process events; query="{0}"; error={1}'.format(
cfg.events[query_index].lucene_query,
err.message))
def onAggregation(self, agg):
username = agg['value']
events = agg['events']
cfg = agg['config']
localities = list(filter(map(locality.from_event, events)))
new_state = locality.State('locality', username, localities)
query = locality.wrap_query(self.es)
journal = locality.wrap_journal(self.es)
entry = locality.find(query, username, cfg.localities.es_index)
if entry is None:
entry = locality.Entry(
'', locality.State('localities', username, []))
updated = locality.Update.flat_map(
lambda state: locality.remove_outdated(
state,
cfg.localities.valid_duration_days),
locality.update(entry.state, new_state))
if updated.did_update:
entry.state = updated.state
journal(entry, cfg.localities.es_index)
new = alert.alert(entry.state, cfg.alerts.whitelist)
if new is not None:
# TODO: When we update to Python 3.7+, change to asdict(alert_produced)
alert_dict = self.createAlertDict(
new.summary,
new.category,
new.tags,
events)
alert_dict['details'] = {
'username': new.username,
'sourceipaddress': new.sourceipaddress,
'origin': dict(new.origin._asdict())
}
return alert_dict
return None
def _process(self, cfg: config.Config, qindex: int):
evt_cfg = cfg.events[qindex]
search = SearchQuery(minutes=evt_cfg.search_window.minutes)
search.add_must(QSMatch(evt_cfg.lucene_query))
self.filtersManual(search)
self.searchEventsAggregated(evt_cfg.username_path, samplesLimit=1000)
self.walkAggregations(threshold=1, config=cfg)
def _load_config(self):
with open(_CONFIG_FILE) as cfg_file:
cfg = json.load(cfg_file)
cfg['localities'] = config.Localities(**cfg['localities'])
for i, event in enumerate(cfg['events']):
cfg['events'][i]['search_window'] = config.SearchWindow(
**cfg['events'][i]['search_window'])
cfg['events'] = [config.Events(**dat) for dat in cfg['events']]
cfg['alerts']['whitelist'] = config.Whitelist(
**cfg['alerts']['whitelist'])
cfg['alerts'] = config.Alerts(**cfg['alerts'])
return config.Config(**cfg)
| Python | 0.999999 |
4f040d1d7730ee611f0c4a6768ecc181c6a43ff7 | Fix broken view test for select seats | karspexet/ticket/tests/test_views.py | karspexet/ticket/tests/test_views.py | # coding: utf-8
from django.shortcuts import reverse
from django.test import TestCase, RequestFactory
from django.utils import timezone
from karspexet.show.models import Show, Production
from karspexet.ticket import views
from karspexet.venue.models import Venue, SeatingGroup
import pytest
class TestHome(TestCase):
def setUp(self):
rf = RequestFactory()
self.request = rf.get(reverse(views.home))
self.tomorrow = timezone.now() + timezone.timedelta(days=1)
def test_home_lists_visible_upcoming_shows(self):
venue = Venue.objects.create(name="Teater 1")
production = Production.objects.create(name="Uppsättningen")
yesterday = timezone.now() - timezone.timedelta(days=1)
show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
old_show = Show.objects.create(date=yesterday, production=production, venue=venue)
response = views.home(self.request)
shows = response.context_data["upcoming_shows"]
assert show in shows
assert old_show not in shows
def test_home_contains_only_visible_shows(self):
venue = Venue.objects.create(name="Teater 1")
production = Production.objects.create(name="Uppsättningen")
show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
response = views.home(self.request)
shows = response.context_data["upcoming_shows"]
assert show in shows
assert invisible_show not in shows
class TestSelect_seats(TestCase):
def test_select_seats(self):
venue = Venue.objects.create(name="Teater 1")
seatinggroup = SeatingGroup.objects.create(name="prisgrupp 1", venue=venue)
production = Production.objects.create(name="Uppsättningen")
show = Show.objects.create(date=timezone.now(), production=production, venue=venue)
response = self.client.get(reverse(views.select_seats, args=[show.slug]))
self.assertContains(response, "Köp biljetter för Uppsättningen")
| # coding: utf-8
from django.shortcuts import reverse
from django.test import TestCase, RequestFactory
from django.utils import timezone
from karspexet.show.models import Show, Production
from karspexet.ticket import views
from karspexet.venue.models import Venue, SeatingGroup
import pytest
class TestHome(TestCase):
def setUp(self):
rf = RequestFactory()
self.request = rf.get(reverse(views.home))
self.tomorrow = timezone.now() + timezone.timedelta(days=1)
def test_home_lists_visible_upcoming_shows(self):
venue = Venue.objects.create(name="Teater 1")
production = Production.objects.create(name="Uppsättningen")
yesterday = timezone.now() - timezone.timedelta(days=1)
show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
old_show = Show.objects.create(date=yesterday, production=production, venue=venue)
response = views.home(self.request)
shows = response.context_data["upcoming_shows"]
assert show in shows
assert old_show not in shows
def test_home_contains_only_visible_shows(self):
venue = Venue.objects.create(name="Teater 1")
production = Production.objects.create(name="Uppsättningen")
show = Show.objects.create(date=self.tomorrow, production=production, venue=venue)
invisible_show = Show.objects.create(date=self.tomorrow, production=production, venue=venue, visible=False)
response = views.home(self.request)
shows = response.context_data["upcoming_shows"]
assert show in shows
assert invisible_show not in shows
class TestSelect_seats(TestCase):
def test_select_seats(self):
venue = Venue.objects.create(name="Teater 1")
seatinggroup = SeatingGroup.objects.create(name="prisgrupp 1", venue=venue)
production = Production.objects.create(name="Uppsättningen")
show = Show.objects.create(date=timezone.now(), production=production, venue=venue)
response = self.client.get(reverse(views.select_seats, args=[show.id]))
self.assertContains(response, "Köp biljetter för Uppsättningen")
| Python | 0.000001 |
e57e4152229132c9a8a8a13bf0904b58f7edf6f8 | Update send_email.py | keepercommander/custom/send_email.py | keepercommander/custom/send_email.py | # _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2022 Keeper Security Inc.
# Contact: commander@keepersecurity.com
#
# Example code to run a BreachWatch status report for
# all users, and send users an email reminder to address their
# found issues. SMTP credentials must be supplied via a vault record
# in order to send the mail.
#
# This example also pulls configuration
# from config.json or writes the config file if it does not exist.
#
# Usage:
# python send_email.py
import base64
import getpass
import json
import os
import ssl
from smtplib import SMTP
from keepercommander import api, vault_extensions, vault
from keepercommander.commands.enterprise import SecurityAuditReportCommand
from keepercommander.params import KeeperParams
email_message = '''
From: {0}
Subject: Keeper BreachWatch Alert
Keeper BreachWatch detected the records at risk in your vault.
Please login to Keeper and review the records marked "At Risk"
'''
def read_config_file(params):
params.config_filename = os.path.join(os.path.dirname(__file__), 'config.json')
if os.path.isfile(params.config_filename):
with open(params.config_filename, 'r') as f:
params.config = json.load(f)
if 'user' in params.config:
params.user = params.config['user']
if 'password' in params.config:
params.password = params.config['password']
if 'mfa_token' in params.config:
params.mfa_token = params.config['mfa_token']
if 'server' in params.config:
params.server = params.config['server']
if 'device_id' in params.config:
device_id = base64.urlsafe_b64decode(params.config['device_id'] + '==')
params.rest_context.device_id = device_id
my_params = KeeperParams()
read_config_file(my_params)
while not my_params.user:
my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)
while not my_params.password:
my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)
report_command = SecurityAuditReportCommand()
report_json = report_command.execute(my_params, breachwatch=True, format='json')
report = json.loads(report_json)
emails = [x['email'] for x in report if x.get('at_risk') > 5]
if emails:
api.sync_down(my_params)
smtp_record = next(vault_extensions.find_records(my_params, search_str='smtp', record_type='serverCredentials'), None)
if isinstance(smtp_record, vault.TypedRecord):
smtp_host = None
smtp_port = 0
username = None
password = None
field = smtp_record.get_typed_field('host')
if field:
host_value = field.get_default_value()
if isinstance(host_value, dict):
smtp_host = host_value.get('hostName')
port = host_value.get('port')
if port:
try:
smtp_port = int(port)
except ValueError:
pass
if smtp_host:
field = smtp_record.get_typed_field('login')
if field:
username = field.get_default_value()
field = smtp_record.get_typed_field('password')
if field:
password = field.get_default_value()
if smtp_host:
with SMTP(host=smtp_host, port=smtp_port) as connection:
if username:
connection.starttls(context=ssl.create_default_context())
connection.login(user=username, password=password)
connection.sendmail(my_params.user, emails, email_message.format(my_params.user))
| # _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2022 Keeper Security Inc.
# Contact: commander@keepersecurity.com
#
# Example code to retrieve the password for a record
# stored in the vault. This example also pulls configuration
# from config.json or writes the config file if it does not exist.
#
# Usage:
# python send_email.py
import base64
import getpass
import json
import os
import ssl
from smtplib import SMTP
from keepercommander import api, vault_extensions, vault
from keepercommander.commands.enterprise import SecurityAuditReportCommand
from keepercommander.params import KeeperParams
email_message = '''
From: {0}
Subject: Keeper BreachWatch Alert
Keeper BreachWatch detected the records at risk in your vault.
Please login to Keeper and review the records marked "At Risk"
'''
def read_config_file(params):
params.config_filename = os.path.join(os.path.dirname(__file__), 'config.json')
if os.path.isfile(params.config_filename):
with open(params.config_filename, 'r') as f:
params.config = json.load(f)
if 'user' in params.config:
params.user = params.config['user']
if 'password' in params.config:
params.password = params.config['password']
if 'mfa_token' in params.config:
params.mfa_token = params.config['mfa_token']
if 'server' in params.config:
params.server = params.config['server']
if 'device_id' in params.config:
device_id = base64.urlsafe_b64decode(params.config['device_id'] + '==')
params.rest_context.device_id = device_id
my_params = KeeperParams()
read_config_file(my_params)
while not my_params.user:
my_params.user = getpass.getpass(prompt='User(Email): ', stream=None)
while not my_params.password:
my_params.password = getpass.getpass(prompt='Master Password: ', stream=None)
report_command = SecurityAuditReportCommand()
report_json = report_command.execute(my_params, breachwatch=True, format='json')
report = json.loads(report_json)
emails = [x['email'] for x in report if x.get('at_risk') > 5]
if emails:
api.sync_down(my_params)
smtp_record = next(vault_extensions.find_records(my_params, search_str='smtp', record_type='serverCredentials'), None)
if isinstance(smtp_record, vault.TypedRecord):
smtp_host = None
smtp_port = 0
username = None
password = None
field = smtp_record.get_typed_field('host')
if field:
host_value = field.get_default_value()
if isinstance(host_value, dict):
smtp_host = host_value.get('hostName')
port = host_value.get('port')
if port:
try:
smtp_port = int(port)
except ValueError:
pass
if smtp_host:
field = smtp_record.get_typed_field('login')
if field:
username = field.get_default_value()
field = smtp_record.get_typed_field('password')
if field:
password = field.get_default_value()
if smtp_host:
with SMTP(host=smtp_host, port=smtp_port) as connection:
if username:
connection.starttls(context=ssl.create_default_context())
connection.login(user=username, password=password)
connection.sendmail(my_params.user, emails, email_message.format(my_params.user))
| Python | 0.000001 |
14f0ed32b62e2d00443e99428516a2d17a68bc58 | Use COMPLEX_TEST_STRING for testing | coalib/tests/processes/communication/LogMessageTest.py | coalib/tests/processes/communication/LogMessageTest.py | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, ".")
from coalib.misc.i18n import _
from coalib.misc.StringConstants import StringConstants
from coalib.processes.communication.LOG_LEVEL import LOG_LEVEL
from coalib.processes.communication.LogMessage import LogMessage
import unittest
class LogMessageTestCase(unittest.TestCase):
def setUp(self):
self.uut = LogMessage()
def test_construction(self):
# take a look if defaults are good
self.assertEqual(self.uut.log_level, LOG_LEVEL.DEBUG)
self.assertEqual(self.uut.message, "")
# see that arguments are processed right
self.uut = LogMessage(LOG_LEVEL.WARNING, "a msg")
self.assertEqual(self.uut.log_level, LOG_LEVEL.WARNING)
self.assertEqual(self.uut.message, "a msg")
def test_to_str(self):
self.uut.message = StringConstants.COMPLEX_TEST_STRING
self.uut.log_level = LOG_LEVEL.ERROR
self.assertEqual(str(self.uut), "[{}] {}".format(_("ERROR"), StringConstants.COMPLEX_TEST_STRING))
self.uut.log_level = LOG_LEVEL.WARNING
self.assertEqual(str(self.uut), "[{}] {}".format(_("WARNING"), StringConstants.COMPLEX_TEST_STRING))
self.uut.log_level = LOG_LEVEL.DEBUG
self.assertEqual(str(self.uut), "[{}] {}".format(_("DEBUG"), StringConstants.COMPLEX_TEST_STRING))
if __name__ == '__main__':
unittest.main(verbosity=2)
| """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, ".")
from coalib.misc.i18n import _
from coalib.processes.communication.LOG_LEVEL import LOG_LEVEL
from coalib.processes.communication.LogMessage import LogMessage
import unittest
class LogMessageTestCase(unittest.TestCase):
def setUp(self):
self.uut = LogMessage()
def test_construction(self):
# take a look if defaults are good
self.assertEqual(self.uut.log_level, LOG_LEVEL.DEBUG)
self.assertEqual(self.uut.message, "")
# see that arguments are processed right
self.uut = LogMessage(LOG_LEVEL.WARNING, "a msg")
self.assertEqual(self.uut.log_level, LOG_LEVEL.WARNING)
self.assertEqual(self.uut.message, "a msg")
def test_to_str(self):
self.uut.message = "test message änd umlauts!"
self.uut.log_level = LOG_LEVEL.ERROR
self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("ERROR")))
self.uut.log_level = LOG_LEVEL.WARNING
self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("WARNING")))
self.uut.log_level = LOG_LEVEL.DEBUG
self.assertEqual(str(self.uut), "[{}] test message änd umlauts!".format(_("DEBUG")))
if __name__ == '__main__':
unittest.main(verbosity=2)
| Python | 0.000001 |
7cbf46b1c44791b6a1466b08e049b568d32cf2d3 | fix soil.tests.test_download_base:TestBlobDownload | corehq/ex-submodules/soil/tests/test_download_base.py | corehq/ex-submodules/soil/tests/test_download_base.py | from __future__ import absolute_import
from __future__ import unicode_literals
from io import BytesIO
from uuid import uuid4
from django.test import TestCase
from soil import BlobDownload
from soil.util import expose_blob_download
from corehq.blobs.tests.util import new_meta, TemporaryFilesystemBlobDB
class TestBlobDownload(TestCase):
identifier = 'identifier'
@classmethod
def setUpClass(cls):
super(TestBlobDownload, cls).setUpClass()
cls.db = TemporaryFilesystemBlobDB()
@classmethod
def tearDownClass(cls):
cls.db.close()
super(TestBlobDownload, cls).tearDownClass()
def test_expose_blob_download(self):
ref = expose_blob_download(
self.identifier,
expiry=60,
content_disposition='text/xml',
)
self.db.put(BytesIO(b'content'), meta=new_meta(key=ref.download_id))
response = BlobDownload.get(ref.download_id).toHttpResponse()
self.assertEqual(next(response.streaming_content), b'content')
def test_expose_blob_download_with_legacy_download_id(self):
self.db.put(BytesIO(b'legacy-blob'), self.identifier)
ref = BlobDownload(
self.identifier,
mimetype='text/plain',
content_disposition='text/xml',
)
ref.download_id = uuid4().hex # old download id format
ref.save(60)
response = BlobDownload.get(ref.download_id).toHttpResponse()
self.assertEqual(next(response.streaming_content), b'legacy-blob')
| from __future__ import absolute_import
from __future__ import unicode_literals
from io import BytesIO
from uuid import uuid4
from django.test import TestCase
from soil import BlobDownload
from soil.util import expose_blob_download
from corehq.blobs.tests.util import new_meta, TemporaryFilesystemBlobDB
class TestBlobDownload(TestCase):
identifier = 'identifier'
@classmethod
def setUpClass(cls):
super(TestBlobDownload, cls).setUpClass()
cls.db = TemporaryFilesystemBlobDB()
@classmethod
def tearDownClass(cls):
cls.db.close()
super(TestBlobDownload, cls).tearDownClass()
def test_expose_blob_download(self):
ref = expose_blob_download(
self.identifier,
expiry=60,
content_disposition='text/xml',
)
self.db.put(BytesIO(b'content'), meta=new_meta(key=ref.download_id))
response = BlobDownload.get(ref.download_id).toHttpResponse()
self.assertEqual(next(response.streaming_content), 'content')
def test_expose_blob_download_with_legacy_download_id(self):
self.db.put(BytesIO(b'legacy-blob'), self.identifier)
ref = BlobDownload(
self.identifier,
mimetype='text/plain',
content_disposition='text/xml',
)
ref.download_id = uuid4().hex # old download id format
ref.save(60)
response = BlobDownload.get(ref.download_id).toHttpResponse()
self.assertEqual(next(response.streaming_content), 'legacy-blob')
| Python | 0.000001 |
b15450fb774b8c6dbb1b1c181555b29f5846bc40 | test mqttclient | ambulances/mqttupdate.py | ambulances/mqttupdate.py | import atexit, sys, os, time
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from .mqttclient import BaseClient, MQTTException
from .models import client, Ambulance, Equipment, \
HospitalEquipment, Hospital
from .serializers import AmbulanceSerializer, HospitalSerializer, \
HospitalEquipmentSerializer, EquipmentSerializer, \
ExtendedProfileSerializer
# UpdateClient class
class UpdateClient(BaseClient):
def on_disconnect(self, client, userdata, rc):
# Exception is generated only if never connected
if not self.connected and rc:
raise MQTTException('Disconnected',
rc)
def update_topic(self, topic, serializer, qos=0, retain=False):
# Publish to topic
self.publish(topic,
JSONRenderer().render(serializer.data),
qos=qos,
retain=retain)
def remove_topic(self, topic, serializer, qos=0):
# Publish null to retained topic
self.publish(topic,
null,
qos=qos,
retain=True)
def update_profile(self, profile, qos=2, retain=True):
self.update_topic('user/{}/profile'.format(profile.user.username),
ExtendedProfileSerializer(profile),
qos=qos,
retain=retain)
def update_ambulance(self, ambulance, qos=2, retain=True):
self.update_topic('ambulance/{}/data'.format(ambulance.id),
AmbulanceSerializer(ambulance),
qos=qos,
retain=retain)
def remove_ambulance(self, ambulance):
self.remove_topic('ambulance/{}/data'.format(ambulance.id))
def update_hospital(self, hospital, qos=2, retain=True):
self.update_topic('hospital/{}/data'.format(hospital.id),
HospitalSerializer(hospital),
qos=qos,
retain=retain)
def remove_hospital(self, hospital):
self.remove_topic('hospital/{}/data'.format(hospital.id))
self.remove_topic('hospital/{}/metadata'.format(hospital.id))
def update_hospital_metadata(self, hospital, qos=2, retain=True):
hospital_equipment = hospital.hospitalequipment_set.values('equipment')
equipment = Equipment.objects.filter(id__in=hospital_equipment)
self.update_topic('hospital/{}/metadata'.format(hospital.id),
EquipmentSerializer(equipment, many=True),
qos=qos,
retain=retain)
def update_hospital_equipment(self, equipment, qos=2, retain=True):
self.update_topic('hospital/{}/equipment/{}/data'.format(equipment.hospital.id,
equipment.equipment.name),
HospitalEquipmentSerializer(equipment),
qos=qos,
retain=retain)
def remove_hospital_equipment(self, equipment):
self.remove_topic('hospital/{}/equipment/{}/data'.format(equipment.hospital.id,
equipment.equipment.name))
# Start client
from django.core.management.base import OutputWrapper
from django.core.management.color import color_style, no_style
from django.conf import settings
stdout = OutputWrapper(sys.stdout)
style = color_style()
# Instantiate broker
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'mqttupdate_' + str(os.getpid())
try:
# try to connect
print('Connecting to MQTT brocker...')
local_client = UpdateClient(broker, stdout, style, 0)
# wait for connection
while not local_client.connected:
local_client.loop()
# start loop
local_client.loop_start()
# register atexit handler to make sure it disconnects at exit
atexit.register(local_client.disconnect)
client = local_client
except Exception as e:
print('Could not connect to MQTT brocker. Using dumb client...')
| import atexit, sys, os, time
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from .mqttclient import BaseClient, MQTTException
from .models import client, Ambulance, Equipment, \
HospitalEquipment, Hospital
from .serializers import AmbulanceSerializer, HospitalSerializer, \
HospitalEquipmentSerializer, EquipmentSerializer, \
ExtendedProfileSerializer
# UpdateClient class
class UpdateClient(BaseClient):
def on_disconnect(self, client, userdata, rc):
# Exception is generated only if never connected
if not self.connected and rc:
raise MQTTException('Disconnected',
rc)
def publish(self, topic, message, *vargs, **kwargs):
self.client.publish(topic, message, *vargs, **kwargs)
def update_topic(self, topic, serializer, qos=0, retain=False):
# Publish to topic
self.publish(topic,
JSONRenderer().render(serializer.data),
qos=qos,
retain=retain)
def remove_topic(self, topic, serializer, qos=0):
# Publish null to retained topic
self.publish(topic,
null,
qos=qos,
retain=True)
def update_profile(self, profile, qos=2, retain=True):
self.update_topic('user/{}/profile'.format(profile.user.username),
ExtendedProfileSerializer(profile),
qos=qos,
retain=retain)
def update_ambulance(self, ambulance, qos=2, retain=True):
self.update_topic('ambulance/{}/data'.format(ambulance.id),
AmbulanceSerializer(ambulance),
qos=qos,
retain=retain)
def remove_ambulance(self, ambulance):
self.remove_topic('ambulance/{}/data'.format(ambulance.id))
def update_hospital(self, hospital, qos=2, retain=True):
self.update_topic('hospital/{}/data'.format(hospital.id),
HospitalSerializer(hospital),
qos=qos,
retain=retain)
def remove_hospital(self, hospital):
self.remove_topic('hospital/{}/data'.format(hospital.id))
self.remove_topic('hospital/{}/metadata'.format(hospital.id))
def update_hospital_metadata(self, hospital, qos=2, retain=True):
hospital_equipment = hospital.hospitalequipment_set.values('equipment')
equipment = Equipment.objects.filter(id__in=hospital_equipment)
self.update_topic('hospital/{}/metadata'.format(hospital.id),
EquipmentSerializer(equipment, many=True),
qos=qos,
retain=retain)
def update_hospital_equipment(self, equipment, qos=2, retain=True):
self.update_topic('hospital/{}/equipment/{}/data'.format(equipment.hospital.id,
equipment.equipment.name),
HospitalEquipmentSerializer(equipment),
qos=qos,
retain=retain)
def remove_hospital_equipment(self, equipment):
self.remove_topic('hospital/{}/equipment/{}/data'.format(equipment.hospital.id,
equipment.equipment.name))
# Start client
from django.core.management.base import OutputWrapper
from django.core.management.color import color_style, no_style
from django.conf import settings
stdout = OutputWrapper(sys.stdout)
style = color_style()
# Instantiate broker
broker = {
'HOST': 'localhost',
'PORT': 1883,
'KEEPALIVE': 60,
'CLEAN_SESSION': True
}
broker.update(settings.MQTT)
broker['CLIENT_ID'] = 'mqttupdate_' + str(os.getpid())
try:
# try to connect
print('Connecting to MQTT brocker...')
local_client = UpdateClient(broker, stdout, style, 0)
# wait for connection
while not local_client.connected:
local_client.loop()
# start loop
local_client.loop_start()
# register atexit handler to make sure it disconnects at exit
atexit.register(local_client.disconnect)
client = local_client
except Exception as e:
print('Could not connect to MQTT brocker. Using dumb client...')
| Python | 0.000001 |
c3367eaa7bccf5843abd12a438e14518d533cdbe | Allow API on Windows | platformio_api/__init__.py | platformio_api/__init__.py | # Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
VERSION = (1, 18, 1)
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"
__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"
__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"
config = dict(
SQLALCHEMY_DATABASE_URI=None,
GITHUB_LOGIN=None,
GITHUB_PASSWORD=None,
DL_PIO_DIR=None,
DL_PIO_URL=None,
MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb
# Fuzzy search will not be applied to words shorter than the value below
SOLR_FUZZY_MIN_WORD_LENGTH=3,
LOGGING=dict(version=1)
)
assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
config.update(json.load(f))
# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])
# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
try:
from time import tzset
tzset()
except ImportError:
pass
| # Copyright 2014-present Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging.config
import os
from time import tzset
VERSION = (1, 18, 1)
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio-api"
__description__ = ("An API for PlatformIO")
__url__ = "https://github.com/ivankravets/platformio-api"
__author__ = "Ivan Kravets"
__email__ = "me@ikravets.com"
__license__ = "MIT License"
__copyright__ = "Copyright (C) 2014-2017 Ivan Kravets"
config = dict(
SQLALCHEMY_DATABASE_URI=None,
GITHUB_LOGIN=None,
GITHUB_PASSWORD=None,
DL_PIO_DIR=None,
DL_PIO_URL=None,
MAX_DLFILE_SIZE=1024 * 1024 * 150, # 150 Mb
# Fuzzy search will not be applied to words shorter than the value below
SOLR_FUZZY_MIN_WORD_LENGTH=3,
LOGGING=dict(version=1)
)
assert "PIOAPI_CONFIG_PATH" in os.environ
with open(os.environ.get("PIOAPI_CONFIG_PATH")) as f:
config.update(json.load(f))
# configure logging for packages
logging.basicConfig()
logging.config.dictConfig(config['LOGGING'])
# setup time zone to UTC globally
os.environ['TZ'] = "+00:00"
tzset()
| Python | 0.000001 |
7169cbb9aff103854cad7e8167446c56c7bcc5f4 | Revise to internal helper function _findKth() | lc004_median_of_two_sorted_arrays.py | lc004_median_of_two_sorted_arrays.py | """Leetcode 4. Median of Two Sorted Arrays
Hard
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays.
The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution(object):
def _findKth(self, nums1, nums2, k):
# Base cases for the divide-and-conquer method.
if not nums1:
return nums2[k]
if not nums2:
return nums1[k]
i1, i2 = len(nums1) // 2, len(nums2) // 2
n1, n2 = nums1[i1], nums2[i2]
# When k is smaller than or equal to the sum of nums1 & nums2's
# middle indices.
if k <= i1 + i2:
# When nums1's middle element is bigger than nums2's,
# the 2nd half of nums1 does not contain the kth.
if n1 > n2:
return self._findKth(nums1[:i1], nums2, k)
else:
return self._findKth(nums1, nums2[:i2], k)
# When k is bigger than the sum of nums1 & nums2's middle indices.
else:
# When nums1's middle element is bigger than nums2's,
# the 1st half of nums2 does not contain the kth.
if n1 > n2:
return self._findKth(nums1, nums2[(i2 + 1):], k - i2 - 1)
else:
return self._findKth(nums1[(i1 + 1):], nums2, k - i1 - 1)
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
Time complexity: O(log(m + n))
Space complexity: O(1)
"""
l = len(nums1) + len(nums2)
if l % 2 == 1:
# l // 2 + 1 - 1 => median index of even numbers
return self._findKth(nums1, nums2, l // 2)
else:
return (
self._findKth(nums1, nums2, l // 2 - 1)
+ self._findKth(nums1, nums2, l // 2)) / 2.0
def main():
import time
start_time = time.time()
# Ans: 2.
nums1 = [1, 3]
nums2 = [2]
print(Solution().findMedianSortedArrays(nums1, nums2))
# Ans: 2.5.
nums1 = [1, 2]
nums2 = [3, 4]
print(Solution().findMedianSortedArrays(nums1, nums2))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| """Leetcode 4. Median of Two Sorted Arrays
Hard
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays.
The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
Example 1:
nums1 = [1, 3]
nums2 = [2]
The median is 2.0
Example 2:
nums1 = [1, 2]
nums2 = [3, 4]
The median is (2 + 3)/2 = 2.5
"""
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
Time complexity: O(log(m + n))
Space complexity: O(1)
"""
l = len(nums1) + len(nums2)
if l % 2 == 1:
# l // 2 + 1 - 1 => median index of even numbers
return self.findKth(nums1, nums2, l // 2)
else:
return (
self.findKth(nums1, nums2, l // 2 - 1)
+ self.findKth(nums1, nums2, l // 2)) / 2.0
def findKth(self, nums1, nums2, k):
# Base cases for the divide-and-conquer method.
if not nums1:
return nums2[k]
if not nums2:
return nums1[k]
i1, i2 = len(nums1) // 2, len(nums2) // 2
n1, n2 = nums1[i1], nums2[i2]
# When k is smaller than or equal to the sum of nums1 & nums2's
# middle indices.
if k <= i1 + i2:
# When nums1's middle element is bigger than nums2's,
# the 2nd half of nums1 does not contain the kth.
if n1 > n2:
return self.findKth(nums1[:i1], nums2, k)
else:
return self.findKth(nums1, nums2[:i2], k)
# When k is bigger than the sum of nums1 & nums2's middle indices.
else:
# When nums1's middle element is bigger than nums2's,
# the 1st half of nums2 does not contain the kth.
if n1 > n2:
return self.findKth(nums1, nums2[(i2 + 1):], k - i2 - 1)
else:
return self.findKth(nums1[(i1 + 1):], nums2, k - i1 - 1)
def main():
import time
start_time = time.time()
nums1 = [1, 3]
nums2 = [2]
print(Solution().findMedianSortedArrays(nums1, nums2))
nums1 = [1, 2]
nums2 = [3, 4]
print(Solution().findMedianSortedArrays(nums1, nums2))
print('Time: {}'.format(time.time() - start_time))
if __name__ == '__main__':
main()
| Python | 0.000804 |
dc743c63c52c7ef0bcab73d7b4fcf8f3f4a54ea6 | make median transmittance optional in plot_mean_transmittance. | plot_mean_transmittance.py | plot_mean_transmittance.py | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import common_settings
import mean_transmittance
import median_transmittance
lya_center = 1215.67
settings = common_settings.Settings()
enable_median_transmittance = False
def do_plot():
m = mean_transmittance.MeanTransmittance.from_file(settings.get_mean_transmittance_npy())
ar_z, mean = m.get_weighted_mean_with_minimum_count(1)
# low_pass_mean = m.get_low_pass_mean()[1]
fig = plt.figure(figsize=(14, 10))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = ax1.twiny()
ax1.plot(ar_z, mean)
# ax1.plot(ar_z, low_pass_mean, color='red')
if enable_median_transmittance:
med = median_transmittance.MedianTransmittance.from_file(settings.get_median_transmittance_npy())
ar_z_med, ar_median = med.get_weighted_median_with_minimum_count(1)
ar_z_med, ar_unweighted_median = med.get_weighted_median_with_minimum_count(1, weighted=False)
ax1.plot(ar_z_med, ar_median, color='orange')
ax1.plot(ar_z_med, ar_unweighted_median, color='green')
ax1.set_ylabel(r"$\left< f_q(z)/C_q(z) \right> $")
plt.ylim(0.0, 1.2)
# add wavelength tick marks on top
x_lim2 = tuple([lya_center * (1 + z) for z in ax1.get_xlim()])
ax2.set_xlim(x_lim2)
plt.axis()
ax3 = host_subplot(2, 1, 2)
ax4 = ax3.twinx()
ax4.set_ylabel(r"$N_{Spectra}$")
ax3.plot(m.ar_z, m.ar_total_flux, color='blue', label=r"Total flux$\times$ weight")
ax3.plot(m.ar_z, m.ar_weights, ':', color='green', label='Total weight')
ax4.plot(m.ar_z, m.ar_count, ':', color='red', label='Spectra count')
ax3.set_xlim(ax1.get_xlim())
ax3.set_ylabel(r"$\sum_q f_q(z)/C_q(z)$")
ax3.set_xlabel(r"$z$")
ax3.legend(loc='best')
plt.show()
if __name__ == '__main__':
do_plot()
| import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import host_subplot
import common_settings
import mean_transmittance
import median_transmittance
lya_center = 1215.67
settings = common_settings.Settings()
def do_plot():
m = mean_transmittance.MeanTransmittance.from_file(settings.get_mean_transmittance_npy())
med = median_transmittance.MedianTransmittance.from_file(settings.get_median_transmittance_npy())
ar_z, mean = m.get_weighted_mean_with_minimum_count(1)
ar_z_med, ar_median = med.get_weighted_median_with_minimum_count(1)
ar_z_med, ar_unweighted_median = med.get_weighted_median_with_minimum_count(1, weighted=False)
# low_pass_mean = m.get_low_pass_mean()[1]
fig = plt.figure(figsize=(14, 10))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = ax1.twiny()
ax1.plot(ar_z, mean)
# ax1.plot(ar_z, low_pass_mean, color='red')
ax1.plot(ar_z_med, ar_median, color='orange')
ax1.plot(ar_z_med, ar_unweighted_median, color='green')
ax1.set_ylabel(r"$\left< f_q(z)/C_q(z) \right> $")
plt.ylim(0.0, 1.2)
# add wavelength tick marks on top
x_lim2 = tuple([lya_center * (1 + z) for z in ax1.get_xlim()])
ax2.set_xlim(x_lim2)
plt.axis()
ax3 = host_subplot(2, 1, 2)
ax4 = ax3.twinx()
ax4.set_ylabel(r"$N_{Spectra}$")
ax3.plot(m.ar_z, m.ar_total_flux, color='blue', label=r"Total flux$\times$ weight")
ax3.plot(m.ar_z, m.ar_weights, ':', color='green', label='Total weight')
ax4.plot(m.ar_z, m.ar_count, ':', color='red', label='Spectra count')
ax3.set_xlim(ax1.get_xlim())
ax3.set_ylabel(r"$\sum_q f_q(z)/C_q(z)$")
ax3.set_xlabel(r"$z$")
ax3.legend(loc='best')
plt.show()
if __name__ == '__main__':
do_plot()
| Python | 0.000001 |
4173221d72356fc336be63273a7252c81831fd54 | fix datetime_to_string | ephim/utils.py | ephim/utils.py | from datetime import datetime
import string
def to_base(num, b, numerals=string.digits + string.ascii_lowercase):
return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def datetime_to_string(dt: datetime):
delta = dt - datetime.utcfromtimestamp(0)
### 0
# return dt.strftime('%Y-%m-%d %H.%M.%S.')
### 1
# ts = int(dt.timestamp())
# return '{sign}{ts}'.format(
# sign='N' if ts < 0 else 'P',
# ts=abs(ts),
# )
### 2
return '{sign}{days}_{seconds}'.format(
sign='0' if delta.days < 0 else '',
days=to_base(abs(delta.days), 36, string.digits + string.ascii_uppercase),
seconds=str(delta.seconds).zfill(5),
)
# return '{sign}{days}_{seconds}'.format(
# sign='n' if delta.days < 0 else 'p',
# days=to_base(abs(delta.days), 36),
# seconds=str(delta.seconds).zfill(5),
# )
# return str(dt.strftime('%Y%m%d')) + '_' + str(delta.seconds).zfill(5)
# return str(int(dt.timestamp()))
# return to_base(int(dt.timestamp()), 36)
# return '{days}_{seconds}'.format(
# days=to_base(abs(delta.days), 26, string.ascii_uppercase),
# seconds=str(delta.seconds).zfill(5),
# )
# print(delta.seconds)
# return '{days}{seconds}'.format(
# days=to_base(abs(delta.days), 26, string.ascii_uppercase),
# # hours=to_base(delta.seconds // 3600, 26, string.ascii_uppercase),
# seconds=str(delta.seconds).zfill(5),
# # seconds=dt.strftime('%H%M'),
# )
# return to_base(int(dt.timestamp()), 26, string.ascii_lowercase)
| from datetime import datetime
import string
def to_base(num, b, numerals=string.digits + string.ascii_lowercase):
return ((num == 0) and numerals[0]) or (to_base(num // b, b, numerals).lstrip(numerals[0]) + numerals[num % b])
def datetime_to_string(dt: datetime):
delta = dt - datetime.fromtimestamp(0)
### 0
# return dt.strftime('%Y-%m-%d %H.%M.%S.')
### 1
# ts = int(dt.timestamp())
# return '{sign}{ts}'.format(
# sign='N' if ts < 0 else 'P',
# ts=abs(ts),
# )
### 2
return '{sign}{days}_{seconds}'.format(
sign='0' if delta.days < 0 else '',
days=to_base(abs(delta.days), 36, string.digits + string.ascii_uppercase),
seconds=str(delta.seconds).zfill(5),
)
# return '{sign}{days}_{seconds}'.format(
# sign='n' if delta.days < 0 else 'p',
# days=to_base(abs(delta.days), 36),
# seconds=str(delta.seconds).zfill(5),
# )
# return str(dt.strftime('%Y%m%d')) + '_' + str(delta.seconds).zfill(5)
# return str(int(dt.timestamp()))
# return to_base(int(dt.timestamp()), 36)
# return '{days}_{seconds}'.format(
# days=to_base(abs(delta.days), 26, string.ascii_uppercase),
# seconds=str(delta.seconds).zfill(5),
# )
# print(delta.seconds)
# return '{days}{seconds}'.format(
# days=to_base(abs(delta.days), 26, string.ascii_uppercase),
# # hours=to_base(delta.seconds // 3600, 26, string.ascii_uppercase),
# seconds=str(delta.seconds).zfill(5),
# # seconds=dt.strftime('%H%M'),
# )
# return to_base(int(dt.timestamp()), 26, string.ascii_lowercase)
| Python | 0.004109 |
a4e14854339cc8f0677e32390d9a974266007c01 | Refactor, split into functions | django_smoke_tests/management/commands/smoke_tests.py | django_smoke_tests/management/commands/smoke_tests.py | import random
import string
from django.core.management import BaseCommand, call_command
try:
from django.urls import get_resolver
except ImportError:
from django.core.urlresolvers import get_resolver
from ...tests import SmokeTests
class Command(BaseCommand):
help = "Smoke"
METHODS_TO_TEST = ['GET', 'POST', 'DELETE']
def handle(self, *args, **options):
all_endpoints = get_resolver(None).reverse_dict
for endpoint, endpoint_params in all_endpoints.items():
self.create_tests_for_endpoint(endpoint, endpoint_params)
call_command('test', 'django_smoke_tests')
@staticmethod
def _test_generator(url, method, detail_url=False):
def test(self):
if method == 'GET':
response = self.client.get(url)
elif method == 'POST':
response = self.client.post(url, {})
elif method == 'DELETE':
response = self.client.delete(url)
allowed_status_codes = [200, 201, 301, 302, 304, 405]
if detail_url:
allowed_status_codes.append(404)
self.assertIn(response.status_code, allowed_status_codes)
return test
def create_tests_for_endpoint(self, endpoint, endpoint_params):
if isinstance(endpoint, str):
[(url_as_str, url_params)], url_pattern, _ = endpoint_params
fake_params = {param: self.create_random_string() for param in url_params}
url = self.create_url(url_as_str, fake_params)
self.create_tests_for_http_methods(url, endpoint, detail_url=bool(url_params))
@staticmethod
def create_random_string(length=5):
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
@staticmethod
def create_url(url_as_str, parameters):
url = url_as_str % parameters
return url if url.startswith('/') else '/{}'.format(url)
def create_tests_for_http_methods(self, url, endpoint, detail_url=False):
for method in self.METHODS_TO_TEST:
test = self._test_generator(url, method, detail_url)
setattr(SmokeTests, 'test_smoke_{}_{}'.format(method, endpoint), test)
| from django.core.management import BaseCommand, call_command
try:
from django.urls import get_resolver
except ImportError:
from django.core.urlresolvers import get_resolver
from ...tests import SmokeTests
def _test_generator(url, method, detail_url=False):
def test(self):
if method == 'GET':
response = self.client.get(url)
elif method == 'POST':
response = self.client.post(url, {})
elif method == 'DELETE':
response = self.client.delete(url)
allowed_status_codes = [200, 201, 301, 302, 304, 405]
if detail_url:
allowed_status_codes.append(404)
self.assertIn(response.status_code, allowed_status_codes)
return test
class Command(BaseCommand):
help = "Smoke"
METHODS_TO_TEST = ['GET', 'POST', 'DELETE']
def handle(self, *args, **options):
all_endpoints = get_resolver(None).reverse_dict
for endpoint, endpoint_params in all_endpoints.items():
if isinstance(endpoint, str):
[(url_as_str, url_params)], url_pattern, _ = endpoint_params
mocked_params = {param: 'random' for param in url_params}
ready_url = url_as_str % mocked_params
ready_url = ready_url if ready_url.startswith('/') else '/{}'.format(ready_url)
self.create_tests_for_endpoint(ready_url, endpoint, detail_url=bool(url_params))
call_command('test', 'django_smoke_tests')
def create_tests_for_endpoint(self, url, endpoint, detail_url=False):
for method in self.METHODS_TO_TEST:
test = _test_generator(url, method, detail_url)
setattr(SmokeTests, 'test_smoke_{}_{}'.format(method, endpoint), test)
| Python | 0.999773 |
f96cb9a60882bc19fef5f7b3be4a8063f2e99fa2 | Add exception handling to scheduler start experiment | api/spawner/scheduler.py | api/spawner/scheduler.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
import logging
import uuid
from django.conf import settings
from kubernetes.client.rest import ApiException
from polyaxon_schemas.utils import TaskType
from rest_framework import fields
from api.utils import config
from experiments.serializers import ExperimentJobSerializer
from repos.dockerize import get_image_info
from spawner import K8SSpawner
from experiments.models import ExperimentJob
from spawner.utils.constants import ExperimentLifeCycle
logger = logging.getLogger('polyaxon.scheduler')
def start_experiment(experiment):
# Update experiment status to show that its started
experiment.set_status(ExperimentLifeCycle.SCHEDULED)
project = experiment.project
group = experiment.experiment_group
job_docker_image = None # This will force the spawner to use the default docker image
if experiment.compiled_spec.run_exec:
image_name, image_tag = get_image_info(experiment=experiment)
job_docker_image = '{}:{}'.format(image_name, image_tag)
logger.info('Start experiment with built image `{}`'.format(job_docker_image))
else:
logger.info('Start experiment with default image.')
# Use spawner to start the experiment
spawner = K8SSpawner(project_name=project.unique_name,
experiment_name=experiment.unique_name,
experiment_group_name=group.unique_name if group else None,
project_uuid=project.uuid.hex,
experiment_group_uuid=group.uuid.hex if group else None,
experiment_uuid=experiment.uuid.hex,
spec_config=experiment.config,
k8s_config=settings.K8S_CONFIG,
namespace=settings.K8S_NAMESPACE,
in_cluster=True,
job_docker_image=job_docker_image,
use_sidecar=True,
sidecar_config=config.get_requested_params(to_str=True))
try:
resp = spawner.start_experiment()
except ApiException:
logger.warning('Could not start the experiment, please check your polyaxon spec.')
experiment.set_status(ExperimentLifeCycle.FAILED)
return
# Get the number of jobs this experiment started
master = resp[TaskType.MASTER]
job_uuid = master['pod']['metadata']['labels']['job_uuid']
job_uuid = uuid.UUID(job_uuid)
def get_definition(definition):
serializer = ExperimentJobSerializer(data={
'definition': json.dumps(definition, default=fields.DateTimeField().to_representation)
})
serializer.is_valid()
return json.loads(serializer.validated_data['definition'])
ExperimentJob.objects.create(uuid=job_uuid,
experiment=experiment,
definition=get_definition(master))
for worker in resp[TaskType.WORKER]:
job_uuid = worker['pod']['metadata']['labels']['job_uuid']
job_uuid = uuid.UUID(job_uuid)
ExperimentJob.objects.create(uuid=job_uuid,
experiment=experiment,
definition=get_definition(worker))
for ps in resp[TaskType.PS]:
job_uuid = ps['pod']['metadata']['labels']['job_uuid']
job_uuid = uuid.UUID(job_uuid)
ExperimentJob.objects.create(uuid=job_uuid,
experiment=experiment,
definition=get_definition(ps))
def stop_experiment(experiment, update_status=False):
project = experiment.project
group = experiment.experiment_group
spawner = K8SSpawner(project_name=project.unique_name,
experiment_name=experiment.unique_name,
experiment_group_name=group.unique_name if group else None,
project_uuid=project.uuid.hex,
experiment_group_uuid=group.uuid.hex if group else None,
experiment_uuid=experiment.uuid.hex,
spec_config=experiment.config,
k8s_config=settings.K8S_CONFIG,
namespace=settings.K8S_NAMESPACE,
in_cluster=True,
use_sidecar=True,
sidecar_config=config.get_requested_params(to_str=True))
spawner.stop_experiment()
if update_status:
# Update experiment status to show that its deleted
experiment.set_status(ExperimentLifeCycle.DELETED)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
import uuid
from django.conf import settings
from polyaxon_schemas.utils import TaskType
from rest_framework import fields
from api.utils import config
from experiments.serializers import ExperimentJobSerializer
from spawner import K8SSpawner
from experiments.models import ExperimentJob
from spawner.utils.constants import ExperimentLifeCycle
def start_experiment(experiment):
# Update experiment status to show that its started
experiment.set_status(ExperimentLifeCycle.SCHEDULED)
project = experiment.project
group = experiment.experiment_group
# Use spawner to start the experiment
spawner = K8SSpawner(project_name=project.unique_name,
experiment_name=experiment.unique_name,
experiment_group_name=group.unique_name if group else None,
project_uuid=project.uuid.hex,
experiment_group_uuid=group.uuid.hex if group else None,
experiment_uuid=experiment.uuid.hex,
spec_config=experiment.config,
k8s_config=settings.K8S_CONFIG,
namespace=settings.K8S_NAMESPACE,
in_cluster=True,
use_sidecar=True,
sidecar_config=config.get_requested_params(to_str=True))
resp = spawner.start_experiment()
# Get the number of jobs this experiment started
master = resp[TaskType.MASTER]
job_uuid = master['pod']['metadata']['labels']['job_uuid']
job_uuid = uuid.UUID(job_uuid)
def get_definition(definition):
serializer = ExperimentJobSerializer(data={
'definition': json.dumps(definition, default=fields.DateTimeField().to_representation)
})
serializer.is_valid()
return json.loads(serializer.validated_data['definition'])
ExperimentJob.objects.create(uuid=job_uuid,
experiment=experiment,
definition=get_definition(master))
for worker in resp[TaskType.WORKER]:
job_uuid = worker['pod']['metadata']['labels']['job_uuid']
job_uuid = uuid.UUID(job_uuid)
ExperimentJob.objects.create(uuid=job_uuid,
experiment=experiment,
definition=get_definition(worker))
for ps in resp[TaskType.PS]:
job_uuid = ps['pod']['metadata']['labels']['job_uuid']
job_uuid = uuid.UUID(job_uuid)
ExperimentJob.objects.create(uuid=job_uuid,
experiment=experiment,
definition=get_definition(ps))
def stop_experiment(experiment, update_status=False):
project = experiment.project
group = experiment.experiment_group
spawner = K8SSpawner(project_name=project.unique_name,
experiment_name=experiment.unique_name,
experiment_group_name=group.unique_name if group else None,
project_uuid=project.uuid.hex,
experiment_group_uuid=group.uuid.hex if group else None,
experiment_uuid=experiment.uuid.hex,
spec_config=experiment.config,
k8s_config=settings.K8S_CONFIG,
namespace=settings.K8S_NAMESPACE,
in_cluster=True,
use_sidecar=True,
sidecar_config=config.get_requested_params(to_str=True))
spawner.stop_experiment()
if update_status:
# Update experiment status to show that its deleted
experiment.set_status(ExperimentLifeCycle.DELETED)
| Python | 0.000001 |
58ab8c5ebafad2109b8d8f19c44adbb11fe18c02 | Fix broken or_else implementation | pygow/maybe.py | pygow/maybe.py | class Just:
a = None
def __init__(self, a):
self.a = a
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.a == other.a)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return 'Just(%s)' % self.a
def is_just(self):
return True
def map(self, f):
return Just(f(self.a))
def flat_map(self, f):
return f(self.a)
def get_or_else(self, x):
return self.a
def or_else(self, x):
return self
class Nothing:
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def is_just(self):
return False
def __str__(self):
return 'Nothing()'
def map(self, f):
return Nothing()
def flat_map(self, f):
return Nothing()
def get_or_else(self, x):
return x
def or_else(self, x):
return x
def get_maybe_env(name):
from os import getenv
value = getenv(name)
if value is None:
return Nothing()
else:
return Just(value)
def non_empty_string(x):
if len(x.strip()) is 0:
return Nothing()
else:
return Just(x)
def parse_int(x):
try:
return Just(int(x))
except:
return Nothing()
def maybe(x):
if x is None:
return Nothing()
else:
return Just(x)
| class Just:
a = None
def __init__(self, a):
self.a = a
def __eq__(self, other):
return (isinstance(other, self.__class__)
and self.a == other.a)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return 'Just(%s)' % self.a
def is_just(self):
return True
def map(self, f):
return Just(f(self.a))
def flat_map(self, f):
return f(self.a)
def or_else(self, x):
return self
def get_or_else(self, x):
return self.a
class Nothing:
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not self.__eq__(other)
def is_just(self):
return False
def __str__(self):
return 'Nothing()'
def map(self, f):
return Nothing()
def flat_map(self, f):
return Nothing()
def or_else(self, x):
return self
def get_or_else(self, x):
return x
def get_maybe_env(name):
from os import getenv
value = getenv(name)
if value is None:
return Nothing()
else:
return Just(value)
def non_empty_string(x):
if len(x.strip()) is 0:
return Nothing()
else:
return Just(x)
def parse_int(x):
try:
return Just(int(x))
except:
return Nothing()
def maybe(x):
if x is None:
return Nothing()
else:
return Just(x)
| Python | 0.000003 |
2aca9f77b6f5b8171ec33906a66cd805f57937a0 | Fix mistake with previous commit. | localeurl/templatetags/localeurl_tags.py | localeurl/templatetags/localeurl_tags.py | # Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
from django import template
from django.template import Node, Token, TemplateSyntaxError
from django.template import resolve_variable, defaulttags
from django.template.defaultfilters import stringfilter
from django.conf import settings
from django.utils import translation
import localeurl
from localeurl.utils import is_locale_independent, strip_locale_prefix, \
get_language
register = template.Library()
def chlocale(path, locale):
"""
Changes the path's locale prefix if the path is not locale-independent.
Otherwise removes locale prefix.
"""
stripped_path = rmlocale(path)
if not localeurl.PREFIX_DEFAULT_LOCALE and \
get_language(locale) == get_language(settings.LANGUAGE_CODE):
return stripped_path
if is_locale_independent(stripped_path):
return stripped_path
else:
return '/' + get_language(locale) + stripped_path
chlocale = stringfilter(chlocale)
register.filter('chlocale', chlocale)
def rmlocale(url):
"""Removes the locale prefix from the path."""
return strip_locale_prefix(url)
rmlocale = stringfilter(rmlocale)
register.filter('rmlocale', rmlocale)
def locale_url(parser, token):
"""
Renders the url for the view with another locale prefix. The syntax is
like the 'url' tag, only with a locale before the view.
Examples:
{% locale_url "de" cal.views.day day %}
{% locale_url "nl" cal.views.home %}
{% locale_url "en-gb" cal.views.month month as month_url %}
"""
bits = token.split_contents()
if len(bits) < 3:
raise TemplateSyntaxError("'%s' takes at least two arguments:"
" the locale and a view" % bits[0])
urltoken = Token(token.token_type, bits[0] + ' ' + ' '.join(bits[2:]))
urlnode = defaulttags.url(parser, urltoken)
return LocaleURLNode(bits[1], urlnode)
class LocaleURLNode(Node):
def __init__(self, locale, urlnode):
self.locale = locale
self.urlnode = urlnode
def render(self, context):
locale = resolve_variable(self.locale, context)
path = self.urlnode.render(context)
if self.urlnode.asvar:
self.urlnode.render(context)
context[self.urlnode.asvar] = chlocale(context[self.urlnode.asvar],
locale)
return ''
else:
return chlocale(path, locale)
register.tag('locale_url', locale_url)
| # Copyright (c) 2008 Joost Cassee
# Licensed under the terms of the MIT License (see LICENSE.txt)
from django import template
from django.template import Node, Token, TemplateSyntaxError
from django.template import resolve_variable, defaulttags
from django.template.defaultfilters import stringfilter
from django.conf import settings
from django.utils import translation
import localeurl
from localeurl.utils import strip_locale_prefix, get_language
register = template.Library()
def chlocale(path, locale):
"""
Changes the path's locale prefix if the path is not locale-independent.
Otherwise removes locale prefix.
"""
if not localeurl.PREFIX_DEFAULT_LOCALE and \
get_language(locale) == get_language(settings.LANGUAGE_CODE):
return rmlocale(path)
if is_locale_independent(rmed):
return rmlocale(path)
else:
return '/' + get_language(locale) + rmlocale(path)
chlocale = stringfilter(chlocale)
register.filter('chlocale', chlocale)
def rmlocale(url):
"""Removes the locale prefix from the path."""
return strip_locale_prefix(url)
rmlocale = stringfilter(rmlocale)
register.filter('rmlocale', rmlocale)
def locale_url(parser, token):
"""
Renders the url for the view with another locale prefix. The syntax is
like the 'url' tag, only with a locale before the view.
Examples:
{% locale_url "de" cal.views.day day %}
{% locale_url "nl" cal.views.home %}
{% locale_url "en-gb" cal.views.month month as month_url %}
"""
bits = token.split_contents()
if len(bits) < 3:
raise TemplateSyntaxError("'%s' takes at least two arguments:"
" the locale and a view" % bits[0])
urltoken = Token(token.token_type, bits[0] + ' ' + ' '.join(bits[2:]))
urlnode = defaulttags.url(parser, urltoken)
return LocaleURLNode(bits[1], urlnode)
class LocaleURLNode(Node):
def __init__(self, locale, urlnode):
self.locale = locale
self.urlnode = urlnode
def render(self, context):
locale = resolve_variable(self.locale, context)
path = self.urlnode.render(context)
if self.urlnode.asvar:
self.urlnode.render(context)
context[self.urlnode.asvar] = chlocale(context[self.urlnode.asvar],
locale)
return ''
else:
return chlocale(path, locale)
register.tag('locale_url', locale_url)
| Python | 0.999999 |
f18d675f2877e8f9356dc64a96bf8fba364cddd3 | Add search field to admin Terms. | controlled_vocabularies/admin.py | controlled_vocabularies/admin.py | from django.contrib import admin
from django import forms
from controlled_vocabularies.models import Vocabulary, Term, Property
class PropertyInline(admin.TabularInline):
model = Property
fk_name = "term_key"
extra = 1
class VocabularyAdmin(admin.ModelAdmin):
""" Vocabulary class that determines how comment appears in admin """
list_display = ('name', 'label', 'order', 'maintainer', 'created', 'modified')
fieldsets = (
(None, {
'classes': 'wide extrapretty',
'fields': ('name', 'label', 'order', 'maintainer', 'maintainerEmail', 'definition')
}),
)
class TermAdmin(admin.ModelAdmin):
""" Term class that determines how comment appears in admin """
list_display = ('id', 'name', 'get_vocab', 'label', 'order',)
search_fields = ['name', 'label']
fieldsets = (
(None, {
'classes': 'wide extrapretty',
'fields': ('vocab_list', 'name', 'label', 'order')
}),
)
list_filter = ('vocab_list',)
inlines = [PropertyInline]
class PropertyAdmin(admin.ModelAdmin):
""" Property class that determines how comment appears in admin """
list_display = ('property_name', 'get_vocab', 'get_term', 'label',)
fieldsets = (
(None, {
'classes': 'wide extrapretty',
'fields': ('term_key', 'property_name', 'label')
}),
)
def has_spaces(name):
""" Make sure there are no spaces """
if ' ' in name:
raise forms.ValidationError("Spaces are not allowed.")
else:
return name
class VocabularyAdminForm(forms.ModelForm):
""" Vocabulary class to specify how form data is handled in admin """
class Meta:
model = Vocabulary
fields = '__all__'
def clean_name(self):
""" Make sure there are no spaces in the name field """
return has_spaces(self.cleaned_data["name"])
admin.site.register(Vocabulary, VocabularyAdmin)
admin.site.register(Term, TermAdmin)
admin.site.register(Property, PropertyAdmin)
| from django.contrib import admin
from django import forms
from controlled_vocabularies.models import Vocabulary, Term, Property
class PropertyInline(admin.TabularInline):
model = Property
fk_name = "term_key"
extra = 1
class VocabularyAdmin(admin.ModelAdmin):
""" Vocabulary class that determines how comment appears in admin """
list_display = ('name', 'label', 'order', 'maintainer', 'created', 'modified')
fieldsets = (
(None, {
'classes': 'wide extrapretty',
'fields': ('name', 'label', 'order', 'maintainer', 'maintainerEmail', 'definition')
}),
)
class TermAdmin(admin.ModelAdmin):
""" Term class that determines how comment appears in admin """
list_display = ('id', 'name', 'get_vocab', 'label', 'order',)
fieldsets = (
(None, {
'classes': 'wide extrapretty',
'fields': ('vocab_list', 'name', 'label', 'order')
}),
)
list_filter = ('vocab_list',)
inlines = [PropertyInline]
class PropertyAdmin(admin.ModelAdmin):
""" Property class that determines how comment appears in admin """
list_display = ('property_name', 'get_vocab', 'get_term', 'label',)
fieldsets = (
(None, {
'classes': 'wide extrapretty',
'fields': ('term_key', 'property_name', 'label')
}),
)
def has_spaces(name):
""" Make sure there are no spaces """
if ' ' in name:
raise forms.ValidationError("Spaces are not allowed.")
else:
return name
class VocabularyAdminForm(forms.ModelForm):
""" Vocabulary class to specify how form data is handled in admin """
class Meta:
model = Vocabulary
fields = '__all__'
def clean_name(self):
""" Make sure there are no spaces in the name field """
return has_spaces(self.cleaned_data["name"])
admin.site.register(Vocabulary, VocabularyAdmin)
admin.site.register(Term, TermAdmin)
admin.site.register(Property, PropertyAdmin)
| Python | 0 |
d68f3251c7605220da13a308ebc794a35a0c12e6 | This is 0.9.1 | rb/__init__.py | rb/__init__.py | """
rb
~~
The redis blaster.
:copyright: (c) 2015 Functional Software Inc.
:license: Apache License 2.0, see LICENSE for more details.
"""
from rb.cluster import Cluster
from rb.clients import RoutingClient, MappingClient, FanoutClient
from rb.router import BaseRouter, ConsistentHashingRouter, PartitionRouter
from rb.promise import Promise
__version__ = '0.9.1'
__all__ = [
# cluster
'Cluster',
# client
'RoutingClient', 'MappingClient', 'FanoutClient',
# router
'BaseRouter', 'ConsistentHashingRouter', 'PartitionRouter',
# promise
'Promise',
]
| """
rb
~~
The redis blaster.
:copyright: (c) 2015 Functional Software Inc.
:license: Apache License 2.0, see LICENSE for more details.
"""
from rb.cluster import Cluster
from rb.clients import RoutingClient, MappingClient, FanoutClient
from rb.router import BaseRouter, ConsistentHashingRouter, PartitionRouter
from rb.promise import Promise
__version__ = '1.0.dev.0'
__all__ = [
# cluster
'Cluster',
# client
'RoutingClient', 'MappingClient', 'FanoutClient',
# router
'BaseRouter', 'ConsistentHashingRouter', 'PartitionRouter',
# promise
'Promise',
]
| Python | 0.999998 |
d3677042d17d3bc641c84981d5332f7fd0ddfe5d | Update process-schedules.py | cron/process-schedules.py | cron/process-schedules.py | #!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
import datetime
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
now = datetime.datetime.now()
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorselect = cnx.cursor()
# Check schedule time and date
query = ("SELECT * FROM schedules WHERE enabled ='1';")
cursorselect.execute(query)
results_schedules =cursorselect.fetchall()
cursorselect.close()
for result in results_schedules:
print("* * * * * *")
SCHED_TEST_TIME = False
SCHED_TEST_DAY = False
SCHED_TEST_SENSORS = False
SCHED_TEST_MODES = False
SCHED_TEST_TIMERS = False
SCHED_ID = result[0]
SCHED_START = result[2]
SCHED_END = result[3]
SCHED_MON = result[4]
SCHED_TUE = result[5]
SCHED_WED = result[6]
SCHED_THU = result[7]
SCHED_FRI = result[8]
SCHED_SAT = result[9]
SCHED_SUN = result[10]
print( SCHED_ID )
#print( now )
#print( SCHED_START )
#print( SCHED_END )
#print( type(SCHED_END) )
#print("---")
SCHED_START_HOUR, remainder = divmod(SCHED_START.seconds,3600)
SCHED_START_MINUTE, sec = divmod(remainder, 60)
SCHED_END_HOUR, remainder = divmod(SCHED_END.seconds,3600)
SCHED_END_MINUTE, sec = divmod(remainder, 60)
#print( type(SCHED_START_MINUTE))
#print( SCHED_START_MINUTE)
#print("---")
SCHED_START_STR = str(SCHED_START_HOUR)+":"+str(SCHED_START_MINUTE)
SCHED_END_STR = str(SCHED_END_HOUR) + ":"+str(SCHED_END_MINUTE)
#print( SCHED_START_STR )
TIME_NOW = datetime.datetime.strptime(str(now.hour)+":"+str(now.minute), "%H:%M")
TIME_START = datetime.datetime.strptime(SCHED_START_STR, "%H:%M")
TIME_END = datetime.datetime.strptime(SCHED_END_STR, "%H:%M")
MIN_TO_START = TIME_NOW - TIME_START
MIN_TO_END = TIME_END - TIME_NOW
#print( MIN_TO_START.total_seconds() )
#print( MIN_TO_END.total_seconds() )
#print( SCHED_TEST_TIME )
if ( MIN_TO_START.total_seconds() > 0 and MIN_TO_END.total_seconds() > 0 ):
SCHED_TEST_TIME = True
print( SCHED_TEST_TIME )
print( SCHED_TEST_DAY )
print( SCHED_TEST_SENSORS )
print( SCHED_TEST_MODES )
print( SCHED_TEST_TIMERS )
# Check senso values
# Check modes
# Check timers
if ( SCHED_TEST_TIME and SCHED_TEST_DAY and SCHED_TEST_SENSORS and SCHED_TEST_MODES and SCHED_TEST_TIMERS == True):
print( "activate" )
else:
print( "deactivate" )
cnx.commit()
cnx.close()
| #!/usr/bin/env python
import MySQLdb
#import datetime
#import urllib2
#import os
import datetime
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
now = datetime.datetime.now()
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cursorselect = cnx.cursor()
# Check schedule time and date
query = ("SELECT * FROM schedules WHERE enabled ='1';")
cursorselect.execute(query)
results_schedules =cursorselect.fetchall()
cursorselect.close()
for result in results_schedules:
print("***")
SCHED_TEST_TIME = False
SCHED_TEST_DAY = False
SCHED_TEST_SENSORS = False
SCHED_TEST_MODES = False
SCHED_TEST_TIMERS = False
SCHED_ID = result[0]
SCHED_START = result[2]
SCHED_END = result[3]
SCHED_MON = result[4]
SCHED_TUE = result[5]
SCHED_WED = result[6]
SCHED_THU = result[7]
SCHED_FRI = result[8]
SCHED_SAT = result[9]
SCHED_SUN = result[10]
print( SCHED_ID )
#print( now )
#print( SCHED_START )
#print( SCHED_END )
#print( type(SCHED_END) )
#print("---")
SCHED_START_HOUR, remainder = divmod(SCHED_START.seconds,3600)
SCHED_START_MINUTE, sec = divmod(remainder, 60)
SCHED_END_HOUR, remainder = divmod(SCHED_END.seconds,3600)
SCHED_END_MINUTE, sec = divmod(remainder, 60)
#print( type(SCHED_START_MINUTE))
#print( SCHED_START_MINUTE)
#print("---")
SCHED_START_STR = str(SCHED_START_HOUR)+":"+str(SCHED_START_MINUTE)
SCHED_END_STR = str(SCHED_END_HOUR) + ":"+str(SCHED_END_MINUTE)
#print( SCHED_START_STR )
TIME_NOW = datetime.datetime.strptime(str(now.hour)+":"+str(now.minute), "%H:%M")
TIME_START = datetime.datetime.strptime(SCHED_START_STR, "%H:%M")
TIME_END = datetime.datetime.strptime(SCHED_END_STR, "%H:%M")
MIN_TO_START = TIME_NOW - TIME_START
MIN_TO_END = TIME_END - TIME_NOW
#print( MIN_TO_START.total_seconds() )
#print( MIN_TO_END.total_seconds() )
#print( SCHED_TEST_TIME )
if ( MIN_TO_START.total_seconds() > 0 and MIN_TO_END.total_seconds() > 0 ):
SCHED_TEST_TIME = True
print( SCHED_TEST_TIME )
# Check senso values
# Check modes
# Check timers
if ( SCHED_TEST_TIME and SCHED_TEST_DAY and SCHED_TEST_SENSORS and SCHED_TEST_MODES and SCHED_TEST_TIMERS == True):
print( "activate" )
else:
print( "deactivate" )
cnx.commit()
cnx.close()
| Python | 0.000001 |
e60ce628029e3100d6f2a8a8f7260e2ed229e6ac | Add helper method to retrieve review count per user in a skeleton | django/applications/catmaid/control/review.py | django/applications/catmaid/control/review.py | from collections import defaultdict
from catmaid.models import Review
from django.db import connection
def get_treenodes_to_reviews(treenode_ids=None, skeleton_ids=None,
umap=lambda r: r):
""" Returns a dictionary that contains all reviewed nodes of the
passed <treenode_ids> and/or <skeleton_ids> lists as keys. The
reviewer user IDs are kept in a list as values. A function can be
passed to which is executed for every reviewer_id to change the
value stored result (e.g. to use user names instead of an ID. It
defaults to the identity and therefore reviewer IDs.
"""
# Set up filters
reviews = Review.objects.all()
if treenode_ids:
reviews = reviews.filter(treenode_id__in=treenode_ids)
if skeleton_ids:
reviews = reviews.filter(skeleton_id__in=skeleton_ids)
# Only request treenode ID and reviewer ID
reviews = reviews.values_list('treenode_id', 'reviewer_id')
# Build dictionary
treenode_to_reviews = defaultdict(list)
for tid, rid in reviews:
treenode_to_reviews[tid].append(umap(rid))
return treenode_to_reviews
def get_review_count(skeleton_ids):
""" Returns a dictionary that maps skelton IDs to dictonaries that map
user_ids to a review count for this particular skeleton.
"""
# Count nodes that have been reviewed by each user in each partner skeleton
cursor = connection.cursor()
cursor.execute('''
SELECT skeleton_id, reviewer_id, count(skeleton_id)
FROM review
WHERE skeleton_id IN (%s)
GROUP BY reviewer_id, skeleton_id
''' % ",".join(str(skid) for skid in skeleton_ids))
# Build dictionary
reviews = defaultdict(lambda: defaultdict(int))
for row in cursor.fetchall():
reviews[row[0]][row[1]] = row[2]
return reviews
| from collections import defaultdict
from catmaid.models import Review
def get_treenodes_to_reviews(treenode_ids=None, skeleton_ids=None,
umap=lambda r: r):
""" Returns a dictionary that contains all reviewed nodes of the
passed <treenode_ids> and/or <skeleton_ids> lists as keys. The
reviewer user IDs are kept in a list as values. A function can be
passed to which is executed for every reviewer_id to change the
value stored result (e.g. to use user names instead of an ID. It
defaults to the identity and therefore reviewer IDs.
"""
# Set up filters
reviews = Review.objects.all()
if treenode_ids:
reviews = reviews.filter(treenode_id__in=treenode_ids)
if skeleton_ids:
reviews = reviews.filter(skeleton_id__in=skeleton_ids)
# Only request treenode ID and reviewer ID
reviews = reviews.values_list('treenode_id', 'reviewer_id')
# Build dictionary
treenode_to_reviews = defaultdict(list)
for tid, rid in reviews:
treenode_to_reviews[tid].append(umap(rid))
return treenode_to_reviews
| Python | 0 |
a36a7a0eb6560156c5be6f0cc5523c17e79591e4 | fix import errors | deepchem/models/tests/test_normalizing_flow_pytorch.py | deepchem/models/tests/test_normalizing_flow_pytorch.py | """
Test for Pytorch Normalizing Flow model and its transformations
"""
import pytest
import numpy as np
import unittest
try:
import torch
from torch.distributions import MultivariateNormal
from deepchem.models.torch_models.layers import Affine
has_torch = True
except:
has_torch = False
@unittest.skipIf(not has_torch, 'torch is not installed')
@pytest.mark.torch
def test_Affine():
"""
This test should evaluate if the transformation its being applied
correctly. When computing the logarithm of the determinant jacobian matrix
the result must be zero for any distribution when performing the first forward
and inverse pass (initialized). This is the expected
behavior since nothing is being learned yet.
input shape: (samples, dim)
output shape: (samples, dim)
"""
dim = 2
samples = 96
data = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
tensor = data.sample(torch.Size((samples, dim)))
_, log_det_jacobian = Affine(dim).forward(tensor)
_, inverse_log_det_jacobian = Affine(dim).inverse(tensor)
# The first pass of the transformation should be 0
log_det_jacobian = log_det_jacobian.detach().numpy()
inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy()
zeros = np.zeros((samples,))
assert np.array_equal(log_det_jacobian, zeros)
assert np.array_equal(inverse_log_det_jacobian, zeros)
| """
Test for Pytorch Normalizing Flow model and its transformations
"""
import pytest
import numpy as np
import unittest
try:
import torch
from torch.distributions import MultivariateNormal
from deepchem.models.torch_models.normalizing_flows_pytorch import Affine
has_torch = True
except:
has_torch = False
@unittest.skipIf(not has_torch, 'torch is not installed')
@pytest.mark.torch
def test_Affine():
"""
This test should evaluate if the transformation its being applied
correctly. When computing the logarithm of the determinant jacobian matrix
the result must be zero for any distribution as input when performing the
first forward and inverse pass (initialized). This is the expected
behavior because nothing is learned yet.
input shape: (samples, dim)
output shape: (samples, dim)
"""
dim = 2
samples = 96
data = MultivariateNormal(torch.zeros(dim), torch.eye(dim))
tensor = data.sample(torch.Size((samples, dim)))
_, log_det_jacobian = Affine(dim).forward(tensor)
_, inverse_log_det_jacobian = Affine(dim).inverse(tensor)
# The first pass of the transformation should be 0
log_det_jacobian = log_det_jacobian.detach().numpy()
inverse_log_det_jacobian = inverse_log_det_jacobian.detach().numpy()
zeros = np.zeros((samples,))
assert np.array_equal(log_det_jacobian, zeros)
assert np.array_equal(inverse_log_det_jacobian, zeros)
| Python | 0.000025 |
3dc54a1c845cf0b99fd0dfc6fd454659895ba888 | Fix import. | django_elasticsearch/contrib/restframework/__init__.py | django_elasticsearch/contrib/restframework/__init__.py | from rest_framework import VERSION
from django_elasticsearch.contrib.restframework.base import AutoCompletionMixin
if int(VERSION[0]) < 3:
from django_elasticsearch.contrib.restframework.restframework2 import IndexableModelMixin
from django_elasticsearch.contrib.restframework.restframework2 import ElasticsearchFilterBackend
else:
from django_elasticsearch.contrib.restframework.restframework3 import IndexableModelMixin
from django_elasticsearch.contrib.restframework.restframework3 import ElasticsearchFilterBackend
__all__ = [ElasticsearchFilterBackend,
IndexableModelMixin,
AutoCompletionMixin]
| from rest_framework import VERSION
from django_elasticsearch.contrib.restframework.restframework import AutoCompletionMixin
if int(VERSION[0]) < 3:
from django_elasticsearch.contrib.restframework.restframework2 import IndexableModelMixin
from django_elasticsearch.contrib.restframework.restframework2 import ElasticsearchFilterBackend
else:
from django_elasticsearch.contrib.restframework.restframework3 import IndexableModelMixin
from django_elasticsearch.contrib.restframework.restframework3 import ElasticsearchFilterBackend
__all__ = [ElasticsearchFilterBackend,
IndexableModelMixin,
AutoCompletionMixin]
| Python | 0 |
03ee56092d7f258dd37016ac06b6daf2439811f3 | change article abstract count to limit to in_doaj only | portality/models/search.py | portality/models/search.py | from portality.dao import DomainObject
from portality.models.cache import Cache
from portality.models import Journal, Article
class JournalArticle(DomainObject):
__type__ = 'journal,article'
__readonly__ = True # TODO actually heed this attribute in all DomainObject methods which modify data
@classmethod
def site_statistics(cls):
stats = Cache.get_site_statistics()
if stats is not None:
return stats
# we didn't get anything from the cache, so we need to generate and
# cache a new set
# prep the query and result objects
stats = { # Note these values all have to be strings
"journals" : "0",
"countries" : "0",
"abstracts" : "0",
"new_journals" : "0",
"no_apc" : "0"
}
# get the journal data
q = JournalStatsQuery()
journal_data = Journal.query(q=q.stats)
stats["journals"] = "{0:,}".format(journal_data.get("hits", {}).get("total", 0))
stats["countries"] = "{0:,}".format(len(journal_data.get("aggregations", {}).get("countries", {}).get("buckets", [])))
apc_buckets = journal_data.get("aggregations", {}).get("apcs", {}).get("buckets", [])
for b in apc_buckets:
if b.get("key") == "No":
stats["no_apc"] = "{0:,}".format(b.get("doc_count"))
break
stats["new_journals"] = "{0:,}".format(journal_data.get("aggregations", {}).get("creation", {}).get("buckets", [])[0].get("doc_count", 0))
# get the article data
qa = ArticleStatsQuery()
article_data = Article.query(q=qa.q)
stats["abstracts"] = "{0:,}".format(article_data.get("aggregations", {}).get("abstracts", {}).get("value", 0))
# now cache and return
Cache.cache_site_statistics(stats)
return stats
class JournalStatsQuery(object):
stats = {
"query": {
"bool": {
"must": [
{"term": {"admin.in_doaj": True}}
]
}
},
"size": 0,
"aggs": {
"countries" : {
"terms" : {"field" : "index.country.exact", "size" : 500}
},
"apcs" : {
"terms" : {"field" : "index.has_apc.exact"}
},
"creation" : {
"date_range" : {
"field" : "created_date",
"ranges" : [
{"from" : "now-1M"}
]
}
}
}
}
class ArticleStatsQuery(object):
q = {
"query": {
"bool": {
"must": [
{"term": {"admin.in_doaj": True}}
]
}
},
"size" : 0,
"aggs" : {
"abstracts" : {
"value_count" : {"field" : "bibjson.abstract.exact"}
}
}
} | from portality.dao import DomainObject
from portality.models.cache import Cache
from portality.models import Journal, Article
class JournalArticle(DomainObject):
__type__ = 'journal,article'
__readonly__ = True # TODO actually heed this attribute in all DomainObject methods which modify data
@classmethod
def site_statistics(cls):
stats = Cache.get_site_statistics()
if stats is not None:
return stats
# we didn't get anything from the cache, so we need to generate and
# cache a new set
# prep the query and result objects
stats = { # Note these values all have to be strings
"journals" : "0",
"countries" : "0",
"abstracts" : "0",
"new_journals" : "0",
"no_apc" : "0"
}
# get the journal data
q = JournalStatsQuery()
journal_data = Journal.query(q=q.stats)
stats["journals"] = "{0:,}".format(journal_data.get("hits", {}).get("total", 0))
stats["countries"] = "{0:,}".format(len(journal_data.get("aggregations", {}).get("countries", {}).get("buckets", [])))
apc_buckets = journal_data.get("aggregations", {}).get("apcs", {}).get("buckets", [])
for b in apc_buckets:
if b.get("key") == "No":
stats["no_apc"] = "{0:,}".format(b.get("doc_count"))
break
stats["new_journals"] = "{0:,}".format(journal_data.get("aggregations", {}).get("creation", {}).get("buckets", [])[0].get("doc_count", 0))
# get the article data
qa = ArticleStatsQuery()
article_data = Article.query(q=qa.q)
stats["abstracts"] = "{0:,}".format(article_data.get("aggregations", {}).get("abstracts", {}).get("value", 0))
# now cache and return
Cache.cache_site_statistics(stats)
return stats
class JournalStatsQuery(object):
stats = {
"query": {
"bool": {
"must": [
{"term": {"admin.in_doaj": True}}
]
}
},
"size": 0,
"aggs": {
"countries" : {
"terms" : {"field" : "index.country.exact", "size" : 500}
},
"apcs" : {
"terms" : {"field" : "index.has_apc.exact"}
},
"creation" : {
"date_range" : {
"field" : "created_date",
"ranges" : [
{"from" : "now-1M"}
]
}
}
}
}
class ArticleStatsQuery(object):
q = {
"query" : {"match_all" : {}},
"size" : 0,
"aggs" : {
"abstracts" : {
"value_count" : {"field" : "bibjson.abstract.exact"}
}
}
} | Python | 0 |
df553c4e0c536f7deaa180076658ba61e3af66b6 | Rework parsers to use subparsers | refresh/cli.py | refresh/cli.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2013 Thanh Ha
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import sys
def init(args):
print('init')
def add(args):
print('add')
def remove(args):
print('remove')
def verify(args):
print('verify')
def setup_parser_args(parser, subparsers):
"""Setup the main arguement parser"""
setup_parser_init(subparsers)
setup_parser_add(subparsers)
setup_parser_remove(subparsers)
setup_parser_verify(subparsers)
def setup_parser_init(subparsers):
"""Setup the init command parser"""
parser_init = subparsers.add_parser('init', help='Add dotfile for management')
parser_init.set_defaults(func = init)
def setup_parser_add(subparsers):
"""Setup the add command parser"""
parser_add = subparsers.add_parser('add', help='Add dotfile for management')
parser_add.add_argument('source')
parser_add.add_argument('destination')
parser_add.set_defaults(func = add)
def setup_parser_remove(subparsers):
"""Setup the remove command parser"""
parser_remove = subparsers.add_parser('remove', help='Remove dotfile from management')
parser_remove.add_argument('symlink')
parser_remove.set_defaults(func = remove)
def setup_parser_verify(subparsers):
"""Setup the verify command parser"""
parser_verify = subparsers.add_parser('verify', help='Verify dotfiles')
parser_verify.set_defaults(func = verify)
def parse_args():
"""Initialize the Argument Parser"""
parser = argparse.ArgumentParser(description='Refresh, dotfiles management tool')
subparsers = parser.add_subparsers(help='Command List')
setup_parser_args(parser, subparsers)
args = parser.parse_args()
args.func(args)
print("parse complete")
def main():
parse_args()
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2013 Thanh Ha
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import argparse
def setup_parser_args(parser):
'''Add arguments to parse'''
parser.add_argument('--add',
help='Add dotfile for management',
action='store_true')
parser.add_argument('--remove',
help='Remove dotfile from management',
action='store_true')
parser.add_argument('--check',
help='Check dotfile link',
action='store_true')
def parse_args():
'''Initialize the Argument Parser'''
parser = argparse.ArgumentParser(description='Refresh, dotfiles management tool')
setup_parser_args(parser)
args = parser.parse_args()
def main():
parse_args()
if __name__ == '__main__':
main()
| Python | 0 |
89dac8b14610f08b12db0ab6e00b7432b527fd89 | Remove trailing whitespace | python/balcaza/activity/local/text.py | python/balcaza/activity/local/text.py | from balcaza.t2types import *
from balcaza.t2activity import BeanshellCode
ByteArrayToString = BeanshellCode(
'''if ((bytes == void) || (bytes == null)) {
throw new RuntimeException("The 'bytes' parameter must be specified");
}
if (encoding == void) {
string = new String(bytes);
} else {
string = new String(bytes, encoding);
}
''',
inputs = dict(
bytes = String,
encoding = Optional[String]
),
outputs = dict(
string = String
),
defaultInput = 'bytes',
name = 'ByteArrayToString'
)
| from balcaza.t2types import *
from balcaza.t2activity import BeanshellCode
ByteArrayToString = BeanshellCode(
'''if ((bytes == void) || (bytes == null)) {
throw new RuntimeException("The 'bytes' parameter must be specified");
}
if (encoding == void) {
string = new String(bytes);
} else {
string = new String(bytes, encoding);
}
''',
inputs = dict(
bytes = String,
encoding = Optional[String]
),
outputs = dict(
string = String
),
defaultInput = 'bytes',
name = 'ByteArrayToString'
)
| Python | 0.999999 |
b324031ee683005be0307e3b323c4709ce3a01eb | Disable those new requirements because pip requires gcc to install them | python_apps/airtime_analyzer/setup.py | python_apps/airtime_analyzer/setup.py | from setuptools import setup
from subprocess import call
import sys
# Allows us to avoid installing the upstart init script when deploying airtime_analyzer
# on Airtime Pro:
if '--no-init-script' in sys.argv:
data_files = []
sys.argv.remove('--no-init-script') # super hax
else:
data_files = [('/etc/init', ['install/upstart/airtime_analyzer.conf'])]
print data_files
setup(name='airtime_analyzer',
version='0.1',
description='Airtime Analyzer Worker and File Importer',
url='http://github.com/sourcefabric/Airtime',
author='Albert Santoni',
author_email='albert.santoni@sourcefabric.org',
license='MIT',
packages=['airtime_analyzer'],
scripts=['bin/airtime_analyzer'],
install_requires=[
'mutagen',
'pika',
'python-magic',
'nose',
'coverage',
'mock',
'python-daemon',
'requests',
# These next 3 are required for requests to support SSL with SNI. Learned this the hard way...
# What sucks is that GCC is required to pip install these.
#'ndg-httpsclient',
#'pyasn1',
#'pyopenssl'
],
zip_safe=False,
data_files=data_files)
# Reload the initctl config so that "service start airtime_analyzer" works
if data_files:
print "Reloading initctl configuration"
call(['initctl', 'reload-configuration'])
print "Run \"sudo service airtime_analyzer restart\" now."
# TODO: Should we start the analyzer here or not?
| from setuptools import setup
from subprocess import call
import sys
# Allows us to avoid installing the upstart init script when deploying airtime_analyzer
# on Airtime Pro:
if '--no-init-script' in sys.argv:
data_files = []
sys.argv.remove('--no-init-script') # super hax
else:
data_files = [('/etc/init', ['install/upstart/airtime_analyzer.conf'])]
print data_files
setup(name='airtime_analyzer',
version='0.1',
description='Airtime Analyzer Worker and File Importer',
url='http://github.com/sourcefabric/Airtime',
author='Albert Santoni',
author_email='albert.santoni@sourcefabric.org',
license='MIT',
packages=['airtime_analyzer'],
scripts=['bin/airtime_analyzer'],
install_requires=[
'mutagen',
'pika',
'python-magic',
'nose',
'coverage',
'mock',
'python-daemon',
'requests',
# These next 3 are required for requests to support SSL with SNI. This is extremely important. Learned this the hard way...
'ndg-httpsclient',
'pyasn1',
'pyopenssl'
],
zip_safe=False,
data_files=data_files)
# Reload the initctl config so that "service start airtime_analyzer" works
if data_files:
print "Reloading initctl configuration"
call(['initctl', 'reload-configuration'])
print "Run \"sudo service airtime_analyzer restart\" now."
# TODO: Should we start the analyzer here or not?
| Python | 0.000002 |
2c1811fad85d6bacf8d3fcaf1299994bfc5efb78 | Support serializer path instead of "self" keyword | drf_extra_fields/relations.py | drf_extra_fields/relations.py | from collections import OrderedDict
from django.utils.module_loading import import_string
from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField
class PresentableRelatedFieldMixin(object):
def __init__(self, **kwargs):
self.presentation_serializer = kwargs.pop("presentation_serializer", None)
self.presentation_serializer_kwargs = kwargs.pop(
"presentation_serializer_kwargs", dict()
)
assert self.presentation_serializer is not None, (
self.__class__.__name__
+ " must provide a `presentation_serializer` argument"
)
super(PresentableRelatedFieldMixin, self).__init__(**kwargs)
def use_pk_only_optimization(self):
"""
Instead of sending pk only object, return full object. The object already retrieved from db by drf.
This doesn't cause an extra query.
It even might save from making an extra query on serializer.to_representation method.
Related source codes:
- https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L41
- https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L132
"""
return False
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict([(item.pk, self.display_value(item)) for item in queryset])
def to_representation(self, data):
if isinstance(self.presentation_serializer, str):
self.presentation_serializer = import_string(self.presentation_serializer)
return self.presentation_serializer(
data, context=self.context, **self.presentation_serializer_kwargs
).data
class PresentablePrimaryKeyRelatedField(
PresentableRelatedFieldMixin, PrimaryKeyRelatedField
):
"""
Override PrimaryKeyRelatedField to represent serializer data instead of a pk field of the object.
"""
pass
class PresentableSlugRelatedField(PresentableRelatedFieldMixin, SlugRelatedField):
"""
Override SlugRelatedField to represent serializer data instead of a slug field of the object.
"""
pass
| from collections import OrderedDict
from rest_framework.relations import PrimaryKeyRelatedField, SlugRelatedField
class PresentableRelatedFieldMixin(object):
def __init__(self, **kwargs):
self.presentation_serializer = kwargs.pop("presentation_serializer", None)
self.presentation_serializer_kwargs = kwargs.pop(
"presentation_serializer_kwargs", dict()
)
assert self.presentation_serializer is not None, (
self.__class__.__name__
+ " must provide a `presentation_serializer` argument"
)
super(PresentableRelatedFieldMixin, self).__init__(**kwargs)
def use_pk_only_optimization(self):
"""
Instead of sending pk only object, return full object. The object already retrieved from db by drf.
This doesn't cause an extra query.
It even might save from making an extra query on serializer.to_representation method.
Related source codes:
- https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L41
- https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/relations.py#L132
"""
return False
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict([(item.pk, self.display_value(item)) for item in queryset])
def to_representation(self, data):
return self.presentation_serializer(
data, context=self.context, **self.presentation_serializer_kwargs
).data
def bind(self, field_name, parent):
if self.presentation_serializer == "self":
self.presentation_serializer = parent.__class__
super(PresentableRelatedFieldMixin, self).bind(field_name, parent)
class PresentablePrimaryKeyRelatedField(
PresentableRelatedFieldMixin, PrimaryKeyRelatedField
):
"""
Override PrimaryKeyRelatedField to represent serializer data instead of a pk field of the object.
"""
pass
class PresentableSlugRelatedField(PresentableRelatedFieldMixin, SlugRelatedField):
"""
Override SlugRelatedField to represent serializer data instead of a slug field of the object.
"""
pass
| Python | 0 |
461b7c5bf5541fc3a56039d6756262d6b99e8428 | Add null count. | problem/column_explorer/column_explorer.py | problem/column_explorer/column_explorer.py | #! /usr/bin/env python3
# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Systematically finds aggregate stats for a table's columns."""
import sqlalchemy as sa
import uszipcode
def get_zipcode_session():
return uszipcode.SearchEngine().ses
def get_zipcode_cs():
"""Returns a JDBC connect string for the zipcode database."""
# typical value: sqlite:////Users/foo/.uszipcode/simple_db.sqlite
return get_zipcode_session().connection().engine.url
class ColumnExplorer:
def __init__(self, cs_or_engine):
self.engine = sa.create_engine(cs_or_engine)
def report(self, table_name, round_digits=3):
meta = sa.MetaData(bind=self.engine)
tbl = sa.Table(table_name, meta, autoload=True)
cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone()
print(f'# {table_name}\n{cnt} rows, {len(tbl.c)} columns\n')
for column in self._get_col_names(tbl):
print('\n## ' + column)
for agg in ['min', 'avg', 'max', 'count(distinct ', 'nulls']:
if '(' not in agg:
agg += '('
select = f'select {agg}{column}) from {table_name}'
if agg.startswith('nulls'):
select = f'select count(*) from {table_name} where {column} is null'
stat, = self.engine.execute(select).fetchone()
if agg.startswith('avg'):
stat = round(stat, round_digits)
if agg.startswith('nulls'):
pct = round(100 * stat / cnt, round_digits)
stat = f'{stat} ({pct} %)'
print('-', agg.replace('(', ' '), stat)
print(f'\n{cnt} rows in {table_name}')
def _get_col_names(self, table):
for col in table.columns:
yield str(col).split('.')[1]
if __name__ == '__main__':
ColumnExplorer(get_zipcode_cs()).report('simple_zipcode')
| #! /usr/bin/env python3
# Copyright 2019 John Hanley.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Systematically finds aggregate stats for a table's columns."""
import sqlalchemy as sa
import uszipcode
def get_zipcode_session():
return uszipcode.SearchEngine().ses
def get_zipcode_cs():
"""Returns a JDBC connect string for the zipcode database."""
# typical value: sqlite:////Users/foo/.uszipcode/simple_db.sqlite
return get_zipcode_session().connection().engine.url
class ColumnExplorer:
def __init__(self, cs_or_engine):
self.engine = sa.create_engine(cs_or_engine)
def report(self, table_name):
for column in self._get_col_names(table_name):
print('\n## ' + column)
for agg in ['min', 'avg', 'max', 'count(distinct ']:
if '(' not in agg:
agg += '('
select = f'select {agg}{column}) from {table_name}'
stat, = self.engine.execute(select).fetchone()
print('-', agg.replace('(', ' '), stat)
cnt, = self.engine.execute(f'select count(*) from {table_name}').fetchone()
print(f'\n{cnt} rows in {table_name}')
def _get_col_names(self, table_name):
meta = sa.MetaData(bind=self.engine)
tbl = sa.Table(table_name, meta, autoload=True)
return map(str, tbl.columns)
if __name__ == '__main__':
ColumnExplorer(get_zipcode_cs()).report('simple_zipcode')
| Python | 0.000001 |
f65c2466cf58c4024584423da3d927ad9522eec9 | Fix __repr__ if no person linked | indico/modules/events/contributions/models/persons.py | indico/modules/events/contributions/models/persons.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import db, PyIntEnum
from indico.util.string import return_ascii, format_repr
from indico.modules.events.models.persons import PersonLinkBase
from indico.util.struct.enum import IndicoEnum
class AuthorType(int, IndicoEnum):
none = 0
primary = 1
secondary = 2
class ContributionPersonLink(PersonLinkBase):
"""Association between EventPerson and Contribution."""
__tablename__ = 'contribution_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'contribution_links'
person_link_unique_columns = ('contribution_id',)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
primary_key=True,
index=True
)
is_speaker = db.Column(
db.Boolean,
nullable=False,
default=False
)
author_type = db.Column(
PyIntEnum(AuthorType),
nullable=False,
default=AuthorType.none
)
# relationship backrefs:
# - contribution (Contribution.person_links)
@return_ascii
def __repr__(self):
return format_repr(self, 'contribution_id', 'person_id', is_speaker=False, author_type=AuthorType.none,
_text=self.person.full_name if self.person else None)
class SubContributionPersonLink(PersonLinkBase):
"""Association between EventPerson and SubContribution."""
__tablename__ = 'subcontribution_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'subcontribution_links'
person_link_unique_columns = ('subcontribution_id',)
# subcontribution persons are always speakers and never authors
# we provide these attributes to make subcontribution links
# compatible with contribution links
is_speaker = True
author_type = AuthorType.none
subcontribution_id = db.Column(
db.Integer,
db.ForeignKey('events.subcontributions.id'),
primary_key=True,
index=True
)
# relationship backrefs:
# - subcontribution (SubContribution.person_links)
@return_ascii
def __repr__(self):
return format_repr(self, 'subcontribution_id', 'person_id',
_text=self.person.full_name if self.person else None)
| # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db.sqlalchemy import db, PyIntEnum
from indico.util.string import return_ascii, format_repr
from indico.modules.events.models.persons import PersonLinkBase
from indico.util.struct.enum import IndicoEnum
class AuthorType(int, IndicoEnum):
none = 0
primary = 1
secondary = 2
class ContributionPersonLink(PersonLinkBase):
"""Association between EventPerson and Contribution."""
__tablename__ = 'contribution_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'contribution_links'
person_link_unique_columns = ('contribution_id',)
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
primary_key=True,
index=True
)
is_speaker = db.Column(
db.Boolean,
nullable=False,
default=False
)
author_type = db.Column(
PyIntEnum(AuthorType),
nullable=False,
default=AuthorType.none
)
# relationship backrefs:
# - contribution (Contribution.person_links)
@return_ascii
def __repr__(self):
return format_repr(self, 'contribution_id', 'person_id', is_speaker=False, author_type=AuthorType.none,
_text=self.person.full_name)
class SubContributionPersonLink(PersonLinkBase):
"""Association between EventPerson and SubContribution."""
__tablename__ = 'subcontribution_person_links'
__auto_table_args = {'schema': 'events'}
person_link_backref_name = 'subcontribution_links'
person_link_unique_columns = ('subcontribution_id',)
# subcontribution persons are always speakers and never authors
# we provide these attributes to make subcontribution links
# compatible with contribution links
is_speaker = True
author_type = AuthorType.none
subcontribution_id = db.Column(
db.Integer,
db.ForeignKey('events.subcontributions.id'),
primary_key=True,
index=True
)
# relationship backrefs:
# - subcontribution (SubContribution.person_links)
@return_ascii
def __repr__(self):
return format_repr(self, 'subcontribution_id', 'person_id', _text=self.person.full_name)
| Python | 0.00004 |
2943d0f68340bc16ab29a42072751b263cf76192 | Fix deleting images/pages with a legacy mapping | indico/modules/events/layout/models/legacy_mapping.py | indico/modules/events/layout/models/legacy_mapping.py | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.string import return_ascii
class LegacyImageMapping(db.Model):
"""Legacy image id mapping
Legacy images had event-unique numeric ids. Using this
mapping we can resolve old ones to their new id.
"""
__tablename__ = 'legacy_image_id_map'
__table_args__ = {'schema': 'events'}
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
index=True,
autoincrement=False
)
legacy_image_id = db.Column(
db.Integer,
primary_key=True,
index=True,
autoincrement=False
)
image_id = db.Column(
db.Integer,
db.ForeignKey('events.image_files.id'),
nullable=False
)
image = db.relationship(
'ImageFile',
lazy=False,
backref=db.backref(
'legacy_mapping',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
@return_ascii
def __repr__(self):
return '<LegacyImageMapping({}, {})>'.format(self.legacy_image_id, self.image_id)
class LegacyPageMapping(db.Model):
"""Legacy page id mapping
Legacy pages had event-unique numeric ids. Using this
mapping we can resolve old ones to their new id.
"""
__tablename__ = 'legacy_page_id_map'
__table_args__ = {'schema': 'events'}
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
index=True,
autoincrement=False
)
legacy_page_id = db.Column(
db.Integer,
primary_key=True,
index=True,
autoincrement=False
)
page_id = db.Column(
db.Integer,
db.ForeignKey('events.pages.id'),
nullable=False
)
page = db.relationship(
'EventPage',
lazy=False,
backref=db.backref(
'legacy_mapping',
cascade='all, delete-orphan',
uselist=False,
lazy=True
)
)
@return_ascii
def __repr__(self):
return '<LegacyPageMapping({}, {})>'.format(self.legacy_page_id, self.image_id)
| # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core.db import db
from indico.util.string import return_ascii
class LegacyImageMapping(db.Model):
"""Legacy image id mapping
Legacy images had event-unique numeric ids. Using this
mapping we can resolve old ones to their new id.
"""
__tablename__ = 'legacy_image_id_map'
__table_args__ = {'schema': 'events'}
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
index=True,
autoincrement=False
)
legacy_image_id = db.Column(
db.Integer,
primary_key=True,
index=True,
autoincrement=False
)
image_id = db.Column(
db.Integer,
db.ForeignKey('events.image_files.id'),
nullable=False
)
image = db.relationship(
'ImageFile',
lazy=False,
backref=db.backref('legacy_mapping', uselist=False, lazy=True)
)
@return_ascii
def __repr__(self):
return '<LegacyImageMapping({}, {})>'.format(self.legacy_image_id, self.image_id)
class LegacyPageMapping(db.Model):
"""Legacy page id mapping
Legacy pages had event-unique numeric ids. Using this
mapping we can resolve old ones to their new id.
"""
__tablename__ = 'legacy_page_id_map'
__table_args__ = {'schema': 'events'}
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
primary_key=True,
index=True,
autoincrement=False
)
legacy_page_id = db.Column(
db.Integer,
primary_key=True,
index=True,
autoincrement=False
)
page_id = db.Column(
db.Integer,
db.ForeignKey('events.pages.id'),
nullable=False
)
page = db.relationship(
'EventPage',
lazy=False,
backref=db.backref('legacy_mapping', uselist=False, lazy=True)
)
@return_ascii
def __repr__(self):
return '<LegacyPageMapping({}, {})>'.format(self.legacy_page_id, self.image_id)
| Python | 0.000006 |
272d40aa83a1adbb54ad129ce17790cc9301b6f7 | Set properties to the file. | lib/python/mod_python/FileSession.py | lib/python/mod_python/FileSession.py | #
# Copyright 2004 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Originally developed by Gregory Trubetskoy.
#
# $Id$
import cPickle
import tempfile
import os
from mod_python import Session
tempdir = tempfile.gettempdir()
# Credits : this was initially contributed by dharana <dharana@dharana.net>
class FileSession(Session.BaseSession):
def __init__(self, req, sid=0, secret=None, timeout=0, lock=1):
Session.BaseSession.__init__(self, req, sid=sid, secret=secret,
timeout=timeout, lock=lock)
def do_cleanup(self):
# is there any faster way of doing this?
for f in os.listdir(tempdir):
if not f.startswith('mp_sess_'):
continue
try:
fp = file('%s/%s' % (tempdir, f))
try:
dict = cPickle.load(fp)
if (time() - dict['_accessed']) > dict['_timeout']:
os.unlink('%s%s' % (tempdir, f))
finally:
fp.close()
except Exception:
# TODO : emit a warning to the Apache Log
pass
def do_load(self):
try:
# again, is there a more pythonic way of doing this check?
fp = file('%s/mp_sess_%s' % (tempdir, self._sid))
try:
data = cPickle.load(fp)
return data
finally:
fp.close()
except:
return None
def do_save(self, dict):
fp = file('%s/mp_sess_%s' % (tempdir, self._sid), 'w+')
try:
cPickle.dump(dict, fp)
finally:
fp.close()
def do_delete(self):
try:
os.unlink('%s/mp_sess_%s' % (tempdir, self._sid))
except Exception:
pass
| #
# Copyright 2004 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Originally developed by Gregory Trubetskoy.
#
# $Id: Session.py 106619 2004-11-25 22:10:52Z nd $
import cPickle
import tempfile
import os
from mod_python import Session
tempdir = tempfile.gettempdir()
# Credits : this was initially contributed by dharana <dharana@dharana.net>
class FileSession(Session.BaseSession):
def __init__(self, req, sid=0, secret=None, timeout=0, lock=1):
Session.BaseSession.__init__(self, req, sid=sid, secret=secret,
timeout=timeout, lock=lock)
def do_cleanup(self):
# is there any faster way of doing this?
for f in os.listdir(tempdir):
if not f.startswith('mp_sess_'):
continue
try:
fp = file('%s/%s' % (tempdir, f))
try:
dict = cPickle.load(fp)
if (time() - dict['_accessed']) > dict['_timeout']:
os.unlink('%s%s' % (tempdir, f))
finally:
fp.close()
except Exception:
# TODO : emit a warning to the Apache Log
pass
def do_load(self):
try:
# again, is there a more pythonic way of doing this check?
fp = file('%s/mp_sess_%s' % (tempdir, self._sid))
try:
data = cPickle.load(fp)
return data
finally:
fp.close()
except:
return None
def do_save(self, dict):
fp = file('%s/mp_sess_%s' % (tempdir, self._sid), 'w+')
try:
cPickle.dump(dict, fp)
finally:
fp.close()
def do_delete(self):
try:
os.unlink('%s/mp_sess_%s' % (tempdir, self._sid))
except Exception:
pass
| Python | 0 |
86d088835a88c00af69090b6b7f1bae42ff5c09a | remove monetdb typo | lib/sqlalchemy/databases/__init__.py | lib/sqlalchemy/databases/__init__.py | # __init__.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__all__ = [
'sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird',
'sybase', 'access', 'maxdb'
]
| # __init__.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
__all__ = [
'sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird',
'sybase', 'access', 'maxdb', 'monetdb'
]
| Python | 0.999923 |
4dc1f2b85c7728102a6cc4b149fb5200a0ffa736 | Fix get_user_command to comply with new AirWatch logic TelekomLabs-DCO-1.1-Signed-off-by: Łukasz Biernot <lukasz.biernot@gmail.com> (github: ElmoVanKielmo) | src/tenants/management/commands/get_user.py | src/tenants/management/commands/get_user.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand
from optparse import make_option
from provisioning import okta
from provisioning.models import Okta
from litedesk.lib.airwatch import user
from litedesk.lib.airwatch import group
from provisioning.models import AirWatch
import json
class Command(BaseCommand):
help = 'Get information about a user.'
option_list = BaseCommand.option_list + (
make_option('--username',
default="bruce.wayne",
help='Username to find. Default="bruce.wayne"'),
)
def handle(self, *args, **options):
result = {'okta': {}, 'airwatch': {}}
okta_service = Okta.objects.all().get()
client = okta.Client(okta_service.domain, okta_service.api_token)
okta_user = client.get(okta.User, options["username"])
if okta_user:
# self.stdout.write("got the Okta user with the id")
result['okta']['id'] = okta_user.id
result['okta']['status'] = okta_user.status
result['okta']['applications'] = []
okta_apps = client.user_applications(okta_user)
for app in okta_apps:
result['okta']['applications'].append(app['name'])
airwatch_service = AirWatch.objects.all().get()
airwatch_client = airwatch_service.get_client()
airwatch_user = user.User.get_remote(airwatch_client, options["username"])
if airwatch_user != None:
result['airwatch']['id'] = airwatch_user.id
result['airwatch']['Status'] = airwatch_user.Status
result['airwatch']['applications'] = []
aw_assets = airwatch_service.airwatch.tenantserviceasset_set.all()
for asset in aw_assets:
smartgroup_id = asset.metadata['smartgroup_id']
if options["username"] in (
user['Name'] for user in group.SmartGroup.get_remote(
airwatch_client, smartgroup_id
).UserAdditions
):
result['airwatch']['applications'].append(asset.asset.name)
self.stdout.write(json.dumps(result))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.management.base import BaseCommand
from optparse import make_option
from provisioning import okta
from provisioning.models import Okta
from litedesk.lib.airwatch import user
from litedesk.lib.airwatch import group
from provisioning.models import AirWatch
import json
class Command(BaseCommand):
help = 'Get information about a user.'
option_list = BaseCommand.option_list + (
make_option('--username',
default="bruce.wayne",
help='Username to find. Default="bruce.wayne"'),
)
def handle(self, *args, **options):
result = {'okta': {}, 'airwatch': {}}
okta_service = Okta.objects.all().get()
client = okta.Client(okta_service.domain, okta_service.api_token)
okta_user = client.get(okta.User, options["username"])
if okta_user:
# self.stdout.write("got the Okta user with the id")
result['okta']['id'] = okta_user.id
result['okta']['status'] = okta_user.status
result['okta']['applications'] = []
okta_apps = client.user_applications(okta_user)
for app in okta_apps:
result['okta']['applications'].append(app['name'])
airwatch_service = AirWatch.objects.all().get()
airwatch_client = airwatch_service.get_client()
airwatch_user = user.User.get_remote(airwatch_client, options["username"])
if airwatch_user != None:
result['airwatch']['id'] = airwatch_user.id
result['airwatch']['Status'] = airwatch_user.Status
result['airwatch']['applications'] = []
aw_assets = airwatch_service.airwatch.tenantserviceasset_set.all()
for asset in aw_assets:
group_id = asset.metadata['group_id']
if options["username"] in group.UserGroup.usernames_by_group_id(airwatch_client, group_id):
result['airwatch']['applications'].append(asset.asset.name)
self.stdout.write(json.dumps(result))
| Python | 0 |
71b2fcdd85187520a38ead5736d1b64b1e69afa4 | Fix hedu enrolled, entrant and graduate columns | app/models/hedu.py | app/models/hedu.py | from sqlalchemy import Column, Integer, String, func
from app import db
class Hedu(db.Model):
__tablename__ = 'hedu'
year = Column(Integer, primary_key=True)
region = Column(String(1), primary_key=True)
mesoregion = Column(String(4), primary_key=True)
microregion = Column(String(5), primary_key=True)
state = Column(String(2), primary_key=True)
municipality = Column(String(7), primary_key=True)
university = Column(String(5), primary_key=True)
university_campus = Column(String(7), primary_key=True)
funding_type = Column(String(1), primary_key=True)
school_type = Column(String(1), primary_key=True)
hedu_course_field = Column(String(2), primary_key=True)
hedu_course = Column(String(6), primary_key=True)
enrolled = Column(String(12), primary_key=True)
graduate = Column(String(1), primary_key=True)
entrant = Column(String(1), primary_key=True)
academic_degree = Column(String(2), primary_key=True)
distance_learning = Column(String(1), primary_key=True)
shift = Column(String(2), primary_key=True)
gender = Column(String(2), primary_key=True)
age = Column(Integer)
ethnicity = Column(String(2), primary_key=True)
state_of_birth = Column(String(2), primary_key=True)
municipality_of_birth = Column(String(7), primary_key=True)
admission_year = Column(String(4))
admission_month = Column(String(2))
@classmethod
def dimensions(cls):
return [
'year',
'region',
'mesoregion',
'microregion',
'state',
'municipality',
'university',
'university_campus',
'funding_type',
'school_type',
'hedu_course_field',
'hedu_course',
'enrolled',
'graduate',
'entrant',
'academic_degree',
'distance_learning',
'shift',
'gender',
'age',
'ethnicity',
'state_of_birth',
'municipality_of_birth',
'admission_year',
'admission_month'
]
@classmethod
def aggregate(cls, value):
return {
'enrolleds': func.count(),
'entrants': func.sum(cls.entrant),
'graduates': func.sum(cls.graduate),
'average_age': func.avg(cls.age)
}[value]
@classmethod
def values(cls):
return ['enrolleds', 'entrants', 'graduates', 'average_age']
| from sqlalchemy import Column, Integer, String, func
from app import db
class Hedu(db.Model):
__tablename__ = 'hedu'
year = Column(Integer, primary_key=True)
region = Column(String(1), primary_key=True)
mesoregion = Column(String(4), primary_key=True)
microregion = Column(String(5), primary_key=True)
state = Column(String(2), primary_key=True)
municipality = Column(String(7), primary_key=True)
university = Column(String(5), primary_key=True)
university_campus = Column(String(7), primary_key=True)
funding_type = Column(String(1), primary_key=True)
school_type = Column(String(1), primary_key=True)
hedu_course_field = Column(String(2), primary_key=True)
hedu_course = Column(String(6), primary_key=True)
enrolled = Column(String(12), primary_key=True)
graduates = Column(String(1), primary_key=True)
entrants = Column(String(1), primary_key=True)
academic_degree = Column(String(2), primary_key=True)
distance_learning = Column(String(1), primary_key=True)
shift = Column(String(2), primary_key=True)
gender = Column(String(2), primary_key=True)
age = Column(Integer)
ethnicity = Column(String(2), primary_key=True)
state_of_birth = Column(String(2), primary_key=True)
municipality_of_birth = Column(String(7), primary_key=True)
admission_year = Column(String(4))
admission_month = Column(String(2))
@classmethod
def dimensions(cls):
return [
'year',
'region',
'mesoregion',
'microregion',
'state',
'municipality',
'university',
'university_campus',
'funding_type',
'school_type',
'hedu_course_field',
'hedu_course',
'enrolled',
'graduates',
'entrants',
'academic_degree',
'distance_learning',
'shift',
'gender',
'age',
'ethnicity',
'state_of_birth',
'municipality_of_birth',
'admission_year',
'admission_month'
]
@classmethod
def aggregate(cls, value):
return {
'enrolled': func.count(),
'entrants': func.sum(cls.entrants),
'graduates': func.sum(cls.graduates),
'average_age': func.avg(cls.age)
}[value]
@classmethod
def values(cls):
return ['enrolled', 'entrants', 'graduates', 'average_age']
| Python | 0.000001 |
4956868cd605faa085ae2fc4ab44076760a4fd80 | Add some more auto_meta tests | metafunctions/tests/test_pipe.py | metafunctions/tests/test_pipe.py | import operator
import unittest
from metafunctions.tests.util import BaseTestCase
from metafunctions.decorators import pipe_node
class TestIntegration(BaseTestCase):
def test_basic_usage(self):
self.assertEqual(a('_'), '_a')
def test_wraps(self):
@pipe_node
def d():
'a docstring for d'
self.assertEqual(d.__doc__, 'a docstring for d')
def test_auto_meta(self):
'''If possible, we upgrade functions to meta functions on the fly.'''
def y(x):
return x + 'y'
ay = a | y
ya = y | a
ayyyy = a | y | y | y | y
# Can't do this
#ayy = a | y + y
# But this should work
yayy = y | a + y
yy_ya = y | y + a
self.assertEqual(ay('_'), '_ay')
self.assertEqual(ya('_'), '_ya')
self.assertEqual(ayyyy('_'), '_ayyyy')
self.assertEqual(yayy('_'), '_ya_yy')
self.assertEqual(yy_ya('_'), '_yy_ya')
def test_basic_composition(self):
composite = a | b | c | d
self.assertEqual(composite('_'), '_abcd')
def test_advanced_str(self):
cmp = a | b + c + d | e
self.assertEqual(str(cmp), '(a | ((b + c) + d) | e)')
self.assertEqual(cmp('_'), '_ab_ac_ade')
def test_non_callable_composition(self):
'''Anything that is not callable in a composition is applied at call time (to the results
of the composed functions).
'''
@pipe_node
def g(x):
return x
cmps_to_expected = (
(g + 1, 11),
(g - 1, 9),
(g * 2, 20),
(g / 2, 5),
)
for cmp, expected in cmps_to_expected:
with self.subTest():
self.assertEqual(cmp(10), expected)
@unittest.skip("Making this work doesn't make sense anymore")
def test_or(self):
'''Assert that we can still use or'''
@pipe_node
def return_a_set(x):
return set(*x)
#Just wrap anything that isn't callable in a lambda, to put it off until call time
outer_set = set((1, 2, 3))
cmp = return_a_set | outer_set
reverse_cmp = outer_set | return_a_set
self.assertSetEqual(cmp('abc'), set('abc'))
self.assertSetEqual(reverse_cmp('abc'), set('abc'))
def test_single_calls(self):
'''every function is only called once'''
call_count = 0
@pipe_node
def y(x):
nonlocal call_count
call_count += 1
return x + 'y'
cmp = y | y * 2 | y + y | y
self.assertEqual(cmp('_'), '_yy_yyy_yy_yyyy')
self.assertEqual(call_count, 5)
def test_repr(self):
cmp = a | b | c | (lambda x: None)
self.assertEqual(str(cmp), '(a | b | c | <lambda>)')
### Simple Sample Functions ###
@pipe_node
def a(x):
return x + 'a'
@pipe_node
def b(x):
return x + 'b'
@pipe_node
def c(x):
return x + 'c'
@pipe_node
def d(x):
return x + 'd'
@pipe_node
def e(x):
return x + 'e'
| import operator
import unittest
from metafunctions.tests.util import BaseTestCase
from metafunctions.decorators import pipe_node
class TestUnit(BaseTestCase):
def test_basic_usage(self):
self.assertEqual(a('_'), '_a')
def test_wraps(self):
@pipe_node
def d():
'a docstring for d'
self.assertEqual(d.__doc__, 'a docstring for d')
def test_auto_meta(self):
'''If possible, we upgrade functions to meta functions on the fly.'''
def y(x):
return x + 'y'
ay = a | y
ya = y | a
ayyyy = a | y | y | y | y
self.assertEqual(ay('_'), '_ay')
self.assertEqual(ya('_'), '_ya')
self.assertEqual(ayyyy('_'), '_ayyyy')
def test_basic_composition(self):
composite = a | b | c | d
self.assertEqual(composite('_'), '_abcd')
def test_advanced_str(self):
cmp = a | b + c + d | e
self.assertEqual(str(cmp), '(a | ((b + c) + d) | e)')
self.assertEqual(cmp('_'), '_ab_ac_ade')
def test_non_callable_composition(self):
'''Anything that is not callable in a composition is applied at call time (to the results
of the composed functions).
'''
@pipe_node
def g(x):
return x
cmps_to_expected = (
(g + 1, 11),
(g - 1, 9),
(g * 2, 20),
(g / 2, 5),
)
for cmp, expected in cmps_to_expected:
with self.subTest():
self.assertEqual(cmp(10), expected)
@unittest.skip("Making this work doesn't make sense anymore")
def test_or(self):
'''Assert that we can still use or'''
@pipe_node
def return_a_set(x):
return set(*x)
#Just wrap anything that isn't callable in a lambda, to put it off until call time
outer_set = set((1, 2, 3))
cmp = return_a_set | outer_set
reverse_cmp = outer_set | return_a_set
self.assertSetEqual(cmp('abc'), set('abc'))
self.assertSetEqual(reverse_cmp('abc'), set('abc'))
def test_single_calls(self):
'''every function is only called once'''
call_count = 0
@pipe_node
def y(x):
nonlocal call_count
call_count += 1
return x + 'y'
cmp = y | y * 2 | y + y | y
self.assertEqual(cmp('_'), '_yy_yyy_yy_yyyy')
self.assertEqual(call_count, 5)
def test_repr(self):
cmp = a | b | c | (lambda x: None)
self.assertEqual(str(cmp), '(a | b | c | <lambda>)')
### Simple Sample Functions ###
@pipe_node
def a(x):
return x + 'a'
@pipe_node
def b(x):
return x + 'b'
@pipe_node
def c(x):
return x + 'c'
@pipe_node
def d(x):
return x + 'd'
@pipe_node
def e(x):
return x + 'e'
| Python | 0 |
a4f475245c3af8470337fe0c25b136e58189a607 | Update griddy to use CoordinatorEntity (#39392) | homeassistant/components/griddy/sensor.py | homeassistant/components/griddy/sensor.py | """Support for August sensors."""
import logging
from homeassistant.const import ENERGY_KILO_WATT_HOUR
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import CONF_LOADZONE, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the August sensors."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
settlement_point = config_entry.data[CONF_LOADZONE]
async_add_entities([GriddyPriceSensor(settlement_point, coordinator)], True)
class GriddyPriceSensor(CoordinatorEntity):
"""Representation of an August sensor."""
def __init__(self, settlement_point, coordinator):
"""Initialize the sensor."""
super().__init__(coordinator)
self._settlement_point = settlement_point
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return f"¢/{ENERGY_KILO_WATT_HOUR}"
@property
def name(self):
"""Device Name."""
return f"{self._settlement_point} Price Now"
@property
def icon(self):
"""Device Ice."""
return "mdi:currency-usd"
@property
def unique_id(self):
"""Device Uniqueid."""
return f"{self._settlement_point}_price_now"
@property
def state(self):
"""Get the current price."""
return round(float(self.coordinator.data.now.price_cents_kwh), 4)
| """Support for August sensors."""
import logging
from homeassistant.const import ENERGY_KILO_WATT_HOUR
from homeassistant.helpers.entity import Entity
from .const import CONF_LOADZONE, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the August sensors."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
settlement_point = config_entry.data[CONF_LOADZONE]
async_add_entities([GriddyPriceSensor(settlement_point, coordinator)], True)
class GriddyPriceSensor(Entity):
"""Representation of an August sensor."""
def __init__(self, settlement_point, coordinator):
"""Initialize the sensor."""
self._coordinator = coordinator
self._settlement_point = settlement_point
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return f"¢/{ENERGY_KILO_WATT_HOUR}"
@property
def name(self):
"""Device Name."""
return f"{self._settlement_point} Price Now"
@property
def icon(self):
"""Device Ice."""
return "mdi:currency-usd"
@property
def unique_id(self):
"""Device Uniqueid."""
return f"{self._settlement_point}_price_now"
@property
def available(self):
"""Return True if entity is available."""
return self._coordinator.last_update_success
@property
def state(self):
"""Get the current price."""
return round(float(self._coordinator.data.now.price_cents_kwh), 4)
@property
def should_poll(self):
"""Return False, updates are controlled via coordinator."""
return False
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self._coordinator.async_request_refresh()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self._coordinator.async_add_listener(self.async_write_ha_state)
)
| Python | 0 |
8021b027ae4a617712741d1e9a0668817bad9193 | Fix import | experiments/tests/test_admin.py | experiments/tests/test_admin.py | from __future__ import absolute_import
import json
from django.contrib.auth.models import User, Permission
from django.urls import reverse import reverse
from django.test import TestCase
from experiments.models import Experiment, CONTROL_STATE, ENABLED_STATE
from experiments.utils import participant
class AdminTestCase(TestCase):
def test_set_state(self):
experiment = Experiment.objects.create(name='test_experiment', state=CONTROL_STATE)
User.objects.create_superuser(username='user', email='deleted@mixcloud.com', password='pass')
self.client.login(username='user', password='pass')
self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, CONTROL_STATE)
response = self.client.post(reverse('admin:experiment_admin_set_state'), {
'experiment': experiment.name,
'state': ENABLED_STATE,
})
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, ENABLED_STATE)
self.assertIsNone(Experiment.objects.get(pk=experiment.pk).end_date)
response = self.client.post(reverse('admin:experiment_admin_set_state'), {
'experiment': experiment.name,
'state': CONTROL_STATE,
})
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, CONTROL_STATE)
self.assertIsNotNone(Experiment.objects.get(pk=experiment.pk).end_date)
def test_set_alternative(self):
experiment = Experiment.objects.create(name='test_experiment', state=ENABLED_STATE)
user = User.objects.create_superuser(username='user', email='deleted@mixcloud.com', password='pass')
self.client.login(username='user', password='pass')
participant(user=user).enroll('test_experiment', alternatives=['other1', 'other2'])
for alternative in ('other2', 'control', 'other1'):
response = self.client.post(reverse('admin:experiment_admin_set_alternative'), {
'experiment': experiment.name,
'alternative': alternative,
})
self.assertDictEqual(json.loads(response.content.decode('utf-8')), {
'success': True,
'alternative': alternative,
})
self.assertEqual(participant(user=user).get_alternative('test_experiment'), alternative)
def test_permissions(self):
# redirect to login if not logged in
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
response = self.client.post(reverse('admin:experiment_admin_set_alternative'), {})
self.assertEqual(response.status_code, 302)
# non staff user
user = User.objects.create_user(username='user', password='pass')
user.save()
self.client.login(username='user', password='pass')
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
user.is_staff = True
user.save()
self.assertEqual(403, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(403, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
permission = Permission.objects.get(codename='change_experiment')
user.user_permissions.add(permission)
self.assertEqual(400, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(400, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
| from __future__ import absolute_import
import json
from django.contrib.auth.models import User, Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from experiments.models import Experiment, CONTROL_STATE, ENABLED_STATE
from experiments.utils import participant
class AdminTestCase(TestCase):
def test_set_state(self):
experiment = Experiment.objects.create(name='test_experiment', state=CONTROL_STATE)
User.objects.create_superuser(username='user', email='deleted@mixcloud.com', password='pass')
self.client.login(username='user', password='pass')
self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, CONTROL_STATE)
response = self.client.post(reverse('admin:experiment_admin_set_state'), {
'experiment': experiment.name,
'state': ENABLED_STATE,
})
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, ENABLED_STATE)
self.assertIsNone(Experiment.objects.get(pk=experiment.pk).end_date)
response = self.client.post(reverse('admin:experiment_admin_set_state'), {
'experiment': experiment.name,
'state': CONTROL_STATE,
})
self.assertEqual(response.status_code, 200)
self.assertEqual(Experiment.objects.get(pk=experiment.pk).state, CONTROL_STATE)
self.assertIsNotNone(Experiment.objects.get(pk=experiment.pk).end_date)
def test_set_alternative(self):
experiment = Experiment.objects.create(name='test_experiment', state=ENABLED_STATE)
user = User.objects.create_superuser(username='user', email='deleted@mixcloud.com', password='pass')
self.client.login(username='user', password='pass')
participant(user=user).enroll('test_experiment', alternatives=['other1', 'other2'])
for alternative in ('other2', 'control', 'other1'):
response = self.client.post(reverse('admin:experiment_admin_set_alternative'), {
'experiment': experiment.name,
'alternative': alternative,
})
self.assertDictEqual(json.loads(response.content.decode('utf-8')), {
'success': True,
'alternative': alternative,
})
self.assertEqual(participant(user=user).get_alternative('test_experiment'), alternative)
def test_permissions(self):
# redirect to login if not logged in
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
response = self.client.post(reverse('admin:experiment_admin_set_alternative'), {})
self.assertEqual(response.status_code, 302)
# non staff user
user = User.objects.create_user(username='user', password='pass')
user.save()
self.client.login(username='user', password='pass')
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(302, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
user.is_staff = True
user.save()
self.assertEqual(403, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(403, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
permission = Permission.objects.get(codename='change_experiment')
user.user_permissions.add(permission)
self.assertEqual(400, self.client.post(reverse('admin:experiment_admin_set_state'), {}).status_code)
self.assertEqual(400, self.client.post(reverse('admin:experiment_admin_set_alternative'), {}).status_code)
| Python | 0.000002 |
86679c5b1a31d9125ebc3f55785cb9219e41ed27 | Add mechanism to validate backends against buckets | edgedb/lang/common/buckets.py | edgedb/lang/common/buckets.py | ##
# Copyright (c) 2012, 2013 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import weakref
from metamagic.utils import abc, config
class BucketMeta(abc.AbstractMeta, config.ConfigurableMeta):
def __new__(mcls, name, bases, dct):
cls = super().__new__(mcls, name, bases, dct)
if len([base for base in bases if isinstance(base, mcls)]) > 1:
raise TypeError('Bucket classes can have only one base Bucket class')
cls._instances = weakref.WeakSet()
return cls
class Bucket(metaclass=BucketMeta):
def __new__(cls, *args, **kwargs):
if super().__new__ is object.__new__:
instance = super().__new__(cls)
else:
instance = super().__new__(cls, *args, **kwargs)
cls._register_instance(instance)
instance._cached_implementation = None
return instance
def __init__(self, *, parent=None):
if parent is not None:
cls = type(self)
mro = cls.__mro__[:-2] # Skip 'object' and 'abstract.Bucket'
if type(parent) not in mro:
raise ValueError('parent bucket {!r} must be an instance of one of the '
'ancestor classes {!r}'.format(parent, mro))
parent._register_child(self)
self._parent = parent
self._children = []
def _register_child(self, bucket):
self._children.append(bucket)
def _get_implementation(self):
if self._cached_implementation is None:
backends = type(self).get_backends()
if not backends:
return
self._cached_implementation = type(self).get_implementation()(backends)
return self._cached_implementation
def _ensure_implementation(self):
impl = self._get_implementation()
if not impl:
raise KeyError('non-initialized bucket: no backends/implementation set')
return impl
@classmethod
def _register_instance(cls, instance):
cls._instances.add(instance)
@classmethod
def set_backends(cls, *backends):
# First validate backends against the current Implementation
impl = cls.get_implementation()
for p in backends:
if not isinstance(p, impl.compatible_backend_classes):
raise TypeError('backend {!r} is not compatible with installed implementation '
'{!r}, must be an instance of {!r}'.
format(p, impl, impl.compatible_backend_classes))
# Secondly, validate backends against each child bucket class and self
for child in cls._iter_children(include_self=True):
for backend in backends:
child.validate_backend(backend)
cls._backends = backends
@classmethod
def get_backends(cls):
return getattr(cls, '_backends', None)
@classmethod
def set_implementation(cls, implementation):
if not issubclass(implementation, Implementation):
raise ValueError('a subclass of Implementation was expected')
if hasattr(cls, '_implementation') and '_implementation' not in cls.__dict__:
holder = None
for sub in cls.__mro__[1:-1]:
if '_implementation' in sub.__dict__:
holder = sub
break
raise ValueError('implementation was already defined in one of '
'the parent buckets: {!r}'.format(holder))
cls._implementation = implementation
@classmethod
def get_implementation(cls):
return cls._implementation
@classmethod
def validate_backend(cls, backend):
"""Called recursively for all derived buckets of a bucket on which
"set_backends" is called"""
@classmethod
def _iter_children(cls, include_self=False):
seen = set()
def children(cls):
if cls in seen:
return
seen.add(cls)
for child in cls.__subclasses__():
yield child
yield from children(child)
if include_self:
yield cls
yield from children(cls)
class ImplementationMeta(abc.AbstractMeta):
pass
class Implementation(metaclass=ImplementationMeta):
compatible_backend_classes = None
def __init__(self, backends):
self._backends = backends
class BackendMeta(abc.AbstractMeta, config.ConfigurableMeta):
pass
class Backend(config.Configurable, metaclass=BackendMeta):
pass
| ##
# Copyright (c) 2012, 2013 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import weakref
from metamagic.utils import abc, config
class BucketMeta(abc.AbstractMeta, config.ConfigurableMeta):
def __new__(mcls, name, bases, dct):
cls = super().__new__(mcls, name, bases, dct)
if len([base for base in bases if isinstance(base, mcls)]) > 1:
raise TypeError('Bucket classes can have only one base Bucket class')
cls._instances = weakref.WeakSet()
return cls
class Bucket(metaclass=BucketMeta):
def __new__(cls, *args, **kwargs):
if super().__new__ is object.__new__:
instance = super().__new__(cls)
else:
instance = super().__new__(cls, *args, **kwargs)
cls._register_instance(instance)
instance._cached_implementation = None
return instance
def __init__(self, *, parent=None):
if parent is not None:
cls = type(self)
mro = cls.__mro__[:-2] # Skip 'object' and 'abstract.Bucket'
if type(parent) not in mro:
raise ValueError('parent bucket {!r} must be an instance of one of the '
'ancestor classes {!r}'.format(parent, mro))
parent._register_child(self)
self._parent = parent
self._children = []
def _register_child(self, bucket):
self._children.append(bucket)
def _get_implementation(self):
if self._cached_implementation is None:
backends = type(self).get_backends()
if not backends:
return
self._cached_implementation = type(self).get_implementation()(backends)
return self._cached_implementation
def _ensure_implementation(self):
impl = self._get_implementation()
if not impl:
raise KeyError('non-initialized bucket: no backends/implementation set')
return impl
@classmethod
def _register_instance(cls, instance):
cls._instances.add(instance)
@classmethod
def set_backends(cls, *backends):
impl = cls.get_implementation()
for p in backends:
if not isinstance(p, impl.compatible_backend_classes):
raise TypeError('backend {!r} is not compatible with installed implementation '
'{!r}, must be an instance of {!r}'.
format(p, impl, impl.compatible_backend_classes))
@classmethod
def get_backends(cls):
return getattr(cls, '_backends', None)
@classmethod
def set_implementation(cls, implementation):
if not issubclass(implementation, Implementation):
raise ValueError('a subclass of Implementation was expected')
if hasattr(cls, '_implementation') and '_implementation' not in cls.__dict__:
holder = None
for sub in cls.__mro__[1:-1]:
if '_implementation' in sub.__dict__:
holder = sub
break
raise ValueError('implementation was already defined in one of '
'the parent buckets: {!r}'.format(holder))
cls._implementation = implementation
@classmethod
def get_implementation(cls):
return cls._implementation
class ImplementationMeta(abc.AbstractMeta):
pass
class Implementation(metaclass=ImplementationMeta):
compatible_backend_classes = None
def __init__(self, backends):
self._backends = backends
class BackendMeta(abc.AbstractMeta, config.ConfigurableMeta):
pass
class Backend(metaclass=BackendMeta):
pass
| Python | 0.000002 |
331f5b0a951e13f816e752609ac348df272e1b1e | Update conf_template.py | docs/conf_template.py | docs/conf_template.py | # Statement for enabling the development environment
DEBUG = True
# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Define the database - we are working with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
ORCID_CLIENT_ID = ""
ORCID_SECRET = ""
ORCID_API_URL = "https://orcid.org/oauth/token"
ORCID_REDIRECT_URL = "http://localhost:4200/login"
GITHUB_CLIENT_ID = ""
GITHUB_SECRET = ""
GITHUB_API_URL = "https://github.com/login/oauth/access_token"
GITHUB_USER_API_URL = "https://api.github.com/user"
SHARE_API_URL = "https://share.osf.io/api/v2/search/creativeworks/_search"
SLIDESHARE_API_URL = "https://www.slideshare.net/api/2/get_slideshows_by_user"
SLIDESHARE_PARAMS = "?api_key={api_key}&ts={ts}&hash={hash}&username_for={username}"
SLIDESHARE_API_KEY = ""
SLIDESHARE_SECRET = ""
OPENAIRE_PUBLICATION_API_URL = "http://api.openaire.eu/search/publications?author={author}"
OPENAIRE_DATASET_API_URL = "http://api.openaire.eu/search/datasets?author={author}"
SPARQL_QUERY_ENDPOINT = "http://localhost:3030/ro2share/sparql"
SPARQL_UPLOAD_ENDPOINT = "http://localhost:3030/ro2share/update"
BASE_URI = 'http://ro2share.org/'
TMP_DIR = '/tmp/'
| # Statement for enabling the development environment
DEBUG = True
# Define the application directory
import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Define the database - we are working with
# SQLite for this example
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASE_DIR, 'app.db')
DATABASE_CONNECT_OPTIONS = {}
# Application threads. A common general assumption is
# using 2 per available processor cores - to handle
# incoming requests using one and performing background
# operations using the other.
THREADS_PER_PAGE = 2
# Enable protection agains *Cross-site Request Forgery (CSRF)*
CSRF_ENABLED = True
# Use a secure, unique and absolutely secret key for
# signing the data.
CSRF_SESSION_KEY = "secret"
# Secret key for signing cookies
SECRET_KEY = "secret"
ORCID_CLIENT_ID = ""
ORCID_SECRET = ""
ORCID_API_URL = "https://orcid.org/oauth/token"
ORCID_REDIRECT_URL = "http://localhost:4200/login"
GITHUB_CLIENT_ID = ""
GITHUB_SECRET = ""
GITHUB_API_URL = "https://github.com/login/oauth/access_token"
GITHUB_USER_API_URL = "https://api.github.com/user"
SHARE_API_URL = "https://share.osf.io/api/v2/search/creativeworks/_search"
SLIDESHARE_API_URL = "https://www.slideshare.net/api/2/get_slideshows_by_user"
SLIDESHARE_PARAMS = "?api_key={api_key}&ts={ts}&hash={hash}&username_for={username}"
SLIDESHARE_API_KEY = ""
SLIDESHARE_SECRET = ""
OPENAIRE_PUBLICATION_API_URL = "http://api.openaire.eu/search/publications?author={author}"
OPENAIRE_DATASET_API_URL = "http://api.openaire.eu/search/datasets?author={author}"
SPARQL_QUERY_ENDPOINT = "http://localhost:3030/ro2share/sparql"
SPARQL_UPLOAD_ENDPOINT = "http://localhost:3030/ro2share/update"
BASE_URI = 'http://ro2share.org/'
TMP_DIR = 'tmp/'
| Python | 0.000002 |
67f9a47c3476c189543bffba369d791035e0f159 | add per-link logs | empower/apps/survey/survey.py | empower/apps/survey/survey.py | #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Survey App."""
from empower.core.app import EmpowerApp
from empower.core.app import DEFAULT_PERIOD
from empower.core.resourcepool import BANDS
from empower.datatypes.etheraddress import EtherAddress
DEFAULT_ADDRESS = "ff:ff:ff:ff:ff:ff"
class Survey(EmpowerApp):
"""Survey App.
Command Line Parameters:
tenant_id: tenant id
addr: the address to be tracked (optional, default ff:ff:ff:ff:ff:ff)
every: loop period in ms (optional, default 5000ms)
Example:
./empower-runtime.py apps.survey.survey \
--tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
def __init__(self, **kwargs):
self.__addr = None
EmpowerApp.__init__(self, **kwargs)
self.links = {}
self.wtpup(callback=self.wtp_up_callback)
@property
def addr(self):
"""Return addr."""
return self.__addr
@addr.setter
def addr(self, value):
"""Set addr."""
self.__addr = EtherAddress(value)
def wtp_up_callback(self, wtp):
"""New WTP."""
for block in wtp.supports:
self.summary(addr=self.addr, block=block,
callback=self.summary_callback)
def to_dict(self):
""" Return a JSON-serializable dictionary representing the Summary """
out = super().to_dict()
out['links'] = self.links
return out
def summary_callback(self, summary):
""" New stats available. """
self.log.info("New summary from %s addr %s frames %u", summary.block,
summary.addr, len(summary.frames))
# per block log
filename = "survey_%s_%u_%s.csv" % (summary.block.addr,
summary.block.channel,
BANDS[summary.block.band])
for frame in summary.frames:
line = "%u,%g,%s,%d,%u,%s,%s,%s,%s,%s\n" % \
(frame['tsft'], frame['rate'], frame['rtype'], frame['rssi'],
frame['length'], frame['type'], frame['subtype'],
frame['ra'], frame['ta'], frame['seq'])
with open(filename, 'a') as file_d:
file_d.write(line)
# per link log
for frame in summary.frames:
link = "%s_%s_%u_%s" % (frame['ta'], summary.block.addr,
summary.block.channel,
BANDS[summary.block.band])
filename = "link_%s.csv" % link
if link not in self.links:
self.links[link] = {}
if frame['rssi'] not in self.links[link]:
self.links[link][frame['rssi']] = 0
self.links[link][frame['rssi']] += 1
line = "%u,%g,%s,%d,%u,%s,%s,%s,%s,%s\n" % \
(frame['tsft'], frame['rate'], frame['rtype'], frame['rssi'],
frame['length'], frame['type'], frame['subtype'],
frame['ra'], frame['ta'], frame['seq'])
with open(filename, 'a') as file_d:
file_d.write(line)
def launch(tenant_id, addr=DEFAULT_ADDRESS, every=DEFAULT_PERIOD):
""" Initialize the module. """
return Survey(tenant_id=tenant_id, addr=addr, every=every)
| #!/usr/bin/env python3
#
# Copyright (c) 2016 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Survey App."""
from empower.core.app import EmpowerApp
from empower.core.app import DEFAULT_PERIOD
from empower.core.resourcepool import BANDS
from empower.datatypes.etheraddress import EtherAddress
DEFAULT_ADDRESS = "ff:ff:ff:ff:ff:ff"
class Survey(EmpowerApp):
"""Survey App.
Command Line Parameters:
tenant_id: tenant id
addr: the address to be tracked (optional, default ff:ff:ff:ff:ff:ff)
every: loop period in ms (optional, default 5000ms)
Example:
./empower-runtime.py apps.survey.survey \
--tenant_id=52313ecb-9d00-4b7d-b873-b55d3d9ada26
"""
def __init__(self, **kwargs):
self.__addr = None
EmpowerApp.__init__(self, **kwargs)
self.wtpup(callback=self.wtp_up_callback)
@property
def addr(self):
"""Return addr."""
return self.__addr
@addr.setter
def addr(self, value):
"""Set addr."""
self.__addr = EtherAddress(value)
def wtp_up_callback(self, wtp):
"""New WTP."""
for block in wtp.supports:
self.summary(addr=self.addr, block=block,
callback=self.summary_callback)
def summary_callback(self, summary):
""" New stats available. """
self.log.info("New summary from %s addr %s frames %u", summary.block,
summary.addr, len(summary.frames))
filename = "survey_%s_%u_%s.csv" % (summary.block.addr,
summary.block.channel,
BANDS[summary.block.band])
for frame in summary.frames:
line = "%u,%g,%s,%d,%u,%s,%s,%s,%s,%s\n" % \
(frame['tsft'], frame['rate'], frame['rtype'], frame['rssi'],
frame['length'], frame['type'], frame['subtype'],
frame['ra'], frame['ta'], frame['seq'])
with open(filename, 'a') as file_d:
file_d.write(line)
def launch(tenant_id, addr=DEFAULT_ADDRESS, every=DEFAULT_PERIOD):
""" Initialize the module. """
return Survey(tenant_id=tenant_id, addr=addr, every=every)
| Python | 0.000001 |
dc7158048d491e28322af443c7918ffd0de0d22d | Use logger instead of stdout and stderr | speeches/management/import_commands.py | speeches/management/import_commands.py | import logging
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from instances.models import Instance
logger = logging.getLogger(__name__)
class ImportCommand(BaseCommand):
importer_class = None
document_extension = ''
option_list = BaseCommand.option_list + (
make_option('--commit', action='store_true', help='Whether to commit to the database or not'),
make_option('--instance', action='store', help='Label of instance to add data to'),
make_option('--file', action='store', help='document to import'),
make_option('--dir', action='store', help='directory of documents to import'),
make_option('--start-date', action='store', default='', help='earliest date to process, in yyyy-mm-dd format'),
make_option('--dump-users', action='store', default='', help='dump a json list to <file> (only valid with --dir for now)'),
)
def handle(self, *args, **options):
verbosity = int(options['verbosity'])
if options['commit']:
if not options['instance']:
raise CommandError("You must specify an instance")
instance, _ = Instance.objects.get_or_create(label=options['instance'])
else:
instance = Instance(label=options['instance'])
options['instance'] = instance
if options['file']:
filename = os.path.expanduser(options['file'])
(section, speakers) = self.import_document(filename, **options)
if verbosity > 1:
if section and section.id:
logger.info("Imported section %d\n\n" % section.id)
elif options['dir']:
files = self.document_list(options)
if len(files):
imports = [self.import_document(f, **options) for f in files]
if options['commit']:
sections = [a for a,_ in imports]
if verbosity > 1:
logger.info("Imported sections %s\n\n"
% str( [s.id for s in sections]))
dump_users = os.path.expanduser(options['dump_users'])
if dump_users:
speakers = {}
for (_,d) in imports:
speakers.update(d)
out = open(dump_users, 'w')
speakers_list = [ (k, speakers[k]) for k in speakers]
out.write( json.dumps( speakers_list, indent=4 ) )
if verbosity > 1:
logger.info("Saved speakers list to %s\n" % dump_users)
else:
logger.info("No .%s files found in directory" % self.document_extension)
else:
logger.info(self.help)
def document_list(self, options):
dir = os.path.expanduser(options['dir'])
start_date = options['start_date']
valid = lambda f: f >= start_date if start_date else lambda _: True
return [ os.path.join(root, filename)
for (root, _, files)
in os.walk(dir)
for filename in files
if filename[-4:] == '.%s' % self.document_extension
and valid(filename)]
def document_valid(self, path):
return os.path.isfile(path)
def import_document(self, path, **options):
verbosity = int(options['verbosity'])
if not self.document_valid(path):
raise CommandError("No document found")
if verbosity > 1:
logger.info("Starting import: %s\n" % path)
if self.importer_class == None:
raise CommandError("No importer_class specified!")
importer = self.importer_class(**options)
try:
section = importer.import_document(path)
except Exception as e:
logger.error(str(e))
return (None, {})
return (section, importer.speakers)
| import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from instances.models import Instance
class ImportCommand(BaseCommand):
importer_class = None
document_extension = ''
option_list = BaseCommand.option_list + (
make_option('--commit', action='store_true', help='Whether to commit to the database or not'),
make_option('--instance', action='store', help='Label of instance to add data to'),
make_option('--file', action='store', help='document to import'),
make_option('--dir', action='store', help='directory of documents to import'),
make_option('--start-date', action='store', default='', help='earliest date to process, in yyyy-mm-dd format'),
make_option('--dump-users', action='store', default='', help='dump a json list to <file> (only valid with --dir for now)'),
)
def handle(self, *args, **options):
verbosity = int(options['verbosity'])
if options['commit']:
if not options['instance']:
raise CommandError("You must specify an instance")
instance, _ = Instance.objects.get_or_create(label=options['instance'])
else:
instance = Instance(label=options['instance'])
options['instance'] = instance
if options['file']:
filename = os.path.expanduser(options['file'])
(section, speakers) = self.import_document(filename, **options)
if verbosity > 1:
if section and section.id:
self.stdout.write("Imported section %d\n\n" % section.id)
elif options['dir']:
files = self.document_list(options)
if len(files):
imports = [self.import_document(f, **options) for f in files]
if options['commit']:
sections = [a for a,_ in imports]
if verbosity > 1:
self.stdout.write("Imported sections %s\n\n"
% str( [s.id for s in sections]))
dump_users = os.path.expanduser(options['dump_users'])
if dump_users:
speakers = {}
for (_,d) in imports:
speakers.update(d)
out = open(dump_users, 'w')
speakers_list = [ (k, speakers[k]) for k in speakers]
out.write( json.dumps( speakers_list, indent=4 ) )
if verbosity > 1:
self.stdout.write("Saved speakers list to %s\n" % dump_users)
else:
self.stdout.write("No .%s files found in directory" % self.document_extension)
else:
self.stdout.write(self.help)
def document_list(self, options):
dir = os.path.expanduser(options['dir'])
start_date = options['start_date']
valid = lambda f: f >= start_date if start_date else lambda _: True
return [ os.path.join(root, filename)
for (root, _, files)
in os.walk(dir)
for filename in files
if filename[-4:] == '.%s' % self.document_extension
and valid(filename)]
def document_valid(self, path):
return os.path.isfile(path)
def import_document(self, path, **options):
verbosity = int(options['verbosity'])
if not self.document_valid(path):
raise CommandError("No document found")
if verbosity > 1:
self.stdout.write("Starting import: %s\n" % path)
if self.importer_class == None:
raise CommandError("No importer_class specified!")
importer = self.importer_class(**options)
try:
section = importer.import_document(path)
except Exception as e:
self.stderr.write(str(e))
return (None, {})
return (section, importer.speakers)
| Python | 0.000019 |
fe56573f318578b6359d06d0454af6992f344c20 | load optimizer from config | hypergan/trainers/simultaneous_trainer.py | hypergan/trainers/simultaneous_trainer.py | import numpy as np
import torch
import hyperchamber as hc
import inspect
from hypergan.gan_component import ValidationException, GANComponent
from hypergan.trainers.base_trainer import BaseTrainer
from hypergan.optimizers.adamirror import Adamirror
TINY = 1e-12
class SimultaneousTrainer(BaseTrainer):
""" Steps G and D simultaneously """
def _create(self):
#self.optimizer = torch.optim.Adam(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.0,.999))
#self.optimizer = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.0,.999))
#self.adamirror = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.9074537537537538,.997))
#self.adamirror2 = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"]*3, betas=(0.9074537537537538,.997))
#self.optimizer = self.adamirror
#self.gan.add_component("optimizer", self.optimizer)
#self.gan.add_component("optimizer", self.adamirror2)
#self.gan.add_component("optimizer", self.adamirror)
defn = self.config.optimizer
klass = GANComponent.lookup_function(None, defn['class'])
del defn["class"]
self.optimizer = klass(self.gan.parameters(), **defn)
self.gan.add_component("optimizer", self.optimizer)
def required(self):
return "".split()
def _step(self, feed_dict):
gan = self.gan
config = self.config
loss = gan.loss
metrics = gan.metrics()
self.before_step(self.current_step, feed_dict)
d_grads, g_grads = self.calculate_gradients()
for hook in self.train_hooks:
d_grads, g_grads = hook.gradients(d_grads, g_grads)
for p, np in zip(self.gan.d_parameters(), d_grads):
p.grad = np
for p, np in zip(self.gan.g_parameters(), g_grads):
p.grad = np
self.optimizer.step()
if self.current_step % 10 == 0:
self.print_metrics(self.current_step)
def calculate_gradients(self):
self.optimizer.zero_grad()
d_loss, g_loss = self.gan.forward_loss()
self.d_loss = d_loss
self.g_loss = g_loss
for hook in self.train_hooks:
loss = hook.forward()
if loss[0] is not None:
d_loss += loss[0]
if loss[1] is not None:
g_loss += loss[1]
for p in self.gan.g_parameters():
p.requires_grad = True
for p in self.gan.d_parameters():
p.requires_grad = False
g_loss = g_loss.mean()
g_loss.backward(retain_graph=True)
for p in self.gan.d_parameters():
p.requires_grad = True
for p in self.gan.g_parameters():
p.requires_grad = False
d_loss = d_loss.mean()
d_loss.backward(retain_graph=True)
for p in self.gan.g_parameters():
p.requires_grad = True
d_grads = [p.grad for p in self.gan.d_parameters()]
g_grads = [p.grad for p in self.gan.g_parameters()]
return d_grads, g_grads
def print_metrics(self, step):
metrics = self.gan.metrics()
metric_values = self.output_variables(metrics)
print(str(self.output_string(metrics) % tuple([step] + metric_values)))
| import numpy as np
import torch
import hyperchamber as hc
import inspect
from hypergan.trainers.base_trainer import BaseTrainer
from hypergan.optimizers.adamirror import Adamirror
TINY = 1e-12
class SimultaneousTrainer(BaseTrainer):
""" Steps G and D simultaneously """
def _create(self):
self.optimizer = torch.optim.Adam(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.0,.999))
#self.optimizer = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.0,.999))
#self.adamirror = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"], betas=(0.9074537537537538,.997))
#self.adamirror2 = Adamirror(self.gan.parameters(), lr=self.config.optimizer["learn_rate"]*3, betas=(0.9074537537537538,.997))
#self.optimizer = self.adamirror
self.gan.add_component("optimizer", self.optimizer)
#self.gan.add_component("optimizer", self.adamirror2)
#self.gan.add_component("optimizer", self.adamirror)
def required(self):
return "".split()
def _step(self, feed_dict):
gan = self.gan
config = self.config
loss = gan.loss
metrics = gan.metrics()
self.before_step(self.current_step, feed_dict)
d_grads, g_grads = self.calculate_gradients()
for hook in self.train_hooks:
d_grads, g_grads = hook.gradients(d_grads, g_grads)
for p, np in zip(self.gan.d_parameters(), d_grads):
p.grad = np
for p, np in zip(self.gan.g_parameters(), g_grads):
p.grad = np
self.optimizer.step()
if self.current_step % 10 == 0:
self.print_metrics(self.current_step)
def calculate_gradients(self):
self.optimizer.zero_grad()
d_loss, g_loss = self.gan.forward_loss()
self.d_loss = d_loss
self.g_loss = g_loss
for hook in self.train_hooks:
loss = hook.forward()
if loss[0] is not None:
d_loss += loss[0]
if loss[1] is not None:
g_loss += loss[1]
for p in self.gan.g_parameters():
p.requires_grad = True
for p in self.gan.d_parameters():
p.requires_grad = False
g_loss = g_loss.mean()
g_loss.backward(retain_graph=True)
for p in self.gan.d_parameters():
p.requires_grad = True
for p in self.gan.g_parameters():
p.requires_grad = False
d_loss = d_loss.mean()
d_loss.backward(retain_graph=True)
for p in self.gan.g_parameters():
p.requires_grad = True
d_grads = [p.grad for p in self.gan.d_parameters()]
g_grads = [p.grad for p in self.gan.g_parameters()]
return d_grads, g_grads
def print_metrics(self, step):
metrics = self.gan.metrics()
metric_values = self.output_variables(metrics)
print(str(self.output_string(metrics) % tuple([step] + metric_values)))
| Python | 0 |
30db4b3ae377669b3b598c9d4d22b5fbff2082ab | Fix typo on model | app/backend/aquifers/serializers.py | app/backend/aquifers/serializers.py | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from aquifers.models import Aquifer
class AquiferSerializer(serializers.ModelSerializer):
"""Serialize a aquifer list"""
demand_description = serializers.SlugRelatedField(source='demand', read_only=True, slug_field='description')
material_description = serializers.SlugRelatedField(source='material', read_only=True, slug_field='description')
productivity_description = serializers.SlugRelatedField(source='productivity', read_only=True, slug_field='description')
subtype_description = serializers.SlugRelatedField(source='subtype', read_only=True, slug_field='description')
vulnerability_description = serializers.SlugRelatedField(source='vulnerability', read_only=True, slug_field='description')
quality_concern_description = serializers.SlugRelatedField(source='quality_concern', read_only=True, slug_field='description')
class Meta:
model = Aquifer
fields = (
'aquifer_id',
'aquifer_name',
'area',
'demand_description',
'demand',
'litho_stratographic_unit',
'location_description',
'mapping_year',
'material_description',
'material',
'productivity_description',
'productivity',
'quality_concern_description',
'quality_concern',
'subtype_description',
'subtype',
'vulnerability_description',
'vulnerability'
) | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from rest_framework import serializers
from aquifers.models import Aquifer
class AquiferSerializer(serializers.ModelSerializer):
"""Serialize a aquifer list"""
demand_description = serializers.SlugRelatedField(source='demand', read_only=True, slug_field='description')
material_description = serializers.SlugRelatedField(source='material', read_only=True, slug_field='description')
productivity_description = serializers.SlugRelatedField(source='productivity', read_only=True, slug_field='description')
subtype_description = serializers.SlugRelatedField(source='subtype', read_only=True, slug_field='description')
vulnerability_description = serializers.SlugRelatedField(source='vulnerability', read_only=True, slug_field='description')
quality_concern_description = serializers.SlugRelatedField(source='quality_concert', read_only=True, slug_field='description')
class Meta:
model = Aquifer
fields = (
'aquifer_id',
'aquifer_name',
'area',
'demand_description',
'demand',
'litho_stratographic_unit',
'location_description',
'mapping_year',
'material_description',
'material',
'productivity_description',
'productivity',
'quality_concern_description',
'quality_concern',
'subtype_description',
'subtype',
'vulnerability_description',
'vulnerability'
) | Python | 0.000585 |
306f597faad120ad5b5327544b40537fd0724f96 | stop url render errors failing the plugin | flexget/plugins/output/prowl.py | flexget/plugins/output/prowl.py | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError
log = logging.getLogger('prowl')
class OutputProwl(object):
"""
Send prowl notifications
Example::
prowl:
apikey: xxxxxxx
[application: application name, default FlexGet]
[event: event title, default New Release]
[priority: -2 - 2 (2 = highest), default 0]
[description: notification to send]
Configuration parameters are also supported from entries (eg. through set).
"""
schema = {
'type': 'object',
'properties': {
'apikey': {'type': 'string'},
'application': {'type': 'string', 'default': 'FlexGet'},
'event': {'type': 'string', 'default': 'New Release'},
'priority': {'type': 'integer', 'default': 0},
'description': {'type': 'string'},
'url': {'type': 'string'}
},
'required': ['apikey'],
'additionalProperties': False
}
# Run last to make sure other outputs are successful before sending notification
@plugin.priority(0)
def on_task_output(self, task, config):
for entry in task.accepted:
# get the parameters
apikey = entry.get('apikey', config['apikey'])
application = entry.get('application', config['application'])
event = entry.get('event', config['event'])
priority = entry.get('priority', config['priority'])
description = config.get('description', entry['title'])
message_url = config.get('url', '')
# If event has jinja template, render it
try:
event = entry.render(event)
except RenderError as e:
log.error('Error rendering jinja event: %s' % e)
# If description has jinja template, render it
try:
description = entry.render(description)
except RenderError as e:
description = entry['title']
log.error('Error rendering jinja description: %s' % e)
# If url has jinja template, render it
try:
message_url = entry.render(message_url)
except RenderError as e:
message_url = ''
log.error('Error rendering jinja url: %s' % e)
url = 'https://api.prowlapp.com/publicapi/add'
data = {'priority': priority, 'application': application, 'apikey': apikey,
'event': event.encode('utf-8'), 'description': description, 'url': message_url}
if task.options.test:
log.info('Would send prowl message about: %s', entry['title'])
log.debug('options: %s' % data)
continue
try:
response = task.requests.post(url, data=data, raise_status=False)
except RequestException as e:
log.error('Error with request: %s' % e)
continue
# Check if it succeeded
request_status = response.status_code
# error codes and messages from http://prowl.weks.net/api.php
if request_status == 200:
log.debug("Prowl message sent")
elif request_status == 400:
log.error("Bad request, the parameters you provided did not validate")
elif request_status == 401:
log.error("Not authorized, the API key given is not valid, and does not correspond to a user.")
elif request_status == 406:
log.error("Not acceptable, your IP address has exceeded the API limit.")
elif request_status == 409:
log.error("Not approved, the user has yet to approve your retrieve request.")
elif request_status == 500:
log.error("Internal server error, something failed to execute properly on the Prowl side.")
else:
log.error("Unknown error when sending Prowl message")
@event('plugin.register')
def register_plugin():
plugin.register(OutputProwl, 'prowl', api_ver=2)
| from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from requests import RequestException
from flexget import plugin
from flexget.event import event
from flexget.utils.template import RenderError
log = logging.getLogger('prowl')
class OutputProwl(object):
"""
Send prowl notifications
Example::
prowl:
apikey: xxxxxxx
[application: application name, default FlexGet]
[event: event title, default New Release]
[priority: -2 - 2 (2 = highest), default 0]
[description: notification to send]
Configuration parameters are also supported from entries (eg. through set).
"""
schema = {
'type': 'object',
'properties': {
'apikey': {'type': 'string'},
'application': {'type': 'string', 'default': 'FlexGet'},
'event': {'type': 'string', 'default': 'New Release'},
'priority': {'type': 'integer', 'default': 0},
'description': {'type': 'string'},
'url': {'type': 'string'}
},
'required': ['apikey'],
'additionalProperties': False
}
# Run last to make sure other outputs are successful before sending notification
@plugin.priority(0)
def on_task_output(self, task, config):
for entry in task.accepted:
# get the parameters
apikey = entry.get('apikey', config['apikey'])
application = entry.get('application', config['application'])
event = entry.get('event', config['event'])
priority = entry.get('priority', config['priority'])
description = config.get('description', entry['title'])
message_url = config.get('url', '')
# If event has jinja template, render it
try:
event = entry.render(event)
except RenderError as e:
log.error('Error rendering jinja event: %s' % e)
# If description has jinja template, render it
try:
description = entry.render(description)
except RenderError as e:
description = entry['title']
log.error('Error rendering jinja description: %s' % e)
# If url has jinja template, render it
try:
message_url = entry.render(message_url)
except RenderError as e:
log.error('Error rendering jinja url: %s' % e)
url = 'https://api.prowlapp.com/publicapi/add'
data = {'priority': priority, 'application': application, 'apikey': apikey,
'event': event.encode('utf-8'), 'description': description, 'url': message_url}
if task.options.test:
log.info('Would send prowl message about: %s', entry['title'])
log.debug('options: %s' % data)
continue
try:
response = task.requests.post(url, data=data, raise_status=False)
except RequestException as e:
log.error('Error with request: %s' % e)
continue
# Check if it succeeded
request_status = response.status_code
# error codes and messages from http://prowl.weks.net/api.php
if request_status == 200:
log.debug("Prowl message sent")
elif request_status == 400:
log.error("Bad request, the parameters you provided did not validate")
elif request_status == 401:
log.error("Not authorized, the API key given is not valid, and does not correspond to a user.")
elif request_status == 406:
log.error("Not acceptable, your IP address has exceeded the API limit.")
elif request_status == 409:
log.error("Not approved, the user has yet to approve your retrieve request.")
elif request_status == 500:
log.error("Internal server error, something failed to execute properly on the Prowl side.")
else:
log.error("Unknown error when sending Prowl message")
@event('plugin.register')
def register_plugin():
plugin.register(OutputProwl, 'prowl', api_ver=2)
| Python | 0.000001 |
2954d63dbd4f48eb6141fdb1298290c2adaf5814 | Fix installed-unit builder. | flocker/provision/_rackspace.py | flocker/provision/_rackspace.py | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Rackspace provisioner.
"""
from characteristic import attributes, Attribute
from ._libcloud import get_size, get_image
from ._install import provision
# _node isn't immutable, since libcloud provides new instances
# with updated data.
@attributes([Attribute('_node'), 'address', 'distribution'])
class RackspaceNode(object):
def destroy(self):
self._node.destroy()
def provision(self, package_source):
"""
Provision flocker on this node.
"""
provision(
self.address, username="root",
package_source=package_source,
distribution=self.distribution,
)
return self.address
IMAGE_NAMES = {
'fedora-20': u'Fedora 20 (Heisenbug) (PVHVM)',
}
@attributes([Attribute('_keyname')], apply_immutable=True)
class Rackspace(object):
def __init__(self, username, key, region):
# Import these here, so that this can be imported without
# installng libcloud.
from libcloud.compute.providers import get_driver, Provider
self._driver = get_driver(Provider.RACKSPACE)(
key=username,
secret=key,
region=region)
def create_node(self, name, distribution,
userdata=None,
size="performance1-2", disk_size=8,
keyname=None, metadata={}):
"""
:param str name: The name of the node.
:param str base_ami: The name of the ami to use.
:param bytes userdata: User data to pass to the instance.
:param bytes size: The name of the size to use.
:param int disk_size: The size of disk to allocate.
:param dict metadata: Metadata to associate with the node.
"""
if keyname is None:
keyname = self._keyname
image_name = IMAGE_NAMES[distribution]
node = self._driver.create_node(
name=name,
image=get_image(self._driver, image_name),
size=get_size(self._driver, size),
ex_keyname=keyname,
ex_userdata=userdata,
ex_config_drive="true",
ex_metadata=metadata,
)
node, addresses = self._driver.wait_until_running([node])[0]
public_address = addresses[0]
return RackspaceNode(node=node, address=public_address,
distribution=distribution)
| # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Rackspace provisioner.
"""
from libcloud.compute.providers import get_driver, Provider
from characteristic import attributes, Attribute
from ._libcloud import get_size, get_image
from ._install import provision
# _node isn't immutable, since libcloud provides new instances
# with updated data.
@attributes([Attribute('_node'), 'address', 'distribution'])
class RackspaceNode(object):
def destroy(self):
self._node.destroy()
def provision(self, package_source):
"""
Provision flocker on this node.
"""
provision(
self.address, username="root",
package_source=package_source,
distribution=self.distribution,
)
return self.address
IMAGE_NAMES = {
'fedora-20': u'Fedora 20 (Heisenbug) (PVHVM)',
}
@attributes([Attribute('_keyname')], apply_immutable=True)
class Rackspace(object):
def __init__(self, username, key, region):
self._driver = get_driver(Provider.RACKSPACE)(
key=username,
secret=key,
region=region)
def create_node(self, name, distribution,
userdata=None,
size="performance1-2", disk_size=8,
keyname=None, metadata={}):
"""
:param str name: The name of the node.
:param str base_ami: The name of the ami to use.
:param bytes userdata: User data to pass to the instance.
:param bytes size: The name of the size to use.
:param int disk_size: The size of disk to allocate.
:param dict metadata: Metadata to associate with the node.
"""
if keyname is None:
keyname = self._keyname
image_name = IMAGE_NAMES[distribution]
node = self._driver.create_node(
name=name,
image=get_image(self._driver, image_name),
size=get_size(self._driver, size),
ex_keyname=keyname,
ex_userdata=userdata,
ex_config_drive="true",
ex_metadata=metadata,
)
node, addresses = self._driver.wait_until_running([node])[0]
public_address = addresses[0]
return RackspaceNode(node=node, address=public_address,
distribution=distribution)
| Python | 0 |
96bc8bdec2736a1f86528d55331e935ecf13a529 | np nan validator | pycqed/instrument_drivers/pq_parameters.py | pycqed/instrument_drivers/pq_parameters.py | from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Validator, Strings
import numpy as np
class NP_NANs(Validator):
is_numeric = True
def __init__(self):
self._valid_values = [np.nan]
def __repr__(self):
return '<nan>'
def validate(self, value, context=''):
try:
if not np.isnan(value):
raise ValueError('{} is not nan; {}'.format(
repr(value), context))
except:
raise ValueError('{} is not nan; {}'.format(
repr(value), context))
class InstrumentParameter(ManualParameter):
"""
Args:
name (string): the name of the instrument that one wants to add.
instrument (Optional[Instrument]): the "parent" instrument this
parameter is attached to, if any.
initial_value (Optional[string]): starting value, the
only invalid value allowed, and None is only allowed as an initial
value, it cannot be set later
**kwargs: Passed to Parameter parent class
"""
def get_instr(self):
"""
Returns the instance of the instrument with the name equal to the
value of this parameter.
"""
instrument_name = self.get()
# note that _instrument refers to the instrument this parameter belongs
# to, while the instrument_name is the instrument that is the value
# of this parameter.
return self._instrument.find_instrument(instrument_name)
def set_validator(self, vals):
"""
Set a validator `vals` for this parameter.
Args:
vals (Validator): validator to set
"""
if vals is None:
self.vals = Strings()
elif isinstance(vals, Validator):
self.vals = vals
else:
raise TypeError('vals must be a Validator')
class ConfigParameter(ManualParameter):
# TODO: move this to qcodes as a pull request
"""
Define one parameter that reflects a manual configuration setting.
Args:
name (string): the local name of this parameter
instrument (Optional[Instrument]): the instrument this applies to,
if any.
initial_value (Optional[string]): starting value, the
only invalid value allowed, and None is only allowed as an initial
value, it cannot be set later
**kwargs: Passed to Parameter parent class
"""
def __init__(self, name, instrument=None, initial_value=None, **kwargs):
super().__init__(name=name, **kwargs)
self._instrument = instrument
# if the instrument does not have _config_changed attribute creates it
if not hasattr(self._instrument, '_config_changed'):
self._instrument._config_changed = True
self._meta_attrs.extend(['instrument', 'initial_value'])
if initial_value is not None:
self.validate(initial_value)
self._save_val(initial_value)
def set(self, value):
"""
Validate and saves value.
If the value is different from the latest value it sets the
Args:
value (any): value to validate and save
"""
self.validate(value)
if value != self.get_latest():
self._instrument._config_changed = True
self._save_val(value)
def get(self):
""" Return latest value"""
return self.get_latest()
| from qcodes.instrument.parameter import ManualParameter
from qcodes.utils.validators import Validator, Strings
import numpy as np
class NP_NANs(Validator):
is_numeric = True
def __init__(self):
pass
def __repr__(self):
return '<nan>'
def validate(self, value, context=''):
try:
if not np.isnan(value):
raise ValueError('{} is not nan; {}'.format(
repr(value), context))
except:
raise ValueError('{} is not nan; {}'.format(
repr(value), context))
class InstrumentParameter(ManualParameter):
"""
Args:
name (string): the name of the instrument that one wants to add.
instrument (Optional[Instrument]): the "parent" instrument this
parameter is attached to, if any.
initial_value (Optional[string]): starting value, the
only invalid value allowed, and None is only allowed as an initial
value, it cannot be set later
**kwargs: Passed to Parameter parent class
"""
def get_instr(self):
"""
Returns the instance of the instrument with the name equal to the
value of this parameter.
"""
instrument_name = self.get()
# note that _instrument refers to the instrument this parameter belongs
# to, while the instrument_name is the instrument that is the value
# of this parameter.
return self._instrument.find_instrument(instrument_name)
def set_validator(self, vals):
"""
Set a validator `vals` for this parameter.
Args:
vals (Validator): validator to set
"""
if vals is None:
self.vals = Strings()
elif isinstance(vals, Validator):
self.vals = vals
else:
raise TypeError('vals must be a Validator')
class ConfigParameter(ManualParameter):
# TODO: move this to qcodes as a pull request
"""
Define one parameter that reflects a manual configuration setting.
Args:
name (string): the local name of this parameter
instrument (Optional[Instrument]): the instrument this applies to,
if any.
initial_value (Optional[string]): starting value, the
only invalid value allowed, and None is only allowed as an initial
value, it cannot be set later
**kwargs: Passed to Parameter parent class
"""
def __init__(self, name, instrument=None, initial_value=None, **kwargs):
super().__init__(name=name, **kwargs)
self._instrument = instrument
# if the instrument does not have _config_changed attribute creates it
if not hasattr(self._instrument, '_config_changed'):
self._instrument._config_changed = True
self._meta_attrs.extend(['instrument', 'initial_value'])
if initial_value is not None:
self.validate(initial_value)
self._save_val(initial_value)
def set(self, value):
"""
Validate and saves value.
If the value is different from the latest value it sets the
Args:
value (any): value to validate and save
"""
self.validate(value)
if value != self.get_latest():
self._instrument._config_changed = True
self._save_val(value)
def get(self):
""" Return latest value"""
return self.get_latest()
| Python | 0.999504 |
8995d7314bddcf4418a08cb39b2fabbc8704706e | Use conservative defaults for local facebook settings. | pykeg/src/pykeg/contrib/facebook/models.py | pykeg/src/pykeg/contrib/facebook/models.py | import datetime
from django.db import models
from django.db.models.signals import post_save
from socialregistration import models as sr_models
PRIVACY_CHOICES = (
('EVERYONE', 'Everyone'),
('ALL_FRIENDS', 'Friends'),
('FRIENDS_OF_FRIENDS', 'Friends of Friends'),
('NETWORK_FRIENDS', 'Networks and Friends'),
#('CUSTOM', 'Custom permissions'),
)
class FacebookSession(models.Model):
"""Stores the session id for a user."""
profile = models.ForeignKey(sr_models.FacebookProfile, unique=True,
related_name='session')
session_id = models.CharField(max_length=255, blank=False, null=False)
updated = models.DateTimeField(default=datetime.datetime.now)
@classmethod
def get_session(cls, request):
if not hasattr(request, 'facebook'):
raise ValueError, "no facebook"
return None
fb = request.facebook
if not fb.uid:
raise ValueError, "no uid"
return None
profile = sr_models.FacebookProfile.objects.get(uid=fb.uid)
if not profile:
raise ValueError, "no profile"
return None
session, new = FacebookSession.objects.get_or_create(profile=profile)
if new or session.session_id != fb.session_key:
session.session_id = fb.session_key
session.save()
def add_permission(self, perm):
qs = self.profile.permission_set.filter(permission=perm)
if not qs.count():
perm = FacebookPermission(profile=self.profile, permission=perm)
perm.save()
def rm_permission(self, perm):
qs = self.profile.permission_set.filter(permission=perm)
if qs.count():
qs.delete()
def profile_post_save(sender, instance, **kwargs):
"""Create default settings on new profile."""
settings, new = FacebookSettings.objects.get_or_create(
profile=instance)
post_save.connect(profile_post_save, sender=sr_models.FacebookProfile)
class FacebookPermission(models.Model):
"""Records a granted permission."""
profile = models.ForeignKey(sr_models.FacebookProfile, unique=True,
related_name='permission_set')
permission = models.CharField(max_length=255, blank=False, null=False,
unique=True)
class FacebookSettings(models.Model):
profile = models.ForeignKey(sr_models.FacebookProfile, unique=True,
related_name='settings')
# stream.publish stuff
# http://wiki.developers.facebook.com/index.php/Stream.publish
publish_events = models.BooleanField(default=False,
help_text='Post each drink to your wall.')
include_link = models.BooleanField(default=False,
help_text='Add a link to this kegbot when publishing to wall.')
publish_status = models.BooleanField(default=False,
help_text='Update status on start of a new drinking session.')
privacy = models.CharField(max_length=64, choices=PRIVACY_CHOICES,
default='ALL_FRIENDS',
help_text='Privacy setting for drink posts.')
| import datetime
from django.db import models
from django.db.models.signals import post_save
from socialregistration import models as sr_models
PRIVACY_CHOICES = (
('EVERYONE', 'Everyone'),
('ALL_FRIENDS', 'Friends'),
('FRIENDS_OF_FRIENDS', 'Friends of Friends'),
('NETWORK_FRIENDS', 'Networks and Friends'),
#('CUSTOM', 'Custom permissions'),
)
class FacebookSession(models.Model):
"""Stores the session id for a user."""
profile = models.ForeignKey(sr_models.FacebookProfile, unique=True,
related_name='session')
session_id = models.CharField(max_length=255, blank=False, null=False)
updated = models.DateTimeField(default=datetime.datetime.now)
@classmethod
def get_session(cls, request):
if not hasattr(request, 'facebook'):
raise ValueError, "no facebook"
return None
fb = request.facebook
if not fb.uid:
raise ValueError, "no uid"
return None
profile = sr_models.FacebookProfile.objects.get(uid=fb.uid)
if not profile:
raise ValueError, "no profile"
return None
session, new = FacebookSession.objects.get_or_create(profile=profile)
if new or session.session_id != fb.session_key:
session.session_id = fb.session_key
session.save()
def add_permission(self, perm):
qs = self.profile.permission_set.filter(permission=perm)
if not qs.count():
perm = FacebookPermission(profile=self.profile, permission=perm)
perm.save()
def rm_permission(self, perm):
qs = self.profile.permission_set.filter(permission=perm)
if qs.count():
qs.delete()
def profile_post_save(sender, instance, **kwargs):
"""Create default settings on new profile."""
settings, new = FacebookSettings.objects.get_or_create(
profile=instance)
post_save.connect(profile_post_save, sender=sr_models.FacebookProfile)
class FacebookPermission(models.Model):
"""Records a granted permission."""
profile = models.ForeignKey(sr_models.FacebookProfile, unique=True,
related_name='permission_set')
permission = models.CharField(max_length=255, blank=False, null=False,
unique=True)
class FacebookSettings(models.Model):
profile = models.ForeignKey(sr_models.FacebookProfile, unique=True,
related_name='settings')
# stream.publish stuff
# http://wiki.developers.facebook.com/index.php/Stream.publish
publish_events = models.BooleanField(default=True,
help_text='Post each drink to your wall.')
include_link = models.BooleanField(default=True,
help_text='Add a link to this kegbot when publishing to wall.')
publish_status = models.BooleanField(default=False,
help_text='Update status on start of a new drinking session.')
privacy = models.CharField(max_length=64, choices=PRIVACY_CHOICES,
default='ALL_FRIENDS',
help_text='Privacy setting for drink posts.')
| Python | 0 |
50d11a45ddbd2b535111de2307a0c6a1b0443577 | Fix imports | motobot/core_plugins/privmsg_handlers.py | motobot/core_plugins/privmsg_handlers.py | from motobot import hook
from time import strftime, localtime
import re
@hook('PRIVMSG')
def __handle_privmsg(bot, message):
""" Handle the privmsg commands.
Will send the reply back to the channel the command was sent from,
or back to the user whom sent it in the case of a private message.
Commands (prefixed with command_prefix) are executed, CTCP is handled,
and the matches are checked.
"""
response = None
message.message = strip_control_codes(message.message)
target = message.channel \
if is_channel(message.channel) \
else message.nick
if message.message.startswith(bot.command_prefix):
command = message.message.split(' ')[0][len(bot.command_prefix):]
response = bot.commands[command](bot, message, bot.database)
if response is not None:
response = 'PRIVMSG {} :{}'.format(target, response)
elif is_ctcp(message):
response = ctcp_response(message.message[1:-1])
if response is not None:
response = 'NOTICE {} :\u0001{}\u0001'.format(target, response)
else:
for pattern, func in bot.patterns:
if pattern.search(message.message):
response = func(bot, message, bot.database)
if response is not None:
response = 'PRIVMSG {} :{}'.format(target, response)
if response is None:
for sink in bot.sinks:
response = sink(bot, message, bot.database)
if response is not None:
response = 'PRIVMSG {} :{}'.format(target, response)
break
return response
def strip_control_codes(input):
""" Strip the control codes from the input. """
pattern = re.compile(r'\x03[0-9]{0,2},?[0-9]{0,2}|\x02|\x1D|\x1F|\x16|\x0F')
output = pattern.sub('', input)
return output
def is_channel(name):
""" Check if a name is a valid channel name or not. """
valid = ['&', '#', '+', '!']
invalid = [' ', ',', '\u0007']
return (name[0] in valid) and all(c not in invalid for c in name)
def is_ctcp(message):
""" Check if a message object is a ctcp message or not. """
return message.message.startswith('\u0001') and \
message.message.endswith('\u0001')
def ctcp_response(message):
""" Return the appropriate response to a CTCP request. """
mapping = {
'VERSION': 'MotoBot Version 2.0',
'FINGER': 'Oh you dirty man!',
'TIME': strftime('%a %b %d %H:%M:%S', localtime()),
'PING': message
}
return mapping.get(message.split(' ')[0].upper(), None)
| from motobot import hook
from time import strftime, localtime
@hook('PRIVMSG')
def __handle_privmsg(bot, message):
""" Handle the privmsg commands.
Will send the reply back to the channel the command was sent from,
or back to the user whom sent it in the case of a private message.
Commands (prefixed with command_prefix) are executed, CTCP is handled,
and the matches are checked.
"""
response = None
message.message = strip_control_codes(message.message)
target = message.channel \
if is_channel(message.channel) \
else message.nick
if message.message.startswith(bot.command_prefix):
command = message.message.split(' ')[0][len(bot.command_prefix):]
response = bot.commands[command](bot, message, bot.database)
if response is not None:
response = 'PRIVMSG {} :{}'.format(target, response)
elif is_ctcp(message):
response = ctcp_response(message.message[1:-1])
if response is not None:
response = 'NOTICE {} :\u0001{}\u0001'.format(target, response)
else:
for pattern, func in bot.patterns:
if pattern.search(message.message):
response = func(bot, message, bot.database)
if response is not None:
response = 'PRIVMSG {} :{}'.format(target, response)
if response is None:
for sink in bot.sinks:
response = sink(bot, message, bot.database)
if response is not None:
response = 'PRIVMSG {} :{}'.format(target, response)
break
return response
def strip_control_codes(input):
""" Strip the control codes from the input. """
pattern = re.compile(r'\x03[0-9]{0,2},?[0-9]{0,2}|\x02|\x1D|\x1F|\x16|\x0F')
output = pattern.sub('', input)
return output
def is_channel(name):
""" Check if a name is a valid channel name or not. """
valid = ['&', '#', '+', '!']
invalid = [' ', ',', '\u0007']
return (name[0] in valid) and all(c not in invalid for c in name)
def is_ctcp(message):
""" Check if a message object is a ctcp message or not. """
return message.message.startswith('\u0001') and \
message.message.endswith('\u0001')
def ctcp_response(message):
""" Return the appropriate response to a CTCP request. """
mapping = {
'VERSION': 'MotoBot Version 2.0',
'FINGER': 'Oh you dirty man!',
'TIME': strftime('%a %b %d %H:%M:%S', localtime()),
'PING': message
}
return mapping.get(message.split(' ')[0].upper(), None)
| Python | 0.000002 |
cfb7e2a4659d8537b8026c2928c5a635e16070ee | Speed up the user admin page | apps/auth/admin.py | apps/auth/admin.py | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import datetime
from django import forms
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from models import CustomUser, Announcement
class CustomUserCreationForm(UserCreationForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^\w+$',
help_text = _("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
error_message = _("This value must contain only letters, numbers and underscores."))
password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput)
email = forms.EmailField(label=_('Email'))
class Meta:
model = CustomUser
fields = ("username", "email")
class UserChangeList(ChangeList):
def get_ordering(self, request, queryset):
# The default ChangeList code adds CustomUser.id to the list of
# ordering fields to make things deterministic. However this kills
# performance because the ORDER BY clause includes columns from 2
# different tables (auth_user.username, auth_customuser.id).
#
# Also, sorting by any column other than user also kills performance
# since our user table is quite large at this point.
#
# So we just override everything and force the sort to be username.
# Username is a unique key so the sort will be fast and deterministic.
return ['username']
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff',
'is_superuser', 'last_ip', 'partner')
search_fields = ('username', 'first_name', 'last_name', 'email', 'id')
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2')}
),
)
def get_changelist(self, request, **kwargs):
return UserChangeList
class AnnouncementAdmin(admin.ModelAdmin):
formfield_overrides = {
models.CharField: {'widget': widgets.AdminTextareaWidget}
}
list_display = ('content', 'created', 'visible')
actions = ['make_hidden']
def get_form(self, request, obj=None, **kwargs):
form = super(AnnouncementAdmin, self).get_form(request, obj=None, **kwargs)
default_help_text = form.base_fields['created'].help_text
now = datetime.now()
form.base_fields['created'].help_text = default_help_text+\
u'</br>Current server time is %s. Value is saved without timezone converting.' % now.strftime('%m/%d/%Y %H:%M:%S')
return form
def visible(self, obj):
return not obj.hidden
visible.boolean = True
def make_hidden(self, request, queryset):
Announcement.clear_cache()
queryset.update(hidden=True)
make_hidden.short_description = _(u'Hide')
admin.site.register(Announcement, AnnouncementAdmin)
admin.site.unregister(User)
admin.site.register(CustomUser, CustomUserAdmin)
| # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import datetime
from django import forms
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
from models import CustomUser, Announcement
class CustomUserCreationForm(UserCreationForm):
username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^\w+$',
help_text = _("Required. 30 characters or fewer. Alphanumeric characters only (letters, digits and underscores)."),
error_message = _("This value must contain only letters, numbers and underscores."))
password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"), widget=forms.PasswordInput)
email = forms.EmailField(label=_('Email'))
class Meta:
model = CustomUser
fields = ("username", "email")
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff',
'is_superuser', 'last_ip', 'partner')
search_fields = ('username', 'first_name', 'last_name', 'email', 'id')
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('username', 'email', 'password1', 'password2')}
),
)
class AnnouncementAdmin(admin.ModelAdmin):
formfield_overrides = {
models.CharField: {'widget': widgets.AdminTextareaWidget}
}
list_display = ('content', 'created', 'visible')
actions = ['make_hidden']
def get_form(self, request, obj=None, **kwargs):
form = super(AnnouncementAdmin, self).get_form(request, obj=None, **kwargs)
default_help_text = form.base_fields['created'].help_text
now = datetime.now()
form.base_fields['created'].help_text = default_help_text+\
u'</br>Current server time is %s. Value is saved without timezone converting.' % now.strftime('%m/%d/%Y %H:%M:%S')
return form
def visible(self, obj):
return not obj.hidden
visible.boolean = True
def make_hidden(self, request, queryset):
Announcement.clear_cache()
queryset.update(hidden=True)
make_hidden.short_description = _(u'Hide')
admin.site.register(Announcement, AnnouncementAdmin)
admin.site.unregister(User)
admin.site.register(CustomUser, CustomUserAdmin)
| Python | 0.999004 |
eff9a7fa2c25739926a8c583c51f30fee66185c9 | return plugin name changed at loading | keystoneauth_oidc_refreshtoken/loading.py | keystoneauth_oidc_refreshtoken/loading.py | # coding=utf-8
# Copyright 2017 JOSÉ JOAQUÍN ESCOBAR GÓMEZ
# File: loading.py
# Description:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import loading
from keystoneauth1.loading._plugins.identity import v3
from keystoneauth_oidc_refreshtoken import plugin
class OpenIDConnectRefreshToken(v3._OpenIDConnectBase):
@property
def plugin_class(self):
return plugin.OidcRefreshToken
def get_options(self):
options = super(OpenIDConnectRefreshToken, self).get_options()
options.extend([
loading.Opt('refresh_token', required=True,
help='OAuth 2.0 Refresh Token')
])
return options
| # coding=utf-8
# Copyright 2017 JOSÉ JOAQUÍN ESCOBAR GÓMEZ
# File: loading.py
# Description:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneauth1 import loading
from keystoneauth1.loading._plugins.identity import v3
from keystoneauth_oidc_refreshtoken import plugin
class OpenIDConnectRefreshToken(v3._OpenIDConnectBase):
@property
def plugin_class(self):
return plugin.v3oidcrefreshtoken
def get_options(self):
options = super(OpenIDConnectRefreshToken, self).get_options()
options.extend([
loading.Opt('refresh_token', required=True,
help='OAuth 2.0 Refresh Token')
])
return options
| Python | 0 |
b978d2a1f2f9cc9942971a6e252ccd1209a9269b | remove message (#8163) | pytorch_lightning/metrics/__init__.py | pytorch_lightning/metrics/__init__.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.metrics.classification import ( # noqa: F401
Accuracy,
AUC,
AUROC,
AveragePrecision,
ConfusionMatrix,
F1,
FBeta,
HammingDistance,
IoU,
Precision,
PrecisionRecallCurve,
Recall,
ROC,
StatScores,
)
from pytorch_lightning.metrics.metric import Metric, MetricCollection # noqa: F401
from pytorch_lightning.metrics.regression import ( # noqa: F401
ExplainedVariance,
MeanAbsoluteError,
MeanSquaredError,
MeanSquaredLogError,
PSNR,
R2Score,
SSIM,
)
| # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytorch_lightning.metrics.classification import ( # noqa: F401
Accuracy,
AUC,
AUROC,
AveragePrecision,
ConfusionMatrix,
F1,
FBeta,
HammingDistance,
IoU,
Precision,
PrecisionRecallCurve,
Recall,
ROC,
StatScores,
)
from pytorch_lightning.metrics.metric import Metric, MetricCollection # noqa: F401
from pytorch_lightning.metrics.regression import ( # noqa: F401
ExplainedVariance,
MeanAbsoluteError,
MeanSquaredError,
MeanSquaredLogError,
PSNR,
R2Score,
SSIM,
)
from pytorch_lightning.utilities import rank_zero_deprecation
rank_zero_deprecation(
"`pytorch_lightning.metrics.*` module has been renamed to `torchmetrics.*` and split off to its own package"
" (https://github.com/PyTorchLightning/metrics) since v1.3 and will be removed in v1.5"
)
| Python | 0 |
d6b9cc4acb4800aa63cc91957c05c75312a081e5 | update language_by_size from trunk r9110, add new sq-site | pywikibot/families/wikinews_family.py | pywikibot/families/wikinews_family.py | # -*- coding: utf-8 -*-
from pywikibot import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wikinews
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'wikinews'
self.languages_by_size = [
'sr', 'en', 'pl', 'de', 'fr', 'it', 'es', 'pt', 'zh', 'ja', 'sv',
'ru', 'ta', 'fi', 'cs', 'he', 'ro', 'bg', 'ar', 'hu', 'sd', 'tr',
'uk', 'fa', 'ca', 'no', 'sq', 'bs', 'th', 'ko', 'eo',
]
for lang in self.languages_by_size:
self.langs[lang] = '%s.wikinews.org' % lang
self.obsolete = {
'jp': 'ja',
'nb': 'no',
'nl': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325
'zh-tw': 'zh',
'zh-cn': 'zh'
}
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are put
# after those, in code-alphabetical order.
self.interwiki_putfirst = {
'en': self.alphabetic,
'fi': self.alphabetic,
'fr': self.alphabetic,
'he': ['en'],
'hu': ['en'],
'pl': self.alphabetic,
}
# Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = ['cs', 'hu',]
# CentralAuth cross avaliable projects.
self.cross_projects = [
'wikipedia', 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource', 'wikiversity',
'meta', 'mediawiki', 'test', 'incubator', 'commons', 'species'
]
def code2encoding(self, code):
return 'utf-8'
def version(self, code):
return '1.17wmf1'
def shared_image_repository(self, code):
return ('commons', 'commons')
| # -*- coding: utf-8 -*-
from pywikibot import family
__version__ = '$Id$'
# The Wikimedia family that is known as Wikinews
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'wikinews'
self.languages_by_size = [
'sr', 'en', 'pl', 'de', 'fr', 'it', 'es', 'pt', 'zh', 'ja', 'sv',
'ru', 'ta', 'fi', 'cs', 'he', 'ro', 'bg', 'ar', 'hu', 'sd', 'tr',
'uk', 'ca', 'fa', 'no', 'bs', 'th', 'ko', 'eo',
]
for lang in self.languages_by_size:
self.langs[lang] = '%s.wikinews.org' % lang
self.obsolete = {
'jp': 'ja',
'nb': 'no',
'nl': None, # https://bugzilla.wikimedia.org/show_bug.cgi?id=20325
'zh-tw': 'zh',
'zh-cn': 'zh'
}
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are put
# after those, in code-alphabetical order.
self.interwiki_putfirst = {
'en': self.alphabetic,
'fi': self.alphabetic,
'fr': self.alphabetic,
'he': ['en'],
'hu': ['en'],
'pl': self.alphabetic,
}
# Global bot allowed languages on http://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = ['cs', 'hu',]
# CentralAuth cross avaliable projects.
self.cross_projects = [
'wikipedia', 'wiktionary', 'wikibooks', 'wikiquote', 'wikisource', 'wikiversity',
'meta', 'mediawiki', 'test', 'incubator', 'commons', 'species'
]
def code2encoding(self, code):
return 'utf-8'
def version(self, code):
return '1.17wmf1'
def shared_image_repository(self, code):
return ('commons', 'commons')
| Python | 0 |
06a851590f32acad0bc1e5b0d87cc4b1148b644c | Add unique index to patient_numbers | radar/radar/models/patient_numbers.py | radar/radar/models/patient_numbers.py | from sqlalchemy import Column, Integer, ForeignKey, String, Index
from sqlalchemy.orm import relationship
from radar.database import db
from radar.models import MetaModelMixin
from radar.models.common import uuid_pk_column, patient_id_column, patient_relationship
class PatientNumber(db.Model, MetaModelMixin):
__tablename__ = 'patient_numbers'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('patient_numbers')
data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False)
data_source = relationship('DataSource')
organisation_id = Column(Integer, ForeignKey('organisations.id'), nullable=False)
organisation = relationship('Organisation')
number = Column(String, nullable=False)
# Data source, organisation and number must be unique
Index('patient_numbers_data_source_id_organisation_id_number_idx', PatientNumber.data_source_id, PatientNumber.organisation_id, PatientNumber.number, unique=True)
Index('patient_numbers_patient_id_idx', PatientNumber.patient_id)
Index('patient_numbers_organisation_id_idx', PatientNumber.organisation_id)
| from sqlalchemy import Column, Integer, ForeignKey, String, Index
from sqlalchemy.orm import relationship
from radar.database import db
from radar.models import MetaModelMixin
from radar.models.common import uuid_pk_column, patient_id_column, patient_relationship
class PatientNumber(db.Model, MetaModelMixin):
__tablename__ = 'patient_numbers'
id = uuid_pk_column()
patient_id = patient_id_column()
patient = patient_relationship('patient_numbers')
data_source_id = Column(Integer, ForeignKey('data_sources.id'), nullable=False)
data_source = relationship('DataSource')
organisation_id = Column(Integer, ForeignKey('organisations.id'), nullable=False)
organisation = relationship('Organisation')
number = Column(String, nullable=False)
# TODO add unique index on data_source_id, organisation_id, number
Index('patient_numbers_patient_id_idx', PatientNumber.patient_id)
Index('patient_numbers_organisation_id_idx', PatientNumber.organisation_id)
| Python | 0.998605 |
e44115c6785fcc80f378e61e98e4ccd32cc9498f | Renames `update_step()` to `train_step()`. | flax/training/train_state.py | flax/training/train_state.py | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable
from flax import core
from flax import struct
import optax
class TrainState(struct.PyTreeNode):
"""Simple train state for the common case with a single Optax optimizer.
Synopsis:
state = TrainState.create(
apply_fn=model.apply,
params=variables['params'],
tx=tx)
grad_fn = jax.grad(make_loss_fn(state.apply_fn))
for batch in data:
grads = grad_fn(state.params, batch)
state = state.apply_gradients(grads=grads)
Note that you can easily extend this dataclass by subclassing it for storing
additional data (e.g. additional variable collections).
For more exotic usecases (e.g. multiple optimizers) it's probably best to
fork the class and modify it.
Attributes:
step: Counter starts at 0 and is incremented by every call to
`.apply_gradients()`.
apply_fn: Usually set to `model.apply()`. Kept in this dataclass for
convenience to have a shorter params list for the `train_step()` function
in your training loop.
tx: An Optax gradient transformation.
opt_state: The state for `tx`.
"""
step: int
apply_fn: Callable = struct.field(pytree_node=False)
params: core.FrozenDict[str, Any]
tx: optax.GradientTransformation = struct.field(pytree_node=False)
opt_state: optax.OptState
def apply_gradients(self, *, grads, **kwargs):
"""Updates `step`, `params`, `opt_state` and `**kwargs` in return value.
Note that internally this function calls `.tx.update()` followed by a call
to `optax.apply_updates()` to update `params` and `opt_state`.
Args:
grads: Gradients that have the same pytree structure as `.params`.
**kwargs: Additional dataclass attributes that should be `.replace()`-ed.
Returns:
An updated instance of `self` with `step` incremented by one, `params`
and `opt_state` updated by applying `grads`, and additional attributes
replaced as specified by `kwargs`.
"""
updates, new_opt_state = self.tx.update(
grads, self.opt_state, self.params)
new_params = optax.apply_updates(self.params, updates)
return self.replace(
step=self.step + 1,
params=new_params,
opt_state=new_opt_state,
**kwargs,
)
@classmethod
def create(cls, *, apply_fn, params, tx, **kwargs):
"""Creates a new instance with `step=0` and initialized `opt_state`."""
opt_state = tx.init(params)
return cls(
step=0,
apply_fn=apply_fn,
params=params,
tx=tx,
opt_state=opt_state,
**kwargs,
)
| # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable
from flax import core
from flax import struct
import optax
class TrainState(struct.PyTreeNode):
"""Simple train state for the common case with a single Optax optimizer.
Synopsis:
state = TrainState.create(
apply_fn=model.apply,
params=variables['params'],
tx=tx)
grad_fn = jax.grad(make_loss_fn(state.apply_fn))
for batch in data:
grads = grad_fn(state.params, batch)
state = state.apply_gradients(grads=grads)
Note that you can easily extend this dataclass by subclassing it for storing
additional data (e.g. additional variable collections).
For more exotic usecases (e.g. multiple optimizers) it's probably best to
fork the class and modify it.
Attributes:
step: Counter starts at 0 and is incremented by every call to
`.apply_gradients()`.
apply_fn: Usually set to `model.apply()`. Kept in this dataclass for
convenience to have a shorter params list for `update_step()`.
tx: An Optax gradient transformation.
opt_state: The state for `tx`.
"""
step: int
apply_fn: Callable = struct.field(pytree_node=False)
params: core.FrozenDict[str, Any]
tx: optax.GradientTransformation = struct.field(pytree_node=False)
opt_state: optax.OptState
def apply_gradients(self, *, grads, **kwargs):
"""Updates `step`, `params`, `opt_state` and `**kwargs` in return value.
Note that internally this function calls `.tx.update()` followed by a call
to `optax.apply_updates()` to update `params` and `opt_state`.
Args:
grads: Gradients that have the same pytree structure as `.params`.
**kwargs: Additional dataclass attributes that should be `.replace()`-ed.
Returns:
An updated instance of `self` with `step` incremented by one, `params`
and `opt_state` updated by applying `grads`, and additional attributes
replaced as specified by `kwargs`.
"""
updates, new_opt_state = self.tx.update(
grads, self.opt_state, self.params)
new_params = optax.apply_updates(self.params, updates)
return self.replace(
step=self.step + 1,
params=new_params,
opt_state=new_opt_state,
**kwargs,
)
@classmethod
def create(cls, *, apply_fn, params, tx, **kwargs):
"""Creates a new instance with `step=0` and initialized `opt_state`."""
opt_state = tx.init(params)
return cls(
step=0,
apply_fn=apply_fn,
params=params,
tx=tx,
opt_state=opt_state,
**kwargs,
)
| Python | 0.999865 |
10a8946a18c953d64648639b5a545ec8fa5da6e8 | Update print_format.py | frappe/utils/print_format.py | frappe/utils/print_format.py | from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from jinja2 import TemplateNotFound
from frappe.utils import cint, strip_html
from frappe.utils.pdf import get_pdf,cleanup
import cups
from PyPDF2 import PdfFileWriter, PdfFileReader
no_cache = 1
no_sitemap = 1
base_template_path = "templates/www/printview.html"
standard_format = "templates/print_formats/standard.html"
@frappe.whitelist()
def download_multi_pdf(doctype, name, format=None):
# name can include names of many docs of the same doctype.
import json
result = json.loads(name)
# Concatenating pdf files
output = PdfFileWriter()
for i, ss in enumerate(result):
output = frappe.get_print(doctype, ss, format, as_pdf = True, output = output)
frappe.local.response.filename = "{doctype}.pdf".format(doctype=doctype.replace(" ", "-").replace("/", "-"))
frappe.local.response.filecontent = read_multi_pdf(output)
frappe.local.response.type = "download"
def read_multi_pdf(output):
# Get the content of the merged pdf files
fname = os.path.join("/tmp", "frappe-pdf-{0}.pdf".format(frappe.generate_hash()))
output.write(open(fname,"wb"))
with open(fname, "rb") as fileobj:
filedata = fileobj.read()
return filedata
@frappe.whitelist()
def download_pdf(doctype, name, format=None, doc=None, no_letterhead=0):
html = frappe.get_print(doctype, name, format, doc=doc, no_letterhead=no_letterhead)
frappe.local.response.filename = "{name}.pdf".format(name=name.replace(" ", "-").replace("/", "-"))
frappe.local.response.filecontent = get_pdf(html)
frappe.local.response.type = "download"
@frappe.whitelist()
def report_to_pdf(html, orientation="Landscape"):
frappe.local.response.filename = "report.pdf"
frappe.local.response.filecontent = get_pdf(html, {"orientation": orientation})
frappe.local.response.type = "download"
@frappe.whitelist()
def print_by_server(doctype, name, format=None, doc=None, no_letterhead=0):
print_settings = frappe.get_doc("Print Settings")
try:
cups.setServer(print_settings.server_ip)
cups.setPort(print_settings.port)
conn = cups.Connection()
output = PdfFileWriter()
output = frappe.get_print(doctype, name, format, doc=doc, no_letterhead=no_letterhead, as_pdf = True, output = output)
file = os.path.join("/tmp", "frappe-pdf-{0}.pdf".format(frappe.generate_hash()))
output.write(open(file,"wb"))
conn.printFile(print_settings.printer_name,file , name, {})
except IOError as e:
if ("ContentNotFoundError" in e.message
or "ContentOperationNotPermittedError" in e.message
or "UnknownContentError" in e.message
or "RemoteHostClosedError" in e.message):
frappe.throw(_("PDF generation failed"))
except cups.IPPError:
frappe.throw(_("Printing failed"))
finally:
cleanup(file,{})
| from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from jinja2 import TemplateNotFound
from frappe.utils import cint, strip_html
from frappe.utils.pdf import get_pdf,cleanup
import cups
from PyPDF2 import PdfFileWriter, PdfFileReader
no_cache = 1
no_sitemap = 1
base_template_path = "templates/www/printview.html"
standard_format = "templates/print_formats/standard.html"
@frappe.whitelist()
def download_multi_pdf(doctype, name, format=None):
# name can include names of many docs of the same doctype.
import json
result = json.loads(name)
# Concatenating pdf files
output = PdfFileWriter()
for i, ss in enumerate(result):
output = frappe.get_print(doctype, ss, format, as_pdf = True, output = output)
frappe.local.response.filename = "{doctype}.pdf".format(doctype=doctype.replace(" ", "-").replace("/", "-"))
frappe.local.response.filecontent = read_multi_pdf(output)
frappe.local.response.type = "download"
def read_multi_pdf(output):
# Get the content of the merged pdf files
fname = os.path.join("/tmp", "frappe-pdf-{0}.pdf".format(frappe.generate_hash()))
output.write(open(fname,"wb"))
with open(fname, "rb") as fileobj:
filedata = fileobj.read()
return filedata
@frappe.whitelist()
def download_pdf(doctype, name, format=None, doc=None, no_letterhead=0):
html = frappe.get_print(doctype, name, format, doc=doc, no_letterhead=no_letterhead)
frappe.local.response.filename = "{name}.pdf".format(name=name.replace(" ", "-").replace("/", "-"))
frappe.local.response.filecontent = get_pdf(html)
frappe.local.response.type = "download"
@frappe.whitelist()
def report_to_pdf(html, orientation="Landscape"):
frappe.local.response.filename = "report.pdf"
frappe.local.response.filecontent = get_pdf(html, {"orientation": orientation})
frappe.local.response.type = "download"
@frappe.whitelist()
def print_by_server(doctype, name, format=None, doc=None, no_letterhead=0):
print_settings = frappe.get_doc("Print Settings")
try:
cups.setServer(print_settings.server_ip)
cups.setPort(print_settings.port)
conn = cups.Connection()
output = PdfFileWriter()
output = frappe.get_print(doctype, name, format, doc=doc, no_letterhead=no_letterhead, as_pdf = True, output = output)
file = os.path.join("/tmp", "frappe-pdf-{0}.pdf".format(frappe.generate_hash()))
output.write(open(file,"wb"))
conn.printFile("Generic-text-only",file , name, {})
except IOError as e:
if ("ContentNotFoundError" in e.message
or "ContentOperationNotPermittedError" in e.message
or "UnknownContentError" in e.message
or "RemoteHostClosedError" in e.message):
frappe.throw(_("PDF generation failed"))
except cups.IPPError:
frappe.throw(_("Unsupported document-format 'application/pdf'."))
finally:
cleanup(file,{})
| Python | 0.000011 |
ab2d635f6f52c6cbc6c59d3fa887176852e186ff | Move Ko-Fi notifications to private channels. | KofiFriend_Brain.py | KofiFriend_Brain.py | import traceback
import json
import util_functions
from discord.ext import commands
import discord
import sys
import re
import os
import asyncio
from aiohttp import web
import datetime
botToken = os.environ.get('botToken')
def run_app(app, *, host='0.0.0.0', port=None, shutdown_timeout=60.0, ssl_context=None, print=print, backlog=128):
"""Run an app"""
if port is None:
if not ssl_context:
port = 8080
else:
port = 8443
loop = app.loop
handler = app.make_handler()
server = loop.create_server(handler, host, port, ssl=ssl_context, backlog=backlog)
srv, startup_res = loop.run_until_complete(asyncio.gather(server, app.startup(), loop=loop))
scheme = 'https' if ssl_context else 'http'
print("======== Running on {scheme}://{host}:{port}/ ========\n"
"(Press CTRL+C to quit)".format(
scheme=scheme, host=host, port=port))
async def tba_handler(request):
data = await request.post()
data = json.loads(data['data'])
print("Accepted request:\n{}".format(data))
print("{}".format(data))
embed = discord.Embed(
title="Ko-Fi Received!",
url="https://ko-fi.com/eylesis",
description="{} has sent ${}.".format(data['from_name'], data['amount']))
embed.set_footer(text="Ko-Fi Notification")
if data['message'] == "":
data['message'] == "No Message."
embed.add_field(name="__Message__", value=data['message'])
channelids = {'470455397912674305'}
for channelid in channelids:
await bot.send_message(bot.get_channel(channelid), embed=embed)
return web.Response()
bot = commands.Bot(command_prefix='*')
loop = bot.loop
app = web.Application(loop=loop)
app.router.add_post('/endpoint', tba_handler)
if __name__ == "__main__":
run_app(app, host=os.environ.get('HOST'), port=os.environ.get('PORT'))
bot.run(botToken) | import traceback
import json
import util_functions
from discord.ext import commands
import discord
import sys
import re
import os
import asyncio
from aiohttp import web
import datetime
botToken = os.environ.get('botToken')
def run_app(app, *, host='0.0.0.0', port=None, shutdown_timeout=60.0, ssl_context=None, print=print, backlog=128):
"""Run an app"""
if port is None:
if not ssl_context:
port = 8080
else:
port = 8443
loop = app.loop
handler = app.make_handler()
server = loop.create_server(handler, host, port, ssl=ssl_context, backlog=backlog)
srv, startup_res = loop.run_until_complete(asyncio.gather(server, app.startup(), loop=loop))
scheme = 'https' if ssl_context else 'http'
print("======== Running on {scheme}://{host}:{port}/ ========\n"
"(Press CTRL+C to quit)".format(
scheme=scheme, host=host, port=port))
async def tba_handler(request):
data = await request.post()
data = json.loads(data['data'])
print("Accepted request:\n{}".format(data))
print("{}".format(data))
embed = discord.Embed(
title="Crooq's Computer Quest Updated!",
url="https://ko-fi.com/eylesis",
description="{} has given ${} to the cause! The donation is appreciated!".format(data['from_name'], data['amount']))
embed.set_footer(text="Ko-Fi Notification")
if data['message'] == "":
data['message'] == "No Message."
embed.add_field(name="__Message__", value=data['message'])
channelids = {'470455397912674305', '391157967493267457'}
for channelid in channelids:
await bot.send_message(bot.get_channel(channelid), embed=embed)
return web.Response()
bot = commands.Bot(command_prefix='*')
loop = bot.loop
app = web.Application(loop=loop)
app.router.add_post('/endpoint', tba_handler)
if __name__ == "__main__":
run_app(app, host=os.environ.get('HOST'), port=os.environ.get('PORT'))
bot.run(botToken) | Python | 0 |
b980d69fe3d2da87814a915c6a85ef930d832860 | Change simple_blend to simply average the predictions | scripts/simple_blend.py | scripts/simple_blend.py | import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.average(np.ravel(prediction_set))
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
| import numpy as np
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from utils.data_paths import SUBMISSIONS_DIR_PATH
OUTPUT_FILE_PATH = os.path.join(SUBMISSIONS_DIR_PATH, 'simple_blend.dta')
PREDICTION_FILE_PATHS = [os.path.join(SUBMISSIONS_DIR_PATH, 'predictions1.dta'),
os.path.join(SUBMISSIONS_DIR_PATH, 'predictions2.dta')]
PREDICTION_COEFFICIENTS = [0.4,
0.6]
def main():
predictions = get_predictions()
write(predictions)
def get_predictions():
predictions = np.array([])
for i, prediction_file_path in enumerate(PREDICTION_FILE_PATHS):
with open(prediction_file_path, 'r') as prediction_file:
prediction = np.transpose(np.array([prediction_file.read().split()],
dtype=np.float32))
if predictions.size == 0:
predictions = prediction
else:
predictions = np.append(predictions, prediction, axis=1)
return np.matrix(predictions)
def write(predictions):
coefficients = np.array(PREDICTION_COEFFICIENTS)
with open(OUTPUT_FILE_PATH, 'w+') as output_file:
for prediction_set in predictions:
prediction = np.dot(np.ravel(prediction_set), coefficients)
output_file.write('{}\n'.format(prediction))
if __name__ == '__main__':
main()
| Python | 0.000001 |
4657a4fafb1218fe73b76d142c554bd8f347d81f | Make the correct None check | regserver/regulations/views/chrome.py | regserver/regulations/views/chrome.py | from django.conf import settings
from django.http import Http404
from django.views.generic.base import TemplateView
from regulations.generator import generator
from regulations.generator.versions import fetch_grouped_history
from regulations.views import utils
from regulations.views.partial import *
from regulations.views.sidebar import SideBarView
class ChromeView(TemplateView):
""" Base class for views which wish to include chrome. """
template_name = 'chrome.html'
def add_extras(self, context):
context['env'] = 'source' if settings.DEBUG else 'built'
context['GOOGLE_ANALYTICS_SITE'] = settings.GOOGLE_ANALYTICS_SITE
context['GOOGLE_ANALYTICS_ID'] = settings.GOOGLE_ANALYTICS_ID
return context
def get_context_data(self, **kwargs):
context = super(ChromeView, self).get_context_data(**kwargs)
label_id = context['label_id']
version = context['version']
# Hack solution: pull in full regulation, then the partial
# @todo: just query the meta and toc layers
part = label_id.split('-')[0]
full_tree = generator.get_regulation(part, version)
relevant_tree = generator.get_tree_paragraph(label_id, version)
if full_tree is None or relevant_tree is None:
raise Http404
partial_view = self.partial_class.as_view()
response = partial_view(
self.request, label_id=label_id, version=version)
response.render()
context['partial_content'] = response.content
sidebar_view = SideBarView.as_view()
response = sidebar_view(self.request, label_id=label_id,
version=version)
response.render()
context['sidebar_content'] = response.content
appliers = utils.handle_specified_layers(
'toc,meta', part, version, self.partial_class.sectional_links)
builder = generate_html(full_tree, appliers)
context['tree'] = full_tree
self.add_extras(context)
context['part'] = part
context['history'] = fetch_grouped_history(part)
return context
class ChromeInterpView(ChromeView):
"""Interpretation of regtext section/paragraph or appendix with chrome"""
partial_class = PartialInterpView
class ChromeSectionView(ChromeView):
"""Regtext section with chrome"""
partial_class = PartialSectionView
class ChromeParagraphView(ChromeView):
"""Regtext paragraph with chrome"""
partial_class = PartialParagraphView
class ChromeRegulationView(ChromeView):
"""Entire regulation with chrome"""
partial_class = PartialRegulationView
| from django.conf import settings
from django.http import Http404
from django.views.generic.base import TemplateView
from regulations.generator import generator
from regulations.generator.versions import fetch_grouped_history
from regulations.views import utils
from regulations.views.partial import *
from regulations.views.sidebar import SideBarView
class ChromeView(TemplateView):
""" Base class for views which wish to include chrome. """
template_name = 'chrome.html'
def add_extras(self, context):
context['env'] = 'source' if settings.DEBUG else 'built'
context['GOOGLE_ANALYTICS_SITE'] = settings.GOOGLE_ANALYTICS_SITE
context['GOOGLE_ANALYTICS_ID'] = settings.GOOGLE_ANALYTICS_ID
return context
def get_context_data(self, **kwargs):
context = super(ChromeView, self).get_context_data(**kwargs)
label_id = context['label_id']
version = context['version']
# Hack solution: pull in full regulation, then the partial
# @todo: just query the meta and toc layers
part = label_id.split('-')[0]
full_tree = generator.get_regulation(part, version)
relevant_tree = generator.get_tree_paragraph(label_id, version)
if not full_tree or relevant_tree:
raise Http404
partial_view = self.partial_class.as_view()
response = partial_view(
self.request, label_id=label_id, version=version)
response.render()
context['partial_content'] = response.content
sidebar_view = SideBarView.as_view()
response = sidebar_view(self.request, label_id=label_id,
version=version)
response.render()
context['sidebar_content'] = response.content
appliers = utils.handle_specified_layers(
'toc,meta', part, version, self.partial_class.sectional_links)
builder = generate_html(full_tree, appliers)
context['tree'] = full_tree
self.add_extras(context)
context['part'] = part
context['history'] = fetch_grouped_history(part)
return context
class ChromeInterpView(ChromeView):
"""Interpretation of regtext section/paragraph or appendix with chrome"""
partial_class = PartialInterpView
class ChromeSectionView(ChromeView):
"""Regtext section with chrome"""
partial_class = PartialSectionView
class ChromeParagraphView(ChromeView):
"""Regtext paragraph with chrome"""
partial_class = PartialParagraphView
class ChromeRegulationView(ChromeView):
"""Entire regulation with chrome"""
partial_class = PartialRegulationView
| Python | 0.999589 |
e09d1f7f3b078b6de0f87b05939d776bc43ee483 | Comment about hack | pax/pax.py | pax/pax.py | import logging
import inspect
import pprint
import configparser
from pax import units
import os
from pluginbase import PluginBase
from pax import units
def EvaluateConfiguration(config):
evaled_config = {}
for key, value in config.items():
#Eval value with globals = everything from units...
evaled_config[key] = eval(value, {
name : getattr(units, name)
for name in dir(units)
})
return evaled_config
def Instantiate(name, plugin_source, config_values):
"""take class name and build class from it"""
name_module, name_class = name.split('.')
plugin_module = plugin_source.load_plugin(name_module)
if config_values.has_section(name):
this_config = config_values[name]
else:
this_config = config_values['DEFAULT']
this_config = EvaluateConfiguration(this_config)
return getattr(plugin_module, name_class)(this_config)
def Processor(input, transform, output):
# Check input types
# TODO (tunnell): Don't use asserts, but raise ValueError() with
# informative error
assert isinstance(input, str)
assert isinstance(transform, (str, list))
assert isinstance(output, (str, list))
# If 'transform' or 'output' aren't lists, turn them into lists
if not isinstance(transform, list):
transform = [transform]
if not isinstance(output, list):
output = [output]
# What we do on data...
actions = transform + output
# Find location of this file
absolute_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
dir = os.path.dirname(absolute_path)
interpolation = configparser.ExtendedInterpolation()
config = configparser.ConfigParser(interpolation=interpolation,
inline_comment_prefixes='#',
strict=True)
# Allow for case-sensitive configuration keys
config.optionxform = str
# Load the default configuration
config.read(os.path.join(dir, 'default.ini'))
default_config = EvaluateConfiguration(config['DEFAULT'])
# Setup logging
string_level = default_config['loglevel']
numeric_level = getattr(logging, string_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % string_level)
FORMAT = '%(asctime)-15s %(name)s L%(lineno)s - %(levelname)s %(message)s'
logging.basicConfig(level=numeric_level, format=FORMAT)
log = logging.getLogger('Processor')
# Print settings to log
log.debug(pprint.pformat(config, compact=True))
# Setup plugins (which involves finding the plugin directory.
plugin_base = PluginBase(package='pax.plugins')
searchpath = ['./plugins'] + config['DEFAULT']['plugin_paths'].split()
# Find the absolute path, then director, then find plugin directory
searchpath += [os.path.join(dir, '..', 'plugins')]
log.debug("Search path for plugins is %s" % str(searchpath))
plugin_source = plugin_base.make_plugin_source(searchpath=searchpath)
# Instantiate requested plugins
input = Instantiate(input, plugin_source, config)
actions = [Instantiate(x, plugin_source, config) for x in actions]
# This is the *actual* event loop
for i, event in enumerate(input.GetEvents()):
log.info("Event %d" % i)
for j, block in enumerate(actions):
log.debug("Step %d with %s", j, block.__class__.__name__)
event = block.ProcessEvent(event)
| import logging
import inspect
import pprint
import configparser
from pax import units
import os
from pluginbase import PluginBase
from pax import units
def EvaluateConfiguration(config):
evaled_config = {}
for key, value in config.items():
evaled_config[key] = eval(value, {
name : getattr(units, name)
for name in dir(units)
})
return evaled_config
def Instantiate(name, plugin_source, config_values):
"""take class name and build class from it"""
name_module, name_class = name.split('.')
plugin_module = plugin_source.load_plugin(name_module)
if config_values.has_section(name):
this_config = config_values[name]
else:
this_config = config_values['DEFAULT']
this_config = EvaluateConfiguration(this_config)
return getattr(plugin_module, name_class)(this_config)
def Processor(input, transform, output):
# Check input types
# TODO (tunnell): Don't use asserts, but raise ValueError() with
# informative error
assert isinstance(input, str)
assert isinstance(transform, (str, list))
assert isinstance(output, (str, list))
# If 'transform' or 'output' aren't lists, turn them into lists
if not isinstance(transform, list):
transform = [transform]
if not isinstance(output, list):
output = [output]
# What we do on data...
actions = transform + output
# Find location of this file
absolute_path = os.path.abspath(inspect.getfile(inspect.currentframe()))
dir = os.path.dirname(absolute_path)
interpolation = configparser.ExtendedInterpolation()
config = configparser.ConfigParser(interpolation=interpolation,
inline_comment_prefixes='#',
strict=True)
# Allow for case-sensitive configuration keys
config.optionxform = str
# Load the default configuration
config.read(os.path.join(dir, 'default.ini'))
default_config = EvaluateConfiguration(config['DEFAULT'])
# Setup logging
string_level = default_config['loglevel']
numeric_level = getattr(logging, string_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % string_level)
FORMAT = '%(asctime)-15s %(name)s L%(lineno)s - %(levelname)s %(message)s'
logging.basicConfig(level=numeric_level, format=FORMAT)
log = logging.getLogger('Processor')
# Print settings to log
log.debug(pprint.pformat(config, compact=True))
# Setup plugins (which involves finding the plugin directory.
plugin_base = PluginBase(package='pax.plugins')
searchpath = ['./plugins'] + config['DEFAULT']['plugin_paths'].split()
# Find the absolute path, then director, then find plugin directory
searchpath += [os.path.join(dir, '..', 'plugins')]
log.debug("Search path for plugins is %s" % str(searchpath))
plugin_source = plugin_base.make_plugin_source(searchpath=searchpath)
# Instantiate requested plugins
input = Instantiate(input, plugin_source, config)
actions = [Instantiate(x, plugin_source, config) for x in actions]
# This is the *actual* event loop
for i, event in enumerate(input.GetEvents()):
log.info("Event %d" % i)
for j, block in enumerate(actions):
log.debug("Step %d with %s", j, block.__class__.__name__)
event = block.ProcessEvent(event)
| Python | 0 |
2779fdd14279e017f35780fe343b1c7243898397 | Fix extension description and remove unused exception | neutron/extensions/l3_ext_gw_mode.py | neutron/extensions/l3_ext_gw_mode.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira Networks, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Salvatore Orlando, Nicira, Inc
#
from neutron.api import extensions
from neutron.api.v2 import attributes as attrs
from neutron.extensions import l3
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {l3.EXTERNAL_GW_INFO:
{'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate':
{'type:dict_or_nodata':
{'network_id': {'type:uuid': None, 'required': True},
'enable_snat': {'type:boolean': None, 'required': False,
'convert_to': attrs.convert_to_boolean}}
}}}}
class L3_ext_gw_mode(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron L3 Configurable external gateway mode"
@classmethod
def get_alias(cls):
return "ext-gw-mode"
@classmethod
def get_description(cls):
return ("Extension of the router abstraction for specifying whether "
"SNAT should occur on the external gateway")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/ext-gw-mode/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-03-28T10:00:00-00:00"
def get_required_extensions(self):
return ["router"]
def get_extended_resources(self, version):
if version == "2.0":
return dict(EXTENDED_ATTRIBUTES_2_0.items())
else:
return {}
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira Networks, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Salvatore Orlando, Nicira, Inc
#
from neutron.api import extensions
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as qexception
from neutron.extensions import l3
class RouterDNatDisabled(qexception.BadRequest):
message = _("DNat is disabled for the router %(router_id)s. Floating IPs "
"cannot be associated.")
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {l3.EXTERNAL_GW_INFO:
{'allow_post': True,
'allow_put': True,
'is_visible': True,
'default': None,
'enforce_policy': True,
'validate':
{'type:dict_or_nodata':
{'network_id': {'type:uuid': None, 'required': True},
'enable_snat': {'type:boolean': None, 'required': False,
'convert_to': attrs.convert_to_boolean}}
}}}}
class L3_ext_gw_mode(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Neutron L3 Configurable external gateway mode"
@classmethod
def get_alias(cls):
return "ext-gw-mode"
@classmethod
def get_description(cls):
return ("Extension of the router abstraction for specifying whether "
"SNAT, DNAT or both should occur on the external gateway")
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/neutron/ext-gw-mode/api/v1.0"
@classmethod
def get_updated(cls):
return "2013-03-28T10:00:00-00:00"
def get_required_extensions(self):
return ["router"]
def get_extended_resources(self, version):
if version == "2.0":
return dict(EXTENDED_ATTRIBUTES_2_0.items())
else:
return {}
| Python | 0 |
aaa6142718827ea6d568eccc75c624598b0bc9c9 | Update __init__.py | pymeasure/instruments/thorlabs/__init__.py | pymeasure/instruments/thorlabs/__init__.py | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from .thorlabspm100usb import ThorlabsPM100USB
from .thorlabspro8000 import thorlabsPro8000
| #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2020 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from .thorlabspm100usb import ThorlabsPM100USB
| Python | 0.000072 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.