commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
8e53783953285c3d33ef55dce1c33a3bd46458a9 | Add concurrency capability to client | eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog | 2017/async-socket-server/simple-client.py | 2017/async-socket-server/simple-client.py | import argparse
import logging
import socket
import sys
import threading
import time
class ReadThread(threading.Thread):
def __init__(self, name, sockobj):
super().__init__()
self.sockobj = sockobj
self.name = name
self.bufsize = 8 * 1024
def run(self):
while True:
buf = self.sockobj.recv(self.bufsize)
logging.info('{0} Received: {1}'.format(self.name, buf))
if b'1111' in buf:
break
def make_new_connection(name, host, port):
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.connect((host, port))
logging.info('{0} connected...'.format(name))
rthread = ReadThread(name, sockobj)
rthread.start()
sockobj.send(b'foo^1234$jo')
time.sleep(1.0)
sockobj.send(b'sdfsdfsdfsdf^a')
time.sleep(1.0)
sockobj.send(b'fkfkf0000$dfk^$sdf^a$^kk$')
logging.info('{0} finished sending'.format(name))
time.sleep(0.1)
sockobj.close()
rthread.join()
def main():
argparser = argparse.ArgumentParser('Simple TCP client')
argparser.add_argument('host', help='Server host name')
argparser.add_argument('port', type=int, help='Server port')
argparser.add_argument('-n', '--num_concurrent', type=int,
default=1,
help='Number of concurrent connections')
args = argparser.parse_args()
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s:%(message)s')
t1 = time.time()
connections = []
for i in range(args.num_concurrent):
name = 'conn{0}'.format(i)
tconn = threading.Thread(target=make_new_connection,
args=(name, args.host, args.port))
tconn.start()
connections.append(tconn)
for conn in connections:
conn.join()
print('Elapsed:', time.time() - t1)
if __name__ == '__main__':
main()
| import logging
import socket
import sys
import threading
import time
class ReadThread(threading.Thread):
def __init__(self, sockobj):
super().__init__()
self.sockobj = sockobj
self.bufsize = 8 * 1024
def run(self):
while True:
buf = self.sockobj.recv(self.bufsize)
logging.info('Received: {0}'.format(buf))
if b'1111' in buf:
break
def make_new_connection(name, host, port):
sockobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockobj.connect((host, port))
rthread = ReadThread(sockobj)
rthread.start()
logging.info('Starting send')
sockobj.send(b'foo^1234$jo')
time.sleep(1.0)
sockobj.send(b'sdfsdfsdfsdf^a')
time.sleep(1.0)
sockobj.send(b'fkfkf0000$dfk^$sdf^a$^kk$')
time.sleep(1.0)
sockobj.close()
rthread.join()
def main():
logging.basicConfig(
level=logging.DEBUG,
format='%(levelname)s:%(asctime)s:%(message)s')
if len(sys.argv) <= 2:
print("Error, expecting <host> <port>")
sys.exit(1)
host = sys.argv[1]
port = int(sys.argv[2])
make_new_connection("foo", host, port)
if __name__ == '__main__':
main()
| unlicense | Python |
bb9b380e93da9e9fc65d5aac2ba3650272fd2bba | print syntax. | yishayv/lyacorr,yishayv/lyacorr | mpi_helper.py | mpi_helper.py | from __future__ import print_function
from mpi4py import MPI
import numpy as np
comm = MPI.COMM_WORLD
def r_print(*args):
"""
print message on the root node (rank 0)
:param args:
:return:
"""
if comm.rank == 0:
print('ROOT:', end=' ')
for i in args:
print(i, end=' ')
print()
def l_print(*args):
"""
print message on each node, synchronized
:param args:
:return:
"""
for rank in range(0, comm.size):
comm.Barrier()
if rank == comm.rank:
l_print_no_barrier(*args)
comm.Barrier()
def l_print_no_barrier(*args):
"""
print message on each node
:param args:
:return:
"""
print(comm.rank, ':', end=' ')
for i in args:
print(i, end=' ')
print
def get_chunks(num_items, num_steps):
"""
divide items into n=num_steps chunks
:param num_items:
:param num_steps:
:return: chunk sizes, chunk offsets
"""
chunk_sizes = np.zeros(num_steps, dtype=int)
chunk_sizes[:] = num_items // num_steps
chunk_sizes[:num_items % num_steps] += 1
chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)
chunk_offsets[0] = 0
return chunk_sizes, chunk_offsets
| from mpi4py import MPI
import numpy as np
from __future__ import print_function
comm = MPI.COMM_WORLD
def r_print(*args):
"""
print message on the root node (rank 0)
:param args:
:return:
"""
if comm.rank == 0:
print 'ROOT:',
for i in args:
print((i, end=' '))
print
def l_print(*args):
"""
print message on each node, synchronized
:param args:
:return:
"""
for rank in range(0, comm.size):
comm.Barrier()
if rank == comm.rank:
l_print_no_barrier(*args)
comm.Barrier()
def l_print_no_barrier(*args):
"""
print message on each node
:param args:
:return:
"""
print(comm.rank, ':', end=' ')
for i in args:
print((i, end=' '))
print
def get_chunks(num_items, num_steps):
"""
divide items into n=num_steps chunks
:param num_items:
:param num_steps:
:return: chunk sizes, chunk offsets
"""
chunk_sizes = np.zeros(num_steps, dtype=int)
chunk_sizes[:] = num_items // num_steps
chunk_sizes[:num_items % num_steps] += 1
chunk_offsets = np.roll(np.cumsum(chunk_sizes), 1)
chunk_offsets[0] = 0
return chunk_sizes, chunk_offsets
| mit | Python |
baa379107e9b229830a12efecba491f63c8afe02 | Order dashboard url overwrites | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/apps/dashboard/urls.py | meinberlin/apps/dashboard/urls.py | from django.conf.urls import url
from meinberlin.apps.bplan.views import BplanProjectCreateView
from meinberlin.apps.dashboard2.urls import \
urlpatterns as a4dashboard_urlpatterns
from meinberlin.apps.extprojects.views import ExternalProjectCreateView
from meinberlin.apps.projectcontainers.views import ContainerCreateView
from meinberlin.apps.projectcontainers.views import ContainerListView
from . import views
app_name = 'a4dashboard'
urlpatterns = [
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/settings/$',
views.DashboardOrganisationUpdateView.as_view(),
name='organisation-edit'),
url(r'^newsletters/(?P<organisation_slug>[-\w_]+)/create/$',
views.DashboardNewsletterCreateView.as_view(),
name='newsletter-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/containers/$',
ContainerListView.as_view(),
name='container-list'),
url(r'^projects/(?P<project_slug>[-\w_]+)/blueprints/$',
views.ModuleBlueprintListView.as_view(),
name='module-blueprint-list'),
url(r'^projects/(?P<project_slug>[-\w_]+)/blueprints/'
'(?P<blueprint_slug>[-\w_]+)/$',
views.ModuleCreateView.as_view(),
name='module-create'),
# Overwrite adhocracy4 core urls with meinBerlin urls
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/blueprints/'
r'external-project/$',
ExternalProjectCreateView.as_view(),
name='external-project-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/blueprints/'
r'bplan/$',
BplanProjectCreateView.as_view(),
name='bplan-project-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/blueprints/'
r'container/$',
ContainerCreateView.as_view(),
name='container-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/projects/$',
views.DashboardProjectListView.as_view(),
name='project-list'),
] + a4dashboard_urlpatterns
| from django.conf.urls import url
from meinberlin.apps.bplan.views import BplanProjectCreateView
from meinberlin.apps.dashboard2.urls import \
urlpatterns as a4dashboard_urlpatterns
from meinberlin.apps.extprojects.views import ExternalProjectCreateView
from meinberlin.apps.projectcontainers.views import ContainerCreateView
from meinberlin.apps.projectcontainers.views import ContainerListView
from . import views
app_name = 'a4dashboard'
urlpatterns = [
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/settings/$',
views.DashboardOrganisationUpdateView.as_view(),
name='organisation-edit'),
url(r'^newsletters/(?P<organisation_slug>[-\w_]+)/create/$',
views.DashboardNewsletterCreateView.as_view(),
name='newsletter-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/containers/$',
ContainerListView.as_view(),
name='container-list'),
# Overwrite the ProjectUpdateView with meinBerlin urls
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/blueprints/'
r'external-project/$',
ExternalProjectCreateView.as_view(),
name='external-project-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/blueprints/'
r'bplan/$',
BplanProjectCreateView.as_view(),
name='bplan-project-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/blueprints/'
r'container/$',
ContainerCreateView.as_view(),
name='container-create'),
url(r'^organisations/(?P<organisation_slug>[-\w_]+)/projects/$',
views.DashboardProjectListView.as_view(),
name='project-list'),
url(r'^projects/(?P<project_slug>[-\w_]+)/blueprints/$',
views.ModuleBlueprintListView.as_view(),
name='module-blueprint-list'),
url(r'^projects/(?P<project_slug>[-\w_]+)/blueprints/'
'(?P<blueprint_slug>[-\w_]+)/$',
views.ModuleCreateView.as_view(),
name='module-create'),
] + a4dashboard_urlpatterns
| agpl-3.0 | Python |
7ffe699d4f146a6bf695fb544689a5f35703d368 | Add startdir to files listed by os.listdir() | hughdbrown/git-tools | src/common/__init__.py | src/common/__init__.py | from __future__ import print_function
from hashlib import sha1
import sys
import os
import os.path
from subprocess import Popen, PIPE
BUFSIZE = 16 * 1024 * 1024
def message(msg):
print(msg, file=sys.stderr)
def binary_in_path(binary):
return any(os.path.exists(os.path.join(path, binary)) for path in set(os.environ["PATH"].split(':')))
def test_for_required_binaries(needed_binaries):
found = [(binary, binary_in_path(binary)) for binary in needed_binaries]
if not all(found_binary for _, found_binary in found):
message("Certain additional binaries are required to run:")
for binary, found_binary in found:
message("\t{0}: {1}".format(binary, "Found" if found_binary else "Not found"))
sys.exit(1)
def sha1_file(filename):
with open(filename) as f:
return sha1(f.read()).hexdigest()
def git_commit(fullpath, comment, dryrun, verbose, author=None):
cmd = ["git", "commit", fullpath, "-m", "{0}: {1}".format(fullpath, comment)]
if author:
cmd += ["--author", author]
if dryrun or verbose:
message(" ".join(cmd))
if not dryrun:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, bufsize=BUFSIZE)
_, errors = p.communicate()
if p.returncode:
raise Exception(errors)
def get_filelist(start_dir, recurse, ext=None):
if recurse:
file_list = [
os.path.join(root, f)
for root, _, files in os.walk(start_dir)
for f in files
]
else:
file_list = [os.path.join(start_dir, filename) for filename in os.listdir(start_dir)]
return file_list if not ext else [path for path in file_list if os.path.splitext(path)[1] == ext]
__all__ = [
"message",
"sha1_file",
"test_for_required_binaries",
"git_commit",
"get_filelist",
]
| from __future__ import print_function
from hashlib import sha1
import sys
import os
import os.path
from subprocess import Popen, PIPE
BUFSIZE = 16 * 1024 * 1024
def message(msg):
print(msg, file=sys.stderr)
def binary_in_path(binary):
return any(os.path.exists(os.path.join(path, binary)) for path in set(os.environ["PATH"].split(':')))
def test_for_required_binaries(needed_binaries):
found = [(binary, binary_in_path(binary)) for binary in needed_binaries]
if not all(found_binary for _, found_binary in found):
message("Certain additional binaries are required to run:")
for binary, found_binary in found:
message("\t{0}: {1}".format(binary, "Found" if found_binary else "Not found"))
sys.exit(1)
def sha1_file(filename):
with open(filename) as f:
return sha1(f.read()).hexdigest()
def git_commit(fullpath, comment, dryrun, verbose, author=None):
cmd = ["git", "commit", fullpath, "-m", "{0}: {1}".format(fullpath, comment)]
if author:
cmd += ["--author", author]
if dryrun or verbose:
message(" ".join(cmd))
if not dryrun:
p = Popen(cmd, stdout=PIPE, stderr=PIPE, bufsize=BUFSIZE)
_, errors = p.communicate()
if p.returncode:
raise Exception(errors)
def get_filelist(start_dir, recurse, ext=None):
if recurse:
file_list = [
os.path.join(root, f)
for root, _, files in os.walk(start_dir)
for f in files
]
else:
file_list = os.listdir(start_dir)
return file_list if not ext else [path for path in file_list if os.path.splitext(path)[1] == ext]
__all__ = [
"message",
"sha1_file",
"test_for_required_binaries",
"git_commit",
"get_filelist",
]
| mit | Python |
04058304e68fe88cb52402e6134ab15fa9563ad1 | add new methods in ProgressMonitor | TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl | AlphaTwirl/ProgressBar/ProgressMonitor.py | AlphaTwirl/ProgressBar/ProgressMonitor.py | # Tai Sakuma <tai.sakuma@cern.ch>
from ProgressReporter import ProgressReporter
##__________________________________________________________________||
class Queue(object):
def __init__(self, presentation):
self.presentation = presentation
def put(self, report):
self.presentation.present(report)
##__________________________________________________________________||
class ProgressMonitor(object):
def __init__(self, presentation):
self.queue = Queue(presentation = presentation)
def begin(self): pass
def monitor(self): pass
def end(self): pass
def createReporter(self):
reporter = ProgressReporter(self.queue)
return reporter
##__________________________________________________________________||
| # Tai Sakuma <tai.sakuma@cern.ch>
from ProgressReporter import ProgressReporter
##__________________________________________________________________||
class Queue(object):
def __init__(self, presentation):
self.presentation = presentation
def put(self, report):
self.presentation.present(report)
##__________________________________________________________________||
class ProgressMonitor(object):
def __init__(self, presentation):
self.queue = Queue(presentation = presentation)
def monitor(self): pass
def createReporter(self):
reporter = ProgressReporter(self.queue)
return reporter
##__________________________________________________________________||
| bsd-3-clause | Python |
c089ef827954436d30b4b6718ef1bcceac11d1fa | Make 0 an invalid CEI | poliquin/brazilnum | brazilnum/cei.py | brazilnum/cei.py | #!/usr/bin/env python
from __future__ import absolute_import
import re
import random
from .util import clean_id, pad_id
"""
Functions for working with Brazilian CEI identifiers.
"""
NONDIGIT = re.compile(r'[^0-9]')
CEI_WEIGHTS = [7, 4, 1, 8, 5, 2, 1, 6, 3, 7, 4]
def validate_cei(cei, autopad=True):
"""Check whether CEI is valid. Optionally pad if too short."""
cei = clean_id(cei)
# all complete CEI are 12 digits long
if len(cei) != 12:
return validate_cei(pad_cei(cei), False) if autopad else False
if cei == '000000000000':
return False
digits = [int(k) for k in cei] # identifier digits
return _cei_check(digits[:-1]) == digits[-1]
def cei_check_digit(cei):
"""Find check digit needed to make a CEI valid."""
cei = clean_id(cei)
if len(cei) < 11:
raise ValueError('CEI must have at least 11 digits: {0}'.format(cei))
digits = [int(k) for k in cei[:12]]
return _cei_check(digits)
def format_cei(cei):
"""Applies typical 00.000.00000/00 formatting to CEI."""
cei = pad_cei(cei)
fmt = '{0}.{1}.{2}/{3}'
return fmt.format(cei[:2], cei[2:5], cei[5:10], cei[10:])
def pad_cei(cei, validate=False):
"""Takes a CEI that probably had leading zeros and pads it."""
padded = pad_id(cei, '%0.012i')
if validate:
return padded, validate_cei(padded)
return padded
def random_cei(formatted=True):
"""Create a random, valid CEI identifier."""
uf = random.randint(11, 53)
stem = '{0}{1}'.format(uf, random.randint(100000000, 999999999))
cei = '{0}{1}'.format(stem, cei_check_digit(stem))
if formatted:
return format_cei(cei)
return cei
def _cei_check(digits):
"""Calculate check digit from iterable of integers."""
digsum = sum(w * k for w, k in zip(CEI_WEIGHTS, digits))
modulo = (sum(divmod(digsum % 100, 10)) % 10)
if modulo == 0:
return 0
return 10 - modulo
| #!/usr/bin/env python
from __future__ import absolute_import
import re
import random
from .util import clean_id, pad_id
"""
Functions for working with Brazilian CEI identifiers.
"""
NONDIGIT = re.compile(r'[^0-9]')
CEI_WEIGHTS = [7, 4, 1, 8, 5, 2, 1, 6, 3, 7, 4]
def validate_cei(cei, autopad=True):
"""Check whether CEI is valid. Optionally pad if too short."""
cei = clean_id(cei)
# all complete CEI are 12 digits long
if len(cei) != 12:
return validate_cei(pad_cei(cei), False) if autopad else False
digits = [int(k) for k in cei] # identifier digits
return _cei_check(digits[:-1]) == digits[-1]
def cei_check_digit(cei):
"""Find check digit needed to make a CEI valid."""
cei = clean_id(cei)
if len(cei) < 11:
raise ValueError('CEI must have at least 11 digits: {0}'.format(cei))
digits = [int(k) for k in cei[:12]]
return _cei_check(digits)
def format_cei(cei):
"""Applies typical 00.000.00000/00 formatting to CEI."""
cei = pad_cei(cei)
fmt = '{0}.{1}.{2}/{3}'
return fmt.format(cei[:2], cei[2:5], cei[5:10], cei[10:])
def pad_cei(cei, validate=False):
"""Takes a CEI that probably had leading zeros and pads it."""
padded = pad_id(cei, '%0.012i')
if validate:
return padded, validate_cei(padded)
return padded
def random_cei(formatted=True):
"""Create a random, valid CEI identifier."""
uf = random.randint(11, 53)
stem = '{0}{1}'.format(uf, random.randint(100000000, 999999999))
cei = '{0}{1}'.format(stem, cei_check_digit(stem))
if formatted:
return format_cei(cei)
return cei
def _cei_check(digits):
"""Calculate check digit from iterable of integers."""
digsum = sum(w * k for w, k in zip(CEI_WEIGHTS, digits))
modulo = (sum(divmod(digsum % 100, 10)) % 10)
if modulo == 0:
return 0
return 10 - modulo
| mit | Python |
331222d553c0778840f522e21cb399226d89310e | Fix version | Yelp/yelp_clog,Yelp/yelp_clog | clog/__init__.py | clog/__init__.py | # Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
:mod:`clog` is a package for handling log data. It can be used for the
following:
Python Logging Handler
---------------------------------
:class:`clog.handlers.ScribeHandler` can be used to send standard python
:mod:`logging` to a scribe stream.
Logging Operational and Mission Critical Data
---------------------------------------------
:func:`clog.loggers.ScribeLogger.log_line` can be used to log mission critical,
machine readable, and opertional data to scribe. There is also a global
:func:`clog.log_line` which has the same purpose but requires global
configuration (see :mod:`clog.config`). Use of the global is discouraged.
Reading Scribe Logs
-------------------
:mod:`clog.readers` provides classes for reading scribe logs locally or
from a server.
"""
from __future__ import absolute_import
from builtins import map
from clog.loggers import ScribeLogger, ScribeIsNotForkSafeError
from clog.global_state import log_line, reset_default_loggers
_pyflakes_ignore = [
ScribeLogger,
ScribeIsNotForkSafeError,
log_line,
reset_default_loggers,
]
version_info = 2, 2, 11
__version__ = '.'.join(map(str, version_info))
| # Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""
:mod:`clog` is a package for handling log data. It can be used for the
following:
Python Logging Handler
---------------------------------
:class:`clog.handlers.ScribeHandler` can be used to send standard python
:mod:`logging` to a scribe stream.
Logging Operational and Mission Critical Data
---------------------------------------------
:func:`clog.loggers.ScribeLogger.log_line` can be used to log mission critical,
machine readable, and opertional data to scribe. There is also a global
:func:`clog.log_line` which has the same purpose but requires global
configuration (see :mod:`clog.config`). Use of the global is discouraged.
Reading Scribe Logs
-------------------
:mod:`clog.readers` provides classes for reading scribe logs locally or
from a server.
"""
from __future__ import absolute_import
from builtins import map
from clog.loggers import ScribeLogger, ScribeIsNotForkSafeError
from clog.global_state import log_line, reset_default_loggers
_pyflakes_ignore = [
ScribeLogger,
ScribeIsNotForkSafeError,
log_line,
reset_default_loggers,
]
version_info = 2, 2, 9
__version__ = '.'.join(map(str, version_info))
| apache-2.0 | Python |
38f87a266a0dbc2b09f80feb0a3eeb01db4b096b | improve cnn date reporting | flupzor/bijgeschaafd,COLABORATI/newsdiffs,catcosmo/newsdiffs,bjowi/newsdiffs,amandabee/newsdiffs,catcosmo/newsdiffs,catcosmo/newsdiffs,COLABORATI/newsdiffs,flupzor/newsdiffs,amandabee/newsdiffs,COLABORATI/newsdiffs,flupzor/newsdiffs,bjowi/newsdiffs,flupzor/bijgeschaafd,flupzor/newsdiffs,bjowi/newsdiffs,amandabee/newsdiffs,flupzor/bijgeschaafd,flupzor/bijgeschaafd,flupzor/newsdiffs | parsers/cnn.py | parsers/cnn.py | from baseparser import BaseParser
import re
from BeautifulSoup import BeautifulSoup
from datetime import datetime, timedelta
DATE_FORMAT = '%B %d, %Y at %l:%M%P EDT'
class CNNParser(BaseParser):
domains = ['edition.cnn.com']
feeder_base = 'http://edition.cnn.com/'
feeder_pat = '^http://edition.cnn.com/201'
def _parse(self, html):
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
p_tags = soup.findAll('p', attrs={'class':re.compile(r'\bcnn_storypgraphtxt\b')})
if not p_tags:
self.real_article = False
return
self.meta = soup.findAll('meta')
self.title = soup.find('meta', attrs={'itemprop':'headline'}).get('content')
datestr = soup.find('meta', attrs={'itemprop':'dateModified'}).get('content')
if datestr:
date = datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%SZ') - timedelta(hours=4)
else:
datestr = ''
self.date = date.strftime(DATE_FORMAT)
self.byline = soup.find('meta', attrs={'itemprop':'author'}).get('content')
lede = p_tags[0].previousSibling.previousSibling
editornotes = soup.findAll('p', attrs={'class':'cnnEditorialNote'})
contributors = soup.findAll('p', attrs={'class':'cnn_strycbftrtxt'})
self.body = '\n'+'\n\n'.join([p.getText() for p in
editornotes + [lede] + p_tags + contributors])
| from baseparser import BaseParser
import re
from BeautifulSoup import BeautifulSoup
from datetime import datetime, timedelta
DATE_FORMAT = '%B %d, %Y at %l:%M%P EDT'
class CNNParser(BaseParser):
domains = ['edition.cnn.com']
feeder_base = 'http://edition.cnn.com/'
feeder_pat = '^http://edition.cnn.com/201'
def _parse(self, html):
soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES,
fromEncoding='utf-8')
p_tags = soup.findAll('p', attrs={'class':re.compile(r'\bcnn_storypgraphtxt\b')})
if not p_tags:
self.real_article = False
return
self.meta = soup.findAll('meta')
self.title = soup.find('meta', attrs={'itemprop':'headline'}).get('content')
datestr = soup.find('meta', attrs={'itemprop':'dateModified'}).get('content')
date = datetime.strptime(datestr, '%Y-%m-%dT%H:%M:%SZ') - timedelta(hours=4)
self.date = date.strftime(DATE_FORMAT)
self.byline = soup.find('meta', attrs={'itemprop':'author'}).get('content')
lede = p_tags[0].previousSibling.previousSibling
editornotes = soup.findAll('p', attrs={'class':'cnnEditorialNote'})
contributors = soup.findAll('p', attrs={'class':'cnn_strycbftrtxt'})
self.body = '\n'+'\n\n'.join([p.getText() for p in
editornotes + [lede] + p_tags + contributors])
| mit | Python |
b16b61914e1fa6ed0c3036404b913026dcc24a24 | Update organizers.py | pyconca/2017-web,pyconca/2017-web,pyconca/2017-web,pyconca/2017-web | config/organizers.py | config/organizers.py |
ORGANIZERS = {
('Francis Deslauriers', 'https://twitter.com/frdeso_'),
('Myles Braithwaite', 'https://mylesb.ca/'),
('Peter McCormick', 'https://twitter.com/pdmccormick'),
('Terry Yanchynskyy', 'https://github.com/onebit0fme'),
# Add you name and url here ^ and submit a pull request
# Order does not matter, final result is sorted by name
}
|
ORGANIZERS = {
('Francis Deslauriers', 'https://twitter.com/francisDeslaur'),
('Myles Braithwaite', 'https://mylesb.ca/'),
('Peter McCormick', 'https://twitter.com/pdmccormick'),
('Terry Yanchynskyy', 'https://github.com/onebit0fme'),
# Add you name and url here ^ and submit a pull request
# Order does not matter, final result is sorted by name
}
| mit | Python |
84d02292f9cc9749b619d823aac1cde88be97edd | change site name | bsdlp/burrito.sh,fly/burrito.sh,fly/burrito.sh,bsdlp/burrito.sh | pelicanconf.py | pelicanconf.py | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'jchen'
AUTHOR_FULLNAME = u'Jon Chen'
SITENAME = u'burritos r us'
SITEURL = 'http://burrito.sh'
TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'
CSS_FILE = 'style.css'
# theme stuff
THEME = './theme'
# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar', 'thumbnailer']
# gravatar email
AUTHOR_EMAIL = 'dabestmayne@burrito.sh'
# social
TWITTER_USERNAME = 's_jchen'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = 10
DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False
DEFAULT_DATE_FORMAT = ('%Y-%m-%d')
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# add paths to pelican
STATIC_PATHS = ['img']
# Thumbnailer plugin options
IMAGE_PATH = 'img'
THUMBNAIL_DIR = 'thumbs'
THUMBNAIL_SIZES = {
'thumbnail_square': '270',
'thumbnail_wide': '540x?',
}
| #!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'jchen'
AUTHOR_FULLNAME = u'Jon Chen'
SITENAME = u'burrito?'
SITEURL = 'http://burrito.sh'
TIMEZONE = 'ETC/UTC'
DEFAULT_LANG = u'en'
CSS_FILE = 'style.css'
# theme stuff
THEME = './theme'
# plugins
PLUGIN_PATH = './plugins'
PLUGINS = ['gravatar', 'thumbnailer']
# gravatar email
AUTHOR_EMAIL = 'dabestmayne@burrito.sh'
# social
TWITTER_USERNAME = 's_jchen'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
DEFAULT_PAGINATION = 10
DISPLAY_CATEGORIES_ON_MENU = False
DISPLAY_MENUITEMS_ON_MENU = False
DISPLAY_NAVBAR = False
DISPLAY_PAGES_ON_MENU = False
DEFAULT_DATE_FORMAT = ('%Y-%m-%d')
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
# add paths to pelican
STATIC_PATHS = ['img']
# Thumbnailer plugin options
IMAGE_PATH = 'img'
THUMBNAIL_DIR = 'thumbs'
THUMBNAIL_SIZES = {
'thumbnail_square': '270',
'thumbnail_wide': '540x?',
}
| bsd-3-clause | Python |
3104834e9ee7525a8f8b43edcb00443d913e0792 | Update config settings. | edx/ecommerce,edx/ecommerce,janusnic/ecommerce,mferenca/HMS-ecommerce,eduNEXT/edunext-ecommerce,eduNEXT/edunext-ecommerce,janusnic/ecommerce,eduNEXT/edunext-ecommerce,edx/ecommerce,mferenca/HMS-ecommerce,janusnic/ecommerce,edx/ecommerce,eduNEXT/edunext-ecommerce,mferenca/HMS-ecommerce | ecommerce/settings/production.py | ecommerce/settings/production.py | """Production settings and globals."""
from os import environ
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
import yaml
from ecommerce.settings.base import *
from ecommerce.settings.logger import get_logger_config
# Enable offline compression of CSS/JS
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
# Minify CSS
COMPRESS_CSS_FILTERS += [
'compressor.filters.cssmin.CSSMinFilter',
]
LOGGING = get_logger_config()
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
# ######### HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['*']
########## END HOST CONFIGURATION
CONFIG_FILE = get_env_setting('ECOMMERCE_CFG')
with open(CONFIG_FILE) as f:
config_from_yaml = yaml.load(f)
vars().update(config_from_yaml)
DB_OVERRIDES = dict(
PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']),
ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
for override, value in DB_OVERRIDES.iteritems():
DATABASES['default'][override] = value
# Depends on DOCUMENTATION_LOAD_ERROR_URL, so evaluate at the end
DOCUMENTATION_LOAD_ERROR_MESSAGE = 'This data may not be available for your course. ' \
'<a href="{error_documentation_link}" target="_blank">Read more</a>.'.format(error_documentation_link=DOCUMENTATION_LOAD_ERROR_URL)
| """Production settings and globals."""
from os import environ
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
import yaml
from ecommerce.settings.base import *
from ecommerce.settings.logger import get_logger_config
# Enable offline compression of CSS/JS
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True
# Minify CSS
COMPRESS_CSS_FILTERS += [
'compressor.filters.cssmin.CSSMinFilter',
]
LOGGING = get_logger_config()
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
# ######### HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['*']
########## END HOST CONFIGURATION
CONFIG_FILE = get_env_setting('EDX_ECOMMERCE_CFG')
with open(CONFIG_FILE) as f:
config_from_yaml = yaml.load(f)
vars().update(config_from_yaml)
DB_OVERRIDES = dict(
PASSWORD=environ.get('DB_MIGRATION_PASS', DATABASES['default']['PASSWORD']),
ENGINE=environ.get('DB_MIGRATION_ENGINE', DATABASES['default']['ENGINE']),
USER=environ.get('DB_MIGRATION_USER', DATABASES['default']['USER']),
NAME=environ.get('DB_MIGRATION_NAME', DATABASES['default']['NAME']),
HOST=environ.get('DB_MIGRATION_HOST', DATABASES['default']['HOST']),
PORT=environ.get('DB_MIGRATION_PORT', DATABASES['default']['PORT']),
)
for override, value in DB_OVERRIDES.iteritems():
DATABASES['default'][override] = value
# Depends on DOCUMENTATION_LOAD_ERROR_URL, so evaluate at the end
DOCUMENTATION_LOAD_ERROR_MESSAGE = 'This data may not be available for your course. ' \
'<a href="{error_documentation_link}" target="_blank">Read more</a>.'.format(error_documentation_link=DOCUMENTATION_LOAD_ERROR_URL)
| agpl-3.0 | Python |
2a3ecf262ac2057503ea8b9a4576068aaa8cfa1f | Fix filter gte (>=)/lte (<=) | AndrzejR/mining,mlgruby/mining,chrisdamba/mining,jgabriellima/mining,avelino/mining,chrisdamba/mining,AndrzejR/mining,seagoat/mining,seagoat/mining,mining/mining,mlgruby/mining,mlgruby/mining,mining/mining,avelino/mining,jgabriellima/mining | mining/utils.py | mining/utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unicodedata
import re
from decimal import Decimal
from pandas import tslib, date_range
def fix_render(value):
    """Normalise one cell value so it can be rendered/serialised.

    Byte strings are promoted to unicode (falling back to latin-1 when
    they are not valid UTF-8), pandas Timestamps become formatted
    strings, Decimals become plain strings; everything else passes
    through untouched.
    """
    kind = type(value)
    if kind is str:
        try:
            return unicode(value)
        except UnicodeDecodeError:
            return unicode(value.decode('latin1'))
    if kind is tslib.Timestamp:
        return value.strftime("%Y-%m-%d %H:%M:%S")
    if kind is Decimal:
        return str(value)
    return value
def pandas_to_dict(df):
    """Serialise a DataFrame into a list of {column: rendered value} dicts."""
    columns = list(df.columns)
    rows = []
    for values in df.values:
        rows.append(dict((col, fix_render(cell))
                         for col, cell in zip(columns, values)))
    return rows
def slugfy(text):
    """Turn *text* into a lowercase, dash-separated ASCII slug (or None)."""
    ascii_form = unicodedata.normalize("NFKD", text).encode("UTF-8", "ignore")
    words = re.sub(r"[^\w]+", " ", ascii_form).lower().strip().split()
    slug = "-".join(words)
    return slug if slug else None
def df_generate(df, argument, str_field):
    """Build a pandas filter from an encoded request-argument name.

    ``str_field`` looks like
    ``<prefix>__<column>__<operator>[__<type>[__<date-mark>]]`` and
    ``argument`` is a callable mapping that name to the filter value
    (e.g. a query-string getter).  ``gte``/``lte``/``is`` return a
    boolean Series usable as a mask; ``in``/``notin``/``between``
    return a query string for DataFrame.query-style evaluation.
    """
    parts = str_field.split('__')
    field = parts[1]
    operator = parts[2]
    value = argument(str_field)
    # Optional value type; defaults to plain string when absent.
    # (Was a bare except; only a short split can fail here.)
    try:
        t = parts[3]
    except IndexError:
        t = "str"
    if t == "date":
        # Optional strftime format, encoded with ':' in place of '%'.
        try:
            mark = parts[4].replace(":", "%")
        except IndexError:
            mark = "%Y-%m-%d"
    if operator == "gte":
        return (df[field] >= value)
    elif operator == "lte":
        return (df[field] <= value)
    elif operator == "is":
        return (df[field] == value)
    elif operator == "in":
        return u"{} in {}".format(field, [i for i in value.split(',')])
    elif operator == "notin":
        return u"{} not in {}".format([i for i in value.split(',')], field)
    elif operator == "between":
        _range = []
        between = value.split(":")
        if t == "date":
            _range = [i.strftime(mark)
                      for i in date_range(between[0], between[1]).tolist()]
        elif t == "int":
            # Endpoints arrive as the strings of "lo:hi"; convert before
            # building the inclusive range (passing strings to xrange
            # was a guaranteed TypeError).
            _range = [i for i in xrange(int(between[0]), int(between[1]) + 1)]
        return u"{} in {}".format(field, _range)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unicodedata
import re
from decimal import Decimal
from pandas import tslib, date_range
def fix_render(value):
if type(value) is str:
try:
return unicode(value)
except UnicodeDecodeError:
return unicode(value.decode('latin1'))
elif type(value) is tslib.Timestamp:
return value.strftime("%Y-%m-%d %H:%M:%S")
elif type(value) is Decimal:
return str(value)
return value
def pandas_to_dict(df):
return [{colname: fix_render(row[i])
for i, colname in enumerate(df.columns)}
for row in df.values]
def slugfy(text):
slug = unicodedata.normalize("NFKD", text).encode("UTF-8", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug:
return None
return slug
def df_generate(df, argument, str_field):
    """Build a pandas filter from an encoded request-argument name.

    ``str_field`` looks like
    ``<prefix>__<column>__<operator>[__<type>[__<date-mark>]]`` and
    ``argument`` is a callable mapping that name to the filter value.
    ``gte``/``lte``/``is`` return a boolean Series usable as a mask;
    ``in``/``notin``/``between`` return a query string.
    """
    parts = str_field.split('__')
    field = parts[1]
    operator = parts[2]
    value = argument(str_field)
    # Optional value type; defaults to plain string when absent.
    try:
        t = parts[3]
    except IndexError:
        t = "str"
    if t == "date":
        # Optional strftime format, encoded with ':' in place of '%'.
        try:
            mark = parts[4].replace(":", "%")
        except IndexError:
            mark = "%Y-%m-%d"
    if operator == "gte":
        # Bug fix: "gte" means greater-than-or-EQUAL; '>' dropped the
        # boundary value (same for "lte" below).
        return (df[field] >= value)
    elif operator == "lte":
        return (df[field] <= value)
    elif operator == "is":
        return (df[field] == value)
    elif operator == "in":
        return u"{} in {}".format(field, [i for i in value.split(',')])
    elif operator == "notin":
        return u"{} not in {}".format([i for i in value.split(',')], field)
    elif operator == "between":
        _range = []
        between = value.split(":")
        if t == "date":
            _range = [i.strftime(mark)
                      for i in date_range(between[0], between[1]).tolist()]
        elif t == "int":
            # Endpoints arrive as strings from "lo:hi"; convert before
            # building the inclusive range.
            _range = [i for i in xrange(int(between[0]), int(between[1]) + 1)]
        return u"{} in {}".format(field, _range)
| mit | Python |
290838a09bc932e11f95140f5de449c732110a71 | add x access mode to Log/ | yasokada/python-151113-lineMonitor,yasokada/python-151113-lineMonitor | utilLogger.py | utilLogger.py | import os.path
import datetime
import time
# for chmod
import os
from stat import *
'''
v0.7 2015/12/03
- change access mode of the file and folder(Log/) to make OTHER writable
v0.6 2015/12/02
- add msec string for time stamp
v0.5 2015/12/01
- remove CRLF at the end of the line
- save to Log/
- use [0] * 10 to declare a List
- add time stamp to the save strings
v0.4 2015/11/30
- comment out test run
- add from sentence to import CUtilLogger
v0.3 2015/11/30
- change array declaration to those using range()
- __init__() does not take saveto arg
- automatically get file name based on the date
v0.2 2015/11/30
- update add() to handle auto save feature
v0.1 2015/11/30
- add save()
- add add()
- add __init__()
'''
class CUtilLogger:
    """Buffered line logger: collects timestamped entries in memory and
    appends them to Log/<yymmdd>.log once bufferNum entries accumulate."""
    def __init__(self):
        # Next free slot in self.strs == number of buffered entries.
        self.idx = 0
        # Flush threshold; must stay <= len(self.strs).
        self.bufferNum = 5
        # Fixed-size entry buffer (10 slots, flushed every 5).
        self.strs = [0] * 10
        return
    def clear(self):
        """Blank out the buffered entries and reset the write index."""
        for idx in range(0, self.idx):
            self.strs[idx] = ""
        self.idx = 0
    def add(self,instr):
        """Buffer *instr* prefixed with a "Y/m/d,H:M:S,msec," time stamp.

        Auto-saves (then clears) once bufferNum entries are queued.
        Note: no newline is appended, so entries run together on disk
        (deliberate per the v0.5 changelog above)."""
        today = datetime.datetime.today()
        yymmddhhnnss = today.strftime("%Y/%m/%d,%H:%M:%S")
        msec = str(int(round(time.time() * 1000) % 1000))
        text = yymmddhhnnss + "," + msec + "," + instr
        self.strs[self.idx] = text
        self.idx = self.idx + 1
        # print self.idx
        if self.idx >= self.bufferNum:
            self.save()
            self.clear()
    def makeFolder(self):
        """Create ./Log on first use, granting rwx to user, group and
        other so logs stay manageable by any local account."""
        if os.path.isdir("Log") == False:
            os.mkdir("Log")
            os.chmod("Log", S_IWUSR | S_IRUSR | S_IXUSR | S_IWGRP | S_IRGRP | S_IXGRP | S_IWOTH | S_IROTH | S_IXOTH)
    def save(self):
        """Append all buffered entries to Log/<yymmdd>.log, then make the
        file read/writable by user, group and other."""
        self.makeFolder()
        today = datetime.datetime.today()
        yymmdd = today.strftime("%y%m%d")
        filename = "Log/" + yymmdd + ".log"
        with open(filename, "a") as logfd:
            for idx in range(0, self.idx):
                text = self.strs[idx]
                logfd.write(text)
        os.chmod(filename, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IWOTH | S_IROTH)
# Usage
'''
import time
from utilLogger import CUtilLogger
logger = CUtilLogger()
for loop in range(0, 31):
logger.add("test")
time.sleep(0.3)
logger.save() # to save the rest
logger = None
'''
# TODO: 0m > log files cannot be deleted by user "pi"
| import os.path
import datetime
import time
# for chmod
import os
from stat import *
'''
v0.7 2015/12/03
- change access mode of the file and folder(Log/) to make OTHER writable
v0.6 2015/12/02
- add msec string for time stamp
v0.5 2015/12/01
- remove CRLF at the end of the line
- save to Log/
- use [0] * 10 to declare a List
- add time stamp to the save strings
v0.4 2015/11/30
- comment out test run
- add from sentence to import CUtilLogger
v0.3 2015/11/30
- change array declaration to those using range()
- __init__() does not take saveto arg
- automatically get file name based on the date
v0.2 2015/11/30
- update add() to handle auto save feature
v0.1 2015/11/30
- add save()
- add add()
- add __init__()
'''
class CUtilLogger:
def __init__(self):
self.idx = 0
self.bufferNum = 5
self.strs = [0] * 10
return
def clear(self):
for idx in range(0, self.idx):
self.strs[idx] = ""
self.idx = 0
def add(self,instr):
today = datetime.datetime.today()
yymmddhhnnss = today.strftime("%Y/%m/%d,%H:%M:%S")
msec = str(int(round(time.time() * 1000) % 1000))
text = yymmddhhnnss + "," + msec + "," + instr
self.strs[self.idx] = text
self.idx = self.idx + 1
# print self.idx
if self.idx >= self.bufferNum:
self.save()
self.clear()
def makeFolder(self):
if os.path.isdir("Log") == False:
os.mkdir("Log")
# os.chmod("Log", S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IWOTH | S_IROTH)
def save(self):
self.makeFolder()
today = datetime.datetime.today()
yymmdd = today.strftime("%y%m%d")
filename = "Log/" + yymmdd + ".log"
with open(filename, "a") as logfd:
for idx in range(0, self.idx):
text = self.strs[idx]
logfd.write(text)
os.chmod(filename, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP | S_IWOTH | S_IROTH)
# Usage
'''
import time
from utilLogger import CUtilLogger
logger = CUtilLogger()
for loop in range(0, 31):
logger.add("test")
time.sleep(0.3)
logger.save() # to save the rest
logger = None
'''
# TODO: 0m > log files cannot be deleted by user "pi"
| mit | Python |
b5de756da50fb699eb497226ce4ee1e45759ac2a | Add port drawing to GraphBlock | anton-golubkov/Garland,anton-golubkov/Garland | src/gui/graphblock.py | src/gui/graphblock.py | # -*- coding: utf-8 -*-
from PySide import QtGui
class GraphBlock(QtGui.QGraphicsWidget):
    """ GraphBlock represents IPFBlock in graphics scene

    Renders a fixed-size rectangle labelled with the block type plus one
    small circle per port: input ports centered on the top edge, output
    ports centered on the bottom edge, evenly spaced across the width.
    """
    # Fixed widget geometry, in scene coordinates.
    block_width = 40
    block_height = 32
    # Diameter of each port circle.
    port_size = 10
    def __init__(self, block):
        super(GraphBlock, self).__init__()
        # The IPFBlock model object this item visualises.
        self.block = block
        # Background rectangle; parent item of the label and port circles.
        self.rect_item = QtGui.QGraphicsRectItem(self)
        self.rect_item.setRect(0, 0, self.block_width, self.block_height)
        self.setSizePolicy(QtGui.QSizePolicy.Fixed,
                           QtGui.QSizePolicy.Fixed)
        self.resize(self.block_width, self.block_height)
        # Centered type label rendered as HTML in a tiny pixel font.
        self.name_item = QtGui.QGraphicsTextItem(self.rect_item)
        self.name_item.setTextWidth(self.block_width)
        font = self.name_item.font()
        font.setPixelSize(5)
        self.name_item.setFont(font)
        self.name_item.setHtml("<center>%s</center>" % (self.block.type))
        # Port name -> ellipse item, one circle per model port.
        self.input_ports_items = dict()
        self.output_ports_items = dict()
        for iport in self.block.input_ports:
            self.input_ports_items[iport] = QtGui.QGraphicsEllipseItem(self.rect_item)
        for oport in self.block.output_ports:
            self.output_ports_items[oport] = QtGui.QGraphicsEllipseItem(self.rect_item)
        # Spread input circles evenly along the top edge: slot k sits at
        # k * width/(n+1), shifted by half the circle size to center it.
        # NOTE(review): under Python 2 '/' truncates to whole units here
        # -- presumably acceptable at this scale; confirm if ported.
        iport_count = len(self.input_ports_items)
        if iport_count > 0:
            iport_distance = self.block_width / (iport_count + 1)
            for i, iport_item in enumerate(self.input_ports_items.values()):
                iport_item.setRect( (i+1) * iport_distance - self.port_size / 2,
                                    0 - self.port_size / 2,
                                    self.port_size,
                                    self.port_size )
        # Same layout for output circles along the bottom edge.
        oport_count = len(self.output_ports_items)
        if oport_count > 0:
            oport_distance = self.block_width / (oport_count + 1)
            for i, oport_item in enumerate(self.output_ports_items.values()):
                oport_item.setRect( (i+1) * oport_distance - self.port_size / 2,
                                    self.block_height - self.port_size / 2,
                                    self.port_size,
                                    self.port_size )
| # -*- coding: utf-8 -*-
from PySide import QtGui
class GraphBlock(QtGui.QGraphicsWidget):
""" GraphBlock represents IPFBlock in graphics scene
"""
block_width = 40
block_height = 32
def __init__(self, block):
super(GraphBlock, self).__init__()
self.block = block
self.rect_item = QtGui.QGraphicsRectItem(self)
self.rect_item.setRect(0, 0, self.block_width, self.block_height)
self.setSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
self.resize(self.block_width, self.block_height)
self.name_item = QtGui.QGraphicsTextItem(self.rect_item)
self.name_item.setTextWidth(self.block_width)
font = self.name_item.font()
font.setPixelSize(5)
self.name_item.setFont(font)
self.name_item.setHtml("<center>%s</center>" % (self.block.type))
self.input_ports_items = dict()
self.output_ports_items = dict()
| lgpl-2.1 | Python |
36700dff3bfc010a31cb406108473d4823467dee | Update check_updates.py | site24x7/plugins,site24x7/plugins,site24x7/plugins | check_updates/check_updates.py | check_updates/check_updates.py | #!/usr/bin/python
import sys
import json
import subprocess
PYTHON_MAJOR_VERSION = sys.version_info[0]
if PYTHON_MAJOR_VERSION == 3:
import distro as platform
elif PYTHON_MAJOR_VERSION == 2:
import platform
os_info = platform.linux_distribution()[0].lower()
PLUGIN_VERSION = "1"
HEARTBEAT="true"
data={}
data['plugin_version'] = PLUGIN_VERSION
data['heartbeat_required']=HEARTBEAT
data['packages_to_be_updated']=0
data['security_updates']=0
command="yum check-update --security | grep -i 'needed for security'"
def get_command_output(command):
    """Run *command* through the shell and return its raw stdout bytes."""
    # NOTE(review): shell=True is tolerable because the command strings
    # are module-level constants; never pass untrusted input here.
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout_data, _ = proc.communicate()
    proc.wait()
    return stdout_data
if 'centos' in os_info or 'red hat' in os_info:
out = get_command_output(command)
if out:
out = out.rstrip()
count = out.split("needed for security")
security_count = count[0].split()[0]
if security_count == 'No':
data['security_updates'] = 0
else:
data['security_updates'] = security_count
packages_count = count[1].split()
for each in packages_count:
if each.isdigit():
data['packages_to_be_updated']=each
else:
file_path='/var/lib/update-notifier/updates-available'
lines = [line.strip('\n') for line in open(file_path)]
for line in lines:
if line:
if ( 'packages can be updated' in line ) or ('can be installed immediately' in line ) or ('can be applied immediately' in line):
data['packages_to_be_updated'] = line.split()[0]
if ('updates are security updates' in line) or ('updates are standard security updates' in line):
data['security_updates'] = line.split()[0]
print(json.dumps(data))
| #!/usr/bin/python
import json
import platform
import subprocess
PLUGIN_VERSION = "1"
HEARTBEAT="true"
data={}
data['plugin_version'] = PLUGIN_VERSION
data['heartbeat_required']=HEARTBEAT
data['packages_to_be_updated']=0
data['security_updates']=0
command="yum check-update --security | grep -i 'needed for security'"
os_info = platform.linux_distribution()[0].lower()
def get_command_output(command):
    """Run *command* through the shell and return its raw stdout bytes."""
    # NOTE(review): shell=True is tolerable because the command strings
    # are module-level constants; never pass untrusted input here.
    proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    stdout_data, _ = proc.communicate()
    proc.wait()
    return stdout_data
if 'centos' in os_info or 'red hat' in os_info:
out = get_command_output(command)
if out:
out = out.rstrip()
count = out.split("needed for security")
security_count = count[0].split()[0]
if security_count == 'No':
data['security_updates'] = 0
else:
data['security_updates'] = security_count
packages_count = count[1].split()
for each in packages_count:
if each.isdigit():
data['packages_to_be_updated']=each
else:
file_path='/var/lib/update-notifier/updates-available'
lines = [line.strip('\n') for line in open(file_path)]
for line in lines:
if line:
if ( 'packages can be updated' in line ) or ('can be installed immediately' in line ):
data['packages_to_be_updated'] = line.split()[0]
if 'updates are security updates' in line:
data['security_updates'] = line.split()[0]
print(json.dumps(data)) | bsd-2-clause | Python |
8fd9bd1a1084f1d8d1f61774fdf28a45be9cb4e5 | Undo faulty work | limbera/django-nap | nap/rpc/views.py | nap/rpc/views.py |
import inspect
import json
from django.views.generic import View
from nap import http
from nap.utils import JsonMixin
RPC_MARKER = '_rpc'
def method(view):
    '''Mark a view as accessible via RPC'''
    # Decorator: tags the callable with the RPC_MARKER attribute so
    # RPCMixin.dispatch will allow header-driven calls to it.
    setattr(view, RPC_MARKER, True)
    return view
def is_rpc_method(m):
    # True only for callables previously decorated with @method.
    return getattr(m, RPC_MARKER, False)
class RPCMixin(JsonMixin):
    '''Mix in to a standard View to provide RPC actions

    POST requests carrying an X-RPC-Action header are dispatched to the
    method of that name, provided it was marked with @method; all other
    requests fall through to normal View dispatch.
    '''
    # When True, OPTIONS responses include a JSON description of the
    # available RPC methods.
    permit_introspect = False
    def dispatch(self, request, *args, **kwargs):
        method = request.META.get('HTTP_X_RPC_ACTION', None)
        if request.method != 'POST' or method is None:
            return super(RPCMixin, self).dispatch(request, *args, **kwargs)
        # The header value is client-controlled; getattr is safe only
        # because is_rpc_method() requires the explicit @method marker.
        func = getattr(self, method, None)
        if not is_rpc_method(func):
            return http.PreconditionFailed()
        try:
            data = self.get_request_data({})
            # Ensure data is valid for passing as **kwargs
            (lambda **kwargs: None)(**data)
        except (ValueError, TypeError):
            return http.BadRequest()
        resp = self.execute(func, data)
        return http.JsonResponse(resp)
    def options(self, request, *args, **kwargs):
        # Standard OPTIONS, optionally augmented with introspection data.
        response = super(RPCMixin, self).options(request, *args, **kwargs)
        if self.permit_introspect:
            response['Content-Type'] = 'application/json'
            response.write(json.dumps(self._introspect()))
        return response
    def _introspect(self):
        # Describe every @method-marked member: argument names (minus
        # self), docstring and default values.
        methods = {}
        for name, prop in inspect.getmembers(self, is_rpc_method):
            argspec = inspect.getargspec(prop)
            methods[name] = {
                'args': argspec.args[1:],
                'doc': inspect.getdoc(prop),
                'defaults': argspec.defaults,
            }
        return methods
    def execute(self, handler, data):
        '''Helpful hook to ease wrapping the handler'''
        return handler(**data)
class RPCView(RPCMixin, View):
    '''Courtesy class: a plain View with RPCMixin already applied.'''
    pass
|
import inspect
import json
from django.views.generic import View
from nap import http
from nap.utils import JsonMixin
RPC_MARKER = '_rpc'
def method(view):
'''Mark a view as accessible via RPC'''
setattr(view, RPC_MARKER, True)
return view
def is_rpc_method(m):
return getattr(m, RPC_MARKER, False)
class RPCMixin(JsonMixin):
'''Mix in to a standard View to provide RPC actions'''
permit_introspect = False
def dispatch(self, request, *args, **kwargs):
method = request.META.get('HTTP_X_RPC_ACTION', None)
if request.method != 'POST' or method is None:
return super(RPCMixin, self).dispatch(request, *args, **kwargs)
func = getattr(self, method, None)
if not is_rpc_method(func):
return http.PreconditionFailed()
try:
data = self.get_request_data({})
# Ensure data is valid for passing as **kwargs
(lambda **kwargs: None)(**data)
except (ValueError, TypeError) as err:
return http.BadRequest(self.dumps(err.args))
resp = self.execute(func, data)
return http.JsonResponse(resp)
def options(self, request, *args, **kwargs):
response = super(RPCMixin, self).options(request, *args, **kwargs)
if self.permit_introspect:
response['Content-Type'] = 'application/json'
response.write(json.dumps(self._introspect()))
return response
def _introspect(self):
methods = {}
for name, prop in inspect.getmembers(self, is_rpc_method):
argspec = inspect.getargspec(prop)
methods[name] = {
'args': argspec.args[1:],
'doc': inspect.getdoc(prop),
'defaults': argspec.defaults,
}
return methods
def execute(self, handler, data):
'''Helpful hook to ease wrapping the handler'''
return handler(**data)
class RPCView(RPCMixin, View):
'''courtesy class to avoid having to mix it yourself.'''
pass
| bsd-3-clause | Python |
4f6d9bd2539e4a97c91703f1719936e4e53dfcc1 | Add check for netifaces | RagBillySandstone/SysAdmin_Stuffs,RagBillySandstone/SysAdmin_Stuffs | pingGateway.py | pingGateway.py | #!/usr/bin/env python
# pingDefault.py -- pings the machine's default gateway
# In most home networks, this is 192.168.0.1, but this script does not
# make that assumption
# It does, however, assume linux ping utility
# Linux is notorious for dropping WiFi connections, especially on laptops
# I found that pinging Google.com every few minutes seems to help
# Eventually it occurred to me that all that was really necessary was to
# ping the wireless router. Setting cron to run this script every five mins
# doesn't completely alleviate the problem, but helps
import os
try:
    import netifaces
except ImportError:
    # Best-effort self-install; assumes a Debian/Ubuntu host with sudo.
    # NOTE(review): shelling out to 'sudo apt-get' from a cron script is
    # surprising side behaviour -- consider failing with a message instead.
    print "We need to install netifaces"
    os.system('sudo apt-get install python-netifaces')
    import netifaces
gws=netifaces.gateways()
try:
    # I found this line in a Google search. I admittedly do not understand the
    # gws['default'][netifaces.AF_INET][0] thing. The same results seem to be
    # returned with
    # gws['default'][2][0] -- Which I DO understand
    # (gws['default'] maps address family -> (gateway, interface); [0]
    # extracts the IPv4 gateway address.)
    response = os.system("ping -c 1 -w 2 > /dev/null " + gws['default'][netifaces.AF_INET][0])
except KeyError:
    # gws['default'] has no IPv4 entry: no default gateway at all.
    print 'No connection found'
    exit(2)
# Check the response
if response == 0:
    print 'Gateway is up'
    exit(0)
else:
    print 'Gateway is down'
    exit(1)
| #!/usr/bin/env python
# pingDefault.py -- pings the machine's default gateway
# In most home networks, this is 192.168.0.1, but this script does not
# make that assumption
# It does, however, assume linux ping utility
# Linux is notorious for dropping WiFi connections, especially on laptops
# I found that pinging Google.com every few minutes seems to help
# Eventually it occurred to me that all that was really necessary was to
# ping the wireless router. Setting cron to run this script every five mins
# doesn't completely alleviate the problem, but helps
import os, netifaces
gws=netifaces.gateways()
try:
# I found this line in a Google search. I admittedly do not understand the
# gws['default'][netifaces.AF_INET][0] thing. The same results seem to be
# returned with
# gws['default'][2][0] -- Which I DO understand
response = os.system("ping -c 1 -w 2 > /dev/null " + gws['default'][netifaces.AF_INET][0])
except KeyError:
print 'No connection found'
exit(2)
# Check the response
if response == 0:
print 'Gateway is up'
exit(0)
else:
print 'Gateway is down'
exit(1)
| apache-2.0 | Python |
50ea692ffa5a5e42cd0e94b8b0b8c6c8fe5aca76 | Correct indentation | scitran/api,scitran/core,scitran/api,scitran/core,scitran/core,scitran/core | bin/oneoffs/load_external_data.py | bin/oneoffs/load_external_data.py | #!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
    """Replace *obj*'s permission list with a single admin grant for USER_ID."""
    admin_grant = {"access": "admin", "_id": USER_ID}
    obj['permissions'] = [admin_grant]
def handle_dates(obj):
    """Parse the timestamp/created/modified fields of *obj* in place.

    Fields that are absent or falsy are left untouched.
    """
    for key in ('timestamp', 'created', 'modified'):
        if obj.get(key):
            obj[key] = dateutil.parser.parse(obj[key])
def handle_file(f):
    """Normalise one file entry in place for reinsertion."""
    handle_dates(f)
    # Drop transient join metadata that must not be persisted.
    for transient_key in ('info_exists', 'join_origin'):
        f.pop(transient_key, None)
    # Replace the real content hash with a known-safe stub.
    f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
| #!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
obj['permissions'] = [{
"access": "admin",
"_id": USER_ID
}]
def handle_dates(obj):
if obj.get('timestamp'):
obj['timestamp'] = dateutil.parser.parse(obj['timestamp'])
if obj.get('created'):
obj['created'] = dateutil.parser.parse(obj['created'])
if obj.get('modified'):
obj['modified'] = dateutil.parser.parse(obj['modified'])
def handle_file(f):
handle_dates(f)
f.pop('info_exists', None)
f.pop('join_origin', None)
f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
| mit | Python |
32566b73fae3a07fea95d20aa12962754b46efb1 | use django convention for importing urls defaults (gets handler404, etc.) | peterayeni/rapidsms,ehealthafrica-ci/rapidsms,caktus/rapidsms,dimagi/rapidsms,lsgunth/rapidsms,peterayeni/rapidsms,peterayeni/rapidsms,lsgunth/rapidsms,catalpainternational/rapidsms,ken-muturi/rapidsms,ehealthafrica-ci/rapidsms,ken-muturi/rapidsms,eHealthAfrica/rapidsms,unicefuganda/edtrac,catalpainternational/rapidsms,rapidsms/rapidsms-core-dev,lsgunth/rapidsms,catalpainternational/rapidsms,unicefuganda/edtrac,caktus/rapidsms,unicefuganda/edtrac,eHealthAfrica/rapidsms,lsgunth/rapidsms,catalpainternational/rapidsms,eHealthAfrica/rapidsms,rapidsms/rapidsms-core-dev,caktus/rapidsms,dimagi/rapidsms,dimagi/rapidsms-core-dev,ken-muturi/rapidsms,dimagi/rapidsms-core-dev,ehealthafrica-ci/rapidsms,peterayeni/rapidsms | lib/rapidsms/djangoproject/urls.py | lib/rapidsms/djangoproject/urls.py | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import os
from django.conf.urls.defaults import *
from rapidsms.utils.modules import try_import
from ..conf import settings
# this list will be populated with the urls from the urls.urlpatterns of
# each installed app, then found by django as if we'd listed them here.
urlpatterns = []
for module_name in settings.INSTALLED_APPS:
# leave django contrib apps alone. (many of them include urlpatterns
# which shouldn't be auto-mapped.) this is a hack, but i like the
# automatic per-app mapping enough to keep it. (for now.)
if module_name.startswith("django."):
continue
# attempt to import this app's urls
module = try_import("%s.urls" % (module_name))
if not hasattr(module, "urlpatterns"): continue
# add the explicitly defined urlpatterns
urlpatterns += module.urlpatterns
# if the MEDIA_URL does not contain a hostname (ie, it's just an
# http path), and we are running in DEBUG mode, we will also serve
# the media for this app via this development server. in production,
# these files should be served directly
if settings.DEBUG:
if not settings.MEDIA_URL.startswith("http://"):
media_prefix = settings.MEDIA_URL.strip("/")
module_suffix = module_name.split(".")[-1]
# does urls.py have a sibling "static" dir? (media is always
# served from "static", regardless of what MEDIA_URL says)
module_path = os.path.dirname(module.__file__)
static_dir = "%s/static" % (module_path)
if os.path.exists(static_dir):
# map to {{ MEDIA_URL }}/appname
urlpatterns += patterns("", url(
"^%s/%s/(?P<path>.*)$" % (
media_prefix,
module_suffix),
"django.views.static.serve",
{"document_root": static_dir}
))
| #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import os
from django.conf.urls.defaults import patterns, url
from rapidsms.utils.modules import try_import
from ..conf import settings
# this list will be populated with the urls from the urls.urlpatterns of
# each installed app, then found by django as if we'd listed them here.
urlpatterns = []
for module_name in settings.INSTALLED_APPS:
# leave django contrib apps alone. (many of them include urlpatterns
# which shouldn't be auto-mapped.) this is a hack, but i like the
# automatic per-app mapping enough to keep it. (for now.)
if module_name.startswith("django."):
continue
# attempt to import this app's urls
module = try_import("%s.urls" % (module_name))
if not hasattr(module, "urlpatterns"): continue
# add the explicitly defined urlpatterns
urlpatterns += module.urlpatterns
# if the MEDIA_URL does not contain a hostname (ie, it's just an
# http path), and we are running in DEBUG mode, we will also serve
# the media for this app via this development server. in production,
# these files should be served directly
if settings.DEBUG:
if not settings.MEDIA_URL.startswith("http://"):
media_prefix = settings.MEDIA_URL.strip("/")
module_suffix = module_name.split(".")[-1]
# does urls.py have a sibling "static" dir? (media is always
# served from "static", regardless of what MEDIA_URL says)
module_path = os.path.dirname(module.__file__)
static_dir = "%s/static" % (module_path)
if os.path.exists(static_dir):
# map to {{ MEDIA_URL }}/appname
urlpatterns += patterns("", url(
"^%s/%s/(?P<path>.*)$" % (
media_prefix,
module_suffix),
"django.views.static.serve",
{"document_root": static_dir}
))
| bsd-3-clause | Python |
82a1cbc477bcf57629f8d5e07184cb03e536b603 | Check for device connection before sending packet. | Motsai/neblina-python,Motsai/neblina-python | neblinaDevice.py | neblinaDevice.py | #!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import asyncio
from neblina import *
from neblinaUART import NeblinaUART
bleSupported = True
try:
from neblinaBLE import NeblinaBLE
except ImportError:
print("Unable to import BLE. BLE is unsupported and can not be used.")
bleSupported = False
###################################################################################
class NeblinaDevice(object):
    """Thin facade over a Neblina transport (UART or BLE).

    Packet I/O is guarded by the connection state: nothing is sent and
    None is received while the underlying transport is disconnected.
    """
    def __init__(self, address, interface):
        self.address = address
        if interface is Interface.UART:
            transport = NeblinaUART(self.address)
        else:
            # BLE support is optional at import time.
            assert bleSupported
            transport = NeblinaBLE(self.address)
        self.communication = transport
    def connect(self):
        """Open the underlying transport."""
        self.communication.connect()
    def disconnect(self):
        """Close the underlying transport."""
        self.communication.disconnect()
    def isConnected(self):
        """Return True while the transport reports an open connection."""
        return self.communication.isConnected()
    def receivePacket(self):
        """Read one packet, or None when disconnected."""
        if not self.isConnected():
            return None
        return self.communication.receivePacket()
    def sendPacket(self, packet):
        """Send *packet*, silently dropping it when disconnected."""
        if self.isConnected():
            self.communication.sendPacket(packet)
| #!/usr/bin/env python
###################################################################################
#
# Copyright (c) 2010-2016 Motsai
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###################################################################################
import asyncio
from neblina import *
from neblinaUART import NeblinaUART
bleSupported = True
try:
from neblinaBLE import NeblinaBLE
except ImportError:
print("Unable to import BLE. BLE is unsupported and can not be used.")
bleSupported = False
###################################################################################
class NeblinaDevice(object):
def __init__(self, address, interface):
self.address = address
if interface is Interface.UART:
self.communication = NeblinaUART(self.address)
else:
assert bleSupported
self.communication = NeblinaBLE(self.address)
def connect(self):
self.communication.connect()
def disconnect(self):
self.communication.disconnect()
def isConnected(self):
return self.communication.isConnected()
def receivePacket(self):
if self.isConnected():
return self.communication.receivePacket()
else:
return None
def sendPacket(self, packet):
self.communication.sendPacket(packet)
| mit | Python |
335bb6cc9bc93320915aa08d56c5e5d24eb70b18 | replace $ with command prefix | sammdot/circa | modules/help.py | modules/help.py | class HelpModule:
def __init__(self, circa):
self.circa = circa
self.events = {
"cmd.help": [self.help]
}
self.docs = {
"help": "help [<module>[.<command>]|$<command>] → show usage info for a command, or list all commands in a module, or list all modules"
}
def help(self, fr, to, msg, m):
pfx = self.circa.conf["prefix"]
msg = msg.split(" ", 1)[0]
if msg:
c = msg.split(".", 1)
if c[0] not in self.circa.modules:
self.circa.notice(fr, "Module {0} doesn't exist or is not loaded".format(c[0]))
return
module = self.circa.modules[c[0]]
if not hasattr(module, "docs"):
self.circa.notice(fr, "No help available for module {0}".format(c[0]))
if len(c) == 1: # module
if isinstance(module.docs, dict):
commands = sorted([pfx + cmd for cmd in module.docs.keys()])
self.circa.notice(fr, "Available commands: " + ", ".join(commands))
self.circa.notice(fr, "Type {0}help {1}.<command> for command help.".format(pfx, c[0]))
else:
# in this case the module likely doesn't offer plain commands
self.circa.notice(fr, str(module.docs))
else: # command
if "cmd." + c[1] not in module.events:
self.circa.notice(fr, "No command {0}{1} in module {2}".format(pfx, c[1], c[0]))
return
if c[1] not in module.docs:
self.circa.notice(fr, "No help for {0}{1} in module {2}".format(pfx, c[1], c[0]))
return
command = module.docs[c[1]].replace("$", pfx)
self.circa.notice(fr, pfx + command)
else:
modules = sorted([i for i in self.circa.modules.keys() if \
hasattr(self.circa.modules[i], "docs")])
self.circa.notice(fr, "Available modules: " + ", ".join(modules))
self.circa.notice(fr, "Type {0}help <module> for a list of commands, or {0}<command> for individual command help.".format(pfx))
module = HelpModule  # NOTE(review): presumably read by the plugin loader — confirm
| class HelpModule:
def __init__(self, circa):
self.circa = circa
self.events = {
"cmd.help": [self.help]
}
self.docs = {
"help": "help [<module>[.<command>]|$<command>] → show usage info for a command, or list all commands in a module, or list all modules"
}
def help(self, fr, to, msg, m):
pfx = self.circa.conf["prefix"]
msg = msg.split(" ", 1)[0]
if msg:
c = msg.split(".", 1)
if c[0] not in self.circa.modules:
self.circa.notice(fr, "Module {0} doesn't exist or is not loaded".format(c[0]))
return
module = self.circa.modules[c[0]]
if not hasattr(module, "docs"):
self.circa.notice(fr, "No help available for module {0}".format(c[0]))
if len(c) == 1: # module
if isinstance(module.docs, dict):
commands = sorted([pfx + cmd for cmd in module.docs.keys()])
self.circa.notice(fr, "Available commands: " + ", ".join(commands))
self.circa.notice(fr, "Type {0}help {1}.<command> for command help.".format(pfx, c[0]))
else:
# in this case the module likely doesn't offer plain commands
self.circa.notice(fr, str(module.docs))
else: # command
if "cmd." + c[1] not in module.events:
self.circa.notice(fr, "No command {0}{1} in module {2}".format(pfx, c[1], c[0]))
return
if c[1] not in module.docs:
self.circa.notice(fr, "No help for {0}{1} in module {2}".format(pfx, c[1], c[0]))
return
command = module.docs[c[1]]
self.circa.notice(fr, pfx + command)
else:
modules = sorted([i for i in self.circa.modules.keys() if \
hasattr(self.circa.modules[i], "docs")])
self.circa.notice(fr, "Available modules: " + ", ".join(modules))
self.circa.notice(fr, "Type {0}help <module> for a list of commands, or {0}<command> for individual command help.".format(pfx))
module = HelpModule  # NOTE(review): presumably the plugin loader reads this attribute — confirm
| bsd-3-clause | Python |
60ea2738b39b38bdc1f25594a759aace0f520501 | Add utility function to dump flask env | usgo/online-ratings,usgo/online-ratings,usgo/online-ratings,Kashomon/online-ratings,Kashomon/online-ratings,Kashomon/online-ratings | web/manage.py | web/manage.py | from flask.ext.script import Manager
from app import get_app
from create_db import drop_all_tables, create_barebones_data, create_all_data, create_server
# Build the Flask app and expose the db-management helpers as CLI commands.
app = get_app('config.DockerConfiguration')
manager = Manager(app)
for command in (drop_all_tables, create_barebones_data, create_all_data, create_server):
    manager.command(command)


@manager.command
def config():
    'Print out all config values from the fully assembled flask app'
    # Same output as joining with '\n': one KEY=VALUE pair per line, sorted.
    for key, value in sorted(app.config.items()):
        print('%s=%s' % (key, value))
if __name__ == '__main__':
manager.run() | from flask.ext.script import Manager
from app import get_app
from create_db import drop_all_tables, create_barebones_data, create_all_data, create_server
# Assemble the Flask app and register the database bootstrap commands.
app = get_app('config.DockerConfiguration')
manager = Manager(app)
for _cmd in (drop_all_tables, create_barebones_data, create_all_data, create_server):
    manager.command(_cmd)
if __name__ == '__main__':
manager.run() | mit | Python |
b4078ad91d56d653c575d10d32ee34168b1df5b3 | Add list_of_numbers | eagafonov/json_schema_helpers | json_schema_helpers/helpers.py | json_schema_helpers/helpers.py |
# Simple types
type_string = {"type": "string"}
type_null = {"type": "null"}
type_integer = {"type": "integer"}
type_number = {"type": "number"}
type_object = {"type": "object"}
type_list = {"type": "array"}  # Python clashed with JavaScript :-)
type_boolean = {"type": "boolean"}

# Simple type or null
type_string_or_null = {"oneOf": [type_string, type_null]}
type_integer_or_null = {"oneOf": [type_integer, type_null]}
type_number_or_null = {"oneOf": [type_number, type_null]}
type_object_or_null = {"oneOf": [type_object, type_null]}
type_list_or_null = {"oneOf": [type_list, type_null]}
type_boolean_or_null = {"oneOf": [type_boolean, type_null]}

list_of_strings = {"type": "array", "items": [{"type": "string"}]}
list_of_numbers = {"type": "array", "items": [type_number]}


# Complex
def list_of(ref, minItems=None, maxItems=None, exactItems=None):
    """Schema for an array whose items reference ``#/definitions/<ref>``.

    NOTE(review): ``items`` is a *list*, which draft-04 treats as tuple
    validation (only index 0 is constrained) — confirm that is intended.
    """
    result = {"type": "array", "items": [{"$ref": "#/definitions/%s" % ref}]}
    if exactItems is not None:
        # exactItems pins both bounds.
        minItems = maxItems = exactItems
    if minItems is not None:
        result["minItems"] = minItems
    if maxItems is not None:
        result["maxItems"] = maxItems
    return result


def ref(ref_id):
    """Reference to a named definition."""
    return {"$ref": "#/definitions/%s" % ref_id}


def schema(schema_options, **kwargs):
    """Wrap *schema_options* (updated with **kwargs) in a draft-04 schema."""
    result = {"$schema": "http://json-schema.org/draft-04/schema#"}
    result.update(schema_options)
    result.update(kwargs)
    return result
|
# Simple types
type_string = {"type": "string"}
type_null = {"type": "null"}
type_integer = {"type": "integer"}
type_number = {"type": "number"}
type_object = {"type": "object"}
type_list = {"type": "array"}  # Python clashed with JavaScript :-)
type_boolean = {"type": "boolean"}

# Simple type or null
type_string_or_null = {"oneOf": [type_string, type_null]}
type_integer_or_null = {"oneOf": [type_integer, type_null]}
type_number_or_null = {"oneOf": [type_number, type_null]}
type_object_or_null = {"oneOf": [type_object, type_null]}
type_list_or_null = {"oneOf": [type_list, type_null]}
type_boolean_or_null = {"oneOf": [type_boolean, type_null]}

list_of_strings = {"type": "array", "items": [{"type": "string"}]}


# Complex
def list_of(ref, minItems=None, maxItems=None, exactItems=None):
    """Array schema referencing ``#/definitions/<ref>`` for its (first) item."""
    out = {"type": "array", "items": [{"$ref": "#/definitions/%s" % ref}]}
    if exactItems is not None:
        minItems = exactItems
        maxItems = exactItems
    for key, bound in (("minItems", minItems), ("maxItems", maxItems)):
        if bound is not None:
            out[key] = bound
    return out


def ref(ref_id):
    """Reference to a named type definition."""
    return {"$ref": "#/definitions/%s" % ref_id}


def schema(schema_options, **kwargs):
    """Return *schema_options* merged into a draft-04 schema envelope."""
    s = {"$schema": "http://json-schema.org/draft-04/schema#"}
    s.update(schema_options)
    s.update(kwargs)
    return s
| mit | Python |
9001989292761e40adc648cb14d421dcb9703ad8 | Change port number and bind to default ip address | haramaki/spark-openstack | webhookapp.py | webhookapp.py | # import Flask
from flask import Flask, request
# import custom-made modules
import sparkmessage
import argparse
# Create an instance of Flask
app = Flask(__name__)

# Bot access token, filled in from the command line in the __main__ block.
TOKEN = ""


# Index page will trigger index() function
@app.route('/')
def index():
    return 'Hello World'


# Webhook page will trigger webhooks() function
@app.route("/webhook", methods=['POST'])
def webhooks():
    """Handle a Spark webhook callback; echo 'Hi' back to the room."""
    # Get the json data
    json = request.json
    # parse the message id, person id, person email, and room id
    message_id = json["data"]["id"]
    person_id = json["data"]["personId"]
    person_email = json["data"]["personEmail"]
    room_id = json["data"]["roomId"]
    # convert the message id into readable text
    message = sparkmessage.get(TOKEN, message_id)
    print(message)
    if message == "Hi":
        sparkmessage.post(TOKEN, person_id, person_email, room_id, "Hi")
    else:
        print("do nothing")
    # BUG FIX: Flask view functions must return a response; returning None
    # makes Flask raise "View function did not return a valid response".
    return "OK"


# run the application
if __name__ == "__main__":
    p = argparse.ArgumentParser()
    p.add_argument("-token", default="")
    args = p.parse_args()
    TOKEN = args.token
    print(TOKEN)  # NOTE(review): this echoes the secret token to stdout
    app.run(host="0.0.0.0", port=8000)
| # import Flask
from flask import Flask, request
# import custom-made modules
import sparkmessage
import argparse
# Create an instance of Flask
app = Flask(__name__)

# Bot access token, assigned from the command line in the __main__ block.
TOKEN = ""


# Index page will trigger index() function
@app.route('/')
def index():
    return 'Hello World'


# Webhook page will trigger webhooks() function
@app.route("/webhook", methods=['POST'])
def webhooks():
    """Handle a Spark webhook callback; echo 'Hi' back to the room."""
    # Get the json data
    json = request.json
    # parse the message id, person id, person email, and room id
    message_id = json["data"]["id"]
    person_id = json["data"]["personId"]
    person_email = json["data"]["personEmail"]
    room_id = json["data"]["roomId"]
    # convert the message id into readable text
    message = sparkmessage.get(TOKEN, message_id)
    print(message)
    if message == "Hi":
        sparkmessage.post(TOKEN, person_id, person_email, room_id, "Hi")
    else:
        print("do nothing")
    # BUG FIX: a Flask view must return a response; the original returned
    # None, which Flask rejects at request time.
    return "OK"


# run the application
if __name__ == "__main__":
    p = argparse.ArgumentParser()
    p.add_argument("-token", default="")
    args = p.parse_args()
    TOKEN = args.token
    print(TOKEN)  # NOTE(review): prints the secret token to stdout
    app.run()
| apache-2.0 | Python |
2daace6af733e833246830ecaa129bea09c247a9 | Fix topic rewrite | sebastinas/debian-devel-changes-bot | DebianDevelChangesBot/utils/rewrite_topic.py | DebianDevelChangesBot/utils/rewrite_topic.py | # -*- coding: utf-8 -*-
#
# Debian Changes Bot
# Copyright (C) 2015 Sebastian Ramacher <sramacher@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
def rewrite_topic(topic, prefix, value):
    """Return *topic* with its ' | '-separated '<prefix>: ...' segment
    replaced by '<prefix>: <value>'; non-matching segments are kept verbatim."""
    if not len(prefix):
        return topic
    # 'dinstall' carries free text; every other prefix is followed by digits.
    if prefix == 'dinstall':
        pattern = re.compile(r'dinstall: [^|]*')
    else:
        pattern = re.compile(r'{}: \d+'.format(prefix))
    parts = []
    for part in topic.split(' | '):
        if pattern.match(part) is None:
            parts.append(part)
        else:
            parts.append('{}: {}'.format(prefix, value))
    return ' | '.join(parts)
| # -*- coding: utf-8 -*-
#
# Debian Changes Bot
# Copyright (C) 2015 Sebastian Ramacher <sramacher@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
def rewrite_topic(topic, prefix, value):
    """Rewrite the '<prefix>: ...' segment of a ' | '-separated topic string."""
    if not len(prefix):
        return topic
    if prefix == 'dinstall':
        matcher = re.compile(r'dinstall: (running|not running)')
    else:
        matcher = re.compile(r'{}: \d+'.format(prefix))
    replacement = '{}: {}'.format(prefix, value)
    return ' | '.join(
        replacement if matcher.match(segment) is not None else segment
        for segment in topic.split(' | '))
| agpl-3.0 | Python |
0d4023d629b14d4396d370c395a68fa87b59ddb2 | rename to __version__ | b-mueller/mythril,b-mueller/mythril,b-mueller/mythril,b-mueller/mythril | mythril/__version__.py | mythril/__version__.py | """This file contains the current Mythril version.
This file is suitable for sourcing inside POSIX shell, e.g. bash as well
as for importing into Python.
"""
# Keep this a single plain assignment: the shell `source` path depends on it.
__version__ = "v0.20.8"
| """This file contains the current Mythril version.
This file is suitable for sourcing inside POSIX shell, e.g. bash as well
as for importing into Python.
"""
# Keep as one simple assignment so POSIX shell `source` can read it.
VERSION = "v0.20.8" # NOQA
| mit | Python |
6de456df864fcf3f801f41e247d1fcf551195429 | fix conflict | n0stack/n0core | n0core/compute/main.py | n0core/compute/main.py | import sys
sys.path.append('../../') # NOQA
import pulsar
from n0core.lib.n0mq import N0MQ
from n0core.lib.proto import CreateVMRequest
client = N0MQ('pulsar://localhost:6650')
consumer = client.subscribe('persistent://sample/standalone/compute/handle')


@consumer.on('CreateVMRequest')
def create_VM_request(message, auto_ack=False):
    """Log an incoming create-VM request and acknowledge it."""
    print('create vm request')
    print(message.data)
    consumer.ack(message)


if __name__ == '__main__':
    client.listen()

# BUG FIX: a stray '=======' merge-conflict marker sat here between the two
# halves of this module; it is invalid Python and has been removed.
# NOTE(review): both conflict halves remain below — client.listen() above
# blocks, so mqhandler.listen() is unreachable; decide which half to keep.
from mqhandler import MQHandler

mqhandler = MQHandler('pulsar://localhost:6550',
                      'persistent://sample/standalone/volumer/114514',
                      subscription_name='compute')


@mqhandler.handle('CreateVMRequest')
def create_VM_handler(inner_msg, messenger):
    print('create vm')
    print(inner_msg)


@mqhandler.handle('DeleteVMRequest')
def delete_vm_handler(inner_msg, messenger):
    print('delete vm')
    print(inner_msg)


if __name__ == '__main__':
    mqhandler.listen()
| import sys
sys.path.append('../../') # NOQA
import pulsar
from n0core.lib.n0mq import N0MQ
from n0core.lib.proto import CreateVMRequest
# Wire a consumer onto the compute topic and log every CreateVMRequest.
client = N0MQ('pulsar://localhost:6650')
consumer = client.subscribe('persistent://sample/standalone/compute/handle')


@consumer.on('CreateVMRequest')
def create_VM_request(message, auto_ack=False):
    """Log an incoming create-VM request and acknowledge it."""
    print('create vm request')
    print(message.data)
    consumer.ack(message)


if __name__ == '__main__':
    client.listen()
| bsd-2-clause | Python |
c3626cdea8b73d81f21aa3acf14a9e018df64790 | Fix tests/functional/get_installed_test.py | Yelp/venv-update,Yelp/pip-faster,Yelp/pip-faster,Yelp/venv-update,Yelp/pip-faster,Yelp/venv-update | tests/functional/get_installed_test.py | tests/functional/get_installed_test.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from testing import run
from testing import venv_update_script
def get_installed():
out, err = venv_update_script('''\
import pip_faster as p
for p in sorted(p.reqnames(p.pip_get_installed())):
print(p)''', venv='myvenv')
assert err == ''
# Most python distributions which have argparse in the stdlib fail to
# expose it to setuptools as an installed package (it seems all but ubuntu
# do this). This results in argparse sometimes being installed locally,
# sometimes not, even for a specific version of python.
# We normalize by never looking at argparse =/
out = out.replace('argparse\n', '', 1)
return out.split()
@pytest.mark.usefixtures('pypi_server_with_fallback')
def test_pip_get_installed(tmpdir):
tmpdir.chdir()
run('virtualenv', 'myvenv')
run('rm', '-rf', 'myvenv/local')
assert get_installed() == ['pip', 'setuptools']
run(
'myvenv/bin/pip', 'install',
'hg+https://bitbucket.org/bukzor/coverage.py@__main__-support#egg=coverage',
'git+git://github.com/bukzor/cov-core.git@master#egg=cov-core',
'-e', 'git+git://github.com/bukzor/pytest-cov.git@master#egg=pytest-cov',
)
assert get_installed() == ['cov-core', 'coverage', 'pip', 'py', 'pytest', 'pytest-cov', 'setuptools']
run('myvenv/bin/pip', 'uninstall', '--yes', 'cov-core', 'coverage', 'py', 'pytest', 'pytest-cov')
assert get_installed() == ['pip', 'setuptools']
run('myvenv/bin/pip', 'install', 'flake8')
assert get_installed() == ['flake8', 'mccabe', 'pep8', 'pip', 'pyflakes', 'setuptools']
run('myvenv/bin/pip', 'uninstall', '--yes', 'flake8')
assert get_installed() == ['mccabe', 'pep8', 'pip', 'pyflakes', 'setuptools']
| from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from testing import run
from testing import venv_update_script
def get_installed():
    """List the requirement names installed in 'myvenv', sorted.

    'argparse' is dropped from the output: whether a distribution exposes the
    stdlib argparse to setuptools varies by platform, so it is never compared.
    """
    out, err = venv_update_script('''\
import pip_faster as p
for p in sorted(p.reqnames(p.pip_get_installed())):
    print(p)''', venv='myvenv')
    assert err == ''
    out = out.replace('argparse\n', '', 1)
    return out.split()


def test_pip_get_installed(tmpdir):
    """Installing and uninstalling packages shows up in pip_get_installed()."""
    tmpdir.chdir()
    run('virtualenv', 'myvenv')
    run('rm', '-rf', 'myvenv/local')
    assert get_installed() == ['pip', 'setuptools']

    run(
        'myvenv/bin/pip', 'install',
        'hg+https://bitbucket.org/bukzor/coverage.py@__main__-support#egg=coverage',
        'git+git://github.com/bukzor/cov-core.git@master#egg=cov-core',
        '-e', 'git+git://github.com/bukzor/pytest-cov.git@master#egg=pytest-cov',
    )
    assert get_installed() == ['cov-core', 'coverage', 'pip', 'py', 'pytest', 'pytest-cov', 'setuptools']

    run('myvenv/bin/pip', 'uninstall', '--yes', 'cov-core', 'coverage', 'py', 'pytest', 'pytest-cov')
    assert get_installed() == ['pip', 'setuptools']

    run('myvenv/bin/pip', 'install', 'flake8')
    assert get_installed() == ['flake8', 'mccabe', 'pep8', 'pip', 'pyflakes', 'setuptools']

    run('myvenv/bin/pip', 'uninstall', '--yes', 'flake8')
    assert get_installed() == ['mccabe', 'pep8', 'pip', 'pyflakes', 'setuptools']
| mit | Python |
7024a02daeabd98cd6fa02c63f339a60995f9200 | Rename SpacyVectors->StaticVectors, and refactor to make it easier to customise vector loading | explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc | thinc/neural/_classes/spacy_vectors.py | thinc/neural/_classes/spacy_vectors.py | import numpy
from ...import describe
from ...describe import Dimension, Synapses, Gradient
from .._lsuv import LSUVinit
from ..ops import NumpyOps
from ...api import layerize
from .model import Model
from ...extra.load_nlp import get_vectors
try:
import cupy
except ImportError:
cupy = None
@layerize
def get_word_ids(docs, drop=0.):
    '''Get word forms.'''
    sequences = []
    ops = Model.ops
    for doc in docs:
        # One extra trailing slot, left as 0 — looks like a terminator row;
        # TODO confirm downstream consumers rely on it.
        ids = numpy.zeros((len(doc) + 1,), dtype='uint64')
        for token in doc:
            ids[token.i] = token.lex_id or token.orth
        ids[len(doc)] = 0
        sequences.append(ops.asarray(ids))
    return sequences, None
@describe.on_data(LSUVinit)
@describe.attributes(
    nM=Dimension("Vector dimensions"),
    nO=Dimension("Size of output"),
    W=Synapses(
        "A projection matrix, to change vector dimensionality",
        lambda obj: (obj.nO, obj.nM),
        lambda W, ops: ops.xavier_uniform_init(W)),
    d_W=Gradient("W"),
)
class StaticVectors(Model):
    '''Load a static embedding table, and learn a linear projection from it.

    Out-of-vocabulary items are modded into the table, receiving an arbitrary
    vector (but the same word will always receive the same vector).
    '''
    name = 'static-vectors'

    def __init__(self, lang, nO):
        Model.__init__(self)
        self.nO = nO
        # This doesn't seem the cleverest solution,
        # but it ensures multiple models load the
        # same copy of spaCy if they're deserialised.
        self.lang = lang
        vectors = self.get_vectors()
        self.nM = vectors.shape[1]
        if self.nM == 0:
            raise ValueError(
                "Cannot create vectors table with dimension 0.\n"
                "If you're using pre-trained vectors, are the vectors loaded?")
        self.nV = vectors.shape[0]

    def get_vectors(self):
        # BUG FIX: the def line was missing its colon and the body referred
        # to an undefined name 'lang' instead of self.lang.
        # The call below resolves to the module-level load_nlp helper
        # (class scope is not in the method lookup chain), not recursion.
        return get_vectors(self.ops, self.lang)

    def begin_update(self, ids, drop=0.):
        vector_table = self.get_vectors()
        # Mod the ids into the table so out-of-vocabulary ids map to a row.
        vectors = vector_table[ids % vector_table.shape[0]]

        def finish_update(gradients, sgd=None):
            self.d_W += self.ops.batch_outer(gradients, vectors)
            if sgd is not None:
                sgd(self._mem.weights, self._mem.gradient, key=self.id)
            return None

        dotted = self.ops.batch_dot(vectors, self.W)
        return dotted, finish_update
| import numpy
from ...import describe
from ...describe import Dimension, Synapses, Gradient
from .._lsuv import LSUVinit
from ..ops import NumpyOps
from ...api import layerize
from .model import Model
from ...extra.load_nlp import get_vectors
try:
import cupy
except ImportError:
cupy = None
@layerize
def get_word_ids(docs, drop=0.):
    '''Get word forms.'''
    seqs = []
    ops = Model.ops
    for doc in docs:
        # Extra trailing slot stays 0 — presumably a terminator; confirm.
        arr = numpy.zeros((len(doc) + 1,), dtype='uint64')
        for token in doc:
            arr[token.i] = token.lex_id or token.orth
        arr[len(doc)] = 0
        seqs.append(ops.asarray(arr))
    return seqs, None


@describe.on_data(LSUVinit)
@describe.attributes(
    nM=Dimension("Vector dimensions"),
    nO=Dimension("Size of output"),
    W=Synapses(
        "A projection matrix, to change vector dimensionality",
        lambda obj: (obj.nO, obj.nM),
        lambda W, ops: ops.xavier_uniform_init(W)),
    d_W=Gradient("W"),
)
class SpacyVectors(Model):
    """Learned linear projection over a static spaCy vectors table."""
    name = 'spacy-vectors'

    def __init__(self, lang, nO):
        Model.__init__(self)
        self.nO = nO
        # This doesn't seem the cleverest solution,
        # but it ensures multiple models load the
        # same copy of spaCy if they're deserialised.
        vectors = get_vectors(self.ops, lang)
        self.lang = lang
        self.nM = vectors.shape[1]
        if self.nM == 0:
            raise ValueError(
                "Cannot create vectors table with dimension 0.\n"
                "If you're using pre-trained vectors, are the vectors loaded?")
        self.nV = vectors.shape[0]

    def begin_update(self, ids, drop=0.):
        vector_table = get_vectors(self.ops, self.lang)
        # Mod the ids into the table so out-of-vocabulary ids map to a row.
        vectors = vector_table[ids % vector_table.shape[0]]

        def finish_update(gradients, sgd=None):
            self.d_W += self.ops.batch_outer(gradients, vectors)
            if sgd is not None:
                sgd(self._mem.weights, self._mem.gradient, key=self.id)
            return None

        dotted = self.ops.batch_dot(vectors, self.W)
        return dotted, finish_update
| mit | Python |
b13a0bdbc3d66cc8fb63c916e9337e5e715e0ef4 | update options | mrpau/kolibri,mrpau/kolibri,indirectlylit/kolibri,learningequality/kolibri,mrpau/kolibri,learningequality/kolibri,learningequality/kolibri,indirectlylit/kolibri,learningequality/kolibri,indirectlylit/kolibri,mrpau/kolibri,indirectlylit/kolibri | kolibri/plugins/html5_viewer/options.py | kolibri/plugins/html5_viewer/options.py | import logging
logger = logging.getLogger(__name__)

# Source: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe#attr-sandbox
allowable_sandbox_tokens = {
    "allow-downloads-without-user-activation",
    "allow-forms",
    "allow-modals",
    "allow-orientation-lock",
    "allow-pointer-lock",
    "allow-popups",
    "allow-popups-to-escape-sandbox",
    "allow-presentation",
    "allow-same-origin",
    "allow-scripts",
    # BUG FIX: this entry carried a trailing space, so the real token
    # "allow-storage-access-by-user-activation" was always rejected.
    "allow-storage-access-by-user-activation",
    "allow-top-navigation",
    "allow-top-navigation-by-user-activation",
}


def clean_sandbox(sandbox_string):
    """
    Clean up sandbox string to ensure it only contains valid items.
    """
    sandbox_tokens = []
    illegal_tokens = []
    for token in sandbox_string.split(" "):
        if token in allowable_sandbox_tokens:
            sandbox_tokens.append(token)
        else:
            illegal_tokens.append(token)
    if illegal_tokens:
        # logger.warning — logger.warn is a deprecated alias.
        logger.warning(
            "Invalid sandbox token passed to options {}".format(
                " ".join(illegal_tokens)
            )
        )
    return " ".join(sandbox_tokens)


option_spec = {
    "HTML5": {
        "SANDBOX": {
            "type": "string",
            "default": "allow-scripts allow-same-origin allow-forms",  # TODO: revert to `allow-scripts` before merging
            "envvars": ("KOLIBRI_HTML5_SANDBOX",),
            "clean": clean_sandbox,
        }
    }
}
| import logging
logger = logging.getLogger(__name__)

# Source: https://developer.mozilla.org/en-US/docs/Web/HTML/Element/iframe#attr-sandbox
allowable_sandbox_tokens = {
    "allow-downloads-without-user-activation",
    "allow-forms",
    "allow-modals",
    "allow-orientation-lock",
    "allow-pointer-lock",
    "allow-popups",
    "allow-popups-to-escape-sandbox",
    "allow-presentation",
    "allow-same-origin",
    "allow-scripts",
    # BUG FIX: this entry had a trailing space, making the real token
    # "allow-storage-access-by-user-activation" always fail validation.
    "allow-storage-access-by-user-activation",
    "allow-top-navigation",
    "allow-top-navigation-by-user-activation",
}


def clean_sandbox(sandbox_string):
    """
    Clean up sandbox string to ensure it only contains valid items.
    """
    sandbox_tokens = []
    illegal_tokens = []
    for token in sandbox_string.split(" "):
        if token in allowable_sandbox_tokens:
            sandbox_tokens.append(token)
        else:
            illegal_tokens.append(token)
    if illegal_tokens:
        # Use logger.warning; logger.warn is a deprecated alias.
        logger.warning(
            "Invalid sandbox token passed to options {}".format(
                " ".join(illegal_tokens)
            )
        )
    return " ".join(sandbox_tokens)


option_spec = {
    "HTML5": {
        "SANDBOX": {
            "type": "string",
            "default": "allow-scripts allow-same-origin",  # TODO: revert to `allow-scripts` before merging
            "envvars": ("KOLIBRI_HTML5_SANDBOX",),
            "clean": clean_sandbox,
        }
    }
}
| mit | Python |
89e1040f4a3beae18655a16366e8661ae08378db | Add a /favicon.ico url to fix errors in dev/test | StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite,StartupsPoleEmploi/labonneboite | labonneboite/web/root/views.py | labonneboite/web/root/views.py | # coding: utf8
from flask import Blueprint, current_app
from flask import abort, send_from_directory, redirect, render_template, request
from labonneboite.common import doorbell
from labonneboite.common import pro
from labonneboite.conf import settings
from labonneboite.web.search.forms import CompanySearchForm
from labonneboite.web.utils import fix_csrf_session
rootBlueprint = Blueprint('root', __name__)


@rootBlueprint.route('/')
def home():
    """Landing page with the company search form."""
    fix_csrf_session()
    return render_template('home.html', form=CompanySearchForm())


@rootBlueprint.route('/favicon.ico')
def favicon():
    return send_from_directory(current_app.static_folder, 'images/favicon.ico')


@rootBlueprint.route('/robots.txt')
@rootBlueprint.route('/googleaece67026df0ee76.html')
def static_from_root():
    # Serve the requested path (minus the leading '/') from the static folder.
    return send_from_directory(current_app.static_folder, request.path[1:])


@rootBlueprint.route('/kit.pdf')
def kit():
    # The press kit is only exposed in the pro version.
    if pro.pro_version_enabled():
        return send_from_directory(current_app.static_folder, 'kit.pdf')
    abort(404)


@rootBlueprint.route('/espace-presse')
def press():
    return render_template('root/press.html', doorbell_tags=doorbell.get_tags('press'))


@rootBlueprint.route('/comment-faire-une-candidature-spontanee')
def lbb_help():
    return render_template('root/help.html', doorbell_tags=doorbell.get_tags('help'))


@rootBlueprint.route('/faq')
def faq():
    return render_template('root/faq.html', doorbell_tags=doorbell.get_tags('faq'))


@rootBlueprint.route('/conditions-generales')
def cgu():
    return render_template('root/cgu.html', host=settings.SERVER_NAME)


@rootBlueprint.route('/cookbook')
def cookbook():
    return render_template('root/cookbook.html')


@rootBlueprint.route('/stats')
def stats():
    return redirect('https://datastudio.google.com/open/0B0PPPCjOppNIdVNXVVM0QnJHNEE')
| # coding: utf8
from flask import Blueprint, current_app
from flask import abort, send_from_directory, redirect, render_template, request
from labonneboite.common import doorbell
from labonneboite.common import pro
from labonneboite.conf import settings
from labonneboite.web.search.forms import CompanySearchForm
from labonneboite.web.utils import fix_csrf_session
rootBlueprint = Blueprint('root', __name__)


@rootBlueprint.route('/')
def home():
    """Landing page with the company search form."""
    fix_csrf_session()
    return render_template('home.html', form=CompanySearchForm())


@rootBlueprint.route('/robots.txt')
@rootBlueprint.route('/googleaece67026df0ee76.html')
def static_from_root():
    # Serve the requested path (without the leading '/') from static files.
    return send_from_directory(current_app.static_folder, request.path[1:])


@rootBlueprint.route('/kit.pdf')
def kit():
    # Press kit is pro-version only.
    if pro.pro_version_enabled():
        return send_from_directory(current_app.static_folder, 'kit.pdf')
    abort(404)


@rootBlueprint.route('/espace-presse')
def press():
    return render_template('root/press.html', doorbell_tags=doorbell.get_tags('press'))


@rootBlueprint.route('/comment-faire-une-candidature-spontanee')
def lbb_help():
    return render_template('root/help.html', doorbell_tags=doorbell.get_tags('help'))


@rootBlueprint.route('/faq')
def faq():
    return render_template('root/faq.html', doorbell_tags=doorbell.get_tags('faq'))


@rootBlueprint.route('/conditions-generales')
def cgu():
    return render_template('root/cgu.html', host=settings.SERVER_NAME)


@rootBlueprint.route('/cookbook')
def cookbook():
    return render_template('root/cookbook.html')


@rootBlueprint.route('/stats')
def stats():
    return redirect('https://datastudio.google.com/open/0B0PPPCjOppNIdVNXVVM0QnJHNEE')
| agpl-3.0 | Python |
fa4dd74bca5d89db946dae78c20e2f5f0f758dd7 | Add fake @decorator_tag | whyflyru/django-cacheops,ErwinJunge/django-cacheops,bourivouh/django-cacheops,Suor/django-cacheops,rutube/django-cacheops,andwun/django-cacheops,LPgenerator/django-cacheops | cacheops/fake.py | cacheops/fake.py | from funcy import ContextDecorator
from django.db.models import Manager
from django.db.models.query import QuerySet
# query
def cached_as(*samples, **kwargs):
    """No-op stand-in: the decorator returns the function unchanged."""
    def decorator(func):
        return func
    return decorator

cached_view_as = cached_as


def install_cacheops():
    """Patch Manager/QuerySet with inert cacheops hooks."""
    if not hasattr(Manager, 'get_queryset'):
        Manager.get_queryset = lambda self: self.get_query_set()

    # query
    QuerySet._cache_key = lambda self, extra=None: None
    QuerySet.nocache = lambda self: self
    QuerySet.cache = lambda self: self
    QuerySet.inplace = lambda self: self

    Manager.nocache = lambda self: self.get_queryset().nocache()
    Manager.cache = lambda self: self.get_queryset().cache()
    Manager.inplace = lambda self: self.get_queryset().inplace()


# invalidation — all no-ops in the fake implementation
def invalidate_obj(obj):
    pass

def invalidate_model(model):
    pass

def invalidate_all():
    pass

# simple
from cacheops.simple import BaseCache, CacheMiss

class DummyCache(BaseCache):
    """Backend that never stores anything: every get() is a miss."""
    def get(self, cache_key):
        raise CacheMiss

    def set(self, cache_key, data, timeout=None):
        pass

    def delete(self, cache_key):
        pass

cache = DummyCache()
cached = cache.cached
cached_view = cache.cached_view
file_cache = DummyCache()

# templates
def invalidate_fragment(fragment_name, *extra):
    pass


class _no_invalidation(ContextDecorator):
    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass

no_invalidation = _no_invalidation()


def decorator_tag(func=None, takes_context=False):
    raise NotImplementedError
| from funcy import ContextDecorator
from django.db.models import Manager
from django.db.models.query import QuerySet
# query
def cached_as(*samples, **kwargs):
    """No-op stand-in: decorating leaves the function unchanged."""
    def decorator(func):
        return func
    return decorator

cached_view_as = cached_as


def install_cacheops():
    """Attach inert cacheops hooks to Manager and QuerySet."""
    if not hasattr(Manager, 'get_queryset'):
        Manager.get_queryset = lambda self: self.get_query_set()

    # query
    QuerySet._cache_key = lambda self, extra=None: None
    QuerySet.nocache = lambda self: self
    QuerySet.cache = lambda self: self
    QuerySet.inplace = lambda self: self

    Manager.nocache = lambda self: self.get_queryset().nocache()
    Manager.cache = lambda self: self.get_queryset().cache()
    Manager.inplace = lambda self: self.get_queryset().inplace()


# invalidation — deliberate no-ops
def invalidate_obj(obj):
    pass

def invalidate_model(model):
    pass

def invalidate_all():
    pass

# simple
from cacheops.simple import BaseCache, CacheMiss

class DummyCache(BaseCache):
    """Cache backend that stores nothing; get() always misses."""
    def get(self, cache_key):
        raise CacheMiss

    def set(self, cache_key, data, timeout=None):
        pass

    def delete(self, cache_key):
        pass

cache = DummyCache()
cached = cache.cached
cached_view = cache.cached_view
file_cache = DummyCache()

# templates
def invalidate_fragment(fragment_name, *extra):
    pass


class _no_invalidation(ContextDecorator):
    def __enter__(self):
        pass

    def __exit__(self, type, value, traceback):
        pass

no_invalidation = _no_invalidation()
| bsd-3-clause | Python |
eaa397715c2f667c375e936581296feb609c799d | Add tabswitching.typical_25 benchmark | M4sse/chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dednal/chromium.src,dednal/chromium.src,chuan9/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Just-D/chromium-1,axinging/chromium-crosswalk,jaruba/chromium.src,ondra-novak/chromium.src,ltilve/chromium,ondra-novak/chromium.src,ondra-novak/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,jaruba/chromium.src,Just-D/chromium-1,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,dushu1203/chromium.src,ltilve/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk,Chilledheart/chromium,ondra-novak/chromium.src,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,Fireblend/chromium-crosswalk,dushu1203/chromium.src,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,dednal/chro
mium.src,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,chuan9/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,fujunwei/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,chuan9/chromium-crosswalk,Chilledheart/chromium,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,markYoungH/chromium.src,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,M4sse/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,markYoungH/chromium.src,dednal/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,markYoungH/chromium.src,ondra-novak/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,M4sse/chromium.src,fujunwei/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,Chilledheart/chromium,hgl888/chromi
um-crosswalk-efl,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,dednal/chromium.src,Fireblend/chromium-crosswalk,Just-D/chromium-1,markYoungH/chromium.src,ltilve/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,dednal/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,M4sse/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,chuan9/chromium-crosswalk,hgl88
8/chromium-crosswalk-efl,Chilledheart/chromium,fujunwei/chromium-crosswalk,jaruba/chromium.src,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl | tools/perf/benchmarks/tab_switching.py | tools/perf/benchmarks/tab_switching.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import tab_switching
class TabSwitchingTop10(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/top_10.py'
class TabSwitchingTypical25(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/typical_25.py'
class TabSwitchingFiveBlankTabs(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/five_blank_pages.py'
options = {'pageset_repeat': 10}
@test.Disabled('android') # crbug.com/379561
class TabSwitchingToughEnergyCases(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/tough_energy_cases.py'
options = {'pageset_repeat': 10}
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import tab_switching
class TabSwitchingTop10(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/top_10.py'
class TabSwitchingFiveBlankTabs(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/five_blank_pages.py'
options = {'pageset_repeat': 10}
@test.Disabled('android') # crbug.com/379561
class TabSwitchingToughEnergyCases(test.Test):
test = tab_switching.TabSwitching
page_set = 'page_sets/tough_energy_cases.py'
options = {'pageset_repeat': 10}
| bsd-3-clause | Python |
f57616df11408514e43dec70aee121b4221039a7 | store the total budget | mdietrichc2c/vertical-ngo,jorsea/vertical-ngo,jorsea/vertical-ngo,yvaucher/vertical-ngo | logistic_budget/model/sale_order.py | logistic_budget/model/sale_order.py | # -*- coding: utf-8 -*-
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import models, fields, api
class SaleOrder(models.Model):
_inherit = 'sale.order'
budget_holder_id = fields.Many2one(
'res.users',
string='Budget Holder')
date_budget_holder = fields.Datetime(
'Budget Holder Validation Date')
budget_holder_remark = fields.Text(
'Budget Holder Remark')
finance_officer_id = fields.Many2one(
'res.users',
string='Finance Officer')
date_finance_officer = fields.Datetime(
'Finance Officer Validation Date')
finance_officer_remark = fields.Text(
'Finance Officer Remark')
total_budget = fields.Float("Total Budget", compute='_total_budget',
store=True)
@api.one
@api.depends('order_line.budget_amount')
def _total_budget(self):
self.total_budget = sum([l.budget_amount for l in self.order_line])
@api.onchange('budget_holder_id')
def onchange_set_date_budget_holder(self):
self.date_budget_holder = fields.Datetime.now()
@api.onchange('finance_officer_id')
def onchange_set_date_finance_officer(self):
self.date_finance_officer = fields.Datetime.now()
@api.multi
def over_budget(self):
self.ensure_one()
return self.amount_total > self.total_budget
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
budget_amount = fields.Float("Budget Amount")
| # -*- coding: utf-8 -*-
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp import models, fields, api
class SaleOrder(models.Model):
_inherit = 'sale.order'
budget_holder_id = fields.Many2one(
'res.users',
string='Budget Holder')
date_budget_holder = fields.Datetime(
'Budget Holder Validation Date')
budget_holder_remark = fields.Text(
'Budget Holder Remark')
finance_officer_id = fields.Many2one(
'res.users',
string='Finance Officer')
date_finance_officer = fields.Datetime(
'Finance Officer Validation Date')
finance_officer_remark = fields.Text(
'Finance Officer Remark')
total_budget = fields.Float("Total Budget", compute='_total_budget')
@api.one
@api.depends('order_line.budget_amount')
def _total_budget(self):
self.total_budget = sum([l.budget_amount for l in self.order_line])
@api.onchange('budget_holder_id')
def onchange_set_date_budget_holder(self):
self.date_budget_holder = fields.Datetime.now()
@api.onchange('finance_officer_id')
def onchange_set_date_finance_officer(self):
self.date_finance_officer = fields.Datetime.now()
@api.multi
def over_budget(self):
self.ensure_one()
return self.amount_total > self.total_budget
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
budget_amount = fields.Float("Budget Amount")
| agpl-3.0 | Python |
a5847a40104e17d243e0b41ae40bc4820dcd9725 | update head comments typo | Maple0/Algorithm,Maple0/Algorithm | leetcode/merge-overlapping-intervals.py | leetcode/merge-overlapping-intervals.py | #Author: Maple0
#Github:https://github.com/Maple0
#4th Sep 2016
#Given a collection of intervals, merge all overlapping intervals.
#For example,
#Given [1,3],[2,6],[8,10],[15,18],
#return [1,6],[8,10],[15,18].
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Merge_ResultSet(object):
def __init__(self,is_modified,merged_numbers):
self.is_modified = is_modified
self.merged_numbers = merged_numbers
class Solution(object):
def inner_merge(self,numbers):
is_modified=False
length=len(numbers)
merged_numbers=[numbers[0]]
for i in range(1,length):
c_start=numbers[i].start
c_end=numbers[i].end
check_status=0
for merged_num in merged_numbers:
m_start=merged_num.start
m_end=merged_num.end
if c_start >= m_start and c_end <=m_end:
check_status=1
if c_start < m_start and c_end>=m_start and c_end <= m_end:
merged_num.start=c_start
check_status=2
elif c_start >= m_start and c_start<=m_end and c_end > m_end:
merged_num.end=c_end
check_status=2
elif c_start<= m_start and c_end>=m_end:
if merged_num.start!=c_start:
merged_num.start=c_start
check_status=2
if merged_num.end!=c_end:
merged_num.end=c_end
check_status=2
if check_status==0:
merged_numbers.append(numbers[i])
if check_status==2:
is_modified=True
return Merge_ResultSet(is_modified,merged_numbers)
def merge(self, numbers):
length=len(numbers)
if length < 2:
return numbers
result=self.inner_merge(numbers)
while result.is_modified==True:
result=self.inner_merge(numbers)
return result.merged_numbers
num3=[Interval(1,3),Interval(0,6),Interval(7,7),Interval(8,9),Interval(0,10)]
results=Solution().merge(num3)
for x in results:
print(x.start,x.end)
| #Author: Maple0
#Github:https://github.com/Maple0
#4th Sep 2016
#pGiven a collection of intervals, merge all overlapping intervals.
#For example,
#Given [1,3],[2,6],[8,10],[15,18],
#return [1,6],[8,10],[15,18].
class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Merge_ResultSet(object):
def __init__(self,is_modified,merged_numbers):
self.is_modified = is_modified
self.merged_numbers = merged_numbers
class Solution(object):
def inner_merge(self,numbers):
is_modified=False
length=len(numbers)
merged_numbers=[numbers[0]]
for i in range(1,length):
c_start=numbers[i].start
c_end=numbers[i].end
check_status=0
for merged_num in merged_numbers:
m_start=merged_num.start
m_end=merged_num.end
if c_start >= m_start and c_end <=m_end:
check_status=1
if c_start < m_start and c_end>=m_start and c_end <= m_end:
merged_num.start=c_start
check_status=2
elif c_start >= m_start and c_start<=m_end and c_end > m_end:
merged_num.end=c_end
check_status=2
elif c_start<= m_start and c_end>=m_end:
if merged_num.start!=c_start:
merged_num.start=c_start
check_status=2
if merged_num.end!=c_end:
merged_num.end=c_end
check_status=2
if check_status==0:
merged_numbers.append(numbers[i])
if check_status==2:
is_modified=True
return Merge_ResultSet(is_modified,merged_numbers)
def merge(self, numbers):
length=len(numbers)
if length < 2:
return numbers
result=self.inner_merge(numbers)
while result.is_modified==True:
result=self.inner_merge(numbers)
return result.merged_numbers
num3=[Interval(1,3),Interval(0,6),Interval(7,7),Interval(8,9),Interval(0,10)]
results=Solution().merge(num3)
for x in results:
print(x.start,x.end)
| mit | Python |
b68149f807cd3814cb7ed7e9c695e80f9400a66b | Update _version.py | missionpinball/mpf,missionpinball/mpf | mpf/_version.py | mpf/_version.py | """Holds various Version strings of MPF.
This modules holds the MPF version strings, including the version of BCP it
needs and the config file version it needs.
It's used internally for all sorts of things, from printing the output of the
`mpf --version` command, to making sure any processes connected via BCP are
the proper versions, to automatically triggering new builds and deployments to
PyPI.
"""
__version__ = '0.53.0-dev.39'
'''The full version of MPF.'''
__short_version__ = '0.53'
'''The major.minor version of MPF.'''
__bcp_version__ = '1.1'
'''The version of BCP this build of MPF uses.'''
__config_version__ = '5'
'''The config file version this build of MPF uses.'''
__show_version__ = '5'
'''The show format version this build of MPF uses.'''
version = "MPF v{}".format(__version__)
'''A friendly version string for this build of MPF.'''
extended_version = "MPF v{}, Config version:{}, Show version: {}, " \
"BCP version:{}".format(__version__, __config_version__,
__show_version__, __bcp_version__)
'''An extended version string that includes the MPF version, show version,
and BCP versions used in this build of MPF.'''
__api__ = ['version',
'__short_version__',
'__bcp_version__',
'__config_version__',
'__show_version__',
'version',
'extended_version']
| """Holds various Version strings of MPF.
This modules holds the MPF version strings, including the version of BCP it
needs and the config file version it needs.
It's used internally for all sorts of things, from printing the output of the
`mpf --version` command, to making sure any processes connected via BCP are
the proper versions, to automatically triggering new builds and deployments to
PyPI.
"""
__version__ = '0.53.0-dev.38'
'''The full version of MPF.'''
__short_version__ = '0.53'
'''The major.minor version of MPF.'''
__bcp_version__ = '1.1'
'''The version of BCP this build of MPF uses.'''
__config_version__ = '5'
'''The config file version this build of MPF uses.'''
__show_version__ = '5'
'''The show format version this build of MPF uses.'''
version = "MPF v{}".format(__version__)
'''A friendly version string for this build of MPF.'''
extended_version = "MPF v{}, Config version:{}, Show version: {}, " \
"BCP version:{}".format(__version__, __config_version__,
__show_version__, __bcp_version__)
'''An extended version string that includes the MPF version, show version,
and BCP versions used in this build of MPF.'''
__api__ = ['version',
'__short_version__',
'__bcp_version__',
'__config_version__',
'__show_version__',
'version',
'extended_version']
| mit | Python |
39946f9fa5127d240d7147d50b676ad083514e85 | Add custom debug toolbar URL mount point. | fladi/django-campus02,fladi/django-campus02 | campus02/urls.py | campus02/urls.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
'',
url(r'^', include('django.contrib.auth.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^web/', include('campus02.web.urls', namespace='web')),
url(r'^', include('campus02.base.urls', namespace='base')),
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns(
'',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
'',
url(r'^', include('django.contrib.auth.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^web/', include('campus02.web.urls', namespace='web')),
url(r'^', include('campus02.base.urls', namespace='base')),
)
| mit | Python |
c25947147ec592142ddbce7dc9e25e3969255c9a | Check iplayer installed | ianmiell/shutit,ianmiell/shutit,ianmiell/shutit | library/get_iplayer/get_iplayer.py | library/get_iplayer/get_iplayer.py | #Copyright (C) 2014 OpenBet Limited
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from shutit_module import ShutItModule
class get_iplayer(ShutItModule):
def build(self,shutit):
shutit.install('git')
shutit.install('liblwp-online-perl')
shutit.install('rtmpdump')
shutit.install('ffmpeg')
shutit.install('mplayer')
shutit.install('atomicparsley')
shutit.install('id3v2')
shutit.install('libmp3-info-perl')
shutit.install('libmp3-tag-perl')
shutit.install('libnet-smtp-ssl-perl')
shutit.install('libnet-smtp-tls-butmaintained-perl')
shutit.install('libxml-simple-perl')
shutit.send_and_expect('git clone git://git.infradead.org/get_iplayer.git')
shutit.send_and_expect('cd get_iplayer')
shutit.send_and_expect('chmod 755 get_iplayer')
shutit.send_and_expect('./get_iplayer')
shutit.send_and_expect('cd -')
return True
def is_installed(self,shutit):
return shutit.file_exists('~/get_iplayer/get_iplayer')
def module():
return get_iplayer(
'shutit.tk.get_iplayer.get_iplayer', 0.324,
description='iPlayer downloader. See ' +
'http://www.infradead.org/get_iplayer/html/get_iplayer.html',
depends=['shutit.tk.setup']
)
| #Copyright (C) 2014 OpenBet Limited
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from shutit_module import ShutItModule
class get_iplayer(ShutItModule):
def build(self,shutit):
shutit.install('git')
shutit.install('liblwp-online-perl')
shutit.install('rtmpdump')
shutit.install('ffmpeg')
shutit.install('mplayer')
shutit.install('atomicparsley')
shutit.install('id3v2')
shutit.install('libmp3-info-perl')
shutit.install('libmp3-tag-perl')
shutit.install('libnet-smtp-ssl-perl')
shutit.install('libnet-smtp-tls-butmaintained-perl')
shutit.install('libxml-simple-perl')
shutit.send_and_expect('git clone git://git.infradead.org/get_iplayer.git')
shutit.send_and_expect('cd get_iplayer')
shutit.send_and_expect('chmod 755 get_iplayer')
shutit.send_and_expect('./get_iplayer')
return True
def is_installed(self,shutit):
return False
def module():
return get_iplayer(
'shutit.tk.get_iplayer.get_iplayer', 0.324,
description='iPlayer downloader. See ' +
'http://www.infradead.org/get_iplayer/html/get_iplayer.html',
depends=['shutit.tk.setup']
)
| mit | Python |
f88a6c65fc0e6a1c65284890052b8e1ceb4081ec | Add Kennard-Stone algorithm | skearnes/muv | muv/__init__.py | muv/__init__.py | """
Miscellaneous utilities.
"""
import numpy as np
def kennard_stone(d, k):
"""
Use the Kennard-Stone algorithm to select k maximally separated
examples from a dataset.
Algorithm
---------
1. Choose the two examples separated by the largest distance. In the
case of a tie, use the first examples returned by np.where.
2. For the remaining k - 2 selections, choose the example with the
greatest distance to the closest example among all previously
chosen points.
Parameters
----------
d : ndarray
Pairwise distance matrix between dataset examples.
k : int
Number of examples to select.
"""
assert 1 < k < d.shape[0]
chosen = []
# choose initial points
first = np.where(d == np.amax(d))
chosen.append(first[0][0])
chosen.append(first[1][0])
d = np.ma.array(d, mask=np.ones_like(d, dtype=bool))
# choose remaining points
while len(chosen) < k:
d.mask[:, chosen] = False
d.mask[chosen] = True
print d
p = np.ma.argmax(np.ma.amin(d, axis=1))
chosen.append(p)
return chosen
| bsd-3-clause | Python | |
3a036c947835c51bc7dcf89b2b4e3ae6ab32de75 | Return bytes, not a bytearray | pypa/linehaul | linehaul/protocol/line_receiver.py | linehaul/protocol/line_receiver.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BufferTooLargeError(Exception):
pass
class TruncatedLineError(Exception):
def __init__(self, *args, line, **kwargs):
super().__init__(*args, **kwargs)
self.line = line
class LineReceiver:
def __init__(self, callback, *args, max_line_size=None, **kwargs):
super().__init__(*args, **kwargs)
if max_line_size is None:
max_line_size = 16384
self._callback = callback
self._buffer = bytearray()
self._searched = 0
self._max_line_size = max_line_size
def recieve_data(self, data):
self._buffer += data
if len(self._buffer) > self._max_line_size:
raise BufferTooLargeError
lines = []
while True:
try:
found = self._buffer.index(b"\n", self._searched)
except ValueError:
self._searched = len(self._buffer)
break
else:
line = self._callback(self._buffer[: found + 1])
if line is not None:
lines.append(line)
del self._buffer[: found + 1]
self._searched = 0
return lines
def close(self):
if len(self._buffer):
raise TruncatedLineError("Left over data in buffer.", line=bytes(self._buffer))
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BufferTooLargeError(Exception):
pass
class TruncatedLineError(Exception):
def __init__(self, *args, line, **kwargs):
super().__init__(*args, **kwargs)
self.line = line
class LineReceiver:
def __init__(self, callback, *args, max_line_size=None, **kwargs):
super().__init__(*args, **kwargs)
if max_line_size is None:
max_line_size = 16384
self._callback = callback
self._buffer = bytearray()
self._searched = 0
self._max_line_size = max_line_size
def recieve_data(self, data):
self._buffer += data
if len(self._buffer) > self._max_line_size:
raise BufferTooLargeError
lines = []
while True:
try:
found = self._buffer.index(b"\n", self._searched)
except ValueError:
self._searched = len(self._buffer)
break
else:
line = self._callback(self._buffer[: found + 1])
if line is not None:
lines.append(line)
del self._buffer[: found + 1]
self._searched = 0
return lines
def close(self):
if len(self._buffer):
raise TruncatedLineError("Left over data in buffer.", line=self._buffer)
| apache-2.0 | Python |
44186c1a03d8504aad8a68f1261538801f689c03 | Make sure backends is a fully functional list | bacontext/mopidy,pacificIT/mopidy,vrs01/mopidy,priestd09/mopidy,diandiankan/mopidy,SuperStarPL/mopidy,diandiankan/mopidy,jmarsik/mopidy,ZenithDK/mopidy,ZenithDK/mopidy,priestd09/mopidy,ali/mopidy,bencevans/mopidy,rawdlite/mopidy,swak/mopidy,bencevans/mopidy,swak/mopidy,glogiotatidis/mopidy,mokieyue/mopidy,abarisain/mopidy,ZenithDK/mopidy,glogiotatidis/mopidy,jmarsik/mopidy,ZenithDK/mopidy,bencevans/mopidy,jodal/mopidy,quartz55/mopidy,liamw9534/mopidy,jmarsik/mopidy,bacontext/mopidy,mopidy/mopidy,tkem/mopidy,tkem/mopidy,hkariti/mopidy,vrs01/mopidy,SuperStarPL/mopidy,woutervanwijk/mopidy,pacificIT/mopidy,diandiankan/mopidy,quartz55/mopidy,mokieyue/mopidy,vrs01/mopidy,jodal/mopidy,adamcik/mopidy,pacificIT/mopidy,hkariti/mopidy,dbrgn/mopidy,bacontext/mopidy,bencevans/mopidy,hkariti/mopidy,rawdlite/mopidy,rawdlite/mopidy,quartz55/mopidy,mokieyue/mopidy,ali/mopidy,quartz55/mopidy,swak/mopidy,liamw9534/mopidy,dbrgn/mopidy,bacontext/mopidy,woutervanwijk/mopidy,diandiankan/mopidy,kingosticks/mopidy,jcass77/mopidy,pacificIT/mopidy,ali/mopidy,jodal/mopidy,priestd09/mopidy,glogiotatidis/mopidy,mopidy/mopidy,kingosticks/mopidy,rawdlite/mopidy,jmarsik/mopidy,SuperStarPL/mopidy,vrs01/mopidy,swak/mopidy,kingosticks/mopidy,dbrgn/mopidy,mopidy/mopidy,tkem/mopidy,mokieyue/mopidy,tkem/mopidy,glogiotatidis/mopidy,jcass77/mopidy,abarisain/mopidy,ali/mopidy,dbrgn/mopidy,SuperStarPL/mopidy,hkariti/mopidy,adamcik/mopidy,adamcik/mopidy,jcass77/mopidy | mopidy/core/actor.py | mopidy/core/actor.py | import itertools
import pykka
from mopidy.audio import AudioListener
from .current_playlist import CurrentPlaylistController
from .library import LibraryController
from .playback import PlaybackController
from .stored_playlists import StoredPlaylistsController
class Core(pykka.ThreadingActor, AudioListener):
#: The current playlist controller. An instance of
#: :class:`mopidy.core.CurrentPlaylistController`.
current_playlist = None
#: The library controller. An instance of
# :class:`mopidy.core.LibraryController`.
library = None
#: The playback controller. An instance of
#: :class:`mopidy.core.PlaybackController`.
playback = None
#: The stored playlists controller. An instance of
#: :class:`mopidy.core.StoredPlaylistsController`.
stored_playlists = None
def __init__(self, audio=None, backends=None):
super(Core, self).__init__()
self.backends = Backends(backends)
self.current_playlist = CurrentPlaylistController(core=self)
self.library = LibraryController(backends=self.backends, core=self)
self.playback = PlaybackController(
audio=audio, backends=self.backends, core=self)
self.stored_playlists = StoredPlaylistsController(
backends=self.backends, core=self)
@property
def uri_schemes(self):
"""List of URI schemes we can handle"""
futures = [b.uri_schemes for b in self.backends]
results = pykka.get_all(futures)
uri_schemes = itertools.chain(*results)
return sorted(uri_schemes)
def reached_end_of_stream(self):
self.playback.on_end_of_track()
class Backends(list):
def __init__(self, backends):
super(Backends, self).__init__(backends)
self.by_uri_scheme = {}
for backend in backends:
uri_schemes = backend.uri_schemes.get()
for uri_scheme in uri_schemes:
self.by_uri_scheme[uri_scheme] = backend
| import itertools
import pykka
from mopidy.audio import AudioListener
from .current_playlist import CurrentPlaylistController
from .library import LibraryController
from .playback import PlaybackController
from .stored_playlists import StoredPlaylistsController
class Core(pykka.ThreadingActor, AudioListener):
#: The current playlist controller. An instance of
#: :class:`mopidy.core.CurrentPlaylistController`.
current_playlist = None
#: The library controller. An instance of
# :class:`mopidy.core.LibraryController`.
library = None
#: The playback controller. An instance of
#: :class:`mopidy.core.PlaybackController`.
playback = None
#: The stored playlists controller. An instance of
#: :class:`mopidy.core.StoredPlaylistsController`.
stored_playlists = None
def __init__(self, audio=None, backends=None):
super(Core, self).__init__()
self.backends = Backends(backends)
self.current_playlist = CurrentPlaylistController(core=self)
self.library = LibraryController(backends=self.backends, core=self)
self.playback = PlaybackController(
audio=audio, backends=self.backends, core=self)
self.stored_playlists = StoredPlaylistsController(
backends=self.backends, core=self)
@property
def uri_schemes(self):
"""List of URI schemes we can handle"""
futures = [b.uri_schemes for b in self.backends]
results = pykka.get_all(futures)
uri_schemes = itertools.chain(*results)
return sorted(uri_schemes)
def reached_end_of_stream(self):
self.playback.on_end_of_track()
class Backends(object):
def __init__(self, backends):
self._backends = backends
uri_schemes_by_backend = {
backend: backend.uri_schemes.get()
for backend in backends}
self.by_uri_scheme = {
uri_scheme: backend
for backend, uri_schemes in uri_schemes_by_backend.items()
for uri_scheme in uri_schemes}
def __len__(self):
return len(self._backends)
def __getitem__(self, key):
return self._backends[key]
| apache-2.0 | Python |
f7909cd72361d06bac259fea0cb021a410885336 | Add compulsory keywords | musalbas/listentotwitter,musalbas/listentotwitter,musalbas/listentotwitter | listentotwitter/keywordsmanager.py | listentotwitter/keywordsmanager.py | import time
from listentotwitter import socketio
from listentotwitter.tweetanalyser import TweetAnalyser
from listentotwitter.tweetstreamer import TweetStreamer
def keyword_test(keyword):
if len(keyword) >= 3 and len(keyword) <= 15:
return True
else:
return False
class KeywordsManager:
max_keywords = 100
ping_timeout = 30
compulsory_keywords = ('lol',)
def __init__(self):
self._keywords_tracking = []
self._keywords_info = {}
self._tweetanalyser = TweetAnalyser(socketio)
self._tweetstreamer = TweetStreamer(self._tweetanalyser.incoming_tweet)
for keyword in self.compulsory_keywords:
self.ping_keyword(keyword)
def _get_dead_keywords(self):
dead_keywords = []
for keyword in self._keywords_tracking:
if keyword not in self.compulsory_keywords and time.time() - self._keywords_info[keyword]['last_ping'] > self.ping_timeout:
dead_keywords.append(keyword)
return dead_keywords
def _purge_dead_keywords(self):
for keyword in self._get_dead_keywords():
self._untrack_keyword(keyword)
def _untrack_keyword(self, keyword):
if keyword in self._keywords_tracking:
self._keywords_tracking.remove(keyword)
del self._keywords_info[keyword]
def ping_keyword(self, keyword):
if keyword in self._keywords_tracking:
self._keywords_info[keyword]['last_ping'] = time.time()
return
self._purge_dead_keywords()
if len(self._keywords_tracking) >= self.max_keywords:
return # TODO display error message to user
self._keywords_tracking.append(keyword)
self._keywords_info[keyword] = {}
self._keywords_info[keyword]['last_ping'] = time.time()
self._tweetstreamer.update_keywords_tracking(self._keywords_tracking)
self._tweetanalyser.update_keywords_tracking(self._keywords_tracking)
| import time
from listentotwitter import socketio
from listentotwitter.tweetanalyser import TweetAnalyser
from listentotwitter.tweetstreamer import TweetStreamer
def keyword_test(keyword):
if len(keyword) >= 3 and len(keyword) <= 15:
return True
else:
return False
class KeywordsManager:
max_keywords = 100
ping_timeout = 30
def __init__(self):
self._keywords_tracking = []
self._keywords_info = {}
self._tweetanalyser = TweetAnalyser(socketio)
self._tweetstreamer = TweetStreamer(self._tweetanalyser.incoming_tweet)
def _get_dead_keywords(self):
dead_keywords = []
for keyword in self._keywords_tracking:
if time.time() - self._keywords_info[keyword]['last_ping'] > self.ping_timeout:
dead_keywords.append(keyword)
return dead_keywords
def _purge_dead_keywords(self):
for keyword in self._get_dead_keywords():
self._untrack_keyword(keyword)
def _untrack_keyword(self, keyword):
if keyword in self._keywords_tracking:
self._keywords_tracking.remove(keyword)
del self._keywords_info[keyword]
def ping_keyword(self, keyword):
if keyword in self._keywords_tracking:
self._keywords_info[keyword]['last_ping'] = time.time()
return
self._purge_dead_keywords()
if len(self._keywords_tracking) >= self.max_keywords:
return # TODO display error message to user
self._keywords_tracking.append(keyword)
self._keywords_info[keyword] = {}
self._keywords_info[keyword]['last_ping'] = time.time()
self._tweetstreamer.update_keywords_tracking(self._keywords_tracking)
self._tweetanalyser.update_keywords_tracking(self._keywords_tracking)
| agpl-3.0 | Python |
2fbbdb3725ec5abe73d633618e318e958617acd3 | Prepend file path prefix to XML doc file paths | MarquisLP/Sidewalk-Champion | lib/custom_data/character_loader_new.py | lib/custom_data/character_loader_new.py | """This module loads character data from XML files and stores them in
CharacterData objects that can be read by the game engine.
Attributes:
CHARACTER_LIST_PATH (String): The filepath for the text file which
lists the paths to all of the characters' XML files.
Each path is separated by a new-line.
FILEPATH_PREFIX (String): The file path of the root directory where
all character data files are kept.
"""
import os
from lib.custom_data.xml_ops import load_xml_doc_as_object
CHARACTER_LIST_PATH = 'characters/character_list.txt'
CHARACTER_SCHEMA_PATH = os.path.join(os.path.dirname(os.path.realpath(
__file__)),
'character.xsd')
FILEPATH_PREFIX = 'characters/'
def load_character(line_index):
"""Load a specific character from the list specified in the
character list text file.
Args:
line_index: An integer for the line index of the character file's
file path within the character list text file.
Note that like most indexing schemes, this starts at 0.
Returns:
The specified character's data as a CharacterData object. If
there was an error loading data, None is returned instead.
None will also be returned if line_index exceeds the number of
lines in the text file.
"""
xml_paths = get_character_paths()
if line_index > len(xml_paths) - 1:
return None
character_path = xml_paths[line_index]
char_data = load_xml_doc_as_object(character_path, CHARACTER_SCHEMA_PATH)
if char_data is None:
return None
else:
prepend_prefix_to_filepaths(char_data)
return char_data
def get_character_paths():
"""Return a list of all of the filepaths to the XML files for
playable characters.
"""
with open(CHARACTER_LIST_PATH) as f:
character_path_list = [FILEPATH_PREFIX + line.rstrip('\n')
for line in f]
return character_path_list
def prepend_prefix_to_filepaths(character):
"""Preprend FILEPATH_PREFIX to all file path attributes of a
CharacterData object.
Args:
character (CharacterData): A CharacterData instance.
"""
character.mugshot_path = prepend_prefix(character.mugshot_path)
for action in character.actions:
action.spritesheet_path = prepend_prefix(action.spritesheet_path)
def prepend_prefix(filepath):
"""Return the filepath string prepended with FILEPATH_PREFIX."""
return FILEPATH_PREFIX + filepath
| """This module loads character data from XML files and stores them in
CharacterData objects that can be read by the game engine.
Attributes:
CHARACTER_LIST_PATH (String): The filepath for the text file which
lists the paths to all of the characters' XML files.
Each path is separated by a new-line.
FILEPATH_PREFIX (String): The file path of the root directory where
all character data files are kept.
"""
import os
from lib.custom_data.xml_ops import load_xml_doc_as_object
CHARACTER_LIST_PATH = 'characters/character_list.txt'
CHARACTER_SCHEMA_PATH = os.path.join(os.path.dirname(os.path.realpath(
__file__)),
'character.xsd')
FILEPATH_PREFIX = 'characters/'
def load_character(line_index):
"""Load a specific character from the list specified in the
character list text file.
Args:
line_index: An integer for the line index of the character file's
file path within the character list text file.
Note that like most indexing schemes, this starts at 0.
Returns:
The specified character's data as a CharacterData object. If
there was an error loading data, None is returned instead.
None will also be returned if line_index exceeds the number of
lines in the text file.
"""
xml_paths = get_character_paths()
if line_index > len(xml_paths) - 1:
return None
character_path = xml_paths[line_index]
char_data = load_xml_doc_as_object(character_path, CHARACTER_SCHEMA_PATH)
if char_data is None:
return None
else:
prepend_prefix_to_filepaths(char_data)
return char_data
def get_character_paths():
"""Return a list of all of the filepaths to the XML files for
playable characters.
"""
with open(CHARACTER_LIST_PATH) as f:
character_path_list = [line.rstrip('\n') for line in f]
return character_path_list
def prepend_prefix_to_filepaths(character):
"""Preprend FILEPATH_PREFIX to all file path attributes of a
CharacterData object.
Args:
character (CharacterData): A CharacterData instance.
"""
character.mugshot_path = prepend_prefix(character.mugshot_path)
for action in character.actions:
action.spritesheet_path = prepend_prefix(action.spritesheet_path)
def prepend_prefix(filepath):
"""Return the filepath string prepended with FILEPATH_PREFIX."""
return FILEPATH_PREFIX + filepath
| unlicense | Python |
fa7f3be4fb9b4b34081f4bb46eaf836a87e9931c | add support to afreecatv.com.tw | lyhiving/livestreamer,flijloku/livestreamer,wolftankk/livestreamer,chhe/livestreamer,chhe/livestreamer,flijloku/livestreamer,Feverqwe/livestreamer,okaywit/livestreamer,programming086/livestreamer,hmit/livestreamer,hmit/livestreamer,Saturn/livestreamer,Dobatymo/livestreamer,programming086/livestreamer,caorong/livestreamer,Saturn/livestreamer,intact/livestreamer,lyhiving/livestreamer,derrod/livestreamer,Masaz-/livestreamer,derrod/livestreamer,charmander/livestreamer,intact/livestreamer,chrippa/livestreamer,wolftankk/livestreamer,Dobatymo/livestreamer,chrippa/livestreamer,Klaudit/livestreamer,Masaz-/livestreamer,charmander/livestreamer,okaywit/livestreamer,Klaudit/livestreamer,Feverqwe/livestreamer,caorong/livestreamer | src/livestreamer/plugins/afreecatv.py | src/livestreamer/plugins/afreecatv.py | import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.plugin.api.utils import parse_query
from livestreamer.stream import RTMPStream
VIEW_LIVE_API_URL = "http://api.afreeca.tv/live/view_live.php"
VIEW_LIVE_API_URL_TW = "http://api.afreecatv.com.tw/live/view_live.php"
_url_re = re.compile("http(s)?://(\w+\.)?(afreecatv.com.tw|afreeca.tv)/(?P<channel>[\w\-_]+)")
_url_re_tw = re.compile("http(s)?://(\w+\.)?(afreecatv.com.tw)/(?P<channel>[\w\-_]+)")
_flashvars_re = re.compile('<param name="flashvars" value="([^"]+)" />')
_flashvars_schema = validate.Schema(
validate.transform(_flashvars_re.findall),
validate.get(0),
validate.transform(parse_query),
validate.any(
{
"s": validate.text,
"id": validate.text
},
{}
)
)
_view_live_schema = validate.Schema(
{
"channel": {
"strm": [{
"bps": validate.text,
"purl": validate.url(scheme="rtmp")
}]
},
},
validate.get("channel"),
validate.get("strm")
)
class AfreecaTV(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
flashvars = http.get(self.url, schema=_flashvars_schema)
if not flashvars:
return
params = {
"rt": "json",
"lc": "en_US",
"pt": "view",
"bpw": "",
"bid": flashvars["id"],
"adok": "",
"bno": ""
}
if re.search(_url_re_tw, self.url):
res = http.get(VIEW_LIVE_API_URL_TW, params=params)
else:
res = http.get(VIEW_LIVE_API_URL, params=params)
streams = http.json(res, schema=_view_live_schema)
for stream in streams:
stream_name = "{0}p".format(stream["bps"])
stream_params = {
"rtmp": stream["purl"],
"live": True
}
yield stream_name, RTMPStream(self.session, stream_params)
__plugin__ = AfreecaTV
| import re
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.plugin.api.utils import parse_query
from livestreamer.stream import RTMPStream
VIEW_LIVE_API_URL = "http://api.afreeca.tv/live/view_live.php"
_url_re = re.compile("http(s)?://(\w+\.)?afreeca.tv/(?P<channel>[\w\-_]+)")
_flashvars_re = re.compile('<param name="flashvars" value="([^"]+)" />')
_flashvars_schema = validate.Schema(
validate.transform(_flashvars_re.findall),
validate.get(0),
validate.transform(parse_query),
validate.any(
{
"s": validate.text,
"id": validate.text
},
{}
)
)
_view_live_schema = validate.Schema(
{
"channel": {
"strm": [{
"brt": validate.text,
"bps": validate.text,
"purl": validate.url(scheme="rtmp")
}]
},
},
validate.get("channel"),
validate.get("strm")
)
class AfreecaTV(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
flashvars = http.get(self.url, schema=_flashvars_schema)
if not flashvars:
return
params = {
"rt": "json",
"lc": "en_US",
"pt": "view",
"bpw": "",
"bid": flashvars["id"],
"adok": "",
"bno": ""
}
res = http.get(VIEW_LIVE_API_URL, params=params)
streams = http.json(res, schema=_view_live_schema)
for stream in streams:
stream_name = "{0}p".format(stream["bps"])
stream_params = {
"rtmp": stream["purl"],
"live": True
}
yield stream_name, RTMPStream(self.session, stream_params)
__plugin__ = AfreecaTV
| bsd-2-clause | Python |
dff7153d7f68dfed2e4dc002c22f34008378a81a | Remove unused imports | robotichead/NearBeach,robotichead/NearBeach,robotichead/NearBeach | locust_tests/load_testing_locus.py | locust_tests/load_testing_locus.py | # Import the locust libraries
import getpass
from locust import HttpUser, task, between
# Request the username and password to login into NearBeach
username = input("\nPlease enter the username: ")
password = getpass.getpass("\nPlease enter the password: ")
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
def on_start(self):
"""on_start is called when a Locust start before any task is scheduled"""
self.client.post(
"/login", json={"id_username": username, "id_password": password})
@task
def dashboard(self):
self.client.get("/")
@task(3)
def requirement_information(self):
for _ in range(10):
self.client.get("/requirement_information/1")
| # Import the locust libraries
import time
import getpass
from locust import HttpUser, task, between
# Request the username and password to login into NearBeach
username = input("\nPlease enter the username: ")
password = getpass.getpass("\nPlease enter the password: ")
class QuickstartUser(HttpUser):
wait_time = between(1, 2)
def on_start(self):
"""on_start is called when a Locust start before any task is scheduled"""
self.client.post(
"/login", json={"id_username": username, "id_password": password})
@task
def dashboard(self):
self.client.get("/")
@task(3)
def requirement_information(self):
for _ in range(10):
self.client.get("/requirement_information/1")
| mit | Python |
d92b8c824b1efed241c1857f12132e8ef5742a8b | add staticfiles storage | liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin,liqd/a4-meinberlin | meinberlin/config/settings/build.py | meinberlin/config/settings/build.py | from .base import *
SECRET_KEY = "dummykeyforbuilding"
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
| from .base import *
SECRET_KEY = "dummykeyforbuilding"
| agpl-3.0 | Python |
7a4284cd1fd69847e9beba1fa45bfa9ebfebe2f2 | Use errno module instead of hardcoding error codes | jaraco/keyring | keyring/tests/backends/test_file.py | keyring/tests/backends/test_file.py | import os
import tempfile
import sys
import errno
from ..py30compat import unittest
from ..test_backend import BackendBasicTests
from ..util import random_string
from keyring.backends import file
class FileKeyringTests(BackendBasicTests):
def setUp(self):
super(FileKeyringTests, self).setUp()
self.keyring = self.init_keyring()
self.keyring.file_path = self.tmp_keyring_file = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tmp_keyring_file)
except (OSError,):
e = sys.exc_info()[1]
if e.errno != errno.ENOENT: # No such file or directory
raise
def test_encrypt_decrypt(self):
password = random_string(20)
# keyring.encrypt expects bytes
password = password.encode('utf-8')
encrypted = self.keyring.encrypt(password)
self.assertEqual(password, self.keyring.decrypt(encrypted))
class UncryptedFileKeyringTestCase(FileKeyringTests, unittest.TestCase):
def init_keyring(self):
return file.PlaintextKeyring()
@unittest.skipIf(sys.platform == 'win32',
"Group/World permissions aren't meaningful on Windows")
def test_keyring_not_created_world_writable(self):
"""
Ensure that when keyring creates the file that it's not overly-
permissive.
"""
self.keyring.set_password('system', 'user', 'password')
self.assertTrue(os.path.exists(self.keyring.file_path))
group_other_perms = os.stat(self.keyring.file_path).st_mode & 0o077
self.assertEqual(group_other_perms, 0)
| import os
import tempfile
import sys
from ..py30compat import unittest
from ..test_backend import BackendBasicTests
from ..util import random_string
from keyring.backends import file
class FileKeyringTests(BackendBasicTests):
def setUp(self):
super(FileKeyringTests, self).setUp()
self.keyring = self.init_keyring()
self.keyring.file_path = self.tmp_keyring_file = tempfile.mktemp()
def tearDown(self):
try:
os.unlink(self.tmp_keyring_file)
except (OSError,):
e = sys.exc_info()[1]
if e.errno != 2: # No such file or directory
raise
def test_encrypt_decrypt(self):
password = random_string(20)
# keyring.encrypt expects bytes
password = password.encode('utf-8')
encrypted = self.keyring.encrypt(password)
self.assertEqual(password, self.keyring.decrypt(encrypted))
class UncryptedFileKeyringTestCase(FileKeyringTests, unittest.TestCase):
def init_keyring(self):
return file.PlaintextKeyring()
@unittest.skipIf(sys.platform == 'win32',
"Group/World permissions aren't meaningful on Windows")
def test_keyring_not_created_world_writable(self):
"""
Ensure that when keyring creates the file that it's not overly-
permissive.
"""
self.keyring.set_password('system', 'user', 'password')
self.assertTrue(os.path.exists(self.keyring.file_path))
group_other_perms = os.stat(self.keyring.file_path).st_mode & 0o077
self.assertEqual(group_other_perms, 0)
| mit | Python |
ce990bfb3c742c9f19f0af43a10aad8193fa084c | Use the package name when looking up version | cernops/python-keystoneclient-kerberos | keystoneclient_kerberos/__init__.py | keystoneclient_kerberos/__init__.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from keystoneclient_kerberos import v3
__version__ = pbr.version.VersionInfo(
'python-keystoneclient-kerberos').version_string()
V3Kerberos = v3.Kerberos
__all__ = ['V3Kerberos']
| # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
from keystoneclient_kerberos import v3
__version__ = pbr.version.VersionInfo(
'keystoneclient_kerberos').version_string()
V3Kerberos = v3.Kerberos
__all__ = ['V3Kerberos']
| apache-2.0 | Python |
a05bb771a8b9ee7cffb296534b4b7f6244784d4b | Configure default logging handler | AdamWill/mwclient,Jobava/mirror-mwclient,JeroenDeDauw/mwclient,mwclient/mwclient,ubibene/mwclient,danmichaelo/mwclient,PierreSelim/mwclient | mwclient/__init__.py | mwclient/__init__.py | """
Copyright (c) 2006-2011 Bryan Tong Minh
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from errors import *
from client import Site, __ver__
import ex
# Logging: Add a null handler to avoid "No handler found" warnings.
import logging
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| """
Copyright (c) 2006-2011 Bryan Tong Minh
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from errors import *
from client import Site, __ver__
import ex
| mit | Python |
84a4f0e3e26403fc4618f06f56a1fbd8443d6e1a | Remove remnant debug statements | gogoair/foremast,gogoair/foremast | src/foremast/pipeline/__main__.py | src/foremast/pipeline/__main__.py | """Create Spinnaker Pipeline."""
import argparse
import logging
from ..args import add_app, add_debug, add_gitlab_token, add_properties
from ..consts import LOGGING_FORMAT, ENVS
from .create_pipeline import SpinnakerPipeline
from .create_pipeline_onetime import SpinnakerPipelineOnetime
def main():
"""Run newer stuffs."""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_properties(parser)
add_gitlab_token(parser)
parser.add_argument('-b',
'--base',
help='Base AMI name to use, e.g. fedora, tomcat')
parser.add_argument(
"--triggerjob",
help="The jenkins job to monitor for pipeline triggering",
required=True)
parser.add_argument(
"--onetime",
required=False,
choices=ENVS,
help='Onetime deployment environment')
args = parser.parse_args()
if args.base and '"' in args.base:
args.base = args.base.strip('"')
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
log.debug('Parsed arguments: %s', args)
if args.onetime:
spinnakerapps = SpinnakerPipelineOnetime(app_info=vars(args))
spinnakerapps.create_pipeline()
else:
spinnakerapps = SpinnakerPipeline(app_info=vars(args))
spinnakerapps.create_pipeline()
if __name__ == "__main__":
main()
| """Create Spinnaker Pipeline."""
import argparse
import logging
from ..args import add_app, add_debug, add_gitlab_token, add_properties, add_env
from ..consts import LOGGING_FORMAT, ENVS
from .create_pipeline import SpinnakerPipeline
from .create_pipeline_onetime import SpinnakerPipelineOnetime
def main():
"""Run newer stuffs."""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_properties(parser)
add_gitlab_token(parser)
parser.add_argument('-b',
'--base',
help='Base AMI name to use, e.g. fedora, tomcat')
parser.add_argument(
"--triggerjob",
help="The jenkins job to monitor for pipeline triggering",
required=True)
parser.add_argument(
"--onetime",
required=False,
choices=ENVS,
help='Onetime deployment environment')
args = parser.parse_args()
print(args)
if args.base and '"' in args.base:
args.base = args.base.strip('"')
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
log.debug('Parsed arguments: %s', args)
if args.onetime:
spinnakerapps = SpinnakerPipelineOnetime(app_info=vars(args))
spinnakerapps.create_pipeline()
else:
spinnakerapps = SpinnakerPipeline(app_info=vars(args))
spinnakerapps.create_pipeline()
if __name__ == "__main__":
main()
| apache-2.0 | Python |
9fa0c60a2277491d1666048f3043938880b7a66a | clarify docstring | gboeing/osmnx,gboeing/osmnx | osmnx/_api.py | osmnx/_api.py | """Expose most common parts of public API directly in `osmnx.` namespace."""
from .bearing import add_edge_bearings
from .boundaries import gdf_from_place
from .boundaries import gdf_from_places
from .distance import get_nearest_edge
from .distance import get_nearest_edges
from .distance import get_nearest_node
from .distance import get_nearest_nodes
from .elevation import add_edge_grades
from .elevation import add_node_elevations
from .folium import plot_graph_folium
from .folium import plot_route_folium
from .footprints import footprints_from_address
from .footprints import footprints_from_place
from .footprints import footprints_from_point
from .footprints import footprints_from_polygon
from .geocoder import geocode
from .geocoder import geocode_to_gdf
from .graph import graph_from_address
from .graph import graph_from_bbox
from .graph import graph_from_place
from .graph import graph_from_point
from .graph import graph_from_polygon
from .graph import graph_from_xml
from .io import load_graphml
from .io import save_graph_geopackage
from .io import save_graph_shapefile
from .io import save_graph_xml
from .io import save_graphml
from .plot import plot_figure_ground
from .plot import plot_footprints
from .plot import plot_graph
from .plot import plot_graph_route
from .plot import plot_graph_routes
from .pois import pois_from_address
from .pois import pois_from_place
from .pois import pois_from_point
from .pois import pois_from_polygon
from .projection import project_graph
from .simplification import consolidate_intersections
from .simplification import simplify_graph
from .speed import add_edge_speeds
from .speed import add_edge_travel_times
from .stats import basic_stats
from .stats import extended_stats
from .utils import citation
from .utils import config
from .utils import log
from .utils import ts
from .utils_graph import get_undirected
from .utils_graph import graph_from_gdfs
from .utils_graph import graph_to_gdfs
| """Expose the core OSMnx API."""
from .bearing import add_edge_bearings
from .boundaries import gdf_from_place
from .boundaries import gdf_from_places
from .distance import get_nearest_edge
from .distance import get_nearest_edges
from .distance import get_nearest_node
from .distance import get_nearest_nodes
from .elevation import add_edge_grades
from .elevation import add_node_elevations
from .folium import plot_graph_folium
from .folium import plot_route_folium
from .footprints import footprints_from_address
from .footprints import footprints_from_place
from .footprints import footprints_from_point
from .footprints import footprints_from_polygon
from .geocoder import geocode
from .geocoder import geocode_to_gdf
from .graph import graph_from_address
from .graph import graph_from_bbox
from .graph import graph_from_place
from .graph import graph_from_point
from .graph import graph_from_polygon
from .graph import graph_from_xml
from .io import load_graphml
from .io import save_graph_geopackage
from .io import save_graph_shapefile
from .io import save_graph_xml
from .io import save_graphml
from .plot import plot_figure_ground
from .plot import plot_footprints
from .plot import plot_graph
from .plot import plot_graph_route
from .plot import plot_graph_routes
from .pois import pois_from_address
from .pois import pois_from_place
from .pois import pois_from_point
from .pois import pois_from_polygon
from .projection import project_graph
from .simplification import consolidate_intersections
from .simplification import simplify_graph
from .speed import add_edge_speeds
from .speed import add_edge_travel_times
from .stats import basic_stats
from .stats import extended_stats
from .utils import citation
from .utils import config
from .utils import log
from .utils import ts
from .utils_graph import get_undirected
from .utils_graph import graph_from_gdfs
from .utils_graph import graph_to_gdfs
| mit | Python |
be751cb7e354a621502ee94e72f431f826e14444 | Convert mox to mock: tests/compute/test_nova.py | hguemar/cinder,nexusriot/cinder,JioCloud/cinder,duhzecca/cinder,rakeshmi/cinder,scottdangelo/RemoveVolumeMangerLocks,julianwang/cinder,manojhirway/ExistingImagesOnNFS,phenoxim/cinder,Nexenta/cinder,Paul-Ezell/cinder-1,JioCloud/cinder,manojhirway/ExistingImagesOnNFS,scality/cinder,leilihh/cinder,j-griffith/cinder,openstack/cinder,petrutlucian94/cinder,hguemar/cinder,bswartz/cinder,nexusriot/cinder,Akrog/cinder,j-griffith/cinder,sasukeh/cinder,saeki-masaki/cinder,Paul-Ezell/cinder-1,eharney/cinder,potsmaster/cinder,leilihh/cinder,sasukeh/cinder,Datera/cinder,cloudbase/cinder,blueboxgroup/cinder,apporc/cinder,potsmaster/cinder,Accelerite/cinder,Hybrid-Cloud/cinder,NetApp/cinder,mahak/cinder,takeshineshiro/cinder,mahak/cinder,scottdangelo/RemoveVolumeMangerLocks,dims/cinder,julianwang/cinder,tlakshman26/cinder-new-branch,winndows/cinder,dims/cinder,Nexenta/cinder,eharney/cinder,Akrog/cinder,duhzecca/cinder,tlakshman26/cinder-bug-fix-volume-conversion-full,ge0rgi/cinder,Accelerite/cinder,tlakshman26/cinder-https-changes,abusse/cinder,openstack/cinder,saeki-masaki/cinder,scality/cinder,nikesh-mahalka/cinder,tlakshman26/cinder-bug-fix-volume-conversion-full,petrutlucian94/cinder,tlakshman26/cinder-new-branch,bswartz/cinder,CloudServer/cinder,cloudbase/cinder,tlakshman26/cinder-https-changes,winndows/cinder,blueboxgroup/cinder,takeshineshiro/cinder,apporc/cinder,Datera/cinder,CloudServer/cinder,abusse/cinder,phenoxim/cinder,Hybrid-Cloud/cinder,NetApp/cinder,rakeshmi/cinder,nikesh-mahalka/cinder | cinder/tests/compute/test_nova.py | cinder/tests/compute/test_nova.py | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from cinder.compute import nova
from cinder import context
from cinder import test
class FakeNovaClient(object):
class Volumes(object):
def __getattr__(self, item):
return None
def __init__(self):
self.volumes = self.Volumes()
def create_volume_snapshot(self, *args, **kwargs):
pass
def delete_volume_snapshot(self, *args, **kwargs):
pass
class NovaApiTestCase(test.TestCase):
def setUp(self):
super(NovaApiTestCase, self).setUp()
self.api = nova.API()
self.novaclient = FakeNovaClient()
self.ctx = context.get_admin_context()
def test_update_server_volume(self):
with contextlib.nested(
mock.patch.object(nova, 'novaclient'),
mock.patch.object(self.novaclient.volumes,
'update_server_volume')
) as (mock_novaclient, mock_update_server_volume):
mock_novaclient.return_value = self.novaclient
self.api.update_server_volume(self.ctx, 'server_id',
'attach_id', 'new_volume_id')
mock_novaclient.assert_called_once_with(self.ctx)
mock_update_server_volume.assert_called_once_with(
'server_id',
'attach_id',
'new_volume_id'
)
| # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder.compute import nova
from cinder import context
from cinder import test
class FakeNovaClient(object):
class Volumes(object):
def __getattr__(self, item):
return None
def __init__(self):
self.volumes = self.Volumes()
def create_volume_snapshot(self, *args, **kwargs):
pass
def delete_volume_snapshot(self, *args, **kwargs):
pass
class NovaApiTestCase(test.TestCase):
def setUp(self):
super(NovaApiTestCase, self).setUp()
self.api = nova.API()
self.novaclient = FakeNovaClient()
self.ctx = context.get_admin_context()
self.mox.StubOutWithMock(nova, 'novaclient')
def test_update_server_volume(self):
nova.novaclient(self.ctx).AndReturn(self.novaclient)
self.mox.StubOutWithMock(self.novaclient.volumes,
'update_server_volume')
self.novaclient.volumes.update_server_volume('server_id', 'attach_id',
'new_volume_id')
self.mox.ReplayAll()
self.api.update_server_volume(self.ctx, 'server_id', 'attach_id',
'new_volume_id')
| apache-2.0 | Python |
81195e2c15e99e5281085afeebdb19c5e8dcdace | Update _script.py | LeastAuthority/kubetop | src/kubetop/_script.py | src/kubetop/_script.py | # Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
The command-line interface.
Theory of Operation
===================
#. Convert command line arguments to structured configuration, supplying defaults where necessary.
#. Construct the top-level kubetop service from the configuration.
#. Run the Twisted reactor.
"""
from sys import __stdout__ as outfile
from yaml import safe_load
from itertools import repeat
from os.path import expanduser
import os
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from ._twistmain import TwistMain
from ._runmany import run_many_service
from ._textrenderer import Sink, kubetop
configPath = os.getenv('KUBECONFIG', "~/.kube/config")
CONFIG = FilePath(expanduser(configPath))
def current_context(config_path):
with config_path.open() as cfg:
return safe_load(cfg)[u"current-context"]
class KubetopOptions(Options):
optParameters = [
("context", None, current_context(CONFIG), "The kubectl context to use."),
("interval", None, 3.0, "The number of seconds between iterations.", float),
("iterations", None, None, "The number of iterations to perform.", int),
]
def fixed_intervals(interval, iterations):
if iterations is None:
return repeat(interval)
return repeat(interval, iterations)
def makeService(main, options):
from twisted.internet import reactor
# _topdata imports txkube and treq, both of which import
# twisted.web.client, which imports the reactor, which installs a default.
# That breaks TwistMain unless we delay it until makeService is called.
from ._topdata import make_source
f = lambda: kubetop(reactor, s, Sink.from_file(outfile))
s = make_source(reactor, CONFIG, options["context"])
return run_many_service(
main, reactor, f,
fixed_intervals(options["interval"], options["iterations"]),
)
main = TwistMain(KubetopOptions, makeService)
| # Copyright Least Authority Enterprises.
# See LICENSE for details.
"""
The command-line interface.
Theory of Operation
===================
#. Convert command line arguments to structured configuration, supplying defaults where necessary.
#. Construct the top-level kubetop service from the configuration.
#. Run the Twisted reactor.
"""
from sys import __stdout__ as outfile
from yaml import safe_load
from itertools import repeat
from os.path import expanduser
import os
from twisted.python.usage import Options
from twisted.python.filepath import FilePath
from ._twistmain import TwistMain
from ._runmany import run_many_service
from ._textrenderer import Sink, kubetop
configPath = "~/.kube/config"
if os.environ['KUBECONFIG']:
configPath = os.environ['KUBECONFIG']
CONFIG = FilePath(expanduser(configPath))
def current_context(config_path):
with config_path.open() as cfg:
return safe_load(cfg)[u"current-context"]
class KubetopOptions(Options):
optParameters = [
("context", None, current_context(CONFIG), "The kubectl context to use."),
("interval", None, 3.0, "The number of seconds between iterations.", float),
("iterations", None, None, "The number of iterations to perform.", int),
]
def fixed_intervals(interval, iterations):
if iterations is None:
return repeat(interval)
return repeat(interval, iterations)
def makeService(main, options):
from twisted.internet import reactor
# _topdata imports txkube and treq, both of which import
# twisted.web.client, which imports the reactor, which installs a default.
# That breaks TwistMain unless we delay it until makeService is called.
from ._topdata import make_source
f = lambda: kubetop(reactor, s, Sink.from_file(outfile))
s = make_source(reactor, CONFIG, options["context"])
return run_many_service(
main, reactor, f,
fixed_intervals(options["interval"], options["iterations"]),
)
main = TwistMain(KubetopOptions, makeService) | mit | Python |
801c2233b61a96fa9a620c3c740cdce8dad3d490 | Complete iter sol | bowen0701/algorithms_data_structures | lc0941_valid_mountain_array.py | lc0941_valid_mountain_array.py | """Leetcode 941. Valid Mountain Array
Easy
URL: https://leetcode.com/problems/valid-mountain-array/
Given an array A of integers, return true if and only if it is a valid
mountain array.
Recall that A is a mountain array if and only if:
- A.length >= 3
- There exists some i with 0 < i < A.length - 1 such that:
* A[0] < A[1] < ... A[i-1] < A[i]
* A[i] > A[i+1] > ... > A[A.length - 1]
Example 1:
Input: [2,1]
Output: false
Example 2:
Input: [3,5,5]
Output: false
Example 3:
Input: [0,3,2,1]
Output: true
Note:
- 0 <= A.length <= 10000
- 0 <= A[i] <= 10000
"""
class SolutionIter(object):
def validMountainArray(self, A):
"""
:type A: List[int]
:rtype: bool
Time complexity: O(n).
Space complexity: O(1).
"""
# Edge case.
if len(A) <= 2:
return False
# Start from pos 0 to iteratively check strictly increasing.
i = 0
while i < len(A) and i + 1 < len(A) and A[i] < A[i + 1]:
i += 1
# If not strictly increasing at beginning or at all.
if i == 0 or i == len(A) - 1:
return False
# Start from last pos to check strictly decreasing.
while i < len(A) and i + 1 < len(A):
if A[i] <= A[i + 1]:
return False
i += 1
return True
def main():
# Output: false
A = [2,1]
print SolutionIter().validMountainArray(A)
# Output: false
A = [3,5,5]
print SolutionIter().validMountainArray(A)
# Output: true
A = [0,3,2,1]
print SolutionIter().validMountainArray(A)
if __name__ == '__main__':
main()
| """Leetcode 941. Valid Mountain Array
Easy
URL: https://leetcode.com/problems/valid-mountain-array/
Given an array A of integers, return true if and only if it is a valid
mountain array.
Recall that A is a mountain array if and only if:
- A.length >= 3
- There exists some i with 0 < i < A.length - 1 such that:
* A[0] < A[1] < ... A[i-1] < A[i]
* A[i] > A[i+1] > ... > A[A.length - 1]
Example 1:
Input: [2,1]
Output: false
Example 2:
Input: [3,5,5]
Output: false
Example 3:
Input: [0,3,2,1]
Output: true
Note:
- 0 <= A.length <= 10000
- 0 <= A[i] <= 10000
"""
class Solution(object):
def validMountainArray(self, A):
"""
:type A: List[int]
:rtype: bool
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
433d9b2c1c29f32a7d5289e84673308c96302d8d | FIX a bug, you fuck'in forgot to rename the new function | cardmaster/makeclub,cardmaster/makeclub,cardmaster/makeclub | controlers/access.py | controlers/access.py | '''Copyright(C): Leaf Johnson 2011
This file is part of makeclub.
makeclub is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
makeclub is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with makeclub. If not, see <http://www.gnu.org/licenses/>.
'''
from google.appengine.api import users
operations = [
"listclubs",
"createClub"
]
clubOperations = [
"view",
"create",
"edit",
"delete",
"arrange",
"finish",
"newact"
]
actOperatoins = [
"view",
"edit",
"join"
]
def isAccessible (user, operation):
return True
def hasClubPrivilige (user, club, operation):
return True
def hasActPrivilige (user, act, operation):
return True
| '''Copyright(C): Leaf Johnson 2011
This file is part of makeclub.
makeclub is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
makeclub is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with makeclub. If not, see <http://www.gnu.org/licenses/>.
'''
from google.appengine.api import users
operations = [
"listclubs",
"createClub"
]
clubOperations = [
"view",
"create",
"edit",
"delete",
"arrange",
"finish"
]
actOperatoins = [
"create",
"view",
"edit",
"join"
]
def isAccessible (user, operation):
return True
def hasClubPrivilige (user, club, operation):
return True
def hasClubPrivilige (user, act, operation):
return True
| agpl-3.0 | Python |
a861238786a71fe6f0bea5f0d5a8f6b21cb68a62 | add TODO | pyro-ppl/numpyro | numpyro/util.py | numpyro/util.py | from __future__ import division
def dual_averaging(t0=10, kappa=0.75, gamma=0.05):
# TODO: add docs
def init_fn(prox_center=0.):
x_t = 0.
x_avg = 0. # average of primal sequence
g_avg = 0. # average of dual sequence
t = 0
return (x_t, x_avg, g_avg, t, prox_center)
def update_fn(g, state):
x_t, x_avg, g_avg, t, prox_center = state
t = t + 1
# g_avg = (g_1 + ... + g_t) / t
g_avg = (1 - 1 / (t + t0)) * g_avg + g / (t + t0)
# According to formula (3.4) of [1], we have
# x_t = argmin{ g_avg . x + loc_t . |x - x0|^2 },
# where loc_t := beta_t / t, beta_t := (gamma/2) * sqrt(t)
x_t = prox_center - (t ** 0.5) / gamma * g_avg
# weight for the new x_t
weight_t = t ** (-kappa)
x_avg = (1 - weight_t) * x_avg + weight_t * x_t
return (x_t, x_avg, g_avg, t, prox_center)
return init_fn, update_fn
| from __future__ import division
def dual_averaging(t0=10, kappa=0.75, gamma=0.05):
def init_fn(prox_center=0.):
x_t = 0.
x_avg = 0. # average of primal sequence
g_avg = 0. # average of dual sequence
t = 0
return (x_t, x_avg, g_avg, t, prox_center)
def update_fn(g, state):
x_t, x_avg, g_avg, t, prox_center = state
t = t + 1
# g_avg = (g_1 + ... + g_t) / t
g_avg = (1 - 1 / (t + t0)) * g_avg + g / (t + t0)
# According to formula (3.4) of [1], we have
# x_t = argmin{ g_avg . x + loc_t . |x - x0|^2 },
# where loc_t := beta_t / t, beta_t := (gamma/2) * sqrt(t)
x_t = prox_center - (t ** 0.5) / gamma * g_avg
# weight for the new x_t
weight_t = t ** (-kappa)
x_avg = (1 - weight_t) * x_avg + weight_t * x_t
return (x_t, x_avg, g_avg, t, prox_center)
return init_fn, update_fn
| apache-2.0 | Python |
36cd68912202e04d533a26bcec3b0aa8514ba5fa | fix performance issues of purify | farakavco/lutino | src/lutino/persian.py | src/lutino/persian.py | # -*- coding: utf-8 -*-
_character_map = str.maketrans({
'ي': 'ی',
'ك': 'ک',
'ة': 'ه',
'ۀ': 'ه',
# Eastern Arabic-Indic digits (Persian and Urdu) U+06Fn: ۰۱۲۳۴۵۶۷۸۹
'۰': '0',
'۱': '1',
'۲': '2',
'۳': '3',
'۴': '4',
'۵': '5',
'۶': '6',
'۷': '7',
'۸': '8',
'۹': '9',
# Arabic-Indic digits: U+066n: ٠١٢٣٤٥٦٧٨٩
'٠': '0',
'١': '1',
'٢': '2',
'٣': '3',
'٤': '4',
'٥': '5',
'٦': '6',
'٧': '7',
'٨': '8',
'٩': '9',
})
def purify(s):
return s.translate(_character_map)
if __name__ == '__main__':
sample_input = 'يكةۀ ۱۲۳۴'
expected_output = 'یکهه 1234'
assert purify(sample_input) == expected_output
print('success')
| # -*- coding: utf-8 -*-
__author__ = 'vahid'
_character_map = {
'ي': 'ی',
'ك': 'ک',
'ة': 'ه',
'ۀ': 'ه',
# Eastern Arabic-Indic digits (Persian and Urdu) U+06Fn: ۰۱۲۳۴۵۶۷۸۹
'۰': '0',
'۱': '1',
'۲': '2',
'۳': '3',
'۴': '4',
'۵': '5',
'۶': '6',
'۷': '7',
'۸': '8',
'۹': '9',
# Arabic-Indic digits: U+066n: ٠١٢٣٤٥٦٧٨٩
'٠': '0',
'١': '1',
'٢': '2',
'٣': '3',
'٤': '4',
'٥': '5',
'٦': '6',
'٧': '7',
'٨': '8',
'٩': '9',
}
def purify(s):
res = ''
for c in s.strip():
if c in _character_map:
res += _character_map[c]
else:
res += c
return res
if __name__ == '__main__':
sample_input = 'يكةۀ'
expected_output = 'یکهه'
assert purify(sample_input) == expected_output
print('success')
| apache-2.0 | Python |
6fcdfbea98d2770d770122ef10bcc4bb349f64e9 | Remove stub | stripe/stripe-python | tests/api_resources/test_source_transaction.py | tests/api_resources/test_source_transaction.py | from __future__ import absolute_import, division, print_function
import stripe
from tests.helper import StripeTestCase
class SourceTransactionTest(StripeTestCase):
def test_is_listable(self):
source = stripe.Source.construct_from({
'id': 'src_123',
'object': 'source'
}, stripe.api_key)
source_transactions = source.source_transactions()
self.assert_requested(
'get',
'/v1/sources/src_123/source_transactions'
)
self.assertIsInstance(source_transactions.data, list)
self.assertIsInstance(source_transactions.data[0],
stripe.SourceTransaction)
| from __future__ import absolute_import, division, print_function
import stripe
from tests.helper import StripeTestCase
class SourceTransactionTest(StripeTestCase):
def test_is_listable(self):
# TODO: remove stub once stripe-mock supports source_transactions
self.stub_request(
'get',
'/v1/sources/src_123/source_transactions',
{
'object': 'list',
'data': [{
'id': 'srxtxn_123',
'object': 'source_transaction',
}],
}
)
source = stripe.Source.construct_from({
'id': 'src_123',
'object': 'source'
}, stripe.api_key)
source_transactions = source.source_transactions()
self.assert_requested(
'get',
'/v1/sources/src_123/source_transactions'
)
self.assertIsInstance(source_transactions.data, list)
self.assertIsInstance(source_transactions.data[0],
stripe.SourceTransaction)
| mit | Python |
1a32bd6d797ebff3c447f865274d809d1b058a13 | test BQ download_to_file download_to_dataframe | CartoDB/cartoframes,CartoDB/cartoframes | tests/unit/data/client/test_bigquery_client.py | tests/unit/data/client/test_bigquery_client.py | import os
import csv
import pandas as pd
from unittest.mock import Mock, patch
from cartoframes.auth import Credentials
from cartoframes.data.clients.bigquery_client import BigQueryClient
class ResponseMock(list):
def __init__(self, data, **kwargs):
super(ResponseMock, self).__init__(data, **kwargs)
self.total_rows = len(data)
class QueryJobMock(object):
def __init__(self, response):
self.response = response
def result(self):
return ResponseMock(self.response)
class TestBigQueryClient(object):
def setup_method(self):
self.original_init_clients = BigQueryClient._init_clients
BigQueryClient._init_clients = Mock(return_value=(True, True, True))
self.username = 'username'
self.apikey = 'apikey'
self.credentials = Credentials(self.username, self.apikey)
self.file_path = '/tmp/test_download.csv'
def teardown_method(self):
self.credentials = None
BigQueryClient._init_clients = self.original_init_clients
if os.path.isfile(self.file_path):
os.remove(self.file_path)
@patch.object(BigQueryClient, 'get_table_column_names')
@patch.object(BigQueryClient, '_download_by_bq_storage_api')
def test_download_to_file_full(self, download_mock, column_names_mock):
data = [{'0': 'word', '1': 'word word'}]
columns = ['column1', 'column2']
column_names_mock.return_value = Mock(return_value=columns)
download_mock.return_value = data
file_path = self.file_path
bq_client = BigQueryClient(self.credentials)
job = QueryJobMock(data)
bq_client.download_to_file(job, file_path, column_names=columns, progress_bar=False)
rows = []
with open(file_path) as csvfile:
csvreader = csv.reader(csvfile)
rows.append(next(csvreader))
rows.append(next(csvreader))
assert rows[0] == columns
assert rows[1] == list(data[0].values())
@patch.object(BigQueryClient, 'get_table_column_names')
@patch.object(BigQueryClient, '_download_by_bq_storage_api')
def test_download_to_dataframe_full(self, download_mock, column_names_mock):
data = [{'column1': 'word', 'column2': 'word word'}]
columns = ['column1', 'column2']
column_names_mock.return_value = Mock(return_value=columns)
download_mock.return_value = data
expected_df = pd.DataFrame(data, columns=columns)
bq_client = BigQueryClient(self.credentials)
job = QueryJobMock(data)
df = bq_client.download_to_dataframe(job)
assert df.equals(expected_df)
| import os
import csv
from unittest.mock import Mock, patch
from cartoframes.auth import Credentials
from cartoframes.data.clients.bigquery_client import BigQueryClient
class ResponseMock(list):
def __init__(self, data, **kwargs):
super(ResponseMock, self).__init__(data, **kwargs)
self.total_rows = len(data)
class QueryJobMock(object):
def __init__(self, response):
self.response = response
def result(self):
return ResponseMock(self.response)
class TestBigQueryClient(object):
def setup_method(self):
self.original_init_clients = BigQueryClient._init_clients
BigQueryClient._init_clients = Mock(return_value=(True, True, True))
self.username = 'username'
self.apikey = 'apikey'
self.credentials = Credentials(self.username, self.apikey)
self.file_path = '/tmp/test_download.csv'
def teardown_method(self):
self.credentials = None
BigQueryClient._init_clients = self.original_init_clients
if os.path.isfile(self.file_path):
os.remove(self.file_path)
@patch.object(BigQueryClient, 'get_table_column_names')
@patch.object(BigQueryClient, '_download_by_bq_storage_api')
def test_download_full(self, download_mock, column_names_mock):
data = [{'0': 'word', '1': 'word word'}]
columns = ['column1', 'column2']
column_names_mock.return_value = Mock(return_value=columns)
download_mock.return_value = data
file_path = self.file_path
bq_client = BigQueryClient(self.credentials)
job = QueryJobMock(data)
bq_client.download_to_file(job, file_path, column_names=columns, progress_bar=False)
rows = []
with open(file_path) as csvfile:
csvreader = csv.reader(csvfile)
rows.append(next(csvreader))
rows.append(next(csvreader))
assert rows[0] == columns
assert rows[1] == list(data[0].values())
| bsd-3-clause | Python |
97710a45761ff6220b94df4d518151e31eb8ef5d | Make CI happy | snazy2000/netbox,lampwins/netbox,Alphalink/netbox,digitalocean/netbox,Alphalink/netbox,lampwins/netbox,lampwins/netbox,digitalocean/netbox,digitalocean/netbox,snazy2000/netbox,digitalocean/netbox,Alphalink/netbox,Alphalink/netbox,snazy2000/netbox,lampwins/netbox,snazy2000/netbox | netbox/extras/api/customfields.py | netbox/extras/api/customfields.py | from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from extras.models import CF_TYPE_SELECT, CustomField, CustomFieldChoice
#
# Custom fields
#
class CustomFieldSerializer(serializers.BaseSerializer):
"""
Extends ModelSerializer to render any CustomFields and their values associated with an object.
"""
def to_representation(self, manager):
# Initialize custom fields dictionary
data = {f.name: None for f in self.parent._custom_fields}
# Assign CustomFieldValues from database
for cfv in manager.all():
if cfv.field.type == CF_TYPE_SELECT:
data[cfv.field.name] = CustomFieldChoiceSerializer(cfv.value).data
else:
data[cfv.field.name] = cfv.value
return data
class CustomFieldModelSerializer(serializers.ModelSerializer):
custom_fields = CustomFieldSerializer(source='custom_field_values')
def __init__(self, *args, **kwargs):
super(CustomFieldModelSerializer, self).__init__(*args, **kwargs)
# Cache the list of custom fields for this model
content_type = ContentType.objects.get_for_model(self.Meta.model)
self._custom_fields = CustomField.objects.filter(obj_type=content_type)
class CustomFieldChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = CustomFieldChoice
fields = ['id', 'value']
| from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from extras.models import CF_TYPE_SELECT, CustomField, CustomFieldChoice
#
# Custom fields
#
class CustomFieldSerializer(serializers.BaseSerializer):
"""
Extends ModelSerializer to render any CustomFields and their values associated with an object.
"""
def to_representation(self, manager):
# Initialize custom fields dictionary
data = {f.name: None for f in self.parent._custom_fields}
# Assign CustomFieldValues from database
for cfv in manager.all():
if cfv.field.type == CF_TYPE_SELECT:
data[cfv.field.name] = CustomFieldChoiceSerializer(cfv.value).data
else:
data[cfv.field.name] = cfv.value
return data
class CustomFieldModelSerializer(serializers.ModelSerializer):
custom_fields = CustomFieldSerializer(source='custom_field_values')
def __init__(self, *args, **kwargs):
super(CustomFieldModelSerializer, self).__init__(*args, **kwargs)
# Cache the list of custom fields for this model
content_type = ContentType.objects.get_for_model(self.Meta.model)
self._custom_fields = CustomField.objects.filter(obj_type=content_type)
class CustomFieldChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = CustomFieldChoice
fields = ['id', 'value'] | apache-2.0 | Python |
63310b5417b39bcc8d98bef3a6e82e9488c60d57 | Refactor code | akperkins/cover-letter | createCoverLetter.py | createCoverLetter.py | from django.template import Template,Context
from django.conf import settings
import pyperclip
import sys
'''
creating a python script that will read in the coverLetter template
add the company name, and save it to a new file and the system's clipboard
Also append the current date to the letters as well
the compnay_name variable in the tempate is passed to the application
'''
#parses args passed to the application
def init():
if sys.argv[0] == "createCoverLetter.py":
argsList = sys.argv[1:]
else:
argsList = sys.argv
if len(argsList) != 1:
print "Invalid number of arguments passed,"+str(len(argsList))+" , application terminating!"
print "Expected args: \"companyName>\""
sys.exit()
else:
company_name = argsList[0]
print "company name received:"+company_name
#Creates the cover letter and returns it as a String
def createLetter():
fp=open("templates/coverLetter.txt","r")
settings.configure()
t = Template(fp.read())
fp.close()
map = {
"company":sys.argv[1],
}
c = Context(map)
letter = str(t.render(c))
return letter
#writes the letter to the createdCLetters directory
def writeLetter(letter):
file_name = "createdCLetters/"+sys.argv[1]+"CoverLetter.txt"
outFile = open(file_name,"w+")
outFile.write(letter)
outFile.close()
print "Successfully created cover letter: "+file_name
#uses pyperclip module to set the system clipboard to letter
def copyToClipboard(letter):
pyperclip.setcb(letter)
print "The cover letter has been appended to the system clipboard. Enjoy!"
#start of application
def main():
init()
letter = createLetter()
writeLetter(letter)
copyToClipboard(letter)
main()
| from django.template import Template,Context
from django.conf import settings
import pyperclip
import sys
argsList = None
if sys.argv[0] == "createCoverLetter.py":
argsList = sys.argv[1:]
else:
argsList = sys.argv
company_name=""
if len(argsList) != 1:
print "Invalid number of arguments passed,"+str(len(argsList))+" , application terminating!"
print "Expected args: \"companyName>\""
sys.exit()
else:
company_name = argsList[0]
print "company name received:"+company_name
#creating a python script that will read in the coverLetter template
#add the company name, and save it to a new file and the system's clipboard
#Also append the current date to the letters as well
#now add company name parameter to python args
fp=open("templates/coverLetter.txt","r")
settings.configure()
t = Template(fp.read())
fp.close()
map = {
"company":company_name,
}
c = Context(map)
letter = str(t.render(c))
file_name = "createdCLetters/"+company_name+"CoverLetter.txt"
outFile = open(file_name,"w+")
outFile.write(letter)
outFile.close()
print "Successfully created cover letter: "+file_name
#uses pyperclip module to set the system clipboard to letter
pyperclip.setcb(letter)
print "The cover letter has been appended to the system clipboard. Enjoy!"
| mit | Python |
e47fa24ac7d5f56f2e1884247d2aa3068fa265e2 | fix loading ntlk and wordnet exception | makcedward/nlpaug,makcedward/nlpaug | nlpaug/model/word_dict/wordnet.py | nlpaug/model/word_dict/wordnet.py | try:
import nltk
from nltk.corpus import wordnet
except ImportError:
# No installation required if not using this function
pass
from nlpaug.model.word_dict import WordDictionary
class WordNet(WordDictionary):
def __init__(self, lang, is_synonym=True):
super().__init__(cache=True)
self.lang = lang
self.is_synonym = is_synonym
try:
wordnet
except NameError:
raise ImportError('Missed nltk library. Install it via `pip install nltk`')
try:
# Check whether wordnet package is downloaded
wordnet.synsets('computer')
# Check whether POS package is downloaded
nltk.pos_tag('computer')
except ImportError:
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
self.model = self.read()
def read(self):
return wordnet
def predict(self, word, pos=None):
results = []
for synonym in self.model.synsets(word, pos=pos, lang=self.lang):
for lemma in synonym.lemmas():
if self.is_synonym:
results.append(lemma.name())
else:
for antonym in lemma.antonyms():
results.append(antonym.name())
return results
@classmethod
def pos_tag(cls, tokens):
return nltk.pos_tag(tokens)
| try:
import nltk
from nltk.corpus import wordnet
except ImportError:
# No installation required if not using this function
pass
from nlpaug.model.word_dict import WordDictionary
class WordNet(WordDictionary):
def __init__(self, lang, is_synonym=True):
super().__init__(cache=True)
self.lang = lang
self.is_synonym = is_synonym
self.model = self.read()
try:
wordnet
except NameError:
raise ImportError('Missed nltk library. Install it via `pip install nltk`')
@classmethod
def read(cls):
try:
# Check whether wordnet package is downloaded
wordnet.synsets('computer')
except ImportError:
nltk.download('wordnet')
try:
# Check whether POS package is downloaded
nltk.pos_tag('computer')
except ImportError:
nltk.download('averaged_perceptron_tagger')
return wordnet
def predict(self, word, pos=None):
results = []
for synonym in self.model.synsets(word, pos=pos, lang=self.lang):
for lemma in synonym.lemmas():
if self.is_synonym:
results.append(lemma.name())
else:
for antonym in lemma.antonyms():
results.append(antonym.name())
return results
@classmethod
def pos_tag(cls, tokens):
return nltk.pos_tag(tokens)
| mit | Python |
3b2a7b392522cbbb34586affd1ba1826145a2cb5 | Fix passing of parameters. | pikinder/nn-patterns | nn_patterns/explainer/__init__.py | nn_patterns/explainer/__init__.py |
from .base import *
from .gradient_based import *
from .misc import *
from .pattern_based import *
from .relevance_based import *
def create_explainer(name,
output_layer, patterns=None, to_layer=None, **kwargs):
return {
# Gradient based
"gradient": GradientExplainer,
"gradient.alt": AlternativGradientExplainer,
"deconvnet": DeConvNetExplainer,
"guided": GuidedBackpropExplainer,
# Relevance based
"lrp.z": LRPZExplainer,
"lrp.eps": LRPEpsExplainer,
# Pattern based
"patternnet": PatternNetExplainer,
"patternnet.guided": PatternNetExplainer,
"patternlrp": PatternLRPExplainer,
}[name](output_layer, patterns=patterns, to_layer=to_layer, **kwargs)
|
from .base import *
from .gradient_based import *
from .misc import *
from .pattern_based import *
from .relevance_based import *
def create_explainer(name,
output_layer, patterns=None, to_layer=None, **kwargs):
return {
# Gradient based
"gradient": GradientExplainer,
"gradient.alt": AlternativGradientExplainer,
"deconvnet": DeConvNetExplainer,
"guided": GuidedBackpropExplainer,
# Relevance based
"lrp.z": LRPZExplainer,
"lrp.eps": LRPEpsExplainer,
# Pattern based
"patternnet": PatternNetExplainer,
"patternnet.guided": PatternNetExplainer,
"patternlrp": PatternLRPExplainer,
}[name](output_layer, patterns=None, to_layer=None, **kwargs)
| mit | Python |
bb9307ae43bab623f895db7c4040f736f545e8fc | Bump version number to 0.11. | ChristopherHogan/cython,mrGeen/cython,slonik-az/cython,mrGeen/cython,scoder/cython,c-blake/cython,hickford/cython,cython/cython,JelleZijlstra/cython,fabianrost84/cython,scoder/cython,rguillebert/CythonCTypesBackend,roxyboy/cython,mcanthony/cython,madjar/cython,hpfem/cython,bzzzz/cython,fperez/cython,roxyboy/cython,hhsprings/cython,dahebolangkuan/cython,bzzzz/cython,fabianrost84/cython,hpfem/cython,mcanthony/cython,larsmans/cython,roxyboy/cython,achernet/cython,achernet/cython,cython/cython,da-woods/cython,andreasvc/cython,acrispin/cython,andreasvc/cython,encukou/cython,roxyboy/cython,hhsprings/cython,ABcDexter/cython,hpfem/cython,roxyboy/cython,JelleZijlstra/cython,encukou/cython,hickford/cython,marscher/cython,hickford/cython,dahebolangkuan/cython,cython/cython,da-woods/cython,madjar/cython,c-blake/cython,mrGeen/cython,slonik-az/cython,JelleZijlstra/cython,rguillebert/CythonCTypesBackend,acrispin/cython,larsmans/cython,ABcDexter/cython,mcanthony/cython,fperez/cython,c-blake/cython,achernet/cython,marscher/cython,hpfem/cython,hhsprings/cython,JelleZijlstra/cython,larsmans/cython,hickford/cython,rguillebert/CythonCTypesBackend,encukou/cython,ABcDexter/cython,ABcDexter/cython,scoder/cython,larsmans/cython,slonik-az/cython,encukou/cython,ChristopherHogan/cython,fperez/cython,mrGeen/cython,andreasvc/cython,rguillebert/CythonCTypesBackend,bzzzz/cython,scoder/cython,acrispin/cython,hhsprings/cython,achernet/cython,madjar/cython,dahebolangkuan/cython,marscher/cython,achernet/cython,cython/cython,bzzzz/cython,c-blake/cython,mcanthony/cython,marscher/cython,ChristopherHogan/cython,fperez/cython,ABcDexter/cython,dahebolangkuan/cython,andreasvc/cython,madjar/cython,hickford/cython,andreasvc/cython,acrispin/cython,acrispin/cython,encukou/cython,fabianrost84/cython,hhsprings/cython,hpfem/cython,c-blake/cython,JelleZijlstra/cython,da-woods/cython,da-woods/cython,fabianrost84/cython,dahebolangkuan/cython,fpe
rez/cython,marscher/cython,larsmans/cython,slonik-az/cython,fabianrost84/cython,madjar/cython,slonik-az/cython,mcanthony/cython,mrGeen/cython | Cython/Compiler/Version.py | Cython/Compiler/Version.py | version = '0.11'
| version = '0.11.rc3'
| apache-2.0 | Python |
59ce22d604241d5092096a8aea0a6d25fc1325f3 | Remove dangling Docker images | morninj/django-docker,morninj/django-docker,morninj/django-docker | fabfile.py | fabfile.py | from fabric.api import *
from fabric.contrib.files import *
import os
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
import time
# Configure server admin login credentials
if parser.get('production', 'USE_PASSWORD'):
env.password = parser.get('production', 'PASSWORD')
else:
env.key_filename = parser.get('production', 'PUBLIC_KEY')
# Deploy production server
@hosts(parser.get('production', 'USERNAME') + '@' + parser.get('production', 'HOST'))
def deploy_production():
start_time = time.time();
print 'Building Docker image...'
local('docker build -t %s .' % parser.get('general', 'DOCKER_IMAGE_NAME'))
print 'Pushing image to Docker Hub...'
local('docker push %s' % parser.get('general', 'DOCKER_IMAGE_NAME'))
print 'Removing any existing Docker containers on the production host...'
run('if [ "$(docker ps -qa)" != "" ]; then docker rm --force `docker ps -qa`; fi')
run('docker ps')
print 'Removing dangling Docker images...'
run('docker rmi $(docker images -f "dangling=true" -q)')
print 'Pulling image on production host...'
run('docker pull %s ' % parser.get('general', 'DOCKER_IMAGE_NAME'));
print 'Running image on production host...'
run_command = '''docker run \
-d \
-p 80:80 \
--env DJANGO_PRODUCTION=true \
--env ROOT_PASSWORD={ROOT_PASSWORD} \
--env DATABASE_HOST={DATABASE_HOST} \
--env DATABASE_USERNAME={DATABASE_USERNAME} \
--env DATABASE_PASSWORD={DATABASE_PASSWORD} \
--env DATABASE_NAME={DATABASE_NAME} \
--env SECRET_KEY={SECRET_KEY} \
{DOCKER_IMAGE_NAME}'''.format(
ROOT_PASSWORD=parser.get('general', 'ROOT_PASSWORD'),
DOCKER_IMAGE_NAME=parser.get('general', 'DOCKER_IMAGE_NAME'),
DATABASE_HOST=parser.get('production', 'DATABASE_HOST'),
DATABASE_USERNAME=parser.get('production', 'DATABASE_USERNAME'),
DATABASE_PASSWORD=parser.get('production', 'DATABASE_PASSWORD'),
DATABASE_NAME=parser.get('production', 'DATABASE_NAME'),
SECRET_KEY=parser.get('production', 'SECRET_KEY'),
)
run(run_command);
print '-' * 80
print parser.get('general', 'DOCKER_IMAGE_NAME') + ' successfully deployed to ' + parser.get('production', 'HOST')
print("Deployment time: %s seconds" % (time.time() - start_time))
| from fabric.api import *
from fabric.contrib.files import *
import os
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
parser.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
import time
# Configure server admin login credentials
if parser.get('production', 'USE_PASSWORD'):
env.password = parser.get('production', 'PASSWORD')
else:
env.key_filename = parser.get('production', 'PUBLIC_KEY')
# Deploy production server
@hosts(parser.get('production', 'USERNAME') + '@' + parser.get('production', 'HOST'))
def deploy_production():
start_time = time.time();
print 'Building Docker image...'
local('docker build -t %s .' % parser.get('general', 'DOCKER_IMAGE_NAME'))
print 'Pushing image to Docker Hub...'
local('docker push %s' % parser.get('general', 'DOCKER_IMAGE_NAME'))
print 'Removing any existing Docker containers on the production host...'
run('if [ "$(docker ps -qa)" != "" ]; then docker rm --force `docker ps -qa`; fi')
run('docker ps')
print 'Pulling image on production host...'
run('docker pull %s ' % parser.get('general', 'DOCKER_IMAGE_NAME'));
print 'Running image on production host...'
run_command = '''docker run \
-d \
-p 80:80 \
--env DJANGO_PRODUCTION=true \
--env ROOT_PASSWORD={ROOT_PASSWORD} \
--env DATABASE_HOST={DATABASE_HOST} \
--env DATABASE_USERNAME={DATABASE_USERNAME} \
--env DATABASE_PASSWORD={DATABASE_PASSWORD} \
--env DATABASE_NAME={DATABASE_NAME} \
--env SECRET_KEY={SECRET_KEY} \
{DOCKER_IMAGE_NAME}'''.format(
ROOT_PASSWORD=parser.get('general', 'ROOT_PASSWORD'),
DOCKER_IMAGE_NAME=parser.get('general', 'DOCKER_IMAGE_NAME'),
DATABASE_HOST=parser.get('production', 'DATABASE_HOST'),
DATABASE_USERNAME=parser.get('production', 'DATABASE_USERNAME'),
DATABASE_PASSWORD=parser.get('production', 'DATABASE_PASSWORD'),
DATABASE_NAME=parser.get('production', 'DATABASE_NAME'),
SECRET_KEY=parser.get('production', 'SECRET_KEY'),
)
run(run_command);
print '-' * 80
print parser.get('general', 'DOCKER_IMAGE_NAME') + ' successfully deployed to ' + parser.get('production', 'HOST')
print("Deployment time: %s seconds" % (time.time() - start_time))
| mit | Python |
addc1e83911f72282eca9603e2c483ba6ef5ef7c | Update to the latest XSP. | BansheeMediaPlayer/bockbuild,mono/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,mono/bockbuild | packages/xsp.py | packages/xsp.py | GitHubTarballPackage('mono', 'xsp', '3.0.11', '4587438369691b9b3e8415e1f113aa98b57d1fde', configure = './autogen.sh --prefix="%{prefix}"')
| GitHubTarballPackage('mono', 'xsp', '2.11', 'd3e2f80ff59ddff68e757a520655555e2fbf2695', configure = './autogen.sh --prefix="%{prefix}"')
| mit | Python |
cb702893ef902f120b1548057d2b433b48a5b04b | add filters for payment admin | lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django | src/payments/admin.py | src/payments/admin.py | from ajax_select.fields import AutoCompleteSelectField
from django import forms
from django.contrib import admin
from api.admin import admin_site
from payments import models
class PaymentForm(forms.ModelForm):
person = AutoCompleteSelectField(
"people",
required=True,
label="Personne"
)
class Meta:
fields = '__all__'
@admin.register(models.Payment, site=admin_site)
class PaymentAdmin(admin.ModelAdmin):
form = PaymentForm
list_display = ('person', 'email', 'first_name', 'last_name', 'price', 'status', )
readonly_fields = ('person', 'email', 'first_name', 'last_name', 'price', 'status')
list_filter = ('price', 'status')
| from ajax_select.fields import AutoCompleteSelectField
from django import forms
from django.contrib import admin
from api.admin import admin_site
from payments import models
class PaymentForm(forms.ModelForm):
person = AutoCompleteSelectField(
"people",
required=True,
label="Personne"
)
class Meta:
fields = '__all__'
@admin.register(models.Payment, site=admin_site)
class PaymentAdmin(admin.ModelAdmin):
form = PaymentForm
list_display = ('person', 'email', 'first_name', 'last_name', 'price', 'status')
| agpl-3.0 | Python |
f8e30e1f69976829de2d473fecb6e32264a8fa59 | Add canonical urls for pages model #21 | GoWebyCMS/goweby-core-dev,GoWebyCMS/goweby-core-dev,GoWebyCMS/goweby-core-dev | pages/models.py | pages/models.py | from django.db import models
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
from django.utils import timezone
# Create your models here.
class Page(models.Model):
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
menu_title = models.CharField(max_length=200)
page_title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200,unique_for_date='publish')
body = RichTextField()
show_in_menu = models.BooleanField(default=False)
publish = models.DateTimeField( auto_now=True)
status = models.CharField(max_length=10,choices=STATUS_CHOICES,default='draft')
image = models.ImageField(upload_to='page', blank=True, null=True)
def get_absolute_url(self):
return reverse('page_detail',
args=[self.slug])
class Meta:
ordering = ('page_title',)
def __str__(self):
return self.page_title
| from django.db import models
from django.contrib.auth.models import User
from ckeditor.fields import RichTextField
from django.utils import timezone
# Create your models here.
class Page(models.Model):
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
menu_title = models.CharField(max_length=200)
page_title = models.CharField(max_length=200)
slug = models.SlugField(max_length=200,unique_for_date='publish')
body = RichTextField()
show_in_menu = models.BooleanField(default=False)
publish = models.DateTimeField( auto_now=True)
status = models.CharField(max_length=10,choices=STATUS_CHOICES,default='draft')
image = models.ImageField(upload_to='page', blank=True, null=True)
class Meta:
ordering = ('page_title',)
def __str__(self):
return self.page_title
| mit | Python |
d8b29fd094a7a2d74c74e32b05a810930655fb47 | Fix raw_input() error in python 3 | marella/phython,marella/phython,marella/phython | src/modules/phython.py | src/modules/phython.py | import json
import runpy
import sys
def run():
args = sys.argv
if len(args) < 3:
raise Exception('Both module name and function name are required.')
module, function = args[1:3]
module = runpy.run_module(module)
if function not in module:
raise Exception(function + ' is not defined in ' + module['__file__'])
call(module[function])
def call(function):
# raw_input() is removed in python 3
try:
input = raw_input
except NameError:
pass
arguments = input().strip()
arguments = json.loads(arguments)
output = function(*arguments)
print(json.dumps(output))
run()
| import json
import runpy
import sys
def run():
args = sys.argv
if len(args) < 3:
raise Exception('Both module name and function name are required.')
module, function = args[1:3]
module = runpy.run_module(module)
if function not in module:
raise Exception(function + ' is not defined in ' + module['__file__'])
call(module[function])
def call(function):
arguments = raw_input().strip()
arguments = json.loads(arguments)
output = function(*arguments)
print(json.dumps(output))
run()
| mit | Python |
262bb53597268145f18dfb42fbb20fe7a37c6671 | Fix error under windows when generating a UID. | pyhmsa/pyhmsa | src/pyhmsa/type/uid.py | src/pyhmsa/type/uid.py | #!/usr/bin/env python
"""
================================================================================
:mod:`uid` -- Generate unique identifier id
================================================================================
.. module:: uid
:synopsis: Generate unique identifier id
.. inheritance-diagram:: pyhmsa.uid
"""
# Script information for the file.
__author__ = "Philippe T. Pinard"
__email__ = "philippe.pinard@gmail.com"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2013 Philippe T. Pinard"
__license__ = "GPL v3"
# Standard library modules.
import hashlib
import time
import uuid
import getpass
import socket
import binascii
try:
import winreg
except ImportError: # pragma: no cover
try:
import _winreg as winreg
except ImportError:
winreg = None
# Third party modules.
# Local modules.
# Globals and constants variables.
_REG_KEY = 'SOFTWARE\\HmsaUID'
def generate_uid():
"""
Generates a unique identifier id.
The method to generate the id was taken from the C implementation of the
HMSA lib.
"""
sha1 = hashlib.sha1()
# Current date/time
sha1.update(str(time.time()).encode('ascii'))
# Tick count
# Note: GetTickCount() not available in Python
sha1.update(str(time.clock()).encode('ascii'))
# Counter from registry (incremented here)
# Only on Windows
if winreg: # pragma: no cover
key = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, _REG_KEY,
0, winreg.KEY_READ | winreg.KEY_WRITE)
with key:
try:
lastuid, _ = winreg.QueryValueEx(key, "Counter")
lastuid += 1
except:
lastuid = 0
winreg.SetValueEx(key, 'Counter', 0, winreg.REG_DWORD, lastuid)
sha1.update(str(lastuid).encode('ascii'))
# MAC address
sha1.update(str(uuid.getnode()).encode('ascii'))
# User name
sha1.update(getpass.getuser().encode('ascii'))
# Computer name
sha1.update(socket.gethostname().encode('ascii'))
uid = sha1.hexdigest()
uid = uid[:16] # Take only the first 16 characters
return binascii.unhexlify(uid)
| #!/usr/bin/env python
"""
================================================================================
:mod:`uid` -- Generate unique identifier id
================================================================================
.. module:: uid
:synopsis: Generate unique identifier id
.. inheritance-diagram:: pyhmsa.uid
"""
# Script information for the file.
__author__ = "Philippe T. Pinard"
__email__ = "philippe.pinard@gmail.com"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2013 Philippe T. Pinard"
__license__ = "GPL v3"
# Standard library modules.
import hashlib
import time
import uuid
import getpass
import socket
import binascii
try:
import winreg
except ImportError: # pragma: no cover
try:
import _winreg as winreg
except ImportError:
winreg = None
# Third party modules.
# Local modules.
# Globals and constants variables.
_REG_KEY = 'SOFTWARE\\HmsaUID'
def generate_uid():
"""
Generates a unique identifier id.
The method to generate the id was taken from the C implementation of the
HMSA lib.
"""
sha1 = hashlib.sha1()
# Current date/time
sha1.update(str(time.time()).encode('ascii'))
# Tick count
# Note: GetTickCount() not available in Python
sha1.update(str(time.clock()).encode('ascii'))
# Counter from registry (incremented here)
# Only on Windows
if winreg: # pragma: no cover
key = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, _REG_KEY,
0, winreg.KEY_READ | winreg.KEY_WRITE)
with key:
lastuid, _ = winreg.QueryValueEx(key, "Counter")
lastuid += 1
winreg.SetValueEx(key, 'Counter', 0, winreg.REG_DWORD, lastuid)
sha1.update(str(lastuid).encode('ascii'))
# MAC address
sha1.update(str(uuid.getnode()).encode('ascii'))
# User name
sha1.update(getpass.getuser().encode('ascii'))
# Computer name
sha1.update(socket.gethostname().encode('ascii'))
uid = sha1.hexdigest()
uid = uid[:16] # Take only the first 16 characters
return binascii.unhexlify(uid)
| mit | Python |
c2f6f2c2ed32d715ff65ae332e83be5ff55238f5 | Disable startup.warm.chrome_signin on Android | Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,Just-D/chromium-1,Chilledheart/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Chilledheart/chromium,hgl888/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,chuan9/chromium-crosswalk | tools/perf/benchmarks/chrome_signin_startup.py | tools/perf/benchmarks/chrome_signin_startup.py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from measurements import startup
import page_sets
from telemetry import benchmark
class _StartupWarm(perf_benchmark.PerfBenchmark):
"""Measures warm startup time with a clean profile."""
options = {'pageset_repeat': 5}
@classmethod
def Name(cls):
return 'chrome_signin_starup'
def CreatePageTest(self, options):
return startup.Startup(cold=False)
@benchmark.Enabled('has tabs')
# On android logging in is done through system accounts workflow.
@benchmark.Disabled('android')
@benchmark.Disabled('reference') # crbug.com/499312
class SigninStartup(_StartupWarm):
"""Measures warm startup time of signing a profile into Chrome."""
page_set = page_sets.ChromeSigninPageSet
@classmethod
def Name(cls):
return 'startup.warm.chrome_signin'
| # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core import perf_benchmark
from measurements import startup
import page_sets
from telemetry import benchmark
class _StartupWarm(perf_benchmark.PerfBenchmark):
"""Measures warm startup time with a clean profile."""
options = {'pageset_repeat': 5}
@classmethod
def Name(cls):
return 'chrome_signin_starup'
def CreatePageTest(self, options):
return startup.Startup(cold=False)
@benchmark.Enabled('has tabs')
# crbug.com/499312
@benchmark.Disabled('reference')
class SigninStartup(_StartupWarm):
"""Measures warm startup time of signing a profile into Chrome."""
page_set = page_sets.ChromeSigninPageSet
@classmethod
def Name(cls):
return 'startup.warm.chrome_signin'
| bsd-3-clause | Python |
7726f50ee966d4f1ed542e6653907e2433b448b8 | set URI variable within conditional | jasonclark/python-samples | getFeed.py | getFeed.py | #!/usr/bin/env python
# get feed data from url
import urllib
import sys
import xml.dom.minidom
# URI of the feed
URI = sys.argv[1] if len(sys.argv) > 1 else 'http://www.npr.org/rss/rss.php?id=1019'
#URI = 'http://www.npr.org/rss/rss.php?id=1019'
#if len(sys.argv) > 1:
#URI = sys.argv[1]
# actual xml document
document = xml.dom.minidom.parse(urllib.urlopen(URI))
# create empty string to store information
info = ""
# parse feed elements
for item in document.getElementsByTagName('item'):
title = item.getElementsByTagName('title')[0].firstChild.data
link = item.getElementsByTagName('link')[0].firstChild.data
description = item.getElementsByTagName('description')[0].firstChild.data
# format string as html and display
info_str = '''<li><a title="%s" href="%s">%s</a></li>\n''' % \
(title.encode('UTF8','replace'),
link.encode('UTF8', 'replace'),
description.encode('UTF8','replace'))
# concatenate all html into one string for printing
info += info_str
# show result as HTML
print "<ul>"
print(info)
print "</ul>"
| #!/usr/bin/env python
# get feed data from url
import urllib
import sys
import xml.dom.minidom
# URI of the feed
#URI = 'https://www.npr.org/rss/rss.php?id=1019'
#allow URI to be passed to script
URI = sys.argv[1]
# actual xml document
document = xml.dom.minidom.parse(urllib.urlopen(URI))
# create empty string to store information
info = ""
# parse feed elements
for item in document.getElementsByTagName('item'):
title = item.getElementsByTagName('title')[0].firstChild.data
link = item.getElementsByTagName('link')[0].firstChild.data
description = item.getElementsByTagName('description')[0].firstChild.data
# format string as html and display
info_str = '''<li><a title="%s" href="%s">%s</a></li>\n''' % \
(title.encode('UTF8','replace'),
link.encode('UTF8', 'replace'),
description.encode('UTF8','replace'))
# concatenate all html into one string for printing
info += info_str
# check result
print "<ul>"
print(info)
print "</ul>"
| mit | Python |
5ba1ee67df42d00eb336cadd4748469121c17ad5 | Remove "settings" function | piotrekw/pirx | pirx/utils.py | pirx/utils.py | import os
def path(*p):
import __main__
project_root = os.path.dirname(os.path.realpath(__main__.__file__))
return os.path.join(project_root, *p)
| import os
def setting(name):
return name.upper()
def path(*p):
import __main__
project_root = os.path.dirname(os.path.realpath(__main__.__file__))
return os.path.join(project_root, *p)
| mit | Python |
051e7ea666aa987334a2243756a2c97484037b61 | Update boundaries to main_curve ones | M2-AAIS/BAD | plot_s_curve.py | plot_s_curve.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from numpy import array, log
import sys
import os
import matplotlib.animation as animation
fig = plt.figure()
inpath = sys.argv[1]
if os.path.isfile(inpath):
print('Visiting {}'.format(inpath))
filenames = [inpath]
else:
_filenames = os.listdir(inpath)
_filenames.sort()
filenames = [inpath + '/' + fname for fname in _filenames if '_tot.dat' in fname]
print('Visiting all files of {}'.format(inpath))
axline, = plt.plot(0, 0, 'o', label='')
def draw_once(filename):
x = []
y = []
if not 'tot.dat' in filename:
return ([0], [0])
else:
print('Visiting {}'.format(filename))
outfile = filename.replace('.dat', '.png')
for line in open(filename):
data = line.replace('\n', '').split()
try :
xData = float(data[0])
yData = float(data[1])
x.append(xData)
y.append(yData)
except ValueError:
pass
# this is a hack to get the x
arr = filename.split('/')[-1].split('_')
number = int(arr[2])
axline.set_xdata(log(x)/log(10))
axline.set_ydata(log(y)/log(10))
axline.set_label('$(T, \Sigma)_{'+str(number)+'}$')
plt.legend(loc='upper left')
return axline,
def init():
print('Initialisation')
plt.ylabel('$\log T$')
plt.xlabel('$\log \Sigma$')
plt.xlim(0.9, 3.1)
plt.ylim(5.5, 8)
plt.grid()
plt.legend()
if len(filenames) > 1:
ani = animation.FuncAnimation(fig, draw_once, filenames, init_func=init, interval=10)
ani.save('s_curve.mp4', writer='ffmpeg', fps=10, bitrate=10000, dpi=180)
plt.show()
else:
init()
draw_once(filenames[0])
plt.show()
# x, y = draw_once(filenames[2])
# plt.plot(x, y, 'o')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from numpy import array, log
import sys
import os
import matplotlib.animation as animation
fig = plt.figure()
inpath = sys.argv[1]
if os.path.isfile(inpath):
print('Visiting {}'.format(inpath))
filenames = [inpath]
else:
_filenames = os.listdir(inpath)
_filenames.sort()
filenames = [inpath + '/' + fname for fname in _filenames if '_tot.dat' in fname]
print('Visiting all files of {}'.format(inpath))
axline, = plt.plot(0, 0, 'o', label='')
def draw_once(filename):
x = []
y = []
if not 'tot.dat' in filename:
return ([0], [0])
else:
print('Visiting {}'.format(filename))
outfile = filename.replace('.dat', '.png')
for line in open(filename):
data = line.replace('\n', '').split()
try :
xData = float(data[0])
yData = float(data[1])
x.append(xData)
y.append(yData)
except ValueError:
pass
# this is a hack to get the x
arr = filename.split('/')[-1].split('_')
number = int(arr[2])
axline.set_xdata(log(x)/log(10))
axline.set_ydata(log(y)/log(10))
axline.set_label('$(T, \Sigma)_{'+str(number)+'}$')
plt.legend(loc='upper left')
return axline,
def init():
print('Initialisation')
plt.ylabel('$\log T$')
plt.xlabel('$\log \Sigma$')
plt.xlim(1.8, 3.2)
plt.ylim(6, 8)
plt.grid()
plt.legend()
if len(filenames) > 1:
ani = animation.FuncAnimation(fig, draw_once, filenames, init_func=init, interval=10)
ani.save('s_curve.mp4', writer='ffmpeg', fps=10, bitrate=10000, dpi=180)
plt.show()
else:
init()
draw_once(filenames[0])
plt.show()
# x, y = draw_once(filenames[2])
# plt.plot(x, y, 'o')
| mit | Python |
dde62a1bc100ba4be16489e4171f0c78593d95af | Add tests to http plugin | Muzer/smartbot,tomleese/smartbot,Cyanogenoid/smartbot,thomasleese/smartbot-old | plugins/http.py | plugins/http.py | import io
import requests
import unittest
class Plugin:
def on_command(self, bot, msg, stdin, stdout, reply):
url = None
if len(msg["args"]) >= 3:
url = msg["args"][2]
else:
url = stdin.read().strip()
if url:
headers = {"User-Agent": "SmartBot"}
page = requests.get(url, headers=headers)
print(page.text, file=stdout)
else:
print(self.on_help(), file=stdout)
def on_help(self):
return "Usage: http get <url>"
class Test(unittest.TestCase):
def setUp(self):
self.plugin = Plugin()
def test_search(self):
stdout = io.StringIO()
self.plugin.on_command(None, {"args": [None, "get", "http://google.com"]}, None, stdout, None)
self.assertNotEqual(self.plugin.on_help(), stdout.getvalue().strip())
def test_help(self):
self.assertTrue(self.plugin.on_help())
def test_no_args(self):
stdout = io.StringIO()
self.plugin.on_command(None, {"args": [None]}, stdout, stdout, None)
self.assertEqual(self.plugin.on_help(), stdout.getvalue().strip())
stdout = io.StringIO()
self.plugin.on_command(None, {"args": [None, None]}, stdout, stdout, None)
self.assertEqual(self.plugin.on_help(), stdout.getvalue().strip())
| import requests
import sys
class Plugin:
def on_command(self, bot, msg):
url = None
if len(sys.argv) >= 3:
url = sys.argv[2]
else:
url = sys.stdin.read().strip()
if url:
headers = {"User-Agent": "SmartBot"}
page = requests.get(url, headers=headers)
print(page.text)
else:
print(self.on_help())
def on_help(self):
return "Usage: http get <url>"
| mit | Python |
db1a951d7d1708546f1df1def92b6764135ff3cc | change the url base for lists | crateio/crate.web,crateio/crate.web | crate_project/urls.py | crate_project/urls.py | from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
import evaluator
evaluator.autodiscover
import ji18n.translate
ji18n.translate.patch()
from pinax.apps.account.openid_consumer import PinaxConsumer
from search.views import Search
handler500 = "pinax.views.server_error"
urlpatterns = patterns("",
url(r"^$", Search.as_view(), name="home"),
url(r"^admin/invite_user/$", "pinax.apps.signup_codes.views.admin_invite_user", name="admin_invite_user"),
url(r"^admin/", include(admin.site.urls)),
url(r"^about/", include("about.urls")),
url(r"^account/", include("pinax.apps.account.urls")),
url(r"^openid/", include(PinaxConsumer().urls)),
url(r"^admin_tools/", include("admin_tools.urls")),
url(r"^users/", include("lists.urls")),
url(r"^packages/", include("packages.urls")),
url(r"^stats/", include("packages.stats.urls")),
url(r"^help/", include("helpdocs.urls")),
url(r"^api/", include("crate_project.api_urls")),
url(r"^s/(?P<path>.+)?", "crate.views.simple_redirect", name="simple_redirect"),
url(r"^", include("search.urls")),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r"", include("staticfiles.urls")),
)
| from django.conf import settings
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
import evaluator
evaluator.autodiscover
import ji18n.translate
ji18n.translate.patch()
from pinax.apps.account.openid_consumer import PinaxConsumer
from search.views import Search
handler500 = "pinax.views.server_error"
urlpatterns = patterns("",
url(r"^$", Search.as_view(), name="home"),
url(r"^admin/invite_user/$", "pinax.apps.signup_codes.views.admin_invite_user", name="admin_invite_user"),
url(r"^admin/", include(admin.site.urls)),
url(r"^about/", include("about.urls")),
url(r"^account/", include("pinax.apps.account.urls")),
url(r"^openid/", include(PinaxConsumer().urls)),
url(r"^admin_tools/", include("admin_tools.urls")),
url(r"^packages/", include("packages.urls")),
url(r"^lists/", include("lists.urls")),
url(r"^stats/", include("packages.stats.urls")),
url(r"^help/", include("helpdocs.urls")),
url(r"^api/", include("crate_project.api_urls")),
url(r"^s/(?P<path>.+)?", "crate.views.simple_redirect", name="simple_redirect"),
url(r"^", include("search.urls")),
)
if settings.SERVE_MEDIA:
urlpatterns += patterns("",
url(r"", include("staticfiles.urls")),
)
| bsd-2-clause | Python |
8ff1206db08a53f5085f95eed3e85b8812723408 | validate url | littlezz/IslandCollection | core/analyzer.py | core/analyzer.py | from urllib import parse
from .islands import island_netloc_table, island_class_table, IslandNotDetectError
__author__ = 'zz'
def determine_island_name(url):
netloc = parse.urlparse(url).netloc
for url, name in island_netloc_table.items():
if url == netloc:
return name
else:
raise IslandNotDetectError('netloc is {}'.format(netloc))
def init_start_url(url):
island_name = determine_island_name(url)
island_class = island_class_table[island_name]
return island_class.init_start_url(url)
def validate_url(url):
"""
:param url:
:return:status code
status code ---> info
---------------------------
0 success
1 no scheme
2 island not support
"""
p = parse.urlparse(url)
if not p.sheme:
return 1
try:
determine_island_name(url)
except IslandNotDetectError:
return 2
else:
return 0
class Analyzer:
def __init__(self, res, max_page):
self.url = res.url
self.res = res
self.max_page = max_page
self.island_name = determine_island_name(self.url)
self._island = self._create_island_obj()
self.divs = self.split_page()
def _create_island_obj(self):
island_class = island_class_table[self.island_name]
return island_class(self.url, self.res)
def split_page(self):
return self._island.island_split_page()
def filter_divs(self, response_gt, *args):
return [div for div in self.divs if div.response_num>response_gt]
def next_page(self, current_page_url=None):
return self._island.next_page(self.max_page, current_page_url)
| from urllib import parse
from .islands import island_netloc_table, island_class_table, IslandNotDetectError
__author__ = 'zz'
def determine_island_name(url):
netloc = parse.urlparse(url).netloc
for url, name in island_netloc_table.items():
if url == netloc:
return name
else:
raise IslandNotDetectError('netloc is {}'.format(netloc))
def init_start_url(url):
island_name = determine_island_name(url)
island_class = island_class_table[island_name]
return island_class.init_start_url(url)
class Analyzer:
def __init__(self, res, max_page):
self.url = res.url
self.res = res
self.max_page = max_page
self.island_name = determine_island_name(self.url)
self._island = self._create_island_obj()
self.divs = self.split_page()
def _create_island_obj(self):
island_class = island_class_table[self.island_name]
return island_class(self.url, self.res)
def split_page(self):
return self._island.island_split_page()
def filter_divs(self, response_gt, *args):
return [div for div in self.divs if div.response_num>response_gt]
def next_page(self, current_page_url=None):
return self._island.next_page(self.max_page, current_page_url)
| mit | Python |
dd76e1b70f63cb5aa2c720c734b244789426abb8 | Add artful; stop building for yakkety | mit-athena/build-system | dabuildsys/config.py | dabuildsys/config.py | #!/usr/bin/python
"""
Shared configuration-level variables.
"""
from glob import glob
import os
import os.path
# If you edit these releases and tags, please also update
# debian-versions.sh in scripts.git (checked out at /mit/debathena/bin).
debian_releases = ['wheezy', 'jessie', 'stretch']
ubuntu_releases = ['precise', 'trusty', 'xenial', 'zesty', 'artful']
releases = debian_releases + ubuntu_releases
debian_tags = {
'wheezy' : 'debian7.0',
'jessie' : 'debian8.0',
'stretch' : 'debian9.0~0.1',
}
ubuntu_tags = {
'precise' : 'ubuntu12.04',
'trusty' : 'ubuntu14.04',
'xenial' : 'ubuntu16.04~0.1',
'zesty' : 'ubuntu17.04~0.1',
'artful' : 'ubuntu17.10~0.1',
}
release_tags = dict(debian_tags.items() + ubuntu_tags.items())
package_search_paths = ['athena/*', 'debathena/*', 'third/*']
package_root = os.environ['DEBATHENA_CHECKOUT_HOME']
package_paths = [ os.path.join(package_root, path) for path in package_search_paths ]
package_paths = sum(map(glob, package_paths), [])
package_map = { path.split('/')[-1] : path for path in package_paths }
arches = ['i386', 'amd64', 'armel', 'armhf', 'sparc']
builders = {
'i386' : 'local',
'amd64' : 'local',
'armel' : 'hecatoncheires.mit.edu',
'armhf' : 'hecatoncheires.mit.edu',
'sparc' : 'package-fusion.mit.edu',
}
def arch_for_release(arch, release):
"Check if we build the specified arch for given suite."
# We currently don't have the infrastructure for others
return arch == 'i386' or arch == 'amd64'
release_arches = { release : [arch for arch in arches if arch_for_release(arch, release)] for release in releases }
# Arch on which all packages are built
all_arch = 'amd64'
source_package_dir = os.environ['DEBATHENA_SOURCE_DIR']
binary_package_dir = os.environ['DEBATHENA_BINARY_DIR']
orig_tarball_dir = os.environ['DEBATHENA_ORIG_DIR']
apt_root_dir = os.environ['DEBATHENA_APT_DIR']
lock_file_path = os.environ['DEBATHENA_LOCK_FILE']
setup_hook_path = os.environ['DEBATHENA_SETUP_HOOK']
upstream_tarball_chroot = 'upstream-tarball-area'
release_tag_key = "0D8A9E8F"
| #!/usr/bin/python
"""
Shared configuration-level variables.
"""
from glob import glob
import os
import os.path
# If you edit these releases and tags, please also update
# debian-versions.sh in scripts.git (checked out at /mit/debathena/bin).
debian_releases = ['wheezy', 'jessie', 'stretch']
ubuntu_releases = ['precise', 'trusty', 'xenial', 'yakkety', 'zesty']
releases = debian_releases + ubuntu_releases
debian_tags = {
'wheezy' : 'debian7.0',
'jessie' : 'debian8.0',
'stretch' : 'debian9.0~0.1',
}
ubuntu_tags = {
'precise' : 'ubuntu12.04',
'trusty' : 'ubuntu14.04',
'xenial' : 'ubuntu16.04~0.1',
'yakkety' : 'ubuntu16.10~0.1',
'zesty' : 'ubuntu17.04~0.1',
}
release_tags = dict(debian_tags.items() + ubuntu_tags.items())
package_search_paths = ['athena/*', 'debathena/*', 'third/*']
package_root = os.environ['DEBATHENA_CHECKOUT_HOME']
package_paths = [ os.path.join(package_root, path) for path in package_search_paths ]
package_paths = sum(map(glob, package_paths), [])
package_map = { path.split('/')[-1] : path for path in package_paths }
arches = ['i386', 'amd64', 'armel', 'armhf', 'sparc']
builders = {
'i386' : 'local',
'amd64' : 'local',
'armel' : 'hecatoncheires.mit.edu',
'armhf' : 'hecatoncheires.mit.edu',
'sparc' : 'package-fusion.mit.edu',
}
def arch_for_release(arch, release):
"Check if we build the specified arch for given suite."
# We currently don't have the infrastructure for others
return arch == 'i386' or arch == 'amd64'
release_arches = { release : [arch for arch in arches if arch_for_release(arch, release)] for release in releases }
# Arch on which all packages are built
all_arch = 'amd64'
source_package_dir = os.environ['DEBATHENA_SOURCE_DIR']
binary_package_dir = os.environ['DEBATHENA_BINARY_DIR']
orig_tarball_dir = os.environ['DEBATHENA_ORIG_DIR']
apt_root_dir = os.environ['DEBATHENA_APT_DIR']
lock_file_path = os.environ['DEBATHENA_LOCK_FILE']
setup_hook_path = os.environ['DEBATHENA_SETUP_HOOK']
upstream_tarball_chroot = 'upstream-tarball-area'
release_tag_key = "0D8A9E8F"
| mit | Python |
b60d4d5a93f0f24e119f856c7877b8d386d0eee5 | make sure to return the state | neogenix/daikon | daikon/connection.py | daikon/connection.py | import requests
import anyjson as json
import urlparse
class Connection(object):
    """Thin HTTP client for one elasticsearch node.

    Cluster state and health are fetched lazily through the REST API and
    cached on the instance for the lifetime of the object.
    """

    # Lazily-populated caches backing the ``state`` / ``health`` properties.
    _state = None
    _health = None

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.url = 'http://%s:%s' % (host, port)

    def get(self, path, raise_for_status=True):
        """GET *path* relative to the node URL, optionally raising on HTTP errors."""
        response = requests.get(urlparse.urljoin(self.url, path))
        if raise_for_status:
            response.raise_for_status()
        return response

    def post(self, path, data=None, raise_for_status=True):
        """POST *data* to *path* relative to the node URL."""
        response = requests.post(urlparse.urljoin(self.url, path), data=data)
        if raise_for_status:
            response.raise_for_status()
        return response

    def delete(self, path, raise_for_status=True):
        """DELETE *path* relative to the node URL."""
        response = requests.delete(urlparse.urljoin(self.url, path))
        if raise_for_status:
            response.raise_for_status()
        return response

    @property
    def health(self):
        """Per-index cluster health, fetched once and cached."""
        if self._health is None:
            payload = json.loads(self.get('/_cluster/health?level=indices').content)
            self._health = payload[u'indices']
        return self._health

    @property
    def state(self):
        """Per-index cluster metadata, fetched once and cached."""
        if self._state is None:
            payload = json.loads(self.get('/_cluster/state').content)
            self._state = payload[u'metadata'][u'indices']
        return self._state
| import requests
import anyjson as json
import urlparse
class Connection(object):
    """HTTP connection to a single elasticsearch node.

    Cluster state and health are fetched lazily via the REST API and
    cached on the instance for the lifetime of the object.
    """

    # Lazily-populated caches for the ``state`` and ``health`` properties.
    _state = None
    _health = None

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.url = 'http://%s:%s' % (host, port)

    def get(self, path, raise_for_status=True):
        """GET *path* relative to the node URL; optionally raise on HTTP errors."""
        url = urlparse.urljoin(self.url, path)
        req = requests.get(url)
        if raise_for_status:
            req.raise_for_status()
        return req

    def post(self, path, data=None, raise_for_status=True):
        """POST *data* to *path* relative to the node URL."""
        url = urlparse.urljoin(self.url, path)
        req = requests.post(url, data=data)
        if raise_for_status:
            req.raise_for_status()
        return req

    def delete(self, path, raise_for_status=True):
        """DELETE *path* relative to the node URL."""
        url = urlparse.urljoin(self.url, path)
        req = requests.delete(url)
        if raise_for_status:
            req.raise_for_status()
        return req

    @property
    def health(self):
        """Per-index cluster health, fetched once and cached."""
        if self._health is not None:
            return self._health
        path = '/_cluster/health?level=indices'
        health = json.loads(self.get(path).content)
        self._health = health[u'indices']
        return self._health

    @property
    def state(self):
        """Per-index cluster metadata, fetched once and cached."""
        if self._state is not None:
            return self._state
        path = '/_cluster/state'
        state = json.loads(self.get(path).content)
        self._state = state[u'metadata'][u'indices']
        # BUG FIX: the cached value was assigned but never returned, so the
        # very first access always yielded None.
        return self._state
self._state = state[u'metadata'][u'indices']
| apache-2.0 | Python |
bc1d19800d58291f4c4392d041a7913602fe8c7d | Fix sorting dict items in python 3 | jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger | dallinger/jupyter.py | dallinger/jupyter.py | from ipywidgets import widgets
from jinja2 import Template
from traitlets import (
observe,
Unicode,
)
from dallinger.config import get_config
header_template = Template(u"""
<h2>{{ name }}</h2>
<div>Status: {{ status }}</div>
{% if app_id %}<div>App ID: {{ app_id }}</div>{% endif %}
""")
config_template = Template(u"""
<table style="min-width: 50%">
{% for k, v in config %}
<tr>
<th>{{ k }}</th>
<td>{{ v }}</td>
</tr>
{% endfor %}
</table>
""")
class ExperimentWidget(widgets.VBox):
    """Jupyter widget displaying an experiment's status and configuration."""
    # Shown in the header; changing it re-renders the widget via @observe.
    status = Unicode('Unknown')
    def __init__(self, exp):
        self.exp = exp
        super(ExperimentWidget, self).__init__()
        self.render()
    @observe('status')
    def render(self, change=None):
        """Rebuild the widget's children: a header plus a configuration tab."""
        header = widgets.HTML(
            header_template.render(
                name=self.exp.task,
                status=self.status,
                app_id=self.exp.app_id,
            ),
        )
        config = get_config()
        if config.ready:
            # list(...) so the items can be sorted in place (py3's items()
            # view has no .sort()).
            config_items = list(config.as_dict().items())
            config_items.sort()
            config_tab = widgets.HTML(
                config_template.render(config=config_items)
            )
        else:
            config_tab = widgets.HTML('Not loaded.')
        tabs = widgets.Tab(children=[config_tab])
        tabs.set_title(0, 'Configuration')
        self.children = [header, tabs]
| from ipywidgets import widgets
from jinja2 import Template
from traitlets import (
observe,
Unicode,
)
from dallinger.config import get_config
header_template = Template(u"""
<h2>{{ name }}</h2>
<div>Status: {{ status }}</div>
{% if app_id %}<div>App ID: {{ app_id }}</div>{% endif %}
""")
config_template = Template(u"""
<table style="min-width: 50%">
{% for k, v in config %}
<tr>
<th>{{ k }}</th>
<td>{{ v }}</td>
</tr>
{% endfor %}
</table>
""")
class ExperimentWidget(widgets.VBox):
    """Jupyter widget displaying an experiment's status and configuration."""
    # Shown in the header; changing it re-renders the widget via @observe.
    status = Unicode('Unknown')
    def __init__(self, exp):
        self.exp = exp
        super(ExperimentWidget, self).__init__()
        self.render()
    @observe('status')
    def render(self, change=None):
        """Rebuild the widget's children: a header plus a configuration tab."""
        header = widgets.HTML(
            header_template.render(
                name=self.exp.task,
                status=self.status,
                app_id=self.exp.app_id,
            ),
        )
        config = get_config()
        if config.ready:
            # BUG FIX: dict.items() returns a view on Python 3, which has no
            # .sort() method; build a sorted list instead.
            config_items = sorted(config.as_dict().items())
            config_tab = widgets.HTML(
                config_template.render(config=config_items)
            )
        else:
            config_tab = widgets.HTML('Not loaded.')
        tabs = widgets.Tab(children=[config_tab])
        tabs.set_title(0, 'Configuration')
        self.children = [header, tabs]
| mit | Python |
020bca819d0e6e2d40136c3e819383baab11699f | return split in LDA. | mostafa-mahmoud/HyPRec,mostafa-mahmoud/sahwaka | lib/LDA.py | lib/LDA.py | #!/usr/bin/env python
import numpy
from lib.content_based import ContentBased
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
class LDARecommender(ContentBased):
    """Content-based recommender that derives topics via Latent Dirichlet Allocation."""
    def __init__(self, abstracts, evaluator, config, verbose=False):
        super(LDARecommender, self).__init__(abstracts, evaluator, config, verbose)
    def train(self, n_iter=5):
        """Fit an online LDA model on the term-frequency matrix of the abstracts.

        :param n_iter: maximum number of LDA iterations.
        """
        term_freq_vectorizer = CountVectorizer(max_df=0.95, min_df=2, stop_words='english', max_features=self.n_items)
        term_freq = term_freq_vectorizer.fit_transform(self.abstracts)
        lda = LatentDirichletAllocation(n_topics=self.n_factors, max_iter=n_iter,
                                        learning_method='online', learning_offset=50., random_state=0)
        # NOTE(review): fit_transform returns the document-topic matrix; the
        # attribute name suggests a word-topic distribution -- confirm.
        self.word_distribution = lda.fit_transform(term_freq)
    def split(self):
        """Delegate the train/test split to the base class and return its result."""
        return super(LDARecommender, self).split()
    def set_config(self, config):
        """
        Set the hyperparameters of the algorithm.
        """
        super(LDARecommender, self).set_config(config)
    def get_word_distribution(self):
        """
        @returns a matrix of the words x topics distribution.
        """
        return self.word_distribution
| #!/usr/bin/env python
import numpy
from lib.content_based import ContentBased
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
class LDARecommender(ContentBased):
    """Content-based recommender that derives topics via Latent Dirichlet Allocation."""
    def __init__(self, abstracts, evaluator, config, verbose=False):
        super(LDARecommender, self).__init__(abstracts, evaluator, config, verbose)
    def train(self, n_iter=5):
        """Fit an online LDA model on the term-frequency matrix of the abstracts.

        :param n_iter: maximum number of LDA iterations.
        """
        term_freq_vectorizer = CountVectorizer(max_df=0.95, min_df=2, stop_words='english', max_features=self.n_items)
        term_freq = term_freq_vectorizer.fit_transform(self.abstracts)
        lda = LatentDirichletAllocation(n_topics=self.n_factors, max_iter=n_iter,
                                        learning_method='online', learning_offset=50., random_state=0)
        # NOTE(review): fit_transform returns the document-topic matrix; the
        # attribute name suggests a word-topic distribution -- confirm.
        self.word_distribution = lda.fit_transform(term_freq)
    def split(self):
        # BUG FIX: the base-class result was computed but never returned,
        # so callers always received None.
        return super(LDARecommender, self).split()
    def set_config(self, config):
        """
        Set the hyperparameters of the algorithm.
        """
        super(LDARecommender, self).set_config(config)
    def get_word_distribution(self):
        """
        @returns a matrix of the words x topics distribution.
        """
        return self.word_distribution
| apache-2.0 | Python |
6273aa694516cb38bdebc80886fa78497473b275 | update ip plugin | BruceZhang1993/PyIrcBot,BruceZhang1993/PyIrcBot | plugins/ip.py | plugins/ip.py | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------
# Author: Bruce Zhang
# Email: zy183525594@163.com
# Version: 0.1
# -----------------------------
'''ip lookup plugin'''
import re
import json
import requests
def ip(args, nick, channel, c, e):
    """Handle the ``ip`` command.

    With arguments, echo them back to the caller; without arguments,
    geo-locate the caller's own host via :func:`_getip`.
    """
    if args:
        return "%s: %s" % (nick, args)
    return "%s: %s" % (nick, _getip(e.source.host))
def _getip(ip):
if re.match(r'\d{1-3}.\d{1-3}.\d{1-3}.\d{1-3}', ip):
ret = requests.get("http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=%s" % ip)
jret = json.loads(ret.text)
return "%s %s %s %s %s" % (jret['country'], jret['province'], jret['city'], jret['district'], jret['isp'])
else:
return "查询失败!检查是否已隐身或IP格式不正确!- 暂只支持IPv4地址"
| # !/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------
# Author: Bruce Zhang
# Email: zy183525594@163.com
# Version: 0.1
# -----------------------------
'''ip lookup plugin'''
import re
import json
import requests
def ip(args, nick, channel, c, e):
    """Handle the ``ip`` command: geo-locate the caller's own host.

    Returns False (no reply) when arguments are given; lookups of
    arbitrary addresses are not supported in this version.
    """
    if not args:
        return "%s: %s" % (nick, _getip(e.source.host))
    else:
        return False
def _getip(ip):
if re.match(r'\d{1-3}.\d{1-3}.\d{1-3}.\d{1-3}', ip):
ret = requests.get("http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=%s" % ip)
jret = json.loads(ret.text)
return "%s %s %s %s %s" % (jret['country'], jret['province'], jret['city'], jret['district'], jret['isp'])
else:
return "查询失败!检查是否已隐身!"
| mit | Python |
1c01080116a94e1dcd30afa143454818a9aeaf31 | change version to represent actual version | androguard/androguard,shuxin/androguard,androguard/androguard,reox/androguard,huangtao2003/androguard | androguard/__init__.py | androguard/__init__.py | # The current version of Androguard
# Please use only this variable in any scripts,
# to keep the version number the same everywhere.
__version__ = "3.1.0-rc1"
| # The current version of Androguard
# Please use only this variable in any scripts,
# to keep the version number the same everywhere.
__version__ = "3.1.0"
| apache-2.0 | Python |
965f3ed4f4723547b69f9430fc5efc097c864579 | Remove extraneous spaces. | andybalaam/pepper,andybalaam/pepper,andybalaam/pepper,andybalaam/pepper,andybalaam/pepper | src/test/testparser.py | src/test/testparser.py |
from cStringIO import StringIO
from nose.tools import *
from tokenutils import Iterable2TokenStream, make_token
from libeeyore.functionvalues import *
from libeeyore.languagevalues import *
from libeeyore.values import *
from parse import EeyoreLexer
from parse import EeyoreParser
from parse import EeyoreTreeWalker
def _parse( tokens ):
    """Parse *tokens* into an Eeyore value.

    Feeds the token iterable to the generated parser, then walks the
    resulting AST with the tree walker and returns the value it builds.
    """
    parser = EeyoreParser.Parser( Iterable2TokenStream( tokens ) )
    parser.program()
    walker = EeyoreTreeWalker.Walker()
    return walker.functionCall( parser.getAST() )
def test_hello_world():
    """A print("...") token stream parses to a one-argument function call."""
    value = _parse( (
        make_token( "print", EeyoreLexer.SYMBOL ),
        make_token( "(", EeyoreLexer.LPAREN ),
        make_token( "Hello, world!", EeyoreLexer.STRING ),
        make_token( ")", EeyoreLexer.RPAREN ),
        ) )
    assert_equal( value.__class__, EeyFunctionCall )
    assert_equal( value.func_name, "print" )
    # The callee is an unresolved symbol named "print".
    func = value.func
    assert_equal( func.__class__, EeySymbol )
    assert_equal( func.symbol_name, "print" )
    # Exactly one argument: the string literal.
    args = value.args
    assert_equal( len( args ), 1 )
    assert_equal( args[0].__class__, EeyString )
    assert_equal( args[0].value, "Hello, world!" )
def test_import():
    """An `import sys` token stream parses to an EeyImport of that module."""
    value = _parse( (
        make_token( "import", EeyoreLexer.LITERAL_import ),
        make_token( "sys", EeyoreLexer.SYMBOL ),
        ) )
    assert_equal( value.__class__, EeyImport )
    assert_equal( value.module_name, "sys" )
|
from cStringIO import StringIO
from nose.tools import *
from tokenutils import Iterable2TokenStream, make_token
from libeeyore.functionvalues import *
from libeeyore.languagevalues import *
from libeeyore.values import *
from parse import EeyoreLexer
from parse import EeyoreParser
from parse import EeyoreTreeWalker
def _parse( tokens ):
parser = EeyoreParser.Parser( Iterable2TokenStream( tokens ) )
parser.program();
walker = EeyoreTreeWalker.Walker()
return walker.functionCall( parser.getAST() )
def test_hello_world():
value = _parse( (
make_token( "print", EeyoreLexer.SYMBOL ),
make_token( "(", EeyoreLexer.LPAREN ),
make_token( "Hello, world!", EeyoreLexer.STRING ),
make_token( ")", EeyoreLexer.RPAREN ),
) )
assert_equal( value.__class__, EeyFunctionCall )
assert_equal( value.func_name, "print" )
func = value.func
assert_equal( func.__class__, EeySymbol )
assert_equal( func.symbol_name, "print" )
args = value.args
assert_equal( len( args ), 1 )
assert_equal( args[0].__class__, EeyString )
assert_equal( args[0].value, "Hello, world!" )
def test_import():
value = _parse( (
make_token( "import", EeyoreLexer.LITERAL_import ),
make_token( "sys", EeyoreLexer.SYMBOL ),
) )
assert_equal( value.__class__, EeyImport )
assert_equal( value.module_name, "sys" )
| mit | Python |
5a85c1588d90524289af1d6f0cce99bf842dae5a | Fix a hack in the echo client. | CheeseLord/warts,CheeseLord/warts | src/test_echoclient.py | src/test_echoclient.py | import os
from twisted.internet import task, stdio, reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import Int16StringReceiver, LineReceiver
# NOTE(review): `theClient` appears unused after the deferred-based
# refactor -- candidate for removal.
theClient = None
class EchoClient(Int16StringReceiver):
    """Length-prefixed string protocol that announces itself via a Deferred."""
    def __init__(self, onClientConnect):
        # Apparently Int16StringReceiver doesn't have an __init__.
        # Int16StringReceiver.__init__(self)
        self.onClientConnect = onClientConnect
    def connectionMade(self):
        # Fire the deferred so interested parties (the stdio handler) learn
        # that the connection is up and can start sending through us.
        self.onClientConnect.callback(self)
        # self.sendString("Hello, world!")
    def stringReceived(self, line):
        # TODO: Probably this should go through the StdioHandler rather than
        # calling print directly....
        print "[receive]", line
class EchoClientFactory(ClientFactory):
    """Builds EchoClient protocols and exposes a `done` deferred for shutdown."""
    protocol = EchoClient
    def __init__(self, onClientConnect):
        # Fired (or errbacked) when the connection finishes or fails.
        self.done = Deferred()
        self.onClientConnect = onClientConnect
    def clientConnectionFailed(self, connector, reason):
        print "connection failed:", reason.getErrorMessage()
        self.done.errback(reason)
    def clientConnectionLost(self, connector, reason):
        print "connection lost:", reason.getErrorMessage()
        self.done.callback(None)
    def buildProtocol(self, addr):
        # TODO Add more logic to prevent this from happening twice??
        return self.protocol(self.onClientConnect)
class StdioHandler(LineReceiver):
    """Reads lines from stdin and forwards them to the connected EchoClient."""
    delimiter = os.linesep
    def __init__(self, onClientConnect):
        # Wait for the network connection before accepting input for it.
        onClientConnect.addCallback(self.connectedToServer)
        # TODO: Should maybe have an errback as well?
        self.client = None
    def connectedToServer(self, client):
        # Deferred callback: remember the protocol so lineReceived can send.
        self.client = client
        print "Successfully connected to server; you may now type messages."
    def connectionMade(self):
        self.sendLine("Stdio handler created, yay!")
    def lineReceived(self, line):
        if self.client is not None:
            self.client.sendString(line)
            self.sendLine("[send] {}".format(line))
        else:
            # Typed before the TCP connection came up: drop with a warning.
            self.sendLine("[warning] message '{}' ignored; not connected to " \
                          "server.".format(line))
def runEchoClientHelper(reactor, host, port):
    """Wire stdio to a TCP echo client; returns a deferred that fires on disconnect."""
    onClientConnect = Deferred()
    stdio.StandardIO(StdioHandler(onClientConnect))
    factory = EchoClientFactory(onClientConnect)
    reactor.connectTCP(host, port, factory)
    return factory.done
def runEchoClient(host, port):
    """Run the echo client until the connection ends (task.react manages the reactor)."""
    task.react(runEchoClientHelper, (host, port))
| #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import print_function
import os
from twisted.internet import task, stdio, reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import Int16StringReceiver, LineReceiver
theClient = None
class EchoClient(Int16StringReceiver):
def __init__(self):
# Apparently Int16StringReceiver doesn't have an __init__.
# Int16StringReceiver.__init__(self)
# FIXME FIXME FIXME: This is a terrible hack, which doesn't even quite
# work. If you type something before we've finished connecting, then
# the stdio handler will try to send it using theClient even though
# theClient isn't actually initialized yet, so you'll get:
# AttributeError: 'NoneType' object has no attribute 'sendString'
# Really I think we should be setting up a deferred that waits to
# create the stdio handler until the client object is already created,
# so that the stdio handler can safely reference the client object from
# the start. But I'm not really sure how to do that, so for now we get
# this hack instead.
global theClient
theClient = self
def connectionMade(self):
pass
# self.sendString("Hello, world!")
def stringReceived(self, line):
# TODO: Probably this should go through the StdioHandler rather than
# calling print directly....
print("[receive]", line)
class EchoClientFactory(ClientFactory):
protocol = EchoClient
def __init__(self):
self.done = Deferred()
def clientConnectionFailed(self, connector, reason):
print('connection failed:', reason.getErrorMessage())
self.done.errback(reason)
def clientConnectionLost(self, connector, reason):
print('connection lost:', reason.getErrorMessage())
self.done.callback(None)
class StdioHandler(LineReceiver):
delimiter = os.linesep
def connectionMade(self):
self.sendLine("Stdio handler created, yay!")
def lineReceived(self, line):
theClient.sendString(line)
self.sendLine("[send] {}".format(line))
def runEchoClientHelper(reactor, host, port):
factory = EchoClientFactory()
reactor.connectTCP(host, port, factory)
stdio.StandardIO(StdioHandler())
return factory.done
def runEchoClient(host, port):
task.react(runEchoClientHelper, (host, port))
| mit | Python |
d449e9a81687de41d43a59a10eecf335dce9e30a | Use transaction.atomic() to manage transactions. | scibi/django-teryt,ad-m/django-teryt,ad-m/django-teryt,scibi/django-teryt | teryt/management/commands/teryt_parse.py | teryt/management/commands/teryt_parse.py | from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, DatabaseError, IntegrityError
from optparse import make_option
from teryt.models import (
RodzajMiejscowosci, JednostkaAdministracyjna, Miejscowosc, Ulica
)
from teryt.utils import parse
import os.path
class Command(BaseCommand):
    """Management command that imports TERYT data from GUS XML files."""

    args = '[xml file list]'
    help = 'Import TERYT data from XML files prepared by GUS'
    option_list = BaseCommand.option_list + (
        make_option('--update',
                    action='store_true',
                    dest='update',
                    default=False,
                    # STRING FIX: "exisitng" -> "existing".
                    help='Update existing data'),
    )

    def handle(self, *args, **options):
        """Import each named file inside its own atomic transaction."""
        # With --update we save over existing rows instead of forcing inserts.
        force_ins = not options['update']
        # Map each expected GUS file name to the model it populates.
        fn_dict = {
            'WMRODZ.xml': RodzajMiejscowosci,
            'TERC.xml': JednostkaAdministracyjna,
            'SIMC.xml': Miejscowosc,
            'ULIC.xml': Ulica,
        }
        if not args:
            raise CommandError('At least 1 file name required')
        for a in args:
            try:
                c = fn_dict[os.path.basename(a)]
            except KeyError as e:
                raise CommandError('Unknown filename: {}'.format(e))
            try:
                # Mark everything inactive, then (re)activate the entries
                # present in the file -- all or nothing per file.
                with transaction.atomic():
                    c.objects.all().update(aktywny=False)
                    for vals in parse(a):
                        t = c()
                        t.set_val(vals)
                        t.aktywny = True
                        t.save(force_insert=force_ins)
            except IntegrityError as e:
                raise CommandError("Database integrity error: {}".format(e))
            except DatabaseError as e:
                # STRING FIX: the adjacent literals previously concatenated
                # to "...migrate beforeimporting data!" (missing space).
                raise CommandError("General database error: {}\n"
                                   "Make sure you run syncdb or migrate before "
                                   "importing data!".format(e))
| from django.core.management.base import BaseCommand, CommandError
from django.db import transaction, DatabaseError, IntegrityError
from optparse import make_option
from teryt.models import (
RodzajMiejscowosci, JednostkaAdministracyjna, Miejscowosc, Ulica
)
from teryt.utils import parse
import os.path
class Command(BaseCommand):
args = '[xml file list]'
help = 'Import TERYT data from XML files prepared by GUS'
option_list = BaseCommand.option_list + (
make_option('--update',
action='store_true',
dest='update',
default=False,
help='Update exisitng data'),
)
def handle(self, *args, **options):
force_ins = not options['update']
transaction.set_autocommit(False)
fn_dict = {
'WMRODZ.xml': RodzajMiejscowosci,
'TERC.xml': JednostkaAdministracyjna,
'SIMC.xml': Miejscowosc,
'ULIC.xml': Ulica,
}
if not args:
raise CommandError('At least 1 file name required')
for a in args:
try:
c = fn_dict[os.path.basename(a)]
except KeyError as e:
raise CommandError('Unknown filename: {}'.format(e))
try:
c.objects.all().update(aktywny=False)
for vals in parse(a):
t = c()
t.set_val(vals)
t.aktywny = True
t.save(force_insert=force_ins)
transaction.commit()
except IntegrityError as e:
transaction.rollback()
raise CommandError("Database integrity error: {}".format(e))
except DatabaseError as e:
transaction.rollback()
raise CommandError("General database error: {}\n"
"Make sure you run syncdb or migrate before"
"importing data!".format(e))
| mit | Python |
223ffba460867f566dec8ae71619e0ca96249f72 | Add doc string to git_manager.py | tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes | src/utils/code_etl/git_manager.py | src/utils/code_etl/git_manager.py | #!/usr/bin/env python
"""Provides a Repository class to handle the downloading and removal of a
github repository.
This class should be used as follows:
with Repository("lab41/hermes") as hermes_repo:
# do_stuff()
The repository is downloaded to a temporary directory when the with block
enters, and is removed when the with block exits even if there is an error.
"""
import tempfile
import shutil
import subprocess
class Repository(object):
    """Manage the download and cleanup of a github.com git repository.

    Given the name of a github repository, this class will download it to a
    local, temporary directory. The directory will be cleaned up at the end.
    The correct way to use this class is with a with statement as follows:

        with Repository("lab41/hermes") as hermes_repo:
            pass

    This insures that the temporary directory is cleaned up regardless of
    exceptions.

    Attributes:
        name (str): The name of the github repository, for example
            "lab41/hermes"
        url (str): The url of the git repository.
        tempdir (str): The directory which houses the repository.
    """
    def __init__(self, name):
        """Initialize the class given a github repository name.

        Args:
            name (str): The name of the github repository, for example
                "lab41/hermes"
        """
        self.name = name
        self.tempdir = tempfile.mkdtemp()
        self.url = "https://github.com/" + self.name + ".git"
        try:
            self.__clone_remote()
        except Exception:
            # BUG FIX: if the clone fails, __exit__ never runs (the with
            # statement's expression raised before the block was entered),
            # so remove the temporary directory here to avoid leaking it.
            shutil.rmtree(self.tempdir, ignore_errors=True)
            raise

    # enter and exit are called when the class is used in a "with" clause,
    # like:
    #
    # with Repository("lab41/hermes") as hermes_repo:
    #     pass
    #
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Clean up the directory
        shutil.rmtree(self.tempdir)

    def __clone_remote(self):
        # Clone the remote repository into the temporary directory; raises
        # CalledProcessError if git exits non-zero.
        command = [
            "git",
            "clone",
            "--",
            self.url,
            self.tempdir,
        ]
        subprocess.check_call(command)
| #!/usr/bin/env python
"""
"""
import tempfile
import shutil
import subprocess
class Repository(object):
"""Manage the download and cleanup of a github.com git repository.
Given the name of a github repository, this class will download it to a
local, temporary directory. The directory will be cleaned up at the end.
The correct way to use this class is with a with statement as follows:
with Repository("lab41/hermes") as hermes_repo:
pass
This insures that the temporary directory is cleaned up regardless of
exceptions.
Attributes:
name (str): The name of the github repository, for example
"lab41/hermes"
url (str): The url of the git repository.
tempdir (str): The directory which houses the repository.
"""
def __init__(self, name):
"""Initialize the class given a github repository name.
Args:
name (str): The name of the github repository, for example
"lab41/hermes"
"""
self.name = name
self.tempdir = tempfile.mkdtemp()
self.url = "https://github.com/" + self.name + ".git"
self.__clone_remote()
# enter and exit are called when the class is used in a "with" clause,
# like:
#
# with Repository("lab41/hermes") as hermes_repo:
# pass
#
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
# Clean up the directory
shutil.rmtree(self.tempdir)
def __clone_remote(self):
command = [
"git",
"clone",
"--",
self.url,
self.tempdir,
]
subprocess.check_call(command)
| apache-2.0 | Python |
c8117584e3ebd331ed7aea8ec91751242d607a4e | clean up test_create_large_static_cells_and_rows | scylladb/scylla,scylladb/scylla,scylladb/scylla,scylladb/scylla | test/cql-pytest/test_large_cells_rows.py | test/cql-pytest/test_large_cells_rows.py | # Copyright 2020 ScyllaDB
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
from util import new_test_table
import requests
import nodetool
def test_create_large_static_cells_and_rows(cql, test_keyspace):
    '''Test that `large_data_handler` successfully reports large static cells
    and static rows and this doesn't cause a crash of Scylla server.
    This is a regression test for https://github.com/scylladb/scylla/issues/6780'''
    schema = "pk int, ck int, user_ids set<text> static, PRIMARY KEY (pk, ck)"
    with new_test_table(cql, test_keyspace, schema) as table:
        insert_stmt = cql.prepare(f"INSERT INTO {table} (pk, ck, user_ids) VALUES (?, ?, ?)")
        # Default large data threshold for cells is 1 mb, for rows it is 10 mb.
        # Take 10 mb cell to trigger large data reporting code both for
        # static cells and static rows simultaneously.
        large_set = {'x' * 1024 * 1024 * 10}
        cql.execute(insert_stmt, [1, 1, large_set])
        # Flushing to sstables is what triggers the large_data_handler path.
        nodetool.flush(cql, table)
        # No need to check that the Scylla server is running here, since the test will
        # fail automatically in case Scylla crashes.
| # Copyright 2020 ScyllaDB
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
from util import new_test_table
import requests
def test_create_large_static_cells_and_rows(cql, test_keyspace):
'''Test that `large_data_handler` successfully reports large static cells
and static rows and this doesn't cause a crash of Scylla server.
This is a regression test for https://github.com/scylladb/scylla/issues/6780'''
schema = "pk int, ck int, user_ids set<text> static, PRIMARY KEY (pk, ck)"
with new_test_table(cql, test_keyspace, schema) as table:
insert_stmt = cql.prepare(f"INSERT INTO {table} (pk, ck, user_ids) VALUES (?, ?, ?) USING TIMEOUT 5m")
# Default large data threshold for cells is 1 mb, for rows it is 10 mb.
# Take 10 mb cell to trigger large data reporting code both for
# static cells and static rows simultaneously.
large_set = {'x' * 1024 * 1024 * 10}
cql.execute(insert_stmt, [1, 1, large_set])
# REST API endpoint address for test scylla node
node_address = f'http://{cql.cluster.contact_points[0]}:10000'
# Execute force flush of test table to persistent storage, which is necessary to trigger
# `large_data_handler` execution.
table_without_ks = table[table.find('.') + 1:] # strip keyspace part from the table name
requests.post(f'{node_address}/storage_service/keyspace_flush/{test_keyspace}', params={'cf' : table_without_ks})
# No need to check that the Scylla server is running here, since the test will
# fail automatically in case Scylla crashes. | agpl-3.0 | Python |
d8c31128ee581a167c3b7fede6bdb600a154b1f3 | Add missing LICENSE headers | dennybaa/st2,peak6/st2,pixelrebel/st2,emedvedev/st2,emedvedev/st2,armab/st2,grengojbo/st2,punalpatel/st2,tonybaloney/st2,alfasin/st2,StackStorm/st2,StackStorm/st2,grengojbo/st2,lakshmi-kannan/st2,jtopjian/st2,pinterb/st2,pixelrebel/st2,pixelrebel/st2,dennybaa/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,pinterb/st2,dennybaa/st2,alfasin/st2,StackStorm/st2,Plexxi/st2,punalpatel/st2,peak6/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2,lakshmi-kannan/st2,Itxaka/st2,lakshmi-kannan/st2,tonybaloney/st2,alfasin/st2,jtopjian/st2,tonybaloney/st2,peak6/st2,Itxaka/st2,armab/st2,armab/st2,emedvedev/st2,nzlosh/st2,pinterb/st2,grengojbo/st2,punalpatel/st2,Itxaka/st2,jtopjian/st2,StackStorm/st2 | st2common/st2common/util/jinja.py | st2common/st2common/util/jinja.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import jinja2
import six
def render_values(mapping=None, context=None):
    """
    Render an incoming mapping using context provided in context using Jinja2. Returns a dict
    containing rendered mapping.
    :param mapping: Input as a dictionary of key value pairs.
    :type mapping: ``dict``
    :param context: Context to be used for dictionary.
    :type context: ``dict``
    :rtype: ``dict``
    """
    # Nothing to render without both a mapping and a context.
    if not context or not mapping:
        return mapping
    # StrictUndefined makes references to missing context keys raise instead
    # of silently rendering as an empty string.
    env = jinja2.Environment(undefined=jinja2.StrictUndefined)
    rendered_mapping = {}
    for k, v in six.iteritems(mapping):
        # jinja2 works with string so transform list and dict to strings.
        reverse_json_dumps = False
        if isinstance(v, dict) or isinstance(v, list):
            v = json.dumps(v)
            reverse_json_dumps = True
        else:
            v = str(v)
        rendered_v = env.from_string(v).render(context)
        # no change therefore no templatization so pick params from original to retain
        # original type
        if rendered_v == v:
            rendered_mapping[k] = mapping[k]
            continue
        # Containers were stringified above; parse them back into objects.
        if reverse_json_dumps:
            rendered_v = json.loads(rendered_v)
        rendered_mapping[k] = rendered_v
    return rendered_mapping
| import json
import jinja2
import six
def render_values(mapping=None, context=None):
"""
Render an incoming mapping using context provided in context using Jinja2. Returns a dict
containing rendered mapping.
:param mapping: Input as a dictionary of key value pairs.
:type mapping: ``dict``
:param context: Context to be used for dictionary.
:type context: ``dict``
:rtype: ``dict``
"""
if not context or not mapping:
return mapping
env = jinja2.Environment(undefined=jinja2.StrictUndefined)
rendered_mapping = {}
for k, v in six.iteritems(mapping):
# jinja2 works with string so transform list and dict to strings.
reverse_json_dumps = False
if isinstance(v, dict) or isinstance(v, list):
v = json.dumps(v)
reverse_json_dumps = True
else:
v = str(v)
rendered_v = env.from_string(v).render(context)
# no change therefore no templatization so pick params from original to retain
# original type
if rendered_v == v:
rendered_mapping[k] = mapping[k]
continue
if reverse_json_dumps:
rendered_v = json.loads(rendered_v)
rendered_mapping[k] = rendered_v
return rendered_mapping
| apache-2.0 | Python |
9b8bfbbe7f55a72f78de8865e5d7b0c727528e35 | set allows_subquery to True (#7863) | apache/incubator-superset,mistercrunch/panoramix,mistercrunch/panoramix,zhouyao1994/incubator-superset,zhouyao1994/incubator-superset,zhouyao1994/incubator-superset,apache/incubator-superset,airbnb/caravel,airbnb/superset,zhouyao1994/incubator-superset,mistercrunch/panoramix,mistercrunch/panoramix,apache/incubator-superset,apache/incubator-superset,apache/incubator-superset,airbnb/superset,zhouyao1994/incubator-superset,airbnb/superset,airbnb/caravel,airbnb/superset,airbnb/caravel,airbnb/caravel | superset/db_engine_specs/druid.py | superset/db_engine_specs/druid.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
from superset.db_engine_specs.base import BaseEngineSpec
class DruidEngineSpec(BaseEngineSpec):
"""Engine spec for Druid.io"""
engine = "druid"
inner_joins = False
allows_subquery = True
time_grain_functions = {
None: "{col}",
"PT1S": "FLOOR({col} TO SECOND)",
"PT1M": "FLOOR({col} TO MINUTE)",
"PT1H": "FLOOR({col} TO HOUR)",
"P1D": "FLOOR({col} TO DAY)",
"P1W": "FLOOR({col} TO WEEK)",
"P1M": "FLOOR({col} TO MONTH)",
"P0.25Y": "FLOOR({col} TO QUARTER)",
"P1Y": "FLOOR({col} TO YEAR)",
}
@classmethod
def alter_new_orm_column(cls, orm_col):
if orm_col.column_name == "__time":
orm_col.is_dttm = True
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
from superset.db_engine_specs.base import BaseEngineSpec
class DruidEngineSpec(BaseEngineSpec):
"""Engine spec for Druid.io"""
engine = "druid"
inner_joins = False
allows_subquery = False
time_grain_functions = {
None: "{col}",
"PT1S": "FLOOR({col} TO SECOND)",
"PT1M": "FLOOR({col} TO MINUTE)",
"PT1H": "FLOOR({col} TO HOUR)",
"P1D": "FLOOR({col} TO DAY)",
"P1W": "FLOOR({col} TO WEEK)",
"P1M": "FLOOR({col} TO MONTH)",
"P0.25Y": "FLOOR({col} TO QUARTER)",
"P1Y": "FLOOR({col} TO YEAR)",
}
@classmethod
def alter_new_orm_column(cls, orm_col):
if orm_col.column_name == "__time":
orm_col.is_dttm = True
| apache-2.0 | Python |
4e628f9b2c3b30df81fb878d00343005badf952a | fix missing import | david-farrar/exaproxy,PrFalken/exaproxy,PrFalken/exaproxy,PrFalken/exaproxy,jbfavre/exaproxy,jbfavre/exaproxy,david-farrar/exaproxy,david-farrar/exaproxy | lib/exaproxy/network/server.py | lib/exaproxy/network/server.py | #!/usr/bin/env python
# encoding: utf-8
"""
server.py
Created by Thomas Mangin on 2011-11-30.
Copyright (c) 2011 Exa Networks. All rights reserved.
"""
# http://code.google.com/speed/articles/web-metrics.html
# http://itamarst.org/writings/pycon05/fast.html
from exaproxy.network.functions import listen
from exaproxy.util.logger import logger
import socket
class Server(object):
_listen = staticmethod(listen)
def __init__(self, poller):
self.socks = {}
self.poller = poller
def listen(self, ip, port, timeout, backlog):
s = self._listen(ip, port,timeout,backlog)
# XXX: check s is not None
self.socks[s] = True
# register the socket with the poller
self.poller.addReadSocket('read_socks', s)
return s
def accept(self, sock):
try:
# should we check to make sure it's a socket we provided
s, p = sock.accept()
s.setblocking(0)
# XXX: we really should try to handle the entire queue at once
yield s, p
except socket.error, e:
# It doesn't really matter if accept fails temporarily. We will
# try again next loop
logger.debug('server', 'failure on accept %s' % str(e))
def stop(self):
for sock in self.socks:
try:
sock.close()
except socket.error, e:
pass
self.socks = {}
self.poller.clearRead('read_socks')
| #!/usr/bin/env python
# encoding: utf-8
"""
server.py
Created by Thomas Mangin on 2011-11-30.
Copyright (c) 2011 Exa Networks. All rights reserved.
"""
# http://code.google.com/speed/articles/web-metrics.html
# http://itamarst.org/writings/pycon05/fast.html
from exaproxy.network.functions import listen
from exaproxy.util.logger import logger
class Server(object):
_listen = staticmethod(listen)
def __init__(self, poller):
self.socks = {}
self.poller = poller
def listen(self, ip, port, timeout, backlog):
s = self._listen(ip, port,timeout,backlog)
# XXX: check s is not None
self.socks[s] = True
# register the socket with the poller
self.poller.addReadSocket('read_socks', s)
return s
def accept(self, sock):
try:
# should we check to make sure it's a socket we provided
s, p = sock.accept()
s.setblocking(0)
# XXX: we really should try to handle the entire queue at once
yield s, p
except socket.error, e:
# It doesn't really matter if accept fails temporarily. We will
# try again next loop
logger.debug('server', 'failure on accept %s' % str(e))
def stop(self):
for sock in self.socks:
try:
sock.close()
except socket.error, e:
pass
self.socks = {}
self.poller.clearRead('read_socks')
| bsd-2-clause | Python |
e519b05d5137764b298c471e3b3566a25cb859d0 | Add depth first search | jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm | python_practice/graph/undirectedGraph.py | python_practice/graph/undirectedGraph.py |
class undirectedGraph(object):
def __init__(self, degrees):
self.degrees = degrees
self.adjacent_matrix = []
for i in range(degrees):
self.adjacent_matrix.append([0]*degrees)
def __str__(self):
output = ""
for row in self.adjacent_matrix:
for item in row:
output += "|"+str(item)
output += "|\n"
return output
def addEdge(self, vertex1, vertex2):
self.adjacent_matrix[vertex1][vertex2]+=1
self.adjacent_matrix[vertex2][vertex1]+=1
def isTree(self):
return True
def depthFirstSeach(self, start_vertex, finded_vertexes):
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0:
finded_vertexes.append(node)
finded_vertexes = self.depthFirstSeach(self, start_node, finded_vertexes)
return finded_vertexes
|
class undirectedGraph(object):
def __init__(self, degrees):
self.degrees = degrees
self.adjacent_matrix = []
for i in range(degrees):
self.adjacent_matrix.append([0]*degrees)
def __str__(self):
output = ""
for row in self.adjacent_matrix:
for item in row:
output += "|"+str(item)
output += "|\n"
return output
def addEdge(self, vertex1, vertex2):
self.adjacent_matrix[vertex1][vertex2]+=1
self.adjacent_matrix[vertex2][vertex1]+=1
| mit | Python |
ba64752915055014cf586ab854d64a6e30dc1690 | Add hours_since and minutes_since | webkom/coffee,webkom/coffee | coffee/models.py | coffee/models.py | import redis
from datetime import datetime
from coffee.config import app_config
class Status (object):
def __init__(self):
self.redis = redis.Redis(
host=app_config['REDIS_HOST'],
port=app_config['REDIS_PORT'],
password=app_config['REDIS_PW']
)
try:
self.get()
except:
self.current_status = False
self.last_start = datetime.strptime('1977-11-21 12:00', '%Y-%m-%d %H:%M')
span = datetime.now() - self.last_start
self.hours_since = (span.days*24)+(span.seconds//3600)
self.minutes_since = (span.seconds//60)%60
def save(self):
self.redis.hmset('coffeestatus', self.to_dict())
def get(self):
previous = self.redis.hgetall('coffeestatus')
self.current_status = previous['status'] == 'True'
self.last_start = datetime.strptime(previous['last_start'], '%Y-%m-%d %H:%M')
span = datetime.now() - self.last_start
self.hours_since = (span.days*24)+(span.seconds//3600)
self.minutes_since = (span.seconds//60)%60
def to_dict(self):
return {
'status': self.current_status,
'last_start': self.last_start.strftime('%Y-%m-%d %H:%M'),
'hours_since': self.hours_since,
'minutes_since': self.minutes_since
}
def calculate_last_start(self, status):
if status and datetime.now() > self.last_start:
return datetime.now()
else:
return self.last_start
def update(self, new_status):
if not self.current_status == new_status:
self.current_status = new_status
self.last_start = self.calculate_last_start(new_status)
self.save()
self.log_status(new_status)
def log_status(self, status):
if status:
self.redis.hincrby('coffeestats', datetime.now().strftime('%Y-%m-%d'), 1)
def get_count(self, date):
return self.redis.hget('coffeestats', date.strftime('%Y-%m-%d')) or 0
def get_stats(self):
return self.redis.hgetall('coffeestats')
| import redis
from datetime import datetime
from coffee.config import app_config
class Status (object):
def __init__(self):
self.redis = redis.Redis(
host=app_config['REDIS_HOST'],
port=app_config['REDIS_PORT'],
password=app_config['REDIS_PW']
)
try:
self.get()
except:
self.current_status = False
self.last_start = datetime.strptime('1977-11-21 12:00', '%Y-%m-%d %H:%M')
def save(self):
self.redis.hmset('coffeestatus', self.to_dict())
def get(self):
previous = self.redis.hgetall('coffeestatus')
self.current_status = previous['status'] == 'True'
self.last_start = datetime.strptime(previous['last_start'], '%Y-%m-%d %H:%M')
def to_dict(self):
return {
'status': self.current_status,
'last_start': self.last_start.strftime('%Y-%m-%d %H:%M')
}
def calculate_last_start(self, status):
if status and datetime.now() > self.last_start:
return datetime.now()
else:
return self.last_start
def update(self, new_status):
if not self.current_status == new_status:
self.current_status = new_status
self.last_start = self.calculate_last_start(new_status)
self.save()
self.log_status(new_status)
def log_status(self, status):
if status:
self.redis.hincrby('coffeestats', datetime.now().strftime('%Y-%m-%d'), 1)
def get_count(self, date):
return self.redis.hget('coffeestats', date.strftime('%Y-%m-%d')) or 0
def get_stats(self):
return self.redis.hgetall('coffeestats')
| mit | Python |
351c7645c43e217d9173362f0939648fd2c6123f | Fix Siri VM test | jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk,jclgoodwin/bustimes.org.uk | busstops/management/tests/test_import_sirivm.py | busstops/management/tests/test_import_sirivm.py | import os
from vcr import use_cassette
from django.test import TestCase
from ...models import DataSource
from ..commands import import_sirivm
class SiriVMImportTest(TestCase):
@use_cassette(os.path.join('data', 'vcr', 'import_sirivm.yaml'), decode_compressed_response=True)
def test_handle(self):
command = import_sirivm.Command()
items = command.get_items()
item = next(items)
command.source = DataSource.objects.create(datetime='2018-08-06T22:41:15+01:00')
vehicle, created, service = command.get_vehicle_and_service(item)
self.assertEqual('FE 69532', str(vehicle))
self.assertTrue(created)
self.assertIsNone(service)
| import os
from vcr import use_cassette
# from mock import patch
from django.test import TestCase
from ...models import DataSource
# with patch('time.sleep', return_value=None):
from ..commands import import_sirivm
@use_cassette(os.path.join('data', 'vcr', 'import_sirivm.yaml'), decode_compressed_response=True)
class SiriVMImportTest(TestCase):
def test_handle(self):
command = import_sirivm.Command()
items = command.get_items()
item = next(items)
command.source = DataSource.objects.create(datetime='2018-08-06T22:41:15+01:00')
vehicle, created, service = command.get_vehicle_and_service(item)
self.assertEqual('FE 67162', str(vehicle))
self.assertTrue(created)
self.assertIsNone(service)
| mpl-2.0 | Python |
159fa9d96a2182b9cf445f60c18fe465a298d5fb | Fix test logic | rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org | readthedocs/search/tests/test_proxied_api.py | readthedocs/search/tests/test_proxied_api.py | import pytest
from readthedocs.search.tests.test_api import BaseTestDocumentSearch
@pytest.mark.proxito
@pytest.mark.search
class TestProxiedSearchAPI(BaseTestDocumentSearch):
# This project slug needs to exist in the ``all_projects`` fixture.
host = 'docs.readthedocs.io'
@pytest.fixture(autouse=True)
def setup_settings(self, settings):
settings.PUBLIC_DOMAIN = 'readthedocs.io'
def get_search(self, api_client, search_params):
return api_client.get(self.url, search_params, HTTP_HOST=self.host)
| import pytest
from readthedocs.search.tests.test_api import BaseTestDocumentSearch
@pytest.mark.proxito
@pytest.mark.search
class TestProxiedSearchAPI(BaseTestDocumentSearch):
host = 'pip.readthedocs.io'
@pytest.fixture(autouse=True)
def setup_settings(self, settings):
settings.PUBLIC_DOMAIN = 'readthedocs.io'
def get_search(self, api_client, search_params):
return api_client.get(self.url, search_params, HTTP_HOST=self.host)
| mit | Python |
3d3325f5ad654b8b14f0935883eaab579fc13780 | bump version | vmalloc/backslash-python,slash-testing/backslash-python | backslash/__version__.py | backslash/__version__.py | __version__ = "2.4.1"
| __version__ = "2.4.0"
| bsd-3-clause | Python |
ee6637dd9d63227a018b8a24ddae88a64a758f70 | bump version | praiskup/atomic,rh-atomic-bot/atomic,praiskup/atomic,cdrage/atomic,charliedrage/atomic,cdrage/atomic,dustymabe/atomic,charliedrage/atomic,ibotty/atomic,ibotty/atomic,sallyom/atomic,lsm5/atomic,sallyom/atomic,rh-atomic-bot/atomic,lsm5/atomic,dustymabe/atomic,aveshagarwal/atomic,aveshagarwal/atomic | Atomic/__init__.py | Atomic/__init__.py | import sys
from .pulp import PulpServer
from .config import PulpConfig
from .atomic import Atomic
__version__ = "1.2"
def writeOut(output, lf="\n"):
sys.stdout.flush()
sys.stdout.write(str(output) + lf)
def push_image_to_pulp(image, server_url, username, password, verify_ssl,
docker_client):
if not image:
raise ValueError("Image required")
parts = image.split("/")
if parts > 1:
if parts[0].find(".") != -1:
server_url = parts[0]
image = ("/").join(parts[1:])
repo = image.replace("/", "-")
if not server_url:
raise ValueError("Server url required")
if not server_url.startswith("http"):
server_url = "https://" + server_url
try:
pulp = PulpServer(server_url=server_url, username=username,
password=password, verify_ssl=verify_ssl,
docker_client=docker_client)
except Exception as e:
raise IOError('Failed to initialize Pulp: {0}'.format(e))
try:
if not pulp.is_repo(repo):
pulp.create_repo(image, repo)
except Exception as e:
raise IOError('Failed to create Pulp repository: {0}'.format(e))
try:
writeOut('Uploading image "{0}" to pulp server "{1}"'
''.format(image, server_url))
pulp.upload_docker_image(image, repo)
writeOut("")
except Exception as e:
raise IOError('Failed to upload image to Pulp: {0}'.format(e))
pulp.publish_repo(repo)
pulp.export_repo(repo)
| import sys
from .pulp import PulpServer
from .config import PulpConfig
from .atomic import Atomic
__version__ = "1.1"
def writeOut(output, lf="\n"):
sys.stdout.flush()
sys.stdout.write(str(output) + lf)
def push_image_to_pulp(image, server_url, username, password, verify_ssl,
docker_client):
if not image:
raise ValueError("Image required")
parts = image.split("/")
if parts > 1:
if parts[0].find(".") != -1:
server_url = parts[0]
image = ("/").join(parts[1:])
repo = image.replace("/", "-")
if not server_url:
raise ValueError("Server url required")
if not server_url.startswith("http"):
server_url = "https://" + server_url
try:
pulp = PulpServer(server_url=server_url, username=username,
password=password, verify_ssl=verify_ssl,
docker_client=docker_client)
except Exception as e:
raise IOError('Failed to initialize Pulp: {0}'.format(e))
try:
if not pulp.is_repo(repo):
pulp.create_repo(image, repo)
except Exception as e:
raise IOError('Failed to create Pulp repository: {0}'.format(e))
try:
writeOut('Uploading image "{0}" to pulp server "{1}"'
''.format(image, server_url))
pulp.upload_docker_image(image, repo)
writeOut("")
except Exception as e:
raise IOError('Failed to upload image to Pulp: {0}'.format(e))
pulp.publish_repo(repo)
pulp.export_repo(repo)
| lgpl-2.1 | Python |
0fc759a2142c2733b74ae5283ef46b29c31dd94f | update version number | hanya/MRI,hanya/MRI | pythonpath/mytools_Mri/values.py | pythonpath/mytools_Mri/values.py | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
set()
except:
from sets import Set as set
MRINAME = 'MRI'
MRIVERSION = '1.1.0'
MRI_HOME = 'http://extensions.services.openoffice.org/project/MRI'
MRI_ID = 'mytools.mri'
MRI_DIR = None
def set_mri_dir(ctx):
global MRI_DIR
import mytools_Mri.tools
MRI_DIR = mytools_Mri.tools.get_extension_dirurl(ctx, MRI_ID)
class ConfigNames(object):
config_node = '/mytools.Mri.Configuration/Settings'
sdk_path = 'SDKDirectory'
browser = 'Browser'
pos_size = 'WindowPosSize'
font_name = 'CharFontName'
char_size = 'CharHeight'
sorted = 'Sorted'
abbrev = 'Abbreviated'
detailed = 'Detailed'
show_labels = 'ShowLabels'
origin = 'MRIOrigin'
show_code = 'ShowCode'
code_type = 'CodeType'
use_pseud_props = 'UsePseudProperty'
grid = 'UseGrid'
use_tab = 'UseTab'
macros = 'Macros'
IGNORED_INTERFACES = set(('com.sun.star.script.browse.XBrowseNode',))
IGNORED_PROPERTIES = set(('ActiveLayer', 'AsProperty', 'ClientMap', 'FontSlant',
'LayoutSize', 'Modified', 'PropertyToDefault', 'UIConfigurationManager',
'ParaIsNumberingRestart', 'NumberingLevel', 'NumberingStartValue', 'NumberingStartLevel', 'DataArray', 'FormulaArray', 'Printer', 'Material'))
# value descriptions
EMPTYSTR = '""'
VOIDVAL = '-void-'
NONSTRVAL = '-%s-' # values can not be converted to strings
VALUEERROR = '-Error-'
# additional informations
PSEUDPORP = 'Pseud ' # pseud property
IGNORED = 'Ignored' # listed in IGNORED_PROPERTIES
NOTACCESSED = '-----'
WRITEONLY = 'WriteOnly'
ATTRIBUTE = 'Attr.'
# abbreviated string
ABBROLD = 'com.sun.star.'
ABBRNEW = '.'
| # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
set()
except:
from sets import Set as set
MRINAME = 'MRI'
MRIVERSION = '1.0.0'
MRI_HOME = 'http://extensions.services.openoffice.org/project/MRI'
MRI_ID = 'mytools.mri'
MRI_DIR = None
def set_mri_dir(ctx):
global MRI_DIR
import mytools_Mri.tools
MRI_DIR = mytools_Mri.tools.get_extension_dirurl(ctx, MRI_ID)
class ConfigNames(object):
config_node = '/mytools.Mri.Configuration/Settings'
sdk_path = 'SDKDirectory'
browser = 'Browser'
pos_size = 'WindowPosSize'
font_name = 'CharFontName'
char_size = 'CharHeight'
sorted = 'Sorted'
abbrev = 'Abbreviated'
detailed = 'Detailed'
show_labels = 'ShowLabels'
origin = 'MRIOrigin'
show_code = 'ShowCode'
code_type = 'CodeType'
use_pseud_props = 'UsePseudProperty'
grid = 'UseGrid'
use_tab = 'UseTab'
macros = 'Macros'
IGNORED_INTERFACES = set(('com.sun.star.script.browse.XBrowseNode',))
IGNORED_PROPERTIES = set(('ActiveLayer', 'AsProperty', 'ClientMap', 'FontSlant',
'LayoutSize', 'Modified', 'PropertyToDefault', 'UIConfigurationManager',
'ParaIsNumberingRestart', 'NumberingLevel', 'NumberingStartValue', 'NumberingStartLevel', 'DataArray', 'FormulaArray', 'Printer', 'Material'))
# value descriptions
EMPTYSTR = '""'
VOIDVAL = '-void-'
NONSTRVAL = '-%s-' # values can not be converted to strings
VALUEERROR = '-Error-'
# additional informations
PSEUDPORP = 'Pseud ' # pseud property
IGNORED = 'Ignored' # listed in IGNORED_PROPERTIES
NOTACCESSED = '-----'
WRITEONLY = 'WriteOnly'
ATTRIBUTE = 'Attr.'
# abbreviated string
ABBROLD = 'com.sun.star.'
ABBRNEW = '.'
| apache-2.0 | Python |
a29563dab552d45a8ec6766246bacf4af2d16246 | Remove newimages method from ClosedSite | wikimedia/pywikibot-core,wikimedia/pywikibot-core | pywikibot/site/_obsoletesites.py | pywikibot/site/_obsoletesites.py | """Objects representing obsolete MediaWiki sites."""
#
# (C) Pywikibot team, 2019-2021
#
# Distributed under the terms of the MIT license.
#
import pywikibot
from pywikibot.exceptions import NoPage
from pywikibot.site._apisite import APISite
from pywikibot.site._basesite import BaseSite
from pywikibot.tools import remove_last_args
class RemovedSite(BaseSite):
"""Site removed from a family."""
pass
class ClosedSite(APISite):
"""Site closed to read-only mode."""
@remove_last_args(['sysop'])
def __init__(self, code, fam, user=None):
"""Initializer."""
super().__init__(code, fam, user)
def _closed_error(self, notice=''):
"""An error instead of pointless API call."""
pywikibot.error('Site {} has been closed. {}'.format(self.sitename,
notice))
def page_restrictions(self, page):
"""Return a dictionary reflecting page protections."""
if not self.page_exists(page):
raise NoPage(page)
if not hasattr(page, '_protection'):
page._protection = {'edit': ('steward', 'infinity'),
'move': ('steward', 'infinity'),
'delete': ('steward', 'infinity'),
'upload': ('steward', 'infinity'),
'create': ('steward', 'infinity')}
return page._protection
def recentchanges(self, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No recent changes can be returned.')
def is_uploaddisabled(self):
"""Return True if upload is disabled on site."""
if not hasattr(self, '_uploaddisabled'):
self._uploaddisabled = True
return self._uploaddisabled
def newpages(self, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No new pages can be returned.')
def newfiles(self, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No new files can be returned.')
| """Objects representing obsolete MediaWiki sites."""
#
# (C) Pywikibot team, 2019-2021
#
# Distributed under the terms of the MIT license.
#
import pywikibot
from pywikibot.exceptions import NoPage
from pywikibot.site._apisite import APISite
from pywikibot.site._basesite import BaseSite
from pywikibot.tools import remove_last_args
class RemovedSite(BaseSite):
"""Site removed from a family."""
pass
class ClosedSite(APISite):
"""Site closed to read-only mode."""
@remove_last_args(['sysop'])
def __init__(self, code, fam, user=None):
"""Initializer."""
super().__init__(code, fam, user)
def _closed_error(self, notice=''):
"""An error instead of pointless API call."""
pywikibot.error('Site {} has been closed. {}'.format(self.sitename,
notice))
def page_restrictions(self, page):
"""Return a dictionary reflecting page protections."""
if not self.page_exists(page):
raise NoPage(page)
if not hasattr(page, '_protection'):
page._protection = {'edit': ('steward', 'infinity'),
'move': ('steward', 'infinity'),
'delete': ('steward', 'infinity'),
'upload': ('steward', 'infinity'),
'create': ('steward', 'infinity')}
return page._protection
def recentchanges(self, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No recent changes can be returned.')
def is_uploaddisabled(self):
"""Return True if upload is disabled on site."""
if not hasattr(self, '_uploaddisabled'):
self._uploaddisabled = True
return self._uploaddisabled
def newpages(self, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No new pages can be returned.')
def newfiles(self, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No new files can be returned.')
def newimages(self, *args, **kwargs):
"""An error instead of pointless API call."""
self._closed_error('No new images can be returned.')
| mit | Python |
6a1c42fb34826e54a604903b010f71f63a991784 | Update PyPI homepage link (refs #101) | phargogh/paver,gregorynicholas/paver,cecedille1/paver,cecedille1/paver,thedrow/paver,nikolas/paver,gregorynicholas/paver | paver/release.py | paver/release.py | """Release metadata for Paver."""
from paver.options import Bunch
from paver.tasks import VERSION
setup_meta=Bunch(
name='Paver',
version=VERSION,
description='Easy build, distribution and deployment scripting',
long_description="""Paver is a Python-based build/distribution/deployment scripting tool along the
lines of Make or Rake. What makes Paver unique is its integration with
commonly used Python libraries. Common tasks that were easy before remain
easy. More importantly, dealing with *your* applications specific needs and
requirements is also easy.""",
author='Kevin Dangoor',
author_email='dangoor+paver@gmail.com',
maintainer='Lukas Linhart',
maintainer_email='bugs@almad.net',
url='http://github.com/paver/paver',
packages=['paver', 'paver.deps'],
tests_require=['nose', 'virtualenv', 'mock', 'cogapp'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Environment :: Console",
"Topic :: Documentation",
"Topic :: Utilities",
"Topic :: Software Development :: Build Tools",
])
| """Release metadata for Paver."""
from paver.options import Bunch
from paver.tasks import VERSION
setup_meta=Bunch(
name='Paver',
version=VERSION,
description='Easy build, distribution and deployment scripting',
long_description="""Paver is a Python-based build/distribution/deployment scripting tool along the
lines of Make or Rake. What makes Paver unique is its integration with
commonly used Python libraries. Common tasks that were easy before remain
easy. More importantly, dealing with *your* applications specific needs and
requirements is also easy.""",
author='Kevin Dangoor',
author_email='dangoor+paver@gmail.com',
maintainer='Lukas Linhart',
maintainer_email='bugs@almad.net',
url='http://paver.github.com/',
packages=['paver', 'paver.deps'],
tests_require=['nose', 'virtualenv', 'mock', 'cogapp'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: CPython",
"Environment :: Console",
"Topic :: Documentation",
"Topic :: Utilities",
"Topic :: Software Development :: Build Tools",
])
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.