commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
da98b79f9d8b35ea2d5b503b2b50ecb985c3a966 | Fix building on Python 3 | graingert/dockhand,6si/shipwright | shipwright/build.py | shipwright/build.py | from . import fn
from .fn import compose, curry, maybe, flat_map, merge
from .tar import mkcontext
from .compat import json_loads
# (container->(str -> None))
# -> (container -> stream)
# -> [targets]
# -> [(container, docker_image_id)]
def do_build(client, git_rev, targets):
"""
Generic function for building multiple containers while
notifying a callback function with output produced.
Given a list of targets it builds the target with the given
build_func while streaming the output through the given
show_func.
Returns an iterator of (container, docker_image_id) pairs as
the final output.
Building a container can take sometime so the results are returned as
an iterator in case the caller wants to use restults in between builds.
The consequences of this is you must either call it as part of a for loop
or pass it to a function like list() which can consume an iterator.
"""
return flat_map(build(client, git_rev), targets)
@curry
def build(client, git_rev, container):
"""
builds the given container tagged with <git_rev> and ensures that
it depends on it's parent if it's part of this build group (shares
the same namespace)
"""
return fn.fmap(
compose(
merge(dict(event="build_msg", container=container, rev=git_rev)),
json_loads
),
client.build(
fileobj=mkcontext(git_rev, container.dir_path),
rm=True,
custom_context=True,
stream=True,
tag='{0}:{1}'.format(container.name, git_rev)
)
)
@fn.composed(maybe(fn._0), fn.search(r'^Successfully built ([a-f0-9]+)\s*$'))
def success(line):
"""
>>> success('Blah')
>>> success('Successfully built 1234\\n')
'1234'
"""
@fn.composed(fn.first, fn.filter(None), fn.map(success))
def success_from_stream(stream):
"""
>>> stream = iter(('Blah', 'Successfully built 1234\\n'))
>>> success_from_stream(stream)
'1234'
"""
| import json
from . import fn
from .fn import compose, curry, maybe, flat_map, merge
from .tar import mkcontext
# (container->(str -> None))
# -> (container -> stream)
# -> [targets]
# -> [(container, docker_image_id)]
def do_build(client, git_rev, targets):
"""
Generic function for building multiple containers while
notifying a callback function with output produced.
Given a list of targets it builds the target with the given
build_func while streaming the output through the given
show_func.
Returns an iterator of (container, docker_image_id) pairs as
the final output.
Building a container can take sometime so the results are returned as
an iterator in case the caller wants to use restults in between builds.
The consequences of this is you must either call it as part of a for loop
or pass it to a function like list() which can consume an iterator.
"""
return flat_map(build(client, git_rev), targets)
@curry
def build(client, git_rev, container):
"""
builds the given container tagged with <git_rev> and ensures that
it depends on it's parent if it's part of this build group (shares
the same namespace)
"""
return fn.fmap(
compose(
merge(dict(event="build_msg", container=container, rev=git_rev)),
json.loads
),
client.build(
fileobj=mkcontext(git_rev, container.dir_path),
rm=True,
custom_context=True,
stream=True,
tag='{0}:{1}'.format(container.name, git_rev)
)
)
@fn.composed(maybe(fn._0), fn.search(r'^Successfully built ([a-f0-9]+)\s*$'))
def success(line):
"""
>>> success('Blah')
>>> success('Successfully built 1234\\n')
'1234'
"""
@fn.composed(fn.first, fn.filter(None), fn.map(success))
def success_from_stream(stream):
"""
>>> stream = iter(('Blah', 'Successfully built 1234\\n'))
>>> success_from_stream(stream)
'1234'
"""
| apache-2.0 | Python |
e5c2fbdaf574d1200e5c96fa25a5b0a67d24e656 | Tweak virtualenv paths method | praekelt/sideloader2,praekelt/sideloader2,praekelt/sideloader2 | sideloader/utils.py | sideloader/utils.py | import os
import shutil
from collections import namedtuple
def rmtree_if_exists(tree_path):
"""
Delete a directory and its contents if the directory exists.
:param: tree_path:
The path to the directory.
:returns:
True if the directory existed and was deleted. False otherwise.
"""
if os.path.exists(tree_path):
shutil.rmtree(tree_path)
return True
return False
def listdir_abs(path):
"""
List the contents of a directory returning the absolute paths to the child
files/folders.
:param: path:
The relative or absolute path to the directory.
:returns:
A list of absolute paths to the child files/folders.
"""
abspath = os.path.abspath(path)
return [os.path.join(abspath, child) for child in os.listdir(abspath)]
""" A tuple of common virtualenv paths. """
VenvPaths = namedtuple('VenvPath',
['venv', 'bin', 'activate', 'pip', 'python'])
def create_venv_paths(root_path, name='ve'):
"""
Create a VenvPaths named tuple of common virtualenv paths.
:param: root_path:
The path in which to create the virtualenv directory.
:param: name:
The name of the virtualenv. Defaults to 've'.
:returns:
The VenvPaths named tuple containing the path to the virtualenv, the bin
directory, the activate script, pip, and python.
"""
venv_path = os.path.join(root_path, name)
venv_bin_path = os.path.join(venv_path, 'bin')
return VenvPaths(
venv=venv_path,
bin=venv_bin_path,
activate=os.path.join(venv_bin_path, 'activate'),
pip=os.path.join(venv_bin_path, 'pip'),
python=os.path.join(venv_bin_path, 'python')
)
| import os
import shutil
from collections import namedtuple
def rmtree_if_exists(tree_path):
"""
Delete a directory and its contents if the directory exists.
:param: tree_path:
The path to the directory.
:returns:
True if the directory existed and was deleted. False otherwise.
"""
if os.path.exists(tree_path):
shutil.rmtree(tree_path)
return True
return False
def listdir_abs(path):
"""
List the contents of a directory returning the absolute paths to the child
files/folders.
:param: path:
The relative or absolute path to the directory.
:returns:
A list of absolute paths to the child files/folders.
"""
abspath = os.path.abspath(path)
return [os.path.join(abspath, child) for child in os.listdir(abspath)]
""" A tuple of common virtualenv paths. """
VenvPaths = namedtuple('VenvPath',
['venv', 'bin', 'activate', 'pip', 'python'])
def create_venv_paths(root_path, venv_dir='ve'):
"""
Create a VenvPaths named tuple of common virtualenv paths.
:param: root_path:
The path in which to create the virtualenv directory.
:param: venv_dir:
The name of the virtualenv directory. Defaults to 've'.
:returns:
The VenvPaths named tuple containing the path to the virtualenv, the bin
directory, the activate script, pip, and python.
"""
venv_path = os.path.join(root_path, venv_dir)
venv_bin_path = os.path.join(venv_path, 'bin')
return VenvPaths(
venv=venv_path,
bin=venv_bin_path,
activate=os.path.join(venv_bin_path, 'activate'),
pip=os.path.join(venv_bin_path, 'pip'),
python=os.path.join(venv_bin_path, 'python')
)
| mit | Python |
65ab61af27aeea3ed82e842cd62f6493d55abfc1 | Add TalkType to admin pages. | CTPUG/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CarlFK/wafer,CarlFK/wafer | wafer/talks/admin.py | wafer/talks/admin.py | from django.contrib import admin
from wafer.talks.models import TalkType, Talk, TalkUrl
class TalkUrlInline(admin.TabularInline):
model = TalkUrl
class TalkAdmin(admin.ModelAdmin):
list_display = ('title', 'get_author_name', 'get_author_contact',
'status')
list_editable = ('status',)
inlines = [
TalkUrlInline,
]
admin.site.register(Talk, TalkAdmin)
admin.site.register(TalkType)
admin.site.register(TalkUrl)
| from django.contrib import admin
from wafer.talks.models import Talk, TalkUrl
class TalkUrlInline(admin.TabularInline):
model = TalkUrl
class TalkAdmin(admin.ModelAdmin):
list_display = ('title', 'get_author_name', 'get_author_contact',
'status')
list_editable = ('status',)
inlines = [
TalkUrlInline,
]
admin.site.register(Talk, TalkAdmin)
admin.site.register(TalkUrl)
| isc | Python |
db9b999a39d8b04d9b25c5ee1e50cfa510cf2b7d | Add the talk submitter as an initial author | CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer | wafer/talks/forms.py | wafer/talks/forms.py | from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, HTML
from markitup.widgets import MarkItUpWidget
from easy_select2.widgets import Select2Multiple
from wafer.talks.models import Talk, render_author
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
kwargs.setdefault('initial', {}).setdefault('authors', [self.user])
super(TalkForm, self).__init__(*args, **kwargs)
if not self.user.has_perm('talks.edit_private_notes'):
self.fields.pop('private_notes')
# We add the name, if known, to the authors list
self.fields['authors'].label_from_instance = render_author
self.helper = FormHelper(self)
submit_button = Submit('submit', _('Submit'))
instance = kwargs['instance']
if instance:
self.helper.layout.append(
FormActions(
submit_button,
HTML('<a href="%s" class="btn btn-danger">%s</a>'
% (reverse('wafer_talk_delete', args=(instance.pk,)),
_('Delete')))))
else:
self.helper.add_input(submit_button)
class Meta:
model = Talk
fields = ('title', 'talk_type', 'abstract', 'authors', 'notes',
'private_notes')
widgets = {
'abstract': MarkItUpWidget(),
'notes': forms.Textarea(attrs={'class': 'input-xxlarge'}),
'authors': Select2Multiple(),
}
| from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as _
from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, HTML
from markitup.widgets import MarkItUpWidget
from easy_select2.widgets import Select2Multiple
from wafer.talks.models import Talk, render_author
class TalkForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(TalkForm, self).__init__(*args, **kwargs)
if not self.user.has_perm('talks.edit_private_notes'):
self.fields.pop('private_notes')
# We add the name, if known, to the authors list
self.fields['authors'].label_from_instance = render_author
self.helper = FormHelper(self)
submit_button = Submit('submit', _('Submit'))
instance = kwargs['instance']
if instance:
self.helper.layout.append(
FormActions(
submit_button,
HTML('<a href="%s" class="btn btn-danger">%s</a>'
% (reverse('wafer_talk_delete', args=(instance.pk,)),
_('Delete')))))
else:
self.helper.add_input(submit_button)
class Meta:
model = Talk
fields = ('title', 'talk_type', 'abstract', 'authors', 'notes',
'private_notes')
widgets = {
'abstract': MarkItUpWidget(),
'notes': forms.Textarea(attrs={'class': 'input-xxlarge'}),
'authors': Select2Multiple(),
}
| isc | Python |
28b6e711da7470aadce9a1362c5a60227fc7268a | Add category datamanager setup | potzenheimer/meetshaus,potzenheimer/meetshaus,potzenheimer/meetshaus,potzenheimer/meetshaus | src/meetshaus.blog/meetshaus/blog/categories.py | src/meetshaus.blog/meetshaus/blog/categories.py | # -*- coding: UTF-8 -*-
""" Module listing available and asigned blog categories """
import json
import time
import urllib2
from Products.CMFCore.interfaces import IContentish
from five import grok
from plone import api
from plone.app.layout.navigation.interfaces import INavigationRoot
from zope.component import getUtility
from plone.i18n.normalizer.interfaces import IIDNormalizer
from meetshaus.blog.blogentry import IBlogEntry
class BlogCategories(grok.View):
grok.context(IContentish)
grok.require('zope2.View')
grok.name('blog-categories')
def catalog(self):
return api.portal.get_tool(name='portal_catalog')
def keywords(self):
catalog = self.catalog()
keywords = catalog.uniqueValuesFor('Subject')
keywords = [unicode(k, 'utf-8') for k in keywords]
return keywords
def count_entries(self, subject):
catalog = self.catalog()
brains = catalog(object_provides=IBlogEntry.__identifier__,
Subject=subject.encode('utf-8'))
return len(brains)
def archive_url(self, subject):
portal_url = api.portal.get().absolute_url()
sub = urllib2.quote(subject.encode('utf-8'))
url = '{0}/blog?category={1}'.format(portal_url, sub)
return url
class SetupBlogCategoryStorage(grok.View):
grok.context(INavigationRoot)
grok.require('cmf.ManagePortal')
grok.name('setup-blog-category-storage')
def keywords(self):
catalog = self.catalog()
keywords = catalog.uniqueValuesFor('Subject')
keywords = [unicode(k, 'utf-8') for k in keywords]
return keywords
def _normalize_keyword(self, keyword):
normalizer = getUtility(IIDNormalizer)
return normalizer.normalize(keyword)
def _count_entries(self, keyword):
catalog = self.catalog()
brains = catalog(object_provides=IBlogEntry.__identifier__,
Subject=keyword.encode('utf-8'))
return len(brains)
def _build_archive_url(self, keyword):
portal_url = api.portal.get().absolute_url()
sub = urllib2.quote(keyword.encode('utf-8'))
url = '{0}/blog?category={1}'.format(portal_url, sub)
return url
def _process_request(self):
api_url = self.request.get('ACTUAL_URL')
data = {
'url': api_url,
'timestamp': str(int(time.time())),
}
items = list()
for kw in self.keywords():
info = {
'id': self._normalize_keyword(kw),
'url': self._build_archive_url(kw),
'count': self._count_entries(kw),
'title': kw,
'description': ''
}
items.append(info)
data['items'] = items
return data
def render(self):
start = time.time()
data = self._process_request()
end = time.time()
data.update(dict(_runtime=end-start))
json_data = json.dumps(data)
api.portal.set_registry_record(
'meetshaus.blog.interfaces.IBlogToolSettings.blog_categories',
json_data)
next_url = api.portal.get().absolute_url()
return self.request.response.redirect(next_url)
| # -*- coding: UTF-8 -*-
""" Module listing available and asigned blog categories """
import urllib2
from plone import api
from five import grok
from Products.CMFCore.interfaces import IContentish
from meetshaus.blog.blogentry import IBlogEntry
class BlogCategories(grok.View):
grok.context(IContentish)
grok.require('zope2.View')
grok.name('blog-categories')
def catalog(self):
return api.portal.get_tool(name='portal_catalog')
def keywords(self):
catalog = self.catalog()
keywords = catalog.uniqueValuesFor('Subject')
keywords = [unicode(k, 'utf-8') for k in keywords]
return keywords
def count_entries(self, subject):
catalog = self.catalog()
brains = catalog(object_provides=IBlogEntry.__identifier__,
Subject=subject.encode('utf-8'))
return len(brains)
def archive_url(self, subject):
portal_url = api.portal.get().absolute_url()
sub = urllib2.quote(subject.encode('utf-8'))
url = '{0}/blog?category={1}'.format(portal_url, sub)
return url
| mit | Python |
be0e05187044e08ede722ae224ca59895edd0f46 | Add version 1.7.1 for cub. (#5164) | EmreAtes/spack,matthiasdiener/spack,iulian787/spack,lgarren/spack,mfherbst/spack,tmerrick1/spack,tmerrick1/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,LLNL/spack,skosukhin/spack,iulian787/spack,TheTimmy/spack,LLNL/spack,LLNL/spack,EmreAtes/spack,TheTimmy/spack,skosukhin/spack,mfherbst/spack,lgarren/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,EmreAtes/spack,skosukhin/spack,lgarren/spack,TheTimmy/spack,matthiasdiener/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,krafczyk/spack,krafczyk/spack,mfherbst/spack,LLNL/spack,skosukhin/spack,tmerrick1/spack,skosukhin/spack,lgarren/spack,EmreAtes/spack,LLNL/spack,iulian787/spack,TheTimmy/spack,TheTimmy/spack,matthiasdiener/spack,lgarren/spack,krafczyk/spack,mfherbst/spack,krafczyk/spack | var/spack/repos/builtin/packages/cub/package.py | var/spack/repos/builtin/packages/cub/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Cub(Package):
"""CUB is a C++ header library of cooperative threadblock primitives
and other utilities for CUDA kernel programming."""
homepage = "https://nvlabs.github.com/cub"
url = "https://github.com/NVlabs/cub/archive/1.6.4.zip"
version('1.7.1', '028ac43922a4538596338ad5aef0f0c4')
version('1.6.4', '924fc12c0efb17264c3ad2d611ed1c51')
version('1.4.1', '74a36eb84e5b5f0bf54aa3df39f660b2')
def install(self, spec, prefix):
mkdirp(prefix.include)
install_tree('cub', join_path(prefix.include, 'cub'))
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Cub(Package):
"""CUB is a C++ header library of cooperative threadblock primitives
and other utilities for CUDA kernel programming."""
homepage = "https://nvlabs.github.com/cub"
url = "https://github.com/NVlabs/cub/archive/1.6.4.zip"
version('1.6.4', '924fc12c0efb17264c3ad2d611ed1c51')
version('1.4.1', '74a36eb84e5b5f0bf54aa3df39f660b2')
def install(self, spec, prefix):
mkdirp(prefix.include)
install_tree('cub', join_path(prefix.include, 'cub'))
| lgpl-2.1 | Python |
8680dbb71e323df9d34e99b6e118014b3289ad7c | add missing imports to agent CLI | earaujoassis/watchman,earaujoassis/watchman,earaujoassis/watchman,earaujoassis/watchman,earaujoassis/watchman | agents/utils.py | agents/utils.py | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
import shlex
import socket
import fcntl
import struct
DEPLOYMENT_TYPE_STATIC = 0
DEPLOYMENT_TYPE_CONTAINERS = 1
DEPLOYMENT_TYPE_COMPOSE = 2
class ConsoleColors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_error(message):
sys.stderr.write(ConsoleColors.FAIL + message + ConsoleColors.END + '\n')
def print_success(message):
sys.stdout.write(ConsoleColors.GREEN + message + ConsoleColors.END + '\n')
def print_step(message):
sys.stdout.write(ConsoleColors.BOLD + message + ConsoleColors.END + '\n')
def call(c, shell=True):
return subprocess.call(c, shell=shell)
def run(c):
process = subprocess.Popen(shlex.split(c), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
retcode = process.poll()
return {"retcode": retcode, "stdout": stdout, "stderr": stderr}
def assert_step(r):
if r is not 0:
sys.stdout.write('> Something went wrong, aborting...\n')
sys.exit(1)
def get_agent_filepath(die=False):
agent_options_filepath = os.path.join(home_dir(), '.watchman-agent.json')
if not os.path.isfile(agent_options_filepath) and die:
sys.stdout.write('> Missing watchman-agent.json file; skipping\n')
sys.exit(1)
return agent_options_filepath
def home_dir():
return os.path.expanduser("~")
def get_ip_address_for_interface(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
| # -*- coding: utf-8 -*-
import os
import sys
import subprocess
import shlex
import socket
DEPLOYMENT_TYPE_STATIC = 0
DEPLOYMENT_TYPE_CONTAINERS = 1
DEPLOYMENT_TYPE_COMPOSE = 2
class ConsoleColors:
HEADER = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
END = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def print_error(message):
sys.stderr.write(ConsoleColors.FAIL + message + ConsoleColors.END + '\n')
def print_success(message):
sys.stdout.write(ConsoleColors.GREEN + message + ConsoleColors.END + '\n')
def print_step(message):
sys.stdout.write(ConsoleColors.BOLD + message + ConsoleColors.END + '\n')
def call(c, shell=True):
return subprocess.call(c, shell=shell)
def run(c):
process = subprocess.Popen(shlex.split(c), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = process.communicate()
retcode = process.poll()
return {"retcode": retcode, "stdout": stdout, "stderr": stderr}
def assert_step(r):
if r is not 0:
sys.stdout.write('> Something went wrong, aborting...\n')
sys.exit(1)
def get_agent_filepath(die=False):
agent_options_filepath = os.path.join(home_dir(), '.watchman-agent.json')
if not os.path.isfile(agent_options_filepath) and die:
sys.stdout.write('> Missing watchman-agent.json file; skipping\n')
sys.exit(1)
return agent_options_filepath
def home_dir():
return os.path.expanduser("~")
def get_ip_address_for_interface(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
| mit | Python |
9ee3423a8be12c5b14d6f317d4fc535f1fa309b5 | Switch to development version. | igordejanovic/parglare,igordejanovic/parglare | parglare/__init__.py | parglare/__init__.py | # -*- coding: utf-8 -*-
# flake8: NOQA
from parglare.parser import Parser, Token, pos_to_line_col, \
Node, NodeTerm, NodeNonTerm
from parglare.tables import LALR, SLR, SHIFT, REDUCE, ACCEPT
from parglare.glr import GLRParser
from parglare.grammar import Grammar, NonTerminal, Terminal, \
RegExRecognizer, StringRecognizer, EMPTY, EOF, STOP
from parglare.common import get_collector
from parglare.exceptions import ParserInitError, ParseError, GrammarError, \
DisambiguationError
__version__ = "0.10.0.dev"
| # -*- coding: utf-8 -*-
# flake8: NOQA
from parglare.parser import Parser, Token, pos_to_line_col, \
Node, NodeTerm, NodeNonTerm
from parglare.tables import LALR, SLR, SHIFT, REDUCE, ACCEPT
from parglare.glr import GLRParser
from parglare.grammar import Grammar, NonTerminal, Terminal, \
RegExRecognizer, StringRecognizer, EMPTY, EOF, STOP
from parglare.common import get_collector
from parglare.exceptions import ParserInitError, ParseError, GrammarError, \
DisambiguationError
__version__ = "0.9.2"
| mit | Python |
845b5deb17a088e8dc7e899b294d6b821d3896a3 | Remove unused import | galaxy-iuc/parsec | parsec/decorators.py | parsec/decorators.py | import json
import wrapt
from .io import error
@wrapt.decorator
def bioblend_exception(wrapped, instance, args, kwargs):
try:
return wrapped(*args, **kwargs)
except Exception, e:
try:
error(json.loads(e.body)['err_msg'])
except Exception, e:
print e
@wrapt.decorator
def dict_output(wrapped, instance, args, kwargs):
#TODO enhance
output = wrapped(*args, **kwargs)
print(json.dumps(output, indent=4))
| import json
import pprint
import wrapt
from .io import error
@wrapt.decorator
def bioblend_exception(wrapped, instance, args, kwargs):
try:
return wrapped(*args, **kwargs)
except Exception, e:
try:
error(json.loads(e.body)['err_msg'])
except:
print e
@wrapt.decorator
def dict_output(wrapped, instance, args, kwargs):
#TODO enhance
output = wrapped(*args, **kwargs)
print(json.dumps(output, indent=4))
| apache-2.0 | Python |
c70cb13c1f54e5e64bb3b6e8be761ad86ee3926f | Update urls.py | funkybob/antfarm | antfarm/urls.py | antfarm/urls.py | '''
Django-style URL dispatcher view.
App(root_url=url_dispatcher([
(r'^/$', views.index),
(re.compile(r'^/(?P<foo>\d+)/'), views.detail, {'bar': True}),
])
The view will be called with the request, and any matched _named_ groups.
Extra kwargs can be passed as a 3rd positional argument.
There is a namedtuple class defined called URL provided. All patterns will be
assembled as a URL instance, and their regex compiled. If kwargs are not
specified, they will default to {}.
'''
from collections import namedtuple
import re
from . import response
class KeepLooking(Exception):
'''Used to tell a url_dispatcher to skip this pattern and keep looking.'''
pass
URL = namedtuple('url', ('regex', 'view'))
class url_dispatcher(object):
def __init__(self, patterns):
self.patterns = map(self._make_url, patterns)
def _make_url(self, pattern):
'''Helper to ensure all patterns are url instances.'''
if not isinstance(pattern, URL):
# Ensure the regex is compiled
pattern[0] = re.compile(pattern[0])
pattern = URL(*pattern)
return pattern
def __call__(self, request, *args, **kwargs):
path = getattr(request, 'remaining_path', request.path)
for pattern in self.patterns:
m = pattern.regex.match(path)
if m:
path.remaining_path = path[:m.end()]
try:
return pattern.view(request, *args, **kwargs)
except KeepLooking:
pass
return self.handle_not_found(request)
def handle_not_found(self, request):
return response.NotFound()
|
'''
Django-style URL dispatcher view.
App(root_url=url_dispatcher([
(r'^/$', views.index),
(re.compile(r'^/(?P<foo>\d+)/'), views.detail, {'bar': True}),
])
The view will be called with the request, and any matched _named_ groups.
Extra kwargs can be passed as a 3rd positional argument.
There is a namedtuple class defined called URL provided. All patterns will be
assembled as a URL instance, and their regex compiled. If kwargs are not
specified, they will default to {}.
'''
from collections import namedtuple
import re
from . import response
class KeepLooking(Exception):
'''Used to tell a url_dispatcher to skip this pattern and keep looking.'''
pass
URL = namedtuple('url', ('regex', 'view'))
class url_dispatcher(object):
def __init__(self, patterns):
self.patterns = map(self._make_url, patterns)
def _make_url(self, pattern):
'''Helper to ensure all patterns are url instances.'''
if not isinstance(pattern, URL):
pattern[0] = re.compile(pattern[0])
pattern = URL(*pattern)
# Ensure the regex is compiled
pattern.regex = re.compile(pattern.regex)
return pattern
def __call__(self, request, *args, **kwargs):
path = getattr(request, 'remaining_path', request.path)
for pattern in self.patterns:
m = pattern.regex.match(path)
if m:
path.remaining_path = path[:m.end()]
try:
return pattern.view(request, *args, **kwargs)
except KeepLooking:
pass
return self.handle_not_found(request)
def handle_not_found(self, request):
return response.NotFound()
| mit | Python |
cc683963bb5e9215aab3c7aefe35f9f483cdefef | Update scoring_engine version to 1.0.0 | pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine | scoring_engine/version.py | scoring_engine/version.py | import os
from scoring_engine.config import config
version = "1.0.0"
# If we specify the version specifically then use that one
if 'SCORINGENGINE_VERSION' in os.environ:
version = os.environ['SCORINGENGINE_VERSION']
# If we're in debug mode, just say dev
if config.debug is True:
version += '-dev'
| import os
from scoring_engine.config import config
version = "0.1.0"
# If we specify the version specifically then use that one
if 'SCORINGENGINE_VERSION' in os.environ:
version = os.environ['SCORINGENGINE_VERSION']
# If we're in debug mode, just say dev
if config.debug is True:
version += '-dev'
| mit | Python |
42f27447a5aa51f6de55e62c75222052679e59ad | Terminate Celery when exiting | amcat/amcat,amcat/amcat,amcat/amcat,amcat/amcat,amcat/amcat,amcat/amcat | run_celery.py | run_celery.py | #!/usr/bin/env python
"""Runs a celery worker, and reloads on a file change. Run as ./run_celery [directory]. If
directory is not given, default to cwd."""
import os
import sys
import signal
import time
import multiprocessing
import subprocess
import threading
import inotify.adapters
CELERY_CMD = tuple("celery -A amcat.amcatcelery worker -l info -Q amcat".split())
CHANGE_EVENTS = ("IN_MODIFY", "IN_ATTRIB", "IN_DELETE")
WATCH_EXTENSIONS = (".py",)
def watch_tree(stop, path, event):
"""
@type stop: multiprocessing.Event
@type event: multiprocessing.Event
"""
path = os.path.abspath(path)
for e in inotify.adapters.InotifyTree(path).event_gen():
if stop.is_set():
break
if e is not None:
_, attrs, path, filename = e
if filename is None:
continue
if any(filename.endswith(ename) for ename in WATCH_EXTENSIONS):
continue
if any(ename in attrs for ename in CHANGE_EVENTS):
event.set()
class Watcher(threading.Thread):
def __init__(self, path):
super(Watcher, self).__init__()
self.celery = subprocess.Popen(CELERY_CMD)
self.stop_event_wtree = multiprocessing.Event()
self.event_triggered_wtree = multiprocessing.Event()
self.wtree = multiprocessing.Process(target=watch_tree, args=(self.stop_event_wtree, path, self.event_triggered_wtree))
self.wtree.start()
self.running = True
def run(self):
while self.running:
if self.event_triggered_wtree.is_set():
self.event_triggered_wtree.clear()
self.restart_celery()
time.sleep(1)
def join(self, timeout=None):
self.running = False
self.stop_event_wtree.set()
self.celery.terminate()
self.wtree.join()
self.celery.wait()
super(Watcher, self).join(timeout=timeout)
def restart_celery(self):
self.celery.terminate()
self.celery.wait()
self.celery = subprocess.Popen(CELERY_CMD)
if __name__ == '__main__':
watcher = Watcher(sys.argv[1] if len(sys.argv) > 1 else ".")
watcher.start()
signal.signal(signal.SIGINT, lambda signal, frame: watcher.join())
signal.pause()
| #!/usr/bin/env python
"""Runs a celery worker, and reloads on a file change. Run as ./run_celery [directory]. If
directory is not given, default to cwd."""
import os
import sys
import signal
import time
import multiprocessing
import subprocess
import threading
import inotify.adapters
CELERY_CMD = tuple("celery -A amcat.amcatcelery worker -l info -Q amcat".split())
CHANGE_EVENTS = ("IN_MODIFY", "IN_ATTRIB", "IN_DELETE")
WATCH_EXTENSIONS = (".py",)
def watch_tree(stop, path, event):
"""
@type stop: multiprocessing.Event
@type event: multiprocessing.Event
"""
path = os.path.abspath(path)
for e in inotify.adapters.InotifyTree(path).event_gen():
if stop.is_set():
break
if e is not None:
_, attrs, path, filename = e
if filename is None:
continue
if any(filename.endswith(ename) for ename in WATCH_EXTENSIONS):
continue
if any(ename in attrs for ename in CHANGE_EVENTS):
event.set()
class Watcher(threading.Thread):
def __init__(self, path):
super(Watcher, self).__init__()
self.celery = subprocess.Popen(CELERY_CMD)
self.stop_event_wtree = multiprocessing.Event()
self.event_triggered_wtree = multiprocessing.Event()
self.wtree = multiprocessing.Process(target=watch_tree, args=(self.stop_event_wtree, path, self.event_triggered_wtree))
self.wtree.start()
self.running = True
def run(self):
while self.running:
if self.event_triggered_wtree.is_set():
self.event_triggered_wtree.clear()
self.restart_celery()
time.sleep(1)
def join(self, timeout=None):
self.running = False
self.stop_event_wtree.set()
self.wtree.join()
super(Watcher, self).join(timeout=timeout)
def restart_celery(self):
self.celery.terminate()
self.celery.wait()
self.celery = subprocess.Popen(CELERY_CMD)
if __name__ == '__main__':
watcher = Watcher(sys.argv[1] if len(sys.argv) > 1 else ".")
watcher.start()
signal.signal(signal.SIGINT, lambda signal, frame: watcher.join())
signal.pause()
| agpl-3.0 | Python |
cde48bca684e225b2f99be6637380f4ef3365f17 | Update version 1.0.0.dev3 -> 1.0.0.dev4 | dwavesystems/dimod,dwavesystems/dimod | dimod/package_info.py | dimod/package_info.py | __version__ = '1.0.0.dev4'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| __version__ = '1.0.0.dev3'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| apache-2.0 | Python |
c813f2e9540a9399cf41f0c17e272761243dd1e0 | Convert print to LOG in let.py | lperkin1/schemepy,perkinslr/schemepy | scheme/let.py | scheme/let.py | from scheme.environment import Environment
from scheme.procedure import SimpleProcedure
__author__ = 'perkins'
from scheme.macro import Macro, MacroSymbol
from scheme.Globals import Globals
from zope.interface import implements
class let(object):
implements(Macro)
def __init__(self):
pass
def __call__(self, processer, params):
env = processer.cenv
if isinstance(params[0], list):
bindings = params[0]
for binding in bindings:
if len(binding[1:])!=1:
raise SyntaxError("let requires a list of pairs for its first argument")
env[binding[0]]=processer.process([binding[1]], Environment(env))
processer.process(params[1:], env)
return
name=params[0]
bindings=params[1]
vars = [i[0] for i in bindings]
vals = [processer.process(i[1], Environment(env)) for i in bindings]
proc = SimpleProcedure([vars]+params[2:], env).setName(name)
env[name]=proc
LOG(32, [proc]+vals)
ret = processer.process([[proc]+vals])
processer.popStack(ret)
processer.stackPointer+=1
return
#{('lambda:%s' % t): SimpleProcedure([args] + rest, processer.cenv).setName("lambda:%s"%t)})
Globals['let'] = let()
| from scheme.environment import Environment
from scheme.procedure import SimpleProcedure
__author__ = 'perkins'
from scheme.macro import Macro, MacroSymbol
from scheme.Globals import Globals
from zope.interface import implements
class let(object):
implements(Macro)
def __init__(self):
pass
def __call__(self, processer, params):
env = processer.cenv
if isinstance(params[0], list):
bindings = params[0]
for binding in bindings:
if len(binding[1:])!=1:
raise SyntaxError("let requires a list of pairs for its first argument")
env[binding[0]]=processer.process([binding[1]], Environment(env))
processer.process(params[1:], env)
return
name=params[0]
bindings=params[1]
vars = [i[0] for i in bindings]
vals = [processer.process(i[1], Environment(env)) for i in bindings]
proc = SimpleProcedure([vars]+params[2:], env).setName(name)
env[name]=proc
print 32, [proc]+vals
ret = processer.process([[proc]+vals])
processer.popStack(ret)
processer.stackPointer+=1
return
#{('lambda:%s' % t): SimpleProcedure([args] + rest, processer.cenv).setName("lambda:%s"%t)})
Globals['let'] = let() | lgpl-2.1 | Python |
58daab07ab9cebb83ce54c932e1bfe7c9b6eaada | fix the team code review | allmightyspiff/softlayer-python,softlayer/softlayer-python | SoftLayer/CLI/hardware/credentials.py | SoftLayer/CLI/hardware/credentials.py | """List server credentials."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer import exceptions
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""List server credentials."""
manager = SoftLayer.HardwareManager(env.client)
hardware_id = helpers.resolve_id(manager.resolve_ids,
identifier,
'hardware')
instance = manager.get_hardware(hardware_id)
table = formatting.Table(['Username', 'Password', 'Software', 'Version'])
for item in instance['softwareComponents']:
if 'passwords' not in item:
raise exceptions.SoftLayerError("No passwords found in softwareComponents")
for credentials in item['passwords']:
table.add_row([credentials.get('username', 'None'),
credentials.get('password', 'None'),
item['softwareLicense']['softwareDescription']['referenceCode'],
item['softwareLicense']['softwareDescription']['version']])
env.fout(table)
| """List server credentials."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer import exceptions
@click.command(cls=SoftLayer.CLI.command.SLCommand, )
@click.argument('identifier')
@environment.pass_env
def cli(env, identifier):
"""List server credentials."""
manager = SoftLayer.HardwareManager(env.client)
hardware_id = helpers.resolve_id(manager.resolve_ids,
identifier,
'hardware')
instance = manager.get_hardware(hardware_id)
table = formatting.Table(['username', 'password', 'Software', 'Version'])
for item in instance['softwareComponents']:
if 'passwords' not in item:
raise exceptions.SoftLayerError("No passwords found in softwareComponents")
for credentials in item['passwords']:
table.add_row([credentials.get('username', 'None'),
credentials.get('password', 'None'),
item['softwareLicense']['softwareDescription']['referenceCode'],
item['softwareLicense']['softwareDescription']['version']])
env.fout(table)
| mit | Python |
6b0198b3fce3d4fffb01ede08e1cad08d4d4e5a9 | Fix options check for --disabled option | skraghu/softlayer-python,underscorephil/softlayer-python,Neetuj/softlayer-python,allmightyspiff/softlayer-python,nanjj/softlayer-python,softlayer/softlayer-python,kyubifire/softlayer-python | SoftLayer/CLI/loadbal/service_edit.py | SoftLayer/CLI/loadbal/service_edit.py | """Edit the properties of a service group."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import loadbal
@click.command()
@click.argument('identifier')
@click.option('--enabled / --disabled',
default=None,
help="Enable or disable the service")
@click.option('--port',
help="Change the port number for the service", type=click.INT)
@click.option('--weight',
type=click.INT,
help="Change the weight of the service")
@click.option('--healthcheck-type', help="Change the health check type")
@click.option('--ip-address', help="Change the IP of the service")
@environment.pass_env
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Edit the properties of a service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, service_id = loadbal.parse_id(identifier)
# check if any input is provided
if ((not any([ip_address, weight, port, healthcheck_type])) and
enabled is None):
raise exceptions.CLIAbort(
'At least one property is required to be changed!')
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
ip_address_id = ip_record['id']
mgr.edit_service(loadbal_id,
service_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
env.fout('Load balancer service %s is being modified!' % identifier)
| """Edit the properties of a service group."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import loadbal
@click.command()
@click.argument('identifier')
@click.option('--enabled / --disabled',
default=None,
help="Enable or disable the service")
@click.option('--port',
help="Change the port number for the service", type=click.INT)
@click.option('--weight',
type=click.INT,
help="Change the weight of the service")
@click.option('--healthcheck-type', help="Change the health check type")
@click.option('--ip-address', help="Change the IP of the service")
@environment.pass_env
def cli(env, identifier, enabled, port, weight, healthcheck_type, ip_address):
"""Edit the properties of a service group."""
mgr = SoftLayer.LoadBalancerManager(env.client)
loadbal_id, service_id = loadbal.parse_id(identifier)
# check if any input is provided
if not any([ip_address, enabled, weight, port, healthcheck_type]):
raise exceptions.CLIAbort(
'At least one property is required to be changed!')
# check if the IP is valid
ip_address_id = None
if ip_address:
ip_service = env.client['Network_Subnet_IpAddress']
ip_record = ip_service.getByIpAddress(ip_address)
ip_address_id = ip_record['id']
mgr.edit_service(loadbal_id,
service_id,
ip_address_id=ip_address_id,
enabled=enabled,
port=port,
weight=weight,
hc_type=healthcheck_type)
env.fout('Load balancer service %s is being modified!' % identifier)
| mit | Python |
f29536ab98eb49a962141535d29866dcdd29a99f | Convert to int when assigning values, slight optimization | james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF | scoreboard.py | scoreboard.py | #!/usr/bin/env python
import os
print "Content-Type: text/html\n"
print ""
def gen_scoreboard(team_data):
if len(team_data) == 0:
print "There are no teams!"
else:
print "<br>"
print "<div class='container'>"
print "<table class='responsive-table bordered hoverable centered'>"
print "<thead>"
print "<tr><th>Rank</th><th>Team</th><th>Score</th></tr>"
print "</thead>"
length = len(team_data)
for x in range(length):
if len(team_data) == 0:
return
highest_score = max(team_data.values())
for team in team_data:
if team_data[team] == highest_score:
print "<tr><td>%d</td><td>%s</td><td>%s</td></tr>\n" %(x+1, team, highest_score)
del team_data[team]
break
def main():
fin = open("accounts/scores.txt", "r")
data = fin.readlines()
teams = {}
# Data is stored team,score
for info in data:
info = info.strip().split(",")
if info[0] == "":
continue
teams[info[0]] = int(info[1])
gen_scoreboard(teams)
if 'HTTP_COOKIE' not in os.environ:
html = open("templates/scoreboard_logged_out.html").read()
else:
html = open("templates/scoreboard_logged_in.html").read()
print html
main()
| #!/usr/bin/env python
import os
print "Content-Type: text/html\n"
print ""
def gen_scoreboard(team_data):
if len(team_data) == 0:
print "There are no teams!"
else:
print "<br>"
print "<div class='container'>"
print "<table class='responsive-table bordered hoverable centered'>"
print "<thead>"
print "<tr><th>Rank</th><th>Team</th><th>Score</th></tr>"
print "</thead>"
length = len(team_data)
for x in range(length):
if len(team_data) == 0:
return
for key in team_data:
team_data[key] = int(team_data[key])
highest_score = max(team_data.values())
for team in team_data:
if team_data[team] == highest_score:
print "<tr><td>%d</td><td>%s</td><td>%s</td></tr>\n" %(x+1, team, highest_score)
del team_data[team]
break
def main():
fin = open("accounts/scores.txt", "r")
data = fin.readlines()
teams = {}
# Data is stored team,score
for info in data:
info = info.strip().split(",")
if info[0] == "":
continue
teams[info[0]] = info[1]
gen_scoreboard(teams)
if 'HTTP_COOKIE' not in os.environ:
html = open("templates/scoreboard_logged_out.html").read()
else:
html = open("templates/scoreboard_logged_in.html").read()
print html
main()
| mit | Python |
ae1dacd8870c314ace48f497342c5a286f9fc492 | fix manifest code. close #143 | mgorny/django-pipeline,chipx86/django-pipeline,joshkehn/django-pipeline,demux/django-pipeline,beedesk/django-pipeline,lydell/django-pipeline,mgorny/django-pipeline,almost/django-pipeline,floppym/django-pipeline,almost/django-pipeline,vstoykov/django-pipeline,Kami/django-pipeline,jensenbox/django-pipeline,tayfun/django-pipeline,skirsdeda/django-pipeline,jazzband/django-pipeline,demux/django-pipeline,jazzband/django-pipeline,Kami/django-pipeline,Tekco/django-pipeline,theatlantic/django-pipeline,ei-grad/django-pipeline,apendleton/django-pipeline,lexqt/django-pipeline,theatlantic/django-pipeline,novapost/django-pipeline,adamcharnock/django-pipeline,kronion/django-pipeline,pombredanne/django-pipeline-1,beedesk/django-pipeline,simudream/django-pipeline,jazzband/django-pipeline,leonardoo/django-pipeline,caioariede/django-pipeline,sjhewitt/django-pipeline,zapier/django-pipeline,novapost/django-pipeline,skirsdeda/django-pipeline,caioariede/django-pipeline,zapier/django-pipeline,caioariede/django-pipeline,demux/django-pipeline,leonardoo/django-pipeline,skirsdeda/django-pipeline,Kami/django-pipeline,d9pouces/django-pipeline,sjhewitt/django-pipeline,apendleton/django-pipeline,Kobold/django-pipeline,skolsuper/django-pipeline,sideffect0/django-pipeline,lexqt/django-pipeline,kronion/django-pipeline,cyberdelia/django-pipeline,lydell/django-pipeline,camilonova/django-pipeline,wienczny/django-pipeline,apendleton/django-pipeline,joshkehn/django-pipeline,joshkehn/django-pipeline,TwigWorld/django-pipeline,cyberdelia/django-pipeline,novapost/django-pipeline,Tekco/django-pipeline,wienczny/django-pipeline,vstoykov/django-pipeline,fabiosantoscode/django-pipeline,TwigWorld/django-pipeline,leonardoo/django-pipeline,adamcharnock/django-pipeline,wienczny/django-pipeline,tayfun/django-pipeline,necaris/django-pipeline,edwinlunando/django-pipeline,botify-labs/django-pipeline,hyperoslo/django-pipeline,Kobold/django-pipeline,sideffect0/django-pipeline,tayfun/django-pipeline,ei-grad/django-pipeline,almost/django-pipeline,beedesk/django-pipeline,jwatson/django-pipeline,letolab/django-pipeline,theatlantic/django-pipeline,camilonova/django-pipeline,camilonova/django-pipeline,simudream/django-pipeline,adamcharnock/django-pipeline,floppym/django-pipeline,Tekco/django-pipeline,jensenbox/django-pipeline,perdona/django-pipeline,simudream/django-pipeline,mgorny/django-pipeline,perdona/django-pipeline,chipx86/django-pipeline,sjhewitt/django-pipeline,sideffect0/django-pipeline,pombredanne/django-pipeline-1,yuvadm/django-pipeline,jwatson/django-pipeline,jwatson/django-pipeline,necaris/django-pipeline,floppym/django-pipeline,fabiosantoscode/django-pipeline,letolab/django-pipeline,botify-labs/django-pipeline,perdona/django-pipeline,TwigWorld/django-pipeline,zapier/django-pipeline,d9pouces/django-pipeline,lydell/django-pipeline,cyberdelia/django-pipeline,edwinlunando/django-pipeline,hyperoslo/django-pipeline,hyperoslo/django-pipeline,edwinlunando/django-pipeline,kronion/django-pipeline,Kobold/django-pipeline,jensenbox/django-pipeline,skolsuper/django-pipeline,chipx86/django-pipeline,d9pouces/django-pipeline,yuvadm/django-pipeline,ei-grad/django-pipeline,yuvadm/django-pipeline,lexqt/django-pipeline,botify-labs/django-pipeline,skolsuper/django-pipeline | pipeline/manifest.py | pipeline/manifest.py | try:
from staticfiles.finders import DefaultStorageFinder
except ImportError:
from django.contrib.staticfiles.finders import DefaultStorageFinder # noqa
from django.conf import settings
from manifesto import Manifest
from pipeline.packager import Packager
class PipelineManifest(Manifest):
def __init__(self):
self.packager = Packager()
self.packages = self.collect_packages()
self.finder = DefaultStorageFinder()
def collect_packages(self):
packages = []
for package_name in self.packager.packages['css']:
package = self.packager.package_for('css', package_name)
if package.manifest:
packages.append(package)
for package_name in self.packager.packages['js']:
package = self.packager.package_for('js', package_name)
if package.manifest:
packages.append(package)
return packages
def cache(self):
ignore_patterns = getattr(settings, "STATICFILES_IGNORE_PATTERNS", None)
if settings.PIPELINE:
for package in self.packages:
yield str(self.packager.individual_url(package.output_filename))
else:
for package in self.packages:
for path in self.packager.compile(package.paths):
yield str(self.packager.individual_url(path))
for path, _ in self.finder.list(ignore_patterns):
yield str(self.packager.individual_url(path))
| try:
from staticfiles.finders import DefaultStorageFinder
except ImportError:
from django.contrib.staticfiles.storage import DefaultStorageFinder # noqa
from django.conf import settings
from manifesto import Manifest
from pipeline.packager import Packager
class PipelineManifest(Manifest):
def __init__(self):
self.packager = Packager()
self.packages = self.collect_packages()
self.finder = DefaultStorageFinder()
def collect_packages(self):
packages = []
for package_name in self.packager.packages['css']:
package = self.packager.package_for('css', package_name)
if package.manifest:
packages.append(package)
for package_name in self.packager.packages['js']:
package = self.packager.package_for('js', package_name)
if package.manifest:
packages.append(package)
return packages
def cache(self):
ignore_patterns = getattr(settings, "STATICFILES_IGNORE_PATTERNS", None)
if settings.PIPELINE:
for package in self.packages:
yield str(self.packager.individual_url(package.output_filename))
else:
for package in self.packages:
for path in self.packager.compile(package.paths):
yield str(self.packager.individual_url(path))
for path in self.finder.list(ignore_patterns):
yield str(self.packager.individual_url(path))
| mit | Python |
d5999505ee00767dca85667c04cd24bccde42eb6 | use _write to solve a problem when the order is blocked and set qty to 0.0 | ingadhoc/sale,ingadhoc/sale,ingadhoc/sale,ingadhoc/sale | sale_delivery_ux/models/sale_order.py | sale_delivery_ux/models/sale_order.py | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models
class SaleOrder(models.Model):
_inherit = 'sale.order'
def _create_delivery_line(self, carrier, price_unit):
"""
So that delivery lines are not waiting invoice or delivery,
if carrier price is:
* zero: we add with qty 0 so nothing is needed to be invoiced or sent
* not zero: we keep qty so it is set to be invoiced but we set it
as delivered so you dont need to set it manually
"""
sol = super(SaleOrder, self)._create_delivery_line(carrier, price_unit)
if not price_unit:
sol._write({'product_uom_qty': 0.0})
return sol
| ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models
class SaleOrder(models.Model):
_inherit = 'sale.order'
def _create_delivery_line(self, carrier, price_unit):
"""
So that delivery lines are not waiting invoice or delivery,
if carrier price is:
* zero: we add with qty 0 so nothing is needed to be invoiced or sent
* not zero: we keep qty so it is set to be invoiced but we set it
as delivered so you dont need to set it manually
"""
sol = super(SaleOrder, self)._create_delivery_line(carrier, price_unit)
if not price_unit:
sol.product_uom_qty = 0.0
return sol
| agpl-3.0 | Python |
34359ce6a0bda6ec061782f743f73fb528446553 | Support py3 | toslunar/chainerrl,toslunar/chainerrl | replay_buffer.py | replay_buffer.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import dict
from future import standard_library
standard_library.install_aliases()
from collections import deque
import random
import six.moves.cPickle as pickle
class ReplayBuffer(object):
def __init__(self, capacity):
self.memory = deque(maxlen=capacity)
def append(self, state, action, reward, next_state=None, next_action=None,
is_state_terminal=False):
"""Append a transition to this replay buffer
Args:
state: s_t
action: a_t
reward: r_t
next_state: s_{t+1} (can be None if terminal)
next_action: a_{t+1} (can be None for off-policy algorithms)
is_state_terminal (bool)
"""
experience = dict(state=state, action=action, reward=reward,
next_state=next_state, next_action=next_action,
is_state_terminal=is_state_terminal)
self.memory.append(experience)
def sample(self, n):
"""Sample n unique samples from this replay buffer
"""
assert len(self.memory) >= n
return random.sample(self.memory, n)
def __len__(self):
return len(self.memory)
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.memory, f)
def load(self, filename):
with open(filename, 'rb') as f:
self.memory = pickle.load(f)
| from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import dict
from future import standard_library
standard_library.install_aliases()
from collections import deque
import random
import cPickle as pickle
class ReplayBuffer(object):
def __init__(self, capacity):
self.memory = deque(maxlen=capacity)
def append(self, state, action, reward, next_state=None, next_action=None,
is_state_terminal=False):
"""Append a transition to this replay buffer
Args:
state: s_t
action: a_t
reward: r_t
next_state: s_{t+1} (can be None if terminal)
next_action: a_{t+1} (can be None for off-policy algorithms)
is_state_terminal (bool)
"""
experience = dict(state=state, action=action, reward=reward,
next_state=next_state, next_action=next_action,
is_state_terminal=is_state_terminal)
self.memory.append(experience)
def sample(self, n):
"""Sample n unique samples from this replay buffer
"""
assert len(self.memory) >= n
return random.sample(self.memory, n)
def __len__(self):
return len(self.memory)
def save(self, filename):
with open(filename, 'w') as f:
pickle.dump(self.memory, f)
def load(self, filename):
with open(filename) as f:
self.memory = pickle.load(f)
| mit | Python |
c25c9de19369467116714eadff86984f716d9b90 | Change fields order | Hackfmi/Diaphanum,Hackfmi/Diaphanum | reports/admin.py | reports/admin.py | # coding: utf-8
from django.contrib import admin
from .models import Report
class ReportAdmin(admin.ModelAdmin):
list_display = ('addressed_to', 'reported_from', 'content', 'signed_from', 'get_copies', 'created_at')
list_filter = ['created_at']
search_fields = ['addressed_to', 'reported_from', 'content', 'signed_from']
admin.site.register(Report, ReportAdmin) | # coding: utf-8
from django.contrib import admin
from .models import Report
class ReportAdmin(admin.ModelAdmin):
#Определя кои полета да се показват в заявката за извлизачне на анкети
list_display = ('addressed_to', 'reported_from', 'content', 'created_at', 'signed_from', 'get_copies')
#Добавя поле за филтриране
list_filter = ['created_at']
#Добавя поле за търсене по полето от БД въпрос
search_fields = ['addressed_to', 'reported_from', 'content', 'signed_from']
admin.site.register(Report, ReportAdmin) | mit | Python |
418af7f4c1cfae730d96ad26e0d22860a9c4df1e | Update ReConnect.py | txomon/SpockBot,nickelpro/SpockBot,OvercastNetwork/spock,Gjum/SpockBot,MrSwiss/SpockBot,guineawheek/spock,SpockBotMC/SpockBot,gamingrobot/SpockBot,luken/SpockBot | plugins/ReConnect.py | plugins/ReConnect.py | """
Hilariously out of date, I'll update this when it's not 3:30 in the morning
In the meantime, go look at plugins in spock.net.plugins for more up-to-date plugin examples
"""
import threading
from spock.mcp.mcpacket import Packet
from spock.net.cflags import cflags
from spock.net.timer import EventTimer
#Will relentlessly try to reconnect to a server
class ReConnectPlugin:
def __init__(self, client, settings):
self.client = client
self.lock = False
self.kill = False
self.delay = 0
client.register_handler(self.start_timer, cflags['SOCKET_ERR'], cflags['SOCKET_HUP'])
client.register_handler(self.stop, cflags['KILL_EVENT'])
client.register_dispatch(self.start_timer, 0xFF)
client.register_dispatch(self.grab_host, 0x02)
client.register_dispatch(self.reset_reconnect_time, 0x01)
def start_timer(self, *args):
if not self.lock:
self.client.register_timer(EventTimer(self.delay, self.reconnect))
self.lock = True
def stop(self, *args):
self.kill = True
def reconnect(self, *args):
if not self.kill:
if self.delay < 300:
self.delay += 30
self.client.start_session(self.client.mc_username, self.client.mc_password)
self.client.login(self.host, self.port)
self.lock = False
def reset_reconnect_time(self, *args):
self.delay = 0
#Grabs host and port on handshake
def grab_host(self, packet):
self.host = packet.data['host']
self.port = packet.data['port']
| import threading
from spock.mcp.mcpacket import Packet
from spock.net.cflags import cflags
from spock.net.timer import EventTimer
#Will relentlessly try to reconnect to a server
class ReConnectPlugin:
def __init__(self, client, settings):
self.client = client
self.lock = False
self.kill = False
self.delay = 0
client.register_handler(self.start_timer, cflags['SOCKET_ERR'], cflags['SOCKET_HUP'])
client.register_handler(self.stop, cflags['KILL_EVENT'])
client.register_dispatch(self.start_timer, 0xFF)
client.register_dispatch(self.grab_host, 0x02)
client.register_dispatch(self.reset_reconnect_time, 0x01)
def start_timer(self, *args):
if not self.lock:
self.client.register_timer(EventTimer(self.delay, self.reconnect))
self.lock = True
def stop(self, *args):
self.kill = True
def reconnect(self, *args):
if not self.kill:
if self.delay < 300:
self.delay += 30
self.client.start_session(self.client.mc_username, self.client.mc_password)
self.client.login(self.host, self.port)
self.lock = False
def reset_reconnect_time(self, *args):
self.delay = 0
#Grabs host and port on handshake
def grab_host(self, packet):
self.host = packet.data['host']
self.port = packet.data['port']
| mit | Python |
1806316e91eb98232424936f6bba16772861872b | Fix W605 warning | coala/corobo,coala/corobo | plugins/pitchfork.py | plugins/pitchfork.py | import re
import string
import textwrap
from errbot import BotPlugin, botcmd
class Pitchfork(BotPlugin):
"""
To pitchfork users down to ...
"""
@botcmd
def pitchfork(self, msg, arg):
"""
To pitchfork user down to ...
"""
match = re.match(r'@?([\w-]+)(?:\s+(?:down\s+)?to\s+(.+))?$',
arg)
if match:
user = match.group(1)
place = match.group(2) if match.group(2) else 'offtopic'
return textwrap.dedent((
string.Template(r"""
@$user, you are being pitchforked down to $place
```
.+====----->
\\('
=====================================<%{%{%{>>+===---> $user
//(,
.+====----->
```
""").substitute(user=user,
place=('[offtopic]('
'https://gitter.im/coala/coala/offtopic)'
if place == 'offtopic' else place))
))
else:
return "Usage: `pitchfork user [[down] to place]`"
| import re
import string
import textwrap
from errbot import BotPlugin, botcmd
class Pitchfork(BotPlugin):
"""
To pitchfork users down to ...
"""
@botcmd
def pitchfork(self, msg, arg):
"""
To pitchfork user down to ...
"""
match = re.match(r'@?([\w-]+)(?:\s+(?:down\s+)?to\s+(.+))?$',
arg)
if match:
user = match.group(1)
place = match.group(2) if match.group(2) else 'offtopic'
return textwrap.dedent((
string.Template("""
@$user, you are being pitchforked down to $place
```
.+====----->
\('
=====================================<%{%{%{>>+===---> $user
/(,
.+====----->
```
""").substitute(user=user,
place=('[offtopic]('
'https://gitter.im/coala/coala/offtopic)'
if place == 'offtopic' else place))
))
else:
return "Usage: `pitchfork user [[down] to place]`"
| mit | Python |
fcf1371162753b30597850487339b74e77f43b62 | set epsilon to 1 | dvav/dgeclust | DGEclust/viz/plotRA.py | DGEclust/viz/plotRA.py | ## Copyright (C) 2012-2013 Dimitrios V. Vavoulis
## Computational Genomics Group (http://bioinformatics.bris.ac.uk/)
## Department of Computer Science
## University of Bristol
################################################################################
import pylab as pl
import numpy as np
################################################################################
def plotRA(samples1, samples2, ids = None, epsilon = 1., *args, **kargs):
samples1 = samples1.astype('double')
samples2 = samples2.astype('double')
## set zero elements to epsilon
samples1[samples1 < 1.] = epsilon
samples2[samples2 < 1.] = epsilon
## compute means
lmeans1 = np.log2(samples1).mean(0)
lmeans2 = np.log2(samples2).mean(0)
## compute A and R
A = ( lmeans1 + lmeans2 ) * 0.5
R = lmeans1 - lmeans2
## generate RA plot
if ids is not None:
pl.plot(A[~ids], R[~ids], 'k.', A[ids], R[ids], 'r.')
else:
pl.plot(A, R, 'k.')
pl.plot(pl.gca().get_xlim(),(0.,0.),'k--')
pl.xlabel('mean')
pl.ylabel('log2 fold change')
return A, R
################################################################################
| ## Copyright (C) 2012-2013 Dimitrios V. Vavoulis
## Computational Genomics Group (http://bioinformatics.bris.ac.uk/)
## Department of Computer Science
## University of Bristol
################################################################################
import pylab as pl
import numpy as np
################################################################################
def plotRA(samples1, samples2, ids = None, epsilon = 0.5, *args, **kargs):
samples1 = samples1.astype('double')
samples2 = samples2.astype('double')
## set zero elements to epsilon
samples1[samples1 < 1.] = epsilon
samples2[samples2 < 1.] = epsilon
## compute means
lmeans1 = np.log2(samples1).mean(0)
lmeans2 = np.log2(samples2).mean(0)
## compute A and R
A = ( lmeans1 + lmeans2 ) * 0.5
R = lmeans1 - lmeans2
## generate RA plot
if ids is not None:
pl.plot(A[~ids], R[~ids], 'k.', A[ids], R[ids], 'r.')
else:
pl.plot(A, R, 'k.')
pl.plot(pl.gca().get_xlim(),(0.,0.),'k--')
pl.xlabel('mean')
pl.ylabel('log2 fold change')
return A, R
################################################################################
| mit | Python |
34a2fac19b9711ce341fec31c1e09370bfb34bec | convert string to bytes before writing to file | houqp/shell.py | shell/util.py | shell/util.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import tempfile
def str_to_pipe(s):
input_pipe = tempfile.SpooledTemporaryFile()
if isinstance(s, str):
s = s.encode('utf-8')
input_pipe.write(s)
input_pipe.seek(0)
return input_pipe
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
import tempfile
def str_to_pipe(s):
input_pipe = tempfile.SpooledTemporaryFile()
input_pipe.write(s)
input_pipe.seek(0)
return input_pipe
| mit | Python |
a8eea42f1462ac1c83eb0d16195b7c546a907e7b | revert wikipedia namespace | Teino1978-Corp/Teino1978-Corp-skybot,jmgao/skybot,Jeebeevee/DouweBot_JJ15,olslash/skybot,Jeebeevee/DouweBot,elitan/mybot,craisins/wh2kbot,ddwo/nhl-bot,rmmh/skybot,TeamPeggle/ppp-helpdesk,cmarguel/skybot,craisins/nascarbot,crisisking/skybot,callumhogsden/ausbot,andyeff/skybot,SophosBlitz/glacon,df-5/skybot,isislab/botbot,parkrrr/skybot | plugins/wikipedia.py | plugins/wikipedia.py | '''Searches wikipedia and returns first sentence of article
Scaevolus 2009'''
import re
from util import hook, http
api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
paren_re = re.compile('\s*\(.*\)$')
@hook.command('w')
@hook.command
def wiki(inp):
'''.w/.wiki <phrase> -- gets first sentence of wikipedia ''' \
'''article on <phrase>'''
x = http.get_xml(search_url, search=inp)
ns = '{http://opensearch.org/searchsuggest2}'
items = x.findall(ns + 'Section/' + ns + 'Item')
if items == []:
if x.find('error') is not None:
return 'error: %(code)s: %(info)s' % x.find('error').attrib
else:
return 'no results found'
def extract(item):
return [item.find(ns + x).text for x in
('Text', 'Description', 'Url')]
title, desc, url = extract(items[0])
if 'may refer to' in desc:
title, desc, url = extract(items[1])
title = paren_re.sub('', title)
if title.lower() not in desc.lower():
desc = title + desc
desc = re.sub('\s+', ' ', desc).strip() # remove excess spaces
if len(desc) > 300:
desc = desc[:300] + '...'
return '%s -- %s' % (desc, http.quote(url, ':/'))
| '''Searches wikipedia and returns first sentence of article
Scaevolus 2009'''
import re
from util import hook, http
api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
paren_re = re.compile('\s*\(.*\)$')
@hook.command('w')
@hook.command
def wiki(inp):
'''.w/.wiki <phrase> -- gets first sentence of wikipedia ''' \
'''article on <phrase>'''
x = http.get_xml(search_url, search=inp)
ns = '{http://www.mediawiki.org/xml/api/}'
items = x.findall(ns + 'Section/' + ns + 'Item')
if items == []:
if x.find('error') is not None:
return 'error: %(code)s: %(info)s' % x.find('error').attrib
else:
return 'no results found'
def extract(item):
return [item.find(ns + x).text for x in
('Text', 'Description', 'Url')]
title, desc, url = extract(items[0])
if 'may refer to' in desc:
title, desc, url = extract(items[1])
title = paren_re.sub('', title)
if title.lower() not in desc.lower():
desc = title + desc
desc = re.sub('\s+', ' ', desc).strip() # remove excess spaces
if len(desc) > 300:
desc = desc[:300] + '...'
return '%s -- %s' % (desc, http.quote(url, ':/'))
| unlicense | Python |
7c4124b3e21702a730b8bc454bad2a62c7a797c5 | Add lighting schedule for lighting model. | VOLTTRON/volttron-applications,VOLTTRON/volttron-applications,VOLTTRON/volttron-applications,VOLTTRON/volttron-applications,VOLTTRON/volttron-applications | pnnl/models/light.py | pnnl/models/light.py | import logging
import importlib
import pandas as pd
from volttron.platform.agent import utils
from datetime import timedelta as td
from volttron.pnnl.models.utils import clamp
_log = logging.getLogger(__name__)
utils.setup_logging()
class Light(object):
DOL = "dol"
OCC = "occ"
def __init__(self, config, **kwargs):
model_type = config.get("model_type", "simple")
_log.debug("Light Agent Model: {}".format(model_type))
module = importlib.import_module("volttron.pnnl.models.light")
model_class = getattr(module, model_type)
self.model = model_class(config, self)
def get_q(self, _set, sched_index, market_index, occupied):
q = self.model.predict(_set, sched_index, market_index, occupied)
return q
class simple(object):
def __init__(self, config, parent, **kwargs):
self.parent = parent
self.inputs = parent.inputs
self.rated_power = config["rated_power"]
def update_data(self):
pass
def predict(self, _set, sched_index, market_index, occupied):
return _set*self.rated_power
class simple_profile(object):
def __init__(self, config, parent, **kwargs):
self.parent = parent
self.inputs = parent.inputs
self.rated_power = config["rated_power"]
try:
self.lighting_schedule = config["default_lighting_schedule"]
except KeyError:
_log.warn("No no default lighting schedule!")
self.lighting_schedule = [1.0]*24
def update_data(self):
pass
def predict(self, _set, sched_index, market_index, occupied):
if not occupied:
power = self.lighting_schedule[sched_index]*self.rated_power
else:
power = _set*self.rated_power
return power
| import logging
import importlib
import pandas as pd
from volttron.platform.agent import utils
from datetime import timedelta as td
from volttron.pnnl.models.utils import clamp
_log = logging.getLogger(__name__)
utils.setup_logging()
class Light(object):
DOL = "dol"
OCC = "occ"
def __init__(self, config, **kwargs):
model_type = config.get("model_type", "simple")
_log.debug("Light Agent Model: {}".format(model_type))
module = importlib.import_module("volttron.pnnl.models.light")
model_class = getattr(module, model_type)
self.model = model_class(config, self)
def get_q(self, _set, sched_index, market_index, occupied):
q = self.model.predict(_set, sched_index, market_index, occupied)
return q
class simple(object):
def __init__(self, config, parent, **kwargs):
self.parent = parent
self.inputs = parent.inputs
if "rated_power" in config:
self.rated_power = config["rated_power"]
else:
self.rated_power = config["model_parameters"]["rated_power"]
def update_data(self):
pass
def predict(self, _set, sched_index, market_index, occupied):
return _set*self.rated_power
| bsd-3-clause | Python |
c737cf939ef9e028ada0ae3087d45b7a0dc8710c | Tweak script that gets test list in xdist builds | EDUlib/edx-platform,jolyonb/edx-platform,eduNEXT/edx-platform,EDUlib/edx-platform,arbrandes/edx-platform,mitocw/edx-platform,jolyonb/edx-platform,stvstnfrd/edx-platform,cpennington/edx-platform,cpennington/edx-platform,ESOedX/edx-platform,mitocw/edx-platform,mitocw/edx-platform,eduNEXT/edx-platform,edx-solutions/edx-platform,msegado/edx-platform,stvstnfrd/edx-platform,angelapper/edx-platform,philanthropy-u/edx-platform,arbrandes/edx-platform,a-parhom/edx-platform,stvstnfrd/edx-platform,mitocw/edx-platform,a-parhom/edx-platform,a-parhom/edx-platform,ESOedX/edx-platform,angelapper/edx-platform,ESOedX/edx-platform,eduNEXT/edx-platform,edx/edx-platform,philanthropy-u/edx-platform,msegado/edx-platform,eduNEXT/edx-platform,eduNEXT/edunext-platform,angelapper/edx-platform,appsembler/edx-platform,jolyonb/edx-platform,appsembler/edx-platform,edx/edx-platform,msegado/edx-platform,appsembler/edx-platform,eduNEXT/edunext-platform,appsembler/edx-platform,stvstnfrd/edx-platform,ESOedX/edx-platform,cpennington/edx-platform,a-parhom/edx-platform,EDUlib/edx-platform,edx/edx-platform,arbrandes/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform,edx-solutions/edx-platform,eduNEXT/edunext-platform,EDUlib/edx-platform,arbrandes/edx-platform,msegado/edx-platform,msegado/edx-platform,edx/edx-platform,angelapper/edx-platform,edx-solutions/edx-platform,edx-solutions/edx-platform,eduNEXT/edunext-platform,jolyonb/edx-platform,cpennington/edx-platform | scripts/xdist/get_worker_test_list.py | scripts/xdist/get_worker_test_list.py | """
This script strips the console log of a pytest-xdist Jenkins run into the test
lists of each pytest worker.
Assumes the following format:
[test-suite] [worker] RESULT test
"""
import click
import io
import re
import os
import shutil
@click.command()
@click.option(
'--log-file',
help="File name of console log .txt file from a Jenkins build "
"that ran pytest-xdist. This can be acquired by running: "
"curl -o console.txt https://build.testeng.edx.org/job/JOBNAME/BUILDNUMBER/consoleText",
required=True
)
@click.option(
'--test-suite',
help="Test suite that the pytest worker ran.",
type=click.Choice(['lms-unit', 'cms-unit', 'commonlib-unit']),
required=True
)
def main(log_file, test_suite):
worker_test_dict = {}
with io.open(log_file, 'r') as console_file:
for line in console_file:
regex_search = re.search(r'\[{}] \[gw(\d+)] (PASSED|FAILED|SKIPPED|ERROR)'.format(test_suite), line)
if regex_search:
worker_num_string = regex_search.group(1)
if worker_num_string not in worker_test_dict:
worker_test_dict[worker_num_string] = []
test = line.split()[3]
if test_suite == "commonlib-unit":
if "pavelib" not in test:
test = u"common/lib/{}".format(test)
worker_test_dict[worker_num_string].append(test)
output_folder_name = "worker_list_files"
if os.path.isdir(output_folder_name):
shutil.rmtree(output_folder_name)
os.mkdir(output_folder_name)
for worker_num in worker_test_dict:
output_file_name = "{}/{}_gw{}_test_list.txt".format(output_folder_name, test_suite, worker_num)
with io.open(output_file_name, 'w') as output_file:
for line in worker_test_dict[worker_num]:
output_file.write(line + "\n")
if __name__ == "__main__":
main()
| """
This script strips the console log of a pytest-xdist Jenkins run into the test
lists of each pytest worker.
Assumes the following format:
[test-suite] [worker] RESULT test
"""
import click
import io
import re
import os
import shutil
@click.command()
@click.option(
'--log-file',
help="File name of console log .txt file from a Jenkins build "
"that ran pytest-xdist. This can be acquired by running: "
"curl -o console.txt https://build.testeng.edx.org/job/JOBNAME/BUILDNUMBER/consoleText",
required=True
)
@click.option(
'--test-suite',
help="Test suite that the pytest worker ran. Example: lms-unit",
required=True
)
def main(log_file, test_suite):
worker_test_dict = {}
with io.open(log_file, 'r') as console_file:
for line in console_file:
regex_search = re.search(r'\[{}] \[gw(\d+)] PASSED|FAILED|SKIPPED|ERROR'.format(test_suite), line)
if regex_search:
worker_num_string = regex_search.group(1)
if worker_num_string not in worker_test_dict:
worker_test_dict[worker_num_string] = []
worker_test_dict[worker_num_string].append(line.split()[3])
output_folder_name = "worker_list_files"
if os.path.isdir(output_folder_name):
shutil.rmtree(output_folder_name)
os.mkdir(output_folder_name)
for worker_num in worker_test_dict:
output_file_name = "{}/{}_gw{}_test_list.txt".format(output_folder_name, test_suite, worker_num)
with io.open(output_file_name, 'w') as output_file:
for line in worker_test_dict[worker_num]:
output_file.write(line + "\n")
if __name__ == "__main__":
main()
| agpl-3.0 | Python |
72e0b78ea1578df68f95f97f6080f5c011371753 | Return None if there are no results for a resource | ckan/ckanext-deadoralive,ckan/ckanext-deadoralive,ckan/ckanext-deadoralive | ckanext/deadoralive/logic/action/get.py | ckanext/deadoralive/logic/action/get.py | import datetime
import ckanext.deadoralive.model.results as results
import ckanext.deadoralive.config as config
def get_resources_to_check(context, data_dict):
"""Return a list of up to ``n`` resource IDs to be checked.
Returns up to ``n`` resource IDs to be checked for broken links.
Resources that have not been checked before will be returned first, oldest
resources first.
Resources that have not been checked in the last 24 hours (configurable:
``ckanext.deadoralive.recheck_resources_after``) will be returned next,
most-recently-checked resources last.
As soon as a resource's ID is returned by this function that resource is
considered to have a "pending" check (we are expecting to receive a link
check result for that resource soon). Resources with pending checks will
not be returned by this function again for at least 2 hours (configurable:
``ckanext.deadoralive.resend_pending_resources_after``).
:param n: the maximum number of resources to return at once
:type n: int
:rtype: list of strings
"""
# TODO: Authorization.
# TODO: Validation.
recheck_resources_after = config.recheck_resources_after
since_delta = datetime.timedelta(hours=recheck_resources_after)
resend_pending_resources_after = (
config.resend_pending_resources_after)
pending_since_delta = datetime.timedelta(
hours=resend_pending_resources_after)
n = data_dict.get("n", 50)
return results.get_resources_to_check(n, since=since_delta,
pending_since=pending_since_delta)
def get(context, data_dict):
"""Get the latest link check result data for a resource.
:param resource_id: the resource to return the result data for
:type resource_id: string
:returns: the latest link check data for the resource, or None if there are
no results for this resource
:rtype: dict or None
"""
# TODO: Authorization.
# TODO: Validation.
resource_id = data_dict["resource_id"]
try:
result = results.get(resource_id)
except results.NoResultForResourceError:
return None
# datetimes aren't JSON serializable.
result["last_checked"] = result["last_checked"].isoformat()
if result["last_successful"]:
result["last_successful"] = result["last_successful"].isoformat()
if result["pending_since"]:
result["pending_since"] = result["pending_since"].isoformat()
return result
| import datetime
import ckanext.deadoralive.model.results as results
import ckanext.deadoralive.config as config
def get_resources_to_check(context, data_dict):
"""Return a list of up to ``n`` resource IDs to be checked.
Returns up to ``n`` resource IDs to be checked for broken links.
Resources that have not been checked before will be returned first, oldest
resources first.
Resources that have not been checked in the last 24 hours (configurable:
``ckanext.deadoralive.recheck_resources_after``) will be returned next,
most-recently-checked resources last.
As soon as a resource's ID is returned by this function that resource is
considered to have a "pending" check (we are expecting to receive a link
check result for that resource soon). Resources with pending checks will
not be returned by this function again for at least 2 hours (configurable:
``ckanext.deadoralive.resend_pending_resources_after``).
:param n: the maximum number of resources to return at once
:type n: int
:rtype: list of strings
"""
# TODO: Authorization.
# TODO: Validation.
recheck_resources_after = config.recheck_resources_after
since_delta = datetime.timedelta(hours=recheck_resources_after)
resend_pending_resources_after = (
config.resend_pending_resources_after)
pending_since_delta = datetime.timedelta(
hours=resend_pending_resources_after)
n = data_dict.get("n", 50)
return results.get_resources_to_check(n, since=since_delta,
pending_since=pending_since_delta)
def get(context, data_dict):
"""Get the latest link check result data for a resource.
:param resource_id: the resource to return the result data for
:type resource_id: string
:returns: the latest link check data for the resource
:rtype: dict
"""
# TODO: Authorization.
# TODO: Validation.
resource_id = data_dict["resource_id"]
result = results.get(resource_id)
# datetimes aren't JSON serializable.
result["last_checked"] = result["last_checked"].isoformat()
if result["last_successful"]:
result["last_successful"] = result["last_successful"].isoformat()
if result["pending_since"]:
result["pending_since"] = result["pending_since"].isoformat()
return result
| agpl-3.0 | Python |
bf0986f2b404b97d30b047a55b4914a8c71975a7 | set sort on list view | bhoggard/nurtureart,bhoggard/nurtureart,bhoggard/nurtureart | benefit/urls.py | benefit/urls.py | from django.conf.urls import patterns, url
from django.views.generic import DetailView, ListView, TemplateView
from .models import Artwork
urlpatterns = patterns('',
url(r'^$', 'benefit.views.index', name='index'),
url(r'^artworks$', ListView.as_view(
model=Artwork, paginate_by=24,
queryset=Artwork.objects.order_by('artist_last_name', 'artist_first_name', 'title')),
name='artworks-list'),
url(r'^artworks/(?P<pk>\d+)$', DetailView.as_view(model=Artwork), name='artwork-detail'),
) | from django.conf.urls import patterns, url
from django.views.generic import DetailView, ListView, TemplateView
from .models import Artwork
urlpatterns = patterns('',
url(r'^$', 'benefit.views.index', name='index'),
url(r'^artworks$', ListView.as_view(
model=Artwork, paginate_by=24),
name='artworks-list'),
url(r'^artworks/(?P<pk>\d+)$', DetailView.as_view(model=Artwork), name='artwork-detail'),
) | mit | Python |
858d0ba40131787940a71204a12b509e873c4fba | Normalize version number | dreispt/project,acsone/project,OCA/project-service,NeovaHealth/project-service,xpansa/project-service,ddico/project,dreispt/project-service,acsone/project-service,eezee-it/project-service | project_stage_state_issue/__openerp__.py | project_stage_state_issue/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis, 2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Add State field to Project Issues',
'version': '8.0.1.0.0',
'category': 'Project Management',
'summary': 'Restore State attribute removed from Project Stages in 8.0',
'author': "Daniel Reis,Odoo Community Association (OCA)",
'website': 'https://github.com/OCA/project-service',
'license': 'AGPL-3',
'depends': [
'project_stage_state',
'project_issue',
],
'data': [
'project_issue_view.xml',
],
'installable': True,
'auto_install': True,
}
| # -*- coding: utf-8 -*-
##############################################################################
#
# Daniel Reis, 2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Add State field to Project Issues',
'version': '1.0',
'category': 'Project Management',
'summary': 'Restore State attribute removed from Project Stages in 8.0',
'author': "Daniel Reis,Odoo Community Association (OCA)",
'website': 'https://github.com/OCA/project-service',
'license': 'AGPL-3',
'depends': [
'project_stage_state',
'project_issue',
],
'data': [
'project_issue_view.xml',
],
'installable': True,
'auto_install': True,
}
| agpl-3.0 | Python |
26da8d4ee15ccdb3338c0ad0ca8096d8da455050 | add `adapt2cuds` in the simphony plugin | simphony/simphony-mayavi | simphony_mayavi/plugin.py | simphony_mayavi/plugin.py | from simphony_mayavi._version import full_version as __version__
from simphony_mayavi.api import show, snapshot, adapt2cuds
from simphony_mayavi.cuds.api import VTKParticles, VTKLattice, VTKMesh
__all__ = [
'show', 'snapshot', 'adapt2cuds',
'__version__',
'VTKParticles', 'VTKLattice', 'VTKMesh']
| from simphony_mayavi._version import full_version as __version__
from simphony_mayavi.show import show
from simphony_mayavi.snapshot import snapshot
from simphony_mayavi.cuds.api import VTKParticles, VTKLattice, VTKMesh
__all__ = [
'show', 'snapshot', '__version__', 'VTKParticles', 'VTKLattice', 'VTKMesh']
| bsd-2-clause | Python |
fab28d49551012e2db97a109ae78f87c6d9d9a68 | Fix bug. Cannot do sudo. | svenkreiss/databench_examples,svenkreiss/databench_examples,svenkreiss/databench_examples,svenkreiss/databench_examples | .ebextensions/01rewrite_nginx_config.py | .ebextensions/01rewrite_nginx_config.py | #! /usr/bin/python
"""Modifies nginx configuration file on AWS Elastic Beanstalk to support
WebSocket connections."""
__author__ = "Sven Kreiss <me@svenkreiss.com>"
__version__ = "0.0.2"
import os
NGINX_CONF_FILE = '/etc/nginx/sites-enabled/elasticbeanstalk-nginx-docker.conf'
NGINX_CONFIG = """
location /socket.io {
proxy_pass http://docker/socket.io;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
"""
def file_contains_string(trigger='location /socket.io'):
with open(NGINX_CONF_FILE, 'r') as f:
for line in f:
if trigger in line:
return True
return False
def add_string_after_block(block='location /', string=NGINX_CONFIG):
f = open(NGINX_CONF_FILE, 'r').readlines()
new_f = []
inside_block = False
for line in f:
new_f.append(line)
if block in line:
inside_block = True
if inside_block and '}' in line:
new_f += [l+'\n' for l in string.split('\n')]
inside_block = False
print new_f
# overwrite config file
f = open(NGINX_CONF_FILE, 'w')
for line in new_f:
f.write(line)
f.close()
def restart_nginx():
os.system("service nginx restart")
def main():
print '--- NginX conf file exists ---'
print NGINX_CONF_FILE
isfile = os.path.isfile(NGINX_CONF_FILE)
print isfile
if not isfile:
print 'abort'
return
print '--- Checking NginX configuration ---'
already_fixed = file_contains_string()
print already_fixed
if already_fixed:
print 'abort'
return
print '--- Changing NginX configuration ---'
add_string_after_block()
print '--- Restart NginX ---'
restart_nginx()
if __name__ == "__main__":
main()
| #! /usr/bin/python
"""Modifies nginx configuration file on AWS Elastic Beanstalk to support
WebSocket connections."""
__author__ = "Sven Kreiss <me@svenkreiss.com>"
__version__ = "0.0.2"
import os
NGINX_CONF_FILE = '/etc/nginx/sites-enabled/elasticbeanstalk-nginx-docker.conf'
NGINX_CONFIG = """
location /socket.io {
proxy_pass http://docker/socket.io;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
"""
def file_contains_string(trigger='location /socket.io'):
with open(NGINX_CONF_FILE, 'r') as f:
for line in f:
if trigger in line:
return True
return False
def add_string_after_block(block='location /', string=NGINX_CONFIG):
f = open(NGINX_CONF_FILE, 'r').readlines()
new_f = []
inside_block = False
for line in f:
new_f.append(line)
if block in line:
inside_block = True
if inside_block and '}' in line:
new_f += [l+'\n' for l in string.split('\n')]
inside_block = False
print new_f
# overwrite config file
f = open(NGINX_CONF_FILE, 'w')
for line in new_f:
f.write(line)
f.close()
def restart_nginx():
os.system("sudo service nginx restart")
def main():
print '--- NginX conf file exists ---'
print NGINX_CONF_FILE
isfile = os.path.isfile(NGINX_CONF_FILE)
print isfile
if not isfile:
print 'abort'
return
print '--- Checking NginX configuration ---'
already_fixed = file_contains_string()
print already_fixed
if already_fixed:
print 'abort'
return
print '--- Changing NginX configuration ---'
add_string_after_block()
print '--- Restart NginX ---'
restart_nginx()
if __name__ == "__main__":
main()
| mit | Python |
e02abe17b521508a6fb90a628c5c6dd59f7d3186 | Allow customizations of user-config.py | yuvipanda/paws,yuvipanda/paws | singleuser/user-config.py | singleuser/user-config.py | import os
mylang = 'test'
family = 'wikipedia'
custom_path = os.path.expanduser('~/user-config.py')
if os.path.exists(custom_path):
with open(custom_path, 'r') as f:
exec(compile(f.read(), custom_path, 'exec'), globals())
# Things that should be non-easily-overridable
usernames['*']['*'] = os.environ['JPY_USER']
| import os
mylang = 'test'
family = 'wikipedia'
usernames['wikipedia']['test'] = os.environ['JPY_USER']
| mit | Python |
fdab958fe17747e5a0a869dd367b2c9c277b4462 | Bump slybot version | hanicker/portia,anjuncc/portia,hanicker/portia,livepy/portia,NoisyText/portia,Suninus/portia,anjuncc/portia,Suninus/portia,SouthStar/portia,chennqqi/portia,NicoloPernigo/portia,pombredanne/portia,chennqqi/portia,chennqqi/portia,amikey/portia,nju520/portia,pombredanne/portia,naveenvprakash/portia,livepy/portia,livepy/portia,naveenvprakash/portia,hanicker/portia,anjuncc/portia,sntran/portia,SouthStar/portia,PrasannaVenkadesh/portia,naveenvprakash/portia,SouthStar/portia,PrasannaVenkadesh/portia,NoisyText/portia,pombredanne/portia,NoisyText/portia,sntran/portia,nju520/portia,SouthStar/portia,livepy/portia,chennqqi/portia,nju520/portia,amikey/portia,PrasannaVenkadesh/portia,sntran/portia,PrasannaVenkadesh/portia,Suninus/portia,amikey/portia,NicoloPernigo/portia,Youwotma/portia,amikey/portia,Youwotma/portia,naveenvprakash/portia,hanicker/portia,NoisyText/portia,sntran/portia,NicoloPernigo/portia,Youwotma/portia,NicoloPernigo/portia,Suninus/portia,Youwotma/portia,pombredanne/portia,anjuncc/portia,nju520/portia | slybot/slybot/__init__.py | slybot/slybot/__init__.py | __version__ = '0.11.1'
| __version__ = '0.11'
| bsd-3-clause | Python |
bba5011432e484a906f0008c46f4d049492b2643 | change location of computenode.txt | ElofssonLab/web_common_backend,ElofssonLab/web_common_backend,ElofssonLab/web_common_backend,ElofssonLab/web_common_backend,ElofssonLab/web_common_backend | proj/pro_settings.py | proj/pro_settings.py | """
Django settings for proj project in production
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append("%s/pred/app/"%(BASE_DIR))
import myfunc
try:
from shared_settings import *
except ImportError:
pass
with open('/etc/django_pro_secret_key.txt') as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', 'commonbackend.*', 'commonbackend.computenode.pcons3.se']
computenodefile = "%s/pred/config/computenode.txt"%(BASE_DIR)
if os.path.exists(computenodefile):
nodelist = []
try:
nodelist = myfunc.ReadIDList2(computenodefile,col=0)
except:
pass
ALLOWED_HOSTS += nodelist
| """
Django settings for proj project in production
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append("%s/pred/app/"%(BASE_DIR))
import myfunc
try:
from shared_settings import *
except ImportError:
pass
with open('/etc/django_pro_secret_key.txt') as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['localhost', 'commonbackend.*', 'commonbackend.computenode.pcons3.se']
computenodefile = "%s/pred/static/computenode.txt"%(BASE_DIR)
if os.path.exists(computenodefile):
nodelist = []
try:
nodelist = myfunc.ReadIDList2(computenodefile,col=0)
except:
pass
ALLOWED_HOSTS += nodelist
| mit | Python |
71ea6816eea95e8bf750563718b0dd39114a3c49 | Add a cookie based authentication source | usingnamespace/pyramid_authsanity | pyramid_authsanity/sources.py | pyramid_authsanity/sources.py | from webob.cookies (
SignedCookieProfile,
SignedSerializer,
)
from zope.interface import implementer
from .interfaces (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
def CookieAuthSourceFactory(
secret,
cookie_name='auth',
secure=False,
max_age=None,
httponly=False,
path="/",
domains=None,
timeout=None,
reissue_time=None,
debug=False,
hashalg='sha512',
):
""" An authentication source that uses a unique cookie """
@implementer(IAuthSourceService)
class CookieAuthSource(object):
def __init__(self, context, request):
self.domains = domains
if self.domains is None:
self.domains = []
self.domains.append(request.domain)
self.cookie = SignedCookieProfile(
secret,
'authsanity',
cookie_name,
secure=secure,
max_age=max_age,
httponly=httponly,
path=path,
domains=domains,
hashalg=hashalg,
)
# Bind the cookie to the current request
self.cookie = self.cookie.bind(request)
return self
def get_value(self):
return self.cookie.get_value()
def headers_remember(self, value):
return self.cookie.get_headers(value, domains=self.domains)
def headers_forget(self):
return self.cookie.get_headers('', max_age=0)
return CookieAuthSource
| from zope.interface import implementer
from .interfaces (
IAuthSourceService,
)
@implementer(IAuthSourceService)
class SessionAuthSource(object):
""" An authentication source that uses the current session """
vary = ()
value_key = 'sanity.value'
def __init__(self, context, request):
self.request = request
self.session = request.session
return self
def get_value(self):
return self.session.get(value_key, [None, None])
def headers_remember(self, value):
self.session[value_key] = value
return []
def headers_forget(self):
if value_key in self.session:
del self.session[value_key]
return []
| isc | Python |
494f14a69d08e9bfd556fccc6b4e2319db129a38 | Add created and modified fields to Receipt | trimailov/finance,trimailov/finance,trimailov/finance | books/models.py | books/models.py | from django.contrib.auth.models import User
from django.db import models
from django.db.models import fields
from django.utils import timezone
class Receipt(models.Model):
title = fields.CharField(max_length=255)
price = fields.DecimalField(max_digits=10, decimal_places=2)
created = fields.DateTimeField(auto_now=True)
modified = fields.DateTimeField(default=timezone.now())
user = models.ForeignKey(User)
def __str__(self):
return "{}_{}".format(self.title, self.price)
| from django.contrib.auth.models import User
from django.db import models
from django.db.models import fields
class Receipt(models.Model):
title = fields.CharField(max_length=255)
price = fields.DecimalField(max_digits=10, decimal_places=2)
user = models.ForeignKey(User)
def __str__(self):
return "{}_{}".format(self.title, self.price)
| mit | Python |
b2c185ee6f02584b8a3e3c512cf62c5943cbbde5 | work this time | akhilari7/pa-dude,neerajvashistha/pa-dude,akhilari7/pa-dude,neerajvashistha/pa-dude,neerajvashistha/pa-dude,akhilari7/pa-dude,neerajvashistha/pa-dude,akhilari7/pa-dude,neerajvashistha/pa-dude,neerajvashistha/pa-dude | spellcheck.py | spellcheck.py | import re, collections
#import enchant
import sys
sys.path.append('pyenchant-1.6.6/enchant')
sys.path.append('pyenchant-1.6.6/enchant/checker/')
from enchant.checker import SpellChecker
def words(text):
return re.findall('[a-z]+', text.lower())
def train(features):
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
NWORDS = train(words(file('corpus.txt').read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
s = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in s if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b)>1]
replaces = [a + c + b[1:] for a, b in s for c in alphabet if b]
inserts = [a + c + b for a, b in s for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words):
return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
def correct_top(word, n):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
s = sorted(candidates, key=NWORDS.get, reverse=True)
return s[0], s[:n]
def sentence_correct(sentence):
wordlist = sentence.split()
correctSentenceList = []
for word in wordlist:
chkr = SpellChecker('en_US')
chkr.set_text(word)
for err in chkr:
word = correct(err.word)
#print word
correctSentenceList.append(word)
#print correctSentenceList
correctSentence = ' '.join(correctSentenceList)
return correctSentence
if __name__ == "__main__":
print(sentence_correct("I would like to order mancurian"))
| import re, collections
import enchant
import sys
#sys.path.append('pyenchant-1.6.6/enchant')
from enchant.checker import SpellChecker
def words(text):
return re.findall('[a-z]+', text.lower())
def train(features):
model = collections.defaultdict(lambda: 1)
for f in features:
model[f] += 1
return model
NWORDS = train(words(file('corpus.txt').read()))
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def edits1(word):
s = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [a + b[1:] for a, b in s if b]
transposes = [a + b[1] + b[0] + b[2:] for a, b in s if len(b)>1]
replaces = [a + c + b[1:] for a, b in s for c in alphabet if b]
inserts = [a + c + b for a, b in s for c in alphabet]
return set(deletes + transposes + replaces + inserts)
def known_edits2(word):
return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in NWORDS)
def known(words):
return set(w for w in words if w in NWORDS)
def correct(word):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
return max(candidates, key=NWORDS.get)
def correct_top(word, n):
candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]
s = sorted(candidates, key=NWORDS.get, reverse=True)
return s[0], s[:n]
def sentence_correct(sentence):
wordlist = sentence.split()
correctSentenceList = []
for word in wordlist:
chkr = SpellChecker('en_US')
chkr.set_text(word)
for err in chkr:
word = correct(err.word)
#print word
correctSentenceList.append(word)
#print correctSentenceList
correctSentence = ' '.join(correctSentenceList)
return correctSentence
if __name__ == "__main__":
print(sentence_correct("I would like to order mancurian"))
| mit | Python |
b1547647deec6c1edf54c497fa4ed20235ea6902 | Add missing egun.bias in init | lnls-fac/sirius | pymodels/middlelayer/devices/__init__.py | pymodels/middlelayer/devices/__init__.py | from .dcct import DCCT
from .li_llrf import LiLLRF
from .rf import RF
from .sofb import SOFB
from .kicker import Kicker
from .septum import Septum
from .screen import Screen
from .bpm import BPM
from .ict import ICT
from .ict import TranspEff
from .egun import Bias
from .egun import Filament
from .egun import HVPS
| from .dcct import DCCT
from .li_llrf import LiLLRF
from .rf import RF
from .sofb import SOFB
from .kicker import Kicker
from .septum import Septum
from .screen import Screen
from .bpm import BPM
from .ict import ICT
from .ict import TranspEff
from .egun import HVPS
from .egun import Filament
| mit | Python |
83799d81fe734938024aa1f5d4a438fba3cc1807 | Add uploading to AWS machinery | jasonsbrooks/ysniff-software,jasonsbrooks/ysniff-software | ysniff.py | ysniff.py | #!/usr/bin/env python
import boto.rds
import fileinput
import sys
import os
mac_index = 12
time_index = 1
start_t_us = 0
start_u_us = 0
MAC_LEN = 17
SAMPLE_PERIOD = 30 # Seconds.
PUSH_TO_AWS_PERIOD = 3600 # Seconds. One hour.
maclist = set()
buffer = {}
conn=boto.connect_sdb()
domain=conn.get_domain('tmp_ysniff')
# TODO: Upload buffer to AWS every collection period.
for line in fileinput.input():
splitline = line.split(" ")
if mac_index < len(splitline):
mac = splitline[mac_index]
if mac == "DA:Broadcast":
mac = splitline[mac_index+1]
ts = int(splitline[time_index][:-2])
mac = mac[len(mac)-MAC_LEN:]
# Make list of timestamps for each mac
if mac not in buffer:
buffer[mac]=[]
# Only pair timestamp to mac address once
if start_t_us not in buffer[mac]:
buffer[mac].append(start_t_us)
# Update start_t_us every SAMPLE_PERIOD
if start_t_us is 0 or ts - start_t_us > (SAMPLE_PERIOD * 1000000):
start_t_us = ts
# upload buffer to AWS every PUSH_TO_AWS_PERIOD
if start_u_us is 0:
start_u_us = ts
elif ts - start_u_us > (PUSH_TO_AWS_PERIOD * 1000000):
for key in buffer:
item = domain.get_item(key)
for timestamp in buffer[key]:
item[timestamp] = os.environ['PI_LOCATION']
item.save()
buffer = {}
start_t_us = ts
#print buffer, len(buffer)
| #!/usr/bin/env python
import boto.rds
import fileinput
import sys
mac_index = 12
time_index = 1
start_t_us = 0
start_u_us = 0
MAC_LEN = 17
SAMPLE_PERIOD = 30 # Seconds.
PUSH_TO_AWS_PERIOD = 3600 # Seconds. One hour.
maclist = set()
buffer = {}
conn = boto.rds.connect_to_region("us-west-2",aws_access_key_id=sys.argv[1],aws_secret_key_id=sys.argv[2])
aws_secret_access_key='<aws secret key>')
# TODO: Upload buffer to AWS every collection period.
for line in fileinput.input():
splitline = line.split(" ")
if mac_index < len(splitline):
mac = splitline[mac_index]
if mac == "DA:Broadcast":
mac = splitline[mac_index+1]
ts = int(splitline[time_index][:-2])
mac = mac[len(mac)-MAC_LEN:]
maclist.update(mac)
# move maclist to buffer every SAMPLE_PERIOD
if start_t_us is 0:
start_t_us = ts
elif ts - start_t_us > (SAMPLE_PERIOD * 1000000):
buffer[start_t_us] = maclist
maclist = set()
start_t_us = ts
# upload buffer every PUSH_TO_AWS_PERIOD
if start_u_us is 0:
start_u_us = ts
elif ts - start_u_us > (PUSH_TO_AWS_PERIOD * 1000000):
# upload buffer here
buffer = {}
start_t_us = ts
print ts,mac
#print buffer, len(buffer)
| mit | Python |
6df951062e73559f0d3cca50370969617c415d6e | make imports local | erm0l0v/python_wrap_cases | python_wrap_cases/__init__.py | python_wrap_cases/__init__.py | # -*- coding: utf-8 -*-
__author__ = 'Kirill Ermolov'
__email__ = 'erm0l0v@ya.ru'
__version__ = '0.1.2'
from python_wrap_cases.wrap_cases import * | # -*- coding: utf-8 -*-
__author__ = 'Kirill Ermolov'
__email__ = 'erm0l0v@ya.ru'
__version__ = '0.1.2'
from python_wrap_cases import generators as g
from python_wrap_cases.wrap_cases import * | bsd-3-clause | Python |
4b3475eec6cb174c78fe6c8ec7d8495500e7bef3 | Remove reference to OHSU. | ohsu-qin/qipipe | qipipe/staging/collections.py | qipipe/staging/collections.py | from .staging_error import StagingError
extent = {}
"""The {name: collection} dictionary."""
def add(*collections):
"""
Adds the given :class:`qipipe.staging.collection.Collection`s to the
list of known collections.
:param collections: the collection objects to add
"""
for coll in collections:
extent[coll.name] = coll
def with_name(name):
"""
:param name: the image collection name
:return: the corresponding :class:`qipipe.staging.collection.Collection`
:raise StagingError: if the collection is not recognized
"""
coll = extent.get(name, None)
if not coll:
raise StagingError("The collection name is not recognized: %s" %
name)
return coll
| from .staging_error import StagingError
extent = {}
"""The {name: collection} dictionary."""
def add(*collections):
"""
Adds the given :class:`qipipe.staging.collection.Collection`s to the
list of known collections.
:param collections: the collection objects to add
"""
for coll in collections:
extent[coll.name] = coll
def with_name(name):
"""
:param name: the image collection name
:return: the corresponding :class:`qipipe.staging.collection.Collection`
:raise StagingError: if the collection is not recognized
"""
coll = extent.get(name, None)
if not coll:
raise StagingError("The AIRC collection name is not recognized: %s" %
name)
return coll
| bsd-2-clause | Python |
cff0599bbc891ec690f818cb85bffce3e78212df | Fix pep8 | lxc/pylxd,lxc/pylxd | pylxd/deprecation.py | pylxd/deprecation.py | import warnings
warnings.simplefilter('once', DeprecationWarning)
class deprecated():
"""A decorator for warning about deprecation warnings.
The decorator takes an optional message argument. This message can
be used to direct the user to a new API or specify when it will
be removed.
"""
DEFAULT_MESSAGE = '{} is deprecated and will be removed soon.'
def __init__(self, message=None):
self.message = message
def __call__(self, f):
def wrapped(*args, **kwargs):
if self.message is None:
self.message = self.DEFAULT_MESSAGE.format(
f.__name__)
warnings.warn(self.message, DeprecationWarning)
return f(*args, **kwargs)
return wrapped
| import warnings
warnings.simplefilter('once', DeprecationWarning)
class deprecated():
"""A decorator for warning about deprecation warnings.
The decorator takes an optional message argument. This message can
be used to direct the user to a new API or specify when it will
be removed.
"""
def __init__(self, message=None):
self.message = message
def __call__(self, f):
def wrapped(*args, **kwargs):
if self.message is None:
self.message = '{} is deprecated and will be removed soon.'.format(
f.__name__)
warnings.warn(self.message, DeprecationWarning)
return f(*args, **kwargs)
return wrapped
| apache-2.0 | Python |
9435e211e3e54184946a66c5f59edfc20ff17c9f | Change default TB version | lnls-fac/sirius | pymodels/__init__.py | pymodels/__init__.py | """PyModels package."""
import os as _os
from . import LI_V01_01
from . import TB_V03_02
from . import BO_V05_04
from . import TS_V03_03
from . import SI_V24_04
from . import coordinate_system
with open(_os.path.join(__path__[0], 'VERSION'), 'r') as _f:
__version__ = _f.read().strip()
__all__ = ('LI_V01_01', 'TB_V03_02', 'BO_V05_04', 'TS_V03_03', 'SI_V24_04')
li = LI_V01_01
tb = TB_V03_02
bo = BO_V05_04
ts = TS_V03_03
si = SI_V24_04
| """PyModels package."""
import os as _os
from . import LI_V01_01
from . import TB_V02_01
from . import BO_V05_04
from . import TS_V03_03
from . import SI_V24_04
from . import coordinate_system
with open(_os.path.join(__path__[0], 'VERSION'), 'r') as _f:
__version__ = _f.read().strip()
__all__ = ('LI_V01_01', 'TB_V02_01', 'BO_V05_04', 'TS_V03_03', 'SI_V24_04')
li = LI_V01_01
tb = TB_V02_01
bo = BO_V05_04
ts = TS_V03_03
si = SI_V24_04
| mit | Python |
58b88ac1b6612f4bb3b78c09de0616fcc71fa0b9 | add remove blank lines | przemyslawjanpietrzak/pyMonet | pymonet/test_lazy.py | pymonet/test_lazy.py | from pymonet.lazy import Lazy
from random import random
class LazySpy:
def mapper(self, input):
return input + 1
def fn(self):
return 42
def fold_function(self, value):
return value + 1
def fn():
return 42
def fn1():
return 43
def test_applicative_should_call_stored_function_during_fold_method_call(mocker):
lazy_spy = LazySpy()
mocker.spy(lazy_spy, 'fn')
applicative = Lazy(lazy_spy.fn)
assert lazy_spy.fn.call_count == 0
assert applicative.fold(lambda number: number + 1) == 43
assert lazy_spy.fn.call_count == 1
def test_applicative_should_call_mapper_during_fold_method_call(mocker):
lazy_spy = LazySpy()
mocker.spy(lazy_spy, 'fn')
mocker.spy(lazy_spy, 'mapper')
mocker.spy(lazy_spy, 'fold_function')
applicative = Lazy(lazy_spy.fn).map(lazy_spy.mapper)
assert lazy_spy.mapper.call_count == 0
assert applicative.fold(lazy_spy.fold_function) == 44
assert lazy_spy.mapper.call_count == 1
assert lazy_spy.fold_function.call_count == 1
def test_applicative_should_call_memoize_saved_value(mocker):
lazy_spy = LazySpy()
mocker.spy(lazy_spy, 'fn')
lazy = Lazy(lazy_spy.fn)
value1 = lazy.get()
assert lazy_spy.fn.call_count == 1
value2 = lazy.get()
assert lazy_spy.fn.call_count == 1
assert value1 is value2
def test_applicative_eq():
assert Lazy(fn) != {}
assert Lazy(fn) != []
assert Lazy(fn) != Lazy(fn1)
assert Lazy(fn) == Lazy(fn)
def test_applicative_eq_evaluated():
lazy1 = Lazy(fn)
lazy2 = Lazy(fn)
lazy1.get()
assert lazy1 != lazy2
lazy2.get()
assert lazy1 == lazy2
def test_applicative_eq_value():
lazy1 = Lazy(random)
lazy2 = Lazy(random)
lazy1.get()
lazy2.get()
assert lazy1 == lazy1
assert lazy2 == lazy2
assert lazy1 != lazy2
| from pymonet.lazy import Lazy
from random import random
class LazySpy:
def mapper(self, input):
return input + 1
def fn(self):
return 42
def fold_function(self, value):
return value + 1
def fn():
return 42
def fn1():
return 43
def test_applicative_should_call_stored_function_during_fold_method_call(mocker):
lazy_spy = LazySpy()
mocker.spy(lazy_spy, 'fn')
applicative = Lazy(lazy_spy.fn)
assert lazy_spy.fn.call_count == 0
assert applicative.fold(lambda number: number + 1) == 43
assert lazy_spy.fn.call_count == 1
def test_applicative_should_call_mapper_during_fold_method_call(mocker):
lazy_spy = LazySpy()
mocker.spy(lazy_spy, 'fn')
mocker.spy(lazy_spy, 'mapper')
mocker.spy(lazy_spy, 'fold_function')
applicative = Lazy(lazy_spy.fn).map(lazy_spy.mapper)
assert lazy_spy.mapper.call_count == 0
assert applicative.fold(lazy_spy.fold_function) == 44
assert lazy_spy.mapper.call_count == 1
assert lazy_spy.fold_function.call_count == 1
def test_applicative_should_call_memoize_saved_value(mocker):
lazy_spy = LazySpy()
mocker.spy(lazy_spy, 'fn')
lazy = Lazy(lazy_spy.fn)
value1 = lazy.get()
assert lazy_spy.fn.call_count == 1
value2 = lazy.get()
assert lazy_spy.fn.call_count == 1
assert value1 is value2
def test_applicative_eq():
assert Lazy(fn) != {}
assert Lazy(fn) != []
assert Lazy(fn) != Lazy(fn1)
assert Lazy(fn) == Lazy(fn)
def test_applicative_eq_evaluated():
lazy1 = Lazy(fn)
lazy2 = Lazy(fn)
lazy1.get()
assert lazy1 != lazy2
lazy2.get()
assert lazy1 == lazy2
def test_applicative_eq_value():
lazy1 = Lazy(random)
lazy2 = Lazy(random)
lazy1.get()
lazy2.get()
assert lazy1 == lazy1
assert lazy2 == lazy2
assert lazy1 != lazy2
| mit | Python |
371aed8cc93d1903f7d20b114054aade9bdc72cd | update version | Pythonicos/qdict | qdict/__version__.py | qdict/__version__.py | __version__ = '1.1.0'
| __version__ = '1.0.0'
| mit | Python |
d78b3476a3244ba62df4df2e4c6b6840fbb34c67 | Update version to 0.13.2 | quantumlib/qsim,quantumlib/qsim,quantumlib/qsim,quantumlib/qsim | qsimcirq/_version.py | qsimcirq/_version.py | """The version number defined here is read automatically in setup.py."""
__version__ = "0.13.2"
| """The version number defined here is read automatically in setup.py."""
__version__ = "0.13.1"
| apache-2.0 | Python |
70ae79d1704279a92af9406e2717b8847c22024d | Use raw_id_field for users in admin. | istresearch/readthedocs.org,attakei/readthedocs-oauth,ojii/readthedocs.org,royalwang/readthedocs.org,hach-que/readthedocs.org,nyergler/pythonslides,kdkeyser/readthedocs.org,sid-kap/readthedocs.org,emawind84/readthedocs.org,attakei/readthedocs-oauth,rtfd/readthedocs.org,kdkeyser/readthedocs.org,sils1297/readthedocs.org,istresearch/readthedocs.org,laplaceliu/readthedocs.org,GovReady/readthedocs.org,Tazer/readthedocs.org,espdev/readthedocs.org,kdkeyser/readthedocs.org,davidfischer/readthedocs.org,mhils/readthedocs.org,wanghaven/readthedocs.org,wijerasa/readthedocs.org,sunnyzwh/readthedocs.org,hach-que/readthedocs.org,sid-kap/readthedocs.org,singingwolfboy/readthedocs.org,davidfischer/readthedocs.org,mrshoki/readthedocs.org,agjohnson/readthedocs.org,KamranMackey/readthedocs.org,singingwolfboy/readthedocs.org,techtonik/readthedocs.org,safwanrahman/readthedocs.org,titiushko/readthedocs.org,nikolas/readthedocs.org,atsuyim/readthedocs.org,soulshake/readthedocs.org,ojii/readthedocs.org,royalwang/readthedocs.org,johncosta/private-readthedocs.org,jerel/readthedocs.org,Tazer/readthedocs.org,techtonik/readthedocs.org,takluyver/readthedocs.org,kenwang76/readthedocs.org,mhils/readthedocs.org,wijerasa/readthedocs.org,fujita-shintaro/readthedocs.org,wijerasa/readthedocs.org,atsuyim/readthedocs.org,kenwang76/readthedocs.org,jerel/readthedocs.org,KamranMackey/readthedocs.org,istresearch/readthedocs.org,stevepiercy/readthedocs.org,wanghaven/readthedocs.org,royalwang/readthedocs.org,johncosta/private-readthedocs.org,Carreau/readthedocs.org,safwanrahman/readthedocs.org,mhils/readthedocs.org,techtonik/readthedocs.org,atsuyim/readthedocs.org,sunnyzwh/readthedocs.org,attakei/readthedocs-oauth,LukasBoersma/readthedocs.org,singingwolfboy/readthedocs.org,cgourlay/readthedocs.org,attakei/readthedocs-oauth,GovReady/readthedocs.org,dirn/readthedocs.org,kenwang76/readthedocs.org,rtfd/readthedocs.org,alex/readthedocs.org,asampat3090/readthedocs.org,d0ugal/readthedocs.org,kenwang76/readthedocs.org,nyergler/pythonslides,clarkperkins/readthedocs.org,alex/readthedocs.org,SteveViss/readthedocs.org,SteveViss/readthedocs.org,Tazer/readthedocs.org,laplaceliu/readthedocs.org,stevepiercy/readthedocs.org,espdev/readthedocs.org,laplaceliu/readthedocs.org,titiushko/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,CedarLogic/readthedocs.org,KamranMackey/readthedocs.org,raven47git/readthedocs.org,mhils/readthedocs.org,wijerasa/readthedocs.org,kenshinthebattosai/readthedocs.org,kenshinthebattosai/readthedocs.org,fujita-shintaro/readthedocs.org,SteveViss/readthedocs.org,espdev/readthedocs.org,clarkperkins/readthedocs.org,mrshoki/readthedocs.org,sid-kap/readthedocs.org,espdev/readthedocs.org,jerel/readthedocs.org,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,wanghaven/readthedocs.org,agjohnson/readthedocs.org,CedarLogic/readthedocs.org,gjtorikian/readthedocs.org,rtfd/readthedocs.org,stevepiercy/readthedocs.org,emawind84/readthedocs.org,michaelmcandrew/readthedocs.org,takluyver/readthedocs.org,d0ugal/readthedocs.org,nikolas/readthedocs.org,soulshake/readthedocs.org,soulshake/readthedocs.org,takluyver/readthedocs.org,tddv/readthedocs.org,dirn/readthedocs.org,alex/readthedocs.org,VishvajitP/readthedocs.org,emawind84/readthedocs.org,laplaceliu/readthedocs.org,pombredanne/readthedocs.org,asampat3090/readthedocs.org,davidfischer/readthedocs.org,singingwolfboy/readthedocs.org,safwanrahman/readthedocs.org,asampat3090/readthedocs.org,raven47git/readthedocs.org,wanghaven/readthedocs.org,dirn/readthedocs.org,nikolas/readthedocs.org,atsuyim/readthedocs.org,kdkeyser/readthedocs.org,fujita-shintaro/readthedocs.org,stevepiercy/readthedocs.org,ojii/readthedocs.org,johncosta/private-readthedocs.org,hach-que/readthedocs.org,clarkperkins/readthedocs.org,cgourlay/readthedocs.org,rtfd/readthedocs.org,cgourlay/readthedocs.org,gjtorikian/readthedocs.org,istresearch/readthedocs.org,dirn/readthedocs.org,VishvajitP/readthedocs.org,sils1297/readthedocs.org,titiushko/readthedocs.org,sils1297/readthedocs.org,raven47git/readthedocs.org,tddv/readthedocs.org,royalwang/readthedocs.org,raven47git/readthedocs.org,d0ugal/readthedocs.org,GovReady/readthedocs.org,VishvajitP/readthedocs.org,asampat3090/readthedocs.org,nikolas/readthedocs.org,michaelmcandrew/readthedocs.org,kenshinthebattosai/readthedocs.org,soulshake/readthedocs.org,CedarLogic/readthedocs.org,takluyver/readthedocs.org,titiushko/readthedocs.org,gjtorikian/readthedocs.org,nyergler/pythonslides,LukasBoersma/readthedocs.org,ojii/readthedocs.org,jerel/readthedocs.org,LukasBoersma/readthedocs.org,Carreau/readthedocs.org,alex/readthedocs.org,Carreau/readthedocs.org,fujita-shintaro/readthedocs.org,kenshinthebattosai/readthedocs.org,gjtorikian/readthedocs.org,agjohnson/readthedocs.org,pombredanne/readthedocs.org,clarkperkins/readthedocs.org,cgourlay/readthedocs.org,nyergler/pythonslides,SteveViss/readthedocs.org,Carreau/readthedocs.org,hach-que/readthedocs.org,mrshoki/readthedocs.org,agjohnson/readthedocs.org,michaelmcandrew/readthedocs.org,sils1297/readthedocs.org,CedarLogic/readthedocs.org,emawind84/readthedocs.org,KamranMackey/readthedocs.org,tddv/readthedocs.org,d0ugal/readthedocs.org,pombredanne/readthedocs.org,Tazer/readthedocs.org,GovReady/readthedocs.org,LukasBoersma/readthedocs.org,sid-kap/readthedocs.org,mrshoki/readthedocs.org,sunnyzwh/readthedocs.org,sunnyzwh/readthedocs.org,techtonik/readthedocs.org,VishvajitP/readthedocs.org | readthedocs/projects/admin.py | readthedocs/projects/admin.py | """Django administration interface for `~projects.models.Project`
and related models.
"""
from builds.models import Version
from django.contrib import admin
from projects.models import Project, File, ImportedFile
class VersionInline(admin.TabularInline):
model = Version
class ProjectAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
list_display = ('name', 'repo', 'repo_type', 'featured', 'theme')
list_filter = ('repo_type', 'featured')
list_editable = ('featured',)
search_fields = ('name', 'repo')
inlines = [VersionInline]
raw_id_fields = ('users',)
class FileAdmin(admin.ModelAdmin):
pass
admin.site.register(Project, ProjectAdmin)
admin.site.register(File, FileAdmin)
admin.site.register(ImportedFile)
| """Django administration interface for `~projects.models.Project`
and related models.
"""
from builds.models import Version
from django.contrib import admin
from projects.models import Project, File, ImportedFile
class VersionInline(admin.TabularInline):
model = Version
class ProjectAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
list_display = ('name', 'repo', 'repo_type', 'featured', 'theme')
list_filter = ('repo_type', 'featured')
list_editable = ('featured',)
search_fields = ('name', 'repo')
inlines = [VersionInline]
class FileAdmin(admin.ModelAdmin):
pass
admin.site.register(Project, ProjectAdmin)
admin.site.register(File, FileAdmin)
admin.site.register(ImportedFile)
| mit | Python |
5856e4daaf141e5bf9cdef438378a3757297f9c0 | Add wrapper methods for clarity. | hhursev/recipe-scraper | recipe_scrapers/wholefoods.py | recipe_scrapers/wholefoods.py | from ._abstract import AbstractScraper
class WholeFoods(AbstractScraper):
@classmethod
def host(self, domain="com"):
return f"www.wholefoodsmarket.{domain}"
def title(self):
return self.schema.title()
def total_time(self):
return self.schema.total_time()
def yields(self):
return self.schema.yields()
def image(self):
return self.schema.image()
def ingredients(self):
return self.schema.ingredients()
def instructions(self):
return self.schema.instructions()
def ratings(self):
return self.schema.ratings()
| from ._abstract import AbstractScraper
class WholeFoods(AbstractScraper):
@classmethod
def host(self, domain="com"):
return f"www.wholefoodsmarket.{domain}"
| mit | Python |
873d42ddccc5f1fe2c8234ff6da3e00cf4beb8aa | Update setup.py | phac-nml/bioconda-recipes,peterjc/bioconda-recipes,rob-p/bioconda-recipes,joachimwolff/bioconda-recipes,matthdsm/bioconda-recipes,martin-mann/bioconda-recipes,xguse/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,zachcp/bioconda-recipes,phac-nml/bioconda-recipes,matthdsm/bioconda-recipes,ostrokach/bioconda-recipes,ivirshup/bioconda-recipes,rvalieris/bioconda-recipes,dmaticzka/bioconda-recipes,shenwei356/bioconda-recipes,lpantano/recipes,joachimwolff/bioconda-recipes,blankenberg/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,jfallmann/bioconda-recipes,peterjc/bioconda-recipes,bebatut/bioconda-recipes,daler/bioconda-recipes,gregvonkuster/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,npavlovikj/bioconda-recipes,oena/bioconda-recipes,bow/bioconda-recipes,peterjc/bioconda-recipes,bioconda/bioconda-recipes,martin-mann/bioconda-recipes,gvlproject/bioconda-recipes,CGATOxford/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,abims-sbr/bioconda-recipes,lpantano/recipes,matthdsm/bioconda-recipes,hardingnj/bioconda-recipes,cokelaer/bioconda-recipes,HassanAmr/bioconda-recipes,JenCabral/bioconda-recipes,xguse/bioconda-recipes,CGATOxford/bioconda-recipes,joachimwolff/bioconda-recipes,shenwei356/bioconda-recipes,cokelaer/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,bow/bioconda-recipes,dmaticzka/bioconda-recipes,xguse/bioconda-recipes,colinbrislawn/bioconda-recipes,mcornwell1957/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,bow/bioconda-recipes,hardingnj/bioconda-recipes,roryk/recipes,dmaticzka/bioconda-recipes,daler/bioconda-recipes,mcornwell1957/bioconda-recipes,dkoppstein/recipes,rvalieris/bioconda-recipes,saketkc/bioconda-recipes,dkoppstein/recipes,shenwei356/bioconda-recipes,daler/bioconda-recipes,rvalieris/bioconda-recipes,daler/bioconda-recipes,JenCabral/bioconda-recipes,HassanAmr/bioconda-recipes,keuv-grvl/bioconda-recipes,bow/bioconda-recipes,zachcp/bioconda-recipes,jasper1918/bioconda-recipes,colinbrislawn/bioconda-recipes,saketkc/bioconda-recipes,saketkc/bioconda-recipes,dmaticzka/bioconda-recipes,joachimwolff/bioconda-recipes,mdehollander/bioconda-recipes,acaprez/recipes,abims-sbr/bioconda-recipes,lpantano/recipes,shenwei356/bioconda-recipes,colinbrislawn/bioconda-recipes,keuv-grvl/bioconda-recipes,gvlproject/bioconda-recipes,HassanAmr/bioconda-recipes,ostrokach/bioconda-recipes,phac-nml/bioconda-recipes,mcornwell1957/bioconda-recipes,JenCabral/bioconda-recipes,gvlproject/bioconda-recipes,daler/bioconda-recipes,colinbrislawn/bioconda-recipes,Luobiny/bioconda-recipes,ostrokach/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,chapmanb/bioconda-recipes,jasper1918/bioconda-recipes,jasper1918/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,bioconda/recipes,CGATOxford/bioconda-recipes,jasper1918/bioconda-recipes,peterjc/bioconda-recipes,mdehollander/bioconda-recipes,hardingnj/bioconda-recipes,matthdsm/bioconda-recipes,mdehollander/bioconda-recipes,saketkc/bioconda-recipes,phac-nml/bioconda-recipes,abims-sbr/bioconda-recipes,bioconda/recipes,bioconda/bioconda-recipes,mcornwell1957/bioconda-recipes,lpantano/recipes,chapmanb/bioconda-recipes,ivirshup/bioconda-recipes,chapmanb/bioconda-recipes,keuv-grvl/bioconda-recipes,blankenberg/bioconda-recipes,acaprez/recipes,npavlovikj/bioconda-recipes,omicsnut/bioconda-recipes,jfallmann/bioconda-recipes,roryk/recipes,rob-p/bioconda-recipes,CGATOxford/bioconda-recipes,phac-nml/bioconda-recipes,keuv-grvl/bioconda-recipes,bioconda/bioconda-recipes,blankenberg/bioconda-recipes,martin-mann/bioconda-recipes,cokelaer/bioconda-recipes,xguse/bioconda-recipes,bebatut/bioconda-recipes,chapmanb/bioconda-recipes,bebatut/bioconda-recipes,saketkc/bioconda-recipes,ostrokach/bioconda-recipes,rob-p/bioconda-recipes,joachimwolff/bioconda-recipes,ivirshup/bioconda-recipes,ivirshup/bioconda-recipes,saketkc/bioconda-recipes,npavlovikj/bioconda-recipes,joachimwolff/bioconda-recipes,oena/bioconda-recipes,rvalieris/bioconda-recipes,peterjc/bioconda-recipes,roryk/recipes,ivirshup/bioconda-recipes,dmaticzka/bioconda-recipes,zachcp/bioconda-recipes,gvlproject/bioconda-recipes,mcornwell1957/bioconda-recipes,hardingnj/bioconda-recipes,hardingnj/bioconda-recipes,matthdsm/bioconda-recipes,HassanAmr/bioconda-recipes,rvalieris/bioconda-recipes,peterjc/bioconda-recipes,JenCabral/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,abims-sbr/bioconda-recipes,dmaticzka/bioconda-recipes,dkoppstein/recipes,matthdsm/bioconda-recipes,Luobiny/bioconda-recipes,abims-sbr/bioconda-recipes,oena/bioconda-recipes,bow/bioconda-recipes,omicsnut/bioconda-recipes,mdehollander/bioconda-recipes,daler/bioconda-recipes,blankenberg/bioconda-recipes,oena/bioconda-recipes,npavlovikj/bioconda-recipes,CGATOxford/bioconda-recipes,omicsnut/bioconda-recipes,CGATOxford/bioconda-recipes,mdehollander/bioconda-recipes,oena/bioconda-recipes,colinbrislawn/bioconda-recipes,mdehollander/bioconda-recipes,omicsnut/bioconda-recipes,bioconda/bioconda-recipes,keuv-grvl/bioconda-recipes,ivirshup/bioconda-recipes,bow/bioconda-recipes,rob-p/bioconda-recipes,Luobiny/bioconda-recipes,bebatut/bioconda-recipes,martin-mann/bioconda-recipes,HassanAmr/bioconda-recipes,JenCabral/bioconda-recipes,bioconda/recipes,gregvonkuster/bioconda-recipes,keuv-grvl/bioconda-recipes,abims-sbr/bioconda-recipes,Luobiny/bioconda-recipes,cokelaer/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,ostrokach/bioconda-recipes,zachcp/bioconda-recipes,gregvonkuster/bioconda-recipes,HassanAmr/bioconda-recipes,gregvonkuster/bioconda-recipes,gvlproject/bioconda-recipes,chapmanb/bioconda-recipes,rvalieris/bioconda-recipes,martin-mann/bioconda-recipes,JenCabral/bioconda-recipes,colinbrislawn/bioconda-recipes,omicsnut/bioconda-recipes,gvlproject/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,jasper1918/bioconda-recipes,jfallmann/bioconda-recipes,xguse/bioconda-recipes,ostrokach/bioconda-recipes,acaprez/recipes,acaprez/recipes,jfallmann/bioconda-recipes | recipes/python-omero/setup.py | recipes/python-omero/setup.py | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
from distutils.core import setup
import os
setup(name='Omero Python',
version=os.environ['OMERO_VERSION'],
description='OME (Open Microscopy Environment) develops open-source software and data format standards for the storage and manipulation of biological light microscopy data.',
url='http://www.openmicroscopy.org/',
packages=['omero', 'omeroweb', 'omero_ext', 'pipeline']
)
| #!/usr/bin/python
# -*- coding: latin-1 -*-
from distutils.core import setup
import os
setup(name='Omero Python',
version=os.environ['OMERO_VERSION'],
description='OME (Open Microscopy Environment) develops open-source software and data format standards for the storage and manipulation of biological light microscopy data.',
url='http://www.openmicroscopy.org/',
packages=['omero', 'omeroweb', 'omero_ext', 'pipeline']
)
| mit | Python |
35346bc78d18009cddf19e39c8ea7e70c6647f7b | Use assertRaises in test_wipe_no_params (#2309) | safwanrahman/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org,pombredanne/readthedocs.org,pombredanne/readthedocs.org,pombredanne/readthedocs.org,rtfd/readthedocs.org,tddv/readthedocs.org,tddv/readthedocs.org,tddv/readthedocs.org,safwanrahman/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,davidfischer/readthedocs.org | readthedocs/rtd_tests/tests/test_urls.py | readthedocs/rtd_tests/tests/test_urls.py | from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.test import TestCase
class WipeUrlTests(TestCase):
def test_wipe_no_params(self):
with self.assertRaises(NoReverseMatch):
reverse('wipe_version')
def test_wipe_alphabetic(self):
project_slug = 'alphabetic'
version = 'version'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alphabetic/version/')
def test_wipe_alphanumeric(self):
project_slug = 'alpha123'
version = '123alpha'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha123/123alpha/')
def test_wipe_underscore_hyphen(self):
project_slug = 'alpha_123'
version = '123-alpha'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha_123/123-alpha/')
def test_wipe_version_dot(self):
project_slug = 'alpha-123'
version = '1.2.3'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha-123/1.2.3/')
def test_wipe_version_start_dot(self):
project_slug = 'alpha-123'
version = '.2.3'
try:
reverse('wipe_version', args=[project_slug, version])
except NoReverseMatch:
pass
class TestVersionURLs(TestCase):
def test_version_url_with_caps(self):
url = reverse(
'project_download_media',
kwargs={'type_': 'pdf', 'version_slug': u'1.4.X', 'project_slug': u'django'}
)
self.assertTrue(url)
| from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.test import TestCase
class WipeUrlTests(TestCase):
def test_wipe_no_params(self):
try:
reverse('wipe_version')
self.fail('reverse with no parameters should fail')
except NoReverseMatch:
pass
def test_wipe_alphabetic(self):
project_slug = 'alphabetic'
version = 'version'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alphabetic/version/')
def test_wipe_alphanumeric(self):
project_slug = 'alpha123'
version = '123alpha'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha123/123alpha/')
def test_wipe_underscore_hyphen(self):
project_slug = 'alpha_123'
version = '123-alpha'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha_123/123-alpha/')
def test_wipe_version_dot(self):
project_slug = 'alpha-123'
version = '1.2.3'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha-123/1.2.3/')
def test_wipe_version_start_dot(self):
project_slug = 'alpha-123'
version = '.2.3'
try:
reverse('wipe_version', args=[project_slug, version])
except NoReverseMatch:
pass
class TestVersionURLs(TestCase):
def test_version_url_with_caps(self):
url = reverse(
'project_download_media',
kwargs={'type_': 'pdf', 'version_slug': u'1.4.X', 'project_slug': u'django'}
)
self.assertTrue(url)
| mit | Python |
3acee896fccfc2a99c8d38d432635b09a7d56d7d | remove unused future import in file script | luci/recipes-py,luci/recipes-py | recipe_modules/file/resources/symlink.py | recipe_modules/file/resources/symlink.py | #!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Simple script for creating symbolic links for an arbitrary number of path pairs."""
import argparse
import errno
import json
import os
import sys
def main(args):
parser = argparse.ArgumentParser(description='Create symlinks')
parser.add_argument("--link-json",
help="Simple JSON mapping of a source to a linkname",
required=True)
args = parser.parse_args()
with open(args.link_json, 'r') as f:
links = json.load(f)
made_dirs = set()
def make_parent_dirs(path):
path = os.path.dirname(path)
if path in made_dirs:
return
try:
os.makedirs(path, 0o777)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
while path and path not in made_dirs:
made_dirs.add(path)
path = os.path.dirname(path)
for target, linknames in links.items():
for linkname in linknames:
make_parent_dirs(linkname)
os.symlink(target, linkname)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Simple script for creating symbolic links for an arbitrary number of path pairs."""
import argparse
import errno
import json
import os
import sys
from future.utils import iteritems
def main(args):
parser = argparse.ArgumentParser(description='Create symlinks')
parser.add_argument("--link-json",
help="Simple JSON mapping of a source to a linkname",
required=True)
args = parser.parse_args()
with open(args.link_json, 'r') as f:
links = json.load(f)
made_dirs = set()
def make_parent_dirs(path):
path = os.path.dirname(path)
if path in made_dirs:
return
try:
os.makedirs(path, 0o777)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
while path and path not in made_dirs:
made_dirs.add(path)
path = os.path.dirname(path)
for target, linknames in links.items():
for linkname in linknames:
make_parent_dirs(linkname)
os.symlink(target, linkname)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| apache-2.0 | Python |
b4e8dd76e3095941c9837151b263365f08426ea1 | Fix privileges of package frontend. | 82Flex/DCRM,82Flex/DCRM,82Flex/DCRM,82Flex/DCRM | WEIPDCRM/styles/DefaultStyle/views/chart.py | WEIPDCRM/styles/DefaultStyle/views/chart.py | # coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com> & 0xJacky <jacky-943572677@qq.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Notice: You have used class-based views, that's awesome.
If not necessary, you can try function-based views.
You may add lines above as license.
"""
from django.views.generic import ListView
from WEIPDCRM.models.package import Package
class ChartView(ListView):
model = Package
context_object_name = 'package_list'
ordering = '-download_count'
template_name = 'frontend/chart.html'
def get_queryset(self):
"""
Get 24 packages ordering by download times.
:return: QuerySet
"""
queryset = super(ChartView, self).get_queryset().all()[:24]
return queryset
| # coding=utf-8
"""
DCRM - Darwin Cydia Repository Manager
Copyright (C) 2017 WU Zheng <i.82@me.com> & 0xJacky <jacky-943572677@qq.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Notice: You have used class-based views, that's awesome.
If not necessary, you can try function-based views.
You may add lines above as license.
"""
from django.views.generic import ListView
from WEIPDCRM.models.package import Package
class ChartView(ListView):
model = Package
context_object_name = 'package_list'
ordering = '-download_times'
template_name = 'frontend/chart.html'
def get_queryset(self):
"""
Get 24 packages ordering by download times.
:return: QuerySet
"""
queryset = super(ChartView, self).get_queryset().all()[:24]
return queryset
| agpl-3.0 | Python |
56dde1833dd61f6210a37d819a3df4d6a5e2db51 | Update download Sector help description | makhidkarun/traveller_pyroute | PyRoute/downloadsec.py | PyRoute/downloadsec.py | '''
Created on Jun 3, 2014
@author: tjoneslo
'''
import urllib2
import urllib
import codecs
import string
import time
import os
import argparse
def get_url (url, sector, suffix):
f = urllib2.urlopen(url)
encoding=f.headers['content-type'].split('charset=')[-1]
content = f.read()
if encoding == 'text/xml':
ucontent = unicode(content, 'utf-8')
ucontent = string.replace(content, '\r\n', '\n')
else:
ucontent = unicode(content, encoding)
ucontent = string.replace(ucontent, '\r\n', '\n')
path = os.path.join(args.output_dir, '%s.%s' % (sector, suffix))
with codecs.open(path, 'wb', 'utf-8') as out:
out.write (ucontent)
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download sector/metadata from TravellerMap')
parser.add_argument('sector_list', help='List of sectors to download')
parser.add_argument('output_dir', help='output directory for sector data and xml metadata')
args = parser.parse_args()
sectorsList = [line for line in codecs.open(args.sector_list,'r', 'utf-8')]
for sector in sectorsList:
sector = sector.rstrip()
print 'Downloading %s' % sector
params = urllib.urlencode({'sector': sector, 'type': 'SecondSurvey'})
url = 'http://www.travellermap.com/api/sec?%s' % params
get_url (url, sector, 'sec')
params = urllib.urlencode({'sector': sector, 'accept': 'text/xml'})
url = 'http://travellermap.com/api/metadata?%s' % params
get_url (url, sector, 'xml')
time.sleep(60) | '''
Created on Jun 3, 2014
@author: tjoneslo
'''
import urllib2
import urllib
import codecs
import string
import time
import os
import argparse
def get_url (url, sector, suffix):
f = urllib2.urlopen(url)
encoding=f.headers['content-type'].split('charset=')[-1]
content = f.read()
if encoding == 'text/xml':
ucontent = unicode(content, 'utf-8')
ucontent = string.replace(content, '\r\n', '\n')
else:
ucontent = unicode(content, encoding)
ucontent = string.replace(ucontent, '\r\n', '\n')
path = os.path.join(args.output_dir, '%s.%s' % (sector, suffix))
with codecs.open(path, 'wb', 'utf-8') as out:
out.write (ucontent)
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='route remap for trade routes')
parser.add_argument('sector_list', help='List of sectors to download')
parser.add_argument('output_dir', help='output directory for sector data and xml metadata')
args = parser.parse_args()
sectorsList = [line for line in codecs.open(args.sector_list,'r', 'utf-8')]
for sector in sectorsList:
sector = sector.rstrip()
print 'Downloading %s' % sector
params = urllib.urlencode({'sector': sector, 'type': 'SecondSurvey'})
url = 'http://www.travellermap.com/api/sec?%s' % params
get_url (url, sector, 'sec')
params = urllib.urlencode({'sector': sector, 'accept': 'text/xml'})
url = 'http://travellermap.com/api/metadata?%s' % params
get_url (url, sector, 'xml')
time.sleep(60) | mit | Python |
93b4357e4438d9c63e6f09ba4c3e534e0a03386e | add ComputeMolShape and ComputeMolVolume convenience functions | greglandrum/rdkit,adalke/rdkit,jandom/rdkit,strets123/rdkit,rvianello/rdkit,strets123/rdkit,strets123/rdkit,rvianello/rdkit,bp-kelley/rdkit,greglandrum/rdkit,AlexanderSavelyev/rdkit,rdkit/rdkit,soerendip42/rdkit,ptosco/rdkit,rdkit/rdkit,ptosco/rdkit,soerendip42/rdkit,bp-kelley/rdkit,bp-kelley/rdkit,adalke/rdkit,rvianello/rdkit,soerendip42/rdkit,jandom/rdkit,strets123/rdkit,strets123/rdkit,bp-kelley/rdkit,jandom/rdkit,ptosco/rdkit,bp-kelley/rdkit,soerendip42/rdkit,jandom/rdkit,greglandrum/rdkit,rvianello/rdkit,rdkit/rdkit,jandom/rdkit,adalke/rdkit,AlexanderSavelyev/rdkit,jandom/rdkit,rvianello/rdkit,soerendip42/rdkit,adalke/rdkit,jandom/rdkit,AlexanderSavelyev/rdkit,ptosco/rdkit,AlexanderSavelyev/rdkit,rdkit/rdkit,AlexanderSavelyev/rdkit,rvianello/rdkit,ptosco/rdkit,rdkit/rdkit,soerendip42/rdkit,adalke/rdkit,adalke/rdkit,bp-kelley/rdkit,jandom/rdkit,greglandrum/rdkit,ptosco/rdkit,rvianello/rdkit,AlexanderSavelyev/rdkit,rvianello/rdkit,bp-kelley/rdkit,rdkit/rdkit,adalke/rdkit,strets123/rdkit,greglandrum/rdkit,greglandrum/rdkit,bp-kelley/rdkit,AlexanderSavelyev/rdkit,rdkit/rdkit,soerendip42/rdkit,strets123/rdkit,greglandrum/rdkit,rdkit/rdkit,strets123/rdkit,ptosco/rdkit,AlexanderSavelyev/rdkit,AlexanderSavelyev/rdkit,adalke/rdkit,ptosco/rdkit,soerendip42/rdkit,greglandrum/rdkit | Python/Chem/AllChem.py | Python/Chem/AllChem.py | # $Id$
#
# Copyright (C) 2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
#
""" Import all RDKit chemistry modules
"""
import rdBase
import RDConfig
import Numeric
import DataStructs
from Geometry import rdGeometry
from Chem import *
from rdPartialCharges import *
from rdDepictor import *
from rdForceFieldHelpers import *
from Chem.ChemicalFeatures import *
from rdDistGeom import *
from rdMolAlign import *
from rdMolTransforms import *
from rdShapeHelpers import *
from rdChemReactions import *
import ForceField
Mol.Compute2DCoords = Compute2DCoords
Mol.ComputeGasteigerCharges = ComputeGasteigerCharges
def TransformMol(mol,tform):
""" Applies the transformation to a molecule and sets it up with
a single conformer
"""
import Numeric
newConf = Conformer()
newConf.SetId(0)
refConf = mol.GetConformer()
for i in range(refConf.GetNumAtoms()):
pos = list(refConf.GetAtomPosition(i))
pos.append(1.0)
newPos = Numeric.matrixmultiply(tform,Numeric.array(pos))
newConf.SetAtomPosition(i,list(newPos)[:3])
mol.RemoveAllConformers()
mol.AddConformer(newConf)
def ComputeMolShape(mol,confId=-1,boxDim=(20,20,20),spacing=0.5,**kwargs):
res = rdGeometry.UniformGrid3D(boxDim[0],boxDim[1],boxDim[2],spacing=spacing)
apply(EncodeShape,(mol,res,confId),kwargs)
return res
def ComputeMolVolume(mol,confId=-1,gridSpacing=0.1,boxMargin=2.0):
import copy
mol = copy.deepcopy(mol)
conf = mol.GetConformer(confId)
CanonicalizeConformer(conf)
box = ComputeConfBox(conf)
sideLen = ( box[1].x-box[0].x + 2*boxMargin, \
box[1].y-box[0].y + 2*boxMargin, \
box[1].z-box[0].z + 2*boxMargin )
shape = rdGeometry.UniformGrid3D(sideLen[0],sideLen[1],sideLen[2],
spacing=gridSpacing)
EncodeShape(mol,shape,confId,ignoreHs=False,vdwScale=1.0)
voxelVol = gridSpacing**3
vol = 0.0
occVect = shape.GetOccupancyVect()
for i in range(len(occVect)):
if occVect[i]==3:
vol+= voxelVol
return vol
def GenerateDepictionMatching3DStructure(mol,reference,confId=-1,
**kwargs):
stripRef = RemoveHs(reference)
nAts = mol.GetNumAtoms()
dm = []
conf = stripRef.GetConformer(confId)
for i in range(nAts):
pi = conf.GetAtomPosition(i)
for j in range(i+1,nAts):
pj = conf.GetAtomPosition(j)
dm.append((pi-pj).Length())
dm = Numeric.array(dm)
apply(Compute2DCoordsMimicDistmat,(mol,dm),kwargs)
| # $Id$
#
# Copyright (C) 2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
#
""" Import all RDKit chemistry modules
"""
import rdBase
import RDConfig
import Numeric
import DataStructs
from Geometry import rdGeometry
from Chem import *
from rdPartialCharges import *
from rdDepictor import *
from rdForceFieldHelpers import *
from Chem.ChemicalFeatures import *
from rdDistGeom import *
from rdMolAlign import *
from rdMolTransforms import *
from rdShapeHelpers import *
from rdChemReactions import *
import ForceField
Mol.Compute2DCoords = Compute2DCoords
Mol.ComputeGasteigerCharges = ComputeGasteigerCharges
def TransformMol(mol,tform):
""" Applies the transformation to a molecule and sets it up with
a single conformer
"""
import Numeric
newConf = Conformer()
newConf.SetId(0)
refConf = mol.GetConformer()
for i in range(refConf.GetNumAtoms()):
pos = list(refConf.GetAtomPosition(i))
pos.append(1.0)
newPos = Numeric.matrixmultiply(tform,Numeric.array(pos))
newConf.SetAtomPosition(i,list(newPos)[:3])
mol.RemoveAllConformers()
mol.AddConformer(newConf)
def GenerateDepictionMatching3DStructure(mol,reference,confId=-1,
**kwargs):
stripRef = RemoveHs(reference)
nAts = mol.GetNumAtoms()
dm = []
conf = stripRef.GetConformer(confId)
for i in range(nAts):
pi = conf.GetAtomPosition(i)
for j in range(i+1,nAts):
pj = conf.GetAtomPosition(j)
dm.append((pi-pj).Length())
dm = Numeric.array(dm)
apply(Compute2DCoordsMimicDistmat,(mol,dm),kwargs)
| bsd-3-clause | Python |
61cad8665fe84c74ade0c30f7935f064dafea0d2 | add imports section | mylokin/redisext,mylokin/redisext | redisext/__init__.py | redisext/__init__.py | '''
Introduction
------------
Redisext is a tool for data modeling. Its primary goal is to provide light
interface to well-known data models based on Redis such as queues, hashmaps,
counters, pools and stacks. Redisext could be treated as a ORM for Redis.
Tutorial
--------
Counter Model allows you to build counters in a minute. For example::
import redisext.backend.redis
import redisext.counter
import redisext.serializer
class Connection(redisext.backend.redis.Connection):
MASTER = {'host': 'localhost', 'port': 6379, 'db': 0}
class Visitors(Connection, redisext.counter.Counter):
SERIALIZER = redisext.serializer.Numeric
This is it! You can start using it. Example of mythical frontpage view::
def frontpage():
visitors_counter = Visitors('fronpage')
visitors_counter.increment()
context = {
'visitors': visitors_counter.get()
}
return context
.. note::
Details on :class:`redisext.counter.Counter`.
Data Models
===========
.. automodule:: redisext.counter
.. automodule:: redisext.hashmap
.. automodule:: redisext.pool
.. automodule:: redisext.queue
.. automodule:: redisext.stack
.. note::
Imports section is intentionaly skiped, but for ther order it is listed below.
Imports::
import redisext.backend.redis
import redisext.serializer
class Connection(redisext.backend.redis.Connection):
MASTER = {'host': 'localhost', 'port': 6379, 'db': 0}
Abstract Model
--------------
.. automodule:: redisext.models
.. automodule:: redisext.serializer
.. automodule:: redisext.key
.. automodule:: redisext.backend
'''
| '''
Introduction
------------
Redisext is a tool for data modeling. Its primary goal is to provide light
interface to well-known data models based on Redis such as queues, hashmaps,
counters, pools and stacks. Redisext could be treated as a ORM for Redis.
Tutorial
--------
Counter Model allows you to build counters in a minute. For example::
import redisext.backend.redis
import redisext.counter
import redisext.serializer
class Connection(redisext.backend.redis.Connection):
MASTER = {'host': 'localhost', 'port': 6379, 'db': 0}
class Visitors(Connection, redisext.counter.Counter):
SERIALIZER = redisext.serializer.Numeric
This is it! You can start using it. Example of mythical frontpage view::
def frontpage():
visitors_counter = Visitors('fronpage')
visitors_counter.increment()
context = {
'visitors': visitors_counter.get()
}
return context
.. note::
Details on :class:`redisext.counter.Counter`.
Data Models
===========
.. automodule:: redisext.counter
.. automodule:: redisext.hashmap
.. automodule:: redisext.pool
.. automodule:: redisext.queue
.. automodule:: redisext.stack
Imports section is intentionaly skiped, but for ther order::
import redisext.backend.redis
import redisext.serializer
class Connection(redisext.backend.redis.Connection):
MASTER = {'host': 'localhost', 'port': 6379, 'db': 0}
Abstract Model
--------------
.. automodule:: redisext.models
.. automodule:: redisext.serializer
.. automodule:: redisext.key
.. automodule:: redisext.backend
'''
| mit | Python |
5026b687f74f351c17fbaa1d219d4cad34b77eb6 | Add missing space | mphe/pychatbot,mphe/pychatbot | chatbot/main.py | chatbot/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import argparse
import chatbot.bot
def main():
parser = argparse.ArgumentParser(
description="Run the bot using a given profile and/or a given API.",
epilog="At least one of -p/--profile or -a/--api has to be specified."
)
parser.add_argument("-p", "--profile", help="Profile name", default="")
parser.add_argument("-a", "--api", help="API name", default="")
parser.add_argument("-d", "--profiledir", help="Profile directory (bot config). If not specified, the platform specific default directory is used.")
parser.add_argument("-c", "--configdir", help="Config directory (plugin configs). If not specified, the profile directory is used..")
parser.add_argument("-v", "--verbose", help="Show debug output",
action="store_true")
args = parser.parse_args()
if not args.profile and not args.api:
parser.error("an API and/or a profile has to specified.")
return 1
logging.basicConfig(level=(logging.DEBUG if args.verbose else logging.INFO),
format="%(levelname)s: %(message)s")
bot = chatbot.bot.Bot(args.profiledir)
return bot.run(profile=args.profile, apiname=args.api,
configdir=args.configdir)
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import argparse
import chatbot.bot
def main():
parser = argparse.ArgumentParser(
description="Run the bot using a given profile and/or a given API.",
epilog="At least one of -p/--profile or -a/--api has to be specified."
)
parser.add_argument("-p", "--profile", help="Profile name", default="")
parser.add_argument("-a", "--api", help="API name", default="")
parser.add_argument("-d", "--profiledir", help="Profile directory (bot config). If not specified, the platform specific default directory is used.")
parser.add_argument("-c", "--configdir", help="Config directory (plugin configs). If not specified, the profile directory is used..")
parser.add_argument("-v", "--verbose", help="Show debug output",
action="store_true")
args = parser.parse_args()
if not args.profile and not args.api:
parser.error("an API and/or a profile has to specified.")
return 1
logging.basicConfig(level=(logging.DEBUG if args.verbose else logging.INFO),
format="%(levelname)s:%(message)s")
bot = chatbot.bot.Bot(args.profiledir)
return bot.run(profile=args.profile, apiname=args.api,
configdir=args.configdir)
if __name__ == "__main__":
sys.exit(main())
| mit | Python |
20328fd50002af69eb441e68b89a85dbda4ab43a | fix django 1.11.1 | simon-db/django-cked,simon-db/django-cked,simon-db/django-cked,simon-db/django-cked | cked/widgets.py | cked/widgets.py | from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.forms.utils import flatatt
try:
import json
except ImportError:
from django.utils import simplejson as json
from cked import default_settings
json_encode = json.JSONEncoder().encode
class CKEditorWidget(forms.Textarea):
"""
Widget providing CKEditor for Rich Text Editing.
"""
class Media:
js = (settings.STATIC_URL + 'cked/ckeditor/ckeditor.js',)
def __init__(self, *args, **kwargs):
config_name = kwargs.pop('config_name', 'default')
super(CKEditorWidget, self).__init__(*args, **kwargs)
# Use default config
self.options = default_settings.CKEDITOR_DEFAULT_OPTIONS.copy()
# If CKEDITOR_OPTIONS presented in settings, use it!
general_options = getattr(settings, 'CKEDITOR_OPTIONS', {})
if general_options is None:
general_options = {}
if config_name in general_options:
options = general_options[config_name]
else:
options = None
if options is not None:
if isinstance(options, dict):
# Override defaults with CKEDITOR_OPTIONS.
self.options.update(options)
else:
raise ImproperlyConfigured('CKEDITOR_OPTIONS setting must be'
' a dictionary type.')
def render(self, name, value, attrs={}):
if value is None:
value = ''
final_attrs = self.build_attrs(self.attrs, attrs, name=name)
self.options['filebrowserBrowseUrl'] = reverse('cked_elfinder')
return mark_safe(render_to_string('cked/ckeditor.html', {
'final_attrs': flatatt(final_attrs),
'value': conditional_escape(force_unicode(value)),
'id': final_attrs['id'],
'options': json_encode(self.options)})
)
def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):
"""
Helper function for building an attribute dictionary.
This is combination of the same method from Django<=1.10 and Django1.11+
"""
attrs = dict(base_attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
| from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.forms.utils import flatatt
try:
import json
except ImportError:
from django.utils import simplejson as json
from cked import default_settings
json_encode = json.JSONEncoder().encode
class CKEditorWidget(forms.Textarea):
"""
Widget providing CKEditor for Rich Text Editing.
"""
class Media:
js = (settings.STATIC_URL + 'cked/ckeditor/ckeditor.js',)
def __init__(self, *args, **kwargs):
config_name = kwargs.pop('config_name', 'default')
super(CKEditorWidget, self).__init__(*args, **kwargs)
# Use default config
self.options = default_settings.CKEDITOR_DEFAULT_OPTIONS.copy()
# If CKEDITOR_OPTIONS presented in settings, use it!
general_options = getattr(settings, 'CKEDITOR_OPTIONS', {})
if general_options is None:
general_options = {}
if config_name in general_options:
options = general_options[config_name]
else:
options = None
if options is not None:
if isinstance(options, dict):
# Override defaults with CKEDITOR_OPTIONS.
self.options.update(options)
else:
raise ImproperlyConfigured('CKEDITOR_OPTIONS setting must be'
' a dictionary type.')
def render(self, name, value, attrs={}):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
self.options['filebrowserBrowseUrl'] = reverse('cked_elfinder')
return mark_safe(render_to_string('cked/ckeditor.html', {
'final_attrs': flatatt(final_attrs),
'value': conditional_escape(force_unicode(value)),
'id': final_attrs['id'],
'options': json_encode(self.options)})
)
| bsd-2-clause | Python |
2f67d775600bce74e1f4cc59b63d63f4f0dc5dc7 | Update pion.py | Niceboy5275/PythonChess,Niceboy5275/PythonChess | classes/pion.py | classes/pion.py | from piece import piece
class pion(piece):
def move(self, pos_x, pos_y, tableau, possible):
if (self.getColor() == piece._players['NOIR']):
if tableau.getPion(pos_x, pos_y + 1) == None:
tableau.setPossible(pos_x, pos_y + 1, piece._players['NOIR'], possible)
if pos_y == 1:
if tableau.getPion(pos_x, pos_y + 2) == None:
tableau.setPossible(pos_x, pos_y + 2, 1, possible)
if tableau.getPion(pos_x-1, pos_y + 1) != None and tableau.getPion(pos_x-1, pos_y + 1).getColor() < 0:
tableau.setPossible(pos_x-1, pos_y + 1, 0, possible)
if tableau.getPion(pos_x+1, pos_y + 1) != None and tableau.getPion(pos_x+1, pos_y + 1).getColor() < 0:
tableau.setPossible(pos_x+1, pos_y + 1, 0, possible)
if (self.getColor() == piece._players['BLANC']):
if tableau.getPion(pos_x, pos_y - 1) == None:
tableau.setPossible(pos_x, pos_y - 1, -1, possible)
if pos_y == 6:
if tableau.getPion(pos_x, pos_y - 2) == None:
tableau.setPossible(pos_x, pos_y - 2, piece._players['BLANC'], possible)
if tableau.getPion(pos_x-1, pos_y - 1) != None and tableau.getPion(pos_x-1, pos_y - 1).getColor() > 0:
tableau.setPossible(pos_x-1, pos_y - 1, 0, possible)
if tableau.getPion(pos_x+1, pos_y - 1) != None and tableau.getPion(pos_x+1, pos_y - 1).getColor() > 0:
tableau.setPossible(pos_x+1, pos_y - 1, 0, possible)
def getLetter(self):
return "P"
| from piece import piece
from tkinter import *
class pion(piece):
def move(self, pos_x, pos_y, tableau, possible):
if (self.getColor() == piece._players['NOIR']):
if tableau.getPion(pos_x, pos_y + 1) == None:
tableau.setPossible(pos_x, pos_y + 1, piece._players['NOIR'], possible)
if pos_y == 1:
if tableau.getPion(pos_x, pos_y + 2) == None:
tableau.setPossible(pos_x, pos_y + 2, 1, possible)
if tableau.getPion(pos_x-1, pos_y + 1) != None and tableau.getPion(pos_x-1, pos_y + 1).getColor() < 0:
tableau.setPossible(pos_x-1, pos_y + 1, 0, possible)
if tableau.getPion(pos_x+1, pos_y + 1) != None and tableau.getPion(pos_x+1, pos_y + 1).getColor() < 0:
tableau.setPossible(pos_x+1, pos_y + 1, 0, possible)
if (self.getColor() == piece._players['BLANC']):
if tableau.getPion(pos_x, pos_y - 1) == None:
tableau.setPossible(pos_x, pos_y - 1, -1, possible)
if pos_y == 6:
if tableau.getPion(pos_x, pos_y - 2) == None:
tableau.setPossible(pos_x, pos_y - 2, piece._players['BLANC'], possible)
if tableau.getPion(pos_x-1, pos_y - 1) != None and tableau.getPion(pos_x-1, pos_y - 1).getColor() > 0:
tableau.setPossible(pos_x-1, pos_y - 1, 0, possible)
if tableau.getPion(pos_x+1, pos_y - 1) != None and tableau.getPion(pos_x+1, pos_y - 1).getColor() > 0:
tableau.setPossible(pos_x+1, pos_y - 1, 0, possible)
def getLetter(self):
return "P"
def getImage(self):
if self.getColor() == piece._players['NOIR']:
return PhotoImage(file="pion_n.png")
else:
return PhotoImage(file="pion_b.png")
| mit | Python |
c680852e6c2c378bf00bd6ff12d72bd6ecd3376d | Save email | SYNHAK/spiff,SYNHAK/spiff,SYNHAK/spiff | spiff/membership/views.py | spiff/membership/views.py | from django.template import RequestContext
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
import models
import forms
def index(request):
users = User.objects.filter(is_active=True)
return render_to_response('membership/index.html',
{'users': users},
context_instance=RequestContext(request))
def view(request, username):
user = User.objects.get(username=username)
return render_to_response('membership/view.html',
{'viewUser': user},
context_instance=RequestContext(request))
def edit(request, username=None):
if username is None:
user = request.user
else:
user = User.objects.get(username=username)
if user != request.user and not request.user.has_perm('auth.can_change_user'):
raise PermissionDenied()
fields = models.Field.objects.filter()
values = user.member.attributes.all()
if request.method == "POST":
userForm = forms.UserForm(request.POST, instance=user)
profileForm = forms.ProfileForm(request.POST, fields=fields, values=values)
else:
userForm = forms.UserForm(instance=user)
profileForm = forms.ProfileForm(fields=fields, values=values)
if userForm.is_valid() and profileForm.is_valid():
user.first_name = userForm.cleaned_data['firstName']
user.last_name = userForm.cleaned_data['lastName']
user.email = userForm.cleaned_data['email']
user.save()
member = user.member
member.birthday = userForm.cleaned_data['birthday']
member.profession = userForm.cleaned_data['profession']
member.save()
for f in fields:
value,created = models.FieldValue.objects.get_or_create(member=member, field=f)
value.value = profileForm.fieldValue(f)
value.save()
messages.info(request, "Profile Saved.")
return render_to_response('membership/edit.html',
{'editUser': user, 'userForm':userForm, 'profileForm':profileForm},
context_instance=RequestContext(request))
| from django.template import RequestContext
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.contrib.auth.models import User
from django.shortcuts import render_to_response
import models
import forms
def index(request):
users = User.objects.filter(is_active=True)
return render_to_response('membership/index.html',
{'users': users},
context_instance=RequestContext(request))
def view(request, username):
user = User.objects.get(username=username)
return render_to_response('membership/view.html',
{'viewUser': user},
context_instance=RequestContext(request))
def edit(request, username=None):
if username is None:
user = request.user
else:
user = User.objects.get(username=username)
if user != request.user and not request.user.has_perm('auth.can_change_user'):
raise PermissionDenied()
fields = models.Field.objects.filter()
values = user.member.attributes.all()
if request.method == "POST":
userForm = forms.UserForm(request.POST, instance=user)
profileForm = forms.ProfileForm(request.POST, fields=fields, values=values)
else:
userForm = forms.UserForm(instance=user)
profileForm = forms.ProfileForm(fields=fields, values=values)
if userForm.is_valid() and profileForm.is_valid():
user.first_name = userForm.cleaned_data['firstName']
user.last_name = userForm.cleaned_data['lastName']
user.save()
member = user.member
member.birthday = userForm.cleaned_data['birthday']
member.profession = userForm.cleaned_data['profession']
member.save()
for f in fields:
value,created = models.FieldValue.objects.get_or_create(member=member, field=f)
value.value = profileForm.fieldValue(f)
value.save()
messages.info(request, "Profile Saved.")
return render_to_response('membership/edit.html',
{'editUser': user, 'userForm':userForm, 'profileForm':profileForm},
context_instance=RequestContext(request))
| agpl-3.0 | Python |
54e0d90fd1682d3aa7e87d83c9a19495190b718b | read sequence from ctm and audacity | ynop/spych,ynop/spych | spych/scoring/sequence.py | spych/scoring/sequence.py | from spych.assets import ctm
from spych.assets import audacity
class SequenceItem(object):
"""
Represents an item in a sequence. An item at least consists of a label. It may has start time and duration in seconds.
"""
def __init__(self, label, start=-1.0, duration=-1.0):
"""
Create instance.
:param label: Label
:param start: Start time [seconds]
:param duration: Duration [seconds]
"""
self.label = label
self.start = start
self.duration = duration
class Sequence(object):
"""
Represents a sequence of items.
"""
def __init__(self, items=[]):
self.items = list(items)
def items_in_interval(self, start, end):
"""
Return all items and their indices that intersect the given range.
:param start: start of interval in seconds
:param end: end of interval in seconds
:return: list of (index, item) tuples
"""
if end <= start:
raise ValueError("End ({}) of range has to greater than start ({}).".format(end, start))
matching_items = []
for index, item in enumerate(self.items):
if not (end <= item.start or start >= item.start + item.duration):
matching_items.append((index, item))
return matching_items
def append_item(self, item):
self.items.append(item)
@classmethod
def from_tuples(cls, tuples):
"""
Return sequence from list of tuples (label, start, duration).
"""
items = []
for value in tuples:
items.append(SequenceItem(value[0], value[1], value[2]))
return cls(items)
@classmethod
def from_ctm(cls, path):
"""
Return list of sequences from ctm file.
"""
ctm_entries = ctm.read_file(path)
sequences = {}
for wav_name, segments in ctm_entries.items():
sequence = cls()
for segment in segments:
item = SequenceItem(segment[3], start=segment[1], duration=segment[2])
sequence.append_item(item)
sequences[wav_name] = sequence
return sequences
@classmethod
def from_audacity_labels(cls, path):
"""
Return sequence from audacity label file.
"""
sequence = cls()
audacity_entries = audacity.read_label_file(path)
for segment in audacity_entries:
item = SequenceItem(segment[2], start=segment[0], duration=segment[1] - segment[0])
sequence.append_item(item)
return sequence
| class SequenceItem(object):
"""
Represents an item in a sequence. An item at least consists of a label. It may has start time and duration in seconds.
"""
def __init__(self, label, start=-1.0, duration=-1.0):
"""
Create instance.
:param label: Label
:param start: Start time [seconds]
:param duration: Duration [seconds]
"""
self.label = label
self.start = start
self.duration = duration
class Sequence(object):
"""
Represents a sequence of items.
"""
def __init__(self, items=[]):
self.items = list(items)
def items_in_interval(self, start, end):
"""
Return all items and their indices that intersect the given range.
:param start: start of interval in seconds
:param end: end of interval in seconds
:return: list of (index, item) tuples
"""
if end <= start:
raise ValueError("End ({}) of range has to greater than start ({}).".format(end, start))
matching_items = []
for index, item in enumerate(self.items):
if not (end <= item.start or start >= item.start + item.duration):
matching_items.append((index, item))
return matching_items
@classmethod
def from_tuples(cls, tuples):
items = []
for value in tuples:
items.append(SequenceItem(value[0], value[1], value[2]))
return cls(items) | mit | Python |
e9bd104dfdbaae815d1dc96907b51cac0a6baefa | Update test_cryptorandom.py to match changes | statlab/cryptorandom | cryptorandom/tests/test_cryptorandom.py | cryptorandom/tests/test_cryptorandom.py | from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from nose.tools import assert_raises, raises
from ..cryptorandom import BaseRandom, SHA256
def test_SHA256():
r = SHA256(5)
assert(repr(r) == 'SHA256 PRNG with seed 5 and counter 0')
assert(str(r) == 'SHA256 PRNG with seed 5 and counter 0')
assert(r.getstate() == (5, 0))
r.next()
assert(r.getstate() == (5, 1))
r.jumpahead(5)
assert(r.getstate() == (5, 6))
r.seed(22, 3)
assert(r.getstate() == (22, 3))
def test_SHA256_random():
'''The following tests match the output of Ron's and Philip's implementations.'''
r = SHA256(12345678901234567890)
r.next()
e1 = int("4da594a8ab6064d666eab2bdf20cb4480e819e0c3102ca353de57caae1d11fd1", 16)
e2 = int("ae230ec16bee77f77c7378f4eb5d265d931665e29e8bbee7e733f58d3815d338", 16)
expected = np.array([e1, e2]) * 2**-256
assert((r.random(2) == expected).all())
r = SHA256(12345678901234567890)
r.next()
expected = int("4da594a8ab6064d666eab2bdf20cb4480e819e0c3102ca353de57caae1d11fd1", 16)
assert(r.nextRandom() == expected)
r = SHA256(12345678901234567890)
fiverand = r.randint(1, 1001, 5)
assert( (fiverand == np.array([405, 426, 921, 929, 56])).all() )
| from __future__ import (absolute_import, division,
print_function, unicode_literals)
import numpy as np
from nose.tools import assert_raises, raises
from ..cryptorandom import BaseRandom, SHA256
def test_SHA256():
r = SHA256(5)
assert(repr(r) == 'SHA256 PRNG with seed 5')
assert(str(r) == 'SHA256 PRNG with seed 5')
assert(r.getstate() == (5, 0))
r.next()
assert(r.getstate() == (5, 1))
r.jumpahead(5)
assert(r.getstate() == (5, 6))
r.seed(22, 3)
assert(r.getstate() == (22, 3))
def test_SHA256_random():
'''The following tests match the output of Ron's and Philip's implementations.'''
r = SHA256(12345678901234567890)
r.next()
e1 = int("4da594a8ab6064d666eab2bdf20cb4480e819e0c3102ca353de57caae1d11fd1", 16)
e2 = int("ae230ec16bee77f77c7378f4eb5d265d931665e29e8bbee7e733f58d3815d338", 16)
expected = np.array([e1, e2]) * 2**-256
assert((r.random(2) == expected).all())
r = SHA256(12345678901234567890)
r.next()
expected = int("4da594a8ab6064d666eab2bdf20cb4480e819e0c3102ca353de57caae1d11fd1", 16)
assert(r.nextRandom() == expected)
r = SHA256(12345678901234567890)
fiverand = r.randint(1, 1000, 5)
assert( (fiverand == np.array([405, 426, 921, 929, 56])).all() ) | bsd-2-clause | Python |
2dfbe90d3282f033d4ee24d1955aa1fd9838a6b7 | fix python syntax error | bigfootproject/sahara,bigfootproject/sahara,bigfootproject/sahara | savanna/tests/integration/tests/spark.py | savanna/tests/integration/tests/spark.py | '''
Created on Jan 1, 2014
@author: DO Huy-Hoang
'''
from savanna.openstack.common import excutils
from savanna.tests.integration.tests import base
class SparkTest(base.ITestCase):
def __run_RL_job(self, masternode_ip, masternode_port):
self.execute_command(
'./spark/run-example org.apache.spark.examples.SparkLR spark://%s:%s'
% (masternode_ip, masternode_port))
@base.skip_test('SKIP__TEST',
message='Test for Spark was skipped.')
def __run_HdfsLR_job(self, master_ip, namenode_ip, namenode_port, master_port, filename):
self.execute_command(
'./spark/run-example org.apache.spark.examples.JavaHdfsLR spark://%s:%s hdfs://%s:%s/%s'
% (master_ip, master_port, namenode_ip, namenode_port, filename))
def __copy_data_to_Hdfs(self, local_file_name, remote_file_name):
self.execute_command('sudo hdfs dfs -copyFromLocal %s %s' %
{local_file_name, remote_file_name})
def _spark_testing(self, cluster_info):
plugin_config = cluster_info['plugin_config']
node_count = cluster_info['node_info']['node_count']
namenode_ip = cluster_info['node_info']['namenode_ip']
masternode_ip = cluster_info['node_info']['master_ip']
namenode_port = cluster_info['plugin_config']['HADOOP_PROCESSES_WITH_PORTS']['namenode']
masternode_port = cluster_info['plugin_config']['SPARK_MASTER_PORT']
# Test standalone Spark job (without HDFS)
self.open_ssh_connection(masternode_ip, plugin_config.NODE_USERNAME)
self.__run_RL_job(masternode_ip, masternode_port)
self.close_ssh_connection()
# Test Spark job with HDFS data
self.open_ssh_connection(masternode_ip, plugin_config.NODE_USERNAME)
self.__copy_data_to_Hdfs('/home/ubuntu/spark/examples_data/lr_data.txt', '/')
self.__run_HdfsLR_job(masternode_ip, namenode_ip,
namenode_port, masternode_port, 'lr_data.txt')
self.close_ssh_connection()
| '''
Created on Jan 1, 2014
@author: DO Huy-Hoang
'''
from savanna.openstack.common import excutils
from savanna.tests.integration.tests import base
class SparkTest(base.ITestCase):
def __run_RL_job(self, hostname, port):
self.execute_command(
'./spark/run-example org.apache.spark.examples.SparkLR spark://%s:%s'
% {hostname, port})
@base.skip_test('SKIP__TEST',
message='Test for Spark was skipped.')
def __run_HdfsLR_job(self, master_ip, namenode_ip, namenode_port, master_port, filename):
self.execute_command(
'./spark/run-example org.apache.spark.examples.JavaHdfsLR spark://%s:%s hdfs://%s:%s/%s'
% {master_ip, master_port, namenode_ip, namenode_port, filename})
def __copy_data_to_Hdfs(self, local_file_name, remote_file_name):
self.execute_command('sudo hdfs dfs -copyFromLocal %s %s' %
{local_file_name, remote_file_name})
def _spark_testing(self, cluster_info):
plugin_config = cluster_info['plugin_config']
node_count = cluster_info['node_info']['node_count']
namenode_ip = cluster_info['node_info']['namenode_ip']
masternode_ip = cluster_info['node_info']['master_ip']
namenode_port = cluster_info['plugin_config']['HADOOP_PROCESSES_WITH_PORTS']['namenode']
masternode_port = cluster_info['plugin_config']['SPARK_MASTER_PORT']
# Test standalone Spark job (without HDFS)
self.open_ssh_connection(masternode_ip, plugin_config.NODE_USERNAME)
self.__run_RL_job(masternode_ip, masternode_port)
self.close_ssh_connection()
# Test Spark job with HDFS data
self.open_ssh_connection(masternode_ip, plugin_config.NODE_USERNAME)
self.__copy_data_to_Hdfs('/home/ubuntu/spark/examples_data/lr_data.txt', '/')
self.__run_HdfsLR_job(masternode_ip, namenode_ip,
namenode_port, masternode_port, 'lr_data.txt')
self.close_ssh_connection()
| apache-2.0 | Python |
5f42f76ffd11e82d51a334b91d64723388ca4a0d | Add RSS Feed Provider docs | michaelkuty/django-newswall,registerguard/django-newswall,matthiask/django-newswall,HerraLampila/django-newswall,registerguard/django-newswall,HerraLampila/django-newswall,michaelkuty/django-newswall,matthiask/django-newswall | newswall/providers/feed.py | newswall/providers/feed.py | """
RSS Feed Provider
=================
Required configuration keys::
{
"provider": "newswall.providers.feed",
"source": "http://twitter.com/statuses/user_timeline/feinheit.rss"
}
"""
from datetime import datetime
import feedparser
import time
from newswall.providers.base import ProviderBase
class Provider(ProviderBase):
def update(self):
feed = feedparser.parse(self.config['source'])
for entry in feed['entries']:
self.create_story(entry.link,
title=entry.title,
body=entry.description,
timestamp=datetime.fromtimestamp(time.mktime(entry.date_parsed)),
)
| from datetime import datetime
import feedparser
import time
from newswall.providers.base import ProviderBase
class Provider(ProviderBase):
def update(self):
feed = feedparser.parse(self.config['source'])
for entry in feed['entries']:
self.create_story(entry.link,
title=entry.title,
body=entry.description,
timestamp=datetime.fromtimestamp(time.mktime(entry.date_parsed)),
)
| bsd-3-clause | Python |
bd5b7001e38fbabf5bfee18747c0d192289e2284 | Bump to 3.2.2 proper | timgraham/django-cms,jproffitt/django-cms,FinalAngel/django-cms,czpython/django-cms,bittner/django-cms,mkoistinen/django-cms,datakortet/django-cms,benzkji/django-cms,FinalAngel/django-cms,rsalmaso/django-cms,mkoistinen/django-cms,bittner/django-cms,jsma/django-cms,czpython/django-cms,jproffitt/django-cms,yakky/django-cms,netzkolchose/django-cms,benzkji/django-cms,vxsx/django-cms,vxsx/django-cms,nimbis/django-cms,divio/django-cms,FinalAngel/django-cms,datakortet/django-cms,netzkolchose/django-cms,netzkolchose/django-cms,mkoistinen/django-cms,benzkji/django-cms,evildmp/django-cms,timgraham/django-cms,mkoistinen/django-cms,divio/django-cms,czpython/django-cms,czpython/django-cms,datakortet/django-cms,yakky/django-cms,datakortet/django-cms,rsalmaso/django-cms,jsma/django-cms,yakky/django-cms,rsalmaso/django-cms,jsma/django-cms,jproffitt/django-cms,vxsx/django-cms,vxsx/django-cms,FinalAngel/django-cms,nimbis/django-cms,evildmp/django-cms,bittner/django-cms,divio/django-cms,bittner/django-cms,rsalmaso/django-cms,nimbis/django-cms,jsma/django-cms,timgraham/django-cms,netzkolchose/django-cms,yakky/django-cms,divio/django-cms,evildmp/django-cms,jproffitt/django-cms,evildmp/django-cms,nimbis/django-cms,benzkji/django-cms | cms/__init__.py | cms/__init__.py | # -*- coding: utf-8 -*-
__version__ = '3.2.2'
default_app_config = 'cms.apps.CMSConfig'
| # -*- coding: utf-8 -*-
__version__ = '3.2.2.dev1'
default_app_config = 'cms.apps.CMSConfig'
| bsd-3-clause | Python |
02f82308f83ab9803bc80e195a257075837ea096 | Update @graknlabs_build_tools dependency to latest 'master' branch | lolski/grakn,graknlabs/grakn,graknlabs/grakn,lolski/grakn,lolski/grakn,graknlabs/grakn,lolski/grakn,graknlabs/grakn | dependencies/graknlabs/dependencies.bzl | dependencies/graknlabs/dependencies.bzl | #
# GRAKN.AI - THE KNOWLEDGE GRAPH
# Copyright (C) 2018 Grakn Labs Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def graknlabs_graql():
git_repository(
name = "graknlabs_graql",
remote = "https://github.com/graknlabs/graql",
commit = "3aca6d63b7c5b287c8f473aaf47646b55a7ba0f4", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_graql
)
def graknlabs_client_java():
git_repository(
name = "graknlabs_client_java",
remote = "https://github.com/graknlabs/client-java",
commit = "c2485b7321bcdff2475d0e3ae0cb7108b8d44a75",
)
def graknlabs_build_tools():
git_repository(
name = "graknlabs_build_tools",
remote = "https://github.com/graknlabs/build-tools",
commit = "02a56228e27397ad9173b7f07897101c4c357e04", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_build_tools
) | #
# GRAKN.AI - THE KNOWLEDGE GRAPH
# Copyright (C) 2018 Grakn Labs Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
def graknlabs_graql():
git_repository(
name = "graknlabs_graql",
remote = "https://github.com/graknlabs/graql",
commit = "3aca6d63b7c5b287c8f473aaf47646b55a7ba0f4", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_graql
)
def graknlabs_client_java():
git_repository(
name = "graknlabs_client_java",
remote = "https://github.com/graknlabs/client-java",
commit = "c2485b7321bcdff2475d0e3ae0cb7108b8d44a75",
)
def graknlabs_build_tools():
git_repository(
name = "graknlabs_build_tools",
remote = "https://github.com/graknlabs/build-tools",
commit = "32edb9d829fb24340a2c4678a5dd0b94b1217405", # sync-marker: do not remove this comment, this is used for sync-dependencies by @graknlabs_build_tools
) | agpl-3.0 | Python |
d4195ff992bb4776e5c0927b6e2eac254b214968 | Add missing return True to proposal conversion | rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son | app/soc/mapreduce/convert_proposal.py | app/soc/mapreduce/convert_proposal.py | #!/usr/bin/python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for data seeding operations.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import logging
from google.appengine.ext import db
from google.appengine.ext.mapreduce import operation
from soc.modules.gsoc.models.score import GSoCScore
from soc.modules.gsoc.models.proposal import GSoCProposal
def process(proposal_key):
def update_proposal_txn():
proposal = db.get(proposal_key)
if not proposal:
logging.error("Missing profile for key ''." % proposal_key)
return False
number = db.Query(GSoCScore).ancestor(proposal).count()
proposal.nr_scores = number
proposal.put()
return True
if db.run_in_transaction(update_proposal_txn):
yield operation.counters.Increment("proposals_updated")
else:
yield operation.counters.Increment("missing_proposal")
| #!/usr/bin/python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for data seeding operations.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import logging
from google.appengine.ext import db
from google.appengine.ext.mapreduce import operation
from soc.modules.gsoc.models.score import GSoCScore
from soc.modules.gsoc.models.proposal import GSoCProposal
def process(proposal_key):
def update_proposal_txn():
proposal = db.get(proposal_key)
if not proposal:
logging.error("Missing profile for key ''." % proposal_key)
return False
number = db.Query(GSoCScore).ancestor(proposal).count()
proposal.nr_scores = number
proposal.put()
if db.run_in_transaction(update_proposal_txn):
yield operation.counters.Increment("proposals_updated")
else:
yield operation.counters.Increment("missing_proposal")
| apache-2.0 | Python |
c6362677dd8703cf9934ada5cfdcebf5a7dd7d94 | Add comment for new `testsetup` directive [skip ci] | bsipocz/astropy-helpers,Cadair/astropy-helpers,bsipocz/astropy-helpers,bsipocz/astropy-helpers,Cadair/astropy-helpers,dpshelio/astropy-helpers,astropy/astropy-helpers,dpshelio/astropy-helpers,astropy/astropy-helpers | astropy_helpers/sphinx/ext/doctest.py | astropy_helpers/sphinx/ext/doctest.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of three directives that allow us to insert metadata
about doctests into the .rst files so the testing framework knows
which tests to skip.
This is quite different from the doctest extension in Sphinx itself,
which actually does something. For astropy, all of the testing is
centrally managed from py.test and Sphinx is not used for running
tests.
"""
import re
from docutils.nodes import literal_block
from docutils.parsers.rst import Directive
class DoctestSkipDirective(Directive):
has_content = True
def run(self):
# Check if there is any valid argument, and skip it. Currently only
# 'win32' is supported in astropy.tests.pytest_plugins.
if re.match('win32', self.content[0]):
self.content = self.content[2:]
code = '\n'.join(self.content)
return [literal_block(code, code)]
class DoctestOmitDirective(Directive):
has_content = True
def run(self):
# Simply do not add any content when this directive is encountered
return []
class DoctestRequiresDirective(DoctestSkipDirective):
# This is silly, but we really support an unbounded number of
# optional arguments
optional_arguments = 64
def setup(app):
app.add_directive('doctest-requires', DoctestRequiresDirective)
app.add_directive('doctest-skip', DoctestSkipDirective)
app.add_directive('doctest-skip-all', DoctestSkipDirective)
app.add_directive('doctest', DoctestSkipDirective)
# Code blocks that use this directive will not appear in the generated
# documentation. This is intended to hide boilerplate code that is only
# useful for testing documentation using doctest, but does not actually
# belong in the documentation itself.
app.add_directive('testsetup', DoctestOmitDirective)
return {'parallel_read_safe': True,
'parallel_write_safe': True}
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of three directives that allow us to insert metadata
about doctests into the .rst files so the testing framework knows
which tests to skip.
This is quite different from the doctest extension in Sphinx itself,
which actually does something. For astropy, all of the testing is
centrally managed from py.test and Sphinx is not used for running
tests.
"""
import re
from docutils.nodes import literal_block
from docutils.parsers.rst import Directive
class DoctestSkipDirective(Directive):
has_content = True
def run(self):
# Check if there is any valid argument, and skip it. Currently only
# 'win32' is supported in astropy.tests.pytest_plugins.
if re.match('win32', self.content[0]):
self.content = self.content[2:]
code = '\n'.join(self.content)
return [literal_block(code, code)]
class DoctestOmitDirective(Directive):
has_content = True
def run(self):
# Simply do not add any content when this directive is encountered
return []
class DoctestRequiresDirective(DoctestSkipDirective):
# This is silly, but we really support an unbounded number of
# optional arguments
optional_arguments = 64
def setup(app):
app.add_directive('doctest-requires', DoctestRequiresDirective)
app.add_directive('doctest-skip', DoctestSkipDirective)
app.add_directive('doctest-skip-all', DoctestSkipDirective)
app.add_directive('doctest', DoctestSkipDirective)
app.add_directive('testsetup', DoctestOmitDirective)
return {'parallel_read_safe': True,
'parallel_write_safe': True}
| bsd-3-clause | Python |
603dbd5deb9c1008a8149308d7edd9aacc34d28c | Update test_pypi_helper.py | sdpython/pyquickhelper,sdpython/pyquickhelper,sdpython/pyquickhelper,sdpython/pyquickhelper | _unittests/ut_loghelper/test_pypi_helper.py | _unittests/ut_loghelper/test_pypi_helper.py | """
@brief test log(time=42s)
"""
import sys
import os
import unittest
import datetime
if "temp_" in os.path.abspath(__file__):
raise ImportError(
"this file should not be imported in that location: " +
os.path.abspath(__file__))
from pyquickhelper.pycode import ExtTestCase, skipif_circleci
from pyquickhelper.loghelper import fLOG, enumerate_pypi_versions_date
class TestPypiHelper(unittest.TestCase):
@skipif_circleci("connectivity issue")
def test_clone_repo(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
iter = enumerate_pypi_versions_date('pyquickhelper')
res = []
for it in iter:
res.append(it)
if len(res) >= 2:
break
self.assertEqual(len(res), 2)
self.assertIsInstance(res[0][0], datetime.datetime)
self.assertGreater(res[0][2], 0)
self.assertIn('.', res[0][1])
if __name__ == "__main__":
unittest.main()
| """
@brief test log(time=42s)
"""
import sys
import os
import unittest
import datetime
if "temp_" in os.path.abspath(__file__):
raise ImportError(
"this file should not be imported in that location: " +
os.path.abspath(__file__))
from pyquickhelper.loghelper import fLOG
from pyquickhelper.loghelper import enumerate_pypi_versions_date
class TestPypiHelper(unittest.TestCase):
def test_clone_repo(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
iter = enumerate_pypi_versions_date('pyquickhelper')
res = []
for it in iter:
res.append(it)
if len(res) >= 2:
break
self.assertEqual(len(res), 2)
self.assertIsInstance(res[0][0], datetime.datetime)
self.assertGreater(res[0][2], 0)
self.assertIn('.', res[0][1])
if __name__ == "__main__":
unittest.main()
| mit | Python |
c47279b32af2fac33ef50ed9cad454896951e0f1 | update notrebooks | sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs,sdpython/ensae_teaching_cs | _unittests/ut_notebooks/test_1A_notebook.py | _unittests/ut_notebooks/test_1A_notebook.py | #-*- coding: utf-8 -*-
"""
@brief test log(time=50s)
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder, add_missing_development_version
class TestNotebookRunner1a_ (unittest.TestCase):
def setUp(self):
add_missing_development_version(["pymyinstall", "pyensae", "pymmails"],
__file__, hide=True)
def test_notebook_runner_1a(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
from src.ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_1a, unittest_raise_exception_notebook
temp = get_temp_folder(__file__, "temp_notebook1a_")
keepnote = ls_notebooks("1a")
assert len(keepnote) > 0
res = execute_notebooks(temp, keepnote,
lambda i, n: "deviner" not in n,
fLOG=fLOG,
clean_function=clean_function_1a)
unittest_raise_exception_notebook(res, fLOG)
if __name__ == "__main__":
unittest.main()
| #-*- coding: utf-8 -*-
"""
@brief test log(time=60s)
"""
import sys
import os
import unittest
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
try:
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import get_temp_folder
from src.ensae_teaching_cs.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_1a, unittest_raise_exception_notebook
class TestNotebookRunner1a_ (unittest.TestCase):
def test_notebook_runner_1a(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
temp = get_temp_folder(__file__, "temp_notebook1a_")
keepnote = ls_notebooks("1a")
assert len(keepnote) > 0
res = execute_notebooks(temp, keepnote,
lambda i, n: "deviner" not in n,
fLOG=fLOG,
clean_function=clean_function_1a)
unittest_raise_exception_notebook(res, fLOG)
if __name__ == "__main__":
unittest.main()
| mit | Python |
931e2d1e8ba3fd6b129a6d74e3a1ad9984c1938a | Add benchmark tests for numpy.random.randint. | shoyer/numpy,Dapid/numpy,jakirkham/numpy,WarrenWeckesser/numpy,chatcannon/numpy,WarrenWeckesser/numpy,b-carter/numpy,anntzer/numpy,ssanderson/numpy,simongibbons/numpy,nbeaver/numpy,SiccarPoint/numpy,numpy/numpy,Eric89GXL/numpy,kiwifb/numpy,seberg/numpy,rgommers/numpy,ESSS/numpy,shoyer/numpy,anntzer/numpy,utke1/numpy,dwillmer/numpy,grlee77/numpy,ddasilva/numpy,charris/numpy,tacaswell/numpy,simongibbons/numpy,endolith/numpy,solarjoe/numpy,numpy/numpy,WarrenWeckesser/numpy,stuarteberg/numpy,SiccarPoint/numpy,mhvk/numpy,ahaldane/numpy,rgommers/numpy,bringingheavendown/numpy,anntzer/numpy,ContinuumIO/numpy,Eric89GXL/numpy,kiwifb/numpy,bringingheavendown/numpy,MSeifert04/numpy,solarjoe/numpy,ahaldane/numpy,jakirkham/numpy,maniteja123/numpy,anntzer/numpy,ssanderson/numpy,tacaswell/numpy,WarrenWeckesser/numpy,ContinuumIO/numpy,maniteja123/numpy,njase/numpy,jakirkham/numpy,maniteja123/numpy,drasmuss/numpy,tynn/numpy,shoyer/numpy,endolith/numpy,madphysicist/numpy,stuarteberg/numpy,madphysicist/numpy,jakirkham/numpy,abalkin/numpy,Dapid/numpy,pbrod/numpy,ContinuumIO/numpy,pdebuyl/numpy,pbrod/numpy,mattip/numpy,gmcastil/numpy,rherault-insa/numpy,stuarteberg/numpy,ESSS/numpy,njase/numpy,jonathanunderwood/numpy,jorisvandenbossche/numpy,gfyoung/numpy,b-carter/numpy,jorisvandenbossche/numpy,grlee77/numpy,jonathanunderwood/numpy,pizzathief/numpy,seberg/numpy,drasmuss/numpy,skwbc/numpy,skwbc/numpy,grlee77/numpy,Eric89GXL/numpy,AustereCuriosity/numpy,gfyoung/numpy,SiccarPoint/numpy,pbrod/numpy,rherault-insa/numpy,dwillmer/numpy,ddasilva/numpy,charris/numpy,simongibbons/numpy,chiffa/numpy,chatcannon/numpy,simongibbons/numpy,argriffing/numpy,mhvk/numpy,shoyer/numpy,njase/numpy,grlee77/numpy,pbrod/numpy,WarrenWeckesser/numpy,pizzathief/numpy,pizzathief/numpy,SiccarPoint/numpy,dwillmer/numpy,MSeifert04/numpy,MSeifert04/numpy,seberg/numpy,joferkington/numpy,MSeifert04/numpy,skwbc/numpy,joferkington/numpy,nbeaver/numpy,pdebuyl/numpy,abalkin/numpy,bertrand-l/numpy,madphysicist/numpy,pdebuyl/numpy,bertrand-l/numpy,rherault-insa/numpy,rgommers/numpy,gmcastil/numpy,dwillmer/numpy,tacaswell/numpy,drasmuss/numpy,seberg/numpy,chiffa/numpy,jakirkham/numpy,endolith/numpy,pbrod/numpy,mhvk/numpy,pdebuyl/numpy,mhvk/numpy,charris/numpy,argriffing/numpy,gfyoung/numpy,chatcannon/numpy,pizzathief/numpy,AustereCuriosity/numpy,stuarteberg/numpy,charris/numpy,MSeifert04/numpy,bringingheavendown/numpy,joferkington/numpy,shoyer/numpy,numpy/numpy,jorisvandenbossche/numpy,Dapid/numpy,simongibbons/numpy,mhvk/numpy,mattip/numpy,jorisvandenbossche/numpy,endolith/numpy,ESSS/numpy,behzadnouri/numpy,chiffa/numpy,kiwifb/numpy,argriffing/numpy,jorisvandenbossche/numpy,joferkington/numpy,behzadnouri/numpy,AustereCuriosity/numpy,utke1/numpy,tynn/numpy,grlee77/numpy,ssanderson/numpy,behzadnouri/numpy,madphysicist/numpy,mattip/numpy,Eric89GXL/numpy,ahaldane/numpy,jonathanunderwood/numpy,abalkin/numpy,ahaldane/numpy,madphysicist/numpy,solarjoe/numpy,utke1/numpy,gmcastil/numpy,ddasilva/numpy,numpy/numpy,tynn/numpy,b-carter/numpy,pizzathief/numpy,mattip/numpy,ahaldane/numpy,bertrand-l/numpy,rgommers/numpy,nbeaver/numpy | benchmarks/benchmarks/bench_random.py | benchmarks/benchmarks/bench_random.py | from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
from numpy.lib import NumpyVersion
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
'poisson 10']
def setup(self, name):
items = name.split()
name = items.pop(0)
params = [float(x) for x in items]
self.func = getattr(np.random, name)
self.params = tuple(params) + ((100, 100),)
def time_rng(self, name):
self.func(*self.params)
class Shuffle(Benchmark):
def setup(self):
self.a = np.arange(100000)
def time_100000(self):
np.random.shuffle(self.a)
class Randint(Benchmark):
def time_randint_fast(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30, size=10**5)
def time_randint_slow(self):
"""Compare to uint32 below"""
np.random.randint(0, 2**30 + 1, size=10**5)
class Randint_dtype(Benchmark):
high = {
'bool': 1,
'uint8': 2**7,
'uint16': 2**15,
'uint32': 2**31,
'uint64': 2**63
}
param_names = ['dtype']
params = ['bool', 'uint8', 'uint16', 'uint32', 'uint64']
def setup(self, name):
if NumpyVersion(np.__version__) < '1.11.0.dev0':
raise NotImplementedError
def time_randint_fast(self, name):
high = self.high[name]
np.random.randint(0, high, size=10**5, dtype=name)
def time_randint_slow(self, name):
high = self.high[name]
np.random.randint(0, high + 1, size=10**5, dtype=name)
| from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
'poisson 10']
def setup(self, name):
items = name.split()
name = items.pop(0)
params = [float(x) for x in items]
self.func = getattr(np.random, name)
self.params = tuple(params) + ((100, 100),)
def time_rng(self, name):
self.func(*self.params)
class Shuffle(Benchmark):
def setup(self):
self.a = np.arange(100000)
def time_100000(self):
np.random.shuffle(self.a)
| bsd-3-clause | Python |
343eea168bfa0e84e173dd053f4740c42a58e3d4 | Upgrade BeautifulSoup | camptocamp/ngeo,kalbermattenm/ngeo,Jenselme/ngeo,camptocamp/ngeo,ger-benjamin/ngeo,camptocamp/ngeo,ger-benjamin/ngeo,adube/ngeo,camptocamp/ngeo,adube/ngeo,adube/ngeo,kalbermattenm/ngeo,adube/ngeo,Jenselme/ngeo,ger-benjamin/ngeo,camptocamp/ngeo,Jenselme/ngeo,kalbermattenm/ngeo,Jenselme/ngeo | buildtools/generate-examples-index.py | buildtools/generate-examples-index.py | import os
import bs4
from mako.template import Template
from argparse import ArgumentParser
if __name__ == '__main__':
examples = []
parser = ArgumentParser()
parser.add_argument(
'--app', action='append', nargs=3, metavar=('TITLE', 'HREF', 'DESC'),
help='Add an application', default=[],
)
parser.add_argument(
'template', nargs=1, help='The template',
)
parser.add_argument(
'example', nargs='+', help='Example file',
)
args = parser.parse_args()
for application in args.app:
examples.append({
'title': '<b>%s</b>' % application[0],
'href': application[1],
'desc': application[2],
})
for examplefile in args.example:
basename = os.path.basename(examplefile)
soup = bs4.BeautifulSoup(open(examplefile), "html.parser")
example = {}
if soup.title is None:
raise Exception('Example %s has no title.' % basename)
example['title'] = soup.title.string
example['href'] = basename
descelt = soup.find(id='desc')
if descelt is None:
raise Exception('Example %s has no description.' % basename)
example['desc'] = \
'' if descelt is None else \
''.join(map(str, descelt.contents))
examples.append(example)
template = Template(filename=args.template[0])
print(template.render(examples=examples))
| import os
import bs4
from mako.template import Template
from argparse import ArgumentParser
if __name__ == '__main__':
examples = []
parser = ArgumentParser()
parser.add_argument(
'--app', action='append', nargs=3, metavar=('TITLE', 'HREF', 'DESC'),
help='Add an application', default=[],
)
parser.add_argument(
'template', nargs=1, help='The template',
)
parser.add_argument(
'example', nargs='+', help='Example file',
)
args = parser.parse_args()
for application in args.app:
examples.append({
'title': '<b>%s</b>' % application[0],
'href': application[1],
'desc': application[2],
})
for examplefile in args.example:
basename = os.path.basename(examplefile)
soup = bs4.BeautifulSoup(open(examplefile))
example = {}
if soup.title is None:
raise Exception('Example %s has no title.' % basename)
example['title'] = soup.title.string
example['href'] = basename
descelt = soup.find(id='desc')
if descelt is None:
raise Exception('Example %s has no description.' % basename)
example['desc'] = \
'' if descelt is None else \
''.join(map(str, descelt.contents))
examples.append(example)
template = Template(filename=args.template[0])
print(template.render(examples=examples))
| mit | Python |
ca8e15d50b816c29fc2a0df27d0266826e38b5b8 | Update serializer to deal with new model | cellcounter/cellcounter,haematologic/cellcounter,cellcounter/cellcounter,cellcounter/cellcounter,haematologic/cellcounter,haematologic/cellcounter,cellcounter/cellcounter | cellcounter/statistics/serializers.py | cellcounter/statistics/serializers.py | from rest_framework.serializers import ModelSerializer
from .models import CountInstance
class CountInstanceSerializer(ModelSerializer):
class Meta:
model = CountInstance
fields = ('count_total',)
| from rest_framework.serializers import ModelSerializer
from .models import CountInstance
class CountInstanceSerializer(ModelSerializer):
class Meta:
model = CountInstance
| mit | Python |
56b11241909758f41ec924d761db3762c14698a6 | Correct source filtering | wfxiang08/changes,bowlofstew/changes,dropbox/changes,bowlofstew/changes,dropbox/changes,bowlofstew/changes,wfxiang08/changes,wfxiang08/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,wfxiang08/changes | changes/api/project_commit_details.py | changes/api/project_commit_details.py | from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload, contains_eager
from changes.api.base import APIView
from changes.models import Build, Project, Revision, Source
class ProjectCommitDetailsAPIView(APIView):
def get(self, project_id, commit_id):
project = Project.get(project_id)
if not project:
return '', 404
repo = project.repository
revision = Revision.query.filter(
Revision.repository_id == repo.id,
Revision.sha == commit_id,
).join(Revision.author).first()
if not revision:
return '', 404
build_list = list(Build.query.options(
joinedload('author'),
contains_eager('source'),
).join(
Source, Build.source_id == Source.id,
).filter(
Build.project_id == project.id,
Source.revision_sha == revision.sha,
Source.patch == None, # NOQA
).order_by(Build.date_created.desc()))[:100]
context = self.serialize(revision)
context.update({
'repository': repo,
'builds': build_list,
})
return self.respond(context)
def get_stream_channels(self, project_id, commit_id):
return [
'revisions:{0}:*'.format(commit_id),
]
| from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
from changes.api.base import APIView
from changes.models import Build, Project, Revision, Source
class ProjectCommitDetailsAPIView(APIView):
def get(self, project_id, commit_id):
project = Project.get(project_id)
if not project:
return '', 404
repo = project.repository
revision = Revision.query.filter(
Revision.repository_id == repo.id,
Revision.sha == commit_id,
).join(Revision.author).first()
if not revision:
return '', 404
build_list = list(Build.query.options(
joinedload('author'),
joinedload('source'),
).filter(
Build.project_id == project.id,
Source.revision_sha == revision.sha,
Source.patch == None, # NOQA
).order_by(Build.date_created.desc()))[:100]
context = self.serialize(revision)
context.update({
'repository': repo,
'builds': build_list,
})
return self.respond(context)
def get_stream_channels(self, project_id, commit_id):
return [
'revisions:{0}:*'.format(commit_id),
]
| apache-2.0 | Python |
b2f9f5ec0894fcba54efdc6f8e188b77ef2bedf7 | Make modules uninstallable | OCA/server-tools,OCA/server-tools,OCA/server-tools,YannickB/server-tools,YannickB/server-tools,YannickB/server-tools | base_report_auto_create_qweb/__openerp__.py | base_report_auto_create_qweb/__openerp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Report qweb auto generation",
"version": "8.0.1.0.0",
"depends": [
"report",
],
"external_dependencies": {
"python": [
"unidecode",
],
},
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Odoo Community Association (OCA), ",
"website": "http://www.odoomrp.com",
"license": "AGPL-3",
"contributors": [
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
"Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
"Ana Juaristi <anajuaristi@avanzosc.es>",
],
"category": "Tools",
"data": [
"wizard/report_duplicate_view.xml",
"views/report_xml_view.xml",
],
'installable': False,
}
| # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Report qweb auto generation",
"version": "8.0.1.0.0",
"depends": [
"report",
],
"external_dependencies": {
"python": [
"unidecode",
],
},
"author": "OdooMRP team, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Odoo Community Association (OCA), ",
"website": "http://www.odoomrp.com",
"license": "AGPL-3",
"contributors": [
"Oihane Crucelaegui <oihanecrucelaegi@avanzosc.es>",
"Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
"Ana Juaristi <anajuaristi@avanzosc.es>",
],
"category": "Tools",
"data": [
"wizard/report_duplicate_view.xml",
"views/report_xml_view.xml",
],
"installable": True,
}
| agpl-3.0 | Python |
cbdf70bff1078a80167cfc54caab6b621907ceb6 | migrate to pyiem and improve plotting some | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/current/today_high.py | scripts/current/today_high.py | # Output the 12z morning low temperature
import sys
import matplotlib.cm as cm
import numpy as np
import datetime
from pyiem.plot import MapPlot
now = datetime.datetime.now()
import psycopg2
IEM = psycopg2.connect(database='iem', host='iemdb', user='nobody')
icursor = IEM.cursor()
sql = """
select s.id,
x(s.geom) as lon, y(s.geom) as lat,
max_tmpf as high, s.network
from summary c, stations s
WHERE c.iemid = s.iemid and day = 'TODAY' and max_tmpf > -40
and s.network in ('IA_ASOS', 'AWOS', 'IL_ASOS','MO_ASOS','KS_ASOS',
'NE_ASOS','SD_ASOS','MN_ASOS','WI_ASOS') ORDER by high ASC
"""
lats = []
lons = []
vals = []
valmask = []
labels = []
icursor.execute(sql)
dsm = None
for row in icursor:
if row[0] == 'DSM':
dsm = row[3]
lats.append( row[2] )
lons.append( row[1] )
vals.append( row[3] )
labels.append( row[0] )
valmask.append( row[4] in ['AWOS', 'IA_ASOS'] )
if len(lats) < 4:
sys.exit()
m = MapPlot(sector='iowa',
title='%s Iowa ASOS/AWOS High Temperature' % (
now.strftime("%-d %b %Y"),),
subtitle='map valid: %s' % (now.strftime("%d %b %Y %-I:%M %p"), ))
bottom = int(dsm) - 15
top = int(dsm) + 15
bins = np.linspace( bottom, top, 11)
cmap = cm.get_cmap('jet')
m.contourf(lons, lats, vals, bins, units='F', cmap=cmap)
m.plot_values(lons, lats, vals, '%.0f', valmask=valmask, labels=labels)
m.drawcounties()
pqstr = "plot ac %s summary/iowa_asos_high.png iowa_asos_high.png png" % (
now.strftime("%Y%m%d%H%M"), )
m.postprocess(view=True, pqstr=pqstr)
m.close()
| # Output the 12z morning low temperature
import sys
import os, random
import iemdb
import iemplot
import mx.DateTime
now = mx.DateTime.now()
IEM = iemdb.connect('iem', bypass=True)
icursor = IEM.cursor()
sql = """
select s.id,
x(s.geom) as lon, y(s.geom) as lat,
max_tmpf as high, s.network
from summary_%s c, stations s
WHERE c.iemid = s.iemid and day = 'TODAY' and max_tmpf > -40
and s.network in ('IA_ASOS', 'AWOS', 'IL_ASOS','MO_ASOS','KS_ASOS',
'NE_ASOS','SD_ASOS','MN_ASOS','WI_ASOS')
""" % (now.year, )
lats = []
lons = []
vals = []
valmask = []
labels = []
icursor.execute(sql)
for row in icursor:
lats.append( row[2] )
lons.append( row[1] )
vals.append( row[3] )
labels.append( row[0] )
valmask.append( row[4] in ['AWOS', 'IA_ASOS'] )
if len(lats) < 4:
sys.exit()
cfg = {
'wkColorMap': 'BlAqGrYeOrRe',
'_showvalues' : True,
'_valuemask' : valmask,
'lbTitleString' : 'F',
'_format' : '%.0f',
'_title' : "Iowa ASOS/AWOS High Temperature",
'_valid' : "%s" % (now.strftime("%d %b %Y %-I:%M %p"), ),
'_labels' : labels
}
# Generates tmp.ps
tmpfp = iemplot.simple_contour(lons, lats, vals, cfg)
pqstr = "plot ac %s summary/iowa_asos_high.png iowa_asos_high.png png" % (
now.strftime("%Y%m%d%H%M"), )
iemplot.postprocess(tmpfp, pqstr)
#iemplot.makefeature(tmpfp)
| mit | Python |
6f4758b39c257dcabcabc6405cf400e8f6a358ea | Update develop version to 0.36.0 | conan-io/conan-package-tools | cpt/__init__.py | cpt/__init__.py |
__version__ = '0.36.0-dev'
def get_client_version():
from conans.model.version import Version
from conans import __version__ as client_version
from os import getenv
# It is a mess comparing dev versions, lets assume that the -dev is the further release
return Version(client_version.replace("-dev", ""))
|
__version__ = '0.35.0-dev'
def get_client_version():
from conans.model.version import Version
from conans import __version__ as client_version
from os import getenv
# It is a mess comparing dev versions, lets assume that the -dev is the further release
return Version(client_version.replace("-dev", ""))
| mit | Python |
f052ff59e99534c1e19559c4234a7916d40e606a | test against stories, nlu, domain | RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu | tests/docs/test_docs_training_data.py | tests/docs/test_docs_training_data.py | from pathlib import Path
from typing import List, Text
import re
import pytest
import rasa.shared.utils.validation
from rasa.shared.core.training_data.story_reader.yaml_story_reader import (
CORE_SCHEMA_FILE,
)
from rasa.shared.nlu.training_data.formats.rasa_yaml import NLU_SCHEMA_FILE
from rasa.shared.constants import (
DOMAIN_SCHEMA_FILE,
CONFIG_SCHEMA_FILE,
RESPONSES_SCHEMA_FILE,
)
DOCS_BASE_DIR = Path("docs/")
MDX_DOCS_FILES = list((DOCS_BASE_DIR / "docs").glob("**/*.mdx"))
# we're matching codeblocks with either `yaml-rasa` or `yml-rasa` types
# we support title or no title (you'll get a nice error message if there is a title)
TRAINING_DATA_CODEBLOCK_RE = re.compile(
r"```y(?:a)?ml-rasa(?: title=[\"'][^\"']+[\"'])?(?: \((?P<yaml_path>.+?)\))?[^\n]*\n(?P<codeblock>.*?)```",
re.DOTALL,
)
@pytest.mark.parametrize("mdx_file_path", MDX_DOCS_FILES)
def test_docs_training_data(mdx_file_path: Path):
with mdx_file_path.open("r") as handle:
mdx_content = handle.read()
matches = TRAINING_DATA_CODEBLOCK_RE.finditer(mdx_content)
lines_with_errors: List[Text] = []
for match in matches:
yaml_path = match.group("yaml_path")
if yaml_path:
with (DOCS_BASE_DIR / yaml_path).open("r") as handle:
codeblock = handle.read()
else:
codeblock = match.group("codeblock")
start_index = match.span()[0]
line_number = mdx_content.count("\n", 0, start_index) + 1
schemas_to_try = [NLU_SCHEMA_FILE, CORE_SCHEMA_FILE, DOMAIN_SCHEMA_FILE]
for schema in schemas_to_try:
try:
rasa.shared.utils.validation.validate_yaml_schema(codeblock, schema)
except ValueError:
lines_with_errors.append(str(line_number))
if lines_with_errors:
raise AssertionError(
f"({mdx_file_path}): Invalid training data found "
f"at line{'s' if len(lines_with_errors) > 1 else ''} {', '.join(lines_with_errors)}"
)
| from pathlib import Path
from typing import List, Text
import re
import pytest
from rasa.shared.nlu.training_data.formats import RasaYAMLReader
DOCS_BASE_DIR = Path("docs/")
MDX_DOCS_FILES = list((DOCS_BASE_DIR / "docs").glob("**/*.mdx"))
# we're matching codeblocks with either `yaml-rasa` or `yml-rasa` types
# we support title or no title (you'll get a nice error message if there is a title)
TRAINING_DATA_CODEBLOCK_RE = re.compile(
r"```y(?:a)?ml-rasa(?: title=[\"'][^\"']+[\"'])?(?: \((?P<yaml_path>.+?)\))?[^\n]*\n(?P<codeblock>.*?)```",
re.DOTALL,
)
@pytest.mark.parametrize("mdx_file_path", MDX_DOCS_FILES)
def test_docs_training_data(mdx_file_path: Path):
with mdx_file_path.open("r") as handle:
mdx_content = handle.read()
matches = TRAINING_DATA_CODEBLOCK_RE.finditer(mdx_content)
lines_with_errors: List[Text] = []
for match in matches:
yaml_path = match.group("yaml_path")
if yaml_path:
with (DOCS_BASE_DIR / yaml_path).open("r") as handle:
codeblock = handle.read()
else:
codeblock = match.group("codeblock")
start_index = match.span()[0]
line_number = mdx_content.count("\n", 0, start_index) + 1
try:
RasaYAMLReader.validate(codeblock)
except ValueError:
lines_with_errors.append(str(line_number))
if lines_with_errors:
raise AssertionError(
f"({mdx_file_path}): Invalid training data found "
f"at line{'s' if len(lines_with_errors) > 1 else ''} {', '.join(lines_with_errors)}"
)
| apache-2.0 | Python |
3245946ff25889149dc60cf6b1364bd09c953809 | Change url from relative to internal service endpoint | klmcwhirter/huntwords,klmcwhirter/huntwords,klmcwhirter/huntwords,klmcwhirter/huntwords | faas/puzzleboard-pop/puzzleboard_pop.py | faas/puzzleboard-pop/puzzleboard_pop.py | import json
from datetime import datetime
import requests
from .model.puzzleboard import pop_puzzleboard
class HuntwordsPuzzleBoardPopCommand(object):
'''Command class that processes puzzleboard-pop message'''
def run(self, jreq):
'''Command that processes puzzleboard-pop message'''
req = json.loads(jreq)
pboard = pop_puzzleboard(req['puzzle'])
jpboard = json.dumps(dict(pboard))
resp = {
'puzzleboard': jpboard,
'processed': {
'at': f'{datetime.now().isoformat()}',
'status': 'ok'
}
}
send_consumed(pboard)
return json.dumps(resp)
def send_consumed(pboard):
'''Send async request to generate a new copy'''
url = 'http://puzzleboard-consumed.openfaas-fn:8080'
data = f'{{"puzzle": "{pboard.puzzle.name}" }}'
requests.post(url, data)
| import json
from datetime import datetime
import requests
from .model.puzzleboard import pop_puzzleboard
class HuntwordsPuzzleBoardPopCommand(object):
'''Command class that processes puzzleboard-pop message'''
def run(self, jreq):
'''Command that processes puzzleboard-pop message'''
req = json.loads(jreq)
pboard = pop_puzzleboard(req['puzzle'])
jpboard = json.dumps(dict(pboard))
resp = {
'puzzleboard': jpboard,
'processed': {
'at': f'{datetime.now().isoformat()}',
'status': 'ok'
}
}
send_consumed(pboard)
return json.dumps(resp)
def send_consumed(pboard):
'''Send async request to generate a new copy'''
url = '/async-function/puzzleboard-consumed'
data = f'{{"puzzle": "{pboard.puzzle.name}" }}'
requests.post(url, data)
| mit | Python |
2c58fd815faae7e6b2aeb02a0d0b4aff9131c201 | Bring more data into the test failure | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/integration/states/test_cron.py | tests/integration/states/test_cron.py | """
Tests for the cron state
"""
import logging
import pprint
import salt.utils.platform
from tests.support.case import ModuleCase
from tests.support.helpers import skip_if_binaries_missing, slowTest
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
@skipIf(salt.utils.platform.is_windows(), "minion is windows")
@skip_if_binaries_missing("crontab")
class CronTest(ModuleCase):
"""
Validate the file state
"""
def setUp(self):
"""
Setup
"""
ret = self.run_state("user.present", name="test_cron_user")
assert ret
def tearDown(self):
"""
Teardown
"""
# Remove cron file
if salt.utils.platform.is_freebsd():
self.run_function("cmd.run", cmd="crontab -u test_cron_user -rf")
else:
self.run_function("cmd.run", cmd="crontab -u test_cron_user -r")
# Delete user
self.run_state("user.absent", name="test_cron_user")
@slowTest
def test_managed(self):
"""
file.managed
"""
ret = self.run_state(
"cron.file", name="salt://issue-46881/cron", user="test_cron_user"
)
assert ret
self.assertIn(
"cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file",
ret,
msg="Assertion failed. run_state retuned: {}".format(pprint.pformat(ret)),
)
state = ret["cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file"]
self.assertIn(
"changes",
state,
msg="Assertion failed. ret: {}".format(pprint.pformat(ret)),
)
self.assertIn(
"diff",
state["changes"],
msg="Assertion failed. ret: {}".format(pprint.pformat(ret)),
)
expected = "--- \n+++ \n@@ -1 +1,2 @@\n-\n+# Lines below here are managed by Salt, do not edit\n+@hourly touch /tmp/test-file\n"
self.assertEqual(
expected,
state["changes"]["diff"],
msg="Assertion failed. ret: {}".format(pprint.pformat(ret)),
)
| """
Tests for the cron state
"""
import logging
import salt.utils.platform
from tests.support.case import ModuleCase
from tests.support.helpers import slowTest
from tests.support.unit import skipIf
log = logging.getLogger(__name__)
@skipIf(salt.utils.platform.is_windows(), "minion is windows")
class CronTest(ModuleCase):
"""
Validate the file state
"""
def setUp(self):
"""
Setup
"""
self.run_state("user.present", name="test_cron_user")
def tearDown(self):
"""
Teardown
"""
# Remove cron file
if salt.utils.platform.is_freebsd():
self.run_function("cmd.run", cmd="crontab -u test_cron_user -rf")
else:
self.run_function("cmd.run", cmd="crontab -u test_cron_user -r")
# Delete user
self.run_state("user.absent", name="test_cron_user")
@slowTest
def test_managed(self):
"""
file.managed
"""
ret = self.run_state(
"cron.file", name="salt://issue-46881/cron", user="test_cron_user"
)
_expected = "--- \n+++ \n@@ -1 +1,2 @@\n-\n+# Lines below here are managed by Salt, do not edit\n+@hourly touch /tmp/test-file\n"
self.assertIn(
"changes",
ret["cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file"],
)
self.assertIn(
"diff",
ret["cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file"][
"changes"
],
)
self.assertEqual(
_expected,
ret["cron_|-salt://issue-46881/cron_|-salt://issue-46881/cron_|-file"][
"changes"
]["diff"],
)
| apache-2.0 | Python |
190c1a8b436f5ead14eddb1d2669c1ef7301159b | Test class for transformer | rsk-mind/rsk-mind-framework | tests/transformer/test_transformer.py | tests/transformer/test_transformer.py | import os
from nose.tools import assert_equals, assert_items_equal
from rsk_mind.dataset import Dataset
from rsk_mind.transformer import *
class CustomTransformer(Transformer):
class Feats:
a1 = Feat()
a2 = Feat()
f1 = CompositeFeat(['a1', 'a2'])
def get_a1(self, feat):
return [-float(feat)]
def get_a2(self, feat):
return [-float(feat)]
def get_f1(self, a1, a2):
return [float(a1) + float(a2)]
class TestDataset:
def setUp(self):
self.header = ['a1', 'a2', 'y']
self.rows = [['0', '0', '0'], ['0', '2', '0'], ['1', '0.5', '1'], ['0.9', '2', '1']]
self.transformer = CustomTransformer()
def tearDown(self):
# delete variables to release memory
del self.header
del self.rows
del self.transformer
def test_get_feats(self):
_expected_feats = ['a1', 'a2', 'f1']
_actual_feats = self.transformer.get_feats()
assert_items_equal(_expected_feats, _actual_feats)
def test_get_transformer_func(self):
_expected_value = [-2]
_actual_value = self.transformer.get_transformer_func('a1')(2)
assert_items_equal(_expected_value, _actual_value)
| import os
from nose.tools import assert_equals, assert_items_equal
from rsk_mind.dataset import Dataset
from rsk_mind.transformer import *
class CustomTransformer(Transformer):
class Feats():
a1 = Feat()
a2 = Feat()
f1 = CompositeFeat(['a1', 'a2'])
def get_a1(self, feat):
return [-float(feat)]
def get_a2(self, feat):
return [-float(feat)]
def get_f1(self, a1, a2):
return [float(a1) + float(a2)]
class TestDataset:
def setUp(self):
self.header = ['a1', 'a2', 'y']
self.rows = [['0', '0', '0'], ['0', '2', '0'], ['1', '0.5', '1'] ,['0.9', '2', '1']]
def tearDown(self):
# delete variables to release memory
del self.header
del self.rows
| mit | Python |
608dc0db688be1dabe3c6ba7647807f6697fcefe | Test image definition in SADL | chipster/chipster-tools,chipster/chipster-tools,chipster/chipster-tools,chipster/chipster-tools | tools/misc/python/test-data-in-out.py | tools/misc/python/test-data-in-out.py | # TOOL test-data-in-out.py: "Test data input and output in Python" (Data input output test.)
# INPUT input TYPE GENERIC
# OUTPUT output
# OUTPUT OPTIONAL missing_output.txt
# IMAGE chipster-tools-python
import shutil
shutil.copyfile('input', 'output')
| # TOOL test-data-in-out.py: "Test data input and output in Python" (Data input output test.)
# INPUT input TYPE GENERIC
# OUTPUT output
# OUTPUT OPTIONAL missing_output.txt
import shutil
shutil.copyfile('input', 'output')
| mit | Python |
52b1fa1edd1804945e0810369cf785d91a055710 | Fix typo in vis/__init__.py | tensorflow/docs,tensorflow/docs,tensorflow/docs | tools/tensorflow_docs/vis/__init__.py | tools/tensorflow_docs/vis/__init__.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visualization tools for tensorflow_docs.
Use this module for plotting and visualization code that is too long to inline
into a notebook.
"""
from tensorflow_docs.vis.webp_animation import Webp
| # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Visualization tools for tensorflow_docs.
Use this module for plotting and viaualization code that is too long to inline
into a notebook.
"""
from tensorflow_docs.vis.webp_animation import Webp
| apache-2.0 | Python |
5548e32a32bd1cd5951ce50e74c0fad944a1cf04 | Stop using the extra field for Colombia | ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube | ideascube/conf/idb_col_llavedelsaber.py | ideascube/conf/idb_col_llavedelsaber.py | """Configuration for Llave Del Saber, Colombia"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'es'
DOMAIN = 'bibliotecamovil.lan'
ALLOWED_HOSTS = ['.bibliotecamovil.lan', 'localhost']
USER_FORM_FIELDS = USER_FORM_FIELDS + (
(_('Personal informations'), ['disabilities']),
)
| """Configuration for Llave Del Saber, Colombia"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
LANGUAGE_CODE = 'es'
DOMAIN = 'bibliotecamovil.lan'
ALLOWED_HOSTS = ['.bibliotecamovil.lan', 'localhost']
USER_FORM_FIELDS = USER_FORM_FIELDS + (
(_('Personal informations'), ['extra', 'disabilities']),
)
USER_EXTRA_FIELD_LABEL = 'Etnicidad'
| agpl-3.0 | Python |
c1b433e5ed4c06b956b4d27f6da4e8b1dab54aaf | Fix issue in cloudwacth service credentials | rolandovillca/aws_samples_boto3_sdk | services/cloudwatch/sample.py | services/cloudwatch/sample.py | '''
===================================
Boto 3 - CloudWatch Service Example
===================================
This application implements the CloudWatch service that lets you gets
information from Amazon Cloud Watch. See the README for more details.
'''
import boto3
'''
Define your AWS credentials:
'''
AWS_ACCESS_KEY_ID = '<YOUR ACCESS KEY ID>'
AWS_SECRET_ACCESS_KEY = '<YOUR SECRET ACCESS KEY>'
'''
Connection to AWS.
'''
client = boto3.client('cloudwatch',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
# Main program:
if __name__ == '__main__':
print_results() | '''
===================================
Boto 3 - CloudWatch Service Example
===================================
This application implements the CloudWatch service that lets you gets
information from Amazon Cloud Watch. See the README for more details.
'''
import boto3
'''
Define your AWS credentials:
'''
AWS_ACCESS_KEY_ID = 'AKIAJM7BQ4WBJJSVU2JQ'
AWS_SECRET_ACCESS_KEY = 'Fq9GmwWEsvbcdHuh4McD+ZUmfowPKrnzFmhczV2U'
'''
Connection to AWS.
'''
client = boto3.client('cloudwatch',
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
# Main program:
if __name__ == '__main__':
print_results() | mit | Python |
a05a05f24c29dcf039e02b55c18c476dc69757df | Update repo entrypoint and remote_update stub. | RitwikGupta/picoCTF-shell-manager,cganas/picoCTF-shell-manager,RitwikGupta/picoCTF-shell-manager,cganas/picoCTF-shell-manager,picoCTF/picoCTF-shell-manager,cganas/picoCTF-shell-manager,cganas/picoCTF-shell-manager,RitwikGupta/picoCTF-shell-manager,picoCTF/picoCTF-shell-manager,picoCTF/picoCTF-shell-manager,picoCTF/picoCTF-shell-manager,RitwikGupta/picoCTF-shell-manager | shell_manager/problem_repo.py | shell_manager/problem_repo.py | """
Problem repository management for the shell manager.
"""
import spur, gzip
from shutil import copy2
from os.path import join
def update_repo(args):
"""
Main entrypoint for repo update operations.
"""
if args.repo_type == "local":
local_update(args.repository, args.package_paths)
else:
remote_update(args.repository, args.package_paths)
def remote_update(repo_ui, deb_paths=[]):
"""
Pushes packages to a remote deb repository.
Args:
repo_uri: location of the repository.
deb_paths: list of problem deb paths to copy.
"""
pass
def local_update(repo_path, deb_paths=[]):
"""
Updates a local deb repository by copying debs and running scanpackages.
Args:
repo_path: the path to the local repository.
dep_paths: list of problem deb paths to copy.
"""
[copy2(deb_path, repo_path) for deb_path in deb_paths]
shell = spur.LocalShell()
result = shell.run(["dpkg-scanpackages", ".", "/dev/null"], cwd=repo_path)
packages_path = join(repo_path, "Packages.gz")
with gzip.open(packages_path, "wb") as packages:
packages.write(result.output)
print("Updated problem repository.")
| """
Problem repository management for the shell manager.
"""
import spur, gzip
from shutil import copy2
from os.path import join
def local_update(repo_path, deb_paths=[]):
"""
Updates a local deb repository by copying debs and running scanpackages.
Args:
repo_path: the path to the local repository.
dep_paths: list of problem deb paths to copy.
"""
[copy2(deb_path, repo_path) for deb_path in deb_paths]
shell = spur.LocalShell()
result = shell.run(["dpkg-scanpackages", ".", "/dev/null"], cwd=repo_path)
packages_path = join(repo_path, "Packages.gz")
with gzip.open(packages_path, "wb") as packages:
packages.write(result.output)
print("Updated problem repository.")
| mit | Python |
f04291ad54e345f2265fda886326d3a6c4bd3438 | Update Meh.py | kallerdaller/Cogs-Yorkfield | Meh/Meh.py | Meh/Meh.py | import discord
from discord.ext import commands
class Mycog:
"""Tells a user that you said meh"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def meh(self, ctx, user : discord.Member):
"""Tags a person and tells them meh"""
#Your code will go here
message = ctx.message.id
await self.bot.delete_message(message)
author = ctx.message.author
await self.bot.say("Hey, " + user.mention + ", " + author.mention + " says 'Meh'")
def setup(bot):
bot.add_cog(Mycog(bot))
| import discord
from discord.ext import commands
class Mycog:
"""Tells a user that you said meh"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def meh(self, ctx, user : discord.Member):
"""Tags a person and tells them meh"""
#Your code will go here
message = discord.Message.id
await self.bot.delete_message(message)
author = ctx.message.author
await self.bot.say("Hey, " + user.mention + ", " + author.mention + " says 'Meh'")
def setup(bot):
bot.add_cog(Mycog(bot))
| mit | Python |
8cd8e0ecc3f878c13d1d3a8aac85798ecac11afb | remove tfidfpredicate from tfidf.py, its in blocking.py now | neozhangthe1/dedupe,tfmorris/dedupe,pombredanne/dedupe,nmiranda/dedupe,nmiranda/dedupe,datamade/dedupe,neozhangthe1/dedupe,datamade/dedupe,01-/dedupe,dedupeio/dedupe,davidkunio/dedupe,davidkunio/dedupe,01-/dedupe,tfmorris/dedupe,dedupeio/dedupe,pombredanne/dedupe | dedupe/tfidf.py | dedupe/tfidf.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from collections import defaultdict
from zope.index.text.parsetree import ParseError
logger = logging.getLogger(__name__)
#@profile
def makeCanopy(index, token_vector, threshold) :
canopies = {}
seen = set([])
corpus_ids = set(token_vector.keys())
while corpus_ids:
center_id = corpus_ids.pop()
center_vector = token_vector[center_id]
seen.add(center_id)
if not center_vector :
continue
try :
search_string = ' OR '.join(center_vector)
candidates = index.apply(search_string).byValue(threshold)
except ParseError :
continue
candidates = set(k for _, k in candidates) - seen
seen.update(candidates)
corpus_ids.difference_update(candidates)
for candidate_id in candidates :
canopies[candidate_id] = center_id
if candidates :
canopies[center_id] = center_id
return canopies
def _createCanopies(field_inverted_index,
token_vector,
threshold,
field) :
logger.info("Canopy: %s", str(threshold) + field)
canopy = makeCanopy(field_inverted_index, token_vector, threshold)
return ((threshold, field), canopy)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from collections import defaultdict
from zope.index.text.parsetree import ParseError
logger = logging.getLogger(__name__)
class TfidfPredicate(object):
type = "TfidfPredicate"
def __init__(self, threshold, field):
self.__name__ = 'TF-IDF:' + str(threshold) + field
self.field = field
self.canopy = defaultdict(int)
self.threshold = threshold
def __repr__(self) :
return self.__name__
def __call__(self, record) :
record_id = record[0]
center = self.canopy[record_id]
if center :
return (unicode(center),)
else :
return ()
#@profile
def makeCanopy(index, token_vector, threshold) :
canopies = {}
seen = set([])
corpus_ids = set(token_vector.keys())
while corpus_ids:
center_id = corpus_ids.pop()
center_vector = token_vector[center_id]
seen.add(center_id)
if not center_vector :
continue
try :
search_string = ' OR '.join(center_vector)
candidates = index.apply(search_string).byValue(threshold)
except ParseError :
continue
candidates = set(k for _, k in candidates) - seen
seen.update(candidates)
corpus_ids.difference_update(candidates)
for candidate_id in candidates :
canopies[candidate_id] = center_id
if candidates :
canopies[center_id] = center_id
return canopies
def _createCanopies(field_inverted_index,
token_vector,
threshold,
field) :
logger.info("Canopy: %s", str(threshold) + field)
canopy = makeCanopy(field_inverted_index, token_vector, threshold)
return ((threshold, field), canopy)
| mit | Python |
8282434f913e80bdf2467e130f0c727853651911 | Use new debug statement | bwc126/MLND-Subvocal | svr_record.py | svr_record.py | from tkinter import *
from pcf8591read import *
import threading
root = Tk()
def testytesty():
print ('oh look we used a callback')
# words from https://www.randomlists.com/random-words
words = ['dusty','march','direful','complete','superb','poised','wait','quaint','save','copy','interest','separate','bright','utter','bored','nondescript','license','vest','dance','money','languid','swim','enthusiastic','quartz','planes','spiritual','imperfect','coal','hobbies','sound','bow','squirrel','push','treatment','mine','precede','weather','amazing','round','stingy','signal','marry','country','uncle','dust','certain','loose','knock','advice','confuse','animated','loving','feeling','absorbing','trick','spare','rod','caption','raspy','throne','clumsy','vague','tow','hang','rely','tired','barbarous','pan','innocent','combative','low','rub','mixed','actually','faulty','thirsty','dam','doubtful','flowers','defective','frogs','outstanding','ducks','icicle','fry','load','cracker','efficient','hop','fax','fancy','reading','real','addicted','motion','clean','unsuitable','race','aspiring','gold','check','bouncy','regret','chop','various','eminent','wander','living','equable','cluttered','geese','tightfisted','aftermath','quince','division','board','amuck','pretty','extra-large','sun','person','magical','invent','flap','stomach','black','river','town','type','stereotyped','paddle','expand','puncture','cakes','measly','kitty','courageous','shoe','number','third','ugliest','haircut','increase','wrathful','jog','straw','whisper','kick','talented','curious']
reader = adc_reader()
# reader_worker = reader.run
def key(event):
current = 0
print ("pressed", repr(event.char))
suffix = 0
current_word = words[current]
filename = current_word + '-' + str(suffix)
r_t = threading.Thread(target=reader.run,args=[filename])
if event.char.isdigit():
suffix = event.char
print ('file series will take',suffix,'as suffix in filename')
if event.char==' ':
print('starting thread')
reader.record = True
# Get the next word
# Start the recording for that word
print(current_word)
# reader.run(filename)
# r_t.run([filename])
r_t.daemon = True
r_t.start()
# If the recording is running:
if event.char=='s':
print('terminating thread',current)
reader.record = False
# Stop the recording
current += 1
# Iterate the word
def callback(event):
frame.focus_set()
print ("clicked at", event.x, event.y)
frame = Frame(root, width=100, height=100)
frame.bind("<Key>", key)
frame.bind("<Button-1>", callback)
frame.pack()
root.mainloop()
| from tkinter import *
from pcf8591read import *
import threading
root = Tk()
def testytesty():
print ('oh look we used a callback')
# words from https://www.randomlists.com/random-words
words = ['dusty','march','direful','complete','superb','poised','wait','quaint','save','copy','interest','separate','bright','utter','bored','nondescript','license','vest','dance','money','languid','swim','enthusiastic','quartz','planes','spiritual','imperfect','coal','hobbies','sound','bow','squirrel','push','treatment','mine','precede','weather','amazing','round','stingy','signal','marry','country','uncle','dust','certain','loose','knock','advice','confuse','animated','loving','feeling','absorbing','trick','spare','rod','caption','raspy','throne','clumsy','vague','tow','hang','rely','tired','barbarous','pan','innocent','combative','low','rub','mixed','actually','faulty','thirsty','dam','doubtful','flowers','defective','frogs','outstanding','ducks','icicle','fry','load','cracker','efficient','hop','fax','fancy','reading','real','addicted','motion','clean','unsuitable','race','aspiring','gold','check','bouncy','regret','chop','various','eminent','wander','living','equable','cluttered','geese','tightfisted','aftermath','quince','division','board','amuck','pretty','extra-large','sun','person','magical','invent','flap','stomach','black','river','town','type','stereotyped','paddle','expand','puncture','cakes','measly','kitty','courageous','shoe','number','third','ugliest','haircut','increase','wrathful','jog','straw','whisper','kick','talented','curious']
reader = adc_reader()
# reader_worker = reader.run
def key(event):
current = 0
print ("pressed", repr(event.char))
suffix = 0
current_word = words[current]
filename = current_word + '-' + str(suffix)
r_t = threading.Thread(target=reader.run,args=[filename])
if event.char.isdigit():
suffix = event.char
print ('file series will take',suffix,'as suffix in filename')
if event.char==' ':
print('starting thread')
reader.record = True
# Get the next word
# Start the recording for that word
print(current_word)
# reader.run(filename)
# r_t.run([filename])
r_t.daemon = True
r_t.start()
# If the recording is running:
if event.char=='s':
print('terminating thread')
reader.record = False
# Stop the recording
current += 1
# Iterate the word
def callback(event):
frame.focus_set()
print ("clicked at", event.x, event.y)
frame = Frame(root, width=100, height=100)
frame.bind("<Key>", key)
frame.bind("<Button-1>", callback)
frame.pack()
root.mainloop()
| mit | Python |
b77b573a9b89aafdfdf321d6867f3891b5ed73be | Remove todo | wintoncode/winton-kafka-streams | winton_kafka_streams/state/in_memory_key_value_store.py | winton_kafka_streams/state/in_memory_key_value_store.py | class InMemoryKeyValueStore:
def __init__(self, name):
self.name = name
self.dict = {}
def initialise(self, context, root):
pass
def __setitem__(self, key, value):
self.dict[key] = value
def __getitem__(self, key):
return self.dict[key]
def get(self, key, default=None):
return self.dict.get(key, default)
def __delitem__(self, key):
v = self.dict[key]
del self.dict[key]
return v
| class InMemoryKeyValueStore:
def __init__(self, name):
self.name = name
self.dict = {}
def initialise(self, context, root):
pass
# TODO: register with context, passing restore callback
def __setitem__(self, key, value):
self.dict[key] = value
def __getitem__(self, key):
return self.dict[key]
def get(self, key, default=None):
return self.dict.get(key, default)
def __delitem__(self, key):
v = self.dict[key]
del self.dict[key]
return v
| apache-2.0 | Python |
e0f77da5acfacb95c8899f46f5847ff636bac3b3 | fix AttributeError when user is None | ResolveWang/WeiboSpider,ResolveWang/WeiboSpider,yzsz/weibospider,yzsz/weibospider | tasks/user.py | tasks/user.py | # coding:utf-8
from tasks.workers import app
from page_get import user as user_get
from db.seed_ids import (
get_seed_ids,
get_seed_by_id,
insert_seeds,
set_seed_other_crawled
)
@app.task(ignore_result=True)
def crawl_follower_fans(uid):
seed = get_seed_by_id(uid)
if seed.other_crawled == 0:
rs = user_get.get_fans_or_followers_ids(uid, 1)
rs.extend(user_get.get_fans_or_followers_ids(uid, 2))
datas = set(rs)
# If data already exits, just skip it
if datas:
insert_seeds(datas)
set_seed_other_crawled(uid)
@app.task(ignore_result=True)
def crawl_person_infos(uid):
"""
Crawl user info and their fans and followers
For the limit of weibo's backend, we can only crawl 5 pages of the fans and followers.
We also have no permissions to view enterprise's followers and fans info
:param uid: current user id
:return: None
"""
if not uid:
return
user, is_crawled = user_get.get_profile(uid)
# If it's enterprise user, just skip it
if user and user.verify_type == 2:
set_seed_other_crawled(uid)
return
# Crawl fans and followers
if not is_crawled:
app.send_task('tasks.user.crawl_follower_fans', args=(uid,), queue='fans_followers',
routing_key='for_fans_followers')
@app.task(ignore_result=True)
def excute_user_task():
seeds = get_seed_ids()
if seeds:
for seed in seeds:
app.send_task('tasks.user.crawl_person_infos', args=(seed.uid,), queue='user_crawler',
routing_key='for_user_info')
| # coding:utf-8
from tasks.workers import app
from page_get import user as user_get
from db.seed_ids import (
get_seed_ids,
get_seed_by_id,
insert_seeds,
set_seed_other_crawled
)
@app.task(ignore_result=True)
def crawl_follower_fans(uid):
seed = get_seed_by_id(uid)
if seed.other_crawled == 0:
rs = user_get.get_fans_or_followers_ids(uid, 1)
rs.extend(user_get.get_fans_or_followers_ids(uid, 2))
datas = set(rs)
# If data already exits, just skip it
if datas:
insert_seeds(datas)
set_seed_other_crawled(uid)
@app.task(ignore_result=True)
def crawl_person_infos(uid):
"""
Crawl user info and their fans and followers
For the limit of weibo's backend, we can only crawl 5 pages of the fans and followers.
We also have no permissions to view enterprise's followers and fans info
:param uid: current user id
:return: None
"""
if not uid:
return
user, is_crawled = user_get.get_profile(uid)
# If it's enterprise user, just skip it
if user.verify_type == 2:
set_seed_other_crawled(uid)
return
# Crawl fans and followers
if not is_crawled:
app.send_task('tasks.user.crawl_follower_fans', args=(uid,), queue='fans_followers',
routing_key='for_fans_followers')
@app.task(ignore_result=True)
def excute_user_task():
seeds = get_seed_ids()
if seeds:
for seed in seeds:
app.send_task('tasks.user.crawl_person_infos', args=(seed.uid,), queue='user_crawler',
routing_key='for_user_info')
| mit | Python |
ce9931b15aa4bc3986f36e8cfeae9dc8191eff48 | add some redirects for fun | sunlightlabs/tcamp,sunlightlabs/tcamp,sunlightlabs/tcamp,sunlightlabs/tcamp | tcamp/urls.py | tcamp/urls.py | from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
from django.contrib import admin
from sked.views import RedirectFromPk
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^logistics/$', RedirectView.as_view(url="/about/logistics/")),
url(r'^sessions/$', RedirectView.as_view(url="/schedule/")),
url(r'^sessions/(?P<pk>[\d]+)/$', RedirectFromPk.as_view()),
# FIXME: These will need to be updated next year.
url(r'^submit/$', RedirectView.as_view(url='/schedule/2013/new/')),
url(r'^wall/$', RedirectView.as_view(url='/schedule/2013/wall/?timeslots=11:30am,12:30pm,1:30pm,2:30pm,3:30pm,4:30pm&refresh=300000')),
url(r'^tv/$', RedirectView.as_view(url='/schedule/2013/tv/?timeslots=11:30am,12:30pm,1:30pm,2:30pm,3:30pm,4:30pm&refresh=300000')),
url(r'^treenav/', include('treenav.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/varnish/', include('varnishapp.urls')),
url(r'^staff/$', RedirectView.as_view(url="/staff/login")),
url(r'^staff/', include('googleauth.urls')),
url(r'^schedule/', include('sked.urls', namespace='sked')),
url(r'^sms/', include('sms.urls', namespace='sms')),
url(r'^api/', include('api.urls')),
url(r'^login/$', 'camp.views.login', name='login'),
url(r'^logged-in/$', 'camp.views.logged_in', name='logged_in'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/', 'redirect_field_name': 'next'}, name='logout'),
url(r'^sponsor-contact/$', 'camp.views.sponsor_contact', name='sponsor_contact'),
url(r'^', include('sfapp.urls')),
url(r'^', include('social.apps.django_app.urls', namespace='social')),
url(r'^', include('brainstorm.urls', namespace='brainstorm')),
)
| from django.conf.urls import patterns, include, url
from django.views.generic.base import RedirectView
from django.contrib import admin
from sked.views import RedirectFromPk
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^logistics/$', RedirectView.as_view(url="/about/logistics/")),
url(r'^sessions/$', RedirectView.as_view(url="/schedule/")),
url(r'^sessions/(?P<pk>[\d]+)/$', RedirectFromPk.as_view()),
url(r'^treenav/', include('treenav.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^admin/varnish/', include('varnishapp.urls')),
url(r'^staff/$', RedirectView.as_view(url="/staff/login")),
url(r'^staff/', include('googleauth.urls')),
url(r'^schedule/', include('sked.urls', namespace='sked')),
url(r'^sms/', include('sms.urls', namespace='sms')),
url(r'^api/', include('api.urls')),
url(r'^login/$', 'camp.views.login', name='login'),
url(r'^logged-in/$', 'camp.views.logged_in', name='logged_in'),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': '/', 'redirect_field_name': 'next'}, name='logout'),
url(r'^sponsor-contact/$', 'camp.views.sponsor_contact', name='sponsor_contact'),
url(r'^', include('sfapp.urls')),
url(r'^', include('social.apps.django_app.urls', namespace='social')),
url(r'^', include('brainstorm.urls', namespace='brainstorm')),
)
| bsd-3-clause | Python |
0f9b394a88ca68f2b8b43da0acf22840e1e97330 | Add height to arm_controller_test | ufieeehw/IEEE2016,ufieeehw/IEEE2016,ufieeehw/IEEE2016,ufieeehw/IEEE2016 | Simulator/scripts/arm_controller_test.py | Simulator/scripts/arm_controller_test.py | #!/usr/bin/env python
import tf
import rospy
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
import geometry_msgs.msg
import math
import numpy as np
#THIS IS MEANT FOR THE SIMULATOR ONLY!
#THE COMMANDS SENT TO THE CONTROLLER ARE NOT REALIZABLE AND WILL BREAK THINGS!
'''This publisher sends out PointStamped commands for the arm to spin in a circle.
This is a tool to used for assessing the simulator.
USAGE:
Set r to the desired radius
rosrun ieee2016_simulator arm_controller_test
'''
def point(xcomp, ycomp, r, z):
dist1 = (r*xcomp/10, r*ycomp/10, z)
dist2 = Point(*dist1)
dist = PointStamped()
dist.header.stamp = rospy.Time.now()
dist.header.frame_id = '/robot'
dist.point = dist2
des_pose.publish(dist)
return dist
if __name__== "__main__":
rospy.init_node('arm_controller_test')
while(True):
time = rospy.get_time()
des_pose = rospy.Publisher('/sim/arm_des_pose', PointStamped, queue_size=10)
try:
x=np.cos(time)
y=np.sin(time)
r=1.05
z=abs(0.5*np.cos(time))
point(x,y, r,z)
except rospy.ROSInterruptException:
pass
rospy.sleep(0.2)
| #!/usr/bin/env python
import tf
import rospy
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
import geometry_msgs.msg
import math
import numpy as np
#THIS IS MEANT FOR THE SIMULATOR ONLY!
#THE COMMANDS SENT TO THE CONTROLLER ARE NOT REALIZABLE AND WILL BREAK THINGS!
'''This publisher sends out PointStamped commands for the arm to spin in a circle.
This is a tool to used for assessing the simulator.
USAGE:
Set r to the desired radius
rosrun ieee2016_simulator arm_controller_test
'''
def point(xcomp, ycomp, r):
dist1 = (r*xcomp/10, r*ycomp/10, 0.0)
dist2 = Point(*dist1)
dist = PointStamped()
dist.header.stamp = rospy.Time.now()
dist.header.frame_id = '/robot'
dist.point = dist2
des_pose.publish(dist)
return dist
if __name__== "__main__":
rospy.init_node('arm_controller_test')
while(True):
time = rospy.get_time()
des_pose = rospy.Publisher('/sim/arm_des_pose', PointStamped, queue_size=10)
try:
x=np.cos(time)
y=np.sin(time)
r=3.05
point(x,y, r)
except rospy.ROSInterruptException:
pass
rospy.sleep(0.2)
| mit | Python |
9b6ed295cd8c3dbba1b88ce0fdccb497fe45d0b7 | Add TODO for cleaning out GitLab AMI lookup | gogoair/foremast,gogoair/foremast | src/foremast/utils/lookups.py | src/foremast/utils/lookups.py | # Foremast - Pipeline Tooling
#
# Copyright 2016 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lookup AMI ID from a simple name."""
import json
import logging
from base64 import b64decode
import gitlab
import requests
from ..consts import GIT_URL, GITLAB_TOKEN, AMI_JSON_URL
LOG = logging.getLogger(__name__)
def ami_lookup(region='us-east-1', name='tomcat8'):
"""Use _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided,
_name_ is returned as the ami id
Args:
region (str): AWS Region to find AMI ID.
name (str): Simple AMI base name to lookup.
Returns:
str: AMI ID for _name_ in _region_.
"""
if AMI_JSON_URL:
LOG.info("Getting AMI from %s", AMI_JSON_URL)
response = requests.get(AMI_JSON_URL)
assert response.ok, "Error getting ami info from {}".format(
AMI_JSON_URL)
ami_dict = response.json()
LOG.debug('Lookup AMI table: %s', ami_dict)
ami_id = ami_dict[region][name]
elif GITLAB_TOKEN:
# TODO: Remove GitLab repository in favour of JSON URL option.
LOG.info("Getting AMI from Gitlab")
server = gitlab.Gitlab(GIT_URL, token=GITLAB_TOKEN)
project_id = server.getproject('devops/ansible')['id']
ami_blob = server.getfile(project_id, 'scripts/{0}.json'.format(region),
'master')
ami_contents = b64decode(ami_blob['content']).decode()
ami_dict = json.loads(ami_contents)
LOG.debug('Lookup AMI table: %s', ami_dict)
ami_id = ami_dict[name]
else:
ami_id = name
LOG.info('Using AMI: %s', ami_id)
return ami_id
| # Foremast - Pipeline Tooling
#
# Copyright 2016 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lookup AMI ID from a simple name."""
import json
import logging
from base64 import b64decode
import gitlab
import requests
from ..consts import GIT_URL, GITLAB_TOKEN, AMI_JSON_URL
LOG = logging.getLogger(__name__)
def ami_lookup(region='us-east-1', name='tomcat8'):
"""Use _name_ to find AMI ID. If no ami_base_url or gitlab_token is provided,
_name_ is returned as the ami id
Args:
region (str): AWS Region to find AMI ID.
name (str): Simple AMI base name to lookup.
Returns:
str: AMI ID for _name_ in _region_.
"""
if AMI_JSON_URL:
LOG.info("Getting AMI from %s", AMI_JSON_URL)
response = requests.get(AMI_JSON_URL)
assert response.ok, "Error getting ami info from {}".format(
AMI_JSON_URL)
ami_dict = response.json()
LOG.debug('Lookup AMI table: %s', ami_dict)
ami_id = ami_dict[region][name]
elif GITLAB_TOKEN:
LOG.info("Getting AMI from Gitlab")
server = gitlab.Gitlab(GIT_URL, token=GITLAB_TOKEN)
project_id = server.getproject('devops/ansible')['id']
ami_blob = server.getfile(project_id, 'scripts/{0}.json'.format(region),
'master')
ami_contents = b64decode(ami_blob['content']).decode()
ami_dict = json.loads(ami_contents)
LOG.debug('Lookup AMI table: %s', ami_dict)
ami_id = ami_dict[name]
else:
ami_id = name
LOG.info('Using AMI: %s', ami_id)
return ami_id
| apache-2.0 | Python |
36dedd5c1aa8dd36a323990cd2a9d131b845dbed | Add unit tests (incomplete coverage) | mrestko/img-dl | test/tests.py | test/tests.py | from imgdl import img_dl
class TestCreateFolderName(object):
def sub_title_and_key(self, title, album_key):
return '{0} ({1})'.format(title, album_key)
def test_no_change_to_valid_title(self):
album_key = 'XbUFk'
title_1 = 'A Valid title'
name_1 = img_dl.create_folder_name(title_1, album_key)
assert name_1 == self.sub_title_and_key(title_1, album_key)
title_2 = 'a valid title 123'
name_2 = img_dl.create_folder_name(title_2, album_key)
assert name_2 == self.sub_title_and_key(title_2, album_key)
def test_if_title_is_none_only_album_key(self):
album_key = 'XbUFk'
title = None
name_1 = img_dl.create_folder_name(title, album_key)
assert name_1 == 'XbUFk'
def test_replace_exclamation_mark(self):
album_key = 'XbUFk'
title_1 = 'Test title!'
name_1 = img_dl.create_folder_name(title_1, album_key)
assert name_1 == self.sub_title_and_key('Test title', album_key)
def test_replace_question_mark(self):
album_key = 'XbUFk'
title_1 = 'Test title?'
name_1 = img_dl.create_folder_name(title_1, album_key)
assert name_1 == self.sub_title_and_key('Test title', album_key)
class TestSourceUrl(object):
def test_accept_imgur_urls(self):
assert img_dl.SourceUrl('http://imgur.com/a/B0s3o').is_imgur() == True
def test_extracts_album_key(self):
assert img_dl.SourceUrl('http://imgur.com/a/B0s3o').album_key == 'B0s3o'
def test_blog_url_format(self):
blog_url = img_dl.SourceUrl('http://imgur.com/a/B0s3o').blog_url
assert blog_url == 'https://imgur.com/a/B0s3o/layout/blog'
class TestAlbum(object):
def test_extracts_album_title(self):
source_url = img_dl.SourceUrl('http://imgur.com/gallery/nmHpn')
album = img_dl.Album(source_url)
title = album.album_title
assert title == 'Lion gets best foot massage ever!'
def test_counts_correct_number_of_images(self):
source_url = img_dl.SourceUrl('http://imgur.com/a/B0s3o')
album = img_dl.Album(source_url)
assert album.num_images == 5
| from imgdl import img_dl
class TestCreateFolderName(object):
def sub_title_and_key(self, title, album_key):
return '{0} ({1})'.format(title, album_key)
def test_no_change_to_valid_title(self):
album_key = 'XbUFk'
title_1 = 'A Valid title'
name_1 = img_dl.create_folder_name(title_1, album_key)
assert name_1 == self.sub_title_and_key(title_1, album_key)
title_2 = 'a valid title 123'
name_2 = img_dl.create_folder_name(title_2, album_key)
assert name_2 == self.sub_title_and_key(title_2, album_key)
def test_if_title_is_none_only_album_key(self):
album_key = 'XbUFk'
title = None
name_1 = img_dl.create_folder_name(title, album_key)
assert name_1 == 'XbUFk'
def test_replace_exclamation_mark(self):
album_key = 'XbUFk'
title_1 = 'Test title!'
name_1 = img_dl.create_folder_name(title_1, album_key)
assert name_1 == self.sub_title_and_key('Test title', album_key)
def test_replace_question_mark(self):
album_key = 'XbUFk'
title_1 = 'Test title?'
name_1 = img_dl.create_folder_name(title_1, album_key)
assert name_1 == self.sub_title_and_key('Test title', album_key)
| mit | Python |
6f7dba3beccca655b84879ccd0f3071d15536b2f | Add word_count parameter for lorem_ipsum generator | sarutobi/Rynda,sarutobi/flowofkindness,sarutobi/ritmserdtsa,sarutobi/ritmserdtsa,sarutobi/flowofkindness,sarutobi/Rynda,sarutobi/ritmserdtsa,sarutobi/ritmserdtsa,sarutobi/Rynda,sarutobi/flowofkindness,sarutobi/flowofkindness,sarutobi/Rynda | test/utils.py | test/utils.py | # coding: utf-8
import string
import random
def generate_string(str_len=6, src=string.ascii_lowercase):
return "".join(random.choice(src) for x in xrange(str_len))
def lorem_ipsum(words_count=30):
lorem = list([])
for i in xrange(words_count):
word_length = random.randint(4, 8)
lorem.append(generate_string(str_len=word_length))
return " ".join(lorem)
| # coding: utf-8
import string
import random
def generate_string(str_len=6, src=string.ascii_lowercase):
return "".join(random.choice(src) for x in xrange(str_len))
def lorem_ipsum():
words_count = random.randint(20, 50)
lorem = list([])
for i in xrange(words_count):
word_length = random.randint(4, 8)
lorem.append(generate_string(str_len=word_length))
return " ".join(lorem)
| mit | Python |
d80f7a89b5bc23802ad5ec9bb8cc6ad523976718 | Add rename branch locally test | eteq/gitnl,eteq/gitnl | test_gitnl.py | test_gitnl.py | from __future__ import print_function, division, absolute_import
import unittest
import gitnl
class GitnlTestCase(unittest.TestCase):
"""Tests from 'gitnl.py'."""
def test_push_remotename_branchfrom(self):
desired = 'push remotename branchfrom'
actual = gitnl.parse_to_git('push my branch branchfrom to a remote called remotename')
self.assertEqual(actual, desired)
def test_rename_branch(self):
desired = 'branch -m old_branch new_branch'
actual = gitnl.parse_to_git('branch rename branch old_branch to new_branch')
self.assertEqual(actual, desired)
if __name__ == '__main__':
unittest.main()
| from __future__ import print_function, division, absolute_import
import unittest
import gitnl
class GitnlTestCase(unittest.TestCase):
"""Tests from 'gitnl.py'."""
def test_push_remotename_branchfrom(self):
desired = 'push remotename branchfrom'
actual = gitnl.parse_to_git('push my branch branchfrom to a remote called remotename')
self.assertEqual(actual, desired)
if __name__ == '__main__':
unittest.main()
| mit | Python |
81ce69335caaef6920853c85b5801355374c9eeb | add test to test the accessing of a static file | axelhodler/notesdude,axelhodler/notesdude | test_notes.py | test_notes.py | from webtest import TestApp
import os
import re
import notes
import dbaccessor
DB = 'notes.db'
class TestWebserver():
def setUp(self):
self.bottle = TestApp(notes.app)
def test_route_index(self):
dba = dbaccessor.DbAccessor(DB)
dba.addNote('eins', 'lorem ipsum')
dba.addNote('zwei', 'blabla')
result = self.bottle.get('/')
assert result.status == '200 OK'
match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
assert match
def test_route_new(self):
result = self.bottle.get('/new')
assert result.status == '200 OK'
form = result.form
assert form.action == '/new'
assert form.method == 'GET'
assert form['title'].value == ''
assert form['content'].value == ''
def test_adding_new_note(self):
result = self.bottle.get('/new')
form = result.form
form['title'] = "testtitle"
form['content'] = "testcontent"
result = form.submit('save')
assert result.status == '200 OK'
# check if has been added to the DB
dba = dbaccessor.DbAccessor(DB)
notes = dba.getAllNotes()
assert len(notes) == 1
assert notes[0][0] == 1
assert notes[0][1] == 'testtitle'
assert notes[0][2] == 'testcontent'
assert result.body == "Note with id: 1 was added"
def test_accessing_static_file(self):
result = self.bottle.get('/static/css/bootstrap.min.css')
assert result.status == '200 OK'
def tearDown(self):
if os.path.isfile(DB):
os.remove(DB)
| from webtest import TestApp
import os
import re
import notes
import dbaccessor
DB = 'notes.db'
class TestWebserver():
def setUp(self):
self.bottle = TestApp(notes.app)
def test_route_index(self):
dba = dbaccessor.DbAccessor(DB)
dba.addNote('eins', 'lorem ipsum')
dba.addNote('zwei', 'blabla')
result = self.bottle.get('/')
assert result.status == '200 OK'
match = re.search(r'<td>blabla</td>\s*</tr>', result.body)
assert match
def test_route_new(self):
result = self.bottle.get('/new')
assert result.status == '200 OK'
form = result.form
assert form.action == '/new'
assert form.method == 'GET'
assert form['title'].value == ''
assert form['content'].value == ''
def test_adding_new_note(self):
result = self.bottle.get('/new')
form = result.form
form['title'] = "testtitle"
form['content'] = "testcontent"
result = form.submit('save')
assert result.status == '200 OK'
# check if has been added to the DB
dba = dbaccessor.DbAccessor(DB)
notes = dba.getAllNotes()
assert len(notes) == 1
assert notes[0][0] == 1
assert notes[0][1] == 'testtitle'
assert notes[0][2] == 'testcontent'
assert result.body == "Note with id: 1 was added"
def tearDown(self):
if os.path.isfile(DB):
os.remove(DB)
| mit | Python |
7b5132af5ababe3ec454b2b3c092d8e95ba5af2b | Increment port in tests so that tests can be parallelized | kiip/statsite | tests/base.py | tests/base.py | """
Contains the basic classes for test classes.
"""
import socket
import time
import threading
from graphite import GraphiteServer, GraphiteHandler
class IntegrationBase(object):
"""
This is the base class for integration tests of Statsite.
"""
DEFAULT_INTERVAL = 1
"The default flush interval for Statsite servers."
current_statsite_port = 16000
current_graphite_port = 12000
"""The current statsite/graphite ports to use when instantiating
servers. These will be incremented."""
def pytest_funcarg__client(self, request):
"""
This creates a pytest funcarg for a client to a running Statsite
server.
"""
host = "localhost"
port = IntegrationBase.current_statsite_port
IntegrationBase.current_statsite_port += 1
# TODO: Instantiate server
# Create the UDP client connected to the statsite server
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.connect((host, port))
return client
def pytest_funcarg__graphite(self, request):
"""
This creates a pytest funcarg for a fake Graphite server.
"""
host = "localhost"
port = IntegrationBase.current_graphite_port
IntegrationBase.current_graphite_port += 1
# Instantiate the actual TCP server
server = GraphiteServer(("localhost", port), GraphiteHandler)
server.allow_reuse_address = True
# Create the thread to run the server and start it up
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
# Add a finalizer to make sure our server is properly
# shutdown after every test
request.addfinalizer(lambda: server.shutdown())
return server
def after_flush_interval(self, callback, interval=None):
"""
This waits the configured flush interval prior to calling
the callback.
"""
# Wait the given interval
interval = self.DEFAULT_INTERVAL if interval is None else interval
time.sleep(interval)
# Call the callback
callback()
| """
Contains the basic classes for test classes.
"""
import socket
import time
import threading
from graphite import GraphiteServer, GraphiteHandler
class IntegrationBase(object):
"""
This is the base class for integration tests of Statsite.
"""
DEFAULT_INTERVAL = 1
def pytest_funcarg__client(self, request):
"""
This creates a pytest funcarg for a client to a running Statsite
server.
"""
host = "localhost"
port = 16000
# TODO: Instantiate server
# Create the UDP client connected to the statsite server
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
client.connect((host, port))
return client
def pytest_funcarg__graphite(self, request):
"""
This creates a pytest funcarg for a fake Graphite server.
"""
# Instantiate the actual TCP server
server = GraphiteServer(("localhost", 12000), GraphiteHandler)
server.allow_reuse_address = True
# Create the thread to run the server and start it up
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
# Add a finalizer to make sure our server is properly
# shutdown after every test
request.addfinalizer(lambda: server.shutdown())
return server
def after_flush_interval(self, callback, interval=None):
"""
This waits the configured flush interval prior to calling
the callback.
"""
# Wait the given interval
interval = self.DEFAULT_INTERVAL if interval is None else interval
time.sleep(interval)
# Call the callback
callback()
| bsd-3-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.